| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
| stringlengths 2-152 ⌀ | stringlengths 15-239 | stringlengths 0-58.4M | int64 0-58.4M | float64 0-1.81M | int64 0-12.7M | stringclasses 364 values |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/version.py |
# THIS FILE IS GENERATED FROM SCIPY SETUP.PY
short_version = '1.1.0'
version = '1.1.0'
full_version = '1.1.0'
git_revision = '14142ff70d84a6ce74044a27919c850e893648f7'
release = True
if not release:
version = full_version
| 228 | 19.818182 | 57 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/__init__.py |
"""
SciPy: A scientific computing package for Python
================================================
Documentation is available in the docstrings and
online at https://docs.scipy.org.
Contents
--------
SciPy imports all the functions from the NumPy namespace, and in
addition provides:
Subpackages
-----------
Using any of these subpackages requires an explicit import. For example,
``import scipy.cluster``.
::
cluster --- Vector Quantization / Kmeans
fftpack --- Discrete Fourier Transform algorithms
integrate --- Integration routines
interpolate --- Interpolation Tools
io --- Data input and output
linalg --- Linear algebra routines
linalg.blas --- Wrappers to BLAS library
linalg.lapack --- Wrappers to LAPACK library
misc --- Various utilities that don't have
another home.
ndimage --- n-dimensional image package
odr --- Orthogonal Distance Regression
optimize --- Optimization Tools
signal --- Signal Processing Tools
signal.windows --- Window functions
sparse --- Sparse Matrices
sparse.linalg --- Sparse Linear Algebra
sparse.linalg.dsolve --- Linear Solvers
 sparse.linalg.dsolve.umfpack --- Interface to the UMFPACK library
sparse.linalg.eigen --- Sparse Eigenvalue Solvers
sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned
Conjugate Gradient Method (LOBPCG)
spatial --- Spatial data structures and algorithms
special --- Special functions
stats --- Statistical Functions
Utility tools
-------------
::
test --- Run scipy unittests
show_config --- Show scipy build configuration
show_numpy_config --- Show numpy build configuration
__version__ --- Scipy version string
__numpy_version__ --- Numpy version string
"""
from __future__ import division, print_function, absolute_import
__all__ = ['test']
from numpy import show_config as show_numpy_config
if show_numpy_config is None:
raise ImportError(
"Cannot import scipy when running from numpy source directory.")
from numpy import __version__ as __numpy_version__
# Import numpy symbols to scipy name space
import numpy as _num
linalg = None
from numpy import *
from numpy.random import rand, randn
from numpy.fft import fft, ifft
from numpy.lib.scimath import *
# Allow distributors to run custom init code
from . import _distributor_init
__all__ += _num.__all__
__all__ += ['randn', 'rand', 'fft', 'ifft']
del _num
# Remove the linalg imported from numpy so that the scipy.linalg package can be
# imported.
del linalg
__all__.remove('linalg')
# We first need to detect if we're being called as part of the scipy
# setup procedure itself in a reliable manner.
try:
__SCIPY_SETUP__
except NameError:
__SCIPY_SETUP__ = False
if __SCIPY_SETUP__:
import sys as _sys
_sys.stderr.write('Running from scipy source directory.\n')
del _sys
else:
try:
from scipy.__config__ import show as show_config
except ImportError:
msg = """Error importing scipy: you cannot import scipy while
being in scipy source directory; please exit the scipy source
tree first, and relaunch your python interpreter."""
raise ImportError(msg)
from scipy.version import version as __version__
from scipy._lib._version import NumpyVersion as _NumpyVersion
if _NumpyVersion(__numpy_version__) < '1.8.2':
import warnings
warnings.warn("Numpy 1.8.2 or above is recommended for this version of "
"scipy (detected version %s)" % __numpy_version__,
UserWarning)
del _NumpyVersion
from scipy._lib._ccallback import LowLevelCallable
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 4,260 | 33.362903 | 80 | py |
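As the docstring above notes, SciPy subpackages are not loaded eagerly. A minimal sketch against the vendored copy (assumes the scipy 1.1.0 recorded in version.py above is importable):

```python
import scipy

print(scipy.__version__)   # '1.1.0' per the generated version.py above

# Subpackages require an explicit import before first use:
import scipy.cluster
print(scipy.cluster)       # now bound as an attribute of the scipy package
```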
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/__config__.py |
# This file is generated by /private/var/folders/bb/n7t3rs157850byt_jfdcq9k80000gn/T/pip-3to8_y5h-build/-c
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
openblas_lapack_info={}
lapack_mkl_info={}
atlas_3_10_threads_info={}
atlas_3_10_info={}
atlas_threads_info={}
atlas_info={}
lapack_opt_info={'extra_compile_args': ['-msse3'], 'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate'], 'define_macros': [('NO_ATLAS_INFO', 3), ('HAVE_CBLAS', None)]}
blas_mkl_info={}
openblas_info={}
atlas_3_10_blas_threads_info={}
atlas_3_10_blas_info={}
atlas_blas_threads_info={}
atlas_blas_info={}
blas_opt_info={'extra_compile_args': ['-msse3', '-I/System/Library/Frameworks/vecLib.framework/Headers'], 'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate'], 'define_macros': [('NO_ATLAS_INFO', 3), ('HAVE_CBLAS', None)]}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
| 1,381 | 38.485714 | 225 | py |
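A quick sketch of exercising the generated helpers above; `get_info` falls back to the module-level `*_info` dicts and returns `{}` for unknown names:

```python
import scipy.__config__ as cfg

cfg.show()                           # prints each *_info dict, or "NOT AVAILABLE"
print(cfg.get_info('lapack_opt'))    # resolves to the lapack_opt_info global
print(cfg.get_info('no_such_lib'))   # {} -- hypothetical name, not an error
```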
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/odepack.py |
# Author: Travis Oliphant
from __future__ import division, print_function, absolute_import
__all__ = ['odeint']
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
pass
_msgs = {2: "Integration successful.",
1: "Nothing was done; the integration time was 0.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
mxords=5, printmessg=0, tfirst=False):
"""
Integrate a system of ordinary differential equations.
.. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
differential equation.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
Solves the initial value problem for stiff or non-stiff systems
of first order ode-s::
dy/dt = func(y, t, ...) [or func(t, y, ...)]
where y can be a vector.
.. note:: By default, the required order of the first two arguments of
`func` are in the opposite order of the arguments in the system
definition function used by the `scipy.integrate.ode` class and
the function `scipy.integrate.solve_ivp`. To use a function with
the signature ``func(t, y, ...)``, the argument `tfirst` must be
set to ``True``.
Parameters
----------
func : callable(y, t, ...) or callable(t, y, ...)
Computes the derivative of y at t.
If the signature is ``callable(t, y, ...)``, then the argument
`tfirst` must be set ``True``.
y0 : array
Initial condition on y (can be a vector).
t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
args : tuple, optional
Extra arguments to pass to function.
Dfun : callable(y, t, ...) or callable(t, y, ...)
Gradient (Jacobian) of `func`.
If the signature is ``callable(t, y, ...)``, then the argument
`tfirst` must be set ``True``.
col_deriv : bool, optional
True if `Dfun` defines derivatives down columns (faster),
otherwise `Dfun` should define derivatives across rows.
full_output : bool, optional
True if to return a dictionary of optional outputs as the second output
printmessg : bool, optional
Whether to print the convergence message
tfirst: bool, optional
        If True, the first two arguments of `func` (and `Dfun`, if given)
        must be ``t, y`` instead of the default ``y, t``.
.. versionadded:: 1.1.0
Returns
-------
y : array, shape (len(t), len(y0))
Array containing the value of y for each desired time in t,
with the initial value `y0` in the first row.
infodict : dict, only returned if full_output == True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'hu' vector of step sizes successfully used for each time step.
'tcur' vector with the value of t reached for each time step.
(will always be at least as large as the input times).
'tolsf' vector of tolerance scale factors, greater than 1.0,
computed when a request for too much accuracy was detected.
'tsw' value of t at the time of the last method switch
(given for each time step)
'nst' cumulative number of time steps
'nfe' cumulative number of function evaluations for each time step
'nje' cumulative number of jacobian evaluations for each time step
'nqu' a vector of method orders for each successful step.
'imxer' index of the component of largest magnitude in the
weighted local error vector (e / ewt) on an error return, -1
otherwise.
'lenrw' the length of the double work array required.
'leniw' the length of integer work array required.
'mused' a vector of method indicators for each successful time step:
1: adams (nonstiff), 2: bdf (stiff)
======= ============================================================
Other Parameters
----------------
ml, mu : int, optional
        If either of these is given (as a non-negative int), then the
Jacobian is assumed to be banded. These give the number of
lower and upper non-zero diagonals in this banded matrix.
For the banded case, `Dfun` should return a matrix whose
rows contain the non-zero bands (starting with the lowest diagonal).
Thus, the return matrix `jac` from `Dfun` should have shape
``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
The data in `jac` must be stored such that ``jac[i - j + mu, j]``
holds the derivative of the `i`th equation with respect to the `j`th
state variable. If `col_deriv` is True, the transpose of this
`jac` must be returned.
rtol, atol : float, optional
The input parameters `rtol` and `atol` determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form ``max-norm of (e / ewt) <= 1``,
where ewt is a vector of positive error weights computed as
``ewt = rtol * abs(y) + atol``.
rtol and atol can be either vectors the same length as y or scalars.
Defaults to 1.49012e-8.
tcrit : ndarray, optional
Vector of critical points (e.g. singularities) where integration
care should be taken.
h0 : float, (0: solver-determined), optional
The step size to be attempted on the first step.
hmax : float, (0: solver-determined), optional
The maximum absolute step size allowed.
hmin : float, (0: solver-determined), optional
The minimum absolute step size allowed.
ixpr : bool, optional
Whether to generate extra printing at method switches.
mxstep : int, (0: solver-determined), optional
Maximum number of (internally defined) steps allowed for each
integration point in t.
mxhnil : int, (0: solver-determined), optional
Maximum number of messages printed.
mxordn : int, (0: solver-determined), optional
Maximum order to be allowed for the non-stiff (Adams) method.
mxords : int, (0: solver-determined), optional
Maximum order to be allowed for the stiff (BDF) method.
See Also
--------
solve_ivp : Solve an initial value problem for a system of ODEs.
ode : a more object-oriented integrator based on VODE.
quad : for finding the area under a curve.
Examples
--------
The second order differential equation for the angle `theta` of a
pendulum acted on by gravity with friction can be written::
theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
where `b` and `c` are positive constants, and a prime (') denotes a
derivative. To solve this equation with `odeint`, we must first convert
it to a system of first order equations. By defining the angular
velocity ``omega(t) = theta'(t)``, we obtain the system::
theta'(t) = omega(t)
omega'(t) = -b*omega(t) - c*sin(theta(t))
Let `y` be the vector [`theta`, `omega`]. We implement this system
in python as:
>>> def pend(y, t, b, c):
... theta, omega = y
... dydt = [omega, -b*omega - c*np.sin(theta)]
... return dydt
...
We assume the constants are `b` = 0.25 and `c` = 5.0:
>>> b = 0.25
>>> c = 5.0
For initial conditions, we assume the pendulum is nearly vertical
with `theta(0)` = `pi` - 0.1, and is initially at rest, so
`omega(0)` = 0. Then the vector of initial conditions is
>>> y0 = [np.pi - 0.1, 0.0]
We will generate a solution at 101 evenly spaced samples in the interval
0 <= `t` <= 10. So our array of times is:
>>> t = np.linspace(0, 10, 101)
Call `odeint` to generate the solution. To pass the parameters
`b` and `c` to `pend`, we give them to `odeint` using the `args`
argument.
>>> from scipy.integrate import odeint
>>> sol = odeint(pend, y0, t, args=(b, c))
The solution is an array with shape (101, 2). The first column
is `theta(t)`, and the second is `omega(t)`. The following code
plots both components.
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
>>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
>>> plt.legend(loc='best')
>>> plt.xlabel('t')
>>> plt.grid()
>>> plt.show()
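    As a sketch of the `tfirst` option described above (added in 1.1.0), the
    same system can be written with the ``(t, y)`` argument order used by
    `solve_ivp`:
    >>> def pend_tfirst(t, y, b, c):
    ...     return pend(y, t, b, c)
    >>> sol2 = odeint(pend_tfirst, y0, t, args=(b, c), tfirst=True)
    >>> np.allclose(sol, sol2)
    True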
"""
if ml is None:
ml = -1 # changed to zero inside function call
if mu is None:
mu = -1 # changed to zero inside function call
t = copy(t)
y0 = copy(y0)
output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
full_output, rtol, atol, tcrit, h0, hmax, hmin,
ixpr, mxstep, mxhnil, mxordn, mxords,
int(bool(tfirst)))
if output[-1] < 0:
warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
warnings.warn(warning_msg, ODEintWarning)
elif printmessg:
warning_msg = _msgs[output[-1]]
warnings.warn(warning_msg, ODEintWarning)
if full_output:
output[1]['message'] = _msgs[output[-1]]
output = output[:-1]
if len(output) == 1:
return output[0]
else:
return output
| 10,381 | 40.694779 | 102 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/setup.py |
from __future__ import division, print_function, absolute_import
import os
from os.path import join
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
config = Configuration('integrate', parent_package, top_path)
# Get a local copy of lapack_opt_info
lapack_opt = dict(get_info('lapack_opt',notfound_action=2))
# Pop off the libraries list so it can be combined with
# additional required libraries
lapack_libs = lapack_opt.pop('libraries', [])
mach_src = [join('mach','*.f')]
quadpack_src = [join('quadpack', '*.f')]
lsoda_src = [join('odepack', fn) for fn in [
'blkdta000.f', 'bnorm.f', 'cfode.f',
'ewset.f', 'fnorm.f', 'intdy.f',
'lsoda.f', 'prja.f', 'solsy.f', 'srcma.f',
'stoda.f', 'vmnorm.f', 'xerrwv.f', 'xsetf.f',
'xsetun.f']]
vode_src = [join('odepack', 'vode.f'), join('odepack', 'zvode.f')]
dop_src = [join('dop','*.f')]
quadpack_test_src = [join('tests','_test_multivariate.c')]
odeint_banded_test_src = [join('tests', 'banded5x5.f')]
config.add_library('mach', sources=mach_src,
config_fc={'noopt':(__file__,1)})
config.add_library('quadpack', sources=quadpack_src)
config.add_library('lsoda', sources=lsoda_src)
config.add_library('vode', sources=vode_src)
config.add_library('dop', sources=dop_src)
# Extensions
# quadpack:
include_dirs = [join(os.path.dirname(__file__), '..', '_lib', 'src')]
if 'include_dirs' in lapack_opt:
lapack_opt = dict(lapack_opt)
include_dirs.extend(lapack_opt.pop('include_dirs'))
config.add_extension('_quadpack',
sources=['_quadpackmodule.c'],
libraries=['quadpack', 'mach'] + lapack_libs,
depends=(['__quadpack.h']
+ quadpack_src + mach_src),
include_dirs=include_dirs,
**lapack_opt)
# odepack/lsoda-odeint
odepack_opts = lapack_opt.copy()
odepack_opts.update(numpy_nodepr_api)
config.add_extension('_odepack',
sources=['_odepackmodule.c'],
libraries=['lsoda', 'mach'] + lapack_libs,
depends=(lsoda_src + mach_src),
**odepack_opts)
# vode
config.add_extension('vode',
sources=['vode.pyf'],
libraries=['vode'] + lapack_libs,
depends=vode_src,
**lapack_opt)
# lsoda
config.add_extension('lsoda',
sources=['lsoda.pyf'],
libraries=['lsoda', 'mach'] + lapack_libs,
depends=(lsoda_src + mach_src),
**lapack_opt)
# dop
config.add_extension('_dop',
sources=['dop.pyf'],
libraries=['dop'],
depends=dop_src)
config.add_extension('_test_multivariate',
sources=quadpack_test_src)
# Fortran+f2py extension module for testing odeint.
config.add_extension('_test_odeint_banded',
sources=odeint_banded_test_src,
libraries=['lsoda', 'mach'] + lapack_libs,
depends=(lsoda_src + mach_src),
**lapack_opt)
config.add_subpackage('_ivp')
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 3,794 | 35.84466 | 73 | py |
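The `notfound_action=2` used above tells `numpy.distutils` to raise immediately when LAPACK cannot be located, failing the build early rather than producing a broken extension. A sketch of probing the same information outside the build (assumes an environment where `numpy.distutils` is still available):

```python
from numpy.distutils.system_info import get_info

# notfound_action: 0 = ignore, 1 = warn, 2 = raise when the resource is missing.
lapack_opt = dict(get_info('lapack_opt', notfound_action=2))
print(lapack_opt.get('libraries', []))
print(lapack_opt.get('extra_link_args', []))
```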
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/quadrature.py |
from __future__ import division, print_function, absolute_import
import numpy as np
import math
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
class AccuracyWarning(Warning):
pass
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
If integrating a vector-valued function, the returned array must have
shape ``(..., len(x))``.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
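    Examples
    --------
    A minimal sketch; the exact integral of ``sin`` over [0, pi] is 2:
    >>> from scipy.integrate import fixed_quad
    >>> import numpy as np
    >>> val, _ = fixed_quad(np.sin, 0.0, np.pi, n=5)
    >>> round(val, 6)
    2.0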
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
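    A short sketch, wrapping a scalar-only function:
    >>> import math
    >>> vfunc = vectorize1(math.sin, vec_func=False)
    >>> np.allclose(vfunc([0.0, math.pi / 2]), [0.0, 1.0])
    True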
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
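    Examples
    --------
    A short sketch; the estimate and the final difference are returned together:
    >>> from scipy.integrate import quadrature
    >>> import numpy as np
    >>> val, err = quadrature(np.exp, 0.0, 1.0)
    >>> round(val, 8), err < 1e-6
    (1.71828183, True)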
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, insert this value at the beginning of the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(0, 10)
>>> y = np.arange(0, 10)
>>> integrate.simps(y, x)
40.5
>>> y = np.power(x, 3)
>>> integrate.simps(y, x)
1642.5
>>> integrate.quad(lambda x: x**3, 0, 9)[0]
1640.25
>>> integrate.simps(y, x, even='first')
1644.5
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(10, 14.25, 0.25)
>>> y = np.arange(3, 12)
>>> integrate.romb(y)
56.0
>>> y = np.sin(np.power(x, 2.5))
>>> integrate.romb(y)
-0.742561336672229
>>> integrate.romb(y, show=True)
Richardson Extrapolation Table for Romberg Integration
====================================================================
-0.81576
4.63862 6.45674
-1.10581 -3.02062 -3.65245
-2.57379 -3.06311 -3.06595 -3.05664
-1.34093 -0.92997 -0.78776 -0.75160 -0.74256
====================================================================
-0.742561336672229
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in xrange(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in xrange(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="\n")
for i in xrange(k+1):
for j in xrange(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
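    A sketch: with k = 1 this is the classic Richardson step (4*c - b) / 3:
    >>> _romberg_diff(1.0, 2.0, 1)
    2.3333333333333335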
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in xrange(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in xrange(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in xrange(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in xrange(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
+ B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
where :math:`\\xi \\in [x_0,x_N]`
and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average samples spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
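    Examples
    --------
    A small sketch: the order-2 rule recovers Simpson's weights ``[1, 4, 1] / 3``:
    >>> from scipy.integrate import newton_cotes
    >>> an, B = newton_cotes(2, equal=1)
    >>> [round(w, 4) for w in an]
    [0.3333, 1.3333, 0.3333]
    >>> round(B, 6)
    -0.011111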
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
    except Exception:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
| 29,304 | 31.488914 | 103 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/_bvp.py |
"""Boundary value problem solver."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm, pinv
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse.linalg import splu
from scipy.optimize import OptimizeResult
EPS = np.finfo(float).eps
def estimate_fun_jac(fun, x, y, p, f0=None):
"""Estimate derivatives of an ODE system rhs with forward differences.
Returns
-------
df_dy : ndarray, shape (n, n, m)
Derivatives with respect to y. An element (i, j, q) corresponds to
d f_i(x_q, y_q) / d (y_q)_j.
df_dp : ndarray with shape (n, k, m) or None
Derivatives with respect to p. An element (i, j, q) corresponds to
d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
"""
n, m = y.shape
if f0 is None:
f0 = fun(x, y, p)
dtype = y.dtype
df_dy = np.empty((n, n, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(y))
for i in range(n):
y_new = y.copy()
y_new[i] += h[i]
hi = y_new[i] - y[i]
f_new = fun(x, y_new, p)
df_dy[:, i, :] = (f_new - f0) / hi
k = p.shape[0]
if k == 0:
df_dp = None
else:
df_dp = np.empty((n, k, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(p))
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
f_new = fun(x, y, p_new)
df_dp[:, i, :] = (f_new - f0) / hi
return df_dy, df_dp
def estimate_bc_jac(bc, ya, yb, p, bc0=None):
"""Estimate derivatives of boundary conditions with forward differences.
Returns
-------
dbc_dya : ndarray, shape (n + k, n)
Derivatives with respect to ya. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dyb : ndarray, shape (n + k, n)
Derivatives with respect to yb. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dp : ndarray with shape (n + k, k) or None
Derivatives with respect to p. An element (i, j) corresponds to
d bc_i / d p_j. If `p` is empty, None is returned.
"""
n = ya.shape[0]
k = p.shape[0]
if bc0 is None:
bc0 = bc(ya, yb, p)
dtype = ya.dtype
dbc_dya = np.empty((n, n + k), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(ya))
for i in range(n):
ya_new = ya.copy()
ya_new[i] += h[i]
hi = ya_new[i] - ya[i]
bc_new = bc(ya_new, yb, p)
dbc_dya[i] = (bc_new - bc0) / hi
dbc_dya = dbc_dya.T
h = EPS**0.5 * (1 + np.abs(yb))
dbc_dyb = np.empty((n, n + k), dtype=dtype)
for i in range(n):
yb_new = yb.copy()
yb_new[i] += h[i]
hi = yb_new[i] - yb[i]
bc_new = bc(ya, yb_new, p)
dbc_dyb[i] = (bc_new - bc0) / hi
dbc_dyb = dbc_dyb.T
if k == 0:
dbc_dp = None
else:
h = EPS**0.5 * (1 + np.abs(p))
dbc_dp = np.empty((k, n + k), dtype=dtype)
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
bc_new = bc(ya, yb, p_new)
dbc_dp[i] = (bc_new - bc0) / hi
dbc_dp = dbc_dp.T
return dbc_dya, dbc_dyb, dbc_dp
def compute_jac_indices(n, m, k):
"""Compute indices for the collocation system Jacobian construction.
See `construct_global_jac` for the explanation.
"""
i_col = np.repeat(np.arange((m - 1) * n), n)
j_col = (np.tile(np.arange(n), n * (m - 1)) +
np.repeat(np.arange(m - 1) * n, n**2))
i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
j_bc = np.tile(np.arange(n), n + k)
i_p_col = np.repeat(np.arange((m - 1) * n), k)
j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
j = np.hstack((j_col, j_col + n,
j_bc, j_bc + (m - 1) * n,
j_p_col, j_p_bc))
return i, j
def stacked_matmul(a, b):
"""Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
In our case a[i, :, :] and b[i, :, :] are always square.
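    A sketch of the contract:
    >>> a = np.arange(8.0).reshape(2, 2, 2)
    >>> np.allclose(stacked_matmul(a, a)[0], np.dot(a[0], a[0]))
    True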
"""
# Empirical optimization. Use outer Python loop and BLAS for large
# matrices, otherwise use a single einsum call.
if a.shape[1] > 50:
out = np.empty_like(a)
for i in range(a.shape[0]):
out[i] = np.dot(a[i], b[i])
return out
else:
return np.einsum('...ij,...jk->...ik', a, b)
def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
"""Construct the Jacobian of the collocation system.
There are n * m + k functions: m - 1 collocations residuals, each
containing n components, followed by n + k boundary condition residuals.
There are n * m + k variables: m vectors of y, each containing n
components, followed by k values of vector p.
For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
the following sparsity structure:
        1 1 2 2 0 0 0 0   5
        1 1 2 2 0 0 0 0   5
        0 0 1 1 2 2 0 0   5
        0 0 1 1 2 2 0 0   5
        0 0 0 0 1 1 2 2   5
        0 0 0 0 1 1 2 2   5

        3 3 0 0 0 0 4 4   6
        3 3 0 0 0 0 4 4   6
        3 3 0 0 0 0 4 4   6
Zeros denote identically zero values, other values denote different kinds
of blocks in the matrix (see below). The blank row indicates the separation
of collocation residuals from boundary conditions. And the blank column
indicates the separation of y values from p values.
Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
of collocation residuals with respect to y.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
k : int
Number of the unknown parameters.
i_jac, j_jac : ndarray
Row and column indices returned by `compute_jac_indices`. They
represent different blocks in the Jacobian matrix in the following
order (see the scheme above):
* 1: m - 1 diagonal n x n blocks for the collocation residuals.
* 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
* 3 : (n + k) x n block for the dependency of the boundary
conditions on ya.
* 4: (n + k) x n block for the dependency of the boundary
conditions on yb.
* 5: (m - 1) * n x k block for the dependency of the collocation
residuals on p.
* 6: (n + k) x k block for the dependency of the boundary
conditions on p.
df_dy : ndarray, shape (n, n, m)
Jacobian of f with respect to y computed at the mesh nodes.
df_dy_middle : ndarray, shape (n, n, m - 1)
Jacobian of f with respect to y computed at the middle between the
mesh nodes.
df_dp : ndarray with shape (n, k, m) or None
Jacobian of f with respect to p computed at the mesh nodes.
df_dp_middle: ndarray with shape (n, k, m - 1) or None
Jacobian of f with respect to p computed at the middle between the
mesh nodes.
dbc_dya, dbc_dyb : ndarray, shape (n, n)
Jacobian of bc with respect to ya and yb.
dbc_dp: ndarray with shape (n, k) or None
Jacobian of bc with respect to p.
Returns
-------
J : csc_matrix, shape (n * m + k, n * m + k)
Jacobian of the collocation system in a sparse form.
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
       Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
df_dy = np.transpose(df_dy, (2, 0, 1))
df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
h = h[:, np.newaxis, np.newaxis]
dtype = df_dy.dtype
# Computing diagonal n x n blocks.
dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_0[:] = -np.identity(n)
dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[:-1])
dPhi_dy_0 -= h**2 / 12 * T
# Computing off-diagonal n x n blocks.
dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_1[:] = np.identity(n)
dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[1:])
dPhi_dy_1 += h**2 / 12 * T
values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
dbc_dyb.ravel()))
if k > 0:
df_dp = np.transpose(df_dp, (2, 0, 1))
df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
df_dp_middle += 0.125 * h * T
dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
J = coo_matrix((values, (i_jac, j_jac)))
return csc_matrix(J)
def collocation_fun(fun, y, p, x, h):
"""Evaluate collocation residuals.
This function lies in the core of the method. The solution is sought
as a cubic C1 continuous spline with derivatives matching the ODE rhs
at given nodes `x`. Collocation conditions are formed from the equality
of the spline derivatives and rhs of the ODE system in the middle points
between nodes.
    This method is classified as part of the Lobatto IIIA family in the ODE
    literature.
Refer to [1]_ for the formula and some discussion.
Returns
-------
col_res : ndarray, shape (n, m - 1)
Collocation residuals at the middle points of the mesh intervals.
y_middle : ndarray, shape (n, m - 1)
Values of the cubic spline evaluated at the middle points of the mesh
intervals.
f : ndarray, shape (n, m)
RHS of the ODE system evaluated at the mesh nodes.
f_middle : ndarray, shape (n, m - 1)
RHS of the ODE system evaluated at the middle points of the mesh
intervals (and using `y_middle`).
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
       Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
f = fun(x, y, p)
y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
0.125 * h * (f[:, 1:] - f[:, :-1]))
f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
4 * f_middle)
return col_res, y_middle, f, f_middle
def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
"""Create the function and the Jacobian for the collocation system."""
x_middle = x[:-1] + 0.5 * h
i_jac, j_jac = compute_jac_indices(n, m, k)
def col_fun(y, p):
return collocation_fun(fun, y, p, x, h)
def sys_jac(y, p, y_middle, f, f_middle, bc0):
if fun_jac is None:
df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
df_dy_middle, df_dp_middle = estimate_fun_jac(
fun, x_middle, y_middle, p, f_middle)
else:
df_dy, df_dp = fun_jac(x, y, p)
df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
if bc_jac is None:
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
p, bc0)
else:
dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
df_dy_middle, df_dp, df_dp_middle, dbc_dya,
dbc_dyb, dbc_dp)
return col_fun, sys_jac
def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol):
"""Solve the nonlinear collocation system by a Newton method.
This is a simple Newton method with a backtracking line search. As
advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
is used, where J is the Jacobian matrix at the current iteration and r is
    the vector of collocation residuals (values of the system lhs).
    The method alternates between full Newton iterations and fixed-Jacobian
    iterations, based on whether the last full-length step was accepted (see
    the handling of ``alpha == 1`` in the code below).
There are other tricks proposed in [1]_, but they are not used as they
don't seem to improve anything significantly, and even break the
convergence on some test problems I tried.
All important parameters of the algorithm are defined inside the function.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
h : ndarray, shape (m-1,)
Mesh intervals.
col_fun : callable
Function computing collocation residuals.
bc : callable
Function computing boundary condition residuals.
jac : callable
Function computing the Jacobian of the whole system (including
collocation and boundary condition residuals). It is supposed to
return csc_matrix.
y : ndarray, shape (n, m)
Initial guess for the function values at the mesh nodes.
p : ndarray, shape (k,)
Initial guess for the unknown parameters.
B : ndarray with shape (n, n) or None
        Matrix to force the S y(a) = 0 condition for a problem with the
        singular term. If None, the singular term is assumed to be absent.
bvp_tol : float
Tolerance to which we want to solve a BVP.
Returns
-------
y : ndarray, shape (n, m)
Final iterate for the function values at the mesh nodes.
p : ndarray, shape (k,)
Final iterate for the unknown parameters.
singular : bool
True, if the LU decomposition failed because Jacobian turned out
to be singular.
References
----------
.. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations"
"""
# We know that the solution residuals at the middle points of the mesh
# are connected with collocation residuals r_middle = 1.5 * col_res / h.
# As our BVP solver tries to decrease relative residuals below a certain
    # tolerance it seems reasonable to terminate Newton iterations by
# comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
# which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
# the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
# should be computed as follows:
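    # (From r_middle = 1.5 * col_res / h < 5e-2 * bvp_tol * (1 + np.abs(f_middle)),
    # col_res < 2/3 * h * 5e-2 * bvp_tol * (1 + np.abs(f_middle)).)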
tol_r = 2/3 * h * 5e-2 * bvp_tol
# We also need to control residuals of the boundary conditions. But it
# seems that they become very small eventually as the solver progresses,
    # i.e. the tolerance for BC is not very important. We set it 1.5 orders
# lower than the BVP tolerance as well.
tol_bc = 5e-2 * bvp_tol
# Maximum allowed number of Jacobian evaluation and factorization, in
# other words the maximum number of full Newton iterations. A small value
# is recommended in the literature.
max_njev = 4
# Maximum number of iterations, considering that some of them can be
# performed with the fixed Jacobian. In theory such iterations are cheap,
# but it's not that simple in Python.
max_iter = 8
# Minimum relative improvement of the criterion function to accept the
# step (Armijo constant).
sigma = 0.2
# Step size decrease factor for backtracking.
tau = 0.5
# Maximum number of backtracking steps, the minimum step is then
# tau ** n_trial.
n_trial = 4
col_res, y_middle, f, f_middle = col_fun(y, p)
bc_res = bc(y[:, 0], y[:, -1], p)
res = np.hstack((col_res.ravel(order='F'), bc_res))
njev = 0
singular = False
recompute_jac = True
for iteration in range(max_iter):
if recompute_jac:
J = jac(y, p, y_middle, f, f_middle, bc_res)
njev += 1
try:
LU = splu(J)
except RuntimeError:
singular = True
break
step = LU.solve(res)
cost = np.dot(step, step)
y_step = step[:m * n].reshape((n, m), order='F')
p_step = step[m * n:]
alpha = 1
for trial in range(n_trial + 1):
y_new = y - alpha * y_step
if B is not None:
y_new[:, 0] = np.dot(B, y_new[:, 0])
p_new = p - alpha * p_step
col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
res = np.hstack((col_res.ravel(order='F'), bc_res))
step_new = LU.solve(res)
cost_new = np.dot(step_new, step_new)
if cost_new < (1 - 2 * alpha * sigma) * cost:
break
if trial < n_trial:
alpha *= tau
y = y_new
p = p_new
if njev == max_njev:
break
if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
np.all(bc_res < tol_bc)):
break
# If the full step was taken, then we are going to continue with
# the same Jacobian. This is the approach of BVP_SOLVER.
if alpha == 1:
step = step_new
cost = cost_new
recompute_jac = False
else:
recompute_jac = True
return y, p, singular
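# Illustrative sketch added for exposition (a hypothetical helper, not part
# of the original scipy source): the damped Newton logic above reduced to a
# scalar equation g(z) = 0, using the same affine-invariant criterion
# cost = ||J^-1 g||**2 and the same Armijo backtracking test.
def _example_damped_newton_scalar(z=10.0, sigma=0.2, tau=0.5, n_trial=4):
    g = lambda t: np.arctan(t)        # undamped Newton diverges from z = 10
    g_prime = lambda t: 1 / (1 + t ** 2)
    for _ in range(30):
        step = g(z) / g_prime(z)      # J^-1 g in the scalar case
        cost = step * step
        alpha = 1
        for trial in range(n_trial + 1):
            z_new = z - alpha * step
            step_new = g(z_new) / g_prime(z_new)
            cost_new = step_new * step_new
            if cost_new < (1 - 2 * alpha * sigma) * cost:
                break
            if trial < n_trial:
                alpha *= tau
        z = z_new
        if abs(g(z)) < 1e-12:
            break
    return z  # close to 0, the root of arctan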
def print_iteration_header():
print("{:^15}{:^15}{:^15}{:^15}".format(
"Iteration", "Max residual", "Total nodes", "Nodes added"))
def print_iteration_progress(iteration, residual, total_nodes, nodes_added):
print("{:^15}{:^15.2e}{:^15}{:^15}".format(
iteration, residual, total_nodes, nodes_added))
class BVPResult(OptimizeResult):
pass
TERMINATION_MESSAGES = {
0: "The algorithm converged to the desired accuracy.",
1: "The maximum number of mesh nodes is exceeded.",
2: "A singular Jacobian encountered when solving the collocation system."
}
def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
"""Estimate rms values of collocation residuals using Lobatto quadrature.
    The residuals are defined as the difference between the derivatives of
    our solution and the rhs of the ODE system. We use relative residuals,
    i.e. normalized by 1 + np.abs(f). RMS values are computed as the square
    root of the normalized integrals of the squared relative residuals over
    each interval. Integrals are estimated using 5-point Lobatto quadrature
    [1]_; we use the fact that residuals at the mesh nodes are identically
    zero.
    In [2]_ they don't normalize integrals by interval lengths, which gives
    a higher rate of convergence of the residuals by the factor of h**0.5.
    I chose to do such normalization for ease of interpretation of the
    return values as RMS estimates.
Returns
-------
rms_res : ndarray, shape (m - 1,)
Estimated rms values of the relative residuals over each interval.
References
----------
.. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
.. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
           Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
x_middle = x[:-1] + 0.5 * h
s = 0.5 * h * (3/7)**0.5
x1 = x_middle + s
x2 = x_middle - s
y1 = sol(x1)
y2 = sol(x2)
y1_prime = sol(x1, 1)
y2_prime = sol(x2, 1)
f1 = fun(x1, y1, p)
f2 = fun(x2, y2, p)
r1 = y1_prime - f1
r2 = y2_prime - f2
r_middle /= 1 + np.abs(f_middle)
r1 /= 1 + np.abs(f1)
r2 /= 1 + np.abs(f2)
r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
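# Illustrative check added for exposition (a hypothetical helper, not part
# of the original scipy source): the 5-point Lobatto rule used above has
# nodes {-1, -sqrt(3/7), 0, sqrt(3/7), 1} with weights
# {1/10, 49/90, 32/45, 49/90, 1/10} on [-1, 1] and is exact for
# polynomials up to degree 7.
def _example_lobatto_exactness():
    nodes = np.array([-1, -(3 / 7) ** 0.5, 0, (3 / 7) ** 0.5, 1])
    weights = np.array([1 / 10, 49 / 90, 32 / 45, 49 / 90, 1 / 10])
    approx = np.sum(weights * nodes ** 6)  # quadrature applied to x**6
    exact = 2 / 7                          # integral of x**6 over [-1, 1]
    return abs(approx - exact)             # ~0 up to round-off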
def create_spline(y, yp, x, h):
"""Create a cubic spline given values and derivatives.
Formulas for the coefficients are taken from interpolate.CubicSpline.
Returns
-------
sol : PPoly
Constructed spline as a PPoly instance.
"""
from scipy.interpolate import PPoly
n, m = y.shape
c = np.empty((4, n, m - 1), dtype=y.dtype)
slope = (y[:, 1:] - y[:, :-1]) / h
t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
c[0] = t / h
c[1] = (slope - yp[:, :-1]) / h - t
c[2] = yp[:, :-1]
c[3] = y[:, :-1]
c = np.rollaxis(c, 1)
return PPoly(c, x, extrapolate=True, axis=1)
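# Illustrative sketch added for exposition (a hypothetical helper, not part
# of the original scipy source): the spline built by create_spline
# reproduces the supplied values at the mesh nodes.
def _example_create_spline():
    x = np.linspace(0, np.pi, 5)
    y = np.sin(x)[np.newaxis, :]       # shape (n, m) with n = 1
    yp = np.cos(x)[np.newaxis, :]
    sol = create_spline(y, yp, x, np.diff(x))
    return np.max(np.abs(sol(x) - y))  # ~0: nodal values are interpolated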
def modify_mesh(x, insert_1, insert_2):
"""Insert nodes into a mesh.
    Node removal logic is not established; its impact on the solver is
    presumably negligible, so only insertion is done in this function.
Parameters
----------
x : ndarray, shape (m,)
Mesh nodes.
insert_1 : ndarray
        Intervals in each of which to insert 1 new node in the middle.
    insert_2 : ndarray
        Intervals in each of which to insert 2 new nodes, dividing the
        interval into 3 equal parts.
Returns
-------
x_new : ndarray
New mesh nodes.
Notes
-----
`insert_1` and `insert_2` should not have common values.
"""
    # Because the np.insert implementation apparently varies between numpy
    # versions, we use a simple and reliable approach with sorting.
return np.sort(np.hstack((
x,
0.5 * (x[insert_1] + x[insert_1 + 1]),
(2 * x[insert_2] + x[insert_2 + 1]) / 3,
(x[insert_2] + 2 * x[insert_2 + 1]) / 3
)))
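# Illustrative sketch added for exposition (a hypothetical helper, not part
# of the original scipy source): refine the mesh [0, 1, 2, 3] by inserting
# one midpoint into interval 0 and two trisection points into interval 2.
def _example_modify_mesh():
    x = np.array([0.0, 1.0, 2.0, 3.0])
    # Result: [0, 0.5, 1, 2, 7/3, 8/3, 3].
    return modify_mesh(x, np.array([0]), np.array([2]))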
def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
"""Wrap functions for unified usage in the solver."""
if fun_jac is None:
fun_jac_wrapped = None
if bc_jac is None:
bc_jac_wrapped = None
if k == 0:
def fun_p(x, y, _):
return np.asarray(fun(x, y), dtype)
def bc_wrapped(ya, yb, _):
return np.asarray(bc(ya, yb), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, _):
return np.asarray(fun_jac(x, y), dtype), None
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, _):
dbc_dya, dbc_dyb = bc_jac(ya, yb)
return (np.asarray(dbc_dya, dtype),
np.asarray(dbc_dyb, dtype), None)
else:
def fun_p(x, y, p):
return np.asarray(fun(x, y, p), dtype)
        def bc_wrapped(ya, yb, p):
            return np.asarray(bc(ya, yb, p), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, p):
df_dy, df_dp = fun_jac(x, y, p)
return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, p):
dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
np.asarray(dbc_dp, dtype))
if S is None:
fun_wrapped = fun_p
else:
def fun_wrapped(x, y, p):
f = fun_p(x, y, p)
if x[0] == a:
f[:, 0] = np.dot(D, f[:, 0])
f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
else:
f += np.dot(S, y) / (x - a)
return f
if fun_jac is not None:
if S is None:
fun_jac_wrapped = fun_jac_p
else:
Sr = S[:, :, np.newaxis]
def fun_jac_wrapped(x, y, p):
df_dy, df_dp = fun_jac_p(x, y, p)
if x[0] == a:
df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
df_dy[:, :, 1:] += Sr / (x[1:] - a)
else:
df_dy += Sr / (x - a)
return df_dy, df_dp
return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
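# Illustrative sketch added for exposition (a hypothetical helper, not part
# of the original scipy source): with k = 0 and no singular term, the
# wrappers simply pad the user callables with a dummy parameter argument.
def _example_wrap_functions():
    fun = lambda x, y: np.vstack((y[1], -y[0]))          # y'' = -y
    bc = lambda ya, yb: np.array([ya[0], yb[0] - 1.0])
    fun_w, bc_w, _, _ = wrap_functions(fun, bc, None, None, k=0, a=0.0,
                                       S=None, D=None, dtype=float)
    x = np.linspace(0.0, 1.0, 3)
    y = np.zeros((2, 3))
    return fun_w(x, y, None), bc_w(y[:, 0], y[:, -1], None)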
def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
tol=1e-3, max_nodes=1000, verbose=0):
"""Solve a boundary-value problem for a system of ODEs.
This function numerically solves a first order system of ODEs subject to
two-point boundary conditions::
dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
bc(y(a), y(b), p) = 0
    Here x is a 1-dimensional independent variable, y(x) is an n-dimensional
    vector-valued function and p is a k-dimensional vector of unknown
    parameters which is to be found along with y(x). For the problem to be
    determined, there must be n + k boundary conditions, i.e. bc must be an
    (n + k)-dimensional function.
The last singular term in the right-hand side of the system is optional.
It is defined by an n-by-n matrix S, such that the solution must satisfy
S y(a) = 0. This condition will be forced during iterations, so it must not
    contradict the boundary conditions. See [2]_ for an explanation of how
    this term is handled when solving BVPs numerically.
Problems in a complex domain can be solved as well. In this case y and p
are considered to be complex, and f and bc are assumed to be complex-valued
functions, but x stays real. Note that f and bc must be complex
differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
should rewrite your problem for real and imaginary parts separately. To
solve a problem in a complex domain, pass an initial guess for y with a
complex data type (see below).
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(x, y)``,
or ``fun(x, y, p)`` if parameters are present. All arguments are
ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
return value must be an array with shape (n, m) and with the same
layout as ``y``.
bc : callable
Function evaluating residuals of the boundary conditions. The calling
signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
and ``p`` with shape (k,). The return value must be an array with
shape (n + k,).
x : array_like, shape (m,)
Initial mesh. Must be a strictly increasing sequence of real numbers
with ``x[0]=a`` and ``x[-1]=b``.
y : array_like, shape (n, m)
Initial guess for the function values at the mesh nodes, i-th column
corresponds to ``x[i]``. For problems in a complex domain pass `y`
with a complex data type (even if the initial guess is purely real).
p : array_like with shape (k,) or None, optional
Initial guess for the unknown parameters. If None (default), it is
assumed that the problem doesn't depend on any parameters.
S : array_like with shape (n, n) or None
Matrix defining the singular term. If None (default), the problem is
solved without the singular term.
fun_jac : callable or None, optional
Function computing derivatives of f with respect to y and p. The
calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
parameters are present. The return must contain 1 or 2 elements in the
following order:
* df_dy : array_like with shape (n, n, m) where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j.
* df_dp : array_like with shape (n, k, m) where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d p_j.
Here q numbers nodes at which x and y are defined, whereas i and j
number vector components. If the problem is solved without unknown
parameters df_dp should not be returned.
If `fun_jac` is None (default), the derivatives will be estimated
by the forward finite differences.
bc_jac : callable or None, optional
Function computing derivatives of bc with respect to ya, yb and p.
The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
if parameters are present. The return must contain 2 or 3 elements in
the following order:
* dbc_dya : array_like with shape (n, n) where an element (i, j)
equals to d bc_i(ya, yb, p) / d ya_j.
* dbc_dyb : array_like with shape (n, n) where an element (i, j)
equals to d bc_i(ya, yb, p) / d yb_j.
* dbc_dp : array_like with shape (n, k) where an element (i, j)
equals to d bc_i(ya, yb, p) / d p_j.
If the problem is solved without unknown parameters dbc_dp should not
be returned.
If `bc_jac` is None (default), the derivatives will be estimated by
the forward finite differences.
tol : float, optional
Desired tolerance of the solution. If we define ``r = y' - f(x, y)``
where y is the found solution, then the solver tries to achieve on each
        mesh interval ``norm(r / (1 + abs(f))) < tol``, where ``norm`` is
estimated in a root mean squared sense (using a numerical quadrature
formula). Default is 1e-3.
max_nodes : int, optional
Maximum allowed number of the mesh nodes. If exceeded, the algorithm
terminates. Default is 1000.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
Returns
-------
Bunch object with the following fields defined:
sol : PPoly
Found solution for y as `scipy.interpolate.PPoly` instance, a C1
continuous cubic spline.
p : ndarray or None, shape (k,)
Found parameters. None, if the parameters were not present in the
problem.
x : ndarray, shape (m,)
Nodes of the final mesh.
y : ndarray, shape (n, m)
Solution values at the mesh nodes.
yp : ndarray, shape (n, m)
Solution derivatives at the mesh nodes.
rms_residuals : ndarray, shape (m - 1,)
RMS values of the relative residuals over each mesh interval (see the
description of `tol` parameter).
niter : int
Number of completed iterations.
status : int
Reason for algorithm termination:
* 0: The algorithm converged to the desired accuracy.
* 1: The maximum number of mesh nodes is exceeded.
* 2: A singular Jacobian encountered when solving the collocation
system.
message : string
Verbal description of the termination reason.
success : bool
True if the algorithm converged to the desired accuracy (``status=0``).
Notes
-----
    This function implements a 4th order collocation algorithm with the
control of residuals similar to [1]_. A collocation system is solved
by a damped Newton method with an affine-invariant criterion function as
described in [3]_.
Note that in [1]_ integral residuals are defined without normalization
by interval lengths. So their definition is different by a multiplier of
h**0.5 (h is an interval length) from the definition used here.
.. versionadded:: 0.18.0
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
           Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
.. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
Solver".
.. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations".
.. [4] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
Examples
--------
In the first example we solve Bratu's problem::
y'' + k * exp(y) = 0
y(0) = y(1) = 0
for k = 1.
We rewrite the equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -exp(y1)
>>> def fun(x, y):
... return np.vstack((y[1], -np.exp(y[0])))
Implement evaluation of the boundary condition residuals:
>>> def bc(ya, yb):
... return np.array([ya[0], yb[0]])
Define the initial mesh with 5 nodes:
>>> x = np.linspace(0, 1, 5)
This problem is known to have two solutions. To obtain both of them we
use two different initial guesses for y. We denote them by subscripts
a and b.
>>> y_a = np.zeros((2, x.size))
>>> y_b = np.zeros((2, x.size))
>>> y_b[0] = 3
Now we are ready to run the solver.
>>> from scipy.integrate import solve_bvp
>>> res_a = solve_bvp(fun, bc, x, y_a)
>>> res_b = solve_bvp(fun, bc, x, y_b)
    Let's plot the two found solutions. We take advantage of having the
solution in a spline form to produce a smooth plot.
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot_a = res_a.sol(x_plot)[0]
>>> y_plot_b = res_b.sol(x_plot)[0]
>>> import matplotlib.pyplot as plt
>>> plt.plot(x_plot, y_plot_a, label='y_a')
>>> plt.plot(x_plot, y_plot_b, label='y_b')
>>> plt.legend()
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
We see that the two solutions have similar shape, but differ in scale
significantly.
In the second example we solve a simple Sturm-Liouville problem::
y'' + k**2 * y = 0
y(0) = y(1) = 0
It is known that a non-trivial solution y = A * sin(k * x) is possible for
k = pi * n, where n is an integer. To establish the normalization constant
A = 1 we add a boundary condition::
y'(0) = k
Again we rewrite our equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -k**2 * y1
>>> def fun(x, y, p):
... k = p[0]
... return np.vstack((y[1], -k**2 * y[0]))
Note that parameters p are passed as a vector (with one element in our
case).
Implement the boundary conditions:
>>> def bc(ya, yb, p):
... k = p[0]
... return np.array([ya[0], yb[0], ya[1] - k])
    Set up the initial mesh and guess for y. We aim to find the solution for
    k = 2 * pi; to achieve that we set values of y to approximately follow
sin(2 * pi * x):
>>> x = np.linspace(0, 1, 5)
>>> y = np.zeros((2, x.size))
>>> y[0, 1] = 1
>>> y[0, 3] = -1
Run the solver with 6 as an initial guess for k.
>>> sol = solve_bvp(fun, bc, x, y, p=[6])
We see that the found k is approximately correct:
>>> sol.p[0]
6.28329460046
And finally plot the solution to see the anticipated sinusoid:
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot = sol.sol(x_plot)[0]
>>> plt.plot(x_plot, y_plot)
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
"""
x = np.asarray(x, dtype=float)
if x.ndim != 1:
raise ValueError("`x` must be 1 dimensional.")
h = np.diff(x)
if np.any(h <= 0):
raise ValueError("`x` must be strictly increasing.")
a = x[0]
y = np.asarray(y)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
if y.ndim != 2:
raise ValueError("`y` must be 2 dimensional.")
if y.shape[1] != x.shape[0]:
raise ValueError("`y` is expected to have {} columns, but actually "
"has {}.".format(x.shape[0], y.shape[1]))
if p is None:
p = np.array([])
else:
p = np.asarray(p, dtype=dtype)
if p.ndim != 1:
raise ValueError("`p` must be 1 dimensional.")
if tol < 100 * EPS:
warn("`tol` is too low, setting to {:.2e}".format(100 * EPS))
tol = 100 * EPS
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
n = y.shape[0]
k = p.shape[0]
if S is not None:
S = np.asarray(S, dtype=dtype)
if S.shape != (n, n):
raise ValueError("`S` is expected to have shape {}, "
"but actually has {}".format((n, n), S.shape))
# Compute I - S^+ S to impose necessary boundary conditions.
B = np.identity(n) - np.dot(pinv(S), S)
y[:, 0] = np.dot(B, y[:, 0])
# Compute (I - S)^+ to correct derivatives at x=a.
D = pinv(np.identity(n) - S)
else:
B = None
D = None
fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
f = fun_wrapped(x, y, p)
if f.shape != y.shape:
raise ValueError("`fun` return is expected to have shape {}, "
"but actually has {}.".format(y.shape, f.shape))
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
if bc_res.shape != (n + k,):
raise ValueError("`bc` return is expected to have shape {}, "
"but actually has {}.".format((n + k,), bc_res.shape))
status = 0
iteration = 0
if verbose == 2:
print_iteration_header()
while True:
m = x.shape[0]
col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
fun_jac_wrapped, bc_jac_wrapped, x, h)
y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
y, p, B, tol)
iteration += 1
col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
p, x, h)
# This relation is not trivial, but can be verified.
r_middle = 1.5 * col_res / h
sol = create_spline(y, f, x, h)
rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
r_middle, f_middle)
max_rms_res = np.max(rms_res)
if singular:
status = 2
break
insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
insert_2, = np.nonzero(rms_res >= 100 * tol)
nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
if m + nodes_added > max_nodes:
status = 1
if verbose == 2:
nodes_added = "({})".format(nodes_added)
print_iteration_progress(iteration, max_rms_res, m,
nodes_added)
break
if verbose == 2:
print_iteration_progress(iteration, max_rms_res, m, nodes_added)
if nodes_added > 0:
x = modify_mesh(x, insert_1, insert_2)
h = np.diff(x)
y = sol(x)
else:
status = 0
break
if verbose > 0:
if status == 0:
print("Solved in {} iterations, number of nodes {}, "
"maximum relative residual {:.2e}."
.format(iteration, x.shape[0], max_rms_res))
elif status == 1:
print("Number of nodes is exceeded after iteration {}, "
"maximum relative residual {:.2e}."
.format(iteration, max_rms_res))
elif status == 2:
print("Singular Jacobian encountered when solving the collocation "
"system on iteration {}, maximum relative residual {:.2e}."
.format(iteration, max_rms_res))
if p.size == 0:
p = None
return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
niter=iteration, status=status,
message=TERMINATION_MESSAGES[status], success=status == 0)
| 39,966 | 34.213216 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/_ode.py
|
# Authors: Pearu Peterson, Pauli Virtanen, John Travers
"""
First-order ODE integrators.
User-friendly interface to various numerical integrators for solving a
system of first order ODEs with prescribed initial conditions::
d y(t)[i]
--------- = f(t,y(t))[i],
d t
y(t=0)[i] = y0[i],
where::
i = 0, ..., len(y0) - 1
class ode
---------
A generic interface class to numeric integrators. It has the following
methods::
integrator = ode(f, jac=None)
integrator = integrator.set_integrator(name, **params)
integrator = integrator.set_initial_value(y0, t0=0.0)
integrator = integrator.set_f_params(*args)
integrator = integrator.set_jac_params(*args)
y1 = integrator.integrate(t1, step=False, relax=False)
flag = integrator.successful()
class complex_ode
-----------------
This class has the same generic interface as ode, except it can handle complex
f, y and Jacobians by transparently translating them into the equivalent
real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
an alternative to ode with the zvode solver, sometimes performing better.
"""
from __future__ import division, print_function, absolute_import
# XXX: Integrators must have:
# ===========================
# cvode - C version of vode and vodpk with many improvements.
# Get it from http://www.netlib.org/ode/cvode.tar.gz
#   To wrap cvode to Python, one must write an extension module by
#   hand. Its interface is too much 'advanced C' for f2py, so wrapping
#   it that way would be too complicated (or impossible).
#
# How to define a new integrator:
# ===============================
#
# class myodeint(IntegratorBase):
#
# runner = <odeint function> or None
#
# def __init__(self,...): # required
# <initialize>
#
# def reset(self,n,has_jac): # optional
# # n - the size of the problem (number of equations)
# # has_jac - whether user has supplied its own routine for Jacobian
# <allocate memory,initialize further>
#
# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
# # this method is called to integrate from t=t0 to t=t1
# # with initial condition y0. f and jac are user-supplied functions
# # that define the problem. f_params,jac_params are additional
# # arguments
# # to these functions.
# <calculate y1>
# if <calculation was unsuccessful>:
# self.success = 0
# return t1,y1
#
# # In addition, one can define step() and run_relax() methods (they
# # take the same arguments as run()) if the integrator can support
# # these features (see IntegratorBase doc strings).
#
# if myodeint.runner:
# IntegratorBase.integrator_classes.append(myodeint)
__all__ = ['ode', 'complex_ode']
__version__ = "$Id$"
__docformat__ = "restructuredtext en"
import re
import warnings
from numpy import asarray, array, zeros, int32, isscalar, real, imag, vstack
from . import vode as _vode
from . import _dop
from . import lsoda as _lsoda
# ------------------------------------------------------------------------------
# User interface
# ------------------------------------------------------------------------------
class ode(object):
"""
A generic interface class to numeric integrators.
Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
*Note*: The first two arguments of ``f(t, y, ...)`` are in the
opposite order of the arguments in the system definition function used
by `scipy.integrate.odeint`.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Right-hand side of the differential equation. t is a scalar,
``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
`f` should return a scalar, array or list (not a tuple).
jac : callable ``jac(t, y, *jac_args)``, optional
Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
``jac_args`` is set by calling ``set_jac_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
See also
--------
odeint : an integrator with a simpler interface based on lsoda from ODEPACK
quad : for finding the area under a curve
Notes
-----
Available integrators are listed below. They can be selected using
the `set_integrator` method.
"vode"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/vode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "vode" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
dimension of the matrix must be (lband+uband+1, len(y)).
- method: 'adams' or 'bdf'
Which solver to use, Adams (non-stiff) or BDF (stiff)
- with_jacobian : bool
This option is only considered when the user has not supplied a
Jacobian function and has not indicated (by setting either band)
that the Jacobian is banded. In this case, `with_jacobian` specifies
whether the iteration method of the ODE solver's correction step is
chord iteration with an internally generated full Jacobian or
functional iteration with no Jacobian.
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- order : int
Maximum order used by the integrator,
order <= 12 for Adams, <= 5 for BDF.
"zvode"
Complex-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/zvode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "zvode" integrator at the same time.
This integrator accepts the same parameters in `set_integrator`
as the "vode" solver.
.. note::
When using ZVODE for a stiff system, it should only be used for
the case in which the function f is analytic, that is, when each f(i)
is an analytic function of each y(j). Analyticity means that the
partial derivative df(i)/dy(j) is a unique complex number, and this
fact is critical in the way ZVODE solves the dense or banded linear
systems that arise in the stiff case. For a complex stiff ODE system
in which f is not analytic, ZVODE is likely to have convergence
failures, and for this problem one should instead use DVODE on the
equivalent real system (in the real and imaginary parts of y).
"lsoda"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
automatic method switching between implicit Adams method (for non-stiff
problems) and a method based on backward differentiation formulas (BDF)
(for stiff problems).
Source: http://www.netlib.org/odepack
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "lsoda" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j].
- with_jacobian : bool
*Not used.*
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- max_order_ns : int
Maximum order used in the nonstiff case (default 12).
- max_order_s : int
Maximum order used in the stiff case (default 5).
- max_hnil : int
Maximum number of messages reporting too small step size (t + h = t)
(default 0)
- ixpr : int
Whether to generate extra printing at method switches (default False).
"dopri5"
This is an explicit runge-kutta method of order (4)5 due to Dormand &
Prince (with stepsize control and dense output).
Authors:
E. Hairer and G. Wanner
Universite de Geneve, Dept. de Mathematiques
CH-1211 Geneve 24, Switzerland
e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch
This code is described in [HNW93]_.
This integrator accepts the following parameters in set_integrator()
method of the ode class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- max_step : float
- safety : float
Safety factor on new step selection (default 0.9)
- ifactor : float
- dfactor : float
Maximum factor to increase/decrease step size by in one step
- beta : float
Beta parameter for stabilised step size control.
- verbosity : int
Switch for printing messages (< 0 for no messages).
"dop853"
This is an explicit runge-kutta method of order 8(5,3) due to Dormand
& Prince (with stepsize control and dense output).
Options and references the same as "dopri5".
Examples
--------
A problem to integrate and the corresponding jacobian:
>>> from scipy.integrate import ode
>>>
>>> y0, t0 = [1.0j, 2.0], 0
>>>
>>> def f(t, y, arg1):
... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
>>> def jac(t, y, arg1):
... return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
The integration:
>>> r = ode(f, jac).set_integrator('zvode', method='bdf')
>>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
>>> t1 = 10
>>> dt = 1
>>> while r.successful() and r.t < t1:
... print(r.t+dt, r.integrate(r.t+dt))
1 [-0.71038232+0.23749653j 0.40000271+0.j ]
2.0 [0.19098503-0.52359246j 0.22222356+0.j ]
3.0 [0.47153208+0.52701229j 0.15384681+0.j ]
4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]
5.0 [0.02340997-0.61418799j 0.09523835+0.j ]
6.0 [0.58643071+0.339819j 0.08000018+0.j ]
7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]
8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]
9.0 [0.64850462+0.15048982j 0.05405414+0.j ]
10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]
References
----------
.. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
Differential Equations i. Nonstiff Problems. 2nd edition.
Springer Series in Computational Mathematics,
Springer-Verlag (1993)
"""
def __init__(self, f, jac=None):
self.stiff = 0
self.f = f
self.jac = jac
self.f_params = ()
self.jac_params = ()
self._y = []
@property
def y(self):
return self._y
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
if isscalar(y):
y = [y]
n_prev = len(self._y)
if not n_prev:
self.set_integrator('') # find first available integrator
self._y = asarray(y, self._integrator.scalar)
self.t = t
self._integrator.reset(len(self._y), self.jac is not None)
return self
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator.
integrator_params
Additional parameters for the integrator.
"""
integrator = find_integrator(name)
if integrator is None:
            # FIXME: this really should raise an exception. Will that break
            # any code?
warnings.warn('No integrator name match with %r or is not '
'available.' % name)
else:
self._integrator = integrator(**integrator_params)
if not len(self._y):
self.t = 0.0
self._y = array([0.0], self._integrator.scalar)
self._integrator.reset(len(self._y), self.jac is not None)
return self
def integrate(self, t, step=False, relax=False):
"""Find y=y(t), set y as an initial condition, and return y.
Parameters
----------
t : float
The endpoint of the integration step.
step : bool
If True, and if the integrator supports the step method,
then perform a single integration step and return.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
relax : bool
If True and if the integrator supports the run_relax method,
then integrate until t_1 >= t and return. ``relax`` is not
referenced if ``step=True``.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
Returns
-------
y : float
The integrated value at t
"""
if step and self._integrator.supports_step:
mth = self._integrator.step
elif relax and self._integrator.supports_run_relax:
mth = self._integrator.run_relax
else:
mth = self._integrator.run
try:
self._y, self.t = mth(self.f, self.jac or (lambda: None),
self._y, self.t, t,
self.f_params, self.jac_params)
except SystemError:
# f2py issue with tuple returns, see ticket 1187.
raise ValueError('Function to integrate must not return a tuple.')
return self._y
def successful(self):
"""Check if integration was successful."""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.success == 1
def get_return_code(self):
"""Extracts the return code for the integration to enable better control
if the integration fails.
In general, a return code > 0 implies success while a return code < 0
implies failure.
Notes
-----
This section describes possible return codes and their meaning, for available
integrators that can be selected by `set_integrator` method.
"vode"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call. (Perhaps wrong MF.)
-2 Excess accuracy requested. (Tolerances too small.)
-3 Illegal input detected. (See printed message.)
-4 Repeated error test failures. (Check all input.)
-5 Repeated convergence failures. (Perhaps bad Jacobian
supplied or wrong choice of MF or tolerances.)
-6 Error weight became zero during problem. (Solution
component i vanished, and ATOL or ATOL(i) = 0.)
=========== =======
"zvode"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call. (Perhaps wrong MF.)
-2 Excess accuracy requested. (Tolerances too small.)
-3 Illegal input detected. (See printed message.)
-4 Repeated error test failures. (Check all input.)
-5 Repeated convergence failures. (Perhaps bad Jacobian
supplied or wrong choice of MF or tolerances.)
-6 Error weight became zero during problem. (Solution
component i vanished, and ATOL or ATOL(i) = 0.)
=========== =======
"dopri5"
=========== =======
Return Code Message
=========== =======
1 Integration successful.
2 Integration successful (interrupted by solout).
-1 Input is not consistent.
-2 Larger nsteps is needed.
-3 Step size becomes too small.
-4 Problem is probably stiff (interrupted).
=========== =======
"dop853"
=========== =======
Return Code Message
=========== =======
1 Integration successful.
2 Integration successful (interrupted by solout).
-1 Input is not consistent.
-2 Larger nsteps is needed.
-3 Step size becomes too small.
-4 Problem is probably stiff (interrupted).
=========== =======
"lsoda"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call (perhaps wrong Dfun type).
-2 Excess accuracy requested (tolerances too small).
-3 Illegal input detected (internal error).
-4 Repeated error test failures (internal error).
-5 Repeated convergence failures (perhaps bad Jacobian or tolerances).
-6 Error weight became zero during problem.
-7 Internal workspace insufficient to finish (internal error).
=========== =======
"""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.istate
def set_f_params(self, *args):
"""Set extra parameters for user-supplied function f."""
self.f_params = args
return self
def set_jac_params(self, *args):
"""Set extra parameters for user-supplied function jac."""
self.jac_params = args
return self
def set_solout(self, solout):
"""
Set callable to be called at every successful integration step.
Parameters
----------
solout : callable
            ``solout(t, y)`` is called at each internal integrator step,
            t is a scalar providing the current independent position,
            y is the current solution ``y.shape == (n,)``.
            solout should return -1 to stop integration;
            otherwise it should return None or 0.
"""
if self._integrator.supports_solout:
self._integrator.set_solout(solout)
if self._y is not None:
self._integrator.reset(len(self._y), self.jac is not None)
else:
raise ValueError("selected integrator does not support solout,"
" choose another one")
def _transform_banded_jac(bjac):
"""
Convert a real matrix of the form (for example)
[0 0 A B] [0 0 0 B]
[0 0 C D] [0 0 A D]
[E F G H] to [0 F C H]
[I J K L] [E J G L]
[I 0 K 0]
That is, every other column is shifted up one.
"""
# Shift every other column.
newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))
newjac[1:, ::2] = bjac[:, ::2]
newjac[:-1, 1::2] = bjac[:, 1::2]
return newjac
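# Illustrative sketch added for exposition (a hypothetical helper, not part
# of the original scipy source): the docstring pattern above with letters
# replaced by numbers; the output gains one row and even-indexed columns
# end up one row lower than odd-indexed ones.
def _example_transform_banded_jac():
    bjac = array([[0., 0., 1., 2.],
                  [0., 0., 3., 4.],
                  [5., 6., 7., 8.],
                  [9., 10., 11., 12.]])
    return _transform_banded_jac(bjac)  # shape (5, 4)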
class complex_ode(ode):
"""
A wrapper of ode for complex systems.
    This functions similarly to `ode`, but re-maps a complex-valued
equation system to a real-valued one before using the integrators.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
jac : callable ``jac(t, y, *jac_args)``
Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
        ``jac_args`` is set by calling ``set_jac_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Examples
--------
For usage examples, see `ode`.
"""
def __init__(self, f, jac=None):
self.cf = f
self.cjac = jac
if jac is None:
ode.__init__(self, self._wrap, None)
else:
ode.__init__(self, self._wrap, self._wrap_jac)
def _wrap(self, t, y, *f_args):
f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
# self.tmp is a real-valued array containing the interleaved
# real and imaginary parts of f.
self.tmp[::2] = real(f)
self.tmp[1::2] = imag(f)
return self.tmp
def _wrap_jac(self, t, y, *jac_args):
# jac is the complex Jacobian computed by the user-defined function.
jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))
# jac_tmp is the real version of the complex Jacobian. Each complex
# entry in jac, say 2+3j, becomes a 2x2 block of the form
# [2 -3]
# [3 2]
jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
jac_tmp[1::2, ::2] = imag(jac)
jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]
ml = getattr(self._integrator, 'ml', None)
mu = getattr(self._integrator, 'mu', None)
if ml is not None or mu is not None:
# Jacobian is banded. The user's Jacobian function has computed
# the complex Jacobian in packed format. The corresponding
# real-valued version has every other column shifted up.
jac_tmp = _transform_banded_jac(jac_tmp)
return jac_tmp
@property
def y(self):
return self._y[::2] + 1j * self._y[1::2]
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator
integrator_params
Additional parameters for the integrator.
"""
if name == 'zvode':
raise ValueError("zvode must be used with ode, not complex_ode")
lband = integrator_params.get('lband')
uband = integrator_params.get('uband')
if lband is not None or uband is not None:
# The Jacobian is banded. Override the user-supplied bandwidths
# (which are for the complex Jacobian) with the bandwidths of
# the corresponding real-valued Jacobian wrapper of the complex
# Jacobian.
integrator_params['lband'] = 2 * (lband or 0) + 1
integrator_params['uband'] = 2 * (uband or 0) + 1
return ode.set_integrator(self, name, **integrator_params)
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
y = asarray(y)
self.tmp = zeros(y.size * 2, 'float')
self.tmp[::2] = real(y)
self.tmp[1::2] = imag(y)
return ode.set_initial_value(self, self.tmp, t)
def integrate(self, t, step=False, relax=False):
"""Find y=y(t), set y as an initial condition, and return y.
Parameters
----------
t : float
The endpoint of the integration step.
step : bool
If True, and if the integrator supports the step method,
then perform a single integration step and return.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
relax : bool
If True and if the integrator supports the run_relax method,
then integrate until t_1 >= t and return. ``relax`` is not
referenced if ``step=True``.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
Returns
-------
y : float
The integrated value at t
"""
y = ode.integrate(self, t, step, relax)
return y[::2] + 1j * y[1::2]
def set_solout(self, solout):
"""
Set callable to be called at every successful integration step.
Parameters
----------
solout : callable
            ``solout(t, y)`` is called at each internal integrator step,
            t is a scalar providing the current independent position,
            y is the current solution ``y.shape == (n,)``.
            solout should return -1 to stop integration;
            otherwise it should return None or 0.
"""
if self._integrator.supports_solout:
self._integrator.set_solout(solout, complex=True)
else:
raise TypeError("selected integrator does not support solouta,"
+ "choose another one")
# ------------------------------------------------------------------------------
# ODE integrators
# ------------------------------------------------------------------------------
def find_integrator(name):
for cl in IntegratorBase.integrator_classes:
if re.match(name, cl.__name__, re.I):
return cl
return None
class IntegratorConcurrencyError(RuntimeError):
"""
Failure due to concurrent usage of an integrator that can be used
only for a single problem at a time.
"""
def __init__(self, name):
msg = ("Integrator `%s` can be used to solve only a single problem "
"at a time. If you want to integrate multiple problems, "
"consider using a different integrator "
"(see `ode.set_integrator`)") % name
RuntimeError.__init__(self, msg)
class IntegratorBase(object):
runner = None # runner is None => integrator is not available
success = None # success==1 if integrator was called successfully
istate = None # istate > 0 means success, istate < 0 means failure
supports_run_relax = None
supports_step = None
supports_solout = False
integrator_classes = []
scalar = float
def acquire_new_handle(self):
# Some of the integrators have internal state (ancient
# Fortran...), and so only one instance can use them at a time.
# We keep track of this, and fail when concurrent usage is tried.
self.__class__.active_global_handle += 1
self.handle = self.__class__.active_global_handle
def check_handle(self):
if self.handle is not self.__class__.active_global_handle:
raise IntegratorConcurrencyError(self.__class__.__name__)
def reset(self, n, has_jac):
"""Prepare integrator for call: allocate memory, set flags, etc.
n - number of equations.
has_jac - if user has supplied function for evaluating Jacobian.
"""
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t=t1 using y0 as an initial condition.
        Return the 2-tuple (y1, t1), where y1 is the result and t1 is the
        time at which it was obtained.
"""
        raise NotImplementedError('all integrators must define '
                                  'run(f, jac, y0, t0, t1, f_params, jac_params)')
def step(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Make one integration step and return (y1,t1)."""
raise NotImplementedError('%s does not support step() method' %
self.__class__.__name__)
def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t>=t1 and return (y1,t)."""
raise NotImplementedError('%s does not support run_relax() method' %
self.__class__.__name__)
# XXX: __str__ method for getting visual state of the integrator
def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
"""
Wrap a banded Jacobian function with a function that pads
the Jacobian with `ml` rows of zeros.
"""
def jac_wrapper(t, y):
jac = asarray(jacfunc(t, y, *jac_params))
padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
return padded_jac
return jac_wrapper
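# Illustrative sketch added for exposition (a hypothetical helper, not part
# of the original scipy source): a packed banded Jacobian with
# lband = uband = 1 for n = 4 has shape (3, 4); the wrapper appends
# ml = 1 extra zero rows, as the f2py-generated dvode wrapper expects.
def _example_vode_banded_pad():
    jacfunc = lambda t, y: zeros((3, 4)) + 1.0
    wrapper = _vode_banded_jac_wrapper(jacfunc, ml=1, jac_params=())
    return wrapper(0.0, zeros(4)).shape  # -> (4, 4)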
class vode(IntegratorBase):
runner = getattr(_vode, 'dvode', None)
messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
-2: 'Excess accuracy requested. (Tolerances too small.)',
-3: 'Illegal input detected. (See printed message.)',
-4: 'Repeated error test failures. (Check all input.)',
-5: 'Repeated convergence failures. (Perhaps bad'
' Jacobian supplied or wrong choice of MF or tolerances.)',
-6: 'Error weight became zero during problem. (Solution'
' component i vanished, and ATOL or ATOL(i) = 0.)'
}
supports_run_relax = 1
supports_step = 1
active_global_handle = 0
def __init__(self,
method='adams',
with_jacobian=False,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
order=12,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
):
if re.match(method, r'adams', re.I):
self.meth = 1
elif re.match(method, r'bdf', re.I):
self.meth = 2
else:
raise ValueError('Unknown integration method %s' % method)
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.order = order
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.success = 1
self.initialized = False
def _determine_mf_and_set_bands(self, has_jac):
"""
Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.
In the Fortran code, the legal values of `MF` are:
10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
-11, -12, -14, -15, -21, -22, -24, -25
but this python wrapper does not use negative values.
Returns
mf = 10*self.meth + miter
self.meth is the linear multistep method:
self.meth == 1: method="adams"
self.meth == 2: method="bdf"
miter is the correction iteration method:
            miter == 0: Functional iteration; no Jacobian involved.
miter == 1: Chord iteration with user-supplied full Jacobian
miter == 2: Chord iteration with internally computed full Jacobian
miter == 3: Chord iteration with internally computed diagonal Jacobian
miter == 4: Chord iteration with user-supplied banded Jacobian
miter == 5: Chord iteration with internally computed banded Jacobian
Side effects: If either self.mu or self.ml is not None and the other is None,
then the one that is None is set to 0.
"""
jac_is_banded = self.mu is not None or self.ml is not None
if jac_is_banded:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
# has_jac is True if the user provided a jacobian function.
if has_jac:
if jac_is_banded:
miter = 4
else:
miter = 1
else:
if jac_is_banded:
if self.ml == self.mu == 0:
miter = 3 # Chord iteration with internal diagonal Jacobian.
else:
miter = 5 # Chord iteration with internal banded Jacobian.
else:
# self.with_jacobian is set by the user in the call to ode.set_integrator.
if self.with_jacobian:
miter = 2 # Chord iteration with internal full Jacobian.
else:
                miter = 0  # Functional iteration; no Jacobian involved.
mf = 10 * self.meth + miter
return mf
def reset(self, n, has_jac):
mf = self._determine_mf_and_set_bands(has_jac)
if mf == 10:
lrw = 20 + 16 * n
elif mf in [11, 12]:
lrw = 22 + 16 * n + 2 * n * n
elif mf == 13:
lrw = 22 + 17 * n
elif mf in [14, 15]:
lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
elif mf == 20:
lrw = 20 + 9 * n
elif mf in [21, 22]:
lrw = 22 + 9 * n + 2 * n * n
elif mf == 23:
lrw = 22 + 10 * n
elif mf in [24, 25]:
lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
else:
raise ValueError('Unexpected mf=%s' % mf)
if mf % 10 in [0, 3]:
liw = 30
else:
liw = 30 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
if self.ml is not None and self.ml > 0:
# Banded Jacobian. Wrap the user-provided function with one
# that pads the Jacobian array with the extra `self.ml` rows
# required by the f2py-generated wrapper.
jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)
args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
(f_params, jac_params))
y1, t, istate = self.runner(*args)
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
self.istate = 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
if vode.runner is not None:
IntegratorBase.integrator_classes.append(vode)
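# Illustrative sketch added for exposition (a hypothetical helper, not part
# of the original scipy source): exponential decay y' = -y integrated with
# the "vode" integrator and the Adams (non-stiff) method.
def _example_vode_adams():
    r = ode(lambda t, y: -y).set_integrator('vode', method='adams')
    r.set_initial_value([1.0], 0.0)
    return r.integrate(2.0)  # ~ exp(-2) = 0.1353...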
class zvode(vode):
runner = getattr(_vode, 'zvode', None)
supports_run_relax = 1
supports_step = 1
scalar = complex
active_global_handle = 0
def reset(self, n, has_jac):
mf = self._determine_mf_and_set_bands(has_jac)
if mf in (10,):
lzw = 15 * n
elif mf in (11, 12):
lzw = 15 * n + 2 * n ** 2
elif mf in (-11, -12):
lzw = 15 * n + n ** 2
elif mf in (13,):
lzw = 16 * n
elif mf in (14, 15):
lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-14, -15):
lzw = 16 * n + (2 * self.ml + self.mu) * n
elif mf in (20,):
lzw = 8 * n
elif mf in (21, 22):
lzw = 8 * n + 2 * n ** 2
elif mf in (-21, -22):
lzw = 8 * n + n ** 2
elif mf in (23,):
lzw = 9 * n
elif mf in (24, 25):
lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-24, -25):
lzw = 9 * n + (2 * self.ml + self.mu) * n
lrw = 20 + n
if mf % 10 in (0, 3):
liw = 30
else:
liw = 30 + n
zwork = zeros((lzw,), complex)
self.zwork = zwork
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.zwork, self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
if zvode.runner is not None:
IntegratorBase.integrator_classes.append(zvode)
class dopri5(IntegratorBase):
runner = getattr(_dop, 'dopri5', None)
name = 'dopri5'
supports_solout = True
messages = {1: 'computation successful',
2: 'comput. successful (interrupted by solout)',
-1: 'input is not consistent',
-2: 'larger nsteps is needed',
-3: 'step size becomes too small',
-4: 'problem is probably stiff (interrupted)',
}
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=10.0,
dfactor=0.2,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
self.rtol = rtol
self.atol = atol
self.nsteps = nsteps
self.max_step = max_step
self.first_step = first_step
self.safety = safety
self.ifactor = ifactor
self.dfactor = dfactor
self.beta = beta
self.verbosity = verbosity
self.success = 1
self.set_solout(None)
def set_solout(self, solout, complex=False):
self.solout = solout
self.solout_cmplx = complex
if solout is None:
self.iout = 0
else:
self.iout = 1
def reset(self, n, has_jac):
work = zeros((8 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), int32)
iwork[0] = self.nsteps
iwork[2] = self.verbosity
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork]
self.success = 1
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
tuple(self.call_args) + (f_params,)))
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
return y, x
def _solout(self, nr, xold, x, y, nd, icomp, con):
if self.solout is not None:
if self.solout_cmplx:
y = y[::2] + 1j * y[1::2]
return self.solout(x, y)
else:
return 1
if dopri5.runner is not None:
IntegratorBase.integrator_classes.append(dopri5)
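# Illustrative sketch added for exposition (a hypothetical helper, not part
# of the original scipy source): use set_solout to record every accepted
# dopri5 step; set_solout is called before set_initial_value.
def _example_dopri5_solout():
    steps = []
    r = ode(lambda t, y: -0.5 * y).set_integrator('dopri5')
    r.set_solout(lambda t, y: steps.append((t, y.copy())))
    r.set_initial_value([1.0], 0.0)
    r.integrate(4.0)
    return steps  # (t, y) recorded at each internal step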
class dop853(dopri5):
runner = getattr(_dop, 'dop853', None)
name = 'dop853'
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=6.0,
dfactor=0.3,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
super(self.__class__, self).__init__(rtol, atol, nsteps, max_step,
first_step, safety, ifactor,
dfactor, beta, method,
verbosity)
def reset(self, n, has_jac):
work = zeros((11 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), int32)
iwork[0] = self.nsteps
iwork[2] = self.verbosity
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork]
self.success = 1
if dop853.runner is not None:
IntegratorBase.integrator_classes.append(dop853)
class lsoda(IntegratorBase):
runner = getattr(_lsoda, 'lsoda', None)
active_global_handle = 0
messages = {
2: "Integration successful.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def __init__(self,
with_jacobian=False,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
ixpr=0,
max_hnil=0,
max_order_ns=12,
max_order_s=5,
method=None
):
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.max_order_ns = max_order_ns
self.max_order_s = max_order_s
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.ixpr = ixpr
self.max_hnil = max_hnil
self.success = 1
self.initialized = False
def reset(self, n, has_jac):
        # Calculate parameters for the Fortran subroutine lsoda.
if has_jac:
if self.mu is None and self.ml is None:
jt = 1
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
jt = 4
else:
if self.mu is None and self.ml is None:
jt = 2
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
jt = 5
lrn = 20 + (self.max_order_ns + 4) * n
if jt in [1, 2]:
lrs = 22 + (self.max_order_s + 4) * n + n * n
elif jt in [4, 5]:
lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
else:
raise ValueError('Unexpected jt=%s' % jt)
lrw = max(lrn, lrs)
liw = 20 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.ixpr
iwork[5] = self.nsteps
iwork[6] = self.max_hnil
iwork[7] = self.max_order_ns
iwork[8] = self.max_order_s
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, jt]
self.success = 1
self.initialized = False
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
args = [f, y0, t0, t1] + self.call_args[:-1] + \
[jac, self.call_args[-1], f_params, 0, jac_params]
y1, t, istate = self.runner(*args)
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
self.istate = 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
if lsoda.runner:
IntegratorBase.integrator_classes.append(lsoda)
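# Illustrative sketch added for exposition (a hypothetical helper, not part
# of the original scipy source): lsoda switches automatically between the
# Adams and BDF methods; on this stiff relaxation problem the solution is
# pulled rapidly onto the slow manifold y ~ cos(t).
def _example_lsoda_stiff():
    from numpy import cos
    r = ode(lambda t, y: -1000.0 * (y - cos(t))).set_integrator('lsoda')
    r.set_initial_value([0.0], 0.0)
    return r.integrate(1.0)  # close to cos(1.0) = 0.5403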
| 48,002 | 33.987609 | 90 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/__init__.py
|
"""
=============================================
Integration and ODEs (:mod:`scipy.integrate`)
=============================================
.. currentmodule:: scipy.integrate
Integrating functions, given function object
============================================
.. autosummary::
:toctree: generated/
quad -- General purpose integration
dblquad -- General purpose double integration
tplquad -- General purpose triple integration
nquad -- General purpose n-dimensional integration
fixed_quad -- Integrate func(x) using Gaussian quadrature of order n
quadrature -- Integrate with given tolerance using Gaussian quadrature
romberg -- Integrate func using Romberg integration
quad_explain -- Print information for use of quad
newton_cotes -- Weights and error coefficient for Newton-Cotes integration
IntegrationWarning -- Warning on issues during integration
Integrating functions, given fixed samples
==========================================
.. autosummary::
:toctree: generated/
trapz -- Use trapezoidal rule to compute integral.
cumtrapz -- Use trapezoidal rule to cumulatively compute integral.
simps -- Use Simpson's rule to compute integral from samples.
romb -- Use Romberg Integration to compute integral from
-- (2**k + 1) evenly-spaced samples.
.. seealso::
   :mod:`scipy.special` for orthogonal polynomials (special), and for
   Gaussian quadrature roots and weights for other weighting factors and
   regions.
Solving initial value problems for ODE systems
==============================================
The solvers are implemented as individual classes which can be used directly
(low-level usage) or through a convenience function.
.. autosummary::
:toctree: generated/
solve_ivp -- Convenient function for ODE integration.
RK23 -- Explicit Runge-Kutta solver of order 3(2).
RK45 -- Explicit Runge-Kutta solver of order 5(4).
Radau -- Implicit Runge-Kutta solver of order 5.
BDF -- Implicit multi-step variable order (1 to 5) solver.
LSODA -- LSODA solver from ODEPACK Fortran package.
OdeSolver -- Base class for ODE solvers.
DenseOutput -- Local interpolant for computing a dense output.
OdeSolution -- Class which represents a continuous ODE solution.
Old API
-------
These are the routines developed earlier for scipy. They wrap older solvers
implemented in Fortran (mostly ODEPACK). While the interface to them is not
particularly convenient and certain features are missing compared to the new
API, the solvers themselves are of good quality and work fast as compiled
Fortran code. In some cases it might be worth using this old API.
.. autosummary::
:toctree: generated/
odeint -- General integration of ordinary differential equations.
ode -- Integrate ODE using VODE and ZVODE routines.
complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
Solving boundary value problems for ODE systems
===============================================
.. autosummary::
:toctree: generated/
solve_bvp -- Solve a boundary value problem for a system of ODEs.
"""
from __future__ import division, print_function, absolute_import
from .quadrature import *
from .odepack import *
from .quadpack import *
from ._ode import *
from ._bvp import solve_bvp
from ._ivp import (solve_ivp, OdeSolution, DenseOutput,
OdeSolver, RK23, RK45, Radau, BDF, LSODA)
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
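# A minimal usage sketch (an editorial addition, not part of the original
# module), exercising only the public API documented above: solve
# dy/dt = -2*y on [0, 1] with `solve_ivp` and the default RK45 method.
#
#     from scipy.integrate import solve_ivp
#
#     sol = solve_ivp(lambda t, y: -2 * y, (0, 1), [1.0])
#     print(sol.t[-1], sol.y[0, -1])  # y(1) is close to exp(-2) ~= 0.1353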
| 3,725 | 35.529412 | 78 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/quadpack.py
|
# Author: Travis Oliphant 2001
# Author: Nathan Woods 2013 (nquad &c)
from __future__ import division, print_function, absolute_import
import sys
import warnings
from functools import partial
from . import _quadpack
import numpy
from numpy import Inf
__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain',
'IntegrationWarning']
error = _quadpack.error
class IntegrationWarning(UserWarning):
"""
Warning on issues during integration.
"""
pass
def quad_explain(output=sys.stdout):
"""
Print extra information about integrate.quad() parameters and returns.
Parameters
----------
output : instance with "write" method, optional
Information about `quad` is passed to ``output.write()``.
Default is ``sys.stdout``.
Returns
-------
None
"""
output.write(quad.__doc__)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : {function, scipy.LowLevelCallable}
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
        If the user desires improved integration performance, then `func`
        may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(double x)
double func(double x, void *user_data)
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
In the call forms with ``xx``, ``n`` is the length of the ``xx``
array which contains ``xx[0] == x`` and the rest of the items are
numbers contained in the ``args`` argument of quad.
In addition, certain ctypes call signatures are supported for
backward compatibility, but those should not be used in new code.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
Run scipy.integrate.quad_explain() for more information.
message
A convergence message.
explain
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance.
epsrel : float or int, optional
Relative error tolerance.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simps : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e. it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
        subinterval of ``(pts[1], pts[2])`` where ``pts[1]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions. The possible values of weight and the corresponding
weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For finite integration limits, the integration is performed using a
Clenshaw-Curtis method which uses Chebyshev moments. For repeated
calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
If one of the integration limits is infinite, then a Fourier integral is
    computed (assuming ``w != 0``). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
        ``infodict['rslst']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
        ``infodict['rslst']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
>>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
>>> f = lambda x,a : a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
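    Calculate :math:`\\int^1_0 \\cos(10 x) dx` using the 'cos' weighting (a
    minimal editorial sketch; the error estimate is discarded because its
    exact value may vary between QUADPACK builds)
    >>> y, err = integrate.quad(lambda x: 1.0, 0, 1, weight='cos', wvar=10)
    >>> round(y, 6)
    -0.054402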
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
integrate.quad(lib.func,0,1,(1))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
    Be aware that pulse shapes and other sharp features that are narrow
    compared to the size of the integration interval may not be integrated
    correctly using this method. A simplified example of this limitation is
    integrating a y-axis reflected step function with many zero values
    within the integral's bounds.
>>> y = lambda x: 1 if x<=0 else 0
>>> integrate.quad(y, -1, 1)
(1.0, 1.1102230246251565e-14)
>>> integrate.quad(y, -1, 100)
(1.0000000002199108, 1.0189464580163188e-08)
>>> integrate.quad(y, -1, 10000)
(0.0, 0.0)
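    Supplying the location of the discontinuity via ``points`` (a sketch of
    one remedy; the error estimate is discarded here) recovers the expected
    value:
    >>> y_val, _ = integrate.quad(y, -1, 10000, points=[0])
    >>> round(y_val, 6)
    1.0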
"""
if not isinstance(args, tuple):
args = (args,)
# check the limits of integration: \int_a^b, expect a < b
flip, a, b = b < a, min(a, b), max(a, b)
if weight is None:
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
if flip:
retval = (-retval[0],) + retval[1:]
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == Inf or a == -Inf):
msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or slowly convergent."}
try:
msg = msgs[ier]
except KeyError:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
            if weight in ['cos', 'sin'] and (b == Inf or a == -Inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning)
return retval[:-1]
elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
if epsabs <= 0: # Small error tolerance - applies to all methods
if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
msg = ("If 'errabs'<=0, 'epsrel' must be greater than both"
" 5e-29 and 50*(machine epsilon).")
elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf):
msg = ("Sine or cosine weighted intergals with infinite domain"
" must have 'epsabs'>0.")
elif weight is None:
if points is None: # QAGSE/QAGIE
msg = ("Invalid 'limit' argument. There must be"
" at least one subinterval")
else: # QAGPE
if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
msg = ("All break points in 'points' must lie within the"
" integration limits.")
elif len(points) >= limit:
msg = ("Number of break points ({:d})"
" must be less than subinterval"
" limit ({:d})").format(len(points), limit)
else:
if maxp1 < 1:
msg = "Chebyshev moment limit maxp1 must be >=1."
elif weight in ('cos', 'sin') and abs(a+b) == Inf: # QAWFE
msg = "Cycle limit limlst must be >=3."
elif weight.startswith('alg'): # QAWSE
if min(wvar) < -1:
msg = "wvar parameters (alpha, beta) must both be >= -1."
if b < a:
msg = "Integration limits a, b must satistfy a<b."
elif weight == 'cauchy' and wvar in (a, b):
msg = ("Parameter 'wvar' must not equal"
" integration limits 'a' or 'b'.")
raise ValueError(msg)
def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
infbounds = 0
if (b != Inf and a != -Inf):
pass # standard integration
elif (b == Inf and a != -Inf):
infbounds = 1
bound = a
elif (b == Inf and a == -Inf):
infbounds = 2
bound = 0 # ignored
elif (b != Inf and a == -Inf):
infbounds = -1
bound = b
else:
raise RuntimeError("Infinity comparisons don't work for you.")
if points is None:
if infbounds == 0:
return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
else:
return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
else:
if infbounds != 0:
raise ValueError("Infinity inputs cannot be used with break points.")
else:
            # Duplicates force function evaluation at singular points
the_points = numpy.unique(points)
the_points = the_points[a < the_points]
the_points = the_points[the_points < b]
the_points = numpy.concatenate((the_points, (0., 0.)))
return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit)
def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
raise ValueError("%s not a recognized weighting function." % weight)
strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
if weight in ['cos','sin']:
integr = strdict[weight]
if (b != Inf and a != -Inf): # finite limits
if wopts is None: # no precomputed chebyshev moments
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1,1)
else: # precomputed chebyshev moments
momcom = wopts[0]
chebcom = wopts[1]
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1, 2, momcom, chebcom)
elif (b == Inf and a != -Inf):
return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
epsabs,limlst,limit,maxp1)
elif (b != Inf and a == -Inf): # remap function and interval
if weight == 'cos':
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return func(*myargs)
else:
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return -func(*myargs)
args = (func,) + args
return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
full_output, epsabs, limlst, limit, maxp1)
else:
raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
else:
if a in [-Inf,Inf] or b in [-Inf,Inf]:
raise ValueError("Cannot integrate with this weight over an infinite interval.")
if weight.startswith('alg'):
integr = strdict[weight]
return _quadpack._qawse(func, a, b, wvar, integr, args,
full_output, epsabs, epsrel, limit)
else: # weight == 'cauchy'
return _quadpack._qawce(func, a, b, wvar, args, full_output,
epsabs, epsrel, limit)
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
"""
Compute a double integral.
Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
and ``y = gfun(x)..hfun(x)``.
Parameters
----------
func : callable
A Python function or method of at least two variables: y must be the
first argument and x the second argument.
a, b : float
The limits of integration in x: `a` < `b`
gfun : callable or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : callable or float
The upper boundary curve in y (same requirements as `gfun`).
args : sequence, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the inner 1-D quadrature
integration. Default is 1.49e-8.
epsrel : float, optional
Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See also
--------
quad : single integral
tplquad : triple integral
nquad : N-dimensional integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simps : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Examples
--------
Compute the double integral of ``x * y**2`` over the box
``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
>>> from scipy import integrate
>>> f = lambda y, x: x*y**2
>>> integrate.dblquad(f, 0, 2, lambda x: 0, lambda x: 1)
(0.6666666666666667, 7.401486830834377e-15)
"""
def temp_ranges(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
return nquad(func, [temp_ranges, [a, b]], args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
epsrel=1.49e-8):
"""
Compute a triple (definite) integral.
Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.
Parameters
----------
func : function
A Python function or method of at least three variables in the
order (z, y, x).
a, b : float
The limits of integration in x: `a` < `b`
gfun : function or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : function or float
The upper boundary curve in y (same requirements as `gfun`).
qfun : function or float
The lower boundary surface in z. It must be a function that takes
two floats in the order (x, y) and returns a float or a float
indicating a constant boundary surface.
rfun : function or float
The upper boundary surface in z. (Same requirements as `qfun`.)
args : tuple, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the innermost 1-D quadrature
integration. Default is 1.49e-8.
epsrel : float, optional
Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See Also
--------
quad: Adaptive quadrature using QUADPACK
quadrature: Adaptive Gaussian quadrature
fixed_quad: Fixed-order Gaussian quadrature
dblquad: Double integrals
nquad : N-dimensional integrals
romb: Integrators for sampled data
simps: Integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
scipy.special: For coefficients and roots of orthogonal polynomials
Examples
--------
Compute the triple integral of ``x * y * z``, over ``x`` ranging
from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.
>>> from scipy import integrate
>>> f = lambda z, y, x: x*y*z
>>> integrate.tplquad(f, 1, 2, lambda x: 2, lambda x: 3,
... lambda x, y: 0, lambda x, y: 1)
(1.8750000000000002, 3.324644794257407e-14)
"""
# f(z, y, x)
# qfun/rfun (x, y)
# gfun/hfun(x)
# nquad will hand (y, x, t0, ...) to ranges0
# nquad will hand (x, t0, ...) to ranges1
    # The range callables receive arguments in different orders, hence the
    # adapter functions below.
def ranges0(*args):
return [qfun(args[1], args[0]) if callable(qfun) else qfun,
rfun(args[1], args[0]) if callable(rfun) else rfun]
def ranges1(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
ranges = [ranges0, ranges1, [a, b]]
return nquad(func, ranges, args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
def nquad(func, ranges, args=None, opts=None, full_output=False):
"""
Integration over multiple variables.
Wraps `quad` to enable integration over multiple variables.
Various options allow improved integration of discontinuous functions, as
well as the use of weighted integration, and generally finer control of the
integration process.
Parameters
----------
func : {callable, scipy.LowLevelCallable}
The function to be integrated. Has arguments of ``x0, ... xn``,
        ``t0, ..., tm``, where integration is carried out over ``x0, ... xn``, which
must be floats. Function signature should be
``func(x0, x1, ..., xn, t0, t1, ..., tm)``. Integration is carried out
in order. That is, integration over ``x0`` is the innermost integral,
and ``xn`` is the outermost.
        If the user desires improved integration performance, then `func` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
where ``n`` is the number of extra parameters and args is an array
        of doubles of the additional parameters; the ``xx`` array contains
coordinates. The ``user_data`` is the data contained in the
`scipy.LowLevelCallable`.
ranges : iterable object
Each element of ranges may be either a sequence of 2 numbers, or else
a callable that returns such a sequence. ``ranges[0]`` corresponds to
integration over x0, and so on. If an element of ranges is a callable,
then it will be called with all of the integration arguments available,
as well as any parametric arguments. e.g. if
``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
args : iterable object, optional
Additional arguments ``t0, ..., tn``, required by `func`, `ranges`, and
``opts``.
opts : iterable object or dict, optional
Options to be passed to `quad`. May be empty, a dict, or
a sequence of dicts or functions that return a dict. If empty, the
default options from scipy.integrate.quad are used. If a dict, the same
        options are used for all levels of integration. If a sequence, then each
element of the sequence corresponds to a particular integration. e.g.
opts[0] corresponds to integration over x0, and so on. If a callable,
the signature must be the same as for ``ranges``. The available
options together with their default values are:
- epsabs = 1.49e-08
- epsrel = 1.49e-08
- limit = 50
- points = None
- weight = None
- wvar = None
- wopts = None
For more information on these options, see `quad` and `quad_explain`.
full_output : bool, optional
Partial implementation of ``full_output`` from scipy.integrate.quad.
The number of integrand function evaluations ``neval`` can be obtained
by setting ``full_output=True`` when calling nquad.
Returns
-------
result : float
The result of the integration.
abserr : float
The maximum of the estimates of the absolute error in the various
integration results.
out_dict : dict, optional
A dict containing additional information on the integration.
See Also
--------
quad : 1-dimensional numerical integration
dblquad, tplquad : double and triple integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
Examples
--------
>>> from scipy import integrate
>>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
... 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
>>> points = [[lambda x1,x2,x3 : 0.2*x3 + 0.5 + 0.25*x1], [], [], []]
>>> def opts0(*args, **kwargs):
... return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
>>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
... opts=[opts0,{},{},{}], full_output=True)
(1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
>>> scale = .1
>>> def func2(x0, x1, x2, x3, t0, t1):
... return x0*x1*x3**2 + np.sin(x2) + 1 + (1 if x0+t1*x1-t0>0 else 0)
>>> def lim0(x1, x2, x3, t0, t1):
... return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
... scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
>>> def lim1(x2, x3, t0, t1):
... return [scale * (t0*x2 + t1*x3) - 1,
... scale * (t0*x2 + t1*x3) + 1]
>>> def lim2(x3, t0, t1):
... return [scale * (x3 + t0**2*t1**3) - 1,
... scale * (x3 + t0**2*t1**3) + 1]
>>> def lim3(t0, t1):
... return [scale * (t0+t1) - 1, scale * (t0+t1) + 1]
>>> def opts0(x1, x2, x3, t0, t1):
... return {'points' : [t0 - t1*x1]}
>>> def opts1(x2, x3, t0, t1):
... return {}
>>> def opts2(x3, t0, t1):
... return {}
>>> def opts3(t0, t1):
... return {}
>>> integrate.nquad(func2, [lim0, lim1, lim2, lim3], args=(0,0),
... opts=[opts0, opts1, opts2, opts3])
(25.066666666666666, 2.7829590483937256e-13)
"""
depth = len(ranges)
ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
if args is None:
args = ()
if opts is None:
opts = [dict([])] * depth
if isinstance(opts, dict):
opts = [_OptFunc(opts)] * depth
else:
opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
return _NQuad(func, ranges, opts, full_output).integrate(*args)
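# A minimal sketch (an editorial addition, not part of the original
# module): passing a single dict as `opts` applies the same options to
# every integration level, via the `_OptFunc` wrapper defined below.
#
#     from scipy import integrate
#
#     val, err = integrate.nquad(lambda x, y: x * y, [[0, 1], [0, 1]],
#                                opts={'epsabs': 1e-10})
#     # val is approximately 0.25.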
class _RangeFunc(object):
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
class _OptFunc(object):
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
class _NQuad(object):
def __init__(self, func, ranges, opts, full_output):
self.abserr = 0
self.func = func
self.ranges = ranges
self.opts = opts
self.maxdepth = len(ranges)
self.full_output = full_output
if self.full_output:
self.out_dict = {'neval': 0}
def integrate(self, *args, **kwargs):
depth = kwargs.pop('depth', 0)
if kwargs:
raise ValueError('unexpected kwargs')
# Get the integration range and options for this depth.
ind = -(depth + 1)
fn_range = self.ranges[ind]
low, high = fn_range(*args)
fn_opt = self.opts[ind]
opt = dict(fn_opt(*args))
if 'points' in opt:
opt['points'] = [x for x in opt['points'] if low <= x <= high]
if depth + 1 == self.maxdepth:
f = self.func
else:
f = partial(self.integrate, depth=depth+1)
quad_r = quad(f, low, high, args=args, full_output=self.full_output,
**opt)
value = quad_r[0]
abserr = quad_r[1]
if self.full_output:
infodict = quad_r[2]
# The 'neval' parameter in full_output returns the total
# number of times the integrand function was evaluated.
# Therefore, only the innermost integration loop counts.
if depth + 1 == self.maxdepth:
self.out_dict['neval'] += infodict['neval']
self.abserr = max(self.abserr, abserr)
if depth > 0:
return value
else:
# Final result of n-D integration with error
if self.full_output:
return value, self.abserr, self.out_dict
else:
return value, self.abserr
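# An editorial note on the recursion above (not part of the original
# module): the outermost call integrates over ranges[-1], and each level
# integrates the level below it, so for two variables
#
#     nquad(f, [r0, r1])
#
# is equivalent, up to error bookkeeping, to
#
#     quad(lambda x1: quad(lambda x0: f(x0, x1), *r0)[0], *r1)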
| 36,157 | 40.135381 | 468 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/tests/test_bvp.py
|
from __future__ import division, print_function, absolute_import
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
from numpy.testing import (assert_, assert_array_equal, assert_allclose,
assert_equal)
from pytest import raises as assert_raises
from scipy.sparse import coo_matrix
from scipy.special import erf
from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac,
estimate_bc_jac, compute_jac_indices,
construct_global_jac, solve_bvp)
def exp_fun(x, y):
return np.vstack((y[1], y[0]))
def exp_fun_jac(x, y):
df_dy = np.empty((2, 2, x.shape[0]))
df_dy[0, 0] = 0
df_dy[0, 1] = 1
df_dy[1, 0] = 1
df_dy[1, 1] = 0
return df_dy
def exp_bc(ya, yb):
return np.hstack((ya[0] - 1, yb[0]))
def exp_bc_complex(ya, yb):
return np.hstack((ya[0] - 1 - 1j, yb[0]))
def exp_bc_jac(ya, yb):
dbc_dya = np.array([
[1, 0],
[0, 0]
])
dbc_dyb = np.array([
[0, 0],
[1, 0]
])
return dbc_dya, dbc_dyb
def exp_sol(x):
return (np.exp(-x) - np.exp(x - 2)) / (1 - np.exp(-2))
def sl_fun(x, y, p):
return np.vstack((y[1], -p[0]**2 * y[0]))
def sl_fun_jac(x, y, p):
n, m = y.shape
df_dy = np.empty((n, 2, m))
df_dy[0, 0] = 0
df_dy[0, 1] = 1
df_dy[1, 0] = -p[0]**2
df_dy[1, 1] = 0
df_dp = np.empty((n, 1, m))
df_dp[0, 0] = 0
df_dp[1, 0] = -2 * p[0] * y[0]
return df_dy, df_dp
def sl_bc(ya, yb, p):
return np.hstack((ya[0], yb[0], ya[1] - p[0]))
def sl_bc_jac(ya, yb, p):
dbc_dya = np.zeros((3, 2))
dbc_dya[0, 0] = 1
dbc_dya[2, 1] = 1
dbc_dyb = np.zeros((3, 2))
dbc_dyb[1, 0] = 1
dbc_dp = np.zeros((3, 1))
dbc_dp[2, 0] = -1
return dbc_dya, dbc_dyb, dbc_dp
def sl_sol(x, p):
return np.sin(p[0] * x)
def emden_fun(x, y):
return np.vstack((y[1], -y[0]**5))
def emden_fun_jac(x, y):
df_dy = np.empty((2, 2, x.shape[0]))
df_dy[0, 0] = 0
df_dy[0, 1] = 1
df_dy[1, 0] = -5 * y[0]**4
df_dy[1, 1] = 0
return df_dy
def emden_bc(ya, yb):
return np.array([ya[1], yb[0] - (3/4)**0.5])
def emden_bc_jac(ya, yb):
dbc_dya = np.array([
[0, 1],
[0, 0]
])
dbc_dyb = np.array([
[0, 0],
[1, 0]
])
return dbc_dya, dbc_dyb
def emden_sol(x):
return (1 + x**2/3)**-0.5
def undefined_fun(x, y):
return np.zeros_like(y)
def undefined_bc(ya, yb):
return np.array([ya[0], yb[0] - 1])
def big_fun(x, y):
f = np.zeros_like(y)
f[::2] = y[1::2]
return f
def big_bc(ya, yb):
return np.hstack((ya[::2], yb[::2] - 1))
def big_sol(x, n):
    # Only the first solution component, which equals x, is checked by
    # test_big_problem, so returning x suffices here.
    y = np.ones((2 * n, x.size))
    y[::2] = x
    return x
def shock_fun(x, y):
eps = 1e-3
return np.vstack((
y[1],
-(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) +
np.pi * x * np.sin(np.pi * x)) / eps
))
def shock_bc(ya, yb):
return np.array([ya[0] + 2, yb[0]])
def shock_sol(x):
eps = 1e-3
k = np.sqrt(2 * eps)
return np.cos(np.pi * x) + erf(x / k) / erf(1 / k)
def test_modify_mesh():
x = np.array([0, 1, 3, 9], dtype=float)
x_new = modify_mesh(x, np.array([0]), np.array([2]))
assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9]))
x = np.array([-6, -3, 0, 3, 6], dtype=float)
x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3]))
assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6])
def test_compute_fun_jac():
x = np.linspace(0, 1, 5)
y = np.empty((2, x.shape[0]))
y[0] = 0.01
y[1] = 0.02
p = np.array([])
df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p)
df_dy_an = exp_fun_jac(x, y)
assert_allclose(df_dy, df_dy_an)
assert_(df_dp is None)
x = np.linspace(0, np.pi, 5)
y = np.empty((2, x.shape[0]))
y[0] = np.sin(x)
y[1] = np.cos(x)
p = np.array([1.0])
df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
df_dy_an, df_dp_an = sl_fun_jac(x, y, p)
assert_allclose(df_dy, df_dy_an)
assert_allclose(df_dp, df_dp_an)
x = np.linspace(0, 1, 10)
y = np.empty((2, x.shape[0]))
y[0] = (3/4)**0.5
y[1] = 1e-4
p = np.array([])
df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p)
df_dy_an = emden_fun_jac(x, y)
assert_allclose(df_dy, df_dy_an)
assert_(df_dp is None)
def test_compute_bc_jac():
ya = np.array([-1.0, 2])
yb = np.array([0.5, 3])
p = np.array([])
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p)
dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb)
assert_allclose(dbc_dya, dbc_dya_an)
assert_allclose(dbc_dyb, dbc_dyb_an)
assert_(dbc_dp is None)
ya = np.array([0.0, 1])
yb = np.array([0.0, -1])
p = np.array([0.5])
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p)
dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p)
assert_allclose(dbc_dya, dbc_dya_an)
assert_allclose(dbc_dyb, dbc_dyb_an)
assert_allclose(dbc_dp, dbc_dp_an)
ya = np.array([0.5, 100])
yb = np.array([-1000, 10.5])
p = np.array([])
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p)
dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb)
assert_allclose(dbc_dya, dbc_dya_an)
assert_allclose(dbc_dyb, dbc_dyb_an)
assert_(dbc_dp is None)
def test_compute_jac_indices():
n = 2
m = 4
k = 2
i, j = compute_jac_indices(n, m, k)
s = coo_matrix((np.ones_like(i), (i, j))).toarray()
s_true = np.array([
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
])
assert_array_equal(s, s_true)
def test_compute_global_jac():
n = 2
m = 5
k = 1
i_jac, j_jac = compute_jac_indices(2, 5, 1)
x = np.linspace(0, 1, 5)
h = np.diff(x)
y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x)))
p = np.array([3.0])
f = sl_fun(x, y, p)
x_middle = x[:-1] + 0.5 * h
y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1])
df_dy, df_dp = sl_fun_jac(x, y, p)
df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p)
dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p)
J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
J = J.toarray()
def J_block(h, p):
return np.array([
[h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h],
[0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12]
])
J_true = np.zeros((m * n + k, m * n + k))
for i in range(m - 1):
J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p)
J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:])
J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) +
h**2/6 * (y[1, :-1] - y[1, 1:]))
J_true[8, 0] = 1
J_true[9, 8] = 1
J_true[10, 1] = 1
J_true[10, 10] = -1
assert_allclose(J, J_true, rtol=1e-10)
df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p)
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p)
J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
J = J.toarray()
assert_allclose(J, J_true, rtol=1e-8, atol=1e-9)
def test_parameter_validation():
x = [0, 1, 0.5]
y = np.zeros((2, 3))
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
x = np.linspace(0, 1, 5)
y = np.zeros((2, 4))
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
fun = lambda x, y, p: exp_fun(x, y)
bc = lambda ya, yb, p: exp_bc(ya, yb)
y = np.zeros((2, x.shape[0]))
assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1])
def wrong_shape_fun(x, y):
return np.zeros(3)
assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y)
S = np.array([[0, 0]])
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S)
def test_no_params():
x = np.linspace(0, 1, 5)
x_test = np.linspace(0, 1, 100)
y = np.zeros((2, x.shape[0]))
for fun_jac in [None, exp_fun_jac]:
for bc_jac in [None, exp_bc_jac]:
sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_equal(sol.x.size, 5)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5)
f_test = exp_fun(x_test, sol_test)
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res**2, axis=0)**0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_with_params():
x = np.linspace(0, np.pi, 5)
x_test = np.linspace(0, np.pi, 100)
y = np.ones((2, x.shape[0]))
for fun_jac in [None, sl_fun_jac]:
for bc_jac in [None, sl_bc_jac]:
sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_(sol.x.size < 10)
assert_allclose(sol.p, [1], rtol=1e-4)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], sl_sol(x_test, [1]),
rtol=1e-4, atol=1e-4)
f_test = sl_fun(x_test, sol_test, [1])
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_singular_term():
x = np.linspace(0, 1, 10)
x_test = np.linspace(0.05, 1, 100)
y = np.empty((2, 10))
y[0] = (3/4)**0.5
y[1] = 1e-4
S = np.array([[0, 0], [0, -2]])
for fun_jac in [None, emden_fun_jac]:
for bc_jac in [None, emden_bc_jac]:
sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_equal(sol.x.size, 10)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5)
f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_complex():
# The test is essentially the same as test_no_params, but boundary
# conditions are turned into complex.
x = np.linspace(0, 1, 5)
x_test = np.linspace(0, 1, 100)
y = np.zeros((2, x.shape[0]), dtype=complex)
for fun_jac in [None, exp_fun_jac]:
for bc_jac in [None, exp_bc_jac]:
sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5)
assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5)
f_test = exp_fun(x_test, sol_test)
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(np.real(rel_res * np.conj(rel_res)),
axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_failures():
x = np.linspace(0, 1, 2)
y = np.zeros((2, x.size))
res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5)
assert_equal(res.status, 1)
assert_(not res.success)
x = np.linspace(0, 1, 5)
y = np.zeros((2, x.size))
res = solve_bvp(undefined_fun, undefined_bc, x, y)
assert_equal(res.status, 2)
assert_(not res.success)
def test_big_problem():
n = 30
x = np.linspace(0, 1, 5)
y = np.zeros((2 * n, x.size))
sol = solve_bvp(big_fun, big_bc, x, y)
assert_equal(sol.status, 0)
assert_(sol.success)
sol_test = sol.sol(x)
assert_allclose(sol_test[0], big_sol(x, n))
f_test = big_fun(x, sol_test)
r = sol.sol(x, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_shock_layer():
x = np.linspace(-1, 1, 5)
x_test = np.linspace(-1, 1, 100)
y = np.zeros((2, x.size))
sol = solve_bvp(shock_fun, shock_bc, x, y)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_(sol.x.size < 110)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5)
f_test = shock_fun(x_test, sol_test)
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_verbose():
# Smoke test that checks the printing does something and does not crash
x = np.linspace(0, 1, 5)
y = np.zeros((2, x.shape[0]))
for verbose in [0, 1, 2]:
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
sol = solve_bvp(exp_fun, exp_bc, x, y, verbose=verbose)
text = sys.stdout.getvalue()
finally:
sys.stdout = old_stdout
assert_(sol.success)
if verbose == 0:
assert_(not text, text)
if verbose >= 1:
assert_("Solved in" in text, text)
if verbose >= 2:
assert_("Max residual" in text, text)
| 15,523 | 27.021661 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/tests/banded5x5.f
|
c banded5x5.f
c
c This Fortran library contains implementations of the
c differential equation
c dy/dt = A*y
c where A is a 5x5 banded matrix (see below for the actual
c values). These functions will be used to test
c scipy.integrate.odeint.
c
c The idea is to solve the system two ways: pure Fortran, and
c using odeint. The "pure Fortran" solver is implemented in
c the subroutine banded5x5_solve below. It calls LSODA to
c solve the system.
c
c To solve the same system using odeint, the functions in this
c file are given a python wrapper using f2py. Then the code
c in test_odeint_jac.py uses the wrapper to implement the
c equation and Jacobian functions required by odeint. Because
c those functions ultimately call the Fortran routines defined
c in this file, the two methods (pure Fortran and odeint) should
c produce exactly the same results. (That's assuming floating
c point calculations are deterministic, which can be an
c incorrect assumption.) If we simply re-implemented the
c equation and Jacobian functions using just python and numpy,
c the floating point calculations would not be performed in
c the same sequence as in the Fortran code, and we would obtain
c different answers. The answer for either method would be
c numerically "correct", but the errors would be different,
c and the counts of function and Jacobian evaluations would
c likely be different.
c
block data jacobian
implicit none
double precision bands
dimension bands(4,5)
common /jac/ bands
c The data for a banded Jacobian stored in packed banded
c format. The full Jacobian is
c
c -1, 0.25, 0, 0, 0
c 0.25, -5, 0.25, 0, 0
c 0.10, 0.25, -25, 0.25, 0
c 0, 0.10, 0.25, -125, 0.25
c 0, 0, 0.10, 0.25, -625
c
c The columns in the following layout of numbers are
c the upper diagonal, main diagonal and two lower diagonals
c (i.e. each row in the layout is a column of the packed
c banded Jacobian). The values 0.00D0 are in the "don't
c care" positions.
data bands/
+ 0.00D0, -1.0D0, 0.25D0, 0.10D0,
+ 0.25D0, -5.0D0, 0.25D0, 0.10D0,
+ 0.25D0, -25.0D0, 0.25D0, 0.10D0,
+ 0.25D0, -125.0D0, 0.25D0, 0.00D0,
+ 0.25D0, -625.0D0, 0.00D0, 0.00D0
+ /
end
subroutine getbands(jac)
double precision jac
dimension jac(4, 5)
cf2py intent(out) jac
double precision bands
dimension bands(4,5)
common /jac/ bands
integer i, j
do 5 i = 1, 4
do 5 j = 1, 5
jac(i, j) = bands(i, j)
5 continue
return
end
c
c Differential equations, right-hand-side
c
subroutine banded5x5(n, t, y, f)
implicit none
integer n
double precision t, y, f
dimension y(n), f(n)
double precision bands
dimension bands(4,5)
common /jac/ bands
f(1) = bands(2,1)*y(1) + bands(1,2)*y(2)
f(2) = bands(3,1)*y(1) + bands(2,2)*y(2) + bands(1,3)*y(3)
f(3) = bands(4,1)*y(1) + bands(3,2)*y(2) + bands(2,3)*y(3)
+ + bands(1,4)*y(4)
f(4) = bands(4,2)*y(2) + bands(3,3)*y(3) + bands(2,4)*y(4)
+ + bands(1,5)*y(5)
f(5) = bands(4,3)*y(3) + bands(3,4)*y(4) + bands(2,5)*y(5)
return
end
c
c Jacobian
c
c The subroutine assumes that the full Jacobian is to be computed.
c ml and mu are ignored, and nrowpd is assumed to be n.
c
subroutine banded5x5_jac(n, t, y, ml, mu, jac, nrowpd)
implicit none
integer n, ml, mu, nrowpd
double precision t, y, jac
dimension y(n), jac(nrowpd, n)
integer i, j
double precision bands
dimension bands(4,5)
common /jac/ bands
do 15 i = 1, 4
do 15 j = 1, 5
c             The packed element bands(i, j) is entry (i + j - 2, j) of
c             the full Jacobian; skip the out-of-range "don't care" slots.
              if ((i + j - 2) .ge. 1 .and. (i + j - 2) .le. n) then
                  jac(i + j - 2, j) = bands(i, j)
              end if
15 continue
return
end
c
c Banded Jacobian
c
c ml = 2, mu = 1
c
subroutine banded5x5_bjac(n, t, y, ml, mu, bjac, nrowpd)
implicit none
integer n, ml, mu, nrowpd
double precision t, y, bjac
dimension y(5), bjac(nrowpd, n)
integer i, j
double precision bands
dimension bands(4,5)
common /jac/ bands
do 20 i = 1, 4
do 20 j = 1, 5
bjac(i, j) = bands(i, j)
20 continue
return
end
subroutine banded5x5_solve(y, nsteps, dt, jt, nst, nfe, nje)
c jt is the Jacobian type:
c jt = 1 Use the full Jacobian.
c jt = 4 Use the banded Jacobian.
c nst, nfe and nje are outputs:
c nst: Total number of internal steps
c nfe: Total number of function (i.e. right-hand-side)
c evaluations
c nje: Total number of Jacobian evaluations
implicit none
external banded5x5
external banded5x5_jac
external banded5x5_bjac
external LSODA
c Arguments...
double precision y, dt
integer nsteps, jt, nst, nfe, nje
cf2py intent(inout) y
cf2py intent(in) nsteps, dt, jt
cf2py intent(out) nst, nfe, nje
c Local variables...
double precision atol, rtol, t, tout, rwork
integer iwork
dimension y(5), rwork(500), iwork(500)
integer neq, i
integer itol, iopt, itask, istate, lrw, liw
c Common block...
double precision jacband
dimension jacband(4,5)
common /jac/ jacband
c --- t range ---
t = 0.0D0
c --- Solver tolerances ---
rtol = 1.0D-11
atol = 1.0D-13
itol = 1
c --- Other LSODA parameters ---
neq = 5
itask = 1
istate = 1
iopt = 0
iwork(1) = 2
iwork(2) = 1
lrw = 500
liw = 500
c --- Call LSODA in a loop to compute the solution ---
do 40 i = 1, nsteps
tout = i*dt
if (jt .eq. 1) then
call LSODA(banded5x5, neq, y, t, tout,
& itol, rtol, atol, itask, istate, iopt,
& rwork, lrw, iwork, liw,
& banded5x5_jac, jt)
else
call LSODA(banded5x5, neq, y, t, tout,
& itol, rtol, atol, itask, istate, iopt,
& rwork, lrw, iwork, liw,
& banded5x5_bjac, jt)
end if
40 if (istate .lt. 0) goto 80
nst = iwork(11)
nfe = iwork(12)
nje = iwork(13)
return
80 write (6,89) istate
89 format(1X,"Error: istate=",I3)
return
end
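c
c Editorial sketch (not part of the original file): the odeint side of
c the comparison described in the header comments can be reproduced in
c pure Python, since odeint accepts the same packed banded layout, in
c which packed row i - j + mu, column j holds Jacobian entry (i, j).
c
c     import numpy as np
c     from scipy.integrate import odeint
c
c     A = np.array([[-1.0, 0.25, 0.0, 0.0, 0.0],
c                   [0.25, -5.0, 0.25, 0.0, 0.0],
c                   [0.10, 0.25, -25.0, 0.25, 0.0],
c                   [0.0, 0.10, 0.25, -125.0, 0.25],
c                   [0.0, 0.0, 0.10, 0.25, -625.0]])
c
c     bands = np.zeros((4, 5))
c     bands[0, 1:] = np.diag(A, 1)     # upper diagonal
c     bands[1, :] = np.diag(A)         # main diagonal
c     bands[2, :-1] = np.diag(A, -1)   # first lower diagonal
c     bands[3, :-2] = np.diag(A, -2)   # second lower diagonal
c
c     sol = odeint(lambda y, t: A.dot(y), np.ones(5),
c                  np.linspace(0.0, 1.0, 11),
c                  Dfun=lambda y, t: bands, ml=2, mu=1)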
| 6,668 | 26.672199 | 66 |
f
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/tests/test_quadpack.py
|
from __future__ import division, print_function, absolute_import
import sys
import math
import numpy as np
from numpy import sqrt, cos, sin, arctan, exp, log, pi, Inf
from numpy.testing import (assert_,
assert_allclose, assert_array_less, assert_almost_equal)
import pytest
from pytest import raises as assert_raises
from scipy.integrate import quad, dblquad, tplquad, nquad
from scipy._lib.six import xrange
from scipy._lib._ccallback import LowLevelCallable
import ctypes
import ctypes.util
from scipy._lib._ccallback_c import sine_ctypes
import scipy.integrate._test_multivariate as clib_test
def assert_quad(value_and_err, tabled_value, errTol=1.5e-8):
value, err = value_and_err
assert_allclose(value, tabled_value, atol=err, rtol=0)
if errTol is not None:
assert_array_less(err, errTol)
def get_clib_test_routine(name, restype, *argtypes):
ptr = getattr(clib_test, name)
return ctypes.cast(ptr, ctypes.CFUNCTYPE(restype, *argtypes))
class TestCtypesQuad(object):
def setup_method(self):
if sys.platform == 'win32':
if sys.version_info < (3, 5):
files = [ctypes.util.find_msvcrt()]
else:
files = ['api-ms-win-crt-math-l1-1-0.dll']
elif sys.platform == 'darwin':
files = ['libm.dylib']
else:
files = ['libm.so', 'libm.so.6']
for file in files:
try:
self.lib = ctypes.CDLL(file)
break
except OSError:
pass
else:
# This test doesn't work on some Linux platforms (Fedora for
# example) that put an ld script in libm.so - see gh-5370
self.skipTest("Ctypes can't import libm.so")
restype = ctypes.c_double
argtypes = (ctypes.c_double,)
for name in ['sin', 'cos', 'tan']:
func = getattr(self.lib, name)
func.restype = restype
func.argtypes = argtypes
def test_typical(self):
assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])
def test_ctypes_sine(self):
quad(LowLevelCallable(sine_ctypes), 0, 1)
def test_ctypes_variants(self):
sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double,
ctypes.c_double, ctypes.c_void_p)
sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double,
ctypes.c_int, ctypes.POINTER(ctypes.c_double),
ctypes.c_void_p)
sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double,
ctypes.c_double)
sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double,
ctypes.c_int, ctypes.POINTER(ctypes.c_double))
sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double,
ctypes.c_int, ctypes.c_double)
all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4]
legacy_sigs = [sin_2, sin_4]
legacy_only_sigs = [sin_4]
# LowLevelCallables work for new signatures
for j, func in enumerate(all_sigs):
callback = LowLevelCallable(func)
if func in legacy_only_sigs:
assert_raises(ValueError, quad, callback, 0, pi)
else:
assert_allclose(quad(callback, 0, pi)[0], 2.0)
# Plain ctypes items work only for legacy signatures
for j, func in enumerate(legacy_sigs):
if func in legacy_sigs:
assert_allclose(quad(func, 0, pi)[0], 2.0)
else:
assert_raises(ValueError, quad, func, 0, pi)
class TestMultivariateCtypesQuad(object):
def setup_method(self):
restype = ctypes.c_double
argtypes = (ctypes.c_int, ctypes.c_double)
for name in ['_multivariate_typical', '_multivariate_indefinite',
'_multivariate_sin']:
func = get_clib_test_routine(name, restype, *argtypes)
setattr(self, name, func)
def test_typical(self):
# 1) Typical function with two extra arguments:
assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)),
0.30614353532540296487)
def test_indefinite(self):
# 2) Infinite integration limits --- Euler's constant
assert_quad(quad(self._multivariate_indefinite, 0, Inf),
0.577215664901532860606512)
def test_threadsafety(self):
# Ensure multivariate ctypes are threadsafe
def threadsafety(y):
return y + quad(self._multivariate_sin, 0, 1)[0]
assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
class TestQuad(object):
def test_typical(self):
# 1) Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)
def test_indefinite(self):
# 2) Infinite integration limits --- Euler's constant
def myfunc(x): # Euler's constant integrand
return -exp(-x)*log(x)
assert_quad(quad(myfunc, 0, Inf), 0.577215664901532860606512)
def test_singular(self):
# 3) Singular points in region of integration.
def myfunc(x):
if 0 < x < 2.5:
return sin(x)
elif 2.5 <= x <= 5.0:
return exp(-x)
else:
return 0.0
assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
1 - cos(2.5) + exp(-2.5) - exp(-5.0))
def test_sine_weighted_finite(self):
# 4) Sine weighted integral (finite limits)
def myfunc(x, a):
return exp(a*(x-1))
ome = 2.0**3.4
assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
(20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2))
def test_sine_weighted_infinite(self):
# 5) Sine weighted integral (infinite limits)
def myfunc(x, a):
return exp(-x*a)
a = 4.0
ome = 3.0
assert_quad(quad(myfunc, 0, Inf, args=a, weight='sin', wvar=ome),
ome/(a**2 + ome**2))
def test_cosine_weighted_infinite(self):
# 6) Cosine weighted integral (negative infinite limits)
def myfunc(x, a):
return exp(x*a)
a = 2.5
ome = 2.3
assert_quad(quad(myfunc, -Inf, 0, args=a, weight='cos', wvar=ome),
a/(a**2 + ome**2))
def test_algebraic_log_weight(self):
        # 7) Algebraic-logarithmic weight.
def myfunc(x, a):
return 1/(1+x+2**(-a))
a = 1.5
assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
wvar=(-0.5, -0.5)),
pi/sqrt((1+2**(-a))**2 - 1))
def test_cauchypv_weight(self):
        # 8) Cauchy principal value weighting w(x) = 1/(x-c)
def myfunc(x, a):
return 2.0**(-a)/((x-1)**2+4.0**(-a))
a = 0.4
tabledValue = ((2.0**(-0.4)*log(1.5) -
2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
arctan(2.0**(a+2)) -
arctan(2.0**a)) /
(4.0**(-a) + 1))
assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
tabledValue, errTol=1.9e-8)
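    # Added note (editorial): with weight='cauchy' and wvar=c, quad
    # returns the Cauchy principal value of the integral of f(x)/(x - c),
    # so the integrand above deliberately omits the singular 1/(x - 2)
    # factor that the weight supplies.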
def test_b_less_than_a(self):
def f(x, p, q):
return p * np.exp(-q*x)
val_1, err_1 = quad(f, 0, np.inf, args=(2, 3))
val_2, err_2 = quad(f, np.inf, 0, args=(2, 3))
assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
def test_b_less_than_a_2(self):
def f(x, s):
return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s)
val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,))
val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,))
assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
def test_b_less_than_a_3(self):
def f(x):
return 1.0
val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0))
val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0))
assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
def test_b_less_than_a_full_output(self):
def f(x):
return 1.0
res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True)
res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True)
err = max(res_1[1], res_2[1])
assert_allclose(res_1[0], -res_2[0], atol=err)
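    # Added note (editorial): the four tests above check the documented
    # convention that swapping the limits flips the sign of the result,
    # i.e. quad(f, b, a)[0] == -quad(f, a, b)[0] up to the reported error.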
def test_double_integral(self):
        # 9) Double Integral test
def simpfunc(y, x): # Note order of arguments.
return x+y
a, b = 1.0, 2.0
assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
5/6.0 * (b**3.0-a**3.0))
def test_double_integral2(self):
def func(x0, x1, t0, t1):
return x0 + x1 + t0 + t1
g = lambda x: x
h = lambda x: 2 * x
args = 1, 2
        assert_quad(dblquad(func, 1, 2, g, h, args=args), 35./6 + 9*.5)
def test_double_integral3(self):
def func(x0, x1):
return x0 + x1 + 1 + 2
        assert_quad(dblquad(func, 1, 2, 1, 2), 6.)
def test_triple_integral(self):
        # 10) Triple Integral test
def simpfunc(z, y, x, t): # Note order of arguments.
return (x+y+z)*t
a, b = 1.0, 2.0
assert_quad(tplquad(simpfunc, a, b,
lambda x: x, lambda x: 2*x,
lambda x, y: x - y, lambda x, y: x + y,
(2.,)),
2*8/3.0 * (b**4.0 - a**4.0))
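# --- Illustrative sketch; added commentary, not part of the original
# test suite. dblquad and tplquad integrate the innermost variable
# first, so integrands take their arguments inner-to-outer: func(y, x)
# for dblquad and func(z, y, x) for tplquad, as the "Note order of
# arguments" comments above point out.
def _example_dblquad_order():
    # Area of the unit square; the inner limits may depend on x.
    return dblquad(lambda y, x: 1.0, 0, 1, lambda x: 0, lambda x: 1)[0]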
class TestNQuad(object):
def test_fixed_limits(self):
def func1(x0, x1, x2, x3):
val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) +
(1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0))
return val
def opts_basic(*args):
return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]}
res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]],
opts=[opts_basic, {}, {}, {}], full_output=True)
assert_quad(res[:-1], 1.5267454070738635)
assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5)
def test_variable_limits(self):
scale = .1
def func2(x0, x1, x2, x3, t0, t1):
val = (x0*x1*x3**2 + np.sin(x2) + 1 +
(1 if x0 + t1*x1 - t0 > 0 else 0))
return val
def lim0(x1, x2, x3, t0, t1):
return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
def lim1(x2, x3, t0, t1):
return [scale * (t0*x2 + t1*x3) - 1,
scale * (t0*x2 + t1*x3) + 1]
def lim2(x3, t0, t1):
return [scale * (x3 + t0**2*t1**3) - 1,
scale * (x3 + t0**2*t1**3) + 1]
def lim3(t0, t1):
return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1]
def opts0(x1, x2, x3, t0, t1):
return {'points': [t0 - t1*x1]}
def opts1(x2, x3, t0, t1):
return {}
def opts2(x3, t0, t1):
return {}
def opts3(t0, t1):
return {}
res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0),
opts=[opts0, opts1, opts2, opts3])
assert_quad(res, 25.066666666666663)
def test_square_separate_ranges_and_opts(self):
def f(y, x):
return 1.0
assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0)
def test_square_aliased_ranges_and_opts(self):
def f(y, x):
return 1.0
r = [-1, 1]
opt = {}
assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0)
def test_square_separate_fn_ranges_and_opts(self):
def f(y, x):
return 1.0
def fn_range0(*args):
return (-1, 1)
def fn_range1(*args):
return (-1, 1)
def fn_opt0(*args):
return {}
def fn_opt1(*args):
return {}
ranges = [fn_range0, fn_range1]
opts = [fn_opt0, fn_opt1]
assert_quad(nquad(f, ranges, opts=opts), 4.0)
def test_square_aliased_fn_ranges_and_opts(self):
def f(y, x):
return 1.0
def fn_range(*args):
return (-1, 1)
def fn_opt(*args):
return {}
ranges = [fn_range, fn_range]
opts = [fn_opt, fn_opt]
assert_quad(nquad(f, ranges, opts=opts), 4.0)
def test_matching_quad(self):
def func(x):
return x**2 + 1
res, reserr = quad(func, 0, 4)
res2, reserr2 = nquad(func, ranges=[[0, 4]])
assert_almost_equal(res, res2)
assert_almost_equal(reserr, reserr2)
def test_matching_dblquad(self):
def func2d(x0, x1):
return x0**2 + x1**3 - x0 * x1 + 1
res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3)
res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)])
assert_almost_equal(res, res2)
assert_almost_equal(reserr, reserr2)
def test_matching_tplquad(self):
def func3d(x0, x1, x2, c0, c1):
return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2)
res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2,
lambda x, y: -np.pi, lambda x, y: np.pi,
args=(2, 3))
res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3))
assert_almost_equal(res, res2)
def test_dict_as_opts(self):
try:
out = nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001})
        except TypeError:
assert False
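# --- Illustrative sketch; added commentary, not part of the original
# test suite. nquad lists `ranges` from the innermost integration
# variable outwards, which is why test_matching_tplquad above reverses
# the order of the tplquad limits.
def _example_nquad_order():
    # Same unit-square integral as dblquad(lambda y, x: 1.0, 0, 1, ...).
    return nquad(lambda y, x: 1.0, [[0, 1], [0, 1]])[0]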
| 14,086 | 32.620525 | 86 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/tests/_test_multivariate.c
|
#include <Python.h>
#include "math.h"
const double PI = 3.141592653589793238462643383279502884;
static double
_multivariate_typical(int n, double *args)
{
return cos(args[1] * args[0] - args[2] * sin(args[0])) / PI;
}
static double
_multivariate_indefinite(int n, double *args)
{
return -exp(-args[0]) * log(args[0]);
}
static double
_multivariate_sin(int n, double *args)
{
return sin(args[0]);
}
static double
_sin_0(double x, void *user_data)
{
return sin(x);
}
static double
_sin_1(int ndim, double *x, void *user_data)
{
return sin(x[0]);
}
static double
_sin_2(double x)
{
return sin(x);
}
static double
_sin_3(int ndim, double *x)
{
return sin(x[0]);
}
typedef struct {
char *name;
void *ptr;
} routine_t;
static const routine_t routines[] = {
{"_multivariate_typical", &_multivariate_typical},
{"_multivariate_indefinite", &_multivariate_indefinite},
{"_multivariate_sin", &_multivariate_sin},
{"_sin_0", &_sin_0},
{"_sin_1", &_sin_1},
{"_sin_2", &_sin_2},
{"_sin_3", &_sin_3}
};
static int create_pointers(PyObject *module)
{
PyObject *d, *obj = NULL;
int i;
d = PyModule_GetDict(module);
if (d == NULL) {
goto fail;
}
for (i = 0; i < sizeof(routines) / sizeof(routine_t); ++i) {
obj = PyLong_FromVoidPtr(routines[i].ptr);
if (obj == NULL) {
goto fail;
}
if (PyDict_SetItemString(d, routines[i].name, obj)) {
goto fail;
}
Py_DECREF(obj);
obj = NULL;
}
Py_XDECREF(obj);
return 0;
fail:
Py_XDECREF(obj);
return -1;
}
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_test_multivariate",
NULL,
-1,
NULL, /* Empty methods section */
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC
PyInit__test_multivariate(void)
{
PyObject *m;
m = PyModule_Create(&moduledef);
if (m == NULL) {
return NULL;
}
if (create_pointers(m)) {
Py_DECREF(m);
return NULL;
}
return m;
}
#else
PyMODINIT_FUNC
init_test_multivariate(void)
{
PyObject *m;
m = Py_InitModule("_test_multivariate", NULL);
if (m == NULL) {
return;
}
create_pointers(m);
}
#endif
| 2,305 | 15.35461 | 64 |
c
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/tests/test_quadrature.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import cos, sin, pi
from numpy.testing import assert_equal, \
assert_almost_equal, assert_allclose, assert_
from scipy._lib._numpy_compat import suppress_warnings
from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
cumtrapz, quad, simps, fixed_quad)
from scipy.integrate.quadrature import AccuracyWarning
class TestFixedQuad(object):
def test_scalar(self):
n = 4
func = lambda x: x**(2*n - 1)
expected = 1/(2*n)
got, _ = fixed_quad(func, 0, 1, n=n)
# quadrature exact for this input
assert_allclose(got, expected, rtol=1e-12)
def test_vector(self):
n = 4
p = np.arange(1, 2*n)
func = lambda x: x**p[:,None]
expected = 1/(p + 1)
got, _ = fixed_quad(func, 0, 1, n=n)
assert_allclose(got, expected, rtol=1e-12)
class TestQuadrature(object):
def quad(self, x, a, b, args):
raise NotImplementedError
def test_quadrature(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, (2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_quadrature_rtol(self):
def myfunc(x, n, z): # Bessel function integrand
return 1e90 * cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
table_val = 1e90 * 0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_quadrature_miniter(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
table_val = 0.30614353532540296487
for miniter in [5, 52]:
val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
assert_almost_equal(val, table_val, decimal=7)
assert_(err < 1.0)
def test_quadrature_single_args(self):
def myfunc(x, n):
return 1e90 * cos(n*x-1.8*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
table_val = 1e90 * 0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_romberg(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val = romberg(myfunc, 0, pi, args=(2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_romberg_rtol(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return 1e19*cos(n*x-z*sin(x))/pi
val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
table_val = 1e19*0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_romb(self):
assert_equal(romb(np.arange(17)), 128)
def test_romb_gh_3731(self):
# Check that romb makes maximal use of data points
x = np.arange(2**4+1)
y = np.cos(0.2*x)
val = romb(y)
val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
assert_allclose(val, val2, rtol=1e-8, atol=0)
# should be equal to romb with 2**k+1 samples
with suppress_warnings() as sup:
sup.filter(AccuracyWarning, "divmax .4. exceeded")
val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)
assert_allclose(val, val3, rtol=1e-12, atol=0)
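    # Added note (editorial): romb requires 2**k + 1 equally spaced
    # samples (here 2**4 + 1 = 17); other sample counts raise an error.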
def test_non_dtype(self):
# Check that we work fine with functions returning float
import math
valmath = romberg(math.sin, 0, 1)
expected_val = 0.45969769413185085
assert_almost_equal(valmath, expected_val, decimal=7)
def test_newton_cotes(self):
"""Test the first few degrees, for evenly spaced points."""
n = 1
wts, errcoff = newton_cotes(n, 1)
assert_equal(wts, n*np.array([0.5, 0.5]))
assert_almost_equal(errcoff, -n**3/12.0)
n = 2
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
assert_almost_equal(errcoff, -n**5/2880.0)
n = 3
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
assert_almost_equal(errcoff, -n**5/6480.0)
n = 4
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
assert_almost_equal(errcoff, -n**7/1935360.0)
def test_newton_cotes2(self):
"""Test newton_cotes with points that are not evenly spaced."""
x = np.array([0.0, 1.5, 2.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 8.0/3
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
x = np.array([0.0, 1.4, 2.1, 3.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 9.0
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
def test_simps(self):
y = np.arange(17)
assert_equal(simps(y), 128)
assert_equal(simps(y, dx=0.5), 64)
assert_equal(simps(y, x=np.linspace(0, 4, 17)), 32)
y = np.arange(4)
x = 2**y
assert_equal(simps(y, x=x, even='avg'), 13.875)
assert_equal(simps(y, x=x, even='first'), 13.75)
assert_equal(simps(y, x=x, even='last'), 14)
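# Added note (editorial, not part of the original test suite): Simpson's
# rule needs an odd number of samples, so for an even count the `even`
# keyword chooses where the extra trapezoidal correction is applied:
# 'first' and 'last' put it at the corresponding end, and 'avg' averages
# the two results, which is why 13.875 above is the mean of 13.75 and 14.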
class TestCumtrapz(object):
def test_1d(self):
x = np.linspace(-2, 2, num=5)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = [0., -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, x, initial=None)
assert_allclose(y_int, y_expected[1:])
def test_y_nd_x_nd(self):
x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = np.array([[[0., 0.5, 2., 4.5],
[0., 4.5, 10., 16.5]],
[[0., 8.5, 18., 28.5],
[0., 12.5, 26., 40.5]],
[[0., 16.5, 34., 52.5],
[0., 20.5, 42., 64.5]]])
assert_allclose(y_int, y_expected)
# Try with all axes
shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
for axis, shape in zip([0, 1, 2], shapes):
y_int = cumtrapz(y, x, initial=3.45, axis=axis)
assert_equal(y_int.shape, (3, 2, 4))
y_int = cumtrapz(y, x, initial=None, axis=axis)
assert_equal(y_int.shape, shape)
def test_y_nd_x_1d(self):
y = np.arange(3 * 2 * 4).reshape(3, 2, 4)
x = np.arange(4)**2
# Try with all axes
ys_expected = (
np.array([[[4., 5., 6., 7.],
[8., 9., 10., 11.]],
[[40., 44., 48., 52.],
[56., 60., 64., 68.]]]),
np.array([[[2., 3., 4., 5.]],
[[10., 11., 12., 13.]],
[[18., 19., 20., 21.]]]),
np.array([[[0.5, 5., 17.5],
[4.5, 21., 53.5]],
[[8.5, 37., 89.5],
[12.5, 53., 125.5]],
[[16.5, 69., 161.5],
[20.5, 85., 197.5]]]))
for axis, y_expected in zip([0, 1, 2], ys_expected):
y_int = cumtrapz(y, x=x[:y.shape[axis]], axis=axis, initial=None)
assert_allclose(y_int, y_expected)
def test_x_none(self):
y = np.linspace(-2, 2, num=5)
y_int = cumtrapz(y)
y_expected = [-1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, initial=1.23)
y_expected = [1.23, -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3)
y_expected = [-4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3, initial=1.23)
y_expected = [1.23, -4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
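# Added note (editorial, not part of the original test suite): cumtrapz
# returns len(y) - 1 cumulative values by default; `initial` prepends a
# value so the output matches the input length, and `dx` scales every
# trapezoid, as the expected arrays above show.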
| 8,515 | 35.393162 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/tests/test_ivp.py
|
from __future__ import division, print_function, absolute_import
from itertools import product
from numpy.testing import (assert_, assert_allclose,
assert_equal, assert_no_warnings)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
import numpy as np
from scipy.optimize._numdiff import group_columns
from scipy.integrate import solve_ivp, RK23, RK45, Radau, BDF, LSODA
from scipy.integrate import OdeSolution
from scipy.integrate._ivp.common import num_jac
from scipy.integrate._ivp.base import ConstantDenseOutput
from scipy.sparse import coo_matrix, csc_matrix
def fun_linear(t, y):
return np.array([-y[0] - 5 * y[1], y[0] + y[1]])
def jac_linear():
return np.array([[-1, -5], [1, 1]])
def sol_linear(t):
return np.vstack((-5 * np.sin(2 * t),
2 * np.cos(2 * t) + np.sin(2 * t)))
def fun_rational(t, y):
return np.array([y[1] / t,
y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))])
def fun_rational_vectorized(t, y):
return np.vstack((y[1] / t,
y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))))
def jac_rational(t, y):
return np.array([
[0, 1 / t],
[-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),
(y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]
])
def jac_rational_sparse(t, y):
return csc_matrix([
[0, 1 / t],
[-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),
(y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]
])
def sol_rational(t):
return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2))
def fun_medazko(t, y):
n = y.shape[0] // 2
k = 100
c = 4
phi = 2 if t <= 5 else 0
y = np.hstack((phi, 0, y, y[-2]))
d = 1 / n
j = np.arange(n) + 1
alpha = 2 * (j * d - 1) ** 3 / c ** 2
beta = (j * d - 1) ** 4 / c ** 2
j_2_p1 = 2 * j + 2
j_2_m3 = 2 * j - 2
j_2_m1 = 2 * j
j_2 = 2 * j + 1
f = np.empty(2 * n)
f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) +
beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 -
k * y[j_2_m1] * y[j_2])
f[1::2] = -k * y[j_2] * y[j_2_m1]
return f
def medazko_sparsity(n):
cols = []
rows = []
i = np.arange(n) * 2
cols.append(i[1:])
rows.append(i[1:] - 2)
cols.append(i)
rows.append(i)
cols.append(i)
rows.append(i + 1)
cols.append(i[:-1])
rows.append(i[:-1] + 2)
i = np.arange(n) * 2 + 1
cols.append(i)
rows.append(i)
cols.append(i)
rows.append(i - 1)
cols = np.hstack(cols)
rows = np.hstack(rows)
return coo_matrix((np.ones_like(cols), (cols, rows)))
def fun_complex(t, y):
return -y
def jac_complex(t, y):
return -np.eye(y.shape[0])
def jac_complex_sparse(t, y):
return csc_matrix(jac_complex(t, y))
def sol_complex(t):
y = (0.5 + 1j) * np.exp(-t)
return y.reshape((1, -1))
def compute_error(y, y_true, rtol, atol):
e = (y - y_true) / (atol + rtol * np.abs(y_true))
return np.sqrt(np.sum(np.real(e * e.conj()), axis=0) / e.shape[0])
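# Added note (editorial, not part of the original file): compute_error
# returns, per time point, the RMS over components of the error scaled
# the same way the solvers scale their local error estimates,
#     e_rms(t_j) = sqrt(mean_i(|y_ij - y_true_ij|**2
#                              / (atol + rtol*|y_true_ij|)**2)),
# so the `e < 5` assertions below mean "within a few tolerance units".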
def test_integration():
rtol = 1e-3
atol = 1e-6
y0 = [1/3, 2/9]
for vectorized, method, t_span, jac in product(
[False, True],
['RK23', 'RK45', 'Radau', 'BDF', 'LSODA'],
[[5, 9], [5, 1]],
[None, jac_rational, jac_rational_sparse]):
if vectorized:
fun = fun_rational_vectorized
else:
fun = fun_rational
with suppress_warnings() as sup:
sup.filter(UserWarning,
"The following arguments have no effect for a chosen solver: `jac`")
res = solve_ivp(fun, t_span, y0, rtol=rtol,
atol=atol, method=method, dense_output=True,
jac=jac, vectorized=vectorized)
assert_equal(res.t[0], t_span[0])
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
assert_(res.nfev < 40)
if method in ['RK23', 'RK45', 'LSODA']:
assert_equal(res.njev, 0)
assert_equal(res.nlu, 0)
else:
assert_(0 < res.njev < 3)
assert_(0 < res.nlu < 10)
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
tc = np.linspace(*t_span)
yc_true = sol_rational(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, rtol, atol)
assert_(np.all(e < 5))
tc = (t_span[0] + t_span[-1]) / 2
yc_true = sol_rational(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, rtol, atol)
assert_(np.all(e < 5))
        # LSODA for some reason doesn't pass the polynomial through the
        # previous points exactly after the order change. It might be a
        # bug in the LSODA implementation, or maybe we are missing
        # something.
if method != 'LSODA':
assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
def test_integration_complex():
rtol = 1e-3
atol = 1e-6
y0 = [0.5 + 1j]
t_span = [0, 1]
tc = np.linspace(t_span[0], t_span[1])
for method, jac in product(['RK23', 'RK45', 'BDF'],
[None, jac_complex, jac_complex_sparse]):
with suppress_warnings() as sup:
sup.filter(UserWarning,
"The following arguments have no effect for a chosen solver: `jac`")
res = solve_ivp(fun_complex, t_span, y0, method=method,
dense_output=True, rtol=rtol, atol=atol, jac=jac)
assert_equal(res.t[0], t_span[0])
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
assert_(res.nfev < 25)
if method == 'BDF':
assert_equal(res.njev, 1)
assert_(res.nlu < 6)
else:
assert_equal(res.njev, 0)
assert_equal(res.nlu, 0)
y_true = sol_complex(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
yc_true = sol_complex(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, rtol, atol)
assert_(np.all(e < 5))
def test_integration_sparse_difference():
n = 200
t_span = [0, 20]
y0 = np.zeros(2 * n)
y0[1::2] = 1
sparsity = medazko_sparsity(n)
for method in ['BDF', 'Radau']:
res = solve_ivp(fun_medazko, t_span, y0, method=method,
jac_sparsity=sparsity)
assert_equal(res.t[0], t_span[0])
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2)
assert_allclose(res.y[79, -1], 0, atol=1e-3)
assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2)
assert_allclose(res.y[149, -1], 0, atol=1e-3)
assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2)
assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3)
assert_allclose(res.y[238, -1], 0, atol=1e-3)
assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2)
def test_integration_const_jac():
rtol = 1e-3
atol = 1e-6
y0 = [0, 2]
t_span = [0, 2]
J = jac_linear()
J_sparse = csc_matrix(J)
for method, jac in product(['Radau', 'BDF'], [J, J_sparse]):
res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol,
method=method, dense_output=True, jac=jac)
assert_equal(res.t[0], t_span[0])
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
assert_(res.nfev < 100)
assert_equal(res.njev, 0)
assert_(0 < res.nlu < 15)
y_true = sol_linear(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 10))
tc = np.linspace(*t_span)
yc_true = sol_linear(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, rtol, atol)
assert_(np.all(e < 15))
assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14)
def test_events():
def event_rational_1(t, y):
return y[0] - y[1] ** 0.7
def event_rational_2(t, y):
return y[1] ** 0.6 - y[0]
def event_rational_3(t, y):
return t - 7.4
event_rational_3.terminal = True
for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']:
res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 1)
assert_equal(res.t_events[1].size, 1)
assert_(5.3 < res.t_events[0][0] < 5.7)
assert_(7.3 < res.t_events[1][0] < 7.7)
event_rational_1.direction = 1
event_rational_2.direction = 1
res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 1)
assert_equal(res.t_events[1].size, 0)
assert_(5.3 < res.t_events[0][0] < 5.7)
event_rational_1.direction = -1
event_rational_2.direction = -1
res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 0)
assert_equal(res.t_events[1].size, 1)
assert_(7.3 < res.t_events[1][0] < 7.7)
event_rational_1.direction = 0
event_rational_2.direction = 0
res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
events=(event_rational_1, event_rational_2,
event_rational_3), dense_output=True)
assert_equal(res.status, 1)
assert_equal(res.t_events[0].size, 1)
assert_equal(res.t_events[1].size, 0)
assert_equal(res.t_events[2].size, 1)
assert_(5.3 < res.t_events[0][0] < 5.7)
assert_(7.3 < res.t_events[2][0] < 7.5)
res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
events=event_rational_1, dense_output=True)
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 1)
assert_(5.3 < res.t_events[0][0] < 5.7)
# Also test that termination by event doesn't break interpolants.
tc = np.linspace(res.t[0], res.t[-1])
yc_true = sol_rational(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, 1e-3, 1e-6)
assert_(np.all(e < 5))
# Test in backward direction.
event_rational_1.direction = 0
event_rational_2.direction = 0
for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']:
res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 1)
assert_equal(res.t_events[1].size, 1)
assert_(5.3 < res.t_events[0][0] < 5.7)
assert_(7.3 < res.t_events[1][0] < 7.7)
event_rational_1.direction = -1
event_rational_2.direction = -1
res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 1)
assert_equal(res.t_events[1].size, 0)
assert_(5.3 < res.t_events[0][0] < 5.7)
event_rational_1.direction = 1
event_rational_2.direction = 1
res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 0)
assert_equal(res.t_events[1].size, 1)
assert_(7.3 < res.t_events[1][0] < 7.7)
event_rational_1.direction = 0
event_rational_2.direction = 0
res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
events=(event_rational_1, event_rational_2,
event_rational_3), dense_output=True)
assert_equal(res.status, 1)
assert_equal(res.t_events[0].size, 0)
assert_equal(res.t_events[1].size, 1)
assert_equal(res.t_events[2].size, 1)
assert_(7.3 < res.t_events[1][0] < 7.7)
assert_(7.3 < res.t_events[2][0] < 7.5)
# Also test that termination by event doesn't break interpolants.
tc = np.linspace(res.t[-1], res.t[0])
yc_true = sol_rational(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, 1e-3, 1e-6)
assert_(np.all(e < 5))
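# Added note (editorial, not part of the original file): event functions
# have the signature event(t, y); a root of the returned scalar marks an
# event, the `terminal` attribute stops the integration at the first
# such root, and `direction` keeps only roots whose sign change matches
# (+1 rising, -1 falling, 0 both), as exercised above.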
def test_max_step():
rtol = 1e-3
atol = 1e-6
y0 = [1/3, 2/9]
for method in [RK23, RK45, Radau, BDF, LSODA]:
for t_span in ([5, 9], [5, 1]):
res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
max_step=0.5, atol=atol, method=method,
dense_output=True)
assert_equal(res.t[0], t_span[0])
assert_equal(res.t[-1], t_span[-1])
assert_(np.all(np.abs(np.diff(res.t)) <= 0.5))
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
tc = np.linspace(*t_span)
yc_true = sol_rational(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, rtol, atol)
assert_(np.all(e < 5))
# See comment in test_integration.
if method is not LSODA:
assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
assert_raises(ValueError, method, fun_rational, t_span[0], y0,
t_span[1], max_step=-1)
if method is not LSODA:
solver = method(fun_rational, t_span[0], y0, t_span[1],
rtol=rtol, atol=atol, max_step=1e-20)
message = solver.step()
assert_equal(solver.status, 'failed')
assert_("step size is less" in message)
assert_raises(RuntimeError, solver.step)
def test_t_eval():
rtol = 1e-3
atol = 1e-6
y0 = [1/3, 2/9]
for t_span in ([5, 9], [5, 1]):
t_eval = np.linspace(t_span[0], t_span[1], 10)
res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
t_eval=t_eval)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
t_eval = [5, 5.01, 7, 8, 8.01, 9]
res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
t_eval=t_eval)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1]
res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
t_eval=t_eval)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
t_eval = [5.01, 7, 8, 8.01]
res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
t_eval=t_eval)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
t_eval = [4.99, 3, 1.5, 1.1, 1.01]
res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
t_eval=t_eval)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
t_eval = [4, 6]
assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0,
rtol=rtol, atol=atol, t_eval=t_eval)
def test_no_integration():
for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']:
sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3],
method=method, dense_output=True)
assert_equal(sol.sol(4), [2, 3])
assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]])
def test_no_integration_class():
for method in [RK23, RK45, Radau, BDF, LSODA]:
solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0)
solver.step()
assert_equal(solver.status, 'finished')
sol = solver.dense_output()
assert_equal(sol(0.0), [10.0, 0.0])
assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]])
solver = method(lambda t, y: -y, 0.0, [], np.inf)
solver.step()
assert_equal(solver.status, 'finished')
sol = solver.dense_output()
assert_equal(sol(100.0), [])
assert_equal(sol([0, 1, 2]), np.empty((0, 3)))
def test_empty():
def fun(t, y):
return np.zeros((0,))
y0 = np.zeros((0,))
for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']:
sol = assert_no_warnings(solve_ivp, fun, [0, 10], y0,
method=method, dense_output=True)
assert_equal(sol.sol(10), np.zeros((0,)))
assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']:
sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0,
method=method, dense_output=True)
assert_equal(sol.sol(10), np.zeros((0,)))
assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
def test_ConstantDenseOutput():
sol = ConstantDenseOutput(0, 1, np.array([1, 2]))
assert_allclose(sol(1.5), [1, 2])
assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]])
sol = ConstantDenseOutput(0, 1, np.array([]))
assert_allclose(sol(1.5), np.empty(0))
assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3)))
def test_classes():
y0 = [1 / 3, 2 / 9]
for cls in [RK23, RK45, Radau, BDF, LSODA]:
solver = cls(fun_rational, 5, y0, np.inf)
assert_equal(solver.n, 2)
assert_equal(solver.status, 'running')
assert_equal(solver.t_bound, np.inf)
assert_equal(solver.direction, 1)
assert_equal(solver.t, 5)
assert_equal(solver.y, y0)
assert_(solver.step_size is None)
if cls is not LSODA:
assert_(solver.nfev > 0)
assert_(solver.njev >= 0)
assert_equal(solver.nlu, 0)
else:
assert_equal(solver.nfev, 0)
assert_equal(solver.njev, 0)
assert_equal(solver.nlu, 0)
assert_raises(RuntimeError, solver.dense_output)
message = solver.step()
assert_equal(solver.status, 'running')
assert_equal(message, None)
assert_equal(solver.n, 2)
assert_equal(solver.t_bound, np.inf)
assert_equal(solver.direction, 1)
assert_(solver.t > 5)
assert_(not np.all(np.equal(solver.y, y0)))
assert_(solver.step_size > 0)
assert_(solver.nfev > 0)
assert_(solver.njev >= 0)
assert_(solver.nlu >= 0)
sol = solver.dense_output()
assert_allclose(sol(5), y0, rtol=1e-15, atol=0)
def test_OdeSolution():
ts = np.array([0, 2, 5], dtype=float)
s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
sol = OdeSolution(ts, [s1, s2])
assert_equal(sol(-1), [-1])
assert_equal(sol(1), [-1])
assert_equal(sol(2), [-1])
assert_equal(sol(3), [1])
assert_equal(sol(5), [1])
assert_equal(sol(6), [1])
assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]),
np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]]))
ts = np.array([10, 4, -3])
s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
sol = OdeSolution(ts, [s1, s2])
assert_equal(sol(11), [-1])
assert_equal(sol(10), [-1])
assert_equal(sol(5), [-1])
assert_equal(sol(4), [-1])
assert_equal(sol(0), [1])
assert_equal(sol(-3), [1])
assert_equal(sol(-4), [1])
assert_equal(sol([12, -5, 10, -3, 6, 1, 4]),
np.array([[-1, 1, -1, 1, -1, 1, -1]]))
ts = np.array([1, 1])
s = ConstantDenseOutput(1, 1, np.array([10]))
sol = OdeSolution(ts, [s])
assert_equal(sol(0), [10])
assert_equal(sol(1), [10])
assert_equal(sol(2), [10])
assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]]))
def test_num_jac():
def fun(t, y):
return np.vstack([
-0.04 * y[0] + 1e4 * y[1] * y[2],
0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2,
3e7 * y[1] ** 2
])
def jac(t, y):
return np.array([
[-0.04, 1e4 * y[2], 1e4 * y[1]],
[0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]],
[0, 6e7 * y[1], 0]
])
t = 1
y = np.array([1, 0, 0])
J_true = jac(t, y)
threshold = 1e-5
f = fun(t, y).ravel()
J_num, factor = num_jac(fun, t, y, f, threshold, None)
assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
J_num, factor = num_jac(fun, t, y, f, threshold, factor)
assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
def test_num_jac_sparse():
def fun(t, y):
e = y[1:]**3 - y[:-1]**2
z = np.zeros(y.shape[1])
return np.vstack((z, 3 * e)) + np.vstack((2 * e, z))
def structure(n):
A = np.zeros((n, n), dtype=int)
A[0, 0] = 1
A[0, 1] = 1
for i in range(1, n - 1):
A[i, i - 1: i + 2] = 1
A[-1, -1] = 1
A[-1, -2] = 1
return A
np.random.seed(0)
n = 20
y = np.random.randn(n)
A = structure(n)
groups = group_columns(A)
f = fun(0, y[:, None]).ravel()
    # Compare dense and sparse results, assuming that the dense
    # implementation is correct (as it is straightforward).
J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None,
sparsity=(A, groups))
J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None)
assert_allclose(J_num_dense, J_num_sparse.toarray(),
rtol=1e-12, atol=1e-14)
assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
# Take small factors to trigger their recomputing inside.
factor = np.random.uniform(0, 1e-12, size=n)
J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor,
sparsity=(A, groups))
J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor)
assert_allclose(J_num_dense, J_num_sparse.toarray(),
rtol=1e-12, atol=1e-14)
assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
| 23,440 | 31.199176 | 91 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/tests/test_odeint_jac.py
|
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from scipy.integrate import odeint
import scipy.integrate._test_odeint_banded as banded5x5
def rhs(y, t):
dydt = np.zeros_like(y)
banded5x5.banded5x5(t, y, dydt)
return dydt
def jac(y, t):
n = len(y)
jac = np.zeros((n, n), order='F')
banded5x5.banded5x5_jac(t, y, 1, 1, jac)
return jac
def bjac(y, t):
n = len(y)
bjac = np.zeros((4, n), order='F')
banded5x5.banded5x5_bjac(t, y, 1, 1, bjac)
return bjac
JACTYPE_FULL = 1
JACTYPE_BANDED = 4
def check_odeint(jactype):
if jactype == JACTYPE_FULL:
ml = None
mu = None
jacobian = jac
elif jactype == JACTYPE_BANDED:
ml = 2
mu = 1
jacobian = bjac
else:
raise ValueError("invalid jactype: %r" % (jactype,))
y0 = np.arange(1.0, 6.0)
# These tolerances must match the tolerances used in banded5x5.f.
rtol = 1e-11
atol = 1e-13
dt = 0.125
nsteps = 64
t = dt * np.arange(nsteps+1)
sol, info = odeint(rhs, y0, t,
Dfun=jacobian, ml=ml, mu=mu,
atol=atol, rtol=rtol, full_output=True)
yfinal = sol[-1]
odeint_nst = info['nst'][-1]
odeint_nfe = info['nfe'][-1]
odeint_nje = info['nje'][-1]
y1 = y0.copy()
# Pure Fortran solution. y1 is modified in-place.
nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype)
# It is likely that yfinal and y1 are *exactly* the same, but
# we'll be cautious and use assert_allclose.
assert_allclose(yfinal, y1, rtol=1e-12)
assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje))
def test_odeint_full_jac():
check_odeint(JACTYPE_FULL)
def test_odeint_banded_jac():
check_odeint(JACTYPE_BANDED)
| 1,821 | 22.973684 | 71 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/tests/test_integrate.py
|
# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
"""
Tests for numerical integration.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp,
allclose)
from scipy._lib._numpy_compat import _assert_warns
from scipy._lib.six import xrange
from numpy.testing import (
assert_, assert_array_almost_equal,
assert_allclose, assert_array_equal, assert_equal)
from pytest import raises as assert_raises
from scipy.integrate import odeint, ode, complex_ode
#------------------------------------------------------------------------------
# Test ODE integrators
#------------------------------------------------------------------------------
class TestOdeint(object):
# Check integrate.odeint
def _do_problem(self, problem):
t = arange(0.0, problem.stop_t, 0.05)
# Basic case
z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
assert_(problem.verify(z, t))
# Use tfirst=True
z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
full_output=True, tfirst=True)
assert_(problem.verify(z, t))
if hasattr(problem, 'jac'):
# Use Dfun
z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac,
full_output=True)
assert_(problem.verify(z, t))
# Use Dfun and tfirst=True
z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
Dfun=lambda t, y: problem.jac(y, t),
full_output=True, tfirst=True)
assert_(problem.verify(z, t))
def test_odeint(self):
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
self._do_problem(problem)
class TestODEClass(object):
ode_class = None # Set in subclass.
def _do_problem(self, problem, integrator, method='adams'):
        # ode has callback arguments in a different order than odeint
f = lambda t, z: problem.f(z, t)
jac = None
if hasattr(problem, 'jac'):
jac = lambda t, z: problem.jac(z, t)
integrator_params = {}
if problem.lband is not None or problem.uband is not None:
integrator_params['uband'] = problem.uband
integrator_params['lband'] = problem.lband
ig = self.ode_class(f, jac)
ig.set_integrator(integrator,
atol=problem.atol/10,
rtol=problem.rtol/10,
method=method,
**integrator_params)
ig.set_initial_value(problem.z0, t=0.0)
z = ig.integrate(problem.stop_t)
assert_array_equal(z, ig.y)
assert_(ig.successful(), (problem, method))
assert_(ig.get_return_code() > 0, (problem, method))
assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
class TestOde(TestODEClass):
ode_class = ode
def test_vode(self):
# Check the vode solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
if not problem.stiff:
self._do_problem(problem, 'vode', 'adams')
self._do_problem(problem, 'vode', 'bdf')
def test_zvode(self):
# Check the zvode solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if not problem.stiff:
self._do_problem(problem, 'zvode', 'adams')
self._do_problem(problem, 'zvode', 'bdf')
def test_lsoda(self):
# Check the lsoda solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
self._do_problem(problem, 'lsoda')
def test_dopri5(self):
# Check the dopri5 solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
if problem.stiff:
continue
if hasattr(problem, 'jac'):
continue
self._do_problem(problem, 'dopri5')
def test_dop853(self):
# Check the dop853 solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
if problem.stiff:
continue
if hasattr(problem, 'jac'):
continue
self._do_problem(problem, 'dop853')
def test_concurrent_fail(self):
for sol in ('vode', 'zvode', 'lsoda'):
f = lambda t, y: 1.0
r = ode(f).set_integrator(sol)
r.set_initial_value(0, 0)
r2 = ode(f).set_integrator(sol)
r2.set_initial_value(0, 0)
r.integrate(r.t + 0.1)
r2.integrate(r2.t + 0.1)
assert_raises(RuntimeError, r.integrate, r.t + 0.1)
def test_concurrent_ok(self):
f = lambda t, y: 1.0
for k in xrange(3):
for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
r = ode(f).set_integrator(sol)
r.set_initial_value(0, 0)
r2 = ode(f).set_integrator(sol)
r2.set_initial_value(0, 0)
r.integrate(r.t + 0.1)
r2.integrate(r2.t + 0.1)
r2.integrate(r2.t + 0.1)
assert_allclose(r.y, 0.1)
assert_allclose(r2.y, 0.2)
for sol in ('dopri5', 'dop853'):
r = ode(f).set_integrator(sol)
r.set_initial_value(0, 0)
r2 = ode(f).set_integrator(sol)
r2.set_initial_value(0, 0)
r.integrate(r.t + 0.1)
r.integrate(r.t + 0.1)
r2.integrate(r2.t + 0.1)
r.integrate(r.t + 0.1)
r2.integrate(r2.t + 0.1)
assert_allclose(r.y, 0.3)
assert_allclose(r2.y, 0.2)
class TestComplexOde(TestODEClass):
ode_class = complex_ode
def test_vode(self):
# Check the vode solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if not problem.stiff:
self._do_problem(problem, 'vode', 'adams')
else:
self._do_problem(problem, 'vode', 'bdf')
def test_lsoda(self):
# Check the lsoda solver
for problem_cls in PROBLEMS:
problem = problem_cls()
self._do_problem(problem, 'lsoda')
def test_dopri5(self):
# Check the dopri5 solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.stiff:
continue
if hasattr(problem, 'jac'):
continue
self._do_problem(problem, 'dopri5')
def test_dop853(self):
# Check the dop853 solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.stiff:
continue
if hasattr(problem, 'jac'):
continue
self._do_problem(problem, 'dop853')
class TestSolout(object):
# Check integrate.ode correctly handles solout for dopri5 and dop853
def _run_solout_test(self, integrator):
# Check correct usage of solout
ts = []
ys = []
t0 = 0.0
tend = 10.0
y0 = [1.0, 2.0]
def solout(t, y):
ts.append(t)
ys.append(y.copy())
def rhs(t, y):
return [y[0] + y[1], -y[1]**2]
ig = ode(rhs).set_integrator(integrator)
ig.set_solout(solout)
ig.set_initial_value(y0, t0)
ret = ig.integrate(tend)
assert_array_equal(ys[0], y0)
assert_array_equal(ys[-1], ret)
assert_equal(ts[0], t0)
assert_equal(ts[-1], tend)
def test_solout(self):
for integrator in ('dopri5', 'dop853'):
self._run_solout_test(integrator)
def _run_solout_after_initial_test(self, integrator):
# Check if solout works even if it is set after the initial value.
ts = []
ys = []
t0 = 0.0
tend = 10.0
y0 = [1.0, 2.0]
def solout(t, y):
ts.append(t)
ys.append(y.copy())
def rhs(t, y):
return [y[0] + y[1], -y[1]**2]
ig = ode(rhs).set_integrator(integrator)
ig.set_initial_value(y0, t0)
ig.set_solout(solout)
ret = ig.integrate(tend)
assert_array_equal(ys[0], y0)
assert_array_equal(ys[-1], ret)
assert_equal(ts[0], t0)
assert_equal(ts[-1], tend)
def test_solout_after_initial(self):
for integrator in ('dopri5', 'dop853'):
self._run_solout_after_initial_test(integrator)
def _run_solout_break_test(self, integrator):
# Check correct usage of stopping via solout
ts = []
ys = []
t0 = 0.0
tend = 10.0
y0 = [1.0, 2.0]
def solout(t, y):
ts.append(t)
ys.append(y.copy())
if t > tend/2.0:
return -1
def rhs(t, y):
return [y[0] + y[1], -y[1]**2]
ig = ode(rhs).set_integrator(integrator)
ig.set_solout(solout)
ig.set_initial_value(y0, t0)
ret = ig.integrate(tend)
assert_array_equal(ys[0], y0)
assert_array_equal(ys[-1], ret)
assert_equal(ts[0], t0)
assert_(ts[-1] > tend/2.0)
assert_(ts[-1] < tend)
def test_solout_break(self):
for integrator in ('dopri5', 'dop853'):
self._run_solout_break_test(integrator)
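# Added note (editorial, not part of the original file): for dopri5 and
# dop853 the solout callback runs after every accepted step, and
# returning -1 from it aborts the integration, which is what the
# "break" tests above and below rely on.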
class TestComplexSolout(object):
# Check integrate.ode correctly handles solout for dopri5 and dop853
def _run_solout_test(self, integrator):
# Check correct usage of solout
ts = []
ys = []
t0 = 0.0
tend = 20.0
y0 = [0.0]
def solout(t, y):
ts.append(t)
ys.append(y.copy())
def rhs(t, y):
return [1.0/(t - 10.0 - 1j)]
ig = complex_ode(rhs).set_integrator(integrator)
ig.set_solout(solout)
ig.set_initial_value(y0, t0)
ret = ig.integrate(tend)
assert_array_equal(ys[0], y0)
assert_array_equal(ys[-1], ret)
assert_equal(ts[0], t0)
assert_equal(ts[-1], tend)
def test_solout(self):
for integrator in ('dopri5', 'dop853'):
self._run_solout_test(integrator)
def _run_solout_break_test(self, integrator):
# Check correct usage of stopping via solout
ts = []
ys = []
t0 = 0.0
tend = 20.0
y0 = [0.0]
def solout(t, y):
ts.append(t)
ys.append(y.copy())
if t > tend/2.0:
return -1
def rhs(t, y):
return [1.0/(t - 10.0 - 1j)]
ig = complex_ode(rhs).set_integrator(integrator)
ig.set_solout(solout)
ig.set_initial_value(y0, t0)
ret = ig.integrate(tend)
assert_array_equal(ys[0], y0)
assert_array_equal(ys[-1], ret)
assert_equal(ts[0], t0)
assert_(ts[-1] > tend/2.0)
assert_(ts[-1] < tend)
def test_solout_break(self):
for integrator in ('dopri5', 'dop853'):
self._run_solout_break_test(integrator)
#------------------------------------------------------------------------------
# Test problems
#------------------------------------------------------------------------------
class ODE:
"""
ODE problem
"""
stiff = False
cmplx = False
stop_t = 1
z0 = []
lband = None
uband = None
atol = 1e-6
rtol = 1e-5
class SimpleOscillator(ODE):
r"""
Free vibration of a simple oscillator::
        m \ddot{u} + k u = 0, u(0) = u_0, \dot{u}(0) = \dot{u}_0
Solution::
u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
"""
stop_t = 1 + 0.09
z0 = array([1.0, 0.1], float)
k = 4.0
m = 1.0
def f(self, z, t):
tmp = zeros((2, 2), float)
tmp[0, 1] = 1.0
tmp[1, 0] = -self.k / self.m
return dot(tmp, z)
def verify(self, zs, t):
omega = sqrt(self.k / self.m)
u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega
return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)
class ComplexExp(ODE):
r"""The equation :lm:`\dot u = i u`"""
stop_t = 1.23*pi
z0 = exp([1j, 2j, 3j, 4j, 5j])
cmplx = True
def f(self, z, t):
return 1j*z
def jac(self, z, t):
return 1j*eye(5)
def verify(self, zs, t):
u = self.z0 * exp(1j*t)
return allclose(u, zs, atol=self.atol, rtol=self.rtol)
class Pi(ODE):
r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
stop_t = 20
z0 = [0]
cmplx = True
def f(self, z, t):
return array([1./(t - 10 + 1j)])
def verify(self, zs, t):
u = -2j * np.arctan(10)
return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)
class CoupledDecay(ODE):
r"""
3 coupled decays suited for banded treatment
    (banded mode becomes necessary when N >> 3)
"""
stiff = True
stop_t = 0.5
z0 = [5.0, 7.0, 13.0]
lband = 1
uband = 0
lmbd = [0.17, 0.23, 0.29] # fictitious decay constants
def f(self, z, t):
lmbd = self.lmbd
return np.array([-lmbd[0]*z[0],
-lmbd[1]*z[1] + lmbd[0]*z[0],
-lmbd[2]*z[2] + lmbd[1]*z[1]])
def jac(self, z, t):
# The full Jacobian is
#
# [-lmbd[0] 0 0 ]
# [ lmbd[0] -lmbd[1] 0 ]
# [ 0 lmbd[1] -lmbd[2]]
#
# The lower and upper bandwidths are lband=1 and uband=0, resp.
# The representation of this array in packed format is
#
# [-lmbd[0] -lmbd[1] -lmbd[2]]
# [ lmbd[0] lmbd[1] 0 ]
lmbd = self.lmbd
j = np.zeros((self.lband + self.uband + 1, 3), order='F')
def set_j(ri, ci, val):
j[self.uband + ri - ci, ci] = val
set_j(0, 0, -lmbd[0])
set_j(1, 0, lmbd[0])
set_j(1, 1, -lmbd[1])
set_j(2, 1, lmbd[1])
set_j(2, 2, -lmbd[2])
return j
def verify(self, zs, t):
# Formulae derived by hand
lmbd = np.array(self.lmbd)
d10 = lmbd[1] - lmbd[0]
d21 = lmbd[2] - lmbd[1]
d20 = lmbd[2] - lmbd[0]
e0 = np.exp(-lmbd[0] * t)
e1 = np.exp(-lmbd[1] * t)
e2 = np.exp(-lmbd[2] * t)
u = np.vstack((
self.z0[0] * e0,
self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1),
self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) +
lmbd[1] * lmbd[0] * self.z0[0] / d10 *
(1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose()
return allclose(u, zs, atol=self.atol, rtol=self.rtol)
PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay]
#------------------------------------------------------------------------------
def f(t, x):
dxdt = [x[1], -x[0]]
return dxdt
def jac(t, x):
j = array([[0.0, 1.0],
[-1.0, 0.0]])
return j
def f1(t, x, omega):
dxdt = [omega*x[1], -omega*x[0]]
return dxdt
def jac1(t, x, omega):
j = array([[0.0, omega],
[-omega, 0.0]])
return j
def f2(t, x, omega1, omega2):
dxdt = [omega1*x[1], -omega2*x[0]]
return dxdt
def jac2(t, x, omega1, omega2):
j = array([[0.0, omega1],
[-omega2, 0.0]])
return j
def fv(t, x, omega):
dxdt = [omega[0]*x[1], -omega[1]*x[0]]
return dxdt
def jacv(t, x, omega):
j = array([[0.0, omega[0]],
[-omega[1], 0.0]])
return j
class ODECheckParameterUse(object):
"""Call an ode-class solver with several cases of parameter use."""
# solver_name must be set before tests can be run with this class.
# Set these in subclasses.
solver_name = ''
solver_uses_jac = False
def _get_solver(self, f, jac):
solver = ode(f, jac)
if self.solver_uses_jac:
solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,
with_jacobian=self.solver_uses_jac)
else:
            # XXX Shouldn't set_integrator *always* accept the keyword arg
            # 'with_jacobian', and perhaps raise an exception if it is set
            # to True when the solver can't actually use it?
solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)
return solver
def _check_solver(self, solver):
ic = [1.0, 0.0]
solver.set_initial_value(ic, 0.0)
solver.integrate(pi)
assert_array_almost_equal(solver.y, [-1.0, 0.0])
def test_no_params(self):
solver = self._get_solver(f, jac)
self._check_solver(solver)
def test_one_scalar_param(self):
solver = self._get_solver(f1, jac1)
omega = 1.0
solver.set_f_params(omega)
if self.solver_uses_jac:
solver.set_jac_params(omega)
self._check_solver(solver)
def test_two_scalar_params(self):
solver = self._get_solver(f2, jac2)
omega1 = 1.0
omega2 = 1.0
solver.set_f_params(omega1, omega2)
if self.solver_uses_jac:
solver.set_jac_params(omega1, omega2)
self._check_solver(solver)
def test_vector_param(self):
solver = self._get_solver(fv, jacv)
omega = [1.0, 1.0]
solver.set_f_params(omega)
if self.solver_uses_jac:
solver.set_jac_params(omega)
self._check_solver(solver)
def test_warns_on_failure(self):
# Set nsteps small to ensure failure
solver = self._get_solver(f, jac)
solver.set_integrator(self.solver_name, nsteps=1)
ic = [1.0, 0.0]
solver.set_initial_value(ic, 0.0)
_assert_warns(UserWarning, solver.integrate, pi)
class TestDOPRI5CheckParameterUse(ODECheckParameterUse):
solver_name = 'dopri5'
solver_uses_jac = False
class TestDOP853CheckParameterUse(ODECheckParameterUse):
solver_name = 'dop853'
solver_uses_jac = False
class TestVODECheckParameterUse(ODECheckParameterUse):
solver_name = 'vode'
solver_uses_jac = True
class TestZVODECheckParameterUse(ODECheckParameterUse):
solver_name = 'zvode'
solver_uses_jac = True
class TestLSODACheckParameterUse(ODECheckParameterUse):
solver_name = 'lsoda'
solver_uses_jac = True
def test_odeint_trivial_time():
# Test that odeint succeeds when given a single time point
# and full_output=True. This is a regression test for gh-4282.
y0 = 1
t = [0]
y, info = odeint(lambda y, t: -y, y0, t, full_output=True)
assert_array_equal(y, np.array([[y0]]))
def test_odeint_banded_jacobian():
# Test the use of the `Dfun`, `ml` and `mu` options of odeint.
def func(y, t, c):
return c.dot(y)
def jac(y, t, c):
return c
def jac_transpose(y, t, c):
return c.T.copy(order='C')
def bjac_rows(y, t, c):
jac = np.row_stack((np.r_[0, np.diag(c, 1)],
np.diag(c),
np.r_[np.diag(c, -1), 0],
np.r_[np.diag(c, -2), 0, 0]))
return jac
def bjac_cols(y, t, c):
return bjac_rows(y, t, c).T.copy(order='C')
c = array([[-205, 0.01, 0.00, 0.0],
[0.1, -2.50, 0.02, 0.0],
[1e-3, 0.01, -2.0, 0.01],
[0.00, 0.00, 0.1, -1.0]])
y0 = np.ones(4)
t = np.array([0, 5, 10, 100])
# Use the full Jacobian.
sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True,
atol=1e-13, rtol=1e-11, mxstep=10000,
Dfun=jac)
# Use the transposed full Jacobian, with col_deriv=True.
sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True,
atol=1e-13, rtol=1e-11, mxstep=10000,
Dfun=jac_transpose, col_deriv=True)
# Use the banded Jacobian.
sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True,
atol=1e-13, rtol=1e-11, mxstep=10000,
Dfun=bjac_rows, ml=2, mu=1)
# Use the transposed banded Jacobian, with col_deriv=True.
sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True,
atol=1e-13, rtol=1e-11, mxstep=10000,
Dfun=bjac_cols, ml=2, mu=1, col_deriv=True)
assert_allclose(sol1, sol2, err_msg="sol1 != sol2")
assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3")
assert_allclose(sol3, sol4, err_msg="sol3 != sol4")
# Verify that the number of jacobian evaluations was the same for the
# calls of odeint with a full jacobian and with a banded jacobian. This is
# a regression test--there was a bug in the handling of banded jacobians
# that resulted in an incorrect jacobian matrix being passed to the LSODA
# code. That would cause errors or excessive jacobian evaluations.
assert_array_equal(info1['nje'], info2['nje'])
assert_array_equal(info3['nje'], info4['nje'])
# Test the use of tfirst
sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,),
full_output=True, atol=1e-13, rtol=1e-11,
mxstep=10000,
Dfun=lambda t, y, c: jac(y, t, c), tfirst=True)
# The code should execute the exact same sequence of floating point
# calculations, so these should be exactly equal. We'll be safe and use
# a small tolerance.
assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty")
def test_odeint_errors():
def sys1d(x, t):
return -100*x
def bad1(x, t):
return 1.0/0
def bad2(x, t):
return "foo"
def bad_jac1(x, t):
return 1.0/0
def bad_jac2(x, t):
return [["foo"]]
def sys2d(x, t):
return [-100*x[0], -0.1*x[1]]
def sys2d_bad_jac(x, t):
return [[1.0/0, 0], [0, -0.1]]
assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1])
assert_raises(ValueError, odeint, bad2, 1.0, [0, 1])
assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1)
assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2)
assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1],
Dfun=sys2d_bad_jac)
def test_odeint_bad_shapes():
# Tests of some errors that can occur with odeint.
def badrhs(x, t):
return [1, -1]
def sys1(x, t):
return -100*x
def badjac(x, t):
return [[0, 0, 0]]
# y0 must be at most 1-d.
bad_y0 = [[0, 0], [0, 0]]
assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1])
# t must be at most 1-d.
bad_t = [[0, 1], [2, 3]]
assert_raises(ValueError, odeint, sys1, [10.0], bad_t)
# y0 is 10, but badrhs(x, t) returns [1, -1].
assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1])
# shape of array returned by badjac(x, t) is not correct.
assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac)
| 23,603 | 28.212871 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py
|
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import assert_allclose
from scipy.integrate import ode
def _band_count(a):
"""Returns ml and mu, the lower and upper band sizes of a."""
nrows, ncols = a.shape
ml = 0
for k in range(-nrows+1, 0):
if np.diag(a, k).any():
ml = -k
break
mu = 0
for k in range(nrows-1, 0, -1):
if np.diag(a, k).any():
mu = k
break
return ml, mu
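# Added worked example (editorial, not part of the original file): for
#     a = [[1, 2, 0],
#          [3, 4, 5],
#          [6, 7, 8]]
# the lowest nonzero diagonal is np.diag(a, -2) = [6], so ml = 2, and
# the highest is np.diag(a, 1) = [2, 5], so mu = 1.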
def _linear_func(t, y, a):
"""Linear system dy/dt = a * y"""
return a.dot(y)
def _linear_jac(t, y, a):
"""Jacobian of a * y is a."""
return a
def _linear_banded_jac(t, y, a):
"""Banded Jacobian."""
ml, mu = _band_count(a)
bjac = []
for k in range(mu, 0, -1):
bjac.append(np.r_[[0] * k, np.diag(a, k)])
bjac.append(np.diag(a))
for k in range(-1, -ml-1, -1):
bjac.append(np.r_[np.diag(a, k), [0] * (-k)])
return bjac
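# Added sketch (editorial, not part of the original file): for a
# tridiagonal matrix (ml = mu = 1) the rows built above are, in order,
#     [0,       a[0, 1], a[1, 2]]   # superdiagonal, left-padded
#     [a[0, 0], a[1, 1], a[2, 2]]   # main diagonal
#     [a[1, 0], a[2, 1], 0      ]   # subdiagonal, right-padded
# so column j of the packed array holds the nonzeros of column j of a.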
def _solve_linear_sys(a, y0, tend=1, dt=0.1,
solver=None, method='bdf', use_jac=True,
with_jacobian=False, banded=False):
"""Use scipy.integrate.ode to solve a linear system of ODEs.
a : square ndarray
Matrix of the linear system to be solved.
y0 : ndarray
Initial condition
tend : float
Stop time.
dt : float
Step size of the output.
solver : str
If not None, this must be "vode", "lsoda" or "zvode".
method : str
Either "bdf" or "adams".
use_jac : bool
Determines if the jacobian function is passed to ode().
with_jacobian : bool
Passed to ode.set_integrator().
banded : bool
Determines whether a banded or full jacobian is used.
If `banded` is True, `lband` and `uband` are determined by the
values in `a`.
"""
if banded:
lband, uband = _band_count(a)
else:
lband = None
uband = None
if use_jac:
if banded:
r = ode(_linear_func, _linear_banded_jac)
else:
r = ode(_linear_func, _linear_jac)
else:
r = ode(_linear_func)
if solver is None:
if np.iscomplexobj(a):
solver = "zvode"
else:
solver = "vode"
r.set_integrator(solver,
with_jacobian=with_jacobian,
method=method,
lband=lband, uband=uband,
rtol=1e-9, atol=1e-10,
)
t0 = 0
r.set_initial_value(y0, t0)
r.set_f_params(a)
r.set_jac_params(a)
t = [t0]
y = [y0]
while r.successful() and r.t < tend:
r.integrate(r.t + dt)
t.append(r.t)
y.append(r.y)
t = np.array(t)
y = np.array(y)
return t, y
def _analytical_solution(a, y0, t):
"""
Analytical solution to the linear differential equations dy/dt = a*y.
The solution is only valid if `a` is diagonalizable.
Returns a 2-d array with shape (len(t), len(y0)).
"""
lam, v = np.linalg.eig(a)
c = np.linalg.solve(v, y0)
e = c * np.exp(lam * t.reshape(-1, 1))
sol = e.dot(v.T)
return sol
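# A consistency sketch (assumed example, not an original test): the
# eigendecomposition-based solution should agree with the matrix exponential
# y(t) = expm(a * t).dot(y0) whenever `a` is diagonalizable.
def _example_check_analytical_solution():
    from scipy.linalg import expm
    a = np.array([[-0.5, 0.1], [0.0, -0.2]])
    y0 = np.array([1.0, 2.0])
    t = np.array([0.0, 0.5, 1.0])
    sol = _analytical_solution(a, y0, t)
    ref = np.array([expm(a * tk).dot(y0) for tk in t])
    assert_allclose(sol, ref)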
def test_banded_ode_solvers():
# Test the "lsoda", "vode" and "zvode" solvers of the `ode` class
# with a system that has a banded Jacobian matrix.
t_exact = np.linspace(0, 1.0, 5)
# --- Real arrays for testing the "lsoda" and "vode" solvers ---
# lband = 2, uband = 1:
a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0],
[0.2, -0.5, 0.9, 0.0, 0.0],
[0.1, 0.1, -0.4, 0.1, 0.0],
[0.0, 0.3, -0.1, -0.9, -0.3],
[0.0, 0.0, 0.1, 0.1, -0.7]])
# lband = 0, uband = 1:
a_real_upper = np.triu(a_real)
# lband = 2, uband = 0:
a_real_lower = np.tril(a_real)
# lband = 0, uband = 0:
a_real_diag = np.triu(a_real_lower)
real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
real_solutions = []
for a in real_matrices:
y0 = np.arange(1, a.shape[0] + 1)
y_exact = _analytical_solution(a, y0, t_exact)
real_solutions.append((y0, t_exact, y_exact))
def check_real(idx, solver, meth, use_jac, with_jac, banded):
a = real_matrices[idx]
y0, t_exact, y_exact = real_solutions[idx]
t, y = _solve_linear_sys(a, y0,
tend=t_exact[-1],
dt=t_exact[1] - t_exact[0],
solver=solver,
method=meth,
use_jac=use_jac,
with_jacobian=with_jac,
banded=banded)
assert_allclose(t, t_exact)
assert_allclose(y, y_exact)
for idx in range(len(real_matrices)):
p = [['vode', 'lsoda'], # solver
['bdf', 'adams'], # method
[False, True], # use_jac
[False, True], # with_jacobian
[False, True]] # banded
for solver, meth, use_jac, with_jac, banded in itertools.product(*p):
check_real(idx, solver, meth, use_jac, with_jac, banded)
# --- Complex arrays for testing the "zvode" solver ---
# complex, lband = 2, uband = 1:
a_complex = a_real - 0.5j * a_real
# complex, lband = 0, uband = 0:
a_complex_diag = np.diag(np.diag(a_complex))
complex_matrices = [a_complex, a_complex_diag]
complex_solutions = []
for a in complex_matrices:
y0 = np.arange(1, a.shape[0] + 1) + 1j
y_exact = _analytical_solution(a, y0, t_exact)
complex_solutions.append((y0, t_exact, y_exact))
def check_complex(idx, solver, meth, use_jac, with_jac, banded):
a = complex_matrices[idx]
y0, t_exact, y_exact = complex_solutions[idx]
t, y = _solve_linear_sys(a, y0,
tend=t_exact[-1],
dt=t_exact[1] - t_exact[0],
solver=solver,
method=meth,
use_jac=use_jac,
with_jacobian=with_jac,
banded=banded)
assert_allclose(t, t_exact)
assert_allclose(y, y_exact)
for idx in range(len(complex_matrices)):
p = [['bdf', 'adams'], # method
[False, True], # use_jac
[False, True], # with_jacobian
[False, True]] # banded
for meth, use_jac, with_jac, banded in itertools.product(*p):
check_complex(idx, "zvode", meth, use_jac, with_jac, banded)
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/_ivp/radau.py
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse import csc_matrix, issparse, eye
from scipy.sparse.linalg import splu
from scipy.optimize._numdiff import group_columns
from .common import (validate_max_step, validate_tol, select_initial_step,
norm, num_jac, EPS, warn_extraneous)
from .base import OdeSolver, DenseOutput
S6 = 6 ** 0.5
# Butcher tableau. A is not used directly, see below.
C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1])
E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3
# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue
# and a complex conjugate pair. They are written below.
MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3)
MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3))
- 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6)))
# These are transformation matrices.
T = np.array([
[0.09443876248897524, -0.14125529502095421, 0.03002919410514742],
[0.25021312296533332, 0.20412935229379994, -0.38294211275726192],
[1, 1, 0]])
TI = np.array([
[4.17871859155190428, 0.32768282076106237, 0.52337644549944951],
[-4.17871859155190428, -0.32768282076106237, 0.47662355450055044],
[0.50287263494578682, -2.57192694985560522, 0.59603920482822492]])
# These linear combinations are used in the algorithm.
TI_REAL = TI[0]
TI_COMPLEX = TI[1] + 1j * TI[2]
# Interpolator coefficients.
P = np.array([
[13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6],
[13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6],
[1/3, -8/3, 10/3]])
NEWTON_MAXITER = 6 # Maximum number of Newton iterations.
MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size.
MAX_FACTOR = 10 # Maximum allowed increase in a step size.
def solve_collocation_system(fun, t, y, h, Z0, scale, tol,
LU_real, LU_complex, solve_lu):
"""Solve the collocation system.
Parameters
----------
fun : callable
Right-hand side of the system.
t : float
Current time.
y : ndarray, shape (n,)
Current state.
h : float
Step to try.
Z0 : ndarray, shape (3, n)
Initial guess for the solution. It determines new values of `y` at
        ``t + h * C`` as ``y + Z0``, where ``C`` are the Radau method constants.
scale : float
Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
tol : float
        Tolerance to which the system should be solved. This value is
        compared with the error normalized by `scale`.
LU_real, LU_complex
LU decompositions of the system Jacobians.
solve_lu : callable
Callable which solves a linear system given a LU decomposition. The
signature is ``solve_lu(LU, b)``.
Returns
-------
converged : bool
Whether iterations converged.
n_iter : int
Number of completed iterations.
Z : ndarray, shape (3, n)
Found solution.
rate : float
The rate of convergence.
"""
n = y.shape[0]
M_real = MU_REAL / h
M_complex = MU_COMPLEX / h
W = TI.dot(Z0)
Z = Z0
F = np.empty((3, n))
ch = h * C
dW_norm_old = None
dW = np.empty_like(W)
converged = False
for k in range(NEWTON_MAXITER):
for i in range(3):
F[i] = fun(t + ch[i], y + Z[i])
if not np.all(np.isfinite(F)):
break
f_real = F.T.dot(TI_REAL) - M_real * W[0]
f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])
dW_real = solve_lu(LU_real, f_real)
dW_complex = solve_lu(LU_complex, f_complex)
dW[0] = dW_real
dW[1] = dW_complex.real
dW[2] = dW_complex.imag
dW_norm = norm(dW / scale)
if dW_norm_old is not None:
rate = dW_norm / dW_norm_old
else:
rate = None
if (rate is not None and (rate >= 1 or
rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)):
break
W += dW
Z = T.dot(W)
if (dW_norm == 0 or
rate is not None and rate / (1 - rate) * dW_norm < tol):
converged = True
break
dW_norm_old = dW_norm
return converged, k + 1, Z, rate
def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):
"""Predict by which factor to increase/decrease the step size.
The algorithm is described in [1]_.
Parameters
----------
h_abs, h_abs_old : float
Current and previous values of the step size, `h_abs_old` can be None
(see Notes).
error_norm, error_norm_old : float
Current and previous values of the error norm, `error_norm_old` can
be None (see Notes).
Returns
-------
factor : float
Predicted factor.
Notes
-----
If `h_abs_old` and `error_norm_old` are both not None then a two-step
algorithm is used, otherwise a one-step algorithm is used.
References
----------
.. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8.
"""
if error_norm_old is None or h_abs_old is None or error_norm == 0:
multiplier = 1
else:
multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25
with np.errstate(divide='ignore'):
factor = min(1, multiplier) * error_norm ** -0.25
return factor
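# A worked sketch (values assumed for illustration): with h_abs = 0.1,
# h_abs_old = 0.08, error_norm = 0.5 and error_norm_old = 0.2, the two-step
# multiplier is (0.1 / 0.08) * (0.2 / 0.5) ** 0.25 ~= 0.994, so
# factor ~= min(1, 0.994) * 0.5 ** -0.25 ~= 1.18: the step may grow modestly
# because the error norm is below 1.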
class Radau(OdeSolver):
"""Implicit Runge-Kutta method of Radau IIA family of order 5.
The implementation follows [1]_. The error is controlled with a
third-order accurate embedded formula. A cubic polynomial which satisfies
the collocation conditions is used for the dense output.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below). The
vectorized implementation allows a faster approximation of the Jacobian
by finite differences (required for this solver).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e. the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : {None, array_like, sparse_matrix, callable}, optional
Jacobian matrix of the right-hand side of the system with respect to
y, required by this method. The Jacobian matrix has shape (n, n) and
its element (i, j) is equal to ``d f_i / d y_j``.
There are three ways to define the Jacobian:
* If array_like or sparse_matrix, the Jacobian is assumed to
be constant.
* If callable, the Jacobian is assumed to depend on both
t and y; it will be called as ``jac(t, y)`` as necessary.
For the 'Radau' and 'BDF' methods, the return value might be a
sparse matrix.
* If None (default), the Jacobian will be approximated by
finite differences.
It is generally recommended to provide the Jacobian rather than
relying on a finite-difference approximation.
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines a sparsity structure of the Jacobian matrix for a
finite-difference approximation. Its shape must be (n, n). This argument
is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
elements in *each* row, providing the sparsity structure will greatly
speed up the computations [2]_. A zero entry means that a corresponding
element in the Jacobian is always zero. If None (default), the Jacobian
is assumed to be dense.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
nlu : int
Number of LU decompositions.
References
----------
.. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
Stiff and Differential-Algebraic Problems", Sec. IV.8.
.. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13, pp. 117-120, 1974.
"""
def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
vectorized=False, **extraneous):
warn_extraneous(extraneous)
super(Radau, self).__init__(fun, t0, y0, t_bound, vectorized)
self.y_old = None
self.max_step = validate_max_step(max_step)
self.rtol, self.atol = validate_tol(rtol, atol, self.n)
self.f = self.fun(self.t, self.y)
# Select initial step assuming the same order which is used to control
# the error.
self.h_abs = select_initial_step(
self.fun, self.t, self.y, self.f, self.direction,
3, self.rtol, self.atol)
self.h_abs_old = None
self.error_norm_old = None
self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
self.sol = None
self.jac_factor = None
self.jac, self.J = self._validate_jac(jac, jac_sparsity)
if issparse(self.J):
def lu(A):
self.nlu += 1
return splu(A)
def solve_lu(LU, b):
return LU.solve(b)
I = eye(self.n, format='csc')
else:
def lu(A):
self.nlu += 1
return lu_factor(A, overwrite_a=True)
def solve_lu(LU, b):
return lu_solve(LU, b, overwrite_b=True)
I = np.identity(self.n)
self.lu = lu
self.solve_lu = solve_lu
self.I = I
self.current_jac = True
self.LU_real = None
self.LU_complex = None
self.Z = None
def _validate_jac(self, jac, sparsity):
t0 = self.t
y0 = self.y
if jac is None:
if sparsity is not None:
if issparse(sparsity):
sparsity = csc_matrix(sparsity)
groups = group_columns(sparsity)
sparsity = (sparsity, groups)
def jac_wrapped(t, y, f):
self.njev += 1
J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
self.atol, self.jac_factor,
sparsity)
return J
J = jac_wrapped(t0, y0, self.f)
elif callable(jac):
J = jac(t0, y0)
self.njev = 1
if issparse(J):
J = csc_matrix(J)
def jac_wrapped(t, y, _=None):
self.njev += 1
return csc_matrix(jac(t, y), dtype=float)
else:
J = np.asarray(J, dtype=float)
def jac_wrapped(t, y, _=None):
self.njev += 1
return np.asarray(jac(t, y), dtype=float)
if J.shape != (self.n, self.n):
raise ValueError("`jac` is expected to have shape {}, but "
"actually has {}."
.format((self.n, self.n), J.shape))
else:
if issparse(jac):
J = csc_matrix(jac)
else:
J = np.asarray(jac, dtype=float)
if J.shape != (self.n, self.n):
raise ValueError("`jac` is expected to have shape {}, but "
"actually has {}."
.format((self.n, self.n), J.shape))
jac_wrapped = None
return jac_wrapped, J
def _step_impl(self):
t = self.t
y = self.y
f = self.f
max_step = self.max_step
atol = self.atol
rtol = self.rtol
min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
if self.h_abs > max_step:
h_abs = max_step
h_abs_old = None
error_norm_old = None
elif self.h_abs < min_step:
h_abs = min_step
h_abs_old = None
error_norm_old = None
else:
h_abs = self.h_abs
h_abs_old = self.h_abs_old
error_norm_old = self.error_norm_old
J = self.J
LU_real = self.LU_real
LU_complex = self.LU_complex
current_jac = self.current_jac
jac = self.jac
rejected = False
step_accepted = False
message = None
while not step_accepted:
if h_abs < min_step:
return False, self.TOO_SMALL_STEP
h = h_abs * self.direction
t_new = t + h
if self.direction * (t_new - self.t_bound) > 0:
t_new = self.t_bound
h = t_new - t
h_abs = np.abs(h)
if self.sol is None:
Z0 = np.zeros((3, y.shape[0]))
else:
Z0 = self.sol(t + h * C).T - y
scale = atol + np.abs(y) * rtol
converged = False
while not converged:
if LU_real is None or LU_complex is None:
LU_real = self.lu(MU_REAL / h * self.I - J)
LU_complex = self.lu(MU_COMPLEX / h * self.I - J)
converged, n_iter, Z, rate = solve_collocation_system(
self.fun, t, y, h, Z0, scale, self.newton_tol,
LU_real, LU_complex, self.solve_lu)
if not converged:
if current_jac:
break
J = self.jac(t, y, f)
current_jac = True
LU_real = None
LU_complex = None
if not converged:
h_abs *= 0.5
LU_real = None
LU_complex = None
continue
y_new = y + Z[-1]
ZE = Z.T.dot(E) / h
error = self.solve_lu(LU_real, f + ZE)
scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
error_norm = norm(error / scale)
safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
+ n_iter)
if rejected and error_norm > 1:
error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE)
error_norm = norm(error / scale)
if error_norm > 1:
factor = predict_factor(h_abs, h_abs_old,
error_norm, error_norm_old)
h_abs *= max(MIN_FACTOR, safety * factor)
LU_real = None
LU_complex = None
rejected = True
else:
step_accepted = True
recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3
factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old)
factor = min(MAX_FACTOR, safety * factor)
if not recompute_jac and factor < 1.2:
factor = 1
else:
LU_real = None
LU_complex = None
f_new = self.fun(t_new, y_new)
if recompute_jac:
J = jac(t_new, y_new, f_new)
current_jac = True
elif jac is not None:
current_jac = False
self.h_abs_old = self.h_abs
self.error_norm_old = error_norm
self.h_abs = h_abs * factor
self.y_old = y
self.t = t_new
self.y = y_new
self.f = f_new
self.Z = Z
self.LU_real = LU_real
self.LU_complex = LU_complex
self.current_jac = current_jac
self.J = J
self.t_old = t
self.sol = self._compute_dense_output()
return step_accepted, message
def _compute_dense_output(self):
Q = np.dot(self.Z.T, P)
return RadauDenseOutput(self.t_old, self.t, self.y_old, Q)
def _dense_output_impl(self):
return self.sol
class RadauDenseOutput(DenseOutput):
def __init__(self, t_old, t, y_old, Q):
super(RadauDenseOutput, self).__init__(t_old, t)
self.h = t - t_old
self.Q = Q
self.order = Q.shape[1] - 1
self.y_old = y_old
def _call_impl(self, t):
x = (t - self.t_old) / self.h
if t.ndim == 0:
p = np.tile(x, self.order + 1)
p = np.cumprod(p)
else:
p = np.tile(x, (self.order + 1, 1))
p = np.cumprod(p, axis=0)
        # Note that the product below is deliberately not multiplied by h:
        # Q is built from Z, which already holds increments over the step.
y = np.dot(self.Q, p)
if y.ndim == 2:
y += self.y_old[:, None]
else:
y += self.y_old
return y
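# A minimal usage sketch (assumed example, not part of the original module):
# stepping the Radau solver by hand instead of going through solve_ivp.
if __name__ == '__main__':
    def _rhs(t, y):
        return -50.0 * (y - np.cos(t))  # a mildly stiff scalar problem
    solver = Radau(_rhs, 0.0, [1.0], 1.0)
    while solver.status == 'running':
        solver.step()
    print(solver.t, solver.y, solver.nfev, solver.njev, solver.nlu)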
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/_ivp/base.py
from __future__ import division, print_function, absolute_import
import numpy as np
def check_arguments(fun, y0, support_complex):
"""Helper function for checking arguments common to all solvers."""
y0 = np.asarray(y0)
if np.issubdtype(y0.dtype, np.complexfloating):
if not support_complex:
raise ValueError("`y0` is complex, but the chosen solver does "
"not support integration in a complex domain.")
dtype = complex
else:
dtype = float
y0 = y0.astype(dtype, copy=False)
if y0.ndim != 1:
raise ValueError("`y0` must be 1-dimensional.")
def fun_wrapped(t, y):
return np.asarray(fun(t, y), dtype=dtype)
return fun_wrapped, y0
class OdeSolver(object):
"""Base class for ODE solvers.
In order to implement a new solver you need to follow the guidelines:
1. A constructor must accept parameters presented in the base class
(listed below) along with any other parameters specific to a solver.
2. A constructor must accept arbitrary extraneous arguments
``**extraneous``, but warn that these arguments are irrelevant
using `common.warn_extraneous` function. Do not pass these
arguments to the base class.
3. A solver must implement a private method `_step_impl(self)` which
propagates a solver one step further. It must return tuple
``(success, message)``, where ``success`` is a boolean indicating
whether a step was successful, and ``message`` is a string
containing description of a failure if a step failed or None
otherwise.
4. A solver must implement a private method `_dense_output_impl(self)`
which returns a `DenseOutput` object covering the last successful
step.
5. A solver must have attributes listed below in Attributes section.
Note that `t_old` and `step_size` are updated automatically.
6. Use `fun(self, t, y)` method for the system rhs evaluation, this
way the number of function evaluations (`nfev`) will be tracked
automatically.
7. For convenience a base class provides `fun_single(self, t, y)` and
`fun_vectorized(self, t, y)` for evaluating the rhs in
non-vectorized and vectorized fashions respectively (regardless of
how `fun` from the constructor is implemented). These calls don't
increment `nfev`.
8. If a solver uses a Jacobian matrix and LU decompositions, it should
track the number of Jacobian evaluations (`njev`) and the number of
LU decompositions (`nlu`).
9. By convention the function evaluations used to compute a finite
difference approximation of the Jacobian should not be counted in
`nfev`, thus use `fun_single(self, t, y)` or
`fun_vectorized(self, t, y)` when computing a finite difference
approximation of the Jacobian.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar and there are two options for ndarray ``y``.
It can either have shape (n,), then ``fun`` must return array_like with
shape (n,). Or alternatively it can have shape (n, n_points), then
``fun`` must return array_like with shape (n, n_points) (each column
corresponds to a single column in ``y``). The choice between the two
options is determined by `vectorized` argument (see below).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time --- the integration won't continue beyond it. It also
determines the direction of the integration.
vectorized : bool
Whether `fun` is implemented in a vectorized fashion.
support_complex : bool, optional
Whether integration in a complex domain should be supported.
Generally determined by a derived solver class capabilities.
Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
Number of the system's rhs evaluations.
njev : int
Number of the Jacobian evaluations.
nlu : int
Number of LU decompositions.
"""
TOO_SMALL_STEP = "Required step size is less than spacing between numbers."
def __init__(self, fun, t0, y0, t_bound, vectorized,
support_complex=False):
self.t_old = None
self.t = t0
self._fun, self.y = check_arguments(fun, y0, support_complex)
self.t_bound = t_bound
self.vectorized = vectorized
if vectorized:
def fun_single(t, y):
return self._fun(t, y[:, None]).ravel()
fun_vectorized = self._fun
else:
fun_single = self._fun
def fun_vectorized(t, y):
f = np.empty_like(y)
for i, yi in enumerate(y.T):
f[:, i] = self._fun(t, yi)
return f
def fun(t, y):
self.nfev += 1
return self.fun_single(t, y)
self.fun = fun
self.fun_single = fun_single
self.fun_vectorized = fun_vectorized
self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1
self.n = self.y.size
self.status = 'running'
self.nfev = 0
self.njev = 0
self.nlu = 0
@property
def step_size(self):
if self.t_old is None:
return None
else:
return np.abs(self.t - self.t_old)
def step(self):
"""Perform one integration step.
Returns
-------
message : string or None
Report from the solver. Typically a reason for a failure if
`self.status` is 'failed' after the step was taken or None
otherwise.
"""
if self.status != 'running':
raise RuntimeError("Attempt to step on a failed or finished "
"solver.")
if self.n == 0 or self.t == self.t_bound:
# Handle corner cases of empty solver or no integration.
self.t_old = self.t
self.t = self.t_bound
message = None
self.status = 'finished'
else:
t = self.t
success, message = self._step_impl()
if not success:
self.status = 'failed'
else:
self.t_old = t
if self.direction * (self.t - self.t_bound) >= 0:
self.status = 'finished'
return message
def dense_output(self):
"""Compute a local interpolant over the last successful step.
Returns
-------
sol : `DenseOutput`
Local interpolant over the last successful step.
"""
if self.t_old is None:
raise RuntimeError("Dense output is available after a successful "
"step was made.")
if self.n == 0 or self.t == self.t_old:
# Handle corner cases of empty solver and no integration.
return ConstantDenseOutput(self.t_old, self.t, self.y)
else:
return self._dense_output_impl()
def _step_impl(self):
raise NotImplementedError
def _dense_output_impl(self):
raise NotImplementedError
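class _ExampleEulerSolver(OdeSolver):
    """A minimal sketch of the guidelines above (illustrative only, assumed
    example, not part of scipy): a fixed-step forward Euler solver that
    implements `_step_impl` and `_dense_output_impl`.
    """
    def __init__(self, fun, t0, y0, t_bound, h=1e-3, **extraneous):
        super(_ExampleEulerSolver, self).__init__(fun, t0, y0, t_bound,
                                                  vectorized=False)
        self.h = h
    def _step_impl(self):
        t_new = self.t + self.direction * self.h
        if self.direction * (t_new - self.t_bound) > 0:
            t_new = self.t_bound
        # self.fun increments nfev automatically (guideline 6).
        self.y = self.y + (t_new - self.t) * self.fun(self.t, self.y)
        self.t = t_new
        return True, None
    def _dense_output_impl(self):
        # Piecewise-constant output is crude but satisfies the interface.
        return ConstantDenseOutput(self.t_old, self.t, self.y)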
class DenseOutput(object):
"""Base class for local interpolant over step made by an ODE solver.
It interpolates between `t_min` and `t_max` (see Attributes below).
Evaluation outside this interval is not forbidden, but the accuracy is not
guaranteed.
Attributes
----------
t_min, t_max : float
Time range of the interpolation.
"""
def __init__(self, t_old, t):
self.t_old = t_old
self.t = t
self.t_min = min(t, t_old)
self.t_max = max(t, t_old)
def __call__(self, t):
"""Evaluate the interpolant.
Parameters
----------
t : float or array_like with shape (n_points,)
Points to evaluate the solution at.
Returns
-------
y : ndarray, shape (n,) or (n, n_points)
Computed values. Shape depends on whether `t` was a scalar or a
1-d array.
"""
t = np.asarray(t)
if t.ndim > 1:
raise ValueError("`t` must be float or 1-d array.")
return self._call_impl(t)
def _call_impl(self, t):
raise NotImplementedError
class ConstantDenseOutput(DenseOutput):
"""Constant value interpolator.
    This class is used for degenerate integration cases: equal integration limits
or a system with 0 equations.
"""
def __init__(self, t_old, t, value):
super(ConstantDenseOutput, self).__init__(t_old, t)
self.value = value
def _call_impl(self, t):
if t.ndim == 0:
return self.value
else:
ret = np.empty((self.value.shape[0], t.shape[0]))
ret[:] = self.value[:, None]
return ret
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/_ivp/lsoda.py
import numpy as np
from scipy.integrate import ode
from .common import validate_tol, warn_extraneous
from .base import OdeSolver, DenseOutput
class LSODA(OdeSolver):
"""Adams/BDF method with automatic stiffness detection and switching.
This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches
automatically between the nonstiff Adams method and the stiff BDF method.
The method was originally detailed in [2]_.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below). The
vectorized implementation allows a faster approximation of the Jacobian
by finite differences (required for this solver).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
first_step : float or None, optional
Initial step size. Default is ``None`` which means that the algorithm
should choose.
min_step : float, optional
Minimum allowed step size. Default is 0.0, i.e. the step size is not
bounded and determined solely by the solver.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e. the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : None or callable, optional
Jacobian matrix of the right-hand side of the system with respect to
``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is
equal to ``d f_i / d y_j``. The function will be called as
``jac(t, y)``. If None (default), the Jacobian will be
approximated by finite differences. It is generally recommended to
provide the Jacobian rather than relying on a finite-difference
approximation.
lband, uband : int or None
Parameters defining the bandwidth of the Jacobian,
i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting
these requires your jac routine to return the Jacobian in the packed format:
the returned array must have ``n`` columns and ``uband + lband + 1``
rows in which Jacobian diagonals are written. Specifically
``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used
in `scipy.linalg.solve_banded` (check for an illustration).
These parameters can be also used with ``jac=None`` to reduce the
number of Jacobian elements estimated by finite differences.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. A vectorized
implementation offers no advantages for this solver. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
References
----------
.. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
pp. 55-64, 1983.
.. [2] L. Petzold, "Automatic selection of methods for solving stiff and
nonstiff systems of ordinary differential equations", SIAM Journal
on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
1983.
"""
def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,
max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,
uband=None, vectorized=False, **extraneous):
warn_extraneous(extraneous)
super(LSODA, self).__init__(fun, t0, y0, t_bound, vectorized)
if first_step is None:
first_step = 0 # LSODA value for automatic selection.
elif first_step <= 0:
raise ValueError("`first_step` must be positive or None.")
if max_step == np.inf:
max_step = 0 # LSODA value for infinity.
elif max_step <= 0:
raise ValueError("`max_step` must be positive.")
if min_step < 0:
raise ValueError("`min_step` must be nonnegative.")
rtol, atol = validate_tol(rtol, atol, self.n)
if jac is None: # No lambda as PEP8 insists.
def jac():
return None
solver = ode(self.fun, jac)
solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,
min_step=min_step, first_step=first_step,
lband=lband, uband=uband)
solver.set_initial_value(y0, t0)
# Inject t_bound into rwork array as needed for itask=5.
solver._integrator.rwork[0] = self.t_bound
solver._integrator.call_args[4] = solver._integrator.rwork
self._lsoda_solver = solver
def _step_impl(self):
solver = self._lsoda_solver
integrator = solver._integrator
# From lsoda.step and lsoda.integrate itask=5 means take a single
# step and do not go past t_bound.
itask = integrator.call_args[2]
integrator.call_args[2] = 5
solver._y, solver.t = integrator.run(
solver.f, solver.jac, solver._y, solver.t,
self.t_bound, solver.f_params, solver.jac_params)
integrator.call_args[2] = itask
if solver.successful():
self.t = solver.t
self.y = solver._y
# From LSODA Fortran source njev is equal to nlu.
self.njev = integrator.iwork[12]
self.nlu = integrator.iwork[12]
return True, None
else:
return False, 'Unexpected istate in LSODA.'
def _dense_output_impl(self):
iwork = self._lsoda_solver._integrator.iwork
rwork = self._lsoda_solver._integrator.rwork
order = iwork[14]
h = rwork[11]
yh = np.reshape(rwork[20:20 + (order + 1) * self.n],
(self.n, order + 1), order='F').copy()
return LsodaDenseOutput(self.t_old, self.t, h, order, yh)
class LsodaDenseOutput(DenseOutput):
def __init__(self, t_old, t, h, order, yh):
super(LsodaDenseOutput, self).__init__(t_old, t)
self.h = h
self.yh = yh
self.p = np.arange(order + 1)
def _call_impl(self, t):
if t.ndim == 0:
x = ((t - self.t) / self.h) ** self.p
else:
x = ((t - self.t) / self.h) ** self.p[:, None]
return np.dot(self.yh, x)
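# A minimal usage sketch (assumed example, not part of the original module):
# LSODA is normally selected via solve_ivp(..., method='LSODA'), but like any
# OdeSolver subclass it can also be stepped directly.
if __name__ == '__main__':
    solver = LSODA(lambda t, y: -0.5 * y, 0.0, [4.0], 10.0)
    while solver.status == 'running':
        solver.step()
    print(solver.t, solver.y)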
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/_ivp/ivp.py
from __future__ import division, print_function, absolute_import
import inspect
import numpy as np
from .bdf import BDF
from .radau import Radau
from .rk import RK23, RK45
from .lsoda import LSODA
from scipy.optimize import OptimizeResult
from .common import EPS, OdeSolution
from .base import OdeSolver
METHODS = {'RK23': RK23,
'RK45': RK45,
'Radau': Radau,
'BDF': BDF,
'LSODA': LSODA}
MESSAGES = {0: "The solver successfully reached the end of the integration interval.",
1: "A termination event occurred."}
class OdeResult(OptimizeResult):
pass
def prepare_events(events):
"""Standardize event functions and extract is_terminal and direction."""
if callable(events):
events = (events,)
if events is not None:
is_terminal = np.empty(len(events), dtype=bool)
direction = np.empty(len(events))
for i, event in enumerate(events):
try:
is_terminal[i] = event.terminal
except AttributeError:
is_terminal[i] = False
try:
direction[i] = event.direction
except AttributeError:
direction[i] = 0
else:
is_terminal = None
direction = None
return events, is_terminal, direction
def solve_event_equation(event, sol, t_old, t):
"""Solve an equation corresponding to an ODE event.
    The equation is ``event(t, y(t)) = 0``, where ``y(t)`` is known from an
    ODE solver using some sort of interpolation. It is solved by
    `scipy.optimize.brentq` with xtol=rtol=4*EPS.
Parameters
----------
event : callable
Function ``event(t, y)``.
sol : callable
Function ``sol(t)`` which evaluates an ODE solution between `t_old`
and `t`.
t_old, t : float
Previous and new values of time. They will be used as a bracketing
interval.
Returns
-------
root : float
Found solution.
"""
from scipy.optimize import brentq
return brentq(lambda t: event(t, sol(t)), t_old, t,
xtol=4 * EPS, rtol=4 * EPS)
def handle_events(sol, events, active_events, is_terminal, t_old, t):
"""Helper function to handle events.
Parameters
----------
sol : DenseOutput
Function ``sol(t)`` which evaluates an ODE solution between `t_old`
and `t`.
events : list of callables, length n_events
Event functions with signatures ``event(t, y)``.
active_events : ndarray
Indices of events which occurred.
is_terminal : ndarray, shape (n_events,)
Which events are terminal.
t_old, t : float
Previous and new values of time.
Returns
-------
root_indices : ndarray
        Indices of events which reached zero between `t_old` and `t`, before
        a possible termination.
roots : ndarray
Values of t at which events occurred.
terminate : bool
Whether a terminal event occurred.
"""
roots = []
for event_index in active_events:
roots.append(solve_event_equation(events[event_index], sol, t_old, t))
roots = np.asarray(roots)
if np.any(is_terminal[active_events]):
if t > t_old:
order = np.argsort(roots)
else:
order = np.argsort(-roots)
active_events = active_events[order]
roots = roots[order]
        # Note: `t` is reused here as the index of the first terminal event
        # after sorting; entries past it are discarded below.
        t = np.nonzero(is_terminal[active_events])[0][0]
active_events = active_events[:t + 1]
roots = roots[:t + 1]
terminate = True
else:
terminate = False
return active_events, roots, terminate
def find_active_events(g, g_new, direction):
"""Find which event occurred during an integration step.
Parameters
----------
g, g_new : array_like, shape (n_events,)
Values of event functions at a current and next points.
direction : ndarray, shape (n_events,)
Event "direction" according to the definition in `solve_ivp`.
Returns
-------
active_events : ndarray
Indices of events which occurred during the step.
"""
g, g_new = np.asarray(g), np.asarray(g_new)
up = (g <= 0) & (g_new >= 0)
down = (g >= 0) & (g_new <= 0)
either = up | down
mask = (up & (direction > 0) |
down & (direction < 0) |
either & (direction == 0))
return np.nonzero(mask)[0]
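# Illustrative sketch (assumed values): with g = [-1.0, 2.0],
# g_new = [1.0, -2.0] and direction = np.array([0, 1]), the first event
# crosses upward and the second downward; only the first is reported, since
# the second would require a positive-going crossing:
#
#     find_active_events([-1.0, 2.0], [1.0, -2.0], np.array([0, 1]))
#     # -> array([0])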
def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False,
events=None, vectorized=False, **options):
"""Solve an initial value problem for a system of ODEs.
This function numerically integrates a system of ordinary differential
equations given an initial value::
dy / dt = f(t, y)
y(t0) = y0
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function f(t, y) determines the differential equations.
The goal is to find y(t) approximately satisfying the differential
equations, given an initial value y(t0)=y0.
Some of the solvers support integration in the complex domain, but note that
for stiff ODE solvers, the right-hand side must be complex-differentiable
(satisfy Cauchy-Riemann equations [11]_). To solve a problem in the complex
domain, pass y0 with a complex data type. Another option is always to
rewrite your problem for real and imaginary parts separately.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below). The
vectorized implementation allows a faster approximation of the Jacobian
by finite differences (required for stiff solvers).
t_span : 2-tuple of floats
Interval of integration (t0, tf). The solver starts with t=t0 and
integrates until it reaches t=tf.
y0 : array_like, shape (n,)
Initial state. For problems in the complex domain, pass `y0` with a
complex data type (even if the initial guess is purely real).
method : string or `OdeSolver`, optional
Integration method to use:
* 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_.
The error is controlled assuming accuracy of the fourth-order
method, but steps are taken using the fifth-order accurate formula
(local extrapolation is done). A quartic interpolation polynomial
is used for the dense output [2]_. Can be applied in the complex domain.
* 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error
is controlled assuming accuracy of the second-order method, but
steps are taken using the third-order accurate formula (local
extrapolation is done). A cubic Hermite polynomial is used for the
dense output. Can be applied in the complex domain.
* 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
order 5 [4]_. The error is controlled with a third-order accurate
embedded formula. A cubic polynomial which satisfies the
collocation conditions is used for the dense output.
* 'BDF': Implicit multi-step variable-order (1 to 5) method based
on a backward differentiation formula for the derivative
approximation [5]_. The implementation follows the one described
in [6]_. A quasi-constant step scheme is used and accuracy is
enhanced using the NDF modification. Can be applied in the complex
domain.
* 'LSODA': Adams/BDF method with automatic stiffness detection and
switching [7]_, [8]_. This is a wrapper of the Fortran solver
from ODEPACK.
You should use the 'RK45' or 'RK23' method for non-stiff problems and
'Radau' or 'BDF' for stiff problems [9]_. If not sure, first try to run
        'RK45'. If it needs unusually many iterations, diverges, or fails, your
problem is likely to be stiff and you should use 'Radau' or 'BDF'.
'LSODA' can also be a good universal choice, but it might be somewhat
less convenient to work with as it wraps old Fortran code.
You can also pass an arbitrary class derived from `OdeSolver` which
implements the solver.
dense_output : bool, optional
Whether to compute a continuous solution. Default is False.
t_eval : array_like or None, optional
Times at which to store the computed solution, must be sorted and lie
within `t_span`. If None (default), use points selected by the solver.
events : callable, list of callables or None, optional
Types of events to track. Each is defined by a continuous function of
        time and state that becomes zero in case of an event. Each function
must have the signature ``event(t, y)`` and return a float. The solver will
find an accurate value of ``t`` at which ``event(t, y(t)) = 0`` using a
root-finding algorithm. Additionally each ``event`` function might have
the following attributes:
* terminal: bool, whether to terminate integration if this
event occurs. Implicitly False if not assigned.
* direction: float, direction of a zero crossing. If `direction`
is positive, `event` must go from negative to positive, and
vice versa if `direction` is negative. If 0, then either direction
will count. Implicitly 0 if not assigned.
You can assign attributes like ``event.terminal = True`` to any
function in Python. If None (default), events won't be tracked.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
options
Options passed to a chosen solver. All options available for already
implemented solvers are listed below.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e. the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : {None, array_like, sparse_matrix, callable}, optional
Jacobian matrix of the right-hand side of the system with respect to
y, required by the 'Radau', 'BDF' and 'LSODA' method. The Jacobian matrix
has shape (n, n) and its element (i, j) is equal to ``d f_i / d y_j``.
There are three ways to define the Jacobian:
* If array_like or sparse_matrix, the Jacobian is assumed to
be constant. Not supported by 'LSODA'.
* If callable, the Jacobian is assumed to depend on both
t and y; it will be called as ``jac(t, y)`` as necessary.
For the 'Radau' and 'BDF' methods, the return value might be a
sparse matrix.
* If None (default), the Jacobian will be approximated by
finite differences.
It is generally recommended to provide the Jacobian rather than
relying on a finite-difference approximation.
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines a sparsity structure of the Jacobian matrix for a
finite-difference approximation. Its shape must be (n, n). This argument
is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
elements in *each* row, providing the sparsity structure will greatly
speed up the computations [10]_. A zero entry means that a corresponding
element in the Jacobian is always zero. If None (default), the Jacobian
is assumed to be dense.
Not supported by 'LSODA', see `lband` and `uband` instead.
lband, uband : int or None
Parameters defining the bandwidth of the Jacobian for the 'LSODA' method,
i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting
these requires your jac routine to return the Jacobian in the packed format:
the returned array must have ``n`` columns and ``uband + lband + 1``
rows in which Jacobian diagonals are written. Specifically
``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used
in `scipy.linalg.solve_banded` (check for an illustration).
These parameters can be also used with ``jac=None`` to reduce the
number of Jacobian elements estimated by finite differences.
min_step, first_step : float, optional
The minimum allowed step size and the initial step size respectively
for 'LSODA' method. By default `min_step` is zero and `first_step` is
selected automatically.
Returns
-------
Bunch object with the following fields defined:
t : ndarray, shape (n_points,)
Time points.
y : ndarray, shape (n, n_points)
Values of the solution at `t`.
sol : `OdeSolution` or None
Found solution as `OdeSolution` instance; None if `dense_output` was
set to False.
t_events : list of ndarray or None
        Contains for each event type a list of arrays at which an event of
        that type was detected. None if `events` was None.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
nlu : int
Number of LU decompositions.
status : int
Reason for algorithm termination:
* -1: Integration step failed.
            * 0: The solver successfully reached the end of `t_span`.
* 1: A termination event occurred.
message : string
Human-readable description of the termination reason.
success : bool
True if the solver reached the interval end or a termination event
occurred (``status >= 0``).
References
----------
.. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
formulae", Journal of Computational and Applied Mathematics, Vol. 6,
No. 1, pp. 19-26, 1980.
.. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
        of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
.. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
.. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
Stiff and Differential-Algebraic Problems", Sec. IV.8.
.. [5] `Backward Differentiation Formula
<https://en.wikipedia.org/wiki/Backward_differentiation_formula>`_
on Wikipedia.
.. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
        COMPUT., Vol. 18, No. 1, pp. 1-22, January 1997.
.. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
pp. 55-64, 1983.
.. [8] L. Petzold, "Automatic selection of methods for solving stiff and
nonstiff systems of ordinary differential equations", SIAM Journal
on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
1983.
.. [9] `Stiff equation <https://en.wikipedia.org/wiki/Stiff_equation>`_ on
Wikipedia.
.. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13, pp. 117-120, 1974.
.. [11] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
Examples
--------
Basic exponential decay showing automatically chosen time points.
>>> from scipy.integrate import solve_ivp
>>> def exponential_decay(t, y): return -0.5 * y
>>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
>>> print(sol.t)
[ 0. 0.11487653 1.26364188 3.06061781 4.85759374
6.65456967 8.4515456 10. ]
>>> print(sol.y)
[[2. 1.88836035 1.06327177 0.43319312 0.17648948 0.0719045
0.02929499 0.01350938]
[4. 3.7767207 2.12654355 0.86638624 0.35297895 0.143809
0.05858998 0.02701876]
[8. 7.5534414 4.25308709 1.73277247 0.7059579 0.287618
0.11717996 0.05403753]]
Specifying points where the solution is desired.
>>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8],
... t_eval=[0, 1, 2, 4, 10])
>>> print(sol.t)
[ 0 1 2 4 10]
>>> print(sol.y)
[[2. 1.21305369 0.73534021 0.27066736 0.01350938]
[4. 2.42610739 1.47068043 0.54133472 0.02701876]
[8. 4.85221478 2.94136085 1.08266944 0.05403753]]
Cannon fired upward with terminal event upon impact. The ``terminal`` and
``direction`` fields of an event are applied by monkey patching a function.
Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts at
position 0 with velocity +10. Note that the integration never reaches t=100
because the event is terminal.
>>> def upward_cannon(t, y): return [y[1], -0.5]
>>> def hit_ground(t, y): return y[1]
>>> hit_ground.terminal = True
>>> hit_ground.direction = -1
>>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground)
>>> print(sol.t_events)
[array([ 20.])]
>>> print(sol.t)
[0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
1.11088891e-01 1.11098890e+00 1.11099890e+01 2.00000000e+01]
"""
if method not in METHODS and not (
inspect.isclass(method) and issubclass(method, OdeSolver)):
raise ValueError("`method` must be one of {} or OdeSolver class."
.format(METHODS))
t0, tf = float(t_span[0]), float(t_span[1])
if t_eval is not None:
t_eval = np.asarray(t_eval)
if t_eval.ndim != 1:
raise ValueError("`t_eval` must be 1-dimensional.")
if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)):
raise ValueError("Values in `t_eval` are not within `t_span`.")
d = np.diff(t_eval)
if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0):
raise ValueError("Values in `t_eval` are not properly sorted.")
if tf > t0:
t_eval_i = 0
else:
# Make order of t_eval decreasing to use np.searchsorted.
t_eval = t_eval[::-1]
# This will be an upper bound for slices.
t_eval_i = t_eval.shape[0]
if method in METHODS:
method = METHODS[method]
solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
if t_eval is None:
ts = [t0]
ys = [y0]
else:
ts = []
ys = []
interpolants = []
events, is_terminal, event_dir = prepare_events(events)
if events is not None:
g = [event(t0, y0) for event in events]
t_events = [[] for _ in range(len(events))]
else:
t_events = None
status = None
while status is None:
message = solver.step()
if solver.status == 'finished':
status = 0
elif solver.status == 'failed':
status = -1
break
t_old = solver.t_old
t = solver.t
y = solver.y
if dense_output:
sol = solver.dense_output()
interpolants.append(sol)
else:
sol = None
if events is not None:
g_new = [event(t, y) for event in events]
active_events = find_active_events(g, g_new, event_dir)
if active_events.size > 0:
if sol is None:
sol = solver.dense_output()
root_indices, roots, terminate = handle_events(
sol, events, active_events, is_terminal, t_old, t)
for e, te in zip(root_indices, roots):
t_events[e].append(te)
if terminate:
status = 1
t = roots[-1]
y = sol(t)
g = g_new
if t_eval is None:
ts.append(t)
ys.append(y)
else:
# The value in t_eval equal to t will be included.
if solver.direction > 0:
t_eval_i_new = np.searchsorted(t_eval, t, side='right')
t_eval_step = t_eval[t_eval_i:t_eval_i_new]
else:
t_eval_i_new = np.searchsorted(t_eval, t, side='left')
# It has to be done with two slice operations, because
# you can't slice to 0-th element inclusive using backward
# slicing.
t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]
if t_eval_step.size > 0:
if sol is None:
sol = solver.dense_output()
ts.append(t_eval_step)
ys.append(sol(t_eval_step))
t_eval_i = t_eval_i_new
message = MESSAGES.get(status, message)
if t_events is not None:
t_events = [np.asarray(te) for te in t_events]
if t_eval is None:
ts = np.array(ts)
ys = np.vstack(ys).T
else:
ts = np.hstack(ts)
ys = np.hstack(ys)
if dense_output:
sol = OdeSolution(ts, interpolants)
else:
sol = None
return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, nfev=solver.nfev,
njev=solver.njev, nlu=solver.nlu, status=status,
message=message, success=status >= 0)
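# A short usage sketch (assumed example, not part of the original module) of
# the continuous solution: with dense_output=True the returned ``sol`` can be
# evaluated at arbitrary times inside t_span.
if __name__ == '__main__':
    res = solve_ivp(lambda t, y: -0.5 * y, [0, 10], [2.0], dense_output=True)
    print(res.sol(5.0))  # ~= 2 * exp(-2.5)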
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/_ivp/bdf.py
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse import issparse, csc_matrix, eye
from scipy.sparse.linalg import splu
from scipy.optimize._numdiff import group_columns
from .common import (validate_max_step, validate_tol, select_initial_step,
norm, EPS, num_jac, warn_extraneous)
from .base import OdeSolver, DenseOutput
MAX_ORDER = 5
NEWTON_MAXITER = 4
MIN_FACTOR = 0.2
MAX_FACTOR = 10
def compute_R(order, factor):
"""Compute the matrix for changing the differences array."""
I = np.arange(1, order + 1)[:, None]
J = np.arange(1, order + 1)
M = np.zeros((order + 1, order + 1))
M[1:, 1:] = (I - 1 - factor * J) / I
M[0] = 1
return np.cumprod(M, axis=0)
def change_D(D, order, factor):
"""Change differences array in-place when step size is changed."""
R = compute_R(order, factor)
U = compute_R(order, 1)
RU = R.dot(U)
D[:order + 1] = np.dot(RU.T, D[:order + 1])
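# Illustrative sketch (assumed values, not part of the original module): for
# order = 2 and factor = 0.5, M has rows [1, 1, 1], [0, -0.5, -1] and
# [0, 0.25, 0], so the column-wise cumulative product gives
#
#     compute_R(2, 0.5)
#     # -> array([[ 1.   ,  1.   ,  1.   ],
#     #           [ 0.   , -0.5  , -1.   ],
#     #           [ 0.   , -0.125, -0.   ]])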
def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):
"""Solve the algebraic system resulting from BDF method."""
d = 0
y = y_predict.copy()
dy_norm_old = None
converged = False
for k in range(NEWTON_MAXITER):
f = fun(t_new, y)
if not np.all(np.isfinite(f)):
break
dy = solve_lu(LU, c * f - psi - d)
dy_norm = norm(dy / scale)
if dy_norm_old is None:
rate = None
else:
rate = dy_norm / dy_norm_old
if (rate is not None and (rate >= 1 or
rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)):
break
y += dy
d += dy
if (dy_norm == 0 or
rate is not None and rate / (1 - rate) * dy_norm < tol):
converged = True
break
dy_norm_old = dy_norm
return converged, k + 1, y, d
class BDF(OdeSolver):
"""Implicit method based on backward-differentiation formulas.
This is a variable order method with the order varying automatically from
1 to 5. The general framework of the BDF algorithm is described in [1]_.
This class implements a quasi-constant step size as explained in [2]_.
The error estimation strategy for the constant-step BDF is derived in [3]_.
An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented.
Can be applied in the complex domain.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below). The
vectorized implementation allows a faster approximation of the Jacobian
by finite differences (required for this solver).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e. the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : {None, array_like, sparse_matrix, callable}, optional
Jacobian matrix of the right-hand side of the system with respect to y,
required by this method. The Jacobian matrix has shape (n, n) and its
element (i, j) is equal to ``d f_i / d y_j``.
There are three ways to define the Jacobian:
* If array_like or sparse_matrix, the Jacobian is assumed to
be constant.
* If callable, the Jacobian is assumed to depend on both
t and y; it will be called as ``jac(t, y)`` as necessary.
For the 'Radau' and 'BDF' methods, the return value might be a
sparse matrix.
* If None (default), the Jacobian will be approximated by
finite differences.
It is generally recommended to provide the Jacobian rather than
relying on a finite-difference approximation.
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines a sparsity structure of the Jacobian matrix for a
finite-difference approximation. Its shape must be (n, n). This argument
is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
elements in *each* row, providing the sparsity structure will greatly
speed up the computations [4]_. A zero entry means that a corresponding
element in the Jacobian is always zero. If None (default), the Jacobian
is assumed to be dense.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
nlu : int
Number of LU decompositions.
References
----------
.. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical
Solution of Ordinary Differential Equations", ACM Transactions on
Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975.
.. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
           COMPUT., Vol. 18, No. 1, pp. 1-22, January 1997.
.. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I:
Nonstiff Problems", Sec. III.2.
.. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13, pp. 117-120, 1974.
"""
def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
vectorized=False, **extraneous):
warn_extraneous(extraneous)
super(BDF, self).__init__(fun, t0, y0, t_bound, vectorized,
support_complex=True)
self.max_step = validate_max_step(max_step)
self.rtol, self.atol = validate_tol(rtol, atol, self.n)
f = self.fun(self.t, self.y)
self.h_abs = select_initial_step(self.fun, self.t, self.y, f,
self.direction, 1,
self.rtol, self.atol)
self.h_abs_old = None
self.error_norm_old = None
self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
self.jac_factor = None
self.jac, self.J = self._validate_jac(jac, jac_sparsity)
if issparse(self.J):
def lu(A):
self.nlu += 1
return splu(A)
def solve_lu(LU, b):
return LU.solve(b)
I = eye(self.n, format='csc', dtype=self.y.dtype)
else:
def lu(A):
self.nlu += 1
return lu_factor(A, overwrite_a=True)
def solve_lu(LU, b):
return lu_solve(LU, b, overwrite_b=True)
I = np.identity(self.n, dtype=self.y.dtype)
self.lu = lu
self.solve_lu = solve_lu
self.I = I
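        # NDF/BDF coefficients (editorial comment): `kappa` holds the NDF
        # modification factors from Shampine & Reichelt [2]_ (zeros would give
        # plain BDF), `gamma` the cumulative harmonic sums, and `error_const`
        # the local truncation error constants for each order.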
kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0])
self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1))))
self.alpha = (1 - kappa) * self.gamma
self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2)
D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype)
D[0] = self.y
D[1] = f * self.h_abs * self.direction
self.D = D
self.order = 1
self.n_equal_steps = 0
self.LU = None
def _validate_jac(self, jac, sparsity):
t0 = self.t
y0 = self.y
if jac is None:
if sparsity is not None:
if issparse(sparsity):
sparsity = csc_matrix(sparsity)
groups = group_columns(sparsity)
sparsity = (sparsity, groups)
def jac_wrapped(t, y):
self.njev += 1
f = self.fun_single(t, y)
J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
self.atol, self.jac_factor,
sparsity)
return J
J = jac_wrapped(t0, y0)
elif callable(jac):
J = jac(t0, y0)
self.njev += 1
if issparse(J):
J = csc_matrix(J, dtype=y0.dtype)
def jac_wrapped(t, y):
self.njev += 1
return csc_matrix(jac(t, y), dtype=y0.dtype)
else:
J = np.asarray(J, dtype=y0.dtype)
def jac_wrapped(t, y):
self.njev += 1
return np.asarray(jac(t, y), dtype=y0.dtype)
if J.shape != (self.n, self.n):
raise ValueError("`jac` is expected to have shape {}, but "
"actually has {}."
.format((self.n, self.n), J.shape))
else:
if issparse(jac):
J = csc_matrix(jac, dtype=y0.dtype)
else:
J = np.asarray(jac, dtype=y0.dtype)
if J.shape != (self.n, self.n):
raise ValueError("`jac` is expected to have shape {}, but "
"actually has {}."
.format((self.n, self.n), J.shape))
jac_wrapped = None
return jac_wrapped, J
def _step_impl(self):
t = self.t
D = self.D
max_step = self.max_step
min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
if self.h_abs > max_step:
h_abs = max_step
change_D(D, self.order, max_step / self.h_abs)
self.n_equal_steps = 0
elif self.h_abs < min_step:
h_abs = min_step
change_D(D, self.order, min_step / self.h_abs)
self.n_equal_steps = 0
else:
h_abs = self.h_abs
atol = self.atol
rtol = self.rtol
order = self.order
alpha = self.alpha
gamma = self.gamma
error_const = self.error_const
J = self.J
LU = self.LU
current_jac = self.jac is None
step_accepted = False
while not step_accepted:
if h_abs < min_step:
return False, self.TOO_SMALL_STEP
h = h_abs * self.direction
t_new = t + h
if self.direction * (t_new - self.t_bound) > 0:
t_new = self.t_bound
change_D(D, order, np.abs(t_new - t) / h_abs)
self.n_equal_steps = 0
LU = None
h = t_new - t
h_abs = np.abs(h)
y_predict = np.sum(D[:order + 1], axis=0)
scale = atol + rtol * np.abs(y_predict)
psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order]
converged = False
c = h / alpha[order]
while not converged:
if LU is None:
LU = self.lu(self.I - c * J)
converged, n_iter, y_new, d = solve_bdf_system(
self.fun, t_new, y_predict, c, psi, LU, self.solve_lu,
scale, self.newton_tol)
if not converged:
if current_jac:
break
J = self.jac(t_new, y_predict)
LU = None
current_jac = True
if not converged:
factor = 0.5
h_abs *= factor
change_D(D, order, factor)
self.n_equal_steps = 0
LU = None
continue
safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
+ n_iter)
scale = atol + rtol * np.abs(y_new)
error = error_const[order] * d
error_norm = norm(error / scale)
if error_norm > 1:
factor = max(MIN_FACTOR,
safety * error_norm ** (-1 / (order + 1)))
h_abs *= factor
change_D(D, order, factor)
self.n_equal_steps = 0
# As we didn't have problems with convergence, we don't
# reset LU here.
else:
step_accepted = True
self.n_equal_steps += 1
self.t = t_new
self.y = y_new
self.h_abs = h_abs
self.J = J
self.LU = LU
# Update differences. The principal relation here is
# D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D
# contained difference for previous interpolating polynomial and
# d = D^{k + 1} y_n. Thus this elegant code follows.
D[order + 2] = d - D[order + 1]
D[order + 1] = d
for i in reversed(range(order + 1)):
D[i] += D[i + 1]
if self.n_equal_steps < order + 1:
return True, None
if order > 1:
error_m = error_const[order - 1] * D[order]
error_m_norm = norm(error_m / scale)
else:
error_m_norm = np.inf
if order < MAX_ORDER:
error_p = error_const[order + 1] * D[order + 2]
error_p_norm = norm(error_p / scale)
else:
error_p_norm = np.inf
error_norms = np.array([error_m_norm, error_norm, error_p_norm])
factors = error_norms ** (-1 / np.arange(order, order + 3))
delta_order = np.argmax(factors) - 1
order += delta_order
self.order = order
factor = min(MAX_FACTOR, safety * np.max(factors))
self.h_abs *= factor
change_D(D, order, factor)
self.n_equal_steps = 0
self.LU = None
return True, None
def _dense_output_impl(self):
return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction,
self.order, self.D[:self.order + 1].copy())
class BdfDenseOutput(DenseOutput):
def __init__(self, t_old, t, h, order, D):
super(BdfDenseOutput, self).__init__(t_old, t)
self.order = order
self.t_shift = self.t - h * np.arange(self.order)
self.denom = h * (1 + np.arange(self.order))
self.D = D
def _call_impl(self, t):
if t.ndim == 0:
x = (t - self.t_shift) / self.denom
p = np.cumprod(x)
else:
x = (t - self.t_shift[:, None]) / self.denom[:, None]
p = np.cumprod(x, axis=0)
y = np.dot(self.D[1:].T, p)
if y.ndim == 1:
y += self.D[0]
else:
y += self.D[0, :, None]
return y
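# --- Hedged usage sketch (editorial addition, not part of scipy) ---
# BDF is normally driven through scipy.integrate.solve_ivp rather than
# instantiated directly. A minimal stiff example; `mu` is just an
# illustrative stiffness parameter:
if __name__ == "__main__":
    from scipy.integrate import solve_ivp

    mu = 1000.0

    def stiff_rhs(t, y):
        # Classic stiff test problem y' = -mu * (y - cos(t)).
        return -mu * (y - np.cos(t))

    sol = solve_ivp(stiff_rhs, (0.0, 10.0), [0.0], method='BDF',
                    jac=lambda t, y: np.array([[-mu]]), rtol=1e-6, atol=1e-9)
    print(sol.status, sol.t.size, sol.y[0, -1])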
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/_ivp/rk.py
from __future__ import division, print_function, absolute_import
import numpy as np
from .base import OdeSolver, DenseOutput
from .common import (validate_max_step, validate_tol, select_initial_step,
norm, warn_extraneous)
# Multiply steps computed from asymptotic behaviour of errors by this.
SAFETY = 0.9
MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size.
MAX_FACTOR = 10 # Maximum allowed increase in a step size.
def rk_step(fun, t, y, f, h, A, B, C, E, K):
"""Perform a single Runge-Kutta step.
This function computes a prediction of an explicit Runge-Kutta method and
also estimates the error of a less accurate method.
Notation for Butcher tableau is as in [1]_.
Parameters
----------
fun : callable
Right-hand side of the system.
t : float
Current time.
y : ndarray, shape (n,)
Current state.
f : ndarray, shape (n,)
        Current value of the derivative, i.e. ``fun(t, y)``.
h : float
Step to use.
A : list of ndarray, length n_stages - 1
Coefficients for combining previous RK stages to compute the next
stage. For explicit methods the coefficients above the main diagonal
are zeros, so `A` is stored as a list of arrays of increasing lengths.
The first stage is always just `f`, thus no coefficients for it
are required.
B : ndarray, shape (n_stages,)
Coefficients for combining RK stages for computing the final
prediction.
C : ndarray, shape (n_stages - 1,)
Coefficients for incrementing time for consecutive RK stages.
The value for the first stage is always zero, thus it is not stored.
E : ndarray, shape (n_stages + 1,)
Coefficients for estimating the error of a less accurate method. They
are computed as the difference between b's in an extended tableau.
K : ndarray, shape (n_stages + 1, n)
Storage array for putting RK stages here. Stages are stored in rows.
Returns
-------
y_new : ndarray, shape (n,)
Solution at t + h computed with a higher accuracy.
f_new : ndarray, shape (n,)
Derivative ``fun(t + h, y_new)``.
error : ndarray, shape (n,)
Error estimate of a less accurate method.
References
----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
Equations I: Nonstiff Problems", Sec. II.4.
"""
K[0] = f
for s, (a, c) in enumerate(zip(A, C)):
dy = np.dot(K[:s + 1].T, a) * h
K[s + 1] = fun(t + c * h, y + dy)
y_new = y + h * np.dot(K[:-1].T, B)
f_new = fun(t + h, y_new)
K[-1] = f_new
error = np.dot(K.T, E) * h
return y_new, f_new, error
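# --- Hedged sketch (editorial addition, not part of scipy) ---
# Driving rk_step by hand: one Bogacki-Shampine 3(2) step for y' = -y, using
# the tableau that the RK23 class below stores in its A, B, C and E
# attributes. All leading-underscore names are hypothetical.
if __name__ == "__main__":
    def _decay(t, y):
        return -y

    _A = [np.array([1/2]), np.array([0, 3/4])]
    _B = np.array([2/9, 1/3, 4/9])
    _C = np.array([1/2, 3/4])
    _E = np.array([5/72, -1/12, -1/9, 1/8])
    _y0 = np.array([1.0])
    _K = np.empty((4, 1))  # n_stages + 1 rows, n columns
    y_new, f_new, err = rk_step(_decay, 0.0, _y0, _decay(0.0, _y0), 0.1,
                                _A, _B, _C, _E, _K)
    print(y_new, err)  # y_new ~ exp(-0.1); err is the embedded error estimate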
class RungeKutta(OdeSolver):
"""Base class for explicit Runge-Kutta methods."""
C = NotImplemented
A = NotImplemented
B = NotImplemented
E = NotImplemented
P = NotImplemented
order = NotImplemented
n_stages = NotImplemented
def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
rtol=1e-3, atol=1e-6, vectorized=False, **extraneous):
warn_extraneous(extraneous)
super(RungeKutta, self).__init__(fun, t0, y0, t_bound, vectorized,
support_complex=True)
self.y_old = None
self.max_step = validate_max_step(max_step)
self.rtol, self.atol = validate_tol(rtol, atol, self.n)
self.f = self.fun(self.t, self.y)
self.h_abs = select_initial_step(
self.fun, self.t, self.y, self.f, self.direction,
self.order, self.rtol, self.atol)
self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
def _step_impl(self):
t = self.t
y = self.y
max_step = self.max_step
rtol = self.rtol
atol = self.atol
min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
if self.h_abs > max_step:
h_abs = max_step
elif self.h_abs < min_step:
h_abs = min_step
else:
h_abs = self.h_abs
order = self.order
step_accepted = False
while not step_accepted:
if h_abs < min_step:
return False, self.TOO_SMALL_STEP
h = h_abs * self.direction
t_new = t + h
if self.direction * (t_new - self.t_bound) > 0:
t_new = self.t_bound
h = t_new - t
h_abs = np.abs(h)
y_new, f_new, error = rk_step(self.fun, t, y, self.f, h, self.A,
self.B, self.C, self.E, self.K)
scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
error_norm = norm(error / scale)
if error_norm == 0.0:
h_abs *= MAX_FACTOR
step_accepted = True
elif error_norm < 1:
h_abs *= min(MAX_FACTOR,
max(1, SAFETY * error_norm ** (-1 / (order + 1))))
step_accepted = True
else:
h_abs *= max(MIN_FACTOR,
SAFETY * error_norm ** (-1 / (order + 1)))
self.y_old = y
self.t = t_new
self.y = y_new
self.h_abs = h_abs
self.f = f_new
return True, None
def _dense_output_impl(self):
Q = self.K.T.dot(self.P)
return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
class RK23(RungeKutta):
"""Explicit Runge-Kutta method of order 3(2).
This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled
assuming accuracy of the second-order method, but steps are taken using the
third-order accurate formula (local extrapolation is done). A cubic Hermite
polynomial is used for the dense output.
Can be applied in the complex domain.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar and there are two options for ndarray ``y``.
It can either have shape (n,), then ``fun`` must return array_like with
shape (n,). Or alternatively it can have shape (n, k), then ``fun``
must return array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e. the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
        Number of evaluations of the system's right-hand side.
njev : int
Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian.
nlu : int
Number of LU decompositions. Is always 0 for this solver.
References
----------
.. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
"""
order = 2
n_stages = 3
C = np.array([1/2, 3/4])
A = [np.array([1/2]),
np.array([0, 3/4])]
B = np.array([2/9, 1/3, 4/9])
E = np.array([5/72, -1/12, -1/9, 1/8])
P = np.array([[1, -4 / 3, 5 / 9],
[0, 1, -2/3],
[0, 4/3, -8/9],
[0, -1, 1]])
class RK45(RungeKutta):
"""Explicit Runge-Kutta method of order 5(4).
    This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
    assuming accuracy of the fourth-order method, but steps are taken using
    the fifth-order accurate formula (local extrapolation is done).
A quartic interpolation polynomial is used for the dense output [2]_.
Can be applied in the complex domain.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e. the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
        Number of evaluations of the system's right-hand side.
njev : int
Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian.
nlu : int
Number of LU decompositions. Is always 0 for this solver.
References
----------
.. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
formulae", Journal of Computational and Applied Mathematics, Vol. 6,
No. 1, pp. 19-26, 1980.
    .. [2] L. F. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
"""
order = 4
n_stages = 6
C = np.array([1/5, 3/10, 4/5, 8/9, 1])
A = [np.array([1/5]),
np.array([3/40, 9/40]),
np.array([44/45, -56/15, 32/9]),
np.array([19372/6561, -25360/2187, 64448/6561, -212/729]),
np.array([9017/3168, -355/33, 46732/5247, 49/176, -5103/18656])]
B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
1/40])
# Corresponds to the optimum value of c_6 from [2]_.
P = np.array([
[1, -8048581381/2820520608, 8663915743/2820520608,
-12715105075/11282082432],
[0, 0, 0, 0],
[0, 131558114200/32700410799, -68118460800/10900136933,
87487479700/32700410799],
[0, -1754552775/470086768, 14199869525/1410260304,
-10690763975/1880347072],
[0, 127303824393/49829197408, -318862633887/49829197408,
701980252875 / 199316789632],
[0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
[0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])
class RkDenseOutput(DenseOutput):
def __init__(self, t_old, t, y_old, Q):
super(RkDenseOutput, self).__init__(t_old, t)
self.h = t - t_old
self.Q = Q
self.order = Q.shape[1] - 1
self.y_old = y_old
def _call_impl(self, t):
x = (t - self.t_old) / self.h
if t.ndim == 0:
p = np.tile(x, self.order + 1)
p = np.cumprod(p)
else:
p = np.tile(x, (self.order + 1, 1))
p = np.cumprod(p, axis=0)
y = self.h * np.dot(self.Q, p)
if y.ndim == 2:
y += self.y_old[:, None]
else:
y += self.y_old
return y
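# --- Hedged usage sketch (editorial addition, not part of scipy) ---
# The solver classes can also be stepped manually; solve_ivp runs this loop
# internally. Integrate y' = -y over [0, 1] with RK45:
if __name__ == "__main__":
    solver = RK45(lambda t, y: -y, 0.0, np.array([1.0]), 1.0)
    while solver.status == 'running':
        solver.step()
    print(solver.t, solver.y)  # expect t == 1.0, y ~ [exp(-1)]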
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/_ivp/common.py
from __future__ import division, print_function, absolute_import
from itertools import groupby
from warnings import warn
import numpy as np
from scipy.sparse import find, coo_matrix
EPS = np.finfo(float).eps
def validate_max_step(max_step):
"""Assert that max_Step is valid and return it."""
if max_step <= 0:
raise ValueError("`max_step` must be positive.")
return max_step
def warn_extraneous(extraneous):
"""Display a warning for extraneous keyword arguments.
The initializer of each solver class is expected to collect keyword
arguments that it doesn't understand and warn about them. This function
prints a warning for each key in the supplied dictionary.
Parameters
----------
extraneous : dict
Extraneous keyword arguments
"""
if extraneous:
warn("The following arguments have no effect for a chosen solver: {}."
.format(", ".join("`{}`".format(x) for x in extraneous)))
def validate_tol(rtol, atol, n):
"""Validate tolerance values."""
if rtol < 100 * EPS:
warn("`rtol` is too low, setting to {}".format(100 * EPS))
rtol = 100 * EPS
atol = np.asarray(atol)
if atol.ndim > 0 and atol.shape != (n,):
raise ValueError("`atol` has wrong shape.")
if np.any(atol < 0):
raise ValueError("`atol` must be positive.")
return rtol, atol
def norm(x):
"""Compute RMS norm."""
return np.linalg.norm(x) / x.size ** 0.5
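# For example, norm(np.ones(4)) is 1.0 whereas np.linalg.norm(np.ones(4)) is
# 2.0; the RMS scaling keeps error norms comparable across problem sizes.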
def select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol):
"""Empirically select a good initial step.
The algorithm is described in [1]_.
Parameters
----------
fun : callable
Right-hand side of the system.
t0 : float
Initial value of the independent variable.
y0 : ndarray, shape (n,)
Initial value of the dependent variable.
f0 : ndarray, shape (n,)
        Initial value of the derivative, i.e. ``fun(t0, y0)``.
direction : float
Integration direction.
order : float
Method order.
rtol : float
Desired relative tolerance.
atol : float
Desired absolute tolerance.
Returns
-------
h_abs : float
Absolute value of the suggested initial step.
References
----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
Equations I: Nonstiff Problems", Sec. II.4.
"""
if y0.size == 0:
return np.inf
scale = atol + np.abs(y0) * rtol
d0 = norm(y0 / scale)
d1 = norm(f0 / scale)
if d0 < 1e-5 or d1 < 1e-5:
h0 = 1e-6
else:
h0 = 0.01 * d0 / d1
y1 = y0 + h0 * direction * f0
f1 = fun(t0 + h0 * direction, y1)
d2 = norm((f1 - f0) / scale) / h0
if d1 <= 1e-15 and d2 <= 1e-15:
h1 = max(1e-6, h0 * 1e-3)
else:
h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))
return min(100 * h0, h1)
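# --- Hedged sketch (editorial addition, not part of scipy) ---
# The heuristic in action for y' = -y with an RK45-like order of 4; the
# printed step is bounded by both the local curvature estimate and 100 * h0:
if __name__ == "__main__":
    def _decay(t, y):
        return -y

    _y0 = np.array([1.0])
    print(select_initial_step(_decay, 0.0, _y0, _decay(0.0, _y0),
                              direction=1, order=4, rtol=1e-3, atol=1e-6))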
class OdeSolution(object):
"""Continuous ODE solution.
It is organized as a collection of `DenseOutput` objects which represent
local interpolants. It provides an algorithm to select a right interpolant
for each given point.
The interpolants cover the range between `t_min` and `t_max` (see
Attributes below). Evaluation outside this interval is not forbidden, but
the accuracy is not guaranteed.
When evaluating at a breakpoint (one of the values in `ts`) a segment with
the lower index is selected.
Parameters
----------
ts : array_like, shape (n_segments + 1,)
Time instants between which local interpolants are defined. Must
        be strictly increasing or decreasing (a zero-length segment given by
        two equal points is also allowed).
interpolants : list of DenseOutput with n_segments elements
        Local interpolants. The i-th interpolant is assumed to be defined
between ``ts[i]`` and ``ts[i + 1]``.
Attributes
----------
t_min, t_max : float
Time range of the interpolation.
"""
def __init__(self, ts, interpolants):
ts = np.asarray(ts)
d = np.diff(ts)
# The first case covers integration on zero segment.
if not ((ts.size == 2 and ts[0] == ts[-1])
or np.all(d > 0) or np.all(d < 0)):
raise ValueError("`ts` must be strictly increasing or decreasing.")
self.n_segments = len(interpolants)
if ts.shape != (self.n_segments + 1,):
raise ValueError("Numbers of time stamps and interpolants "
"don't match.")
self.ts = ts
self.interpolants = interpolants
if ts[-1] >= ts[0]:
self.t_min = ts[0]
self.t_max = ts[-1]
self.ascending = True
self.ts_sorted = ts
else:
self.t_min = ts[-1]
self.t_max = ts[0]
self.ascending = False
self.ts_sorted = ts[::-1]
def _call_single(self, t):
# Here we preserve a certain symmetry that when t is in self.ts,
# then we prioritize a segment with a lower index.
if self.ascending:
ind = np.searchsorted(self.ts_sorted, t, side='left')
else:
ind = np.searchsorted(self.ts_sorted, t, side='right')
segment = min(max(ind - 1, 0), self.n_segments - 1)
if not self.ascending:
segment = self.n_segments - 1 - segment
return self.interpolants[segment](t)
def __call__(self, t):
"""Evaluate the solution.
Parameters
----------
t : float or array_like with shape (n_points,)
Points to evaluate at.
Returns
-------
y : ndarray, shape (n_states,) or (n_states, n_points)
Computed values. Shape depends on whether `t` is a scalar or a
1-d array.
"""
t = np.asarray(t)
if t.ndim == 0:
return self._call_single(t)
order = np.argsort(t)
reverse = np.empty_like(order)
reverse[order] = np.arange(order.shape[0])
t_sorted = t[order]
# See comment in self._call_single.
if self.ascending:
segments = np.searchsorted(self.ts_sorted, t_sorted, side='left')
else:
segments = np.searchsorted(self.ts_sorted, t_sorted, side='right')
segments -= 1
segments[segments < 0] = 0
segments[segments > self.n_segments - 1] = self.n_segments - 1
if not self.ascending:
segments = self.n_segments - 1 - segments
ys = []
group_start = 0
for segment, group in groupby(segments):
group_end = group_start + len(list(group))
y = self.interpolants[segment](t_sorted[group_start:group_end])
ys.append(y)
group_start = group_end
ys = np.hstack(ys)
ys = ys[:, reverse]
return ys
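# --- Hedged usage sketch (editorial addition, not part of scipy) ---
# OdeSolution instances are normally obtained as the ``sol`` attribute of a
# solve_ivp result computed with dense_output=True:
if __name__ == "__main__":
    from scipy.integrate import solve_ivp
    res = solve_ivp(lambda t, y: -y, (0.0, 2.0), [1.0], dense_output=True)
    print(res.sol(1.0))                             # ~ [exp(-1)]
    print(res.sol(np.linspace(0.0, 2.0, 5)).shape)  # (1, 5)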
NUM_JAC_DIFF_REJECT = EPS ** 0.875
NUM_JAC_DIFF_SMALL = EPS ** 0.75
NUM_JAC_DIFF_BIG = EPS ** 0.25
NUM_JAC_MIN_FACTOR = 1e3 * EPS
NUM_JAC_FACTOR_INCREASE = 10
NUM_JAC_FACTOR_DECREASE = 0.1
def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
"""Finite differences Jacobian approximation tailored for ODE solvers.
    This function computes a finite difference approximation to the Jacobian
matrix of `fun` with respect to `y` using forward differences.
The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
``d f_i / d y_j``.
A special feature of this function is the ability to correct the step
size from iteration to iteration. The main idea is to keep the finite
    difference significantly separated from its round-off error, which
    approximately equals ``EPS * np.abs(f)``. This reduces the possibility of
    a huge error and assures that the estimated derivatives are reasonably
    close to the true values (i.e. the finite difference approximation at
    least qualitatively reflects the structure of the true Jacobian).
Parameters
----------
fun : callable
Right-hand side of the system implemented in a vectorized fashion.
t : float
Current time.
y : ndarray, shape (n,)
Current state.
f : ndarray, shape (n,)
Value of the right hand side at (t, y).
threshold : float
Threshold for `y` value used for computing the step size as
``factor * np.maximum(np.abs(y), threshold)``. Typically the value of
absolute tolerance (atol) for a solver should be passed as `threshold`.
factor : ndarray with shape (n,) or None
        Factor to use for computing the step size. Pass None for the very
        first evaluation, then use the value returned from this function.
sparsity : tuple (structure, groups) or None
Sparsity structure of the Jacobian, `structure` must be csc_matrix.
Returns
-------
J : ndarray or csc_matrix, shape (n, n)
Jacobian matrix.
factor : ndarray, shape (n,)
Suggested `factor` for the next evaluation.
"""
y = np.asarray(y)
n = y.shape[0]
if n == 0:
return np.empty((0, 0)), factor
if factor is None:
factor = np.ones(n) * EPS ** 0.5
else:
factor = factor.copy()
# Direct the step as ODE dictates, hoping that such a step won't lead to
# a problematic region. For complex ODEs it makes sense to use the real
# part of f as we use steps along real axis.
f_sign = 2 * (np.real(f) >= 0).astype(float) - 1
y_scale = f_sign * np.maximum(threshold, np.abs(y))
h = (y + factor * y_scale) - y
# Make sure that the step is not 0 to start with. Not likely it will be
# executed often.
for i in np.nonzero(h == 0)[0]:
while h[i] == 0:
factor[i] *= 10
h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]
if sparsity is None:
return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
else:
structure, groups = sparsity
return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,
structure, groups)
def _dense_num_jac(fun, t, y, f, h, factor, y_scale):
n = y.shape[0]
h_vecs = np.diag(h)
f_new = fun(t, y[:, None] + h_vecs)
diff = f_new - f[:, None]
max_ind = np.argmax(np.abs(diff), axis=0)
r = np.arange(n)
max_diff = np.abs(diff[max_ind, r])
scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
if np.any(diff_too_small):
ind, = np.nonzero(diff_too_small)
new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
h_vecs[ind, ind] = h_new
f_new = fun(t, y[:, None] + h_vecs[:, ind])
diff_new = f_new - f[:, None]
max_ind = np.argmax(np.abs(diff_new), axis=0)
r = np.arange(ind.shape[0])
max_diff_new = np.abs(diff_new[max_ind, r])
scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
if np.any(update):
update, = np.where(update)
update_ind = ind[update]
factor[update_ind] = new_factor[update]
h[update_ind] = h_new[update]
diff[:, update_ind] = diff_new[:, update]
scale[update_ind] = scale_new[update]
max_diff[update_ind] = max_diff_new[update]
diff /= h
factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
return diff, factor
def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
n = y.shape[0]
n_groups = np.max(groups) + 1
h_vecs = np.empty((n_groups, n))
for group in range(n_groups):
e = np.equal(group, groups)
h_vecs[group] = h * e
h_vecs = h_vecs.T
f_new = fun(t, y[:, None] + h_vecs)
df = f_new - f[:, None]
i, j, _ = find(structure)
diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
max_ind = np.array(abs(diff).argmax(axis=0)).ravel()
r = np.arange(n)
max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()
scale = np.maximum(np.abs(f[max_ind]),
np.abs(f_new[max_ind, groups[r]]))
diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
if np.any(diff_too_small):
ind, = np.nonzero(diff_too_small)
new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
h_new_all = np.zeros(n)
h_new_all[ind] = h_new
groups_unique = np.unique(groups[ind])
groups_map = np.empty(n_groups, dtype=int)
h_vecs = np.empty((groups_unique.shape[0], n))
for k, group in enumerate(groups_unique):
e = np.equal(group, groups)
h_vecs[k] = h_new_all * e
groups_map[group] = k
h_vecs = h_vecs.T
f_new = fun(t, y[:, None] + h_vecs)
df = f_new - f[:, None]
i, j, _ = find(structure[:, ind])
diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
(i, j)), shape=(n, ind.shape[0])).tocsc()
max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()
r = np.arange(ind.shape[0])
max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()
scale_new = np.maximum(
np.abs(f[max_ind_new]),
np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))
update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
if np.any(update):
update, = np.where(update)
update_ind = ind[update]
factor[update_ind] = new_factor[update]
h[update_ind] = h_new[update]
diff[:, update_ind] = diff_new[:, update]
scale[update_ind] = scale_new[update]
max_diff[update_ind] = max_diff_new[update]
diff.data /= np.repeat(h, np.diff(diff.indptr))
factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
return diff, factor
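# --- Hedged usage sketch (editorial addition, not part of scipy) ---
# num_jac expects a vectorized right-hand side: y may arrive with shape
# (n, k). The helper below (hypothetical) has the constant Jacobian
# [[-2, 0], [1, -1]], which the estimate should reproduce closely:
if __name__ == "__main__":
    def _rhs(t, y):
        return np.vstack((-2.0 * y[0], y[0] - y[1]))

    _y = np.array([1.0, 0.5])
    _f = _rhs(0.0, _y[:, None]).ravel()
    J, factor = num_jac(_rhs, 0.0, _y, _f, threshold=1e-6, factor=None)
    print(J)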
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/integrate/_ivp/__init__.py
"""Suite of ODE solvers implemented in Python."""
from __future__ import division, print_function, absolute_import
from .ivp import solve_ivp
from .rk import RK23, RK45
from .radau import Radau
from .bdf import BDF
from .lsoda import LSODA
from .common import OdeSolution
from .base import DenseOutput, OdeSolver
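# --- Hedged usage note (editorial addition, not part of scipy) ---
# The public entry point for these solvers is scipy.integrate.solve_ivp,
# which selects among them via method='RK23', 'RK45', 'Radau', 'BDF' or
# 'LSODA', for example:
#     from scipy.integrate import solve_ivp
#     sol = solve_ivp(lambda t, y: -y, (0.0, 1.0), [1.0], method='BDF')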
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/dia.py
"""Sparse DIAgonal format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['dia_matrix', 'isspmatrix_dia']
import numpy as np
from .base import isspmatrix, _formats, spmatrix
from .data import _data_matrix
from .sputils import (isshape, upcast_char, getdtype, get_index_dtype,
get_sum_dtype, validateaxis, check_shape)
from ._sparsetools import dia_matvec
class dia_matrix(_data_matrix):
"""Sparse matrix with DIAgonal storage
This can be instantiated in several ways:
dia_matrix(D)
with a dense matrix
dia_matrix(S)
with another sparse matrix S (equivalent to S.todia())
dia_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N),
dtype is optional, defaulting to dtype='d'.
dia_matrix((data, offsets), shape=(M, N))
where the ``data[k,:]`` stores the diagonal entries for
diagonal ``offsets[k]`` (See example below)
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
DIA format data array of the matrix
offsets
DIA format offset array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import dia_matrix
>>> dia_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
>>> offsets = np.array([0, -1, 2])
>>> dia_matrix((data, offsets), shape=(4, 4)).toarray()
array([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
format = 'dia'
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix_dia(arg1):
if copy:
arg1 = arg1.copy()
self.data = arg1.data
self.offsets = arg1.offsets
self._shape = check_shape(arg1.shape)
elif isspmatrix(arg1):
if isspmatrix_dia(arg1) and copy:
A = arg1.copy()
else:
A = arg1.todia()
self.data = A.data
self.offsets = A.offsets
self._shape = check_shape(A.shape)
elif isinstance(arg1, tuple):
if isshape(arg1):
# It's a tuple of matrix dimensions (M, N)
# create empty matrix
self._shape = check_shape(arg1)
self.data = np.zeros((0,0), getdtype(dtype, default=float))
idx_dtype = get_index_dtype(maxval=max(self.shape))
self.offsets = np.zeros((0), dtype=idx_dtype)
else:
try:
# Try interpreting it as (data, offsets)
data, offsets = arg1
                except Exception:
raise ValueError('unrecognized form for dia_matrix constructor')
else:
if shape is None:
raise ValueError('expected a shape argument')
self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy))
self.offsets = np.atleast_1d(np.array(arg1[1],
dtype=get_index_dtype(maxval=max(shape)),
copy=copy))
self._shape = check_shape(shape)
else:
#must be dense, convert to COO first, then to DIA
try:
arg1 = np.asarray(arg1)
            except Exception:
raise ValueError("unrecognized form for"
" %s_matrix constructor" % self.format)
from .coo import coo_matrix
A = coo_matrix(arg1, dtype=dtype, shape=shape).todia()
self.data = A.data
self.offsets = A.offsets
self._shape = check_shape(A.shape)
if dtype is not None:
self.data = self.data.astype(dtype)
#check format
if self.offsets.ndim != 1:
raise ValueError('offsets array must have rank 1')
if self.data.ndim != 2:
raise ValueError('data array must have rank 2')
if self.data.shape[0] != len(self.offsets):
raise ValueError('number of diagonals (%d) '
'does not match the number of offsets (%d)'
% (self.data.shape[0], len(self.offsets)))
if len(np.unique(self.offsets)) != len(self.offsets):
raise ValueError('offset array contains duplicate values')
def __repr__(self):
format = _formats[self.getformat()][1]
return "<%dx%d sparse matrix of type '%s'\n" \
"\twith %d stored elements (%d diagonals) in %s format>" % \
(self.shape + (self.dtype.type, self.nnz, self.data.shape[0],
format))
def _data_mask(self):
"""Returns a mask of the same shape as self.data, where
mask[i,j] is True when data[i,j] corresponds to a stored element."""
num_rows, num_cols = self.shape
offset_inds = np.arange(self.data.shape[1])
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
return mask
def count_nonzero(self):
mask = self._data_mask()
return np.count_nonzero(self.data[mask])
def getnnz(self, axis=None):
if axis is not None:
raise NotImplementedError("getnnz over an axis is not implemented "
"for DIA format")
M,N = self.shape
nnz = 0
for k in self.offsets:
if k > 0:
nnz += min(M,N-k)
else:
nnz += min(M+k,N)
return int(nnz)
getnnz.__doc__ = spmatrix.getnnz.__doc__
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def sum(self, axis=None, dtype=None, out=None):
validateaxis(axis)
if axis is not None and axis < 0:
axis += 2
res_dtype = get_sum_dtype(self.dtype)
num_rows, num_cols = self.shape
ret = None
if axis == 0:
mask = self._data_mask()
x = (self.data * mask).sum(axis=0)
if x.shape[0] == num_cols:
res = x
else:
res = np.zeros(num_cols, dtype=x.dtype)
res[:x.shape[0]] = x
ret = np.matrix(res, dtype=res_dtype)
else:
row_sums = np.zeros(num_rows, dtype=res_dtype)
one = np.ones(num_cols, dtype=res_dtype)
dia_matvec(num_rows, num_cols, len(self.offsets),
self.data.shape[1], self.offsets, self.data, one, row_sums)
row_sums = np.matrix(row_sums)
if axis is None:
return row_sums.sum(dtype=dtype, out=out)
if axis is not None:
row_sums = row_sums.T
ret = np.matrix(row_sums.sum(axis=axis))
if out is not None and out.shape != ret.shape:
raise ValueError("dimensions do not match")
return ret.sum(axis=(), dtype=dtype, out=out)
sum.__doc__ = spmatrix.sum.__doc__
def _mul_vector(self, other):
x = other
y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
x.dtype.char))
L = self.data.shape[1]
M,N = self.shape
dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, x.ravel(), y.ravel())
return y
    def _mul_multivector(self, other):
        # Overrides the hook dispatched from spmatrix.__mul__ for 2-D dense
        # operands, multiplying column by column; under the earlier name
        # (_mul_multimatrix) this override was never reached.
        return np.hstack([self._mul_vector(col).reshape(-1, 1)
                          for col in other.T])
def _setdiag(self, values, k=0):
M, N = self.shape
if values.ndim == 0:
# broadcast
values_n = np.inf
else:
values_n = len(values)
if k < 0:
n = min(M + k, N, values_n)
min_index = 0
max_index = n
else:
n = min(M, N - k, values_n)
min_index = k
max_index = k + n
if values.ndim != 0:
# allow also longer sequences
values = values[:n]
if k in self.offsets:
self.data[self.offsets == k, min_index:max_index] = values
else:
self.offsets = np.append(self.offsets, self.offsets.dtype.type(k))
m = max(max_index, self.data.shape[1])
data = np.zeros((self.data.shape[0]+1, m), dtype=self.data.dtype)
data[:-1,:self.data.shape[1]] = self.data
data[-1, min_index:max_index] = values
self.data = data
def todia(self, copy=False):
if copy:
return self.copy()
else:
return self
todia.__doc__ = spmatrix.todia.__doc__
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
num_rows, num_cols = self.shape
max_dim = max(self.shape)
# flip diagonal offsets
offsets = -self.offsets
# re-align the data matrix
r = np.arange(len(offsets), dtype=np.intc)[:, None]
c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None]
pad_amount = max(0, max_dim-self.data.shape[1])
data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount),
dtype=self.data.dtype)))
data = data[r, c]
return dia_matrix((data, offsets), shape=(
num_cols, num_rows), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
def diagonal(self, k=0):
rows, cols = self.shape
if k <= -rows or k >= cols:
raise ValueError("k exceeds matrix dimensions")
idx, = np.where(self.offsets == k)
first_col, last_col = max(0, k), min(rows + k, cols)
if idx.size == 0:
return np.zeros(last_col - first_col, dtype=self.data.dtype)
return self.data[idx[0], first_col:last_col]
diagonal.__doc__ = spmatrix.diagonal.__doc__
def tocsc(self, copy=False):
from .csc import csc_matrix
if self.nnz == 0:
return csc_matrix(self.shape, dtype=self.dtype)
num_rows, num_cols = self.shape
num_offsets, offset_len = self.data.shape
offset_inds = np.arange(offset_len)
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
mask &= (self.data != 0)
idx_dtype = get_index_dtype(maxval=max(self.shape))
indptr = np.zeros(num_cols + 1, dtype=idx_dtype)
indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0))
indptr[offset_len+1:] = indptr[offset_len]
indices = row.T[mask.T].astype(idx_dtype, copy=False)
data = self.data.T[mask.T]
return csc_matrix((data, indices, indptr), shape=self.shape,
dtype=self.dtype)
tocsc.__doc__ = spmatrix.tocsc.__doc__
def tocoo(self, copy=False):
num_rows, num_cols = self.shape
num_offsets, offset_len = self.data.shape
offset_inds = np.arange(offset_len)
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
mask &= (self.data != 0)
row = row[mask]
col = np.tile(offset_inds, num_offsets)[mask.ravel()]
data = self.data[mask]
from .coo import coo_matrix
A = coo_matrix((data,(row,col)), shape=self.shape, dtype=self.dtype)
A.has_canonical_format = True
return A
tocoo.__doc__ = spmatrix.tocoo.__doc__
# needed by _data_matrix
def _with_data(self, data, copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays are copied.
"""
if copy:
return dia_matrix((data, self.offsets.copy()), shape=self.shape)
else:
return dia_matrix((data,self.offsets), shape=self.shape)
def resize(self, *shape):
shape = check_shape(shape)
M, N = shape
# we do not need to handle the case of expanding N
self.data = self.data[:, :N]
if (M > self.shape[0] and
np.any(self.offsets + self.shape[0] < self.data.shape[1])):
# explicitly clear values that were previously hidden
mask = (self.offsets[:, None] + self.shape[0] <=
np.arange(self.data.shape[1]))
self.data[mask] = 0
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
def isspmatrix_dia(x):
"""Is x of dia_matrix type?
Parameters
----------
x
object to check for being a dia matrix
Returns
-------
bool
True if x is a dia matrix, False otherwise
Examples
--------
>>> from scipy.sparse import dia_matrix, isspmatrix_dia
>>> isspmatrix_dia(dia_matrix([[5]]))
True
>>> from scipy.sparse import dia_matrix, csr_matrix, isspmatrix_dia
>>> isspmatrix_dia(csr_matrix([[5]]))
False
"""
return isinstance(x, dia_matrix)
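# --- Hedged usage sketch (editorial addition, not part of scipy) ---
# A few dia_matrix operations defined above, on a small two-diagonal matrix:
if __name__ == "__main__":
    data = np.array([[1, 2, 3, 4]]).repeat(2, axis=0)
    offsets = np.array([0, 1])
    A = dia_matrix((data, offsets), shape=(4, 4))
    print(A.diagonal(1))          # stored upper diagonal: [2 3 4]
    print(A.transpose().offsets)  # transposing flips the offsets: [ 0 -1]
    print(A.tocsr().toarray())    # round-trip through CSR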
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/base.py
"""Base class for sparse matrices"""
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy._lib.six import xrange
from scipy._lib._numpy_compat import broadcast_to
from .sputils import (isdense, isscalarlike, isintlike,
get_sum_dtype, validateaxis, check_reshape_kwargs,
check_shape)
__all__ = ['spmatrix', 'isspmatrix', 'issparse',
'SparseWarning', 'SparseEfficiencyWarning']
class SparseWarning(Warning):
pass
class SparseFormatWarning(SparseWarning):
pass
class SparseEfficiencyWarning(SparseWarning):
pass
# The formats that we might potentially understand.
_formats = {'csc': [0, "Compressed Sparse Column"],
'csr': [1, "Compressed Sparse Row"],
'dok': [2, "Dictionary Of Keys"],
'lil': [3, "LInked List"],
'dod': [4, "Dictionary of Dictionaries"],
'sss': [5, "Symmetric Sparse Skyline"],
'coo': [6, "COOrdinate"],
'lba': [7, "Linpack BAnded"],
'egd': [8, "Ellpack-itpack Generalized Diagonal"],
'dia': [9, "DIAgonal"],
'bsr': [10, "Block Sparse Row"],
'msr': [11, "Modified compressed Sparse Row"],
'bsc': [12, "Block Sparse Column"],
'msc': [13, "Modified compressed Sparse Column"],
'ssk': [14, "Symmetric SKyline"],
'nsk': [15, "Nonsymmetric SKyline"],
'jad': [16, "JAgged Diagonal"],
'uss': [17, "Unsymmetric Sparse Skyline"],
'vbr': [18, "Variable Block Row"],
'und': [19, "Undefined"]
}
# These univariate ufuncs preserve zeros.
_ufuncs_with_fixed_point_at_zero = frozenset([
np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh,
np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad,
np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt])
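# For instance, np.sqrt only needs to transform the stored entries of a sparse
# matrix because sqrt(0) == 0; np.cos is excluded since cos(0) == 1 would
# densify the result.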
MAXPRINT = 50
class spmatrix(object):
""" This class provides a base class for all sparse matrices. It
cannot be instantiated. Most of the work is provided by subclasses.
"""
__array_priority__ = 10.1
ndim = 2
def __init__(self, maxprint=MAXPRINT):
self._shape = None
if self.__class__.__name__ == 'spmatrix':
raise ValueError("This class is not intended"
" to be instantiated directly.")
self.maxprint = maxprint
def set_shape(self, shape):
"""See `reshape`."""
# Make sure copy is False since this is in place
# Make sure format is unchanged because we are doing a __dict__ swap
new_matrix = self.reshape(shape, copy=False).asformat(self.format)
self.__dict__ = new_matrix.__dict__
def get_shape(self):
"""Get shape of a matrix."""
return self._shape
shape = property(fget=get_shape, fset=set_shape)
def reshape(self, *args, **kwargs):
"""reshape(self, shape, order='C', copy=False)
Gives a new shape to a sparse matrix without changing its data.
Parameters
----------
shape : length-2 tuple of ints
The new shape should be compatible with the original shape.
order : {'C', 'F'}, optional
Read the elements using this index order. 'C' means to read and
write the elements using C-like index order; e.g. read entire first
row, then second row, etc. 'F' means to read and write the elements
using Fortran-like index order; e.g. read entire first column, then
second column, etc.
copy : bool, optional
Indicates whether or not attributes of self should be copied
whenever possible. The degree to which attributes are copied varies
depending on the type of sparse matrix being used.
Returns
-------
reshaped_matrix : sparse matrix
A sparse matrix with the given `shape`, not necessarily of the same
format as the current object.
See Also
--------
np.matrix.reshape : NumPy's implementation of 'reshape' for matrices
"""
# If the shape already matches, don't bother doing an actual reshape
# Otherwise, the default is to convert to COO and use its reshape
shape = check_shape(args, self.shape)
order, copy = check_reshape_kwargs(kwargs)
if shape == self.shape:
if copy:
return self.copy()
else:
return self
return self.tocoo(copy=copy).reshape(shape, order=order, copy=False)
def resize(self, shape):
"""Resize the matrix in-place to dimensions given by ``shape``
Any elements that lie within the new shape will remain at the same
indices, while non-zero elements lying outside the new shape are
removed.
Parameters
----------
shape : (int, int)
number of rows and columns in the new matrix
Notes
-----
The semantics are not identical to `numpy.ndarray.resize` or
`numpy.resize`. Here, the same data will be maintained at each index
        before and after resize, if that index is within the new bounds. In
numpy, resizing maintains contiguity of the array, moving elements
around in the logical matrix but not within a flattened representation.
We give no guarantees about whether the underlying data attributes
(arrays, etc.) will be modified in place or replaced with new objects.
"""
# As an inplace operation, this requires implementation in each format.
raise NotImplementedError(
'{}.resize is not implemented'.format(type(self).__name__))
def astype(self, dtype, casting='unsafe', copy=True):
"""Cast the matrix elements to a specified type.
Parameters
----------
dtype : string or numpy dtype
Typecode or data-type to which to cast the data.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
Defaults to 'unsafe' for backwards compatibility.
'no' means the data types should not be cast at all.
'equiv' means only byte-order changes are allowed.
'safe' means only casts which can preserve values are allowed.
'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
'unsafe' means any data conversions may be done.
copy : bool, optional
If `copy` is `False`, the result might share some memory with this
matrix. If `copy` is `True`, it is guaranteed that the result and
this matrix do not share any memory.
"""
dtype = np.dtype(dtype)
if self.dtype != dtype:
return self.tocsr().astype(
dtype, casting=casting, copy=copy).asformat(self.format)
elif copy:
return self.copy()
else:
return self
def asfptype(self):
"""Upcast matrix to a floating point format (if necessary)"""
fp_types = ['f', 'd', 'F', 'D']
if self.dtype.char in fp_types:
return self
else:
for fp_type in fp_types:
if self.dtype <= np.dtype(fp_type):
return self.astype(fp_type)
raise TypeError('cannot upcast [%s] to a floating '
'point format' % self.dtype.name)
def __iter__(self):
for r in xrange(self.shape[0]):
yield self[r, :]
def getmaxprint(self):
"""Maximum number of elements to display when printed."""
return self.maxprint
def count_nonzero(self):
"""Number of non-zero entries, equivalent to
np.count_nonzero(a.toarray())
Unlike getnnz() and the nnz property, which return the number of stored
entries (the length of the data attribute), this method counts the
actual number of non-zero entries in data.
"""
raise NotImplementedError("count_nonzero not implemented for %s." %
self.__class__.__name__)
def getnnz(self, axis=None):
"""Number of stored values, including explicit zeros.
Parameters
----------
axis : None, 0, or 1
Select between the number of values across the whole matrix, in
each column, or in each row.
See also
--------
count_nonzero : Number of non-zero entries
"""
raise NotImplementedError("getnnz not implemented for %s." %
self.__class__.__name__)
@property
def nnz(self):
"""Number of stored values, including explicit zeros.
See also
--------
count_nonzero : Number of non-zero entries
"""
return self.getnnz()
def getformat(self):
"""Format of a matrix representation as a string."""
return getattr(self, 'format', 'und')
def __repr__(self):
_, format_name = _formats[self.getformat()]
return "<%dx%d sparse matrix of type '%s'\n" \
"\twith %d stored elements in %s format>" % \
(self.shape + (self.dtype.type, self.nnz, format_name))
def __str__(self):
maxprint = self.getmaxprint()
A = self.tocoo()
# helper function, outputs "(i,j) v"
def tostr(row, col, data):
triples = zip(list(zip(row, col)), data)
return '\n'.join([(' %s\t%s' % t) for t in triples])
if self.nnz > maxprint:
half = maxprint // 2
out = tostr(A.row[:half], A.col[:half], A.data[:half])
out += "\n :\t:\n"
half = maxprint - maxprint//2
out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
else:
out = tostr(A.row, A.col, A.data)
return out
def __bool__(self): # Simple -- other ideas?
if self.shape == (1, 1):
return self.nnz != 0
else:
raise ValueError("The truth value of an array with more than one "
"element is ambiguous. Use a.any() or a.all().")
__nonzero__ = __bool__
# What should len(sparse) return? For consistency with dense matrices,
# perhaps it should be the number of rows? But for some uses the number of
# non-zeros is more important. For now, raise an exception!
def __len__(self):
raise TypeError("sparse matrix length is ambiguous; use getnnz()"
" or shape[0]")
def asformat(self, format, copy=False):
"""Return this matrix in the passed sparse format.
Parameters
----------
format : {str, None}
The desired sparse matrix format ("csr", "csc", "lil", "dok", ...)
or None for no conversion.
copy : bool, optional
If True, the result is guaranteed to not share data with self.
Returns
-------
A : This matrix in the passed sparse format.
"""
if format is None or format == self.format:
if copy:
return self.copy()
else:
return self
else:
try:
convert_method = getattr(self, 'to' + format)
except AttributeError:
raise ValueError('Format {} is unknown.'.format(format))
else:
return convert_method(copy=copy)
###################################################################
# NOTE: All arithmetic operations use csr_matrix by default.
# Therefore a new sparse matrix format just needs to define a
# .tocsr() method to provide arithmetic support. Any of these
# methods can be overridden for efficiency.
####################################################################
def multiply(self, other):
"""Point-wise multiplication by another matrix
"""
return self.tocsr().multiply(other)
def maximum(self, other):
"""Element-wise maximum between this and another matrix."""
return self.tocsr().maximum(other)
def minimum(self, other):
"""Element-wise minimum between this and another matrix."""
return self.tocsr().minimum(other)
def dot(self, other):
"""Ordinary dot product
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
"""
return self * other
def power(self, n, dtype=None):
"""Element-wise power."""
return self.tocsr().power(n, dtype=dtype)
def __eq__(self, other):
return self.tocsr().__eq__(other)
def __ne__(self, other):
return self.tocsr().__ne__(other)
def __lt__(self, other):
return self.tocsr().__lt__(other)
def __gt__(self, other):
return self.tocsr().__gt__(other)
def __le__(self, other):
return self.tocsr().__le__(other)
def __ge__(self, other):
return self.tocsr().__ge__(other)
def __abs__(self):
return abs(self.tocsr())
def _add_sparse(self, other):
return self.tocsr()._add_sparse(other)
def _add_dense(self, other):
return self.tocoo()._add_dense(other)
def _sub_sparse(self, other):
return self.tocsr()._sub_sparse(other)
def _sub_dense(self, other):
return self.todense() - other
def _rsub_dense(self, other):
# note: this can't be replaced by other + (-self) for unsigned types
return other - self.todense()
def __add__(self, other): # self + other
if isscalarlike(other):
if other == 0:
return self.copy()
# Now we would add this scalar to every element.
raise NotImplementedError('adding a nonzero scalar to a '
'sparse matrix is not supported')
elif isspmatrix(other):
if other.shape != self.shape:
raise ValueError("inconsistent shapes")
return self._add_sparse(other)
elif isdense(other):
other = broadcast_to(other, self.shape)
return self._add_dense(other)
else:
return NotImplemented
def __radd__(self,other): # other + self
return self.__add__(other)
def __sub__(self, other): # self - other
if isscalarlike(other):
if other == 0:
return self.copy()
raise NotImplementedError('subtracting a nonzero scalar from a '
'sparse matrix is not supported')
elif isspmatrix(other):
if other.shape != self.shape:
raise ValueError("inconsistent shapes")
return self._sub_sparse(other)
elif isdense(other):
other = broadcast_to(other, self.shape)
return self._sub_dense(other)
else:
return NotImplemented
def __rsub__(self,other): # other - self
if isscalarlike(other):
if other == 0:
return -self.copy()
raise NotImplementedError('subtracting a sparse matrix from a '
'nonzero scalar is not supported')
elif isdense(other):
other = broadcast_to(other, self.shape)
return self._rsub_dense(other)
else:
return NotImplemented
def __mul__(self, other):
"""interpret other and call one of the following
self._mul_scalar()
self._mul_vector()
self._mul_multivector()
self._mul_sparse_matrix()
"""
M, N = self.shape
if other.__class__ is np.ndarray:
# Fast path for the most common case
if other.shape == (N,):
return self._mul_vector(other)
elif other.shape == (N, 1):
return self._mul_vector(other.ravel()).reshape(M, 1)
elif other.ndim == 2 and other.shape[0] == N:
return self._mul_multivector(other)
if isscalarlike(other):
# scalar value
return self._mul_scalar(other)
if issparse(other):
if self.shape[1] != other.shape[0]:
raise ValueError('dimension mismatch')
return self._mul_sparse_matrix(other)
# If it's a list or whatever, treat it like a matrix
other_a = np.asanyarray(other)
if other_a.ndim == 0 and other_a.dtype == np.object_:
# Not interpretable as an array; return NotImplemented so that
# other's __rmul__ can kick in if that's implemented.
return NotImplemented
try:
other.shape
except AttributeError:
other = other_a
if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
# dense row or column vector
if other.shape != (N,) and other.shape != (N, 1):
raise ValueError('dimension mismatch')
result = self._mul_vector(np.ravel(other))
if isinstance(other, np.matrix):
result = np.asmatrix(result)
if other.ndim == 2 and other.shape[1] == 1:
# If 'other' was an (nx1) column vector, reshape the result
result = result.reshape(-1, 1)
return result
elif other.ndim == 2:
            # dense 2D array or matrix ("multivector")
if other.shape[0] != self.shape[1]:
raise ValueError('dimension mismatch')
result = self._mul_multivector(np.asarray(other))
if isinstance(other, np.matrix):
result = np.asmatrix(result)
return result
else:
raise ValueError('could not interpret dimensions')
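    # Added illustrative comment (not in the original source): for sparse
    # matrices '*' means matrix multiplication, not elementwise product
    # (use .multiply() for that).  A rough doctest-style sketch:
    #
    #   >>> from scipy.sparse import csr_matrix
    #   >>> import numpy as np
    #   >>> A = csr_matrix([[1, 2], [3, 4]])
    #   >>> A * np.array([1, 1])        # matrix-vector -> dense ndarray
    #   array([3, 7])
    #   >>> (A * A).toarray()           # matrix-matrix -> sparse result
    #   array([[ 7, 10],
    #          [15, 22]])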
# by default, use CSR for __mul__ handlers
def _mul_scalar(self, other):
return self.tocsr()._mul_scalar(other)
def _mul_vector(self, other):
return self.tocsr()._mul_vector(other)
def _mul_multivector(self, other):
return self.tocsr()._mul_multivector(other)
def _mul_sparse_matrix(self, other):
return self.tocsr()._mul_sparse_matrix(other)
def __rmul__(self, other): # other * self
if isscalarlike(other):
return self.__mul__(other)
else:
# Don't use asarray unless we have to
try:
tr = other.transpose()
except AttributeError:
tr = np.asarray(other).transpose()
return (self.transpose() * tr).transpose()
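    # Added clarifying note: the fallback above relies on the identity
    # x * A == (A.T * x.T).T, so a dense-times-sparse product is routed
    # through the sparse __mul__ machinery without densifying self.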
#####################################
# matmul (@) operator (Python 3.5+) #
#####################################
def __matmul__(self, other):
if isscalarlike(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__mul__(other)
def __rmatmul__(self, other):
if isscalarlike(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__rmul__(other)
####################
# Other Arithmetic #
####################
def _divide(self, other, true_divide=False, rdivide=False):
if isscalarlike(other):
if rdivide:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
if true_divide and np.can_cast(self.dtype, np.float_):
return self.astype(np.float_)._mul_scalar(1./other)
else:
r = self._mul_scalar(1./other)
scalar_dtype = np.asarray(other).dtype
if (np.issubdtype(self.dtype, np.integer) and
np.issubdtype(scalar_dtype, np.integer)):
return r.astype(self.dtype)
else:
return r
elif isdense(other):
if not rdivide:
if true_divide:
return np.true_divide(self.todense(), other)
else:
return np.divide(self.todense(), other)
else:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
elif isspmatrix(other):
if rdivide:
return other._divide(self, true_divide, rdivide=False)
self_csr = self.tocsr()
if true_divide and np.can_cast(self.dtype, np.float_):
return self_csr.astype(np.float_)._divide_sparse(other)
else:
return self_csr._divide_sparse(other)
else:
return NotImplemented
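    # Hedged sketch (added comment): scalar division is implemented above
    # as multiplication by 1/other, upcasting to float for true division:
    #
    #   >>> from scipy.sparse import csr_matrix
    #   >>> (csr_matrix([[2, 4]]) / 2).toarray()
    #   array([[1., 2.]])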
def __truediv__(self, other):
return self._divide(other, true_divide=True)
def __div__(self, other):
# Always do true division
return self._divide(other, true_divide=True)
def __rtruediv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
def __rdiv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
def __neg__(self):
return -self.tocsr()
def __iadd__(self, other):
return NotImplemented
def __isub__(self, other):
return NotImplemented
def __imul__(self, other):
return NotImplemented
def __idiv__(self, other):
return self.__itruediv__(other)
def __itruediv__(self, other):
return NotImplemented
def __pow__(self, other):
if self.shape[0] != self.shape[1]:
raise TypeError('matrix is not square')
if isintlike(other):
other = int(other)
if other < 0:
raise ValueError('exponent must be >= 0')
if other == 0:
from .construct import eye
return eye(self.shape[0], dtype=self.dtype)
elif other == 1:
return self.copy()
else:
tmp = self.__pow__(other//2)
if (other % 2):
return self * tmp * tmp
else:
return tmp * tmp
elif isscalarlike(other):
raise ValueError('exponent must be an integer')
else:
return NotImplemented
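    # Added illustration: __pow__ uses exponentiation by squaring, so A**k
    # needs only O(log k) sparse matmuls; k == 0 returns the identity.
    # Roughly:
    #
    #   >>> from scipy.sparse import csr_matrix
    #   >>> A = csr_matrix([[2, 0], [0, 3]])
    #   >>> (A ** 3).toarray()
    #   array([[ 8,  0],
    #          [ 0, 27]])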
def __getattr__(self, attr):
if attr == 'A':
return self.toarray()
elif attr == 'T':
return self.transpose()
elif attr == 'H':
return self.getH()
elif attr == 'real':
return self._real()
elif attr == 'imag':
return self._imag()
elif attr == 'size':
return self.getnnz()
else:
raise AttributeError(attr + " not found")
def transpose(self, axes=None, copy=False):
"""
Reverses the dimensions of the sparse matrix.
Parameters
----------
axes : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value.
copy : bool, optional
Indicates whether or not attributes of `self` should be
copied whenever possible. The degree to which attributes
are copied varies depending on the type of sparse matrix
being used.
Returns
-------
p : `self` with the dimensions reversed.
See Also
--------
np.matrix.transpose : NumPy's implementation of 'transpose'
for matrices
"""
return self.tocsr(copy=copy).transpose(axes=axes, copy=False)
def conj(self, copy=True):
"""Element-wise complex conjugation.
If the matrix is of non-complex data type and `copy` is False,
this method does nothing and the data is not copied.
Parameters
----------
copy : bool, optional
If True, the result is guaranteed to not share data with self.
Returns
-------
A : The element-wise complex conjugate.
"""
if np.issubdtype(self.dtype, np.complexfloating):
return self.tocsr(copy=copy).conj(copy=False)
elif copy:
return self.copy()
else:
return self
def conjugate(self, copy=True):
return self.conj(copy=copy)
conjugate.__doc__ = conj.__doc__
# Renamed conjtranspose() -> getH() for compatibility with dense matrices
def getH(self):
"""Return the Hermitian transpose of this matrix.
See Also
--------
np.matrix.getH : NumPy's implementation of `getH` for matrices
"""
return self.transpose().conj()
def _real(self):
return self.tocsr()._real()
def _imag(self):
return self.tocsr()._imag()
def nonzero(self):
"""nonzero indices
Returns a tuple of arrays (row,col) containing the indices
of the non-zero elements of the matrix.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])
>>> A.nonzero()
(array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
"""
# convert to COOrdinate format
A = self.tocoo()
nz_mask = A.data != 0
return (A.row[nz_mask], A.col[nz_mask])
def getcol(self, j):
"""Returns a copy of column j of the matrix, as an (m x 1) sparse
matrix (column vector).
"""
# Spmatrix subclasses should override this method for efficiency.
# Post-multiply by a (n x 1) column vector 'a' containing all zeros
# except for a_j = 1
from .csc import csc_matrix
n = self.shape[1]
if j < 0:
j += n
if j < 0 or j >= n:
raise IndexError("index out of bounds")
col_selector = csc_matrix(([1], [[j], [0]]),
shape=(n, 1), dtype=self.dtype)
return self * col_selector
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n) sparse
matrix (row vector).
"""
# Spmatrix subclasses should override this method for efficiency.
# Pre-multiply by a (1 x m) row vector 'a' containing all zeros
# except for a_i = 1
from .csr import csr_matrix
m = self.shape[0]
if i < 0:
i += m
if i < 0 or i >= m:
raise IndexError("index out of bounds")
row_selector = csr_matrix(([1], [[0], [i]]),
shape=(1, m), dtype=self.dtype)
return row_selector * self
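    # Added hedged sketch: the selector built above is just the standard
    # basis vector e_i as a sparse matrix, so getrow(i) is e_i^T * A and
    # getcol(j) is A * e_j.  For example:
    #
    #   >>> from scipy.sparse import csr_matrix
    #   >>> A = csr_matrix([[1, 2], [3, 4]])
    #   >>> A.getrow(1).toarray()
    #   array([[3, 4]])
    #   >>> A.getcol(0).toarray()
    #   array([[1],
    #          [3]])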
# def __array__(self):
# return self.toarray()
def todense(self, order=None, out=None):
"""
Return a dense matrix representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-dimensional, optional
If specified, uses this array (or `numpy.matrix`) as the
output buffer instead of allocating a new array to
return. The provided array must have the same shape and
dtype as the sparse matrix on which you are calling the
method.
Returns
-------
arr : numpy.matrix, 2-dimensional
A NumPy matrix object with the same shape and containing
the same data represented by the sparse matrix, with the
requested memory order. If `out` was passed and was an
array (rather than a `numpy.matrix`), it will be filled
with the appropriate values and returned wrapped in a
`numpy.matrix` object that shares the same memory.
"""
return np.asmatrix(self.toarray(order=order, out=out))
def toarray(self, order=None, out=None):
"""
Return a dense ndarray representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-dimensional, optional
If specified, uses this array as the output buffer
instead of allocating a new array to return. The provided
array must have the same shape and dtype as the sparse
matrix on which you are calling the method. For most
sparse types, `out` is required to be memory contiguous
(either C or Fortran ordered).
Returns
-------
arr : ndarray, 2-dimensional
An array with the same shape and containing the same
data represented by the sparse matrix, with the requested
memory order. If `out` was passed, the same object is
returned after being modified in-place to contain the
appropriate values.
"""
return self.tocoo(copy=False).toarray(order=order, out=out)
# Any sparse matrix format deriving from spmatrix must define one of
# tocsr or tocoo. The other conversion methods may be implemented for
# efficiency, but are not required.
def tocsr(self, copy=False):
"""Convert this matrix to Compressed Sparse Row format.
With copy=False, the data/indices may be shared between this matrix and
the resultant csr_matrix.
"""
return self.tocoo(copy=copy).tocsr(copy=False)
def todok(self, copy=False):
"""Convert this matrix to Dictionary Of Keys format.
With copy=False, the data/indices may be shared between this matrix and
the resultant dok_matrix.
"""
return self.tocoo(copy=copy).todok(copy=False)
def tocoo(self, copy=False):
"""Convert this matrix to COOrdinate format.
With copy=False, the data/indices may be shared between this matrix and
the resultant coo_matrix.
"""
return self.tocsr(copy=False).tocoo(copy=copy)
def tolil(self, copy=False):
"""Convert this matrix to LInked List format.
With copy=False, the data/indices may be shared between this matrix and
the resultant lil_matrix.
"""
return self.tocsr(copy=False).tolil(copy=copy)
def todia(self, copy=False):
"""Convert this matrix to sparse DIAgonal format.
With copy=False, the data/indices may be shared between this matrix and
the resultant dia_matrix.
"""
return self.tocoo(copy=copy).todia(copy=False)
def tobsr(self, blocksize=None, copy=False):
"""Convert this matrix to Block Sparse Row format.
With copy=False, the data/indices may be shared between this matrix and
the resultant bsr_matrix.
When blocksize=(R, C) is provided, it will be used for construction of
the bsr_matrix.
"""
return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)
def tocsc(self, copy=False):
"""Convert this matrix to Compressed Sparse Column format.
With copy=False, the data/indices may be shared between this matrix and
the resultant csc_matrix.
"""
return self.tocsr(copy=copy).tocsc(copy=False)
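    # Added clarifying example: since every conversion above funnels through
    # tocsr()/tocoo(), a new format only has to implement one of those two
    # and inherits the whole conversion graph, e.g. (roughly):
    #
    #   >>> from scipy.sparse import coo_matrix
    #   >>> coo_matrix([[1, 0], [0, 2]]).todok().tolil().tocsc().format
    #   'csc'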
def copy(self):
"""Returns a copy of this matrix.
No data/indices will be shared between the returned value and current
matrix.
"""
return self.__class__(self, copy=True)
def sum(self, axis=None, dtype=None, out=None):
"""
Sum the matrix elements over a given axis.
Parameters
----------
        axis : {-2, -1, 0, 1, None}, optional
Axis along which the sum is computed. The default is to
compute the sum of all the matrix elements, returning a scalar
(i.e. `axis` = `None`).
dtype : dtype, optional
The type of the returned matrix and of the accumulator in which
the elements are summed. The dtype of `a` is used by default
unless `a` has an integer dtype of less precision than the default
platform integer. In that case, if `a` is signed then the platform
integer is used while if `a` is unsigned then an unsigned integer
of the same precision as the platform integer is used.
.. versionadded:: 0.18.0
out : np.matrix, optional
Alternative output matrix in which to place the result. It must
have the same shape as the expected output, but the type of the
output values will be cast if necessary.
.. versionadded:: 0.18.0
Returns
-------
sum_along_axis : np.matrix
A matrix with the same shape as `self`, with the specified
axis removed.
See Also
--------
np.matrix.sum : NumPy's implementation of 'sum' for matrices
"""
validateaxis(axis)
# We use multiplication by a matrix of ones to achieve this.
# For some sparse matrix formats more efficient methods are
# possible -- these should override this function.
m, n = self.shape
# Mimic numpy's casting.
res_dtype = get_sum_dtype(self.dtype)
if axis is None:
# sum over rows and columns
return (self * np.asmatrix(np.ones(
(n, 1), dtype=res_dtype))).sum(
dtype=dtype, out=out)
if axis < 0:
axis += 2
# axis = 0 or 1 now
if axis == 0:
# sum over columns
ret = np.asmatrix(np.ones(
(1, m), dtype=res_dtype)) * self
else:
# sum over rows
ret = self * np.asmatrix(
np.ones((n, 1), dtype=res_dtype))
if out is not None and out.shape != ret.shape:
raise ValueError("dimensions do not match")
return ret.sum(axis=(), dtype=dtype, out=out)
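    # Worked illustration (added comment): with the ones-vector trick above,
    # a column sum is ones(1, m) * A and a row sum is A * ones(n, 1):
    #
    #   >>> from scipy.sparse import csr_matrix
    #   >>> A = csr_matrix([[1, 2], [3, 4]])
    #   >>> A.sum(axis=0)
    #   matrix([[4, 6]])
    #   >>> A.sum()
    #   10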
def mean(self, axis=None, dtype=None, out=None):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the matrix elements. The average is taken
over all elements in the matrix by default, otherwise over the
specified axis. `float64` intermediate and return values are used
for integer inputs.
Parameters
----------
        axis : {-2, -1, 0, 1, None}, optional
Axis along which the mean is computed. The default is to compute
the mean of all elements in the matrix (i.e. `axis` = `None`).
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
.. versionadded:: 0.18.0
out : np.matrix, optional
Alternative output matrix in which to place the result. It must
have the same shape as the expected output, but the type of the
output values will be cast if necessary.
.. versionadded:: 0.18.0
Returns
-------
m : np.matrix
See Also
--------
np.matrix.mean : NumPy's implementation of 'mean' for matrices
"""
def _is_integral(dtype):
return (np.issubdtype(dtype, np.integer) or
np.issubdtype(dtype, np.bool_))
validateaxis(axis)
res_dtype = self.dtype.type
integral = _is_integral(self.dtype)
# output dtype
if dtype is None:
if integral:
res_dtype = np.float64
else:
res_dtype = np.dtype(dtype).type
# intermediate dtype for summation
inter_dtype = np.float64 if integral else res_dtype
inter_self = self.astype(inter_dtype)
if axis is None:
return (inter_self / np.array(
self.shape[0] * self.shape[1]))\
.sum(dtype=res_dtype, out=out)
if axis < 0:
axis += 2
# axis = 0 or 1 now
if axis == 0:
return (inter_self * (1.0 / self.shape[0])).sum(
axis=0, dtype=res_dtype, out=out)
else:
return (inter_self * (1.0 / self.shape[1])).sum(
axis=1, dtype=res_dtype, out=out)
def diagonal(self, k=0):
"""Returns the k-th diagonal of the matrix.
Parameters
----------
k : int, optional
            Which diagonal to get, corresponding to elements a[i, i+k].
Default: 0 (the main diagonal).
.. versionadded:: 1.0
See also
--------
numpy.diagonal : Equivalent numpy function.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> A.diagonal()
array([1, 0, 5])
>>> A.diagonal(k=1)
array([2, 3])
"""
return self.tocsr().diagonal(k=k)
def setdiag(self, values, k=0):
"""
Set diagonal or off-diagonal elements of the array.
Parameters
----------
values : array_like
New values of the diagonal elements.
Values may have any length. If the diagonal is longer than values,
            then the remaining diagonal entries will not be set. If values is
            longer than the diagonal, then the remaining values are ignored.
If a scalar value is given, all of the diagonal is set to it.
k : int, optional
Which off-diagonal to set, corresponding to elements a[i,i+k].
Default: 0 (the main diagonal).
"""
M, N = self.shape
if (k > 0 and k >= N) or (k < 0 and -k >= M):
raise ValueError("k exceeds matrix dimensions")
self._setdiag(np.asarray(values), k)
def _setdiag(self, values, k):
M, N = self.shape
if k < 0:
if values.ndim == 0:
# broadcast
max_index = min(M+k, N)
for i in xrange(max_index):
self[i - k, i] = values
else:
max_index = min(M+k, N, len(values))
if max_index <= 0:
return
for i, v in enumerate(values[:max_index]):
self[i - k, i] = v
else:
if values.ndim == 0:
# broadcast
max_index = min(M, N-k)
for i in xrange(max_index):
self[i, i + k] = values
else:
max_index = min(M, N-k, len(values))
if max_index <= 0:
return
for i, v in enumerate(values[:max_index]):
self[i, i + k] = v
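    # Hedged example (added): scalars broadcast along the whole diagonal,
    # while short value arrays leave the tail of the diagonal untouched:
    #
    #   >>> from scipy.sparse import lil_matrix
    #   >>> A = lil_matrix((3, 3))
    #   >>> A.setdiag([1, 2], k=1)   # sets only a[0, 1] and a[1, 2]
    #   >>> A.setdiag(9)             # scalar: whole main diagonal
    #   >>> A.toarray()
    #   array([[9., 1., 0.],
    #          [0., 9., 2.],
    #          [0., 0., 9.]])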
def _process_toarray_args(self, order, out):
if out is not None:
if order is not None:
raise ValueError('order cannot be specified if out '
'is not None')
if out.shape != self.shape or out.dtype != self.dtype:
raise ValueError('out array must be same dtype and shape as '
'sparse matrix')
out[...] = 0.
return out
else:
return np.zeros(self.shape, dtype=self.dtype, order=order)
def isspmatrix(x):
"""Is x of a sparse matrix type?
Parameters
----------
x
object to check for being a sparse matrix
Returns
-------
bool
True if x is a sparse matrix, False otherwise
Notes
-----
issparse and isspmatrix are aliases for the same function.
Examples
--------
>>> from scipy.sparse import csr_matrix, isspmatrix
>>> isspmatrix(csr_matrix([[5]]))
True
>>> from scipy.sparse import isspmatrix
>>> isspmatrix(5)
False
"""
return isinstance(x, spmatrix)
issparse = isspmatrix
| 41,182 | 32.811987 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/sparsetools.py
|
"""
sparsetools is not a public module in scipy.sparse, but this file is
for backward compatibility if someone happens to use it.
"""
from numpy import deprecate
# This file shouldn't be imported by scipy --- Scipy code should use
# internally scipy.sparse._sparsetools
@deprecate(old_name="scipy.sparse.sparsetools",
message=("scipy.sparse.sparsetools is a private module for scipy.sparse, "
"and should not be used."))
def _deprecated():
pass
del deprecate
try:
_deprecated()
except DeprecationWarning as e:
# don't fail import if DeprecationWarnings raise error -- works around
# the situation with Numpy's test framework
pass
from ._sparsetools import *
| 716 | 24.607143 | 85 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csr.py
|
"""Compressed Sparse Row matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csr_matrix', 'isspmatrix_csr']
import numpy as np
from scipy._lib.six import xrange
from .base import spmatrix
from ._sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \
get_csr_submatrix, csr_sample_values
from .sputils import (upcast, isintlike, IndexMixin, issequence,
get_index_dtype, ismatrix)
from .compressed import _cs_matrix
class csr_matrix(_cs_matrix, IndexMixin):
"""
Compressed Sparse Row matrix
This can be instantiated in several ways:
csr_matrix(D)
with a dense matrix or rank-2 ndarray D
csr_matrix(S)
with another sparse matrix S (equivalent to S.tocsr())
csr_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
where ``data``, ``row_ind`` and ``col_ind`` satisfy the
relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
csr_matrix((data, indices, indptr), [shape=(M, N)])
is the standard CSR representation where the column indices for
row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
If the shape parameter is not supplied, the matrix dimensions
are inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
CSR format data array of the matrix
indices
CSR format index array of the matrix
indptr
CSR format index pointer array of the matrix
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the CSR format
- efficient arithmetic operations CSR + CSR, CSR * CSR, etc.
- efficient row slicing
- fast matrix vector products
Disadvantages of the CSR format
- slow column slicing operations (consider CSC)
- changes to the sparsity structure are expensive (consider LIL or DOK)
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> csr_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 0, 1, 2, 2, 2])
>>> col = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
As an example of how to construct a CSR matrix incrementally,
the following snippet builds a term-document matrix from texts:
>>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]]
>>> indptr = [0]
>>> indices = []
>>> data = []
>>> vocabulary = {}
>>> for d in docs:
... for term in d:
... index = vocabulary.setdefault(term, len(vocabulary))
... indices.append(index)
... data.append(1)
... indptr.append(len(indices))
...
>>> csr_matrix((data, indices, indptr), dtype=int).toarray()
array([[2, 1, 0, 0],
[0, 1, 1, 1]])
"""
format = 'csr'
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
M, N = self.shape
from .csc import csc_matrix
return csc_matrix((self.data, self.indices,
self.indptr), shape=(N, M), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
def tolil(self, copy=False):
from .lil import lil_matrix
lil = lil_matrix(self.shape,dtype=self.dtype)
self.sum_duplicates()
ptr,ind,dat = self.indptr,self.indices,self.data
rows, data = lil.rows, lil.data
for n in xrange(self.shape[0]):
start = ptr[n]
end = ptr[n+1]
rows[n] = ind[start:end].tolist()
data[n] = dat[start:end].tolist()
return lil
tolil.__doc__ = spmatrix.tolil.__doc__
def tocsr(self, copy=False):
if copy:
return self.copy()
else:
return self
tocsr.__doc__ = spmatrix.tocsr.__doc__
def tocsc(self, copy=False):
idx_dtype = get_index_dtype((self.indptr, self.indices),
maxval=max(self.nnz, self.shape[0]))
indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
csr_tocsc(self.shape[0], self.shape[1],
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
indptr,
indices,
data)
from .csc import csc_matrix
A = csc_matrix((data, indices, indptr), shape=self.shape)
A.has_sorted_indices = True
return A
tocsc.__doc__ = spmatrix.tocsc.__doc__
def tobsr(self, blocksize=None, copy=True):
from .bsr import bsr_matrix
if blocksize is None:
from .spfuncs import estimate_blocksize
return self.tobsr(blocksize=estimate_blocksize(self))
elif blocksize == (1,1):
arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)
return bsr_matrix(arg1, shape=self.shape, copy=copy)
else:
R,C = blocksize
M,N = self.shape
if R < 1 or C < 1 or M % R != 0 or N % C != 0:
raise ValueError('invalid blocksize %s' % blocksize)
blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices)
idx_dtype = get_index_dtype((self.indptr, self.indices),
maxval=max(N//C, blks))
indptr = np.empty(M//R+1, dtype=idx_dtype)
indices = np.empty(blks, dtype=idx_dtype)
data = np.zeros((blks,R,C), dtype=self.dtype)
csr_tobsr(M, N, R, C,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
indptr, indices, data.ravel())
return bsr_matrix((data,indices,indptr), shape=self.shape)
tobsr.__doc__ = spmatrix.tobsr.__doc__
# these functions are used by the parent class (_cs_matrix)
    # to remove redundancy between csc_matrix and csr_matrix
def _swap(self, x):
"""swap the members of x if this is a column-oriented matrix
"""
return x
def __getitem__(self, key):
def asindices(x):
try:
x = np.asarray(x)
# Check index contents to avoid creating 64bit arrays needlessly
idx_dtype = get_index_dtype((x,), check_contents=True)
if idx_dtype != x.dtype:
x = x.astype(idx_dtype)
            except Exception:
raise IndexError('invalid index')
else:
return x
def check_bounds(indices, N):
if indices.size == 0:
return (0, 0)
max_indx = indices.max()
if max_indx >= N:
raise IndexError('index (%d) out of range' % max_indx)
min_indx = indices.min()
if min_indx < -N:
raise IndexError('index (%d) out of range' % (N + min_indx))
return min_indx, max_indx
def extractor(indices,N):
"""Return a sparse matrix P so that P*self implements
slicing of the form self[[1,2,3],:]
"""
indices = asindices(indices).copy()
min_indx, max_indx = check_bounds(indices, N)
if min_indx < 0:
indices[indices < 0] += N
indptr = np.arange(len(indices)+1, dtype=indices.dtype)
data = np.ones(len(indices), dtype=self.dtype)
shape = (len(indices),N)
return csr_matrix((data,indices,indptr), shape=shape,
dtype=self.dtype, copy=False)
row, col = self._unpack_index(key)
# First attempt to use original row optimized methods
# [1, ?]
if isintlike(row):
# [i, j]
if isintlike(col):
return self._get_single_element(row, col)
# [i, 1:2]
elif isinstance(col, slice):
return self._get_row_slice(row, col)
# [i, [1, 2]]
elif issequence(col):
P = extractor(col,self.shape[1]).T
return self[row, :] * P
elif isinstance(row, slice):
# [1:2,??]
if ((isintlike(col) and row.step in (1, None)) or
(isinstance(col, slice) and
col.step in (1, None) and
row.step in (1, None))):
# col is int or slice with step 1, row is slice with step 1.
return self._get_submatrix(row, col)
elif issequence(col):
# row is slice, col is sequence.
P = extractor(col,self.shape[1]).T # [1:2,[1,2]]
sliced = self
if row != slice(None, None, None):
sliced = sliced[row,:]
return sliced * P
elif issequence(row):
# [[1,2],??]
if isintlike(col) or isinstance(col,slice):
P = extractor(row, self.shape[0]) # [[1,2],j] or [[1,2],1:2]
extracted = P * self
if col == slice(None, None, None):
return extracted
else:
return extracted[:,col]
elif ismatrix(row) and issequence(col):
if len(row[0]) == 1 and isintlike(row[0][0]):
# [[[1],[2]], [1,2]], outer indexing
row = asindices(row)
P_row = extractor(row[:,0], self.shape[0])
P_col = extractor(col, self.shape[1]).T
return P_row * self * P_col
if not (issequence(col) and issequence(row)):
# Sample elementwise
row, col = self._index_to_arrays(row, col)
row = asindices(row)
col = asindices(col)
if row.shape != col.shape:
raise IndexError('number of row and column indices differ')
assert row.ndim <= 2
num_samples = np.size(row)
if num_samples == 0:
return csr_matrix(np.atleast_2d(row).shape, dtype=self.dtype)
check_bounds(row, self.shape[0])
check_bounds(col, self.shape[1])
val = np.empty(num_samples, dtype=self.dtype)
csr_sample_values(self.shape[0], self.shape[1],
self.indptr, self.indices, self.data,
num_samples, row.ravel(), col.ravel(), val)
if row.ndim == 1:
# row and col are 1d
return np.asmatrix(val)
return self.__class__(val.reshape(row.shape))
def __iter__(self):
indptr = np.zeros(2, dtype=self.indptr.dtype)
shape = (1, self.shape[1])
i0 = 0
for i1 in self.indptr[1:]:
indptr[1] = i1 - i0
indices = self.indices[i0:i1]
data = self.data[i0:i1]
yield csr_matrix((data, indices, indptr), shape=shape, copy=True)
i0 = i1
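    # Added illustrative comment: iteration yields each row as its own
    # (1 x N) csr_matrix, reusing a two-element indptr per row:
    #
    #   >>> from scipy.sparse import csr_matrix
    #   >>> A = csr_matrix([[1, 0], [0, 2]])
    #   >>> [row.toarray() for row in A]
    #   [array([[1, 0]]), array([[0, 2]])]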
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n)
CSR matrix (row vector).
"""
M, N = self.shape
i = int(i)
if i < 0:
i += M
if i < 0 or i >= M:
raise IndexError('index (%d) out of range' % i)
idx = slice(*self.indptr[i:i+2])
data = self.data[idx].copy()
indices = self.indices[idx].copy()
indptr = np.array([0, len(indices)], dtype=self.indptr.dtype)
return csr_matrix((data, indices, indptr), shape=(1, N),
dtype=self.dtype, copy=False)
def getcol(self, i):
"""Returns a copy of column i of the matrix, as a (m x 1)
CSR matrix (column vector).
"""
return self._get_submatrix(slice(None), i)
def _get_row_slice(self, i, cslice):
"""Returns a copy of row self[i, cslice]
"""
M, N = self.shape
if i < 0:
i += M
if i < 0 or i >= M:
raise IndexError('index (%d) out of range' % i)
start, stop, stride = cslice.indices(N)
if stride == 1:
# for stride == 1, get_csr_submatrix is faster
row_indptr, row_indices, row_data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data, i, i + 1,
start, stop)
else:
# other strides need new code
row_indices = self.indices[self.indptr[i]:self.indptr[i + 1]]
row_data = self.data[self.indptr[i]:self.indptr[i + 1]]
if stride > 0:
ind = (row_indices >= start) & (row_indices < stop)
else:
ind = (row_indices <= start) & (row_indices > stop)
if abs(stride) > 1:
ind &= (row_indices - start) % stride == 0
row_indices = (row_indices[ind] - start) // stride
row_data = row_data[ind]
row_indptr = np.array([0, len(row_indices)])
if stride < 0:
row_data = row_data[::-1]
row_indices = abs(row_indices[::-1])
shape = (1, int(np.ceil(float(stop - start) / stride)))
return csr_matrix((row_data, row_indices, row_indptr), shape=shape,
dtype=self.dtype, copy=False)
def _get_submatrix(self, row_slice, col_slice):
"""Return a submatrix of this matrix (new matrix is created)."""
def process_slice(sl, num):
if isinstance(sl, slice):
i0, i1, stride = sl.indices(num)
if stride != 1:
raise ValueError('slicing with step != 1 not supported')
elif isintlike(sl):
if sl < 0:
sl += num
i0, i1 = sl, sl + 1
else:
raise TypeError('expected slice or scalar')
if not (0 <= i0 <= num) or not (0 <= i1 <= num) or not (i0 <= i1):
raise IndexError(
"index out of bounds: 0 <= %d <= %d, 0 <= %d <= %d,"
" %d <= %d" % (i0, num, i1, num, i0, i1))
return i0, i1
M,N = self.shape
i0, i1 = process_slice(row_slice, M)
j0, j1 = process_slice(col_slice, N)
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)
shape = (i1 - i0, j1 - j0)
return self.__class__((data, indices, indptr), shape=shape,
dtype=self.dtype, copy=False)
def isspmatrix_csr(x):
"""Is x of csr_matrix type?
Parameters
----------
x
object to check for being a csr matrix
Returns
-------
bool
True if x is a csr matrix, False otherwise
Examples
--------
>>> from scipy.sparse import csr_matrix, isspmatrix_csr
>>> isspmatrix_csr(csr_matrix([[5]]))
True
>>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csc
>>> isspmatrix_csr(csc_matrix([[5]]))
False
"""
return isinstance(x, csr_matrix)
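# Minimal usage sketch appended for illustration; it is not part of the
# original module and only exercises behaviour documented above.
if __name__ == '__main__':
    demo = csr_matrix(np.array([[1, 0, 2], [0, 0, 3], [4, 5, 0]]))
    assert isspmatrix_csr(demo)
    print(demo.getrow(2).toarray())   # -> [[4 5 0]]; row slicing is cheap in CSR
    print(demo[0:2, :].toarray())     # contiguous row-slice submatrix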
| 16,486 | 32.646939 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/spfuncs.py
|
""" Functions that operate on sparse matrices
"""
from __future__ import division, print_function, absolute_import
__all__ = ['count_blocks','estimate_blocksize']
from .csr import isspmatrix_csr, csr_matrix
from .csc import isspmatrix_csc
from ._sparsetools import csr_count_blocks
def extract_diagonal(A):
raise NotImplementedError('use .diagonal() instead')
#def extract_diagonal(A):
# """extract_diagonal(A) returns the main diagonal of A."""
# #TODO extract k-th diagonal
# if isspmatrix_csr(A) or isspmatrix_csc(A):
# fn = getattr(sparsetools, A.format + "_diagonal")
# y = empty( min(A.shape), dtype=upcast(A.dtype) )
# fn(A.shape[0],A.shape[1],A.indptr,A.indices,A.data,y)
# return y
# elif isspmatrix_bsr(A):
# M,N = A.shape
# R,C = A.blocksize
# y = empty( min(M,N), dtype=upcast(A.dtype) )
# fn = sparsetools.bsr_diagonal(M//R, N//C, R, C, \
# A.indptr, A.indices, ravel(A.data), y)
# return y
# else:
# return extract_diagonal(csr_matrix(A))
def estimate_blocksize(A,efficiency=0.7):
"""Attempt to determine the blocksize of a sparse matrix
Returns a blocksize=(r,c) such that
- A.nnz / A.tobsr( (r,c) ).nnz > efficiency
"""
if not (isspmatrix_csr(A) or isspmatrix_csc(A)):
A = csr_matrix(A)
if A.nnz == 0:
return (1,1)
if not 0 < efficiency < 1.0:
raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0')
high_efficiency = (1.0 + efficiency) / 2.0
nnz = float(A.nnz)
M,N = A.shape
if M % 2 == 0 and N % 2 == 0:
e22 = nnz / (4 * count_blocks(A,(2,2)))
else:
e22 = 0.0
if M % 3 == 0 and N % 3 == 0:
e33 = nnz / (9 * count_blocks(A,(3,3)))
else:
e33 = 0.0
if e22 > high_efficiency and e33 > high_efficiency:
e66 = nnz / (36 * count_blocks(A,(6,6)))
if e66 > efficiency:
return (6,6)
else:
return (3,3)
else:
if M % 4 == 0 and N % 4 == 0:
e44 = nnz / (16 * count_blocks(A,(4,4)))
else:
e44 = 0.0
if e44 > efficiency:
return (4,4)
elif e33 > efficiency:
return (3,3)
elif e22 > efficiency:
return (2,2)
else:
return (1,1)
def count_blocks(A,blocksize):
"""For a given blocksize=(r,c) count the number of occupied
blocks in a sparse matrix A
"""
r,c = blocksize
if r < 1 or c < 1:
raise ValueError('r and c must be positive')
if isspmatrix_csr(A):
M,N = A.shape
return csr_count_blocks(M,N,r,c,A.indptr,A.indices)
elif isspmatrix_csc(A):
return count_blocks(A.T,(c,r))
else:
return count_blocks(csr_matrix(A),blocksize)
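# Small self-check appended for illustration (assumes numpy is available);
# it demonstrates the two public helpers on a matrix with clean 2x2 blocks.
if __name__ == '__main__':
    import numpy as np
    A = csr_matrix(np.kron(np.eye(3), np.ones((2, 2))))
    print(count_blocks(A, (2, 2)))   # -> 3 occupied blocks
    print(estimate_blocksize(A))     # -> (2, 2) for this block pattern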
| 2,823 | 26.960396 | 74 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/generate_sparsetools.py
|
"""
python generate_sparsetools.py
Generate manual wrappers for C++ sparsetools code.
Type codes used:
'i': integer scalar
'I': integer array
'T': data array
'B': boolean array
'V': std::vector<integer>*
'W': std::vector<data>*
'*': indicates that the next argument is an output argument
'v': void
'l': 64-bit integer scalar
See sparsetools.cxx for more details.
"""
import optparse
import os
from distutils.dep_util import newer
#
# List of all routines and their argument types.
#
# The first code indicates the return value, the rest the arguments.
#
# bsr.h
BSR_ROUTINES = """
bsr_diagonal v iiiiiIIT*T
bsr_scale_rows v iiiiII*TT
bsr_scale_columns v iiiiII*TT
bsr_sort_indices v iiii*I*I*T
bsr_transpose v iiiiIIT*I*I*T
bsr_matmat_pass2 v iiiiiIITIIT*I*I*T
bsr_matvec v iiiiIITT*T
bsr_matvecs v iiiiiIITT*T
bsr_elmul_bsr v iiiiIITIIT*I*I*T
bsr_eldiv_bsr v iiiiIITIIT*I*I*T
bsr_plus_bsr v iiiiIITIIT*I*I*T
bsr_minus_bsr v iiiiIITIIT*I*I*T
bsr_maximum_bsr v iiiiIITIIT*I*I*T
bsr_minimum_bsr v iiiiIITIIT*I*I*T
bsr_ne_bsr v iiiiIITIIT*I*I*B
bsr_lt_bsr v iiiiIITIIT*I*I*B
bsr_gt_bsr v iiiiIITIIT*I*I*B
bsr_le_bsr v iiiiIITIIT*I*I*B
bsr_ge_bsr v iiiiIITIIT*I*I*B
"""
# csc.h
CSC_ROUTINES = """
csc_diagonal v iiiIIT*T
csc_tocsr v iiIIT*I*I*T
csc_matmat_pass1 v iiIIII*I
csc_matmat_pass2 v iiIITIIT*I*I*T
csc_matvec v iiIITT*T
csc_matvecs v iiiIITT*T
csc_elmul_csc v iiIITIIT*I*I*T
csc_eldiv_csc v iiIITIIT*I*I*T
csc_plus_csc v iiIITIIT*I*I*T
csc_minus_csc v iiIITIIT*I*I*T
csc_maximum_csc v iiIITIIT*I*I*T
csc_minimum_csc v iiIITIIT*I*I*T
csc_ne_csc v iiIITIIT*I*I*B
csc_lt_csc v iiIITIIT*I*I*B
csc_gt_csc v iiIITIIT*I*I*B
csc_le_csc v iiIITIIT*I*I*B
csc_ge_csc v iiIITIIT*I*I*B
"""
# csr.h
CSR_ROUTINES = """
csr_matmat_pass1 v iiIIII*I
csr_matmat_pass2 v iiIITIIT*I*I*T
csr_diagonal v iiiIIT*T
csr_tocsc v iiIIT*I*I*T
csr_tobsr v iiiiIIT*I*I*T
csr_todense v iiIIT*T
csr_matvec v iiIITT*T
csr_matvecs v iiiIITT*T
csr_elmul_csr v iiIITIIT*I*I*T
csr_eldiv_csr v iiIITIIT*I*I*T
csr_plus_csr v iiIITIIT*I*I*T
csr_minus_csr v iiIITIIT*I*I*T
csr_maximum_csr v iiIITIIT*I*I*T
csr_minimum_csr v iiIITIIT*I*I*T
csr_ne_csr v iiIITIIT*I*I*B
csr_lt_csr v iiIITIIT*I*I*B
csr_gt_csr v iiIITIIT*I*I*B
csr_le_csr v iiIITIIT*I*I*B
csr_ge_csr v iiIITIIT*I*I*B
csr_scale_rows v iiII*TT
csr_scale_columns v iiII*TT
csr_sort_indices v iI*I*T
csr_eliminate_zeros v ii*I*I*T
csr_sum_duplicates v ii*I*I*T
get_csr_submatrix v iiIITiiii*V*V*W
csr_sample_values v iiIITiII*T
csr_count_blocks i iiiiII
csr_sample_offsets i iiIIiII*I
expandptr v iI*I
test_throw_error i
csr_has_sorted_indices i iII
csr_has_canonical_format i iII
"""
# coo.h, dia.h, csgraph.h
OTHER_ROUTINES = """
coo_tocsr v iiiIIT*I*I*T
coo_todense v iilIIT*Ti
coo_matvec v lIITT*T
dia_matvec v iiiiITT*T
cs_graph_components i iII*I
"""
# List of compilation units
COMPILATION_UNITS = [
('bsr', BSR_ROUTINES),
('csr', CSR_ROUTINES),
('csc', CSC_ROUTINES),
('other', OTHER_ROUTINES),
]
#
# List of the supported index typenums and the corresponding C++ types
#
I_TYPES = [
('NPY_INT32', 'npy_int32'),
('NPY_INT64', 'npy_int64'),
]
#
# List of the supported data typenums and the corresponding C++ types
#
T_TYPES = [
('NPY_BOOL', 'npy_bool_wrapper'),
('NPY_BYTE', 'npy_byte'),
('NPY_UBYTE', 'npy_ubyte'),
('NPY_SHORT', 'npy_short'),
('NPY_USHORT', 'npy_ushort'),
('NPY_INT', 'npy_int'),
('NPY_UINT', 'npy_uint'),
('NPY_LONG', 'npy_long'),
('NPY_ULONG', 'npy_ulong'),
('NPY_LONGLONG', 'npy_longlong'),
('NPY_ULONGLONG', 'npy_ulonglong'),
('NPY_FLOAT', 'npy_float'),
('NPY_DOUBLE', 'npy_double'),
('NPY_LONGDOUBLE', 'npy_longdouble'),
('NPY_CFLOAT', 'npy_cfloat_wrapper'),
('NPY_CDOUBLE', 'npy_cdouble_wrapper'),
('NPY_CLONGDOUBLE', 'npy_clongdouble_wrapper'),
]
#
# Code templates
#
THUNK_TEMPLATE = """
static PY_LONG_LONG %(name)s_thunk(int I_typenum, int T_typenum, void **a)
{
%(thunk_content)s
}
"""
METHOD_TEMPLATE = """
NPY_VISIBILITY_HIDDEN PyObject *
%(name)s_method(PyObject *self, PyObject *args)
{
return call_thunk('%(ret_spec)s', "%(arg_spec)s", %(name)s_thunk, args);
}
"""
GET_THUNK_CASE_TEMPLATE = """
static int get_thunk_case(int I_typenum, int T_typenum)
{
%(content)s;
return -1;
}
"""
#
# Code generation
#
def get_thunk_type_set():
"""
Get a list containing cartesian product of data types, plus a getter routine.
Returns
-------
i_types : list [(j, I_typenum, None, I_type, None), ...]
Pairing of index type numbers and the corresponding C++ types,
        and a unique index `j`. This is for routines that are parameterized
only by I but not by T.
it_types : list [(j, I_typenum, T_typenum, I_type, T_type), ...]
Same as `i_types`, but for routines parameterized both by T and I.
getter_code : str
C++ code for a function that takes I_typenum, T_typenum and returns
the unique index corresponding to the lists, or -1 if no match was
found.
"""
it_types = []
i_types = []
j = 0
getter_code = " if (0) {}"
for I_typenum, I_type in I_TYPES:
piece = """
else if (I_typenum == %(I_typenum)s) {
if (T_typenum == -1) { return %(j)s; }"""
getter_code += piece % dict(I_typenum=I_typenum, j=j)
i_types.append((j, I_typenum, None, I_type, None))
j += 1
for T_typenum, T_type in T_TYPES:
piece = """
else if (T_typenum == %(T_typenum)s) { return %(j)s; }"""
getter_code += piece % dict(T_typenum=T_typenum, j=j)
it_types.append((j, I_typenum, T_typenum, I_type, T_type))
j += 1
getter_code += """
}"""
return i_types, it_types, GET_THUNK_CASE_TEMPLATE % dict(content=getter_code)
def parse_routine(name, args, types):
"""
Generate thunk and method code for a given routine.
Parameters
----------
name : str
Name of the C++ routine
args : str
Argument list specification (in format explained above)
types : list
        List of types to instantiate, as returned by `get_thunk_type_set`
"""
ret_spec = args[0]
arg_spec = args[1:]
def get_arglist(I_type, T_type):
"""
Generate argument list for calling the C++ function
"""
args = []
next_is_writeable = False
j = 0
for t in arg_spec:
const = '' if next_is_writeable else 'const '
next_is_writeable = False
if t == '*':
next_is_writeable = True
continue
elif t == 'i':
args.append("*(%s*)a[%d]" % (const + I_type, j))
elif t == 'I':
args.append("(%s*)a[%d]" % (const + I_type, j))
elif t == 'T':
args.append("(%s*)a[%d]" % (const + T_type, j))
elif t == 'B':
args.append("(npy_bool_wrapper*)a[%d]" % (j,))
elif t == 'V':
if const:
raise ValueError("'V' argument must be an output arg")
args.append("(std::vector<%s>*)a[%d]" % (I_type, j,))
elif t == 'W':
if const:
raise ValueError("'W' argument must be an output arg")
args.append("(std::vector<%s>*)a[%d]" % (T_type, j,))
elif t == 'l':
args.append("*(%snpy_int64*)a[%d]" % (const, j))
else:
raise ValueError("Invalid spec character %r" % (t,))
j += 1
return ", ".join(args)
# Generate thunk code: a giant switch statement with different
# type combinations inside.
thunk_content = """int j = get_thunk_case(I_typenum, T_typenum);
switch (j) {"""
for j, I_typenum, T_typenum, I_type, T_type in types:
arglist = get_arglist(I_type, T_type)
if T_type is None:
dispatch = "%s" % (I_type,)
else:
dispatch = "%s,%s" % (I_type, T_type)
if 'B' in arg_spec:
dispatch += ",npy_bool_wrapper"
piece = """
case %(j)s:"""
if ret_spec == 'v':
piece += """
(void)%(name)s<%(dispatch)s>(%(arglist)s);
return 0;"""
else:
piece += """
return %(name)s<%(dispatch)s>(%(arglist)s);"""
thunk_content += piece % dict(j=j, I_type=I_type, T_type=T_type,
I_typenum=I_typenum, T_typenum=T_typenum,
arglist=arglist, name=name,
dispatch=dispatch)
thunk_content += """
default:
throw std::runtime_error("internal error: invalid argument typenums");
}"""
thunk_code = THUNK_TEMPLATE % dict(name=name,
thunk_content=thunk_content)
# Generate method code
method_code = METHOD_TEMPLATE % dict(name=name,
ret_spec=ret_spec,
arg_spec=arg_spec)
return thunk_code, method_code
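# Added walk-through comment (illustrative only): reading one routine spec.
# "csr_matvec  v iiIITT*T" expands, per the type codes in the module
# docstring, to a void C++ routine taking (int n_row, int n_col,
# const I* indptr, const I* indices, const T* data, const T* x, T* y),
# where the trailing '*T' marks y as the writeable output argument.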
def main():
p = optparse.OptionParser(usage=__doc__.strip())
p.add_option("--no-force", action="store_false",
dest="force", default=True)
options, args = p.parse_args()
names = []
i_types, it_types, getter_code = get_thunk_type_set()
# Generate *_impl.h for each compilation unit
for unit_name, routines in COMPILATION_UNITS:
thunks = []
methods = []
# Generate thunks and methods for all routines
for line in routines.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
try:
name, args = line.split(None, 1)
except ValueError:
raise ValueError("Malformed line: %r" % (line,))
args = "".join(args.split())
if 't' in args or 'T' in args:
thunk, method = parse_routine(name, args, it_types)
else:
thunk, method = parse_routine(name, args, i_types)
if name in names:
raise ValueError("Duplicate routine %r" % (name,))
names.append(name)
thunks.append(thunk)
methods.append(method)
# Produce output
dst = os.path.join(os.path.dirname(__file__),
'sparsetools',
unit_name + '_impl.h')
if newer(__file__, dst) or options.force:
print("[generate_sparsetools] generating %r" % (dst,))
with open(dst, 'w') as f:
write_autogen_blurb(f)
f.write(getter_code)
for thunk in thunks:
f.write(thunk)
for method in methods:
f.write(method)
else:
print("[generate_sparsetools] %r already up-to-date" % (dst,))
# Generate code for method struct
method_defs = ""
for name in names:
method_defs += "NPY_VISIBILITY_HIDDEN PyObject *%s_method(PyObject *, PyObject *);\n" % (name,)
method_struct = """\nstatic struct PyMethodDef sparsetools_methods[] = {"""
for name in names:
method_struct += """
{"%(name)s", (PyCFunction)%(name)s_method, METH_VARARGS, NULL},""" % dict(name=name)
method_struct += """
{NULL, NULL, 0, NULL}
};"""
# Produce sparsetools_impl.h
dst = os.path.join(os.path.dirname(__file__),
'sparsetools',
'sparsetools_impl.h')
if newer(__file__, dst) or options.force:
print("[generate_sparsetools] generating %r" % (dst,))
with open(dst, 'w') as f:
write_autogen_blurb(f)
f.write(method_defs)
f.write(method_struct)
else:
print("[generate_sparsetools] %r already up-to-date" % (dst,))
def write_autogen_blurb(stream):
stream.write("""\
/* This file is autogenerated by generate_sparsetools.py
* Do not edit manually or check into VCS.
*/
""")
if __name__ == "__main__":
main()
| 12,679 | 28.55711 | 103 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/compressed.py
|
"""Base class for sparse matrix formats using compressed storage."""
from __future__ import division, print_function, absolute_import
__all__ = []
from warnings import warn
import operator
import numpy as np
from scipy._lib.six import zip as izip
from scipy._lib._util import _prune_array
from .base import spmatrix, isspmatrix, SparseEfficiencyWarning
from .data import _data_matrix, _minmax_mixin
from .dia import dia_matrix
from . import _sparsetools
from .sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, IndexMixin, get_index_dtype,
downcast_intp_index, get_sum_dtype, check_shape)
class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
"""base matrix class for compressed row and column oriented matrices"""
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if arg1.format == self.format and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.asformat(self.format)
self._set_self(arg1)
elif isinstance(arg1, tuple):
if isshape(arg1):
# It's a tuple of matrix dimensions (M, N)
# create empty matrix
self._shape = check_shape(arg1)
M, N = self.shape
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
idx_dtype = get_index_dtype(maxval=max(M,N))
self.data = np.zeros(0, getdtype(dtype, default=float))
self.indices = np.zeros(0, idx_dtype)
self.indptr = np.zeros(self._swap((M,N))[0] + 1, dtype=idx_dtype)
else:
if len(arg1) == 2:
# (data, ij) format
from .coo import coo_matrix
other = self.__class__(coo_matrix(arg1, shape=shape))
self._set_self(other)
elif len(arg1) == 3:
# (data, indices, indptr) format
(data, indices, indptr) = arg1
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
maxval = None
if shape is not None:
maxval = max(shape)
idx_dtype = get_index_dtype((indices, indptr), maxval=maxval, check_contents=True)
self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=dtype)
else:
raise ValueError("unrecognized %s_matrix constructor usage" %
self.format)
else:
# must be dense
try:
arg1 = np.asarray(arg1)
            except Exception:
raise ValueError("unrecognized %s_matrix constructor usage" %
self.format)
from .coo import coo_matrix
self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))
# Read matrix dimensions given, if any
if shape is not None:
self._shape = check_shape(shape)
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
major_dim = len(self.indptr) - 1
minor_dim = self.indices.max() + 1
                except Exception:
raise ValueError('unable to infer matrix dimensions')
else:
self._shape = check_shape(self._swap((major_dim,minor_dim)))
if dtype is not None:
self.data = np.asarray(self.data, dtype=dtype)
self.check_format(full_check=False)
def getnnz(self, axis=None):
if axis is None:
return int(self.indptr[-1])
else:
if axis < 0:
axis += 2
axis, _ = self._swap((axis, 1 - axis))
_, N = self._swap(self.shape)
if axis == 0:
return np.bincount(downcast_intp_index(self.indices),
minlength=N)
elif axis == 1:
return np.diff(self.indptr)
raise ValueError('axis out of bounds')
getnnz.__doc__ = spmatrix.getnnz.__doc__
def _set_self(self, other, copy=False):
"""take the member variables of other and assign them to self"""
if copy:
other = other.copy()
self.data = other.data
self.indices = other.indices
self.indptr = other.indptr
self._shape = check_shape(other.shape)
def check_format(self, full_check=True):
"""check whether the matrix format is valid
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
"""
# use _swap to determine proper bounds
major_name,minor_name = self._swap(('row','column'))
major_dim,minor_dim = self._swap(self.shape)
# index arrays should have integer data types
if self.indptr.dtype.kind != 'i':
warn("indptr array has non-integer dtype (%s)"
% self.indptr.dtype.name)
if self.indices.dtype.kind != 'i':
warn("indices array has non-integer dtype (%s)"
% self.indices.dtype.name)
idx_dtype = get_index_dtype((self.indptr, self.indices))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
self.data = to_native(self.data)
# check array shapes
if self.data.ndim != 1 or self.indices.ndim != 1 or self.indptr.ndim != 1:
raise ValueError('data, indices, and indptr should be 1-D')
# check index pointer
if (len(self.indptr) != major_dim + 1):
raise ValueError("index pointer size (%d) should be (%d)" %
(len(self.indptr), major_dim + 1))
if (self.indptr[0] != 0):
raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
raise ValueError("Last value of index pointer should be less than "
"the size of index and data arrays")
self.prune()
if full_check:
# check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= minor_dim:
raise ValueError("%s index values must be < %d" %
(minor_name,minor_dim))
if self.indices.min() < 0:
raise ValueError("%s index values must be >= 0" %
minor_name)
if np.diff(self.indptr).min() < 0:
raise ValueError("index pointer values must form a "
"non-decreasing sequence")
# if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
# self.sort_indices()
# assert(self.has_sorted_indices())
# TODO check for duplicates?
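    # Hedged usage sketch (added): check_format is what rejects malformed
    # constructor input, e.g. an index pointer that does not start at zero:
    #
    #   >>> from scipy.sparse import csr_matrix
    #   >>> csr_matrix(([1.0], [0], [1, 1]), shape=(1, 1))
    #   Traceback (most recent call last):
    #       ...
    #   ValueError: index pointer should start with 0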
#######################
# Boolean comparisons #
#######################
def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
def __eq__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
return self.__class__(self.shape, dtype=np.bool_)
if other == 0:
warn("Comparing a sparse matrix with 0 using == is inefficient"
", try using != instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
inv = self._scalar_binopt(other, operator.ne)
return all_true - inv
else:
return self._scalar_binopt(other, operator.eq)
# Dense other.
elif isdense(other):
return self.todense() == other
# Sparse other.
elif isspmatrix(other):
warn("Comparing sparse matrices using == is inefficient, try using"
" != instead.", SparseEfficiencyWarning)
#TODO sparse broadcasting
if self.shape != other.shape:
return False
elif self.format != other.format:
other = other.asformat(self.format)
res = self._binopt(other,'_ne_')
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true - res
else:
return False
def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is inefficient",
SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Sparse other.
elif isspmatrix(other):
#TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other,'_ne_')
else:
return True
def _inequality(self, other, op, op_name, bad_scalar_msg):
# Scalar other.
if isscalarlike(other):
if 0 == other and op_name in ('_le_', '_ge_'):
raise NotImplementedError(" >= and <= don't work with 0.")
elif op(0, other):
warn(bad_scalar_msg, SparseEfficiencyWarning)
other_arr = np.empty(self.shape, dtype=np.result_type(other))
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
return self._scalar_binopt(other, op)
# Dense other.
elif isdense(other):
return op(self.todense(), other)
# Sparse other.
elif isspmatrix(other):
#TODO sparse broadcasting
if self.shape != other.shape:
raise ValueError("inconsistent shapes")
elif self.format != other.format:
other = other.asformat(self.format)
if op_name not in ('_ge_', '_le_'):
return self._binopt(other, op_name)
warn("Comparing sparse matrices using >= and <= is inefficient, "
"using <, >, or !=, instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape))
res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
return all_true - res
else:
raise ValueError("Operands could not be compared.")
def __lt__(self, other):
return self._inequality(other, operator.lt, '_lt_',
"Comparing a sparse matrix with a scalar "
"greater than zero using < is inefficient, "
"try using >= instead.")
def __gt__(self, other):
return self._inequality(other, operator.gt, '_gt_',
"Comparing a sparse matrix with a scalar "
"less than zero using > is inefficient, "
"try using <= instead.")
def __le__(self, other):
return self._inequality(other, operator.le, '_le_',
"Comparing a sparse matrix with a scalar "
"greater than zero using <= is inefficient, "
"try using > instead.")
def __ge__(self,other):
return self._inequality(other, operator.ge, '_ge_',
"Comparing a sparse matrix with a scalar "
"less than zero using >= is inefficient, "
"try using < instead.")
#################################
# Arithmetic operator overrides #
#################################
def _add_dense(self, other):
if other.shape != self.shape:
raise ValueError('Incompatible shapes.')
dtype = upcast_char(self.dtype.char, other.dtype.char)
order = self._swap('CF')[0]
result = np.array(other, dtype=dtype, order=order, copy=True)
M, N = self._swap(self.shape)
y = result if result.flags.c_contiguous else result.T
_sparsetools.csr_todense(M, N, self.indptr, self.indices, self.data, y)
return np.matrix(result, copy=False)
def _add_sparse(self, other):
return self._binopt(other, '_plus_')
def _sub_sparse(self, other):
return self._binopt(other, '_minus_')
def multiply(self, other):
"""Point-wise multiplication by another matrix, vector, or
scalar.
"""
# Scalar multiplication.
if isscalarlike(other):
return self._mul_scalar(other)
# Sparse matrix or vector.
if isspmatrix(other):
if self.shape == other.shape:
other = self.__class__(other)
return self._binopt(other, '_elmul_')
# Single element.
elif other.shape == (1,1):
return self._mul_scalar(other.toarray()[0, 0])
elif self.shape == (1,1):
return other._mul_scalar(self.toarray()[0, 0])
# A row times a column.
elif self.shape[1] == 1 and other.shape[0] == 1:
return self._mul_sparse_matrix(other.tocsc())
elif self.shape[0] == 1 and other.shape[1] == 1:
return other._mul_sparse_matrix(self.tocsc())
# Row vector times matrix. other is a row.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[1], other.shape[1]))
return self._mul_sparse_matrix(other)
# self is a row.
elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[1], self.shape[1]))
return other._mul_sparse_matrix(copy)
# Column vector times matrix. other is a column.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[0], other.shape[0]))
return other._mul_sparse_matrix(self)
# self is a column.
elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[0], self.shape[0]))
return copy._mul_sparse_matrix(other)
else:
raise ValueError("inconsistent shapes")
# Assume other is a dense matrix/array, which produces a single-item
# object array if other isn't convertible to ndarray.
other = np.atleast_2d(other)
if other.ndim != 2:
return np.multiply(self.toarray(), other)
# Single element / wrapped object.
if other.size == 1:
return self._mul_scalar(other.flat[0])
# Fast case for trivial sparse matrix.
elif self.shape == (1, 1):
return np.multiply(self.toarray()[0,0], other)
from .coo import coo_matrix
ret = self.tocoo()
# Matching shapes.
if self.shape == other.shape:
data = np.multiply(ret.data, other[ret.row, ret.col])
# Sparse row vector times...
elif self.shape[0] == 1:
if other.shape[1] == 1: # Dense column vector.
data = np.multiply(ret.data, other)
elif other.shape[1] == self.shape[1]: # Dense matrix.
data = np.multiply(ret.data, other[:, ret.col])
else:
raise ValueError("inconsistent shapes")
row = np.repeat(np.arange(other.shape[0]), len(ret.row))
col = np.tile(ret.col, other.shape[0])
return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
shape=(other.shape[0], self.shape[1]),
copy=False)
# Sparse column vector times...
elif self.shape[1] == 1:
if other.shape[0] == 1: # Dense row vector.
data = np.multiply(ret.data[:, None], other)
elif other.shape[0] == self.shape[0]: # Dense matrix.
data = np.multiply(ret.data[:, None], other[ret.row])
else:
raise ValueError("inconsistent shapes")
row = np.repeat(ret.row, other.shape[1])
col = np.tile(np.arange(other.shape[1]), len(ret.col))
return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
shape=(self.shape[0], other.shape[1]),
copy=False)
# Sparse matrix times dense row vector.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
data = np.multiply(ret.data, other[:, ret.col].ravel())
# Sparse matrix times dense column vector.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
data = np.multiply(ret.data, other[ret.row].ravel())
else:
raise ValueError("inconsistent shapes")
ret.data = data.view(np.ndarray).ravel()
return ret
###########################
# Multiplication handlers #
###########################
def _mul_vector(self, other):
M,N = self.shape
# output array
result = np.zeros(M, dtype=upcast_char(self.dtype.char,
other.dtype.char))
# csr_matvec or csc_matvec
fn = getattr(_sparsetools,self.format + '_matvec')
fn(M, N, self.indptr, self.indices, self.data, other, result)
return result
def _mul_multivector(self, other):
M,N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M,n_vecs), dtype=upcast_char(self.dtype.char,
other.dtype.char))
# csr_matvecs or csc_matvecs
fn = getattr(_sparsetools,self.format + '_matvecs')
fn(M, N, n_vecs, self.indptr, self.indices, self.data, other.ravel(), result.ravel())
return result
def _mul_sparse_matrix(self, other):
M, K1 = self.shape
K2, N = other.shape
major_axis = self._swap((M,N))[0]
other = self.__class__(other) # convert to this format
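        # Two-pass product: the first pass counts the nonzeros per major
        # row/column to build the output index pointer, the second pass
        # fills in the index and data arrays.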
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=M*N)
indptr = np.empty(major_axis + 1, dtype=idx_dtype)
fn = getattr(_sparsetools, self.format + '_matmat_pass1')
fn(M, N,
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
indptr)
nnz = indptr[-1]
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=nnz)
indptr = np.asarray(indptr, dtype=idx_dtype)
indices = np.empty(nnz, dtype=idx_dtype)
data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
fn = getattr(_sparsetools, self.format + '_matmat_pass2')
fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
return self.__class__((data,indices,indptr),shape=(M,N))
def diagonal(self, k=0):
rows, cols = self.shape
if k <= -rows or k >= cols:
raise ValueError("k exceeds matrix dimensions")
fn = getattr(_sparsetools, self.format + "_diagonal")
y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
dtype=upcast(self.dtype))
fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
self.data, y)
return y
diagonal.__doc__ = spmatrix.diagonal.__doc__
#####################
# Other binary ops #
#####################
def _maximum_minimum(self, other, npop, op_name, dense_check):
if isscalarlike(other):
if dense_check(other):
warn("Taking maximum (minimum) with > 0 (< 0) number results to "
"a dense matrix.",
SparseEfficiencyWarning)
other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
self.sum_duplicates()
new_data = npop(self.data, np.asarray(other))
mat = self.__class__((new_data, self.indices, self.indptr),
dtype=new_data.dtype, shape=self.shape)
return mat
elif isdense(other):
return npop(self.todense(), other)
elif isspmatrix(other):
return self._binopt(other, op_name)
else:
raise ValueError("Operands not compatible.")
def maximum(self, other):
return self._maximum_minimum(other, np.maximum, '_maximum_', lambda x: np.asarray(x) > 0)
maximum.__doc__ = spmatrix.maximum.__doc__
def minimum(self, other):
return self._maximum_minimum(other, np.minimum, '_minimum_', lambda x: np.asarray(x) < 0)
minimum.__doc__ = spmatrix.minimum.__doc__
#####################
# Reduce operations #
#####################
def sum(self, axis=None, dtype=None, out=None):
"""Sum the matrix over the given axis. If the axis is None, sum
over both rows and columns, returning a scalar.
"""
# The spmatrix base class already does axis=0 and axis=1 efficiently
# so we only do the case axis=None here
if (not hasattr(self, 'blocksize') and
axis in self._swap(((1, -1), (0, 2)))[0]):
# faster than multiplication for large minor axis in CSC/CSR
res_dtype = get_sum_dtype(self.dtype)
ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
major_index, value = self._minor_reduce(np.add)
ret[major_index] = value
ret = np.asmatrix(ret)
if axis % 2 == 1:
ret = ret.T
if out is not None and out.shape != ret.shape:
raise ValueError('dimensions do not match')
return ret.sum(axis=(), dtype=dtype, out=out)
# spmatrix will handle the remaining situations when axis
# is in {None, -1, 0, 1}
else:
return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)
sum.__doc__ = spmatrix.sum.__doc__
def _minor_reduce(self, ufunc, data=None):
"""Reduce nonzeros with a ufunc over the minor axis when non-empty
        Can be applied to a function of self.data by supplying the data parameter.
Warning: this does not call sum_duplicates()
Returns
-------
major_index : array of ints
Major indices where nonzero
value : array of self.dtype
Reduce result for nonzeros in each major_index
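        For example, with CSR data=[1, 2, 3] and indptr=[0, 2, 2, 3] (rows
        0 and 2 non-empty), ufunc=np.add yields major_index=[0, 2] and
        value=[3, 3].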
"""
if data is None:
data = self.data
major_index = np.flatnonzero(np.diff(self.indptr))
value = ufunc.reduceat(data,
downcast_intp_index(self.indptr[major_index]))
return major_index, value
#######################
# Getting and Setting #
#######################
def __setitem__(self, index, x):
# Process arrays from IndexMixin
i, j = self._unpack_index(index)
i, j = self._index_to_arrays(i, j)
if isspmatrix(x):
broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
if not ((broadcast_row or x.shape[0] == i.shape[0]) and
(broadcast_col or x.shape[1] == i.shape[1])):
raise ValueError("shape mismatch in assignment")
# clear entries that will be overwritten
ci, cj = self._swap((i.ravel(), j.ravel()))
self._zero_many(ci, cj)
x = x.tocoo(copy=True)
x.sum_duplicates()
r, c = x.row, x.col
x = np.asarray(x.data, dtype=self.dtype)
if broadcast_row:
r = np.repeat(np.arange(i.shape[0]), len(r))
c = np.tile(c, i.shape[0])
x = np.tile(x, i.shape[0])
if broadcast_col:
r = np.repeat(r, i.shape[1])
c = np.tile(np.arange(i.shape[1]), len(c))
x = np.repeat(x, i.shape[1])
# only assign entries in the new sparsity structure
i = i[r, c]
j = j[r, c]
else:
# Make x and i into the same shape
x = np.asarray(x, dtype=self.dtype)
x, _ = np.broadcast_arrays(x, i)
if x.shape != i.shape:
raise ValueError("shape mismatch in assignment")
if np.size(x) == 0:
return
i, j = self._swap((i.ravel(), j.ravel()))
self._set_many(i, j, x.ravel())
def _setdiag(self, values, k):
if 0 in self.shape:
return
M, N = self.shape
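        # a 0-d values array means a single scalar is broadcast along the
        # entire diagonal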
broadcast = (values.ndim == 0)
if k < 0:
if broadcast:
max_index = min(M + k, N)
else:
max_index = min(M + k, N, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
i -= k
else:
if broadcast:
max_index = min(M, N - k)
else:
max_index = min(M, N - k, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
j += k
if not broadcast:
values = values[:len(i)]
self[i, j] = values
def _prepare_indices(self, i, j):
M, N = self._swap(self.shape)
def check_bounds(indices, bound):
idx = indices.max()
if idx >= bound:
raise IndexError('index (%d) out of range (>= %d)' %
(idx, bound))
idx = indices.min()
if idx < -bound:
raise IndexError('index (%d) out of range (< -%d)' %
(idx, bound))
check_bounds(i, M)
check_bounds(j, N)
i = np.asarray(i, dtype=self.indices.dtype)
j = np.asarray(j, dtype=self.indices.dtype)
return i, j, M, N
def _set_many(self, i, j, x):
"""Sets value at each (i, j) to x
Here (i,j) index major and minor respectively, and must not contain
duplicate entries.
"""
i, j, M, N = self._prepare_indices(i, j)
n_samples = len(x)
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices,
n_samples, i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
_sparsetools.csr_sample_offsets(M, N, self.indptr,
self.indices, n_samples, i, j,
offsets)
if -1 not in offsets:
# only affects existing non-zero cells
self.data[offsets] = x
return
else:
warn("Changing the sparsity structure of a %s_matrix is expensive. "
"lil_matrix is more efficient." % self.format,
SparseEfficiencyWarning)
# replace where possible
mask = offsets > -1
self.data[offsets[mask]] = x[mask]
# only insertions remain
mask = ~mask
i = i[mask]
i[i < 0] += M
j = j[mask]
j[j < 0] += N
self._insert_many(i, j, x[mask])
def _zero_many(self, i, j):
"""Sets value at each (i, j) to zero, preserving sparsity structure.
Here (i,j) index major and minor respectively.
"""
i, j, M, N = self._prepare_indices(i, j)
n_samples = len(i)
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices,
n_samples, i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
_sparsetools.csr_sample_offsets(M, N, self.indptr,
self.indices, n_samples, i, j,
offsets)
# only assign zeros to the existing sparsity structure
self.data[offsets[offsets > -1]] = 0
def _insert_many(self, i, j, x):
"""Inserts new nonzero at each (i, j) with value x
Here (i,j) index major and minor respectively.
i, j and x must be non-empty, 1d arrays.
Inserts each major group (e.g. all entries per row) at a time.
Maintains has_sorted_indices property.
Modifies i, j, x in place.
"""
order = np.argsort(i, kind='mergesort') # stable for duplicates
i = i.take(order, mode='clip')
j = j.take(order, mode='clip')
x = x.take(order, mode='clip')
do_sort = self.has_sorted_indices
# Update index data type
idx_dtype = get_index_dtype((self.indices, self.indptr),
maxval=(self.indptr[-1] + x.size))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
i = np.asarray(i, dtype=idx_dtype)
j = np.asarray(j, dtype=idx_dtype)
# Collate old and new in chunks by major index
indices_parts = []
data_parts = []
ui, ui_indptr = np.unique(i, return_index=True)
ui_indptr = np.append(ui_indptr, len(j))
new_nnzs = np.diff(ui_indptr)
prev = 0
for c, (ii, js, je) in enumerate(izip(ui, ui_indptr, ui_indptr[1:])):
# old entries
start = self.indptr[prev]
stop = self.indptr[ii]
indices_parts.append(self.indices[start:stop])
data_parts.append(self.data[start:stop])
# handle duplicate j: keep last setting
uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
if len(uj) == je - js:
indices_parts.append(j[js:je])
data_parts.append(x[js:je])
else:
indices_parts.append(j[js:je][::-1][uj_indptr])
data_parts.append(x[js:je][::-1][uj_indptr])
new_nnzs[c] = len(uj)
prev = ii
# remaining old entries
start = self.indptr[ii]
indices_parts.append(self.indices[start:])
data_parts.append(self.data[start:])
# update attributes
self.indices = np.concatenate(indices_parts)
self.data = np.concatenate(data_parts)
nnzs = np.asarray(np.ediff1d(self.indptr, to_begin=0), dtype=idx_dtype)
nnzs[1:][ui] += new_nnzs
self.indptr = np.cumsum(nnzs, out=nnzs)
if do_sort:
# TODO: only sort where necessary
self.has_sorted_indices = False
self.sort_indices()
self.check_format(full_check=False)
def _get_single_element(self, row, col):
M, N = self.shape
if (row < 0):
row += M
if (col < 0):
col += N
if not (0 <= row < M) or not (0 <= col < N):
raise IndexError("index out of bounds: 0<=%d<%d, 0<=%d<%d" %
(row, M, col, N))
major_index, minor_index = self._swap((row, col))
start = self.indptr[major_index]
end = self.indptr[major_index + 1]
if self.has_sorted_indices:
# Copies may be made, if dtypes of indices are not identical
minor_index = self.indices.dtype.type(minor_index)
minor_indices = self.indices[start:end]
insert_pos_left = np.searchsorted(
minor_indices, minor_index, side='left')
insert_pos_right = insert_pos_left + np.searchsorted(
minor_indices[insert_pos_left:], minor_index, side='right')
return self.data[start + insert_pos_left:
start + insert_pos_right].sum(dtype=self.dtype)
else:
return np.compress(minor_index == self.indices[start:end],
self.data[start:end]).sum(dtype=self.dtype)
def _get_submatrix(self, slice0, slice1):
"""Return a submatrix of this matrix (new matrix is created)."""
slice0, slice1 = self._swap((slice0,slice1))
shape0, shape1 = self._swap(self.shape)
def _process_slice(sl, num):
if isinstance(sl, slice):
i0, i1 = sl.start, sl.stop
if i0 is None:
i0 = 0
elif i0 < 0:
i0 = num + i0
if i1 is None:
i1 = num
elif i1 < 0:
i1 = num + i1
return i0, i1
elif np.isscalar(sl):
if sl < 0:
sl += num
return sl, sl + 1
else:
return sl[0], sl[1]
def _in_bounds(i0, i1, num):
if not (0 <= i0 < num) or not (0 < i1 <= num) or not (i0 < i1):
raise IndexError("index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %
(i0, num, i1, num, i0, i1))
i0, i1 = _process_slice(slice0, shape0)
j0, j1 = _process_slice(slice1, shape1)
_in_bounds(i0, i1, shape0)
_in_bounds(j0, j1, shape1)
aux = _sparsetools.get_csr_submatrix(shape0, shape1,
self.indptr, self.indices,
self.data,
i0, i1, j0, j1)
data, indices, indptr = aux[2], aux[1], aux[0]
shape = self._swap((i1 - i0, j1 - j0))
return self.__class__((data, indices, indptr), shape=shape)
######################
# Conversion methods #
######################
def tocoo(self, copy=True):
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
_sparsetools.expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
from .coo import coo_matrix
return coo_matrix((self.data, (row, col)), self.shape, copy=copy,
dtype=self.dtype)
tocoo.__doc__ = spmatrix.tocoo.__doc__
def toarray(self, order=None, out=None):
if out is None and order is None:
order = self._swap('cf')[0]
out = self._process_toarray_args(order, out)
if not (out.flags.c_contiguous or out.flags.f_contiguous):
raise ValueError('Output array must be C or F contiguous')
# align ideal order with output array order
if out.flags.c_contiguous:
x = self.tocsr()
y = out
else:
x = self.tocsc()
y = out.T
M, N = x._swap(x.shape)
_sparsetools.csr_todense(M, N, x.indptr, x.indices, x.data, y)
return out
toarray.__doc__ = spmatrix.toarray.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
def eliminate_zeros(self):
"""Remove zero entries from the matrix
This is an *in place* operation
"""
M, N = self._swap(self.shape)
_sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
def __get_has_canonical_format(self):
"""Determine whether the matrix has sorted indices and no duplicates
Returns
- True: if the above applies
- False: otherwise
has_canonical_format implies has_sorted_indices, so if the latter flag
is False, so will the former be; if the former is found True, the
latter flag is also set.
"""
# first check to see if result was cached
if not getattr(self, '_has_sorted_indices', True):
# not sorted => not canonical
self._has_canonical_format = False
elif not hasattr(self, '_has_canonical_format'):
self.has_canonical_format = _sparsetools.csr_has_canonical_format(
len(self.indptr) - 1, self.indptr, self.indices)
return self._has_canonical_format
def __set_has_canonical_format(self, val):
self._has_canonical_format = bool(val)
if val:
self.has_sorted_indices = True
has_canonical_format = property(fget=__get_has_canonical_format,
fset=__set_has_canonical_format)
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
        This is an *in place* operation.
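        A small illustrative example, constructing a CSR matrix that
        stores a duplicate entry at (0, 0):
        >>> from scipy.sparse import csr_matrix
        >>> import numpy as np
        >>> A = csr_matrix((np.array([1, 2, 3]), np.array([0, 0, 1]),
        ...                 np.array([0, 2, 3])), shape=(2, 2))
        >>> A.nnz
        3
        >>> A.sum_duplicates()
        >>> A.nnz
        2
        >>> A.toarray()
        array([[3, 0],
               [0, 3]])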
"""
if self.has_canonical_format:
return
self.sort_indices()
M, N = self._swap(self.shape)
_sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
self.has_canonical_format = True
def __get_sorted(self):
"""Determine whether the matrix has sorted indices
Returns
- True: if the indices of the matrix are in sorted order
- False: otherwise
"""
# first check to see if result was cached
if not hasattr(self,'_has_sorted_indices'):
self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(
len(self.indptr) - 1, self.indptr, self.indices)
return self._has_sorted_indices
def __set_sorted(self, val):
self._has_sorted_indices = bool(val)
has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
def sorted_indices(self):
"""Return a copy of this matrix with sorted indices
"""
A = self.copy()
A.sort_indices()
return A
# an alternative that has linear complexity is the following
# although the previous option is typically faster
# return self.toother().toother()
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if not self.has_sorted_indices:
_sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
self.indices, self.data)
self.has_sorted_indices = True
def prune(self):
"""Remove empty space after all non-zero elements.
"""
major_dim = self._swap(self.shape)[0]
if len(self.indptr) != major_dim + 1:
raise ValueError('index pointer has invalid length')
if len(self.indices) < self.nnz:
raise ValueError('indices array has fewer than nnz elements')
if len(self.data) < self.nnz:
raise ValueError('data array has fewer than nnz elements')
self.indices = _prune_array(self.indices[:self.nnz])
self.data = _prune_array(self.data[:self.nnz])
def resize(self, *shape):
shape = check_shape(shape)
if hasattr(self, 'blocksize'):
bm, bn = self.blocksize
new_M, rm = divmod(shape[0], bm)
new_N, rn = divmod(shape[1], bn)
if rm or rn:
raise ValueError("shape must be divisible into %s blocks. "
"Got %s" % (self.blocksize, shape))
M, N = self.shape[0] // bm, self.shape[1] // bn
else:
new_M, new_N = self._swap(shape)
M, N = self._swap(self.shape)
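        # grow or shrink along the major axis by extending/truncating the
        # index pointer, then drop entries whose minor index falls outside
        # the new shape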
if new_M < M:
self.indices = self.indices[:self.indptr[new_M]]
self.data = self.data[:self.indptr[new_M]]
self.indptr = self.indptr[:new_M + 1]
elif new_M > M:
self.indptr = np.resize(self.indptr, new_M + 1)
self.indptr[M + 1:].fill(self.indptr[M])
if new_N < N:
mask = self.indices < new_N
if not np.all(mask):
self.indices = self.indices[mask]
self.data = self.data[mask]
major_index, val = self._minor_reduce(np.add, mask)
self.indptr.fill(0)
self.indptr[1:][major_index] = val
np.cumsum(self.indptr, out=self.indptr)
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
###################
# utility methods #
###################
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data,self.indices.copy(),self.indptr.copy()),
shape=self.shape,dtype=data.dtype)
else:
return self.__class__((data,self.indices,self.indptr),
shape=self.shape,dtype=data.dtype)
def _binopt(self, other, op):
"""apply the binary operation fn to two sparse matrices."""
other = self.__class__(other)
# e.g. csr_plus_csr, csr_minus_csr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
maxnnz = self.nnz + other.nnz
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=maxnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(maxnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(maxnnz, dtype=np.bool_)
else:
data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
fn(self.shape[0], self.shape[1],
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
A = self.__class__((data, indices, indptr), shape=self.shape)
A.prune()
return A
def _divide_sparse(self, other):
"""
Divide this matrix by a second sparse matrix.
"""
if other.shape != self.shape:
raise ValueError('inconsistent shapes')
r = self._binopt(other, '_eldiv_')
if np.issubdtype(r.dtype, np.inexact):
# Eldiv leaves entries outside the combined sparsity
# pattern empty, so they must be filled manually.
# Everything outside of other's sparsity is NaN, and everything
# inside it is either zero or defined by eldiv.
out = np.empty(self.shape, dtype=self.dtype)
out.fill(np.nan)
row, col = other.nonzero()
out[row, col] = 0
r = r.tocoo()
out[r.row, r.col] = r.data
out = np.matrix(out)
else:
            # for integer dtypes nan is not representable, so entries
            # outside the sparsity pattern simply remain 0
out = r
return out
| 46,311 | 37.917647 | 102 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/setup.py
|
from __future__ import division, print_function, absolute_import
import os
import sys
import subprocess
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('sparse',parent_package,top_path)
config.add_data_dir('tests')
config.add_subpackage('linalg')
config.add_subpackage('csgraph')
config.add_extension('_csparsetools',
sources=['_csparsetools.c'])
def get_sparsetools_sources(ext, build_dir):
# Defer generation of source files
subprocess.check_call([sys.executable,
os.path.join(os.path.dirname(__file__),
'generate_sparsetools.py'),
'--no-force'])
return []
depends = ['sparsetools_impl.h',
'bsr_impl.h',
'csc_impl.h',
'csr_impl.h',
'other_impl.h',
'bool_ops.h',
'bsr.h',
'complex_ops.h',
'coo.h',
'csc.h',
'csgraph.h',
'csr.h',
'dense.h',
'dia.h',
'py3k.h',
'sparsetools.h',
'util.h']
    depends = [os.path.join('sparsetools', hdr) for hdr in depends]
config.add_extension('_sparsetools',
define_macros=[('__STDC_FORMAT_MACROS', 1)],
depends=depends,
include_dirs=['sparsetools'],
sources=[os.path.join('sparsetools', 'sparsetools.cxx'),
os.path.join('sparsetools', 'csr.cxx'),
os.path.join('sparsetools', 'csc.cxx'),
os.path.join('sparsetools', 'bsr.cxx'),
os.path.join('sparsetools', 'other.cxx'),
get_sparsetools_sources]
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 2,185 | 32.630769 | 81 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/bsr.py
|
"""Compressed Block Sparse Row matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['bsr_matrix', 'isspmatrix_bsr']
from warnings import warn
import numpy as np
from .data import _data_matrix, _minmax_mixin
from .compressed import _cs_matrix
from .base import isspmatrix, _formats, spmatrix
from .sputils import (isshape, getdtype, to_native, upcast, get_index_dtype,
check_shape)
from . import _sparsetools
from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_pass1,
bsr_matmat_pass2, bsr_transpose, bsr_sort_indices)
class bsr_matrix(_cs_matrix, _minmax_mixin):
"""Block Sparse Row matrix
This can be instantiated in several ways:
bsr_matrix(D, [blocksize=(R,C)])
where D is a dense matrix or 2-D ndarray.
bsr_matrix(S, [blocksize=(R,C)])
with another sparse matrix S (equivalent to S.tobsr())
bsr_matrix((M, N), [blocksize=(R,C), dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
bsr_matrix((data, indices, indptr), [shape=(M, N)])
is the standard BSR representation where the block column
indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding block values are stored in
``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
supplied, the matrix dimensions are inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
Data array of the matrix
indices
BSR format index array
indptr
BSR format index pointer array
blocksize
Block size of the matrix
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
**Summary of BSR format**
The Block Compressed Row (BSR) format is very similar to the Compressed
Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
sub matrices like the last example below. Block matrices often arise in
vector-valued finite element discretizations. In such cases, BSR is
considerably more efficient than CSR and CSC for many sparse arithmetic
operations.
**Blocksize**
The blocksize (R,C) must evenly divide the shape of the matrix (M,N).
That is, R and C must satisfy the relationship ``M % R = 0`` and
``N % C = 0``.
If no blocksize is specified, a simple heuristic is applied to determine
an appropriate blocksize.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> bsr_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 0, 1, 2, 2, 2])
>>> col = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3 ,4, 5, 6])
>>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
>>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
array([[1, 1, 0, 0, 2, 2],
[1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 3, 3],
[0, 0, 0, 0, 3, 3],
[4, 4, 5, 5, 6, 6],
[4, 4, 5, 5, 6, 6]])
"""
format = 'bsr'
def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if isspmatrix_bsr(arg1) and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.tobsr(blocksize=blocksize)
self._set_self(arg1)
elif isinstance(arg1,tuple):
if isshape(arg1):
# it's a tuple of matrix dimensions (M,N)
self._shape = check_shape(arg1)
M,N = self.shape
# process blocksize
if blocksize is None:
blocksize = (1,1)
else:
if not isshape(blocksize):
raise ValueError('invalid blocksize=%s' % blocksize)
blocksize = tuple(blocksize)
self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))
R,C = blocksize
if (M % R) != 0 or (N % C) != 0:
raise ValueError('shape must be multiple of blocksize')
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C))
self.indices = np.zeros(0, dtype=idx_dtype)
self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)
elif len(arg1) == 2:
# (data,(row,col)) format
from .coo import coo_matrix
self._set_self(coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize))
elif len(arg1) == 3:
# (data,indices,indptr) format
(data, indices, indptr) = arg1
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
maxval = 1
if shape is not None:
maxval = max(shape)
if blocksize is not None:
maxval = max(maxval, max(blocksize))
idx_dtype = get_index_dtype((indices, indptr), maxval=maxval, check_contents=True)
self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data))
else:
raise ValueError('unrecognized bsr_matrix constructor usage')
else:
# must be dense
try:
arg1 = np.asarray(arg1)
            except Exception:
raise ValueError("unrecognized form for"
" %s_matrix constructor" % self.format)
from .coo import coo_matrix
arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)
self._set_self(arg1)
if shape is not None:
self._shape = check_shape(shape)
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
M = len(self.indptr) - 1
N = self.indices.max() + 1
                except Exception:
raise ValueError('unable to infer matrix dimensions')
else:
R,C = self.blocksize
self._shape = check_shape((M*R,N*C))
if self.shape is None:
if shape is None:
# TODO infer shape here
raise ValueError('need to infer shape')
else:
self._shape = check_shape(shape)
if dtype is not None:
self.data = self.data.astype(dtype)
self.check_format(full_check=False)
def check_format(self, full_check=True):
"""check whether the matrix format is valid
*Parameters*:
full_check:
True - rigorous check, O(N) operations : default
False - basic check, O(1) operations
"""
M,N = self.shape
R,C = self.blocksize
# index arrays should have integer data types
if self.indptr.dtype.kind != 'i':
warn("indptr array has non-integer dtype (%s)"
% self.indptr.dtype.name)
if self.indices.dtype.kind != 'i':
warn("indices array has non-integer dtype (%s)"
% self.indices.dtype.name)
idx_dtype = get_index_dtype((self.indices, self.indptr))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
self.data = to_native(self.data)
# check array shapes
if self.indices.ndim != 1 or self.indptr.ndim != 1:
raise ValueError("indices, and indptr should be 1-D")
if self.data.ndim != 3:
raise ValueError("data should be 3-D")
# check index pointer
if (len(self.indptr) != M//R + 1):
raise ValueError("index pointer size (%d) should be (%d)" %
(len(self.indptr), M//R + 1))
if (self.indptr[0] != 0):
raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
raise ValueError("Last value of index pointer should be less than "
"the size of index and data arrays")
self.prune()
if full_check:
# check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= N//C:
raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max()))
if self.indices.min() < 0:
raise ValueError("column index values must be >= 0")
if np.diff(self.indptr).min() < 0:
raise ValueError("index pointer values must form a "
"non-decreasing sequence")
# if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
# self.sort_indices(check_first=False)
def _get_blocksize(self):
return self.data.shape[1:]
blocksize = property(fget=_get_blocksize)
def getnnz(self, axis=None):
if axis is not None:
raise NotImplementedError("getnnz over an axis is not implemented "
"for BSR format")
R,C = self.blocksize
return int(self.indptr[-1] * R * C)
getnnz.__doc__ = spmatrix.getnnz.__doc__
def __repr__(self):
format = _formats[self.getformat()][1]
return ("<%dx%d sparse matrix of type '%s'\n"
"\twith %d stored elements (blocksize = %dx%d) in %s format>" %
(self.shape + (self.dtype.type, self.nnz) + self.blocksize +
(format,)))
def diagonal(self, k=0):
rows, cols = self.shape
if k <= -rows or k >= cols:
raise ValueError("k exceeds matrix dimensions")
R, C = self.blocksize
y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
dtype=upcast(self.dtype))
_sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C,
self.indptr, self.indices,
np.ravel(self.data), y)
return y
diagonal.__doc__ = spmatrix.diagonal.__doc__
##########################
# NotImplemented methods #
##########################
def __getitem__(self,key):
raise NotImplementedError
def __setitem__(self,key,val):
raise NotImplementedError
######################
# Arithmetic methods #
######################
@np.deprecate(message="BSR matvec is deprecated in scipy 0.19.0. "
"Use * operator instead.")
def matvec(self, other):
"""Multiply matrix by vector."""
return self * other
@np.deprecate(message="BSR matmat is deprecated in scipy 0.19.0. "
"Use * operator instead.")
def matmat(self, other):
"""Multiply this sparse matrix by other matrix."""
return self * other
def _add_dense(self, other):
return self.tocoo(copy=False)._add_dense(other)
def _mul_vector(self, other):
M,N = self.shape
R,C = self.blocksize
result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
bsr_matvec(M//R, N//C, R, C,
self.indptr, self.indices, self.data.ravel(),
other, result)
return result
def _mul_multivector(self,other):
R,C = self.blocksize
M,N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))
bsr_matvecs(M//R, N//C, n_vecs, R, C,
self.indptr, self.indices, self.data.ravel(),
other.ravel(), result.ravel())
return result
def _mul_sparse_matrix(self, other):
M, K1 = self.shape
K2, N = other.shape
R,n = self.blocksize
# convert to this format
if isspmatrix_bsr(other):
C = other.blocksize[1]
else:
C = 1
from .csr import isspmatrix_csr
if isspmatrix_csr(other) and n == 1:
other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion
else:
other = other.tobsr(blocksize=(n,C))
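        # two-pass block product: csr_matmat_pass1 computes the block-level
        # index pointer, bsr_matmat_pass2 fills in the block indices and
        # the dense R x C block values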
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=(M//R)*(N//C))
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
csr_matmat_pass1(M//R, N//C,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
indptr)
bnnz = indptr[-1]
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=bnnz)
indptr = indptr.astype(idx_dtype)
indices = np.empty(bnnz, dtype=idx_dtype)
data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))
bsr_matmat_pass2(M//R, N//C, R, C, n,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
np.ravel(self.data),
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
np.ravel(other.data),
indptr,
indices,
data)
data = data.reshape(-1,R,C)
# TODO eliminate zeros
return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C))
######################
# Conversion methods #
######################
def tobsr(self, blocksize=None, copy=False):
"""Convert this matrix into Block Sparse Row Format.
With copy=False, the data/indices may be shared between this
matrix and the resultant bsr_matrix.
        If blocksize=(R, C) is provided, it will be used for determining
        the block size of the bsr_matrix.
"""
if blocksize not in [None, self.blocksize]:
return self.tocsr().tobsr(blocksize=blocksize)
if copy:
return self.copy()
else:
return self
def tocsr(self, copy=False):
return self.tocoo(copy=False).tocsr(copy=copy)
# TODO make this more efficient
tocsr.__doc__ = spmatrix.tocsr.__doc__
def tocsc(self, copy=False):
return self.tocoo(copy=False).tocsc(copy=copy)
tocsc.__doc__ = spmatrix.tocsc.__doc__
def tocoo(self, copy=True):
"""Convert this matrix to COOrdinate format.
When copy=False the data array will be shared between
this matrix and the resultant coo_matrix.
"""
M,N = self.shape
R,C = self.blocksize
indptr_diff = np.diff(self.indptr)
if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
# Check for potential overflow
indptr_diff_limited = indptr_diff.astype(np.intp)
if np.any(indptr_diff_limited != indptr_diff):
raise ValueError("Matrix too big to convert")
indptr_diff = indptr_diff_limited
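        # expand each R x C block into explicit coordinates: every block
        # contributes R*C entries whose row (column) index is the block's
        # base offset plus the offset within the block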
row = (R * np.arange(M//R)).repeat(indptr_diff)
row = row.repeat(R*C).reshape(-1,R,C)
row += np.tile(np.arange(R).reshape(-1,1), (1,C))
row = row.reshape(-1)
col = (C * self.indices).repeat(R*C).reshape(-1,R,C)
col += np.tile(np.arange(C), (R,1))
col = col.reshape(-1)
data = self.data.reshape(-1)
if copy:
data = data.copy()
from .coo import coo_matrix
return coo_matrix((data,(row,col)), shape=self.shape)
def toarray(self, order=None, out=None):
return self.tocoo(copy=False).toarray(order=order, out=out)
toarray.__doc__ = spmatrix.toarray.__doc__
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
R, C = self.blocksize
M, N = self.shape
NBLK = self.nnz//(R*C)
if self.nnz == 0:
return bsr_matrix((N, M), blocksize=(C, R),
dtype=self.dtype, copy=copy)
indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
indices = np.empty(NBLK, dtype=self.indices.dtype)
data = np.empty((NBLK, C, R), dtype=self.data.dtype)
bsr_transpose(M//R, N//C, R, C,
self.indptr, self.indices, self.data.ravel(),
indptr, indices, data.ravel())
return bsr_matrix((data, indices, indptr),
shape=(N, M), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
def eliminate_zeros(self):
"""Remove zero elements in-place."""
R,C = self.blocksize
M,N = self.shape
mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks
nonzero_blocks = mask.nonzero()[0]
if len(nonzero_blocks) == 0:
return # nothing to do
self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]
# modifies self.indptr and self.indices *in place*
_sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,
self.indices, mask)
self.prune()
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
        This is an *in place* operation.
"""
if self.has_canonical_format:
return
self.sort_indices()
R, C = self.blocksize
M, N = self.shape
# port of _sparsetools.csr_sum_duplicates
n_row = M // R
nnz = 0
row_end = 0
for i in range(n_row):
jj = row_end
row_end = self.indptr[i+1]
while jj < row_end:
j = self.indices[jj]
x = self.data[jj]
jj += 1
while jj < row_end and self.indices[jj] == j:
x += self.data[jj]
jj += 1
self.indices[nnz] = j
self.data[nnz] = x
nnz += 1
self.indptr[i+1] = nnz
self.prune() # nnz may have changed
self.has_canonical_format = True
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if self.has_sorted_indices:
return
R,C = self.blocksize
M,N = self.shape
bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())
self.has_sorted_indices = True
def prune(self):
""" Remove empty space after all non-zero elements.
"""
R,C = self.blocksize
M,N = self.shape
if len(self.indptr) != M//R + 1:
raise ValueError("index pointer has invalid length")
bnnz = self.indptr[-1]
if len(self.indices) < bnnz:
raise ValueError("indices array has too few elements")
if len(self.data) < bnnz:
raise ValueError("data array has too few elements")
self.data = self.data[:bnnz]
self.indices = self.indices[:bnnz]
# utility functions
def _binopt(self, other, op, in_shape=None, out_shape=None):
"""Apply the binary operation fn to two sparse matrices."""
# Ideally we'd take the GCDs of the blocksize dimensions
# and explode self and other to match.
other = self.__class__(other, blocksize=self.blocksize)
# e.g. bsr_plus_bsr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
R,C = self.blocksize
max_bnnz = len(self.data) + len(other.data)
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=max_bnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(max_bnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(R*C*max_bnnz, dtype=np.bool_)
else:
data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))
fn(self.shape[0]//R, self.shape[1]//C, R, C,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
np.ravel(other.data),
indptr,
indices,
data)
actual_bnnz = indptr[-1]
indices = indices[:actual_bnnz]
data = data[:R*C*actual_bnnz]
if actual_bnnz < max_bnnz/2:
indices = indices.copy()
data = data.copy()
data = data.reshape(-1,R,C)
return self.__class__((data, indices, indptr), shape=self.shape)
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data,self.indices.copy(),self.indptr.copy()),
shape=self.shape,dtype=data.dtype)
else:
return self.__class__((data,self.indices,self.indptr),
shape=self.shape,dtype=data.dtype)
# # these functions are used by the parent class
    # # to remove redundancy between bsc_matrix and bsr_matrix
# def _swap(self,x):
# """swap the members of x if this is a column-oriented matrix
# """
# return (x[0],x[1])
def isspmatrix_bsr(x):
"""Is x of a bsr_matrix type?
Parameters
----------
x
object to check for being a bsr matrix
Returns
-------
bool
True if x is a bsr matrix, False otherwise
Examples
--------
>>> from scipy.sparse import bsr_matrix, isspmatrix_bsr
>>> isspmatrix_bsr(bsr_matrix([[5]]))
True
>>> from scipy.sparse import bsr_matrix, csr_matrix, isspmatrix_bsr
>>> isspmatrix_bsr(csr_matrix([[5]]))
False
"""
return isinstance(x, bsr_matrix)
| 24,364 | 33.658606 | 114 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/dok.py
|
"""Dictionary Of Keys based matrix"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['dok_matrix', 'isspmatrix_dok']
import functools
import operator
import itertools
import numpy as np
from scipy._lib.six import zip as izip, xrange, iteritems, iterkeys, itervalues
from .base import spmatrix, isspmatrix
from .sputils import (isdense, getdtype, isshape, isintlike, isscalarlike,
upcast, upcast_scalar, IndexMixin, get_index_dtype,
check_shape)
try:
from operator import isSequenceType as _is_sequence
except ImportError:
def _is_sequence(x):
return (hasattr(x, '__len__') or hasattr(x, '__next__')
or hasattr(x, 'next'))
class dok_matrix(spmatrix, IndexMixin, dict):
"""
Dictionary Of Keys based sparse matrix.
This is an efficient structure for constructing sparse
matrices incrementally.
This can be instantiated in several ways:
dok_matrix(D)
with a dense matrix, D
dok_matrix(S)
with a sparse matrix, S
dok_matrix((M,N), [dtype])
create the matrix with initial shape (M,N)
dtype is optional, defaulting to dtype='d'
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Allows for efficient O(1) access of individual elements.
Duplicates are not allowed.
Can be efficiently converted to a coo_matrix once constructed.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import dok_matrix
>>> S = dok_matrix((5, 5), dtype=np.float32)
>>> for i in range(5):
... for j in range(5):
... S[i, j] = i + j # Update element
"""
format = 'dok'
def __init__(self, arg1, shape=None, dtype=None, copy=False):
dict.__init__(self)
spmatrix.__init__(self)
self.dtype = getdtype(dtype, default=float)
if isinstance(arg1, tuple) and isshape(arg1): # (M,N)
M, N = arg1
self._shape = check_shape((M, N))
elif isspmatrix(arg1): # Sparse ctor
if isspmatrix_dok(arg1) and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.todok()
if dtype is not None:
arg1 = arg1.astype(dtype)
dict.update(self, arg1)
self._shape = check_shape(arg1.shape)
self.dtype = arg1.dtype
else: # Dense ctor
try:
arg1 = np.asarray(arg1)
            except Exception:
raise TypeError('Invalid input format.')
if len(arg1.shape) != 2:
raise TypeError('Expected rank <=2 dense array or matrix.')
from .coo import coo_matrix
d = coo_matrix(arg1, dtype=dtype).todok()
dict.update(self, d)
self._shape = check_shape(arg1.shape)
self.dtype = d.dtype
def update(self, val):
# Prevent direct usage of update
raise NotImplementedError("Direct modification to dok_matrix element "
"is not allowed.")
def _update(self, data):
"""An update method for dict data defined for direct access to
        `dok_matrix` data. Its main purpose is efficient conversion from
        other spmatrix classes. It does not check whether `data` is valid."""
return dict.update(self, data)
def set_shape(self, shape):
new_matrix = self.reshape(shape, copy=False).asformat(self.format)
self.__dict__ = new_matrix.__dict__
dict.clear(self)
dict.update(self, new_matrix)
shape = property(fget=spmatrix.get_shape, fset=set_shape)
def getnnz(self, axis=None):
if axis is not None:
raise NotImplementedError("getnnz over an axis is not implemented "
"for DOK format.")
return dict.__len__(self)
def count_nonzero(self):
return sum(x != 0 for x in itervalues(self))
getnnz.__doc__ = spmatrix.getnnz.__doc__
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def __len__(self):
return dict.__len__(self)
def get(self, key, default=0.):
"""This overrides the dict.get method, providing type checking
but otherwise equivalent functionality.
"""
try:
i, j = key
assert isintlike(i) and isintlike(j)
except (AssertionError, TypeError, ValueError):
raise IndexError('Index must be a pair of integers.')
if (i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]):
raise IndexError('Index out of bounds.')
return dict.get(self, key, default)
def __getitem__(self, index):
"""If key=(i, j) is a pair of integers, return the corresponding
element. If either i or j is a slice or sequence, return a new sparse
matrix with just these elements.
"""
zero = self.dtype.type(0)
i, j = self._unpack_index(index)
i_intlike = isintlike(i)
j_intlike = isintlike(j)
if i_intlike and j_intlike:
i = int(i)
j = int(j)
if i < 0:
i += self.shape[0]
if i < 0 or i >= self.shape[0]:
raise IndexError('Index out of bounds.')
if j < 0:
j += self.shape[1]
if j < 0 or j >= self.shape[1]:
raise IndexError('Index out of bounds.')
return dict.get(self, (i,j), zero)
elif ((i_intlike or isinstance(i, slice)) and
(j_intlike or isinstance(j, slice))):
# Fast path for slicing very sparse matrices
i_slice = slice(i, i+1) if i_intlike else i
j_slice = slice(j, j+1) if j_intlike else j
i_indices = i_slice.indices(self.shape[0])
j_indices = j_slice.indices(self.shape[1])
i_seq = xrange(*i_indices)
j_seq = xrange(*j_indices)
newshape = (len(i_seq), len(j_seq))
newsize = _prod(newshape)
if len(self) < 2*newsize and newsize != 0:
# Switch to the fast path only when advantageous
# (count the iterations in the loops, adjust for complexity)
#
# We also don't handle newsize == 0 here (if
# i/j_intlike, it can mean index i or j was out of
# bounds)
return self._getitem_ranges(i_indices, j_indices, newshape)
i, j = self._index_to_arrays(i, j)
if i.size == 0:
return dok_matrix(i.shape, dtype=self.dtype)
min_i = i.min()
if min_i < -self.shape[0] or i.max() >= self.shape[0]:
raise IndexError('Index (%d) out of range -%d to %d.' %
(i.min(), self.shape[0], self.shape[0]-1))
if min_i < 0:
i = i.copy()
i[i < 0] += self.shape[0]
min_j = j.min()
if min_j < -self.shape[1] or j.max() >= self.shape[1]:
raise IndexError('Index (%d) out of range -%d to %d.' %
(j.min(), self.shape[1], self.shape[1]-1))
if min_j < 0:
j = j.copy()
j[j < 0] += self.shape[1]
newdok = dok_matrix(i.shape, dtype=self.dtype)
for key in itertools.product(xrange(i.shape[0]), xrange(i.shape[1])):
v = dict.get(self, (i[key], j[key]), zero)
if v:
dict.__setitem__(newdok, key, v)
return newdok
def _getitem_ranges(self, i_indices, j_indices, shape):
# performance golf: we don't want Numpy scalars here, they are slow
i_start, i_stop, i_stride = map(int, i_indices)
j_start, j_stop, j_stride = map(int, j_indices)
newdok = dok_matrix(shape, dtype=self.dtype)
for (ii, jj) in iterkeys(self):
# ditto for numpy scalars
ii = int(ii)
jj = int(jj)
a, ra = divmod(ii - i_start, i_stride)
if a < 0 or a >= shape[0] or ra != 0:
continue
b, rb = divmod(jj - j_start, j_stride)
if b < 0 or b >= shape[1] or rb != 0:
continue
dict.__setitem__(newdok, (a, b),
dict.__getitem__(self, (ii, jj)))
return newdok
def __setitem__(self, index, x):
if isinstance(index, tuple) and len(index) == 2:
# Integer index fast path
i, j = index
if (isintlike(i) and isintlike(j) and 0 <= i < self.shape[0]
and 0 <= j < self.shape[1]):
v = np.asarray(x, dtype=self.dtype)
if v.ndim == 0 and v != 0:
dict.__setitem__(self, (int(i), int(j)), v[()])
return
i, j = self._unpack_index(index)
i, j = self._index_to_arrays(i, j)
if isspmatrix(x):
x = x.toarray()
# Make x and i into the same shape
x = np.asarray(x, dtype=self.dtype)
x, _ = np.broadcast_arrays(x, i)
if x.shape != i.shape:
raise ValueError("Shape mismatch in assignment.")
if np.size(x) == 0:
return
min_i = i.min()
if min_i < -self.shape[0] or i.max() >= self.shape[0]:
raise IndexError('Index (%d) out of range -%d to %d.' %
(i.min(), self.shape[0], self.shape[0]-1))
if min_i < 0:
i = i.copy()
i[i < 0] += self.shape[0]
min_j = j.min()
if min_j < -self.shape[1] or j.max() >= self.shape[1]:
raise IndexError('Index (%d) out of range -%d to %d.' %
(j.min(), self.shape[1], self.shape[1]-1))
if min_j < 0:
j = j.copy()
j[j < 0] += self.shape[1]
dict.update(self, izip(izip(i.flat, j.flat), x.flat))
if 0 in x:
zeroes = x == 0
for key in izip(i[zeroes].flat, j[zeroes].flat):
if dict.__getitem__(self, key) == 0:
# may have been superseded by later update
del self[key]
def __add__(self, other):
if isscalarlike(other):
res_dtype = upcast_scalar(self.dtype, other)
new = dok_matrix(self.shape, dtype=res_dtype)
# Add this scalar to every element.
M, N = self.shape
for key in itertools.product(xrange(M), xrange(N)):
                aij = dict.get(self, key, 0) + other
if aij:
new[key] = aij
# new.dtype.char = self.dtype.char
elif isspmatrix_dok(other):
if other.shape != self.shape:
raise ValueError("Matrix dimensions are not equal.")
# We could alternatively set the dimensions to the largest of
# the two matrices to be summed. Would this be a good idea?
res_dtype = upcast(self.dtype, other.dtype)
new = dok_matrix(self.shape, dtype=res_dtype)
dict.update(new, self)
with np.errstate(over='ignore'):
dict.update(new,
((k, new[k] + other[k]) for k in iterkeys(other)))
elif isspmatrix(other):
csc = self.tocsc()
new = csc + other
elif isdense(other):
new = self.todense() + other
else:
return NotImplemented
return new
def __radd__(self, other):
if isscalarlike(other):
new = dok_matrix(self.shape, dtype=self.dtype)
M, N = self.shape
for key in itertools.product(xrange(M), xrange(N)):
                aij = dict.get(self, key, 0) + other
if aij:
new[key] = aij
elif isspmatrix_dok(other):
if other.shape != self.shape:
raise ValueError("Matrix dimensions are not equal.")
new = dok_matrix(self.shape, dtype=self.dtype)
dict.update(new, self)
dict.update(new,
((k, self[k] + other[k]) for k in iterkeys(other)))
elif isspmatrix(other):
csc = self.tocsc()
new = csc + other
elif isdense(other):
new = other + self.todense()
else:
return NotImplemented
return new
def __neg__(self):
if self.dtype.kind == 'b':
raise NotImplementedError('Negating a sparse boolean matrix is not'
' supported.')
new = dok_matrix(self.shape, dtype=self.dtype)
dict.update(new, ((k, -self[k]) for k in iterkeys(self)))
return new
def _mul_scalar(self, other):
res_dtype = upcast_scalar(self.dtype, other)
# Multiply this scalar by every element.
new = dok_matrix(self.shape, dtype=res_dtype)
dict.update(new, ((k, v * other) for k, v in iteritems(self)))
return new
def _mul_vector(self, other):
# matrix * vector
result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
for (i, j), v in iteritems(self):
result[i] += v * other[j]
return result
def _mul_multivector(self, other):
# matrix * multivector
result_shape = (self.shape[0], other.shape[1])
result_dtype = upcast(self.dtype, other.dtype)
result = np.zeros(result_shape, dtype=result_dtype)
for (i, j), v in iteritems(self):
result[i,:] += v * other[j,:]
return result
def __imul__(self, other):
if isscalarlike(other):
dict.update(self, ((k, v * other) for k, v in iteritems(self)))
return self
return NotImplemented
def __truediv__(self, other):
if isscalarlike(other):
res_dtype = upcast_scalar(self.dtype, other)
new = dok_matrix(self.shape, dtype=res_dtype)
dict.update(new, ((k, v / other) for k, v in iteritems(self)))
return new
return self.tocsr() / other
def __itruediv__(self, other):
if isscalarlike(other):
dict.update(self, ((k, v / other) for k, v in iteritems(self)))
return self
return NotImplemented
def __reduce__(self):
# this approach is necessary because __setstate__ is called after
# __setitem__ upon unpickling and since __init__ is not called there
# is no shape attribute hence it is not possible to unpickle it.
return dict.__reduce__(self)
# What should len(sparse) return? For consistency with dense matrices,
# perhaps it should be the number of rows? For now it returns the number
# of non-zeros.
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation.")
M, N = self.shape
new = dok_matrix((N, M), dtype=self.dtype, copy=copy)
dict.update(new, (((right, left), val)
for (left, right), val in iteritems(self)))
return new
transpose.__doc__ = spmatrix.transpose.__doc__
def conjtransp(self):
"""Return the conjugate transpose."""
M, N = self.shape
new = dok_matrix((N, M), dtype=self.dtype)
dict.update(new, (((right, left), np.conj(val))
for (left, right), val in iteritems(self)))
return new
def copy(self):
new = dok_matrix(self.shape, dtype=self.dtype)
dict.update(new, self)
return new
copy.__doc__ = spmatrix.copy.__doc__
def getrow(self, i):
"""Returns the i-th row as a (1 x n) DOK matrix."""
new = dok_matrix((1, self.shape[1]), dtype=self.dtype)
dict.update(new, (((0, j), self[i, j]) for j in xrange(self.shape[1])))
return new
def getcol(self, j):
"""Returns the j-th column as a (m x 1) DOK matrix."""
new = dok_matrix((self.shape[0], 1), dtype=self.dtype)
dict.update(new, (((i, 0), self[i, j]) for i in xrange(self.shape[0])))
return new
def tocoo(self, copy=False):
from .coo import coo_matrix
if self.nnz == 0:
return coo_matrix(self.shape, dtype=self.dtype)
idx_dtype = get_index_dtype(maxval=max(self.shape))
data = np.fromiter(itervalues(self), dtype=self.dtype, count=self.nnz)
row = np.fromiter((i for i, _ in iterkeys(self)), dtype=idx_dtype, count=self.nnz)
col = np.fromiter((j for _, j in iterkeys(self)), dtype=idx_dtype, count=self.nnz)
A = coo_matrix((data, (row, col)), shape=self.shape, dtype=self.dtype)
A.has_canonical_format = True
return A
tocoo.__doc__ = spmatrix.tocoo.__doc__
def todok(self, copy=False):
if copy:
return self.copy()
return self
todok.__doc__ = spmatrix.todok.__doc__
def tocsc(self, copy=False):
return self.tocoo(copy=False).tocsc(copy=copy)
tocsc.__doc__ = spmatrix.tocsc.__doc__
def resize(self, *shape):
shape = check_shape(shape)
newM, newN = shape
M, N = self.shape
if newM < M or newN < N:
# Remove all elements outside new dimensions
for (i, j) in list(iterkeys(self)):
if i >= newM or j >= newN:
del self[i, j]
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
def isspmatrix_dok(x):
"""Is x of dok_matrix type?
Parameters
----------
x
object to check for being a dok matrix
Returns
-------
bool
True if x is a dok matrix, False otherwise
Examples
--------
>>> from scipy.sparse import dok_matrix, isspmatrix_dok
>>> isspmatrix_dok(dok_matrix([[5]]))
True
>>> from scipy.sparse import dok_matrix, csr_matrix, isspmatrix_dok
>>> isspmatrix_dok(csr_matrix([[5]]))
False
"""
return isinstance(x, dok_matrix)
def _prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
| 18,763 | 33.812616 | 90 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csc.py
|
"""Compressed Sparse Column matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csc_matrix', 'isspmatrix_csc']
import numpy as np
from .base import spmatrix
from ._sparsetools import csc_tocsr
from . import _sparsetools
from .sputils import upcast, isintlike, IndexMixin, get_index_dtype
from .compressed import _cs_matrix
class csc_matrix(_cs_matrix, IndexMixin):
"""
Compressed Sparse Column matrix
This can be instantiated in several ways:
csc_matrix(D)
with a dense matrix or rank-2 ndarray D
csc_matrix(S)
with another sparse matrix S (equivalent to S.tocsc())
csc_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
where ``data``, ``row_ind`` and ``col_ind`` satisfy the
relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
csc_matrix((data, indices, indptr), [shape=(M, N)])
is the standard CSC representation where the row indices for
column i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding values are stored in
``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
not supplied, the matrix dimensions are inferred from
the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
Data array of the matrix
indices
CSC format index array
indptr
CSC format index pointer array
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the CSC format
- efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
- efficient column slicing
- fast matrix vector products (CSR, BSR may be faster)
Disadvantages of the CSC format
- slow row slicing operations (consider CSR)
- changes to the sparsity structure are expensive (consider LIL or DOK)
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> csc_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 2, 2, 0, 1, 2])
>>> col = np.array([0, 0, 1, 2, 2, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 4],
[0, 0, 5],
[2, 3, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
array([[1, 0, 4],
[0, 0, 5],
[2, 3, 6]])
"""
format = 'csc'
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
M, N = self.shape
from .csr import csr_matrix
return csr_matrix((self.data, self.indices,
self.indptr), (N, M), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
def __iter__(self):
for r in self.tocsr():
yield r
def tocsc(self, copy=False):
if copy:
return self.copy()
else:
return self
tocsc.__doc__ = spmatrix.tocsc.__doc__
def tocsr(self, copy=False):
M,N = self.shape
idx_dtype = get_index_dtype((self.indptr, self.indices),
maxval=max(self.nnz, N))
indptr = np.empty(M + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
csc_tocsr(M, N,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
indptr,
indices,
data)
from .csr import csr_matrix
A = csr_matrix((data, indices, indptr), shape=self.shape, copy=False)
A.has_sorted_indices = True
return A
tocsr.__doc__ = spmatrix.tocsr.__doc__
def __getitem__(self, key):
# Use CSR to implement fancy indexing.
row, col = self._unpack_index(key)
        # Things that return submatrices. row or col is an int or slice.
if (isinstance(row, slice) or isinstance(col, slice) or
isintlike(row) or isintlike(col)):
return self.T[col, row].T
# Things that return a sequence of values.
else:
return self.T[col, row]
def nonzero(self):
        # CSC can't use _cs_matrix's .nonzero method because that would
        # return the indices sorted as for the transpose of self.
# Get row and col indices, from _cs_matrix.tocoo
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
_sparsetools.expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
# Remove explicit zeros
nz_mask = self.data != 0
row = row[nz_mask]
col = col[nz_mask]
# Sort them to be in C-style order
ind = np.argsort(row, kind='mergesort')
row = row[ind]
col = col[ind]
return row, col
nonzero.__doc__ = _cs_matrix.nonzero.__doc__
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n)
CSR matrix (row vector).
"""
# we convert to CSR to maintain compatibility with old impl.
# in spmatrix.getrow()
return self._get_submatrix(i, slice(None)).tocsr()
def getcol(self, i):
"""Returns a copy of column i of the matrix, as a (m x 1)
CSC matrix (column vector).
"""
M, N = self.shape
i = int(i)
if i < 0:
i += N
if i < 0 or i >= N:
raise IndexError('index (%d) out of range' % i)
idx = slice(*self.indptr[i:i+2])
data = self.data[idx].copy()
indices = self.indices[idx].copy()
indptr = np.array([0, len(indices)], dtype=self.indptr.dtype)
return csc_matrix((data, indices, indptr), shape=(M, 1),
dtype=self.dtype, copy=False)
    # these functions are used by the parent class (_cs_matrix)
    # to remove redundancy between csc_matrix and csr_matrix
def _swap(self, x):
"""swap the members of x if this is a column-oriented matrix
"""
return x[1], x[0]
def isspmatrix_csc(x):
"""Is x of csc_matrix type?
Parameters
----------
x
object to check for being a csc matrix
Returns
-------
bool
True if x is a csc matrix, False otherwise
Examples
--------
>>> from scipy.sparse import csc_matrix, isspmatrix_csc
>>> isspmatrix_csc(csc_matrix([[5]]))
True
>>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csc
>>> isspmatrix_csc(csr_matrix([[5]]))
False
"""
return isinstance(x, csc_matrix)
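# A minimal usage sketch (assumes scipy is importable; outputs are
# illustrative): CSC favors column access -- getcol slices directly into
# indptr/indices, while row-oriented work typically goes through tocsr().
#
#   >>> from scipy.sparse import csc_matrix
#   >>> A = csc_matrix([[1, 0, 4], [0, 0, 5], [2, 3, 6]])
#   >>> A.getcol(2).toarray()
#   array([[4],
#          [5],
#          [6]])
#   >>> A.tocsr().has_sorted_indices
#   True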
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/extract.py
"""Functions to extract parts of sparse matrices
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['find', 'tril', 'triu']
from .coo import coo_matrix
def find(A):
"""Return the indices and values of the nonzero elements of a matrix
Parameters
----------
A : dense or sparse matrix
Matrix whose nonzero elements are desired.
Returns
-------
(I,J,V) : tuple of arrays
I,J, and V contain the row indices, column indices, and values
of the nonzero matrix entries.
Examples
--------
>>> from scipy.sparse import csr_matrix, find
>>> A = csr_matrix([[7.0, 8.0, 0],[0, 0, 9.0]])
>>> find(A)
(array([0, 0, 1], dtype=int32), array([0, 1, 2], dtype=int32), array([ 7., 8., 9.]))
"""
A = coo_matrix(A, copy=True)
A.sum_duplicates()
# remove explicit zeros
nz_mask = A.data != 0
return A.row[nz_mask], A.col[nz_mask], A.data[nz_mask]
def tril(A, k=0, format=None):
"""Return the lower triangular portion of a matrix in sparse format
Returns the elements on or below the k-th diagonal of the matrix A.
- k = 0 corresponds to the main diagonal
- k > 0 is above the main diagonal
- k < 0 is below the main diagonal
Parameters
----------
A : dense or sparse matrix
        Matrix whose lower triangular portion is desired.
    k : int, optional
The top-most diagonal of the lower triangle.
format : string
Sparse format of the result, e.g. format="csr", etc.
Returns
-------
L : sparse matrix
Lower triangular portion of A in sparse format.
See Also
--------
triu : upper triangle in sparse format
Examples
--------
>>> from scipy.sparse import csr_matrix, tril
>>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
... dtype='int32')
>>> A.toarray()
array([[1, 2, 0, 0, 3],
[4, 5, 0, 6, 7],
[0, 0, 8, 9, 0]])
>>> tril(A).toarray()
array([[1, 0, 0, 0, 0],
[4, 5, 0, 0, 0],
[0, 0, 8, 0, 0]])
>>> tril(A).nnz
4
>>> tril(A, k=1).toarray()
array([[1, 2, 0, 0, 0],
[4, 5, 0, 0, 0],
[0, 0, 8, 9, 0]])
>>> tril(A, k=-1).toarray()
array([[0, 0, 0, 0, 0],
[4, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
>>> tril(A, format='csc')
<3x5 sparse matrix of type '<class 'numpy.int32'>'
with 4 stored elements in Compressed Sparse Column format>
"""
# convert to COOrdinate format where things are easy
A = coo_matrix(A, copy=False)
mask = A.row + k >= A.col
return _masked_coo(A, mask).asformat(format)
def triu(A, k=0, format=None):
"""Return the upper triangular portion of a matrix in sparse format
Returns the elements on or above the k-th diagonal of the matrix A.
- k = 0 corresponds to the main diagonal
- k > 0 is above the main diagonal
- k < 0 is below the main diagonal
Parameters
----------
A : dense or sparse matrix
        Matrix whose upper triangular portion is desired.
    k : int, optional
The bottom-most diagonal of the upper triangle.
format : string
Sparse format of the result, e.g. format="csr", etc.
Returns
-------
    U : sparse matrix
        Upper triangular portion of A in sparse format.
See Also
--------
tril : lower triangle in sparse format
Examples
--------
>>> from scipy.sparse import csr_matrix, triu
>>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
... dtype='int32')
>>> A.toarray()
array([[1, 2, 0, 0, 3],
[4, 5, 0, 6, 7],
[0, 0, 8, 9, 0]])
>>> triu(A).toarray()
array([[1, 2, 0, 0, 3],
[0, 5, 0, 6, 7],
[0, 0, 8, 9, 0]])
>>> triu(A).nnz
8
>>> triu(A, k=1).toarray()
array([[0, 2, 0, 0, 3],
[0, 0, 0, 6, 7],
[0, 0, 0, 9, 0]])
>>> triu(A, k=-1).toarray()
array([[1, 2, 0, 0, 3],
[4, 5, 0, 6, 7],
[0, 0, 8, 9, 0]])
>>> triu(A, format='csc')
<3x5 sparse matrix of type '<class 'numpy.int32'>'
with 8 stored elements in Compressed Sparse Column format>
"""
# convert to COOrdinate format where things are easy
A = coo_matrix(A, copy=False)
mask = A.row + k <= A.col
return _masked_coo(A, mask).asformat(format)
def _masked_coo(A, mask):
row = A.row[mask]
col = A.col[mask]
data = A.data[mask]
return coo_matrix((data, (row, col)), shape=A.shape, dtype=A.dtype)
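# A minimal usage sketch (assumes scipy is importable): tril and triu
# compose naturally, e.g. keeping only the tridiagonal band of a matrix.
#
#   >>> from scipy.sparse import csr_matrix, tril, triu
#   >>> A = csr_matrix([[1, 2, 9], [4, 5, 6], [9, 8, 7]])
#   >>> tril(triu(A, k=-1), k=1).toarray()
#   array([[1, 2, 0],
#          [4, 5, 6],
#          [0, 8, 7]])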
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/data.py
"""Base class for sparse matrice with a .data attribute
subclasses must provide a _with_data() method that
creates a new matrix with the same sparsity pattern
as self but with a different data array
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .base import spmatrix, _ufuncs_with_fixed_point_at_zero
from .sputils import isscalarlike, validateaxis
__all__ = []
# TODO implement all relevant operations
# use .data.__methods__() instead of /=, *=, etc.
class _data_matrix(spmatrix):
def __init__(self):
spmatrix.__init__(self)
def _get_dtype(self):
return self.data.dtype
def _set_dtype(self, newtype):
self.data.dtype = newtype
dtype = property(fget=_get_dtype, fset=_set_dtype)
def _deduped_data(self):
if hasattr(self, 'sum_duplicates'):
self.sum_duplicates()
return self.data
def __abs__(self):
return self._with_data(abs(self._deduped_data()))
def _real(self):
return self._with_data(self.data.real)
def _imag(self):
return self._with_data(self.data.imag)
def __neg__(self):
if self.dtype.kind == 'b':
raise NotImplementedError('negating a sparse boolean '
'matrix is not supported')
return self._with_data(-self.data)
def __imul__(self, other): # self *= other
if isscalarlike(other):
self.data *= other
return self
else:
return NotImplemented
def __itruediv__(self, other): # self /= other
if isscalarlike(other):
recip = 1.0 / other
self.data *= recip
return self
else:
return NotImplemented
def astype(self, dtype, casting='unsafe', copy=True):
dtype = np.dtype(dtype)
if self.dtype != dtype:
return self._with_data(
self._deduped_data().astype(dtype, casting=casting, copy=copy),
copy=copy)
elif copy:
return self.copy()
else:
return self
astype.__doc__ = spmatrix.astype.__doc__
def conj(self, copy=True):
if np.issubdtype(self.dtype, np.complexfloating):
return self._with_data(self.data.conj(), copy=copy)
elif copy:
return self.copy()
else:
return self
conj.__doc__ = spmatrix.conj.__doc__
def copy(self):
return self._with_data(self.data.copy(), copy=True)
copy.__doc__ = spmatrix.copy.__doc__
def count_nonzero(self):
return np.count_nonzero(self._deduped_data())
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def power(self, n, dtype=None):
"""
This function performs element-wise power.
Parameters
----------
        n : scalar
            The exponent; it is applied element-wise to the stored values.
        dtype : dtype, optional
            The data type of the result. If not specified, the current
            dtype is preserved.
"""
if not isscalarlike(n):
raise NotImplementedError("input is not scalar")
data = self._deduped_data()
if dtype is not None:
data = data.astype(dtype)
return self._with_data(data ** n)
###########################
# Multiplication handlers #
###########################
def _mul_scalar(self, other):
return self._with_data(self.data * other)
# Add the numpy unary ufuncs for which func(0) = 0 to _data_matrix.
for npfunc in _ufuncs_with_fixed_point_at_zero:
name = npfunc.__name__
def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = ("Element-wise %s.\n\n"
"See numpy.%s for more information." % (name, name))
method.__name__ = name
return method
setattr(_data_matrix, name, _create_method(npfunc))
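# A minimal usage sketch (assumes scipy is importable): the generated
# methods apply the ufunc to the stored values only, which is valid
# exactly because each of these ufuncs maps 0 to 0.
#
#   >>> from scipy.sparse import csr_matrix
#   >>> csr_matrix([[0.0, 4.0], [9.0, 0.0]]).sqrt().toarray()
#   array([[0., 2.],
#          [3., 0.]])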
def _find_missing_index(ind, n):
    """Return the smallest value in ``range(n)`` that is missing from the
    sorted index array ``ind``, or -1 if ``ind`` covers all of ``range(n)``.
    """
    for k, a in enumerate(ind):
        if k != a:
            return k
    # every entry matched its position; the first missing index is len(ind)
    k += 1
if k < n:
return k
else:
return -1
class _minmax_mixin(object):
"""Mixin for min and max methods.
These are not implemented for dia_matrix, hence the separate class.
"""
def _min_or_max_axis(self, axis, min_or_max):
N = self.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = self.shape[1 - axis]
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
major_index, value = mat._minor_reduce(min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from . import coo_matrix
if axis == 0:
return coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=self.dtype, shape=(1, M))
else:
return coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=self.dtype, shape=(M, 1))
def _min_or_max(self, axis, out, min_or_max):
if out is not None:
raise ValueError(("Sparse matrices do not support "
"an 'out' parameter."))
validateaxis(axis)
if axis is None:
if 0 in self.shape:
raise ValueError("zero-size array to reduction operation")
zero = self.dtype.type(0)
if self.nnz == 0:
return zero
m = min_or_max.reduce(self._deduped_data().ravel())
            if self.nnz != np.prod(self.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return self._min_or_max_axis(axis, min_or_max)
else:
raise ValueError("axis out of range")
def _arg_min_or_max_axis(self, axis, op, compare):
if self.shape[axis] == 0:
raise ValueError("Can't apply the operation along a zero-sized "
"dimension.")
if axis < 0:
axis += 2
zero = self.dtype.type(0)
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
ret_size, line_size = mat._swap(mat.shape)
ret = np.zeros(ret_size, dtype=int)
nz_lines, = np.nonzero(np.diff(mat.indptr))
for i in nz_lines:
p, q = mat.indptr[i:i + 2]
data = mat.data[p:q]
indices = mat.indices[p:q]
am = op(data)
m = data[am]
if compare(m, zero) or q - p == line_size:
ret[i] = indices[am]
else:
zero_ind = _find_missing_index(indices, line_size)
if m == zero:
ret[i] = min(am, zero_ind)
else:
ret[i] = zero_ind
if axis == 1:
ret = ret.reshape(-1, 1)
return np.asmatrix(ret)
def _arg_min_or_max(self, axis, out, op, compare):
if out is not None:
raise ValueError("Sparse matrices do not support "
"an 'out' parameter.")
validateaxis(axis)
if axis is None:
if 0 in self.shape:
raise ValueError("Can't apply the operation to "
"an empty matrix.")
if self.nnz == 0:
return 0
else:
zero = self.dtype.type(0)
mat = self.tocoo()
mat.sum_duplicates()
am = op(mat.data)
m = mat.data[am]
if compare(m, zero):
return mat.row[am] * mat.shape[1] + mat.col[am]
else:
                    size = np.prod(mat.shape)
if size == mat.nnz:
return am
else:
ind = mat.row * mat.shape[1] + mat.col
zero_ind = _find_missing_index(ind, size)
if m == zero:
return min(zero_ind, am)
else:
return zero_ind
return self._arg_min_or_max_axis(axis, op, compare)
def max(self, axis=None, out=None):
"""
Return the maximum of the matrix or maximum along an axis.
This takes all elements into account, not just the non-zero ones.
Parameters
----------
        axis : {-2, -1, 0, 1, None}, optional
            Axis along which the maximum is computed. The default is to
compute the maximum over all the matrix elements, returning
a scalar (i.e. `axis` = `None`).
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value, as this argument is not used.
Returns
-------
amax : coo_matrix or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is a sparse.coo_matrix of dimension
``a.ndim - 1``.
See Also
--------
min : The minimum value of a sparse matrix along a given axis.
np.matrix.max : NumPy's implementation of 'max' for matrices
"""
return self._min_or_max(axis, out, np.maximum)
def min(self, axis=None, out=None):
"""
        Return the minimum of the matrix or minimum along an axis.
This takes all elements into account, not just the non-zero ones.
Parameters
----------
        axis : {-2, -1, 0, 1, None}, optional
            Axis along which the minimum is computed. The default is to
compute the minimum over all the matrix elements, returning
a scalar (i.e. `axis` = `None`).
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
amin : coo_matrix or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is a sparse.coo_matrix of dimension
``a.ndim - 1``.
See Also
--------
max : The maximum value of a sparse matrix along a given axis.
np.matrix.min : NumPy's implementation of 'min' for matrices
"""
return self._min_or_max(axis, out, np.minimum)
def argmax(self, axis=None, out=None):
"""Return indices of maximum elements along an axis.
Implicit zero elements are also taken into account. If there are
several maximum values, the index of the first occurrence is returned.
Parameters
----------
axis : {-2, -1, 0, 1, None}, optional
Axis along which the argmax is computed. If None (default), index
            of the maximum element in the flattened data is returned.
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
ind : np.matrix or int
Indices of maximum elements. If matrix, its size along `axis` is 1.
"""
return self._arg_min_or_max(axis, out, np.argmax, np.greater)
def argmin(self, axis=None, out=None):
"""Return indices of minimum elements along an axis.
Implicit zero elements are also taken into account. If there are
several minimum values, the index of the first occurrence is returned.
Parameters
----------
axis : {-2, -1, 0, 1, None}, optional
Axis along which the argmin is computed. If None (default), index
            of the minimum element in the flattened data is returned.
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
ind : np.matrix or int
Indices of minimum elements. If matrix, its size along `axis` is 1.
"""
return self._arg_min_or_max(axis, out, np.argmin, np.less)
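# A minimal usage sketch for the reductions above (assumes scipy is
# importable); note that implicit zeros participate in the result:
#
#   >>> from scipy.sparse import csr_matrix
#   >>> A = csr_matrix([[1, -2], [0, 3]])
#   >>> A.min()
#   -2
#   >>> A.max(axis=0).toarray()
#   array([[1, 3]])
#   >>> int(A.argmax())          # flat index of the entry at (1, 1)
#   3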
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/construct.py
"""Functions to construct sparse matrices
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag']
import numpy as np
from scipy._lib.six import xrange
from .sputils import upcast, get_index_dtype, isscalarlike
from .csr import csr_matrix
from .csc import csc_matrix
from .bsr import bsr_matrix
from .coo import coo_matrix
from .dia import dia_matrix
from .base import issparse
def spdiags(data, diags, m, n, format=None):
"""
Return a sparse matrix from diagonals.
Parameters
----------
data : array_like
matrix diagonals stored row-wise
diags : diagonals to set
- k = 0 the main diagonal
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
m, n : int
shape of the result
format : str, optional
Format of the result. By default (format=None) an appropriate sparse
matrix format is returned. This choice is subject to change.
See Also
--------
diags : more convenient form of this function
dia_matrix : the sparse DIAgonal format.
Examples
--------
>>> from scipy.sparse import spdiags
>>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
>>> diags = np.array([0, -1, 2])
>>> spdiags(data, diags, 4, 4).toarray()
array([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
return dia_matrix((data, diags), shape=(m,n)).asformat(format)
def diags(diagonals, offsets=0, shape=None, format=None, dtype=None):
"""
Construct a sparse matrix from diagonals.
Parameters
----------
diagonals : sequence of array_like
Sequence of arrays containing the matrix diagonals,
corresponding to `offsets`.
offsets : sequence of int or an int, optional
Diagonals to set:
- k = 0 the main diagonal (default)
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
shape : tuple of int, optional
Shape of the result. If omitted, a square matrix large enough
to contain the diagonals is returned.
format : {"dia", "csr", "csc", "lil", ...}, optional
Matrix format of the result. By default (format=None) an
appropriate sparse matrix format is returned. This choice is
subject to change.
dtype : dtype, optional
Data type of the matrix.
See Also
--------
spdiags : construct matrix from diagonals
Notes
-----
This function differs from `spdiags` in the way it handles
off-diagonals.
The result from `diags` is the sparse equivalent of::
np.diag(diagonals[0], offsets[0])
+ ...
+ np.diag(diagonals[k], offsets[k])
Repeated diagonal offsets are disallowed.
.. versionadded:: 0.11
Examples
--------
>>> from scipy.sparse import diags
>>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
>>> diags(diagonals, [0, -1, 2]).toarray()
array([[1, 0, 1, 0],
[1, 2, 0, 2],
[0, 2, 3, 0],
[0, 0, 3, 4]])
Broadcasting of scalars is supported (but shape needs to be
specified):
>>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray()
array([[-2., 1., 0., 0.],
[ 1., -2., 1., 0.],
[ 0., 1., -2., 1.],
[ 0., 0., 1., -2.]])
If only one diagonal is wanted (as in `numpy.diag`), the following
works as well:
>>> diags([1, 2, 3], 1).toarray()
array([[ 0., 1., 0., 0.],
[ 0., 0., 2., 0.],
[ 0., 0., 0., 3.],
[ 0., 0., 0., 0.]])
"""
# if offsets is not a sequence, assume that there's only one diagonal
if isscalarlike(offsets):
# now check that there's actually only one diagonal
if len(diagonals) == 0 or isscalarlike(diagonals[0]):
diagonals = [np.atleast_1d(diagonals)]
else:
raise ValueError("Different number of diagonals and offsets.")
else:
diagonals = list(map(np.atleast_1d, diagonals))
offsets = np.atleast_1d(offsets)
# Basic check
if len(diagonals) != len(offsets):
raise ValueError("Different number of diagonals and offsets.")
# Determine shape, if omitted
if shape is None:
m = len(diagonals[0]) + abs(int(offsets[0]))
shape = (m, m)
# Determine data type, if omitted
if dtype is None:
dtype = np.common_type(*diagonals)
# Construct data array
m, n = shape
M = max([min(m + offset, n - offset) + max(0, offset)
for offset in offsets])
M = max(0, M)
data_arr = np.zeros((len(offsets), M), dtype=dtype)
K = min(m, n)
for j, diagonal in enumerate(diagonals):
offset = offsets[j]
k = max(0, offset)
length = min(m + offset, n - offset, K)
if length < 0:
raise ValueError("Offset %d (index %d) out of bounds" % (offset, j))
try:
data_arr[j, k:k+length] = diagonal[...,:length]
except ValueError:
if len(diagonal) != length and len(diagonal) != 1:
raise ValueError(
"Diagonal length (index %d: %d at offset %d) does not "
"agree with matrix size (%d, %d)." % (
j, len(diagonal), offset, m, n))
raise
return dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format)
def identity(n, dtype='d', format=None):
"""Identity matrix in sparse format
Returns an identity matrix with shape (n,n) using a given
sparse format and dtype.
Parameters
----------
n : int
Shape of the identity matrix.
dtype : dtype, optional
Data type of the matrix
format : str, optional
Sparse format of the result, e.g. format="csr", etc.
Examples
--------
>>> from scipy.sparse import identity
>>> identity(3).toarray()
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> identity(3, dtype='int8', format='dia')
<3x3 sparse matrix of type '<class 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
return eye(n, n, dtype=dtype, format=format)
def eye(m, n=None, k=0, dtype=float, format=None):
"""Sparse matrix with ones on diagonal
Returns a sparse (m x n) matrix where the k-th diagonal
is all ones and everything else is zeros.
Parameters
----------
m : int
Number of rows in the matrix.
n : int, optional
Number of columns. Default: `m`.
k : int, optional
Diagonal to place ones on. Default: 0 (main diagonal).
dtype : dtype, optional
Data type of the matrix.
format : str, optional
Sparse format of the result, e.g. format="csr", etc.
Examples
--------
>>> from scipy import sparse
>>> sparse.eye(3).toarray()
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> sparse.eye(3, dtype=np.int8)
<3x3 sparse matrix of type '<class 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
if n is None:
n = m
m,n = int(m),int(n)
if m == n and k == 0:
# fast branch for special formats
if format in ['csr', 'csc']:
idx_dtype = get_index_dtype(maxval=n)
indptr = np.arange(n+1, dtype=idx_dtype)
indices = np.arange(n, dtype=idx_dtype)
data = np.ones(n, dtype=dtype)
cls = {'csr': csr_matrix, 'csc': csc_matrix}[format]
return cls((data,indices,indptr),(n,n))
elif format == 'coo':
idx_dtype = get_index_dtype(maxval=n)
row = np.arange(n, dtype=idx_dtype)
col = np.arange(n, dtype=idx_dtype)
data = np.ones(n, dtype=dtype)
return coo_matrix((data,(row,col)),(n,n))
diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
return spdiags(diags, k, m, n).asformat(format)
def kron(A, B, format=None):
"""kronecker product of sparse matrices A and B
Parameters
----------
A : sparse or dense matrix
first matrix of the product
B : sparse or dense matrix
second matrix of the product
format : str, optional
format of the result (e.g. "csr")
Returns
-------
kronecker product in a sparse matrix format
Examples
--------
>>> from scipy import sparse
>>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]]))
>>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]]))
>>> sparse.kron(A, B).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
>>> sparse.kron(A, [[1, 2], [3, 4]]).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
"""
B = coo_matrix(B)
if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
# B is fairly dense, use BSR
A = csr_matrix(A,copy=True)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
B = B.toarray()
data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
data = data * B
return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
else:
# use COO
A = coo_matrix(A)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
# expand entries of a into blocks
row = A.row.repeat(B.nnz)
col = A.col.repeat(B.nnz)
data = A.data.repeat(B.nnz)
row *= B.shape[0]
col *= B.shape[1]
# increment block indices
row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
row += B.row
col += B.col
row,col = row.reshape(-1),col.reshape(-1)
# compute block entries
data = data.reshape(-1,B.nnz) * B.data
data = data.reshape(-1)
return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
"""kronecker sum of sparse matrices A and B
Kronecker sum of two sparse matrices is a sum of two Kronecker
products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
and B has shape (n,n) and I_m and I_n are identity matrices
of shape (m,m) and (n,n) respectively.
Parameters
----------
A
square matrix
B
square matrix
format : str
format of the result (e.g. "csr")
Returns
-------
kronecker sum in a sparse matrix format
    """
A = coo_matrix(A)
B = coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square')
dtype = upcast(A.dtype, B.dtype)
L = kron(eye(B.shape[0],dtype=dtype), A, format=format)
R = kron(B, eye(A.shape[0],dtype=dtype), format=format)
return (L+R).asformat(format) # since L + R is not always same format
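# A minimal usage sketch (assumes scipy is importable), spelling out the
# identity kron(I_n, A) + kron(B, I_m) for small square inputs:
#
#   >>> from scipy.sparse import csr_matrix, kronsum
#   >>> A = csr_matrix([[0, 1], [1, 0]])   # m = 2
#   >>> B = csr_matrix([[2]])              # n = 1
#   >>> kronsum(A, B).toarray()            # equals A + 2*I_2 here
#   array([[2, 1],
#          [1, 2]])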
def _compressed_sparse_stack(blocks, axis):
"""
Stacking fast path for CSR/CSC matrices
(i) vstack for CSR, (ii) hstack for CSC.
"""
other_axis = 1 if axis == 0 else 0
data = np.concatenate([b.data for b in blocks])
constant_dim = blocks[0].shape[other_axis]
idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks],
maxval=max(data.size, constant_dim))
indices = np.empty(data.size, dtype=idx_dtype)
indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype)
last_indptr = idx_dtype(0)
sum_dim = 0
sum_indices = 0
for b in blocks:
if b.shape[other_axis] != constant_dim:
raise ValueError('incompatible dimensions for axis %d' % other_axis)
indices[sum_indices:sum_indices+b.indices.size] = b.indices
sum_indices += b.indices.size
idxs = slice(sum_dim, sum_dim + b.shape[axis])
indptr[idxs] = b.indptr[:-1]
indptr[idxs] += last_indptr
sum_dim += b.shape[axis]
last_indptr += b.indptr[-1]
indptr[-1] = last_indptr
if axis == 0:
return csr_matrix((data, indices, indptr),
shape=(sum_dim, constant_dim))
else:
return csc_matrix((data, indices, indptr),
shape=(constant_dim, sum_dim))
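# A minimal usage sketch (assumes scipy is importable): hstack/vstack hit
# this fast path when every block already has the matching compressed
# format, so the result keeps that format without a COO round-trip.
#
#   >>> from scipy.sparse import csc_matrix, hstack
#   >>> a = csc_matrix([[1], [2]])
#   >>> b = csc_matrix([[3], [4]])
#   >>> hstack([a, b]).format
#   'csc'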
def hstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices horizontally (column wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : str
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
See Also
--------
vstack : stack sparse matrices vertically (row wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, hstack
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> hstack([A,B]).toarray()
array([[1, 2, 5],
[3, 4, 6]])
"""
return bmat([blocks], format=format, dtype=dtype)
def vstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices vertically (row wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : str, optional
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
See Also
--------
hstack : stack sparse matrices horizontally (column wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, vstack
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5, 6]])
>>> vstack([A, B]).toarray()
array([[1, 2],
[3, 4],
[5, 6]])
"""
return bmat([[b] for b in blocks], format=format, dtype=dtype)
def bmat(blocks, format=None, dtype=None):
"""
Build a sparse matrix from sparse sub-blocks
Parameters
----------
blocks : array_like
Grid of sparse matrices with compatible shapes.
An entry of None implies an all-zero matrix.
format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
The sparse format of the result (e.g. "csr"). By default an
appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
Returns
-------
bmat : sparse matrix
See Also
--------
block_diag, diags
Examples
--------
>>> from scipy.sparse import coo_matrix, bmat
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> C = coo_matrix([[7]])
>>> bmat([[A, B], [None, C]]).toarray()
array([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]])
>>> bmat([[A, None], [None, C]]).toarray()
array([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
"""
blocks = np.asarray(blocks, dtype='object')
if blocks.ndim != 2:
raise ValueError('blocks must be 2-D')
M,N = blocks.shape
# check for fast path cases
if (N == 1 and format in (None, 'csr') and all(isinstance(b, csr_matrix)
for b in blocks.flat)):
A = _compressed_sparse_stack(blocks[:,0], 0)
if dtype is not None:
A = A.astype(dtype)
return A
elif (M == 1 and format in (None, 'csc')
and all(isinstance(b, csc_matrix) for b in blocks.flat)):
A = _compressed_sparse_stack(blocks[0,:], 1)
if dtype is not None:
A = A.astype(dtype)
return A
block_mask = np.zeros(blocks.shape, dtype=bool)
brow_lengths = np.zeros(M, dtype=np.int64)
bcol_lengths = np.zeros(N, dtype=np.int64)
# convert everything to COO format
for i in range(M):
for j in range(N):
if blocks[i,j] is not None:
A = coo_matrix(blocks[i,j])
blocks[i,j] = A
block_mask[i,j] = True
if brow_lengths[i] == 0:
brow_lengths[i] = A.shape[0]
elif brow_lengths[i] != A.shape[0]:
msg = ('blocks[{i},:] has incompatible row dimensions. '
'Got blocks[{i},{j}].shape[0] == {got}, '
'expected {exp}.'.format(i=i, j=j,
exp=brow_lengths[i],
got=A.shape[0]))
raise ValueError(msg)
if bcol_lengths[j] == 0:
bcol_lengths[j] = A.shape[1]
elif bcol_lengths[j] != A.shape[1]:
                    msg = ('blocks[:,{j}] has incompatible column dimensions. '
'Got blocks[{i},{j}].shape[1] == {got}, '
'expected {exp}.'.format(i=i, j=j,
exp=bcol_lengths[j],
got=A.shape[1]))
raise ValueError(msg)
nnz = sum(block.nnz for block in blocks[block_mask])
if dtype is None:
all_dtypes = [blk.dtype for blk in blocks[block_mask]]
dtype = upcast(*all_dtypes) if all_dtypes else None
row_offsets = np.append(0, np.cumsum(brow_lengths))
col_offsets = np.append(0, np.cumsum(bcol_lengths))
shape = (row_offsets[-1], col_offsets[-1])
data = np.empty(nnz, dtype=dtype)
idx_dtype = get_index_dtype(maxval=max(shape))
row = np.empty(nnz, dtype=idx_dtype)
col = np.empty(nnz, dtype=idx_dtype)
nnz = 0
ii, jj = np.nonzero(block_mask)
for i, j in zip(ii, jj):
B = blocks[i, j]
idx = slice(nnz, nnz + B.nnz)
data[idx] = B.data
row[idx] = B.row + row_offsets[i]
col[idx] = B.col + col_offsets[j]
nnz += B.nnz
return coo_matrix((data, (row, col)), shape=shape).asformat(format)
def block_diag(mats, format=None, dtype=None):
"""
Build a block diagonal sparse matrix from provided matrices.
Parameters
----------
mats : sequence of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the matrix
is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
Returns
-------
res : sparse matrix
Notes
-----
.. versionadded:: 0.11.0
See Also
--------
bmat, diags
Examples
--------
>>> from scipy.sparse import coo_matrix, block_diag
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> C = coo_matrix([[7]])
>>> block_diag((A, B, C)).toarray()
array([[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 6, 0],
[0, 0, 0, 7]])
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None]*nmat
if issparse(a):
row[ia] = a
else:
row[ia] = coo_matrix(a)
rows.append(row)
return bmat(rows, format=format, dtype=dtype)
def random(m, n, density=0.01, format='coo', dtype=None,
random_state=None, data_rvs=None):
"""Generate a sparse matrix of the given shape and density with randomly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used. This random state will be used
for sampling the sparsity structure, but not necessarily for sampling
the values of the structurally nonzero entries of the matrix.
data_rvs : callable, optional
Samples a requested number of random values.
This function should take a single argument specifying the length
of the ndarray that it will return. The structurally nonzero entries
of the sparse random matrix will be taken from the array sampled
by this function. By default, uniform [0, 1) random values will be
sampled using the same random state as is used for sampling
the sparsity structure.
Returns
-------
res : sparse matrix
Examples
--------
>>> from scipy.sparse import random
>>> from scipy import stats
>>> class CustomRandomState(object):
... def randint(self, k):
... i = np.random.randint(k)
... return i - i % 2
>>> rs = CustomRandomState()
>>> rvs = stats.poisson(25, loc=10).rvs
>>> S = random(3, 4, density=0.25, random_state=rs, data_rvs=rvs)
>>> S.A
array([[ 36., 0., 33., 0.], # random
[ 0., 0., 0., 0.],
[ 0., 0., 36., 0.]])
>>> from scipy.sparse import random
>>> from scipy.stats import rv_continuous
>>> class CustomDistribution(rv_continuous):
... def _rvs(self, *args, **kwargs):
... return self._random_state.randn(*self._size)
>>> X = CustomDistribution(seed=2906)
>>> Y = X() # get a frozen version of the distribution
>>> S = random(3, 4, density=0.25, random_state=2906, data_rvs=Y.rvs)
>>> S.A
array([[ 0. , 1.9467163 , 0.13569738, -0.81205367],
[ 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. ]])
Notes
-----
Only float types are supported for now.
"""
if density < 0 or density > 1:
raise ValueError("density expected to be 0 <= density <= 1")
dtype = np.dtype(dtype)
if dtype.char not in 'fdg':
raise NotImplementedError("type %s not supported" % dtype)
mn = m * n
tp = np.intc
if mn > np.iinfo(tp).max:
tp = np.int64
if mn > np.iinfo(tp).max:
msg = """\
Trying to generate a random sparse matrix such that the product of its
dimensions is greater than %d - this is not supported on this machine
"""
raise ValueError(msg % np.iinfo(tp).max)
# Number of non zero values
k = int(density * m * n)
if random_state is None:
random_state = np.random
elif isinstance(random_state, (int, np.integer)):
random_state = np.random.RandomState(random_state)
if data_rvs is None:
data_rvs = random_state.rand
    # Sample k distinct flat indices without replacement: use
    # random_state.choice when the sample is dense relative to mn, otherwise
    # fall back to the set-based scheme from Python's random.sample
    # (suited to k < mn/3).
if mn < 3*k:
ind = random_state.choice(mn, size=k, replace=False)
else:
ind = np.empty(k, dtype=tp)
selected = set()
for i in xrange(k):
j = random_state.randint(mn)
while j in selected:
j = random_state.randint(mn)
selected.add(j)
ind[i] = j
    # Decode the flat, column-major indices into row (i) and column (j).
    j = np.floor(ind * 1. / m).astype(tp)
i = (ind - j * m).astype(tp)
vals = data_rvs(k).astype(dtype)
return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
"""Generate a sparse matrix of the given shape and density with uniformly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used.
Returns
-------
res : sparse matrix
Notes
-----
Only float types are supported for now.
See Also
--------
scipy.sparse.random : Similar function that allows a user-specified random
data source.
Examples
--------
>>> from scipy.sparse import rand
>>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42)
>>> matrix
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> matrix.todense()
matrix([[ 0. , 0.59685016, 0.779691 , 0. ],
[ 0. , 0. , 0. , 0.44583275],
[ 0. , 0. , 0. , 0. ]])
"""
return random(m, n, density, format, dtype, random_state)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/_matrix_io.py
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
import scipy.sparse
from scipy._lib._version import NumpyVersion
__all__ = ['save_npz', 'load_npz']
if NumpyVersion(np.__version__) >= '1.10.0':
# Make loading safe vs. malicious input
PICKLE_KWARGS = dict(allow_pickle=False)
else:
PICKLE_KWARGS = dict()
def save_npz(file, matrix, compressed=True):
""" Save a sparse matrix to a file using ``.npz`` format.
Parameters
----------
file : str or file-like object
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already
there.
    matrix : spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo``)
The sparse matrix to save.
compressed : bool, optional
Allow compressing the file. Default: True
See Also
--------
scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format.
numpy.savez: Save several arrays into a ``.npz`` archive.
numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive.
Examples
--------
Store sparse matrix to disk, and load it again:
>>> import scipy.sparse
>>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
>>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
>>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
"""
arrays_dict = {}
if matrix.format in ('csc', 'csr', 'bsr'):
arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)
elif matrix.format == 'dia':
arrays_dict.update(offsets=matrix.offsets)
elif matrix.format == 'coo':
arrays_dict.update(row=matrix.row, col=matrix.col)
else:
raise NotImplementedError('Save is not implemented for sparse matrix of format {}.'.format(matrix.format))
arrays_dict.update(
format=matrix.format.encode('ascii'),
shape=matrix.shape,
data=matrix.data
)
if compressed:
np.savez_compressed(file, **arrays_dict)
else:
np.savez(file, **arrays_dict)
def load_npz(file):
""" Load a sparse matrix from a file using ``.npz`` format.
Parameters
----------
file : str or file-like object
Either the file name (string) or an open file (file-like object)
where the data will be loaded.
Returns
-------
result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix
A sparse matrix containing the loaded data.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
scipy.sparse.save_npz: Save a sparse matrix to a file using ``.npz`` format.
numpy.load: Load several arrays from a ``.npz`` archive.
Examples
--------
Store sparse matrix to disk, and load it again:
>>> import scipy.sparse
>>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
>>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
>>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
"""
with np.load(file, **PICKLE_KWARGS) as loaded:
try:
matrix_format = loaded['format']
except KeyError:
raise ValueError('The file {} does not contain a sparse matrix.'.format(file))
matrix_format = matrix_format.item()
if sys.version_info[0] >= 3 and not isinstance(matrix_format, str):
# Play safe with Python 2 vs 3 backward compatibility;
# files saved with Scipy < 1.0.0 may contain unicode or bytes.
matrix_format = matrix_format.decode('ascii')
try:
cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format))
except AttributeError:
raise ValueError('Unknown matrix format "{}"'.format(matrix_format))
if matrix_format in ('csc', 'csr', 'bsr'):
return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape'])
elif matrix_format == 'dia':
return cls((loaded['data'], loaded['offsets']), shape=loaded['shape'])
elif matrix_format == 'coo':
return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape'])
else:
raise NotImplementedError('Load is not implemented for '
'sparse matrix of format {}.'.format(matrix_format))
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/sputils.py
""" Utility functions for sparse matrix module
"""
from __future__ import division, print_function, absolute_import
import operator
import warnings
import numpy as np
__all__ = ['upcast', 'getdtype', 'isscalarlike', 'isintlike',
'isshape', 'issequence', 'isdense', 'ismatrix', 'get_sum_dtype']
supported_dtypes = ['bool', 'int8', 'uint8', 'short', 'ushort', 'intc',
'uintc', 'longlong', 'ulonglong', 'single', 'double',
'longdouble', 'csingle', 'cdouble', 'clongdouble']
supported_dtypes = [np.typeDict[x] for x in supported_dtypes]
_upcast_memo = {}
def upcast(*args):
"""Returns the nearest supported sparse dtype for the
combination of one or more types.
upcast(t0, t1, ..., tn) -> T where T is a supported dtype
Examples
--------
>>> upcast('int32')
<type 'numpy.int32'>
>>> upcast('bool')
<type 'numpy.bool_'>
>>> upcast('int32','float32')
<type 'numpy.float64'>
>>> upcast('bool',complex,float)
<type 'numpy.complex128'>
"""
t = _upcast_memo.get(hash(args))
if t is not None:
return t
upcast = np.find_common_type(args, [])
for t in supported_dtypes:
if np.can_cast(upcast, t):
_upcast_memo[hash(args)] = t
return t
raise TypeError('no supported conversion for types: %r' % (args,))
def upcast_char(*args):
"""Same as `upcast` but taking dtype.char as input (faster)."""
t = _upcast_memo.get(args)
if t is not None:
return t
t = upcast(*map(np.dtype, args))
_upcast_memo[args] = t
return t
def upcast_scalar(dtype, scalar):
"""Determine data type for binary operation between an array of
type `dtype` and a scalar.
"""
return (np.array([0], dtype=dtype) * scalar).dtype
def downcast_intp_index(arr):
"""
Down-cast index array to np.intp dtype if it is of a larger dtype.
Raise an error if the array contains a value that is too large for
intp.
"""
if arr.dtype.itemsize > np.dtype(np.intp).itemsize:
if arr.size == 0:
return arr.astype(np.intp)
maxval = arr.max()
minval = arr.min()
if maxval > np.iinfo(np.intp).max or minval < np.iinfo(np.intp).min:
raise ValueError("Cannot deal with arrays with indices larger "
"than the machine maximum address size "
"(e.g. 64-bit indices on 32-bit machine).")
return arr.astype(np.intp)
return arr
def to_native(A):
return np.asarray(A, dtype=A.dtype.newbyteorder('native'))
def getdtype(dtype, a=None, default=None):
"""Function used to simplify argument processing. If 'dtype' is not
specified (is None), returns a.dtype; otherwise returns a np.dtype
object created from the specified dtype argument. If 'dtype' and 'a'
are both None, construct a data type out of the 'default' parameter.
Furthermore, 'dtype' must be in 'allowed' set.
"""
# TODO is this really what we want?
if dtype is None:
try:
newdtype = a.dtype
except AttributeError:
if default is not None:
newdtype = np.dtype(default)
else:
raise TypeError("could not interpret data type")
else:
newdtype = np.dtype(dtype)
if newdtype == np.object_:
warnings.warn("object dtype is not supported by sparse matrices")
return newdtype
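# A minimal usage sketch (assumes numpy is importable):
#
#   >>> import numpy as np
#   >>> getdtype(None, a=np.array([1], dtype=np.int8))
#   dtype('int8')
#   >>> getdtype(None, default=float)
#   dtype('float64')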
def get_index_dtype(arrays=(), maxval=None, check_contents=False):
"""
    Based on the input (integer) arrays, determine a suitable index data
    type that can hold the data in the arrays.
Parameters
----------
arrays : tuple of array_like
Input arrays whose types/contents to check
maxval : float, optional
Maximum value needed
check_contents : bool, optional
Whether to check the values in the arrays and not just their types.
Default: False (check only the types)
Returns
-------
dtype : dtype
Suitable index data type (int32 or int64)
"""
int32min = np.iinfo(np.int32).min
int32max = np.iinfo(np.int32).max
dtype = np.intc
if maxval is not None:
if maxval > int32max:
dtype = np.int64
if isinstance(arrays, np.ndarray):
arrays = (arrays,)
for arr in arrays:
arr = np.asarray(arr)
if not np.can_cast(arr.dtype, np.int32):
if check_contents:
if arr.size == 0:
# a bigger type not needed
continue
elif np.issubdtype(arr.dtype, np.integer):
maxval = arr.max()
minval = arr.min()
if minval >= int32min and maxval <= int32max:
# a bigger type not needed
continue
dtype = np.int64
break
return dtype
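# A minimal usage sketch (assumes numpy is importable, and a typical
# platform where np.intc is int32): int32 is kept whenever the values
# provably fit, otherwise int64 is selected.
#
#   >>> import numpy as np
#   >>> np.dtype(get_index_dtype(maxval=2**40))
#   dtype('int64')
#   >>> arr = np.array([0, 1], dtype=np.int64)
#   >>> np.dtype(get_index_dtype((arr,), check_contents=True))
#   dtype('int32')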
def get_sum_dtype(dtype):
"""Mimic numpy's casting for np.sum"""
if np.issubdtype(dtype, np.float_):
return np.float_
if dtype.kind == 'u' and np.can_cast(dtype, np.uint):
return np.uint
if np.can_cast(dtype, np.int_):
return np.int_
return dtype
def isscalarlike(x):
"""Is x either a scalar, an array scalar, or a 0-dim array?"""
return np.isscalar(x) or (isdense(x) and x.ndim == 0)
def isintlike(x):
"""Is x appropriate as an index into a sparse matrix? Returns True
if it can be cast safely to a machine int.
"""
# Fast-path check to eliminate non-scalar values. operator.index would
# catch this case too, but the exception catching is slow.
if np.ndim(x) != 0:
return False
try:
operator.index(x)
except (TypeError, ValueError):
try:
loose_int = bool(int(x) == x)
except (TypeError, ValueError):
return False
if loose_int:
warnings.warn("Inexact indices into sparse matrices are deprecated",
DeprecationWarning)
return loose_int
return True
def isshape(x, nonneg=False):
"""Is x a valid 2-tuple of dimensions?
If nonneg, also checks that the dimensions are non-negative.
"""
try:
# Assume it's a tuple of matrix dimensions (M, N)
(M, N) = x
    except Exception:
return False
else:
if isintlike(M) and isintlike(N):
if np.ndim(M) == 0 and np.ndim(N) == 0:
if not nonneg or (M >= 0 and N >= 0):
return True
return False
def issequence(t):
return ((isinstance(t, (list, tuple)) and
(len(t) == 0 or np.isscalar(t[0]))) or
(isinstance(t, np.ndarray) and (t.ndim == 1)))
def ismatrix(t):
return ((isinstance(t, (list, tuple)) and
len(t) > 0 and issequence(t[0])) or
(isinstance(t, np.ndarray) and t.ndim == 2))
def isdense(x):
return isinstance(x, np.ndarray)
def validateaxis(axis):
if axis is not None:
axis_type = type(axis)
# In NumPy, you can pass in tuples for 'axis', but they are
# not very useful for sparse matrices given their limited
# dimensions, so let's make it explicit that they are not
# allowed to be passed in
if axis_type == tuple:
raise TypeError(("Tuples are not accepted for the 'axis' "
"parameter. Please pass in one of the "
"following: {-2, -1, 0, 1, None}."))
# If not a tuple, check that the provided axis is actually
# an integer and raise a TypeError similar to NumPy's
if not np.issubdtype(np.dtype(axis_type), np.integer):
raise TypeError("axis must be an integer, not {name}"
.format(name=axis_type.__name__))
if not (-2 <= axis <= 1):
raise ValueError("axis out of range")
def check_shape(args, current_shape=None):
"""Imitate numpy.matrix handling of shape arguments"""
if len(args) == 0:
raise TypeError("function missing 1 required positional argument: "
"'shape'")
elif len(args) == 1:
try:
shape_iter = iter(args[0])
except TypeError:
new_shape = (operator.index(args[0]), )
else:
new_shape = tuple(operator.index(arg) for arg in shape_iter)
else:
new_shape = tuple(operator.index(arg) for arg in args)
if current_shape is None:
if len(new_shape) != 2:
raise ValueError('shape must be a 2-tuple of positive integers')
elif new_shape[0] < 0 or new_shape[1] < 0:
raise ValueError("'shape' elements cannot be negative")
else:
# Check the current size only if needed
current_size = np.prod(current_shape, dtype=int)
# Check for negatives
negative_indexes = [i for i, x in enumerate(new_shape) if x < 0]
if len(negative_indexes) == 0:
new_size = np.prod(new_shape, dtype=int)
if new_size != current_size:
raise ValueError('cannot reshape array of size {} into shape {}'
.format(new_size, new_shape))
elif len(negative_indexes) == 1:
skip = negative_indexes[0]
specified = np.prod(new_shape[0:skip] + new_shape[skip+1:])
unspecified, remainder = divmod(current_size, specified)
if remainder != 0:
err_shape = tuple('newshape' if x < 0 else x for x in new_shape)
raise ValueError('cannot reshape array of size {} into shape {}'
''.format(current_size, err_shape))
new_shape = new_shape[0:skip] + (unspecified,) + new_shape[skip+1:]
else:
raise ValueError('can only specify one unknown dimension')
# Add and remove ones like numpy.matrix.reshape
if len(new_shape) != 2:
new_shape = tuple(arg for arg in new_shape if arg != 1)
if len(new_shape) == 0:
new_shape = (1, 1)
elif len(new_shape) == 1:
new_shape = (1, new_shape[0])
if len(new_shape) > 2:
raise ValueError('shape too large to be a matrix')
return new_shape
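# A minimal usage sketch (assumes this module is importable):
#
#   >>> check_shape(((2, 3),))                         # single tuple argument
#   (2, 3)
#   >>> check_shape((-1, 2), current_shape=(2, 3))     # one unknown dimension
#   (3, 2)
#   >>> check_shape((1, 2, 3), current_shape=(2, 3))   # ones are dropped
#   (2, 3)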
def check_reshape_kwargs(kwargs):
"""Unpack keyword arguments for reshape function.
This is useful because keyword arguments after star arguments are not
allowed in Python 2, but star keyword arguments are. This function unpacks
'order' and 'copy' from the star keyword arguments (with defaults) and
throws an error for any remaining.
"""
order = kwargs.pop('order', 'C')
copy = kwargs.pop('copy', False)
if kwargs: # Some unused kwargs remain
raise TypeError('reshape() got unexpected keywords arguments: {}'
.format(', '.join(kwargs.keys())))
return order, copy
class IndexMixin(object):
"""
This class simply exists to hold the methods necessary for fancy indexing.
"""
def _slicetoarange(self, j, shape):
""" Given a slice object, use numpy arange to change it to a 1D
array.
"""
start, stop, step = j.indices(shape)
return np.arange(start, stop, step)
def _unpack_index(self, index):
""" Parse index. Always return a tuple of the form (row, col).
Where row/col is a integer, slice, or array of integers.
"""
# First, check if indexing with single boolean matrix.
from .base import spmatrix # This feels dirty but...
if (isinstance(index, (spmatrix, np.ndarray)) and
(index.ndim == 2) and index.dtype.kind == 'b'):
return index.nonzero()
# Parse any ellipses.
index = self._check_ellipsis(index)
# Next, parse the tuple or object
if isinstance(index, tuple):
if len(index) == 2:
row, col = index
elif len(index) == 1:
row, col = index[0], slice(None)
else:
raise IndexError('invalid number of indices')
else:
row, col = index, slice(None)
# Next, check for validity, or transform the index as needed.
row, col = self._check_boolean(row, col)
return row, col
def _check_ellipsis(self, index):
"""Process indices with Ellipsis. Returns modified index."""
if index is Ellipsis:
return (slice(None), slice(None))
elif isinstance(index, tuple):
# Find first ellipsis
for j, v in enumerate(index):
if v is Ellipsis:
first_ellipsis = j
break
else:
first_ellipsis = None
# Expand the first one
if first_ellipsis is not None:
# Shortcuts
if len(index) == 1:
return (slice(None), slice(None))
elif len(index) == 2:
if first_ellipsis == 0:
if index[1] is Ellipsis:
return (slice(None), slice(None))
else:
return (slice(None), index[1])
else:
return (index[0], slice(None))
# General case
tail = ()
for v in index[first_ellipsis+1:]:
if v is not Ellipsis:
tail = tail + (v,)
nd = first_ellipsis + len(tail)
nslice = max(0, 2 - nd)
return index[:first_ellipsis] + (slice(None),)*nslice + tail
return index
def _check_boolean(self, row, col):
from .base import isspmatrix # ew...
# Supporting sparse boolean indexing with both row and col does
# not work because spmatrix.ndim is always 2.
if isspmatrix(row) or isspmatrix(col):
            raise IndexError(
                "Indexing with sparse matrices is not supported "
                "except boolean indexing where the matrix and index "
                "have equal shapes.")
if isinstance(row, np.ndarray) and row.dtype.kind == 'b':
row = self._boolean_index_to_array(row)
if isinstance(col, np.ndarray) and col.dtype.kind == 'b':
col = self._boolean_index_to_array(col)
return row, col
def _boolean_index_to_array(self, i):
if i.ndim > 1:
raise IndexError('invalid index shape')
return i.nonzero()[0]
def _index_to_arrays(self, i, j):
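        # Convert a (row, col) index pair into broadcastable 2-D integer
        # arrays, e.g. (slice(0, 2), [0, 2]) on a 4x4 matrix broadcasts to
        # i = [[0, 0], [1, 1]], j = [[0, 2], [0, 2]] (a 2x2 selection).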
i, j = self._check_boolean(i, j)
i_slice = isinstance(i, slice)
if i_slice:
i = self._slicetoarange(i, self.shape[0])[:, None]
else:
i = np.atleast_1d(i)
if isinstance(j, slice):
j = self._slicetoarange(j, self.shape[1])[None, :]
if i.ndim == 1:
i = i[:, None]
elif not i_slice:
raise IndexError('index returns 3-dim structure')
elif isscalarlike(j):
# row vector special case
j = np.atleast_1d(j)
if i.ndim == 1:
i, j = np.broadcast_arrays(i, j)
i = i[:, None]
j = j[:, None]
return i, j
else:
j = np.atleast_1d(j)
if i_slice and j.ndim > 1:
raise IndexError('index returns 3-dim structure')
i, j = np.broadcast_arrays(i, j)
if i.ndim == 1:
# return column vectors for 1-D indexing
i = i[None, :]
j = j[None, :]
elif i.ndim > 2:
raise IndexError("Index dimension must be <= 2")
return i, j
| 15,847 | 32.085595 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/__init__.py
|
"""
=====================================
Sparse matrices (:mod:`scipy.sparse`)
=====================================
.. currentmodule:: scipy.sparse
SciPy 2-D sparse matrix package for numeric data.
Contents
========
Sparse matrix classes
---------------------
.. autosummary::
:toctree: generated/
bsr_matrix - Block Sparse Row matrix
coo_matrix - A sparse matrix in COOrdinate format
csc_matrix - Compressed Sparse Column matrix
csr_matrix - Compressed Sparse Row matrix
dia_matrix - Sparse matrix with DIAgonal storage
dok_matrix - Dictionary Of Keys based sparse matrix
lil_matrix - Row-based linked list sparse matrix
spmatrix - Sparse matrix base class
Functions
---------
Building sparse matrices:
.. autosummary::
:toctree: generated/
eye - Sparse MxN matrix whose k-th diagonal is all ones
identity - Identity matrix in sparse format
kron - kronecker product of two sparse matrices
kronsum - kronecker sum of sparse matrices
diags - Return a sparse matrix from diagonals
spdiags - Return a sparse matrix from diagonals
block_diag - Build a block diagonal sparse matrix
tril - Lower triangular portion of a matrix in sparse format
triu - Upper triangular portion of a matrix in sparse format
bmat - Build a sparse matrix from sparse sub-blocks
hstack - Stack sparse matrices horizontally (column wise)
vstack - Stack sparse matrices vertically (row wise)
rand - Random values in a given shape
random - Random values in a given shape
Save and load sparse matrices:
.. autosummary::
:toctree: generated/
save_npz - Save a sparse matrix to a file using ``.npz`` format.
load_npz - Load a sparse matrix from a file using ``.npz`` format.
Sparse matrix tools:
.. autosummary::
:toctree: generated/
find
Identifying sparse matrices:
.. autosummary::
:toctree: generated/
issparse
isspmatrix
isspmatrix_csc
isspmatrix_csr
isspmatrix_bsr
isspmatrix_lil
isspmatrix_dok
isspmatrix_coo
isspmatrix_dia
Submodules
----------
.. autosummary::
:toctree: generated/
csgraph - Compressed sparse graph routines
linalg - sparse linear algebra routines
Exceptions
----------
.. autosummary::
:toctree: generated/
SparseEfficiencyWarning
SparseWarning
Usage information
=================
There are seven available sparse matrix types:
1. csc_matrix: Compressed Sparse Column format
2. csr_matrix: Compressed Sparse Row format
3. bsr_matrix: Block Sparse Row format
4. lil_matrix: List of Lists format
5. dok_matrix: Dictionary of Keys format
6. coo_matrix: COOrdinate format (aka IJV, triplet format)
7. dia_matrix: DIAgonal format
To construct a matrix efficiently, use either dok_matrix or lil_matrix.
The lil_matrix class supports basic slicing and fancy indexing with a
similar syntax to NumPy arrays. As illustrated below, the COO format
may also be used to efficiently construct matrices. Despite their
similarity to NumPy arrays, it is **strongly discouraged** to use NumPy
functions directly on these matrices because NumPy may not properly convert
them for computations, leading to unexpected (and incorrect) results. If you
do want to apply a NumPy function to these matrices, first check if SciPy has
its own implementation for the given sparse matrix class, or **convert the
sparse matrix to a NumPy array** (e.g. using the `toarray()` method of the
class) before applying the method.
To perform manipulations such as multiplication or inversion, first
convert the matrix to either CSC or CSR format. The lil_matrix format is
row-based, so conversion to CSR is efficient, whereas conversion to CSC
is less so.
All conversions among the CSR, CSC, and COO formats are efficient,
linear-time operations.
Matrix vector product
---------------------
To do a vector product between a sparse matrix and a vector simply use
the matrix `dot` method, as described in its docstring:
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
.. warning:: As of NumPy 1.7, `np.dot` is not aware of sparse matrices,
  therefore using it will produce unexpected results or errors.
The corresponding dense array should be obtained first instead:
>>> np.dot(A.toarray(), v)
array([ 1, -3, -1], dtype=int64)
but then all the performance advantages would be lost.
The CSR format is especially suitable for fast matrix vector products.
Example 1
---------
Construct a 1000x1000 lil_matrix and add some values to it:
>>> from scipy.sparse import lil_matrix
>>> from scipy.sparse.linalg import spsolve
>>> from numpy.linalg import solve, norm
>>> from numpy.random import rand
>>> A = lil_matrix((1000, 1000))
>>> A[0, :100] = rand(100)
>>> A[1, 100:200] = A[0, :100]
>>> A.setdiag(rand(1000))
Now convert it to CSR format and solve A x = b for x:
>>> A = A.tocsr()
>>> b = rand(1000)
>>> x = spsolve(A, b)
Convert it to a dense matrix and solve, and check that the result
is the same:
>>> x_ = solve(A.toarray(), b)
Now we can compute norm of the error with:
>>> err = norm(x-x_)
>>> err < 1e-10
True
It should be small :)
Example 2
---------
Construct a matrix in COO format:
>>> from scipy import sparse
>>> from numpy import array
>>> I = array([0,3,1,0])
>>> J = array([0,3,1,2])
>>> V = array([4,5,7,9])
>>> A = sparse.coo_matrix((V,(I,J)),shape=(4,4))
Notice that the indices do not need to be sorted.
Duplicate (i,j) entries are summed when converting to CSR or CSC.
>>> I = array([0,0,1,3,1,0,0])
>>> J = array([0,2,1,3,1,0,0])
>>> V = array([1,1,1,1,1,1,1])
>>> B = sparse.coo_matrix((V,(I,J)),shape=(4,4)).tocsr()
This is useful for constructing finite-element stiffness and mass matrices.
Further Details
---------------
CSR column indices are not necessarily sorted. Likewise for CSC row
indices. Use the .sorted_indices() and .sort_indices() methods when
sorted indices are required (e.g. when passing data to other libraries).
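As a minimal illustration (the sort happens in place, with no output):
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[0, 2, 1], [0, 0, 3]])
>>> A.sort_indices()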
"""
from __future__ import division, print_function, absolute_import
# Original code by Travis Oliphant.
# Modified and extended by Ed Schofield, Robert Cimrman,
# Nathan Bell, and Jake Vanderplas.
from .base import *
from .csr import *
from .csc import *
from .lil import *
from .dok import *
from .coo import *
from .dia import *
from .bsr import *
from .construct import *
from .extract import *
from ._matrix_io import *
# For backward compatibility with v0.19.
from . import csgraph
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 6,735 | 26.16129 | 77 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/coo.py
|
""" A sparse matrix in COOrdinate or 'triplet' format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['coo_matrix', 'isspmatrix_coo']
from warnings import warn
import numpy as np
from scipy._lib.six import zip as izip
from ._sparsetools import coo_tocsr, coo_todense, coo_matvec
from .base import isspmatrix, SparseEfficiencyWarning, spmatrix
from .data import _data_matrix, _minmax_mixin
from .sputils import (upcast, upcast_char, to_native, isshape, getdtype,
get_index_dtype, downcast_intp_index, check_shape,
check_reshape_kwargs)
class coo_matrix(_data_matrix, _minmax_mixin):
"""
A sparse matrix in COOrdinate format.
Also known as the 'ijv' or 'triplet' format.
This can be instantiated in several ways:
coo_matrix(D)
with a dense matrix D
coo_matrix(S)
with another sparse matrix S (equivalent to S.tocoo())
coo_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
coo_matrix((data, (i, j)), [shape=(M, N)])
to construct from three arrays:
1. data[:] the entries of the matrix, in any order
2. i[:] the row indices of the matrix entries
3. j[:] the column indices of the matrix entries
            Here ``A[i[k], j[k]] = data[k]``. When shape is not
            specified, it is inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
COO format data array of the matrix
row
COO format row index array of the matrix
col
COO format column index array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the COO format
- facilitates fast conversion among sparse formats
- permits duplicate entries (see example)
- very fast conversion to and from CSR/CSC formats
Disadvantages of the COO format
- does not directly support:
+ arithmetic operations
+ slicing
Intended Usage
- COO is a fast format for constructing sparse matrices
- Once a matrix has been constructed, convert to CSR or
CSC format for fast arithmetic and matrix vector operations
- By default when converting to CSR or CSC format, duplicate (i,j)
entries will be summed together. This facilitates efficient
construction of finite element matrices and the like. (see example)
Examples
--------
>>> # Constructing an empty matrix
>>> from scipy.sparse import coo_matrix
>>> coo_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> # Constructing a matrix using ijv format
>>> row = np.array([0, 3, 1, 0])
>>> col = np.array([0, 3, 1, 2])
>>> data = np.array([4, 5, 7, 9])
>>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
array([[4, 0, 9, 0],
[0, 7, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5]])
>>> # Constructing a matrix with duplicate indices
>>> row = np.array([0, 0, 1, 3, 1, 0, 0])
>>> col = np.array([0, 2, 1, 3, 1, 0, 0])
>>> data = np.array([1, 1, 1, 1, 1, 1, 1])
>>> coo = coo_matrix((data, (row, col)), shape=(4, 4))
>>> # Duplicate indices are maintained until implicitly or explicitly summed
>>> np.max(coo.data)
1
>>> coo.toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
format = 'coo'
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isinstance(arg1, tuple):
if isshape(arg1):
M, N = arg1
self._shape = check_shape((M, N))
idx_dtype = get_index_dtype(maxval=max(M, N))
self.row = np.array([], dtype=idx_dtype)
self.col = np.array([], dtype=idx_dtype)
self.data = np.array([], getdtype(dtype, default=float))
self.has_canonical_format = True
else:
try:
obj, (row, col) = arg1
except (TypeError, ValueError):
raise TypeError('invalid input format')
if shape is None:
if len(row) == 0 or len(col) == 0:
raise ValueError('cannot infer dimensions from zero '
'sized index arrays')
M = np.max(row) + 1
N = np.max(col) + 1
self._shape = check_shape((M, N))
else:
# Use 2 steps to ensure shape has length 2.
M, N = shape
self._shape = check_shape((M, N))
idx_dtype = get_index_dtype(maxval=max(self.shape))
self.row = np.array(row, copy=copy, dtype=idx_dtype)
self.col = np.array(col, copy=copy, dtype=idx_dtype)
self.data = np.array(obj, copy=copy)
self.has_canonical_format = False
else:
if isspmatrix(arg1):
if isspmatrix_coo(arg1) and copy:
self.row = arg1.row.copy()
self.col = arg1.col.copy()
self.data = arg1.data.copy()
self._shape = check_shape(arg1.shape)
else:
coo = arg1.tocoo()
self.row = coo.row
self.col = coo.col
self.data = coo.data
self._shape = check_shape(coo.shape)
self.has_canonical_format = False
else:
#dense argument
M = np.atleast_2d(np.asarray(arg1))
if M.ndim != 2:
raise TypeError('expected dimension <= 2 array or matrix')
else:
self._shape = check_shape(M.shape)
self.row, self.col = M.nonzero()
self.data = M[self.row, self.col]
self.has_canonical_format = True
if dtype is not None:
self.data = self.data.astype(dtype, copy=False)
self._check()
def reshape(self, *args, **kwargs):
shape = check_shape(args, self.shape)
order, copy = check_reshape_kwargs(kwargs)
# Return early if reshape is not required
if shape == self.shape:
if copy:
return self.copy()
else:
return self
nrows, ncols = self.shape
if order == 'C':
flat_indices = ncols * self.row + self.col
new_row, new_col = divmod(flat_indices, shape[1])
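            # e.g. a nonzero at (1, 2) in a 3x4 matrix has C-order flat
            # index 4*1 + 2 = 6; reshaping to (2, 6) puts it at
            # divmod(6, 6) = (1, 0).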
elif order == 'F':
flat_indices = self.row + nrows * self.col
new_col, new_row = divmod(flat_indices, shape[0])
else:
raise ValueError("'order' must be 'C' or 'F'")
# Handle copy here rather than passing on to the constructor so that no
# copy will be made of new_row and new_col regardless
if copy:
new_data = self.data.copy()
else:
new_data = self.data
return coo_matrix((new_data, (new_row, new_col)),
shape=shape, copy=False)
reshape.__doc__ = spmatrix.reshape.__doc__
def getnnz(self, axis=None):
if axis is None:
nnz = len(self.data)
if nnz != len(self.row) or nnz != len(self.col):
                raise ValueError('row, column, and data arrays must all be '
                                 'the same length')
if self.data.ndim != 1 or self.row.ndim != 1 or \
self.col.ndim != 1:
raise ValueError('row, column, and data arrays must be 1-D')
return int(nnz)
if axis < 0:
axis += 2
if axis == 0:
return np.bincount(downcast_intp_index(self.col),
minlength=self.shape[1])
elif axis == 1:
return np.bincount(downcast_intp_index(self.row),
minlength=self.shape[0])
else:
raise ValueError('axis out of bounds')
getnnz.__doc__ = spmatrix.getnnz.__doc__
def _check(self):
""" Checks data structure for consistency """
# index arrays should have integer data types
if self.row.dtype.kind != 'i':
warn("row index array has non-integer dtype (%s) "
% self.row.dtype.name)
if self.col.dtype.kind != 'i':
warn("col index array has non-integer dtype (%s) "
% self.col.dtype.name)
idx_dtype = get_index_dtype(maxval=max(self.shape))
self.row = np.asarray(self.row, dtype=idx_dtype)
self.col = np.asarray(self.col, dtype=idx_dtype)
self.data = to_native(self.data)
if self.nnz > 0:
if self.row.max() >= self.shape[0]:
raise ValueError('row index exceeds matrix dimensions')
if self.col.max() >= self.shape[1]:
raise ValueError('column index exceeds matrix dimensions')
if self.row.min() < 0:
raise ValueError('negative row index found')
if self.col.min() < 0:
raise ValueError('negative column index found')
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
M, N = self.shape
return coo_matrix((self.data, (self.col, self.row)),
shape=(N, M), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
def resize(self, *shape):
shape = check_shape(shape)
new_M, new_N = shape
M, N = self.shape
if new_M < M or new_N < N:
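            # Shrinking: drop stored entries that now fall outside the matrix.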
mask = np.logical_and(self.row < new_M, self.col < new_N)
if not mask.all():
self.row = self.row[mask]
self.col = self.col[mask]
self.data = self.data[mask]
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
def toarray(self, order=None, out=None):
"""See the docstring for `spmatrix.toarray`."""
B = self._process_toarray_args(order, out)
fortran = int(B.flags.f_contiguous)
if not fortran and not B.flags.c_contiguous:
raise ValueError("Output array must be C or F contiguous")
M,N = self.shape
coo_todense(M, N, self.nnz, self.row, self.col, self.data,
B.ravel('A'), fortran)
return B
def tocsc(self, copy=False):
"""Convert this matrix to Compressed Sparse Column format
Duplicate entries will be summed together.
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import coo_matrix
>>> row = array([0, 0, 1, 3, 1, 0, 0])
>>> col = array([0, 2, 1, 3, 1, 0, 0])
>>> data = array([1, 1, 1, 1, 1, 1, 1])
>>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsc()
>>> A.toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
from .csc import csc_matrix
if self.nnz == 0:
return csc_matrix(self.shape, dtype=self.dtype)
else:
M,N = self.shape
idx_dtype = get_index_dtype((self.col, self.row),
maxval=max(self.nnz, M))
row = self.row.astype(idx_dtype, copy=False)
col = self.col.astype(idx_dtype, copy=False)
indptr = np.empty(N + 1, dtype=idx_dtype)
indices = np.empty_like(row, dtype=idx_dtype)
data = np.empty_like(self.data, dtype=upcast(self.dtype))
coo_tocsr(N, M, self.nnz, col, row, self.data,
indptr, indices, data)
x = csc_matrix((data, indices, indptr), shape=self.shape)
if not self.has_canonical_format:
x.sum_duplicates()
return x
def tocsr(self, copy=False):
"""Convert this matrix to Compressed Sparse Row format
Duplicate entries will be summed together.
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import coo_matrix
>>> row = array([0, 0, 1, 3, 1, 0, 0])
>>> col = array([0, 2, 1, 3, 1, 0, 0])
>>> data = array([1, 1, 1, 1, 1, 1, 1])
>>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsr()
>>> A.toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
from .csr import csr_matrix
if self.nnz == 0:
return csr_matrix(self.shape, dtype=self.dtype)
else:
M,N = self.shape
idx_dtype = get_index_dtype((self.row, self.col),
maxval=max(self.nnz, N))
row = self.row.astype(idx_dtype, copy=False)
col = self.col.astype(idx_dtype, copy=False)
indptr = np.empty(M + 1, dtype=idx_dtype)
indices = np.empty_like(col, dtype=idx_dtype)
data = np.empty_like(self.data, dtype=upcast(self.dtype))
coo_tocsr(M, N, self.nnz, row, col, self.data,
indptr, indices, data)
x = csr_matrix((data, indices, indptr), shape=self.shape)
if not self.has_canonical_format:
x.sum_duplicates()
return x
def tocoo(self, copy=False):
if copy:
return self.copy()
else:
return self
tocoo.__doc__ = spmatrix.tocoo.__doc__
def todia(self, copy=False):
from .dia import dia_matrix
self.sum_duplicates()
ks = self.col - self.row # the diagonal for each nonzero
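        # e.g. an entry at (0, 2) lies on diagonal k = 2 and one at (3, 1)
        # on k = -2; np.unique below collects the distinct diagonals.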
diags, diag_idx = np.unique(ks, return_inverse=True)
if len(diags) > 100:
# probably undesired, should todia() have a maxdiags parameter?
warn("Constructing a DIA matrix with %d diagonals "
"is inefficient" % len(diags), SparseEfficiencyWarning)
#initialize and fill in data array
if self.data.size == 0:
data = np.zeros((0, 0), dtype=self.dtype)
else:
data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype)
data[diag_idx, self.col] = self.data
return dia_matrix((data,diags), shape=self.shape)
todia.__doc__ = spmatrix.todia.__doc__
def todok(self, copy=False):
from .dok import dok_matrix
self.sum_duplicates()
dok = dok_matrix((self.shape), dtype=self.dtype)
dok._update(izip(izip(self.row,self.col),self.data))
return dok
todok.__doc__ = spmatrix.todok.__doc__
def diagonal(self, k=0):
rows, cols = self.shape
if k <= -rows or k >= cols:
raise ValueError("k exceeds matrix dimensions")
diag = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
dtype=self.dtype)
diag_mask = (self.row + k) == self.col
if self.has_canonical_format:
row = self.row[diag_mask]
data = self.data[diag_mask]
else:
row, _, data = self._sum_duplicates(self.row[diag_mask],
self.col[diag_mask],
self.data[diag_mask])
diag[row + min(k, 0)] = data
return diag
diagonal.__doc__ = _data_matrix.diagonal.__doc__
def _setdiag(self, values, k):
M, N = self.shape
if values.ndim and not len(values):
return
idx_dtype = self.row.dtype
# Determine which triples to keep and where to put the new ones.
full_keep = self.col - self.row != k
if k < 0:
max_index = min(M+k, N)
if values.ndim:
max_index = min(max_index, len(values))
keep = np.logical_or(full_keep, self.col >= max_index)
new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
new_col = np.arange(max_index, dtype=idx_dtype)
else:
max_index = min(M, N-k)
if values.ndim:
max_index = min(max_index, len(values))
keep = np.logical_or(full_keep, self.row >= max_index)
new_row = np.arange(max_index, dtype=idx_dtype)
new_col = np.arange(k, k + max_index, dtype=idx_dtype)
# Define the array of data consisting of the entries to be added.
if values.ndim:
new_data = values[:max_index]
else:
new_data = np.empty(max_index, dtype=self.dtype)
new_data[:] = values
# Update the internal structure.
self.row = np.concatenate((self.row[keep], new_row))
self.col = np.concatenate((self.col[keep], new_col))
self.data = np.concatenate((self.data[keep], new_data))
self.has_canonical_format = False
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the index arrays
(i.e. .row and .col) are copied.
"""
if copy:
return coo_matrix((data, (self.row.copy(), self.col.copy())),
shape=self.shape, dtype=data.dtype)
else:
return coo_matrix((data, (self.row, self.col)),
shape=self.shape, dtype=data.dtype)
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
This is an *in place* operation
"""
if self.has_canonical_format:
return
summed = self._sum_duplicates(self.row, self.col, self.data)
self.row, self.col, self.data = summed
self.has_canonical_format = True
def _sum_duplicates(self, row, col, data):
# Assumes (data, row, col) not in canonical format.
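        # Sketch: lexsort the entries by (col, row), keep the first of each
        # unique (row, col) pair, and sum each run of duplicates with
        # np.add.reduceat (e.g. duplicate data [1, 1, 5] collapses to 7).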
if len(data) == 0:
return row, col, data
order = np.lexsort((row, col))
row = row[order]
col = col[order]
data = data[order]
unique_mask = ((row[1:] != row[:-1]) |
(col[1:] != col[:-1]))
unique_mask = np.append(True, unique_mask)
row = row[unique_mask]
col = col[unique_mask]
unique_inds, = np.nonzero(unique_mask)
data = np.add.reduceat(data, unique_inds, dtype=self.dtype)
return row, col, data
def eliminate_zeros(self):
"""Remove zero entries from the matrix
This is an *in place* operation
"""
mask = self.data != 0
self.data = self.data[mask]
self.row = self.row[mask]
self.col = self.col[mask]
#######################
# Arithmetic handlers #
#######################
def _add_dense(self, other):
if other.shape != self.shape:
raise ValueError('Incompatible shapes.')
dtype = upcast_char(self.dtype.char, other.dtype.char)
result = np.array(other, dtype=dtype, copy=True)
fortran = int(result.flags.f_contiguous)
M, N = self.shape
coo_todense(M, N, self.nnz, self.row, self.col, self.data,
result.ravel('A'), fortran)
return np.matrix(result, copy=False)
def _mul_vector(self, other):
#output array
result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
other.dtype.char))
coo_matvec(self.nnz, self.row, self.col, self.data, other, result)
return result
def _mul_multivector(self, other):
result = np.zeros((other.shape[1], self.shape[0]),
dtype=upcast_char(self.dtype.char, other.dtype.char))
for i, col in enumerate(other.T):
coo_matvec(self.nnz, self.row, self.col, self.data, col, result[i])
return result.T.view(type=type(other))
def isspmatrix_coo(x):
"""Is x of coo_matrix type?
Parameters
----------
x
object to check for being a coo matrix
Returns
-------
bool
True if x is a coo matrix, False otherwise
Examples
--------
>>> from scipy.sparse import coo_matrix, isspmatrix_coo
>>> isspmatrix_coo(coo_matrix([[5]]))
True
>>> from scipy.sparse import coo_matrix, csr_matrix, isspmatrix_coo
>>> isspmatrix_coo(csr_matrix([[5]]))
False
"""
return isinstance(x, coo_matrix)
| 21,427 | 34.301483 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/lil.py
|
"""LInked List sparse matrix class
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['lil_matrix','isspmatrix_lil']
from bisect import bisect_left
import numpy as np
from scipy._lib.six import xrange, zip
from .base import spmatrix, isspmatrix
from .sputils import (getdtype, isshape, isscalarlike, IndexMixin,
upcast_scalar, get_index_dtype, isintlike, check_shape,
check_reshape_kwargs)
from . import _csparsetools
class lil_matrix(spmatrix, IndexMixin):
"""Row-based linked list sparse matrix
This is a structure for constructing sparse matrices incrementally.
Note that inserting a single item can take linear time in the worst case;
to construct a matrix efficiently, make sure the items are pre-sorted by
index, per row.
This can be instantiated in several ways:
lil_matrix(D)
with a dense matrix or rank-2 ndarray D
lil_matrix(S)
with another sparse matrix S (equivalent to S.tolil())
lil_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
LIL format data array of the matrix
rows
LIL format row index array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the LIL format
- supports flexible slicing
- changes to the matrix sparsity structure are efficient
Disadvantages of the LIL format
- arithmetic operations LIL + LIL are slow (consider CSR or CSC)
- slow column slicing (consider CSC)
- slow matrix vector products (consider CSR or CSC)
Intended Usage
- LIL is a convenient format for constructing sparse matrices
- once a matrix has been constructed, convert to CSR or
CSC format for fast arithmetic and matrix vector operations
- consider using the COO format when constructing large matrices
Data Structure
- An array (``self.rows``) of rows, each of which is a sorted
list of column indices of non-zero elements.
- The corresponding nonzero values are stored in similar
fashion in ``self.data``.
"""
format = 'lil'
def __init__(self, arg1, shape=None, dtype=None, copy=False):
spmatrix.__init__(self)
self.dtype = getdtype(dtype, arg1, default=float)
# First get the shape
if isspmatrix(arg1):
if isspmatrix_lil(arg1) and copy:
A = arg1.copy()
else:
A = arg1.tolil()
if dtype is not None:
A = A.astype(dtype)
self._shape = check_shape(A.shape)
self.dtype = A.dtype
self.rows = A.rows
self.data = A.data
elif isinstance(arg1,tuple):
if isshape(arg1):
if shape is not None:
raise ValueError('invalid use of shape parameter')
M, N = arg1
self._shape = check_shape((M, N))
self.rows = np.empty((M,), dtype=object)
self.data = np.empty((M,), dtype=object)
for i in range(M):
self.rows[i] = []
self.data[i] = []
else:
raise TypeError('unrecognized lil_matrix constructor usage')
else:
# assume A is dense
try:
A = np.asmatrix(arg1)
except TypeError:
raise TypeError('unsupported matrix type')
else:
from .csr import csr_matrix
A = csr_matrix(A, dtype=dtype).tolil()
self._shape = check_shape(A.shape)
self.dtype = A.dtype
self.rows = A.rows
self.data = A.data
def __iadd__(self,other):
self[:,:] = self + other
return self
def __isub__(self,other):
self[:,:] = self - other
return self
def __imul__(self,other):
if isscalarlike(other):
self[:,:] = self * other
return self
else:
return NotImplemented
def __itruediv__(self,other):
if isscalarlike(other):
self[:,:] = self / other
return self
else:
return NotImplemented
# Whenever the dimensions change, empty lists should be created for each
# row
def getnnz(self, axis=None):
if axis is None:
return sum([len(rowvals) for rowvals in self.data])
if axis < 0:
axis += 2
if axis == 0:
out = np.zeros(self.shape[1], dtype=np.intp)
for row in self.rows:
out[row] += 1
return out
elif axis == 1:
return np.array([len(rowvals) for rowvals in self.data], dtype=np.intp)
else:
raise ValueError('axis out of bounds')
def count_nonzero(self):
return sum(np.count_nonzero(rowvals) for rowvals in self.data)
getnnz.__doc__ = spmatrix.getnnz.__doc__
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def __str__(self):
val = ''
for i, row in enumerate(self.rows):
for pos, j in enumerate(row):
val += " %s\t%s\n" % (str((i, j)), str(self.data[i][pos]))
return val[:-1]
def getrowview(self, i):
"""Returns a view of the 'i'th row (without copying).
"""
new = lil_matrix((1, self.shape[1]), dtype=self.dtype)
new.rows[0] = self.rows[i]
new.data[0] = self.data[i]
return new
def getrow(self, i):
"""Returns a copy of the 'i'th row.
"""
i = self._check_row_bounds(i)
new = lil_matrix((1, self.shape[1]), dtype=self.dtype)
new.rows[0] = self.rows[i][:]
new.data[0] = self.data[i][:]
return new
def _check_row_bounds(self, i):
if i < 0:
i += self.shape[0]
if i < 0 or i >= self.shape[0]:
raise IndexError('row index out of bounds')
return i
def _check_col_bounds(self, j):
if j < 0:
j += self.shape[1]
if j < 0 or j >= self.shape[1]:
raise IndexError('column index out of bounds')
return j
def __getitem__(self, index):
"""Return the element(s) index=(i, j), where j may be a slice.
This always returns a copy for consistency, since slices into
Python lists return copies.
"""
# Scalar fast path first
if isinstance(index, tuple) and len(index) == 2:
i, j = index
# Use isinstance checks for common index types; this is
# ~25-50% faster than isscalarlike. Other types are
# handled below.
if ((isinstance(i, int) or isinstance(i, np.integer)) and
(isinstance(j, int) or isinstance(j, np.integer))):
v = _csparsetools.lil_get1(self.shape[0], self.shape[1],
self.rows, self.data,
i, j)
return self.dtype.type(v)
# Utilities found in IndexMixin
i, j = self._unpack_index(index)
# Proper check for other scalar index types
i_intlike = isintlike(i)
j_intlike = isintlike(j)
if i_intlike and j_intlike:
v = _csparsetools.lil_get1(self.shape[0], self.shape[1],
self.rows, self.data,
i, j)
return self.dtype.type(v)
elif j_intlike or isinstance(j, slice):
# column slicing fast path
if j_intlike:
j = self._check_col_bounds(j)
j = slice(j, j+1)
if i_intlike:
i = self._check_row_bounds(i)
i = xrange(i, i+1)
i_shape = None
elif isinstance(i, slice):
i = xrange(*i.indices(self.shape[0]))
i_shape = None
else:
i = np.atleast_1d(i)
i_shape = i.shape
if i_shape is None or len(i_shape) == 1:
return self._get_row_ranges(i, j)
i, j = self._index_to_arrays(i, j)
if i.size == 0:
return lil_matrix(i.shape, dtype=self.dtype)
new = lil_matrix(i.shape, dtype=self.dtype)
i, j = _prepare_index_for_memoryview(i, j)
_csparsetools.lil_fancy_get(self.shape[0], self.shape[1],
self.rows, self.data,
new.rows, new.data,
i, j)
return new
def _get_row_ranges(self, rows, col_slice):
"""
Fast path for indexing in the case where column index is slice.
This gains performance improvement over brute force by more
efficient skipping of zeros, by accessing the elements
column-wise in order.
Parameters
----------
rows : sequence or xrange
Rows indexed. If xrange, must be within valid bounds.
col_slice : slice
Columns indexed
"""
j_start, j_stop, j_stride = col_slice.indices(self.shape[1])
col_range = xrange(j_start, j_stop, j_stride)
nj = len(col_range)
new = lil_matrix((len(rows), nj), dtype=self.dtype)
_csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1],
self.rows, self.data,
new.rows, new.data,
rows,
j_start, j_stop, j_stride, nj)
return new
def __setitem__(self, index, x):
# Scalar fast path first
if isinstance(index, tuple) and len(index) == 2:
i, j = index
# Use isinstance checks for common index types; this is
# ~25-50% faster than isscalarlike. Scalar index
# assignment for other types is handled below together
# with fancy indexing.
if ((isinstance(i, int) or isinstance(i, np.integer)) and
(isinstance(j, int) or isinstance(j, np.integer))):
x = self.dtype.type(x)
if x.size > 1:
# Triggered if input was an ndarray
raise ValueError("Trying to assign a sequence to an item")
_csparsetools.lil_insert(self.shape[0], self.shape[1],
self.rows, self.data, i, j, x)
return
# General indexing
i, j = self._unpack_index(index)
# shortcut for common case of full matrix assign:
if (isspmatrix(x) and isinstance(i, slice) and i == slice(None) and
isinstance(j, slice) and j == slice(None)
and x.shape == self.shape):
x = lil_matrix(x, dtype=self.dtype)
self.rows = x.rows
self.data = x.data
return
i, j = self._index_to_arrays(i, j)
if isspmatrix(x):
x = x.toarray()
# Make x and i into the same shape
x = np.asarray(x, dtype=self.dtype)
x, _ = np.broadcast_arrays(x, i)
if x.shape != i.shape:
raise ValueError("shape mismatch in assignment")
# Set values
i, j, x = _prepare_index_for_memoryview(i, j, x)
_csparsetools.lil_fancy_set(self.shape[0], self.shape[1],
self.rows, self.data,
i, j, x)
def _mul_scalar(self, other):
if other == 0:
# Multiply by zero: return the zero matrix
new = lil_matrix(self.shape, dtype=self.dtype)
else:
res_dtype = upcast_scalar(self.dtype, other)
new = self.copy()
new = new.astype(res_dtype)
# Multiply this scalar by every element.
for j, rowvals in enumerate(new.data):
new.data[j] = [val*other for val in rowvals]
return new
def __truediv__(self, other): # self / other
if isscalarlike(other):
new = self.copy()
# Divide every element by this scalar
for j, rowvals in enumerate(new.data):
new.data[j] = [val/other for val in rowvals]
return new
else:
return self.tocsr() / other
def copy(self):
from copy import deepcopy
new = lil_matrix(self.shape, dtype=self.dtype)
new.data = deepcopy(self.data)
new.rows = deepcopy(self.rows)
return new
copy.__doc__ = spmatrix.copy.__doc__
def reshape(self, *args, **kwargs):
shape = check_shape(args, self.shape)
order, copy = check_reshape_kwargs(kwargs)
# Return early if reshape is not required
if shape == self.shape:
if copy:
return self.copy()
else:
return self
new = lil_matrix(shape, dtype=self.dtype)
if order == 'C':
ncols = self.shape[1]
for i, row in enumerate(self.rows):
for col, j in enumerate(row):
new_r, new_c = np.unravel_index(i * ncols + j, shape)
new[new_r, new_c] = self[i, j]
elif order == 'F':
nrows = self.shape[0]
for i, row in enumerate(self.rows):
for col, j in enumerate(row):
new_r, new_c = np.unravel_index(i + j * nrows, shape, order)
new[new_r, new_c] = self[i, j]
else:
raise ValueError("'order' must be 'C' or 'F'")
return new
reshape.__doc__ = spmatrix.reshape.__doc__
def resize(self, *shape):
shape = check_shape(shape)
new_M, new_N = shape
M, N = self.shape
if new_M < M:
self.rows = self.rows[:new_M]
self.data = self.data[:new_M]
elif new_M > M:
self.rows = np.resize(self.rows, new_M)
self.data = np.resize(self.data, new_M)
for i in range(M, new_M):
self.rows[i] = []
self.data[i] = []
if new_N < N:
for row, data in zip(self.rows, self.data):
trunc = bisect_left(row, new_N)
del row[trunc:]
del data[trunc:]
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
def toarray(self, order=None, out=None):
d = self._process_toarray_args(order, out)
for i, row in enumerate(self.rows):
for pos, j in enumerate(row):
d[i, j] = self.data[i][pos]
return d
toarray.__doc__ = spmatrix.toarray.__doc__
def transpose(self, axes=None, copy=False):
return self.tocsr(copy=copy).transpose(axes=axes, copy=False).tolil(copy=False)
transpose.__doc__ = spmatrix.transpose.__doc__
def tolil(self, copy=False):
if copy:
return self.copy()
else:
return self
tolil.__doc__ = spmatrix.tolil.__doc__
def tocsr(self, copy=False):
lst = [len(x) for x in self.rows]
idx_dtype = get_index_dtype(maxval=max(self.shape[1], sum(lst)))
indptr = np.cumsum([0] + lst, dtype=idx_dtype)
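        # e.g. row lengths [2, 0, 1] give indptr [0, 2, 2, 3].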
indices = np.array([x for y in self.rows for x in y], dtype=idx_dtype)
data = np.array([x for y in self.data for x in y], dtype=self.dtype)
from .csr import csr_matrix
return csr_matrix((data, indices, indptr), shape=self.shape)
tocsr.__doc__ = spmatrix.tocsr.__doc__
def _prepare_index_for_memoryview(i, j, x=None):
"""
Convert index and data arrays to form suitable for passing to the
Cython fancy getset routines.
    The conversions are necessary to (i) ensure the integer index
    arrays are of one of the accepted types, and (ii) ensure the
    arrays are writable so that Cython memoryview support doesn't
    choke on them.
Parameters
----------
i, j
Index arrays
x : optional
Data arrays
Returns
-------
i, j, x
Re-formatted arrays (x is omitted, if input was None)
"""
if i.dtype > j.dtype:
j = j.astype(i.dtype)
elif i.dtype < j.dtype:
i = i.astype(j.dtype)
if not i.flags.writeable or i.dtype not in (np.int32, np.int64):
i = i.astype(np.intp)
if not j.flags.writeable or j.dtype not in (np.int32, np.int64):
j = j.astype(np.intp)
if x is not None:
if not x.flags.writeable:
x = x.copy()
return i, j, x
else:
return i, j
def isspmatrix_lil(x):
"""Is x of lil_matrix type?
Parameters
----------
x
object to check for being a lil matrix
Returns
-------
bool
True if x is a lil matrix, False otherwise
Examples
--------
>>> from scipy.sparse import lil_matrix, isspmatrix_lil
>>> isspmatrix_lil(lil_matrix([[5]]))
True
>>> from scipy.sparse import lil_matrix, csr_matrix, isspmatrix_lil
>>> isspmatrix_lil(csr_matrix([[5]]))
False
"""
return isinstance(x, lil_matrix)
| 17,782 | 31.391621 | 87 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/setup.py
|
from __future__ import division, print_function, absolute_import
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('csgraph', parent_package, top_path)
config.add_data_dir('tests')
config.add_extension('_shortest_path',
sources=['_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('_traversal',
sources=['_traversal.c'],
include_dirs=[numpy.get_include()])
config.add_extension('_min_spanning_tree',
sources=['_min_spanning_tree.c'],
include_dirs=[numpy.get_include()])
config.add_extension('_reordering',
sources=['_reordering.c'],
include_dirs=[numpy.get_include()])
config.add_extension('_tools',
sources=['_tools.c'],
include_dirs=[numpy.get_include()])
return config
| 933 | 27.30303 | 64 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/_validation.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc
from ._tools import csgraph_to_dense, csgraph_from_dense,\
csgraph_masked_from_dense, csgraph_from_masked
DTYPE = np.float64
def validate_graph(csgraph, directed, dtype=DTYPE,
csr_output=True, dense_output=True,
copy_if_dense=False, copy_if_sparse=False,
null_value_in=0, null_value_out=np.inf,
infinity_null=True, nan_null=True):
"""Routine for validation and conversion of csgraph inputs"""
if not (csr_output or dense_output):
raise ValueError("Internal: dense or csr output must be true")
# if undirected and csc storage, then transposing in-place
# is quicker than later converting to csr.
if (not directed) and isspmatrix_csc(csgraph):
csgraph = csgraph.T
if isspmatrix(csgraph):
if csr_output:
csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse)
else:
csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)
elif np.ma.isMaskedArray(csgraph):
if dense_output:
mask = csgraph.mask
csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)
csgraph[mask] = null_value_out
else:
csgraph = csgraph_from_masked(csgraph)
else:
if dense_output:
csgraph = csgraph_masked_from_dense(csgraph,
copy=copy_if_dense,
null_value=null_value_in,
nan_null=nan_null,
infinity_null=infinity_null)
mask = csgraph.mask
csgraph = np.asarray(csgraph.data, dtype=DTYPE)
csgraph[mask] = null_value_out
else:
csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,
infinity_null=infinity_null,
nan_null=nan_null)
if csgraph.ndim != 2:
raise ValueError("compressed-sparse graph must be two dimensional")
if csgraph.shape[0] != csgraph.shape[1]:
raise ValueError("compressed-sparse graph must be shape (N, N)")
return csgraph
| 2,405 | 39.779661 | 77 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/_laplacian.py
|
"""
Laplacian of a compressed-sparse graph
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import isspmatrix
###############################################################################
# Graph laplacian
def laplacian(csgraph, normed=False, return_diag=False, use_out_degree=False):
"""
Return the Laplacian matrix of a directed graph.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then also return an array related to vertex degrees.
use_out_degree : bool, optional
If True, then use out-degree instead of in-degree.
This distinction matters only if the graph is asymmetric.
Default: False.
Returns
-------
lap : ndarray or sparse matrix
The N x N laplacian matrix of csgraph. It will be a numpy array (dense)
if the input was dense, or a sparse matrix otherwise.
diag : ndarray, optional
The length-N diagonal of the Laplacian matrix.
For the normalized Laplacian, this is the array of square roots
of vertex degrees or 1 if the degree is zero.
Notes
-----
    The Laplacian matrix of a graph is sometimes referred to as the
    "Kirchhoff matrix" or the "admittance matrix", and is useful in many
    parts of spectral graph theory. In particular, the eigendecomposition
    of the Laplacian matrix can give insight into many properties of the graph.
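    For an adjacency matrix ``A`` with diagonal degree matrix ``D``, the
    unnormalized Laplacian is ``L = D - A``, and the normalized form is
    ``I - D^(-1/2) A D^(-1/2)`` (degrees here exclude self-loops).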
Examples
--------
>>> from scipy.sparse import csgraph
>>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
>>> G
array([[ 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4],
[ 0, 2, 4, 6, 8],
[ 0, 3, 6, 9, 12],
[ 0, 4, 8, 12, 16]])
>>> csgraph.laplacian(G, normed=False)
array([[ 0, 0, 0, 0, 0],
[ 0, 9, -2, -3, -4],
[ 0, -2, 16, -6, -8],
[ 0, -3, -6, 21, -12],
[ 0, -4, -8, -12, 24]])
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.signedinteger)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = csgraph.astype(float)
create_lap = _laplacian_sparse if isspmatrix(csgraph) else _laplacian_dense
degree_axis = 1 if use_out_degree else 0
lap, d = create_lap(csgraph, normed=normed, axis=degree_axis)
if return_diag:
return lap, d
return lap
def _setdiag_dense(A, d):
A.flat[::len(d)+1] = d
def _laplacian_sparse(graph, normed=False, axis=0):
if graph.format in ('lil', 'dok'):
m = graph.tocoo()
needs_copy = False
else:
m = graph
needs_copy = True
w = m.sum(axis=axis).getA1() - m.diagonal()
if normed:
m = m.tocoo(copy=needs_copy)
isolated_node_mask = (w == 0)
w = np.where(isolated_node_mask, 1, np.sqrt(w))
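        # Scale entries by 1/sqrt(deg(row) * deg(col)), negate, and set the
        # diagonal to form I - D^(-1/2) A D^(-1/2); isolated nodes get a
        # placeholder degree of 1 so the division is well defined.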
m.data /= w[m.row]
m.data /= w[m.col]
m.data *= -1
m.setdiag(1 - isolated_node_mask)
else:
if m.format == 'dia':
m = m.copy()
else:
m = m.tocoo(copy=needs_copy)
m.data *= -1
m.setdiag(w)
return m, w
def _laplacian_dense(graph, normed=False, axis=0):
m = np.array(graph)
np.fill_diagonal(m, 0)
w = m.sum(axis=axis)
if normed:
isolated_node_mask = (w == 0)
w = np.where(isolated_node_mask, 1, np.sqrt(w))
m /= w
m /= w[:, np.newaxis]
m *= -1
_setdiag_dense(m, 1 - isolated_node_mask)
else:
m *= -1
_setdiag_dense(m, w)
return m, w
| 4,081 | 30.643411 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/__init__.py
|
r"""
==============================================================
Compressed Sparse Graph Routines (:mod:`scipy.sparse.csgraph`)
==============================================================
.. currentmodule:: scipy.sparse.csgraph
Fast graph algorithms based on sparse matrix representations.
Contents
========
.. autosummary::
:toctree: generated/
connected_components -- determine connected components of a graph
laplacian -- compute the laplacian of a graph
shortest_path -- compute the shortest path between points on a positive graph
dijkstra -- use Dijkstra's algorithm for shortest path
floyd_warshall -- use the Floyd-Warshall algorithm for shortest path
bellman_ford -- use the Bellman-Ford algorithm for shortest path
johnson -- use Johnson's algorithm for shortest path
breadth_first_order -- compute a breadth-first order of nodes
depth_first_order -- compute a depth-first order of nodes
breadth_first_tree -- construct the breadth-first tree from a given node
depth_first_tree -- construct a depth-first tree from a given node
minimum_spanning_tree -- construct the minimum spanning tree of a graph
reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering
maximum_bipartite_matching -- compute permutation to make diagonal zero free
structural_rank -- compute the structural rank of a graph
NegativeCycleError
.. autosummary::
:toctree: generated/
construct_dist_matrix
csgraph_from_dense
csgraph_from_masked
csgraph_masked_from_dense
csgraph_to_dense
csgraph_to_masked
reconstruct_path
Graph Representations
=====================
This module uses graphs which are stored in a matrix format. A
graph with N nodes can be represented by an (N x N) adjacency matrix G.
If there is a connection from node i to node j, then G[i, j] = w, where
w is the weight of the connection. For nodes i and j which are
not connected, the value depends on the representation:
- for dense array representations, non-edges are represented by
G[i, j] = 0, infinity, or NaN.
- for dense masked representations (of type np.ma.MaskedArray), non-edges
are represented by masked values. This can be useful when graphs with
zero-weight edges are desired.
- for sparse array representations, non-edges are represented by
non-entries in the matrix. This sort of sparse representation also
allows for edges with zero weights.
As a concrete example, imagine that you would like to represent the following
undirected graph::
G
(0)
/ \
1 2
/ \
(2) (1)
This graph has three nodes, where node 0 and 1 are connected by an edge of
weight 2, and nodes 0 and 2 are connected by an edge of weight 1.
We can construct the dense, masked, and sparse representations as follows,
keeping in mind that an undirected graph is represented by a symmetric matrix::
>>> G_dense = np.array([[0, 2, 1],
... [2, 0, 0],
... [1, 0, 0]])
>>> G_masked = np.ma.masked_values(G_dense, 0)
>>> from scipy.sparse import csr_matrix
>>> G_sparse = csr_matrix(G_dense)
This becomes more difficult when zero edges are significant. For example,
consider the situation when we slightly modify the above graph::
G2
(0)
/ \
0 2
/ \
(2) (1)
This is identical to the previous graph, except nodes 0 and 2 are connected
by an edge of zero weight. In this case, the dense representation above
leads to ambiguities: how can non-edges be represented if zero is a meaningful
value? In this case, either a masked or sparse representation must be used
to eliminate the ambiguity::
>>> G2_data = np.array([[np.inf, 2, 0 ],
... [2, np.inf, np.inf],
... [0, np.inf, np.inf]])
>>> G2_masked = np.ma.masked_invalid(G2_data)
>>> from scipy.sparse.csgraph import csgraph_from_dense
>>> # G2_sparse = csr_matrix(G2_data) would give the wrong result
>>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
>>> G2_sparse.data
array([ 2., 0., 2., 0.])
Here we have used a utility routine from the csgraph submodule in order to
convert the dense representation to a sparse representation which can be
understood by the algorithms in the submodule. By viewing the data array, we
can see that the zero values are explicitly encoded in the graph.
Directed vs. Undirected
-----------------------
Matrices may represent either directed or undirected graphs. This is
specified throughout the csgraph module by a boolean keyword. Graphs are
assumed to be directed by default. In a directed graph, traversal from node
i to node j can be accomplished over the edge G[i, j], but not the edge
G[j, i]. In a non-directed graph, traversal from node i to node j can be
accomplished over either G[i, j] or G[j, i]. If both edges are non-null
and have unequal weights, then the smaller of the two is used.
Note that a symmetric matrix will represent an undirected graph, regardless
of whether the 'directed' keyword is set to True or False. In this case,
using ``directed=True`` generally leads to more efficient computation.
The routines in this module accept as input either scipy.sparse representations
(csr, csc, or lil format), masked representations, or dense representations
with non-edges indicated by zeros, infinities, and NaN entries.
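As a small, illustrative sketch of the ``directed`` keyword using
``shortest_path``:
>>> from scipy.sparse.csgraph import shortest_path
>>> G = np.array([[0, 1], [0, 0]])
>>> shortest_path(G, directed=True)[1, 0]
inf
>>> shortest_path(G, directed=False)[1, 0]
1.0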
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['connected_components',
'laplacian',
'shortest_path',
'floyd_warshall',
'dijkstra',
'bellman_ford',
'johnson',
'breadth_first_order',
'depth_first_order',
'breadth_first_tree',
'depth_first_tree',
'minimum_spanning_tree',
'reverse_cuthill_mckee',
'maximum_bipartite_matching',
'structural_rank',
'construct_dist_matrix',
'reconstruct_path',
'csgraph_masked_from_dense',
'csgraph_from_dense',
'csgraph_from_masked',
'csgraph_to_dense',
'csgraph_to_masked',
'NegativeCycleError']
from ._laplacian import laplacian
from ._shortest_path import shortest_path, floyd_warshall, dijkstra,\
bellman_ford, johnson, NegativeCycleError
from ._traversal import breadth_first_order, depth_first_order, \
breadth_first_tree, depth_first_tree, connected_components
from ._min_spanning_tree import minimum_spanning_tree
from ._reordering import reverse_cuthill_mckee, maximum_bipartite_matching, \
structural_rank
from ._tools import construct_dist_matrix, reconstruct_path,\
csgraph_from_dense, csgraph_to_dense, csgraph_masked_from_dense,\
csgraph_from_masked, csgraph_to_masked
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 7,086 | 38.592179 | 82 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py
|
"""Test the minimum spanning tree function"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_
import numpy.testing as npt
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
def test_minimum_spanning_tree():
# Create a graph with two connected components.
graph = [[0,1,0,0,0],
[1,0,0,0,0],
[0,0,0,8,5],
[0,0,8,0,1],
[0,0,5,1,0]]
graph = np.asarray(graph)
# Create the expected spanning tree.
expected = [[0,1,0,0,0],
[0,0,0,0,0],
[0,0,0,0,5],
[0,0,0,0,1],
[0,0,0,0,0]]
expected = np.asarray(expected)
# Ensure minimum spanning tree code gives this expected output.
csgraph = csr_matrix(graph)
mintree = minimum_spanning_tree(csgraph)
npt.assert_array_equal(mintree.todense(), expected,
'Incorrect spanning tree found.')
# Ensure that the original graph was not modified.
npt.assert_array_equal(csgraph.todense(), graph,
'Original graph was modified.')
# Now let the algorithm modify the csgraph in place.
mintree = minimum_spanning_tree(csgraph, overwrite=True)
npt.assert_array_equal(mintree.todense(), expected,
'Graph was not properly modified to contain MST.')
np.random.seed(1234)
for N in (5, 10, 15, 20):
# Create a random graph.
graph = 3 + np.random.random((N, N))
csgraph = csr_matrix(graph)
# The spanning tree has at most N - 1 edges.
mintree = minimum_spanning_tree(csgraph)
assert_(mintree.nnz < N)
# Set the sub diagonal to 1 to create a known spanning tree.
idx = np.arange(N-1)
graph[idx,idx+1] = 1
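        # All random edges weigh at least 3, so the weight-1 superdiagonal
        # edges form the unique minimum spanning tree checked below.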
csgraph = csr_matrix(graph)
mintree = minimum_spanning_tree(csgraph)
        # We expect to see this pattern in the spanning tree, and zeros
        # elsewhere.
expected = np.zeros((N, N))
expected[idx, idx+1] = 1
npt.assert_array_equal(mintree.todense(), expected,
'Incorrect spanning tree found.')
| 2,181 | 31.088235 | 74 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/tests/test_conversions.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import csgraph_from_dense, csgraph_to_dense
def test_csgraph_from_dense():
np.random.seed(1234)
G = np.random.random((10, 10))
some_nulls = (G < 0.4)
all_nulls = (G < 0.8)
for null_value in [0, np.nan, np.inf]:
G[all_nulls] = null_value
olderr = np.seterr(invalid="ignore")
try:
G_csr = csgraph_from_dense(G, null_value=0)
finally:
np.seterr(**olderr)
G[all_nulls] = 0
assert_array_almost_equal(G, G_csr.toarray())
for null_value in [np.nan, np.inf]:
G[all_nulls] = 0
G[some_nulls] = null_value
olderr = np.seterr(invalid="ignore")
try:
G_csr = csgraph_from_dense(G, null_value=0)
finally:
np.seterr(**olderr)
G[all_nulls] = 0
assert_array_almost_equal(G, G_csr.toarray())
def test_csgraph_to_dense():
np.random.seed(1234)
G = np.random.random((10, 10))
nulls = (G < 0.8)
G[nulls] = np.inf
G_csr = csgraph_from_dense(G)
for null_value in [0, 10, -np.inf, np.inf]:
G[nulls] = null_value
assert_array_almost_equal(G, csgraph_to_dense(G_csr, null_value))
def test_multiple_edges():
    # Create a random square matrix with an even number of elements.
np.random.seed(1234)
X = np.random.random((10, 10))
Xcsr = csr_matrix(X)
# now double-up every other column
Xcsr.indices[::2] = Xcsr.indices[1::2]
# normal sparse toarray() will sum the duplicated edges
Xdense = Xcsr.toarray()
assert_array_almost_equal(Xdense[:, 1::2],
X[:, ::2] + X[:, 1::2])
# csgraph_to_dense chooses the minimum of each duplicated edge
Xdense = csgraph_to_dense(Xcsr)
assert_array_almost_equal(Xdense[:, 1::2],
np.minimum(X[:, ::2], X[:, 1::2]))
| 2,047 | 28.257143 | 73 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py
|
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal
from pytest import raises as assert_raises
from scipy import sparse
from scipy.sparse import csgraph
def _explicit_laplacian(x, normed=False):
if sparse.issparse(x):
x = x.todense()
x = np.asarray(x)
y = -1.0 * x
for j in range(y.shape[0]):
y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
if normed:
d = np.diag(y).copy()
d[d == 0] = 1.0
y /= d[:,None]**.5
y /= d[None,:]**.5
return y
def _check_symmetric_graph_laplacian(mat, normed):
if not hasattr(mat, 'shape'):
mat = eval(mat, dict(np=np, sparse=sparse))
if sparse.issparse(mat):
sp_mat = mat
mat = sp_mat.todense()
else:
sp_mat = sparse.csr_matrix(mat)
laplacian = csgraph.laplacian(mat, normed=normed)
n_nodes = mat.shape[0]
if not normed:
assert_array_almost_equal(laplacian.sum(axis=0), np.zeros(n_nodes))
assert_array_almost_equal(laplacian.T, laplacian)
assert_array_almost_equal(laplacian,
csgraph.laplacian(sp_mat, normed=normed).todense())
assert_array_almost_equal(laplacian,
_explicit_laplacian(mat, normed=normed))
def test_laplacian_value_error():
for t in int, float, complex:
for m in ([1, 1],
[[[1]]],
[[1, 2, 3], [4, 5, 6]],
[[1, 2], [3, 4], [5, 5]]):
A = np.array(m, dtype=t)
assert_raises(ValueError, csgraph.laplacian, A)
def test_symmetric_graph_laplacian():
symmetric_mats = ('np.arange(10) * np.arange(10)[:, np.newaxis]',
'np.ones((7, 7))',
'np.eye(19)',
'sparse.diags([1, 1], [-1, 1], shape=(4,4))',
'sparse.diags([1, 1], [-1, 1], shape=(4,4)).todense()',
'np.asarray(sparse.diags([1, 1], [-1, 1], shape=(4,4)).todense())',
'np.vander(np.arange(4)) + np.vander(np.arange(4)).T')
for mat_str in symmetric_mats:
for normed in True, False:
_check_symmetric_graph_laplacian(mat_str, normed)
def _assert_allclose_sparse(a, b, **kwargs):
# helper function that can deal with sparse matrices
if sparse.issparse(a):
a = a.toarray()
if sparse.issparse(b):
        b = b.toarray()
assert_allclose(a, b, **kwargs)
def _check_laplacian(A, desired_L, desired_d, normed, use_out_degree):
for arr_type in np.array, sparse.csr_matrix, sparse.coo_matrix:
for t in int, float, complex:
adj = arr_type(A, dtype=t)
L = csgraph.laplacian(adj, normed=normed, return_diag=False,
use_out_degree=use_out_degree)
_assert_allclose_sparse(L, desired_L, atol=1e-12)
L, d = csgraph.laplacian(adj, normed=normed, return_diag=True,
use_out_degree=use_out_degree)
_assert_allclose_sparse(L, desired_L, atol=1e-12)
_assert_allclose_sparse(d, desired_d, atol=1e-12)
def test_asymmetric_laplacian():
# adjacency matrix
A = [[0, 1, 0],
[4, 2, 0],
[0, 0, 0]]
# Laplacian matrix using out-degree
L = [[1, -1, 0],
[-4, 4, 0],
[0, 0, 0]]
d = [1, 4, 0]
_check_laplacian(A, L, d, normed=False, use_out_degree=True)
# normalized Laplacian matrix using out-degree
L = [[1, -0.5, 0],
[-2, 1, 0],
[0, 0, 0]]
d = [1, 2, 1]
_check_laplacian(A, L, d, normed=True, use_out_degree=True)
# Laplacian matrix using in-degree
L = [[4, -1, 0],
[-4, 1, 0],
[0, 0, 0]]
d = [4, 1, 0]
_check_laplacian(A, L, d, normed=False, use_out_degree=False)
# normalized Laplacian matrix using in-degree
L = [[1, -0.5, 0],
[-2, 1, 0],
[0, 0, 0]]
d = [2, 1, 1]
_check_laplacian(A, L, d, normed=True, use_out_degree=False)
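# Editor's sketch of the convention the cases above encode (assumed formula,
# not from the original file): with use_out_degree=True the unnormalized
# Laplacian is diag(out_degree) - A, with self-loop weights excluded.
def test_out_degree_laplacian_formula_example():
    A = np.array([[0., 1., 0.],
                  [4., 2., 0.],
                  [0., 0., 0.]])
    A_off = A - np.diag(np.diag(A))  # drop the self-loop at node 1
    L_expected = np.diag(A_off.sum(axis=1)) - A_off
    _assert_allclose_sparse(csgraph.laplacian(A, use_out_degree=True),
                            L_expected, atol=1e-12)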
def test_sparse_formats():
for fmt in ('csr', 'csc', 'coo', 'lil', 'dok', 'dia', 'bsr'):
mat = sparse.diags([1, 1], [-1, 1], shape=(4,4), format=fmt)
for normed in True, False:
_check_symmetric_graph_laplacian(mat, normed)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, assert_array_almost_equal
from scipy.sparse import csgraph
def test_weak_connections():
Xde = np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
Xsp = csgraph.csgraph_from_dense(Xde, null_value=0)
for X in Xsp, Xde:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='weak')
assert_equal(n_components, 2)
assert_array_almost_equal(labels, [0, 0, 1])
def test_strong_connections():
X1de = np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
X2de = X1de + X1de.T
X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)
X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)
for X in X1sp, X1de:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='strong')
assert_equal(n_components, 3)
labels.sort()
assert_array_almost_equal(labels, [0, 1, 2])
for X in X2sp, X2de:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='strong')
assert_equal(n_components, 2)
labels.sort()
assert_array_almost_equal(labels, [0, 0, 1])
def test_strong_connections2():
X = np.array([[0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0]])
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='strong')
assert_equal(n_components, 5)
labels.sort()
assert_array_almost_equal(labels, [0, 1, 2, 2, 3, 4])
def test_weak_connections2():
X = np.array([[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0]])
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='weak')
assert_equal(n_components, 2)
labels.sort()
assert_array_almost_equal(labels, [0, 0, 1, 1, 1, 1])
def test_ticket1876():
    # Regression test: this failed in the original implementation.
    # There should be two strongly-connected components; it previously found one.
g = np.array([[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 1, 0]])
n_components, labels = csgraph.connected_components(g, connection='strong')
assert_equal(n_components, 2)
assert_equal(labels[0], labels[1])
assert_equal(labels[2], labels[3])
def test_fully_connected_graph():
# Fully connected dense matrices raised an exception.
# https://github.com/scipy/scipy/issues/3818
g = np.ones((4, 4))
n_components, labels = csgraph.connected_components(g)
assert_equal(n_components, 1)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/tests/test_reordering.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal
from scipy.sparse.csgraph import (reverse_cuthill_mckee,
maximum_bipartite_matching, structural_rank)
from scipy.sparse import diags, csc_matrix, csr_matrix, coo_matrix
def test_graph_reverse_cuthill_mckee():
A = np.array([[1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)
graph = csr_matrix(A)
perm = reverse_cuthill_mckee(graph)
correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0])
assert_equal(perm, correct_perm)
# Test int64 indices input
graph.indices = graph.indices.astype('int64')
graph.indptr = graph.indptr.astype('int64')
perm = reverse_cuthill_mckee(graph, True)
assert_equal(perm, correct_perm)
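# Editor's addition (illustrative): the purpose of the reverse Cuthill-McKee
# ordering is bandwidth reduction.  For this arrow-like matrix the lone
# far-off-diagonal entry ends up closer to the diagonal after permuting.
def test_reverse_cuthill_mckee_bandwidth_example():
    def bandwidth(M):
        i, j = np.nonzero(M)
        return np.abs(i - j).max()
    A = np.eye(5, dtype=int)
    A[0, 4] = A[4, 0] = 1
    perm = reverse_cuthill_mckee(csr_matrix(A), symmetric_mode=True)
    assert bandwidth(A[perm][:, perm]) < bandwidth(A)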
def test_graph_reverse_cuthill_mckee_ordering():
data = np.ones(63,dtype=int)
rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
9, 10, 10, 10, 10, 10, 11, 11, 11, 11,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
14, 15, 15, 15, 15, 15])
cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13,
15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
5, 7, 10, 13, 15])
graph = coo_matrix((data, (rows,cols))).tocsr()
perm = reverse_cuthill_mckee(graph)
correct_perm = np.array([12, 14, 4, 6, 10, 8, 2, 15,
0, 13, 7, 5, 9, 11, 1, 3])
assert_equal(perm, correct_perm)
def test_graph_maximum_bipartite_matching():
A = diags(np.ones(25), offsets=0, format='csc')
rand_perm = np.random.permutation(25)
rand_perm2 = np.random.permutation(25)
Rrow = np.arange(25)
Rcol = rand_perm
Rdata = np.ones(25,dtype=int)
Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()
Crow = rand_perm2
Ccol = np.arange(25)
Cdata = np.ones(25,dtype=int)
Cmat = coo_matrix((Cdata,(Crow,Ccol))).tocsc()
# Randomly permute identity matrix
B = Rmat*A*Cmat
# Row permute
perm = maximum_bipartite_matching(B,perm_type='row')
Rrow = np.arange(25)
Rcol = perm
Rdata = np.ones(25,dtype=int)
Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()
C1 = Rmat*B
# Column permute
perm2 = maximum_bipartite_matching(B,perm_type='column')
Crow = perm2
Ccol = np.arange(25)
Cdata = np.ones(25,dtype=int)
Cmat = coo_matrix((Cdata,(Crow,Ccol))).tocsc()
C2 = B*Cmat
# Should get identity matrix back
assert_equal(any(C1.diagonal() == 0), False)
assert_equal(any(C2.diagonal() == 0), False)
# Test int64 indices input
B.indices = B.indices.astype('int64')
B.indptr = B.indptr.astype('int64')
perm = maximum_bipartite_matching(B,perm_type='row')
Rrow = np.arange(25)
Rcol = perm
Rdata = np.ones(25,dtype=int)
Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()
C3 = Rmat*B
assert_equal(any(C3.diagonal() == 0), False)
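# Editor's minimal sketch of the property checked above (illustrative, not
# original code): for a column-permuted identity matrix, the matching found
# with perm_type='row' reorders the rows so the nonzeros return to the diagonal.
def test_maximum_bipartite_matching_small_example():
    B = csc_matrix(np.eye(4)[:, [2, 0, 3, 1]])
    perm = maximum_bipartite_matching(B, perm_type='row')
    assert_equal(B.toarray()[perm].diagonal(), np.ones(4))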
def test_graph_structural_rank():
# Test square matrix #1
A = csc_matrix([[1, 1, 0],
[1, 0, 1],
[0, 1, 0]])
assert_equal(structural_rank(A), 3)
# Test square matrix #2
rows = np.array([0,0,0,0,0,1,1,2,2,3,3,3,3,3,3,4,4,5,5,6,6,7,7])
cols = np.array([0,1,2,3,4,2,5,2,6,0,1,3,5,6,7,4,5,5,6,2,6,2,4])
data = np.ones_like(rows)
B = coo_matrix((data,(rows,cols)), shape=(8,8))
assert_equal(structural_rank(B), 6)
    # Test non-square matrix
C = csc_matrix([[1, 0, 2, 0],
[2, 0, 4, 0]])
assert_equal(structural_rank(C), 2)
    # Test tall matrix
assert_equal(structural_rank(C.T), 2)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/tests/test_traversal.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.sparse.csgraph import (breadth_first_tree, depth_first_tree,
csgraph_to_dense, csgraph_from_dense)
def test_graph_breadth_first():
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0, 1, 2, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 7, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first():
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
dfirst = np.array([[0, 1, 0, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 0, 0],
[0, 0, 7, 0, 0],
[0, 0, 0, 1, 0]])
for directed in [True, False]:
dfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(dfirst_test),
dfirst)
def test_graph_breadth_first_trivial_graph():
csgraph = np.array([[0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first_trivial_graph():
csgraph = np.array([[0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0]])
for directed in [True, False]:
bfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pytest import raises as assert_raises
from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson,
bellman_ford, construct_dist_matrix, NegativeCycleError)
directed_G = np.array([[0, 3, 3, 0, 0],
[0, 0, 0, 2, 4],
[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[2, 0, 0, 2, 0]], dtype=float)
undirected_G = np.array([[0, 3, 3, 1, 2],
[3, 0, 0, 2, 4],
[3, 0, 0, 0, 0],
[1, 2, 0, 0, 2],
[2, 4, 0, 2, 0]], dtype=float)
unweighted_G = (directed_G > 0).astype(float)
directed_SP = [[0, 3, 3, 5, 7],
[3, 0, 6, 2, 4],
[np.inf, np.inf, 0, np.inf, np.inf],
[1, 4, 4, 0, 8],
[2, 5, 5, 2, 0]]
directed_pred = np.array([[-9999, 0, 0, 1, 1],
[3, -9999, 0, 1, 1],
[-9999, -9999, -9999, -9999, -9999],
[3, 0, 0, -9999, 1],
[4, 0, 0, 4, -9999]], dtype=float)
undirected_SP = np.array([[0, 3, 3, 1, 2],
[3, 0, 6, 2, 4],
[3, 6, 0, 4, 5],
[1, 2, 4, 0, 2],
[2, 4, 5, 2, 0]], dtype=float)
undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2],
[np.inf, 0, np.inf, 2, np.inf],
[np.inf, np.inf, 0, np.inf, np.inf],
[1, 2, np.inf, 0, 2],
[2, np.inf, np.inf, 2, 0]], dtype=float)
undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5)
undirected_SP_limit_0[undirected_SP_limit_0 > 0] = np.inf
undirected_pred = np.array([[-9999, 0, 0, 0, 0],
[1, -9999, 0, 1, 1],
[2, 0, -9999, 0, 0],
[3, 3, 0, -9999, 3],
[4, 4, 0, 4, -9999]], dtype=float)
methods = ['auto', 'FW', 'D', 'BF', 'J']
def test_dijkstra_limit():
limits = [0, 2, np.inf]
results = [undirected_SP_limit_0,
undirected_SP_limit_2,
undirected_SP]
def check(limit, result):
SP = dijkstra(undirected_G, directed=False, limit=limit)
assert_array_almost_equal(SP, result)
for limit, result in zip(limits, results):
check(limit, result)
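# Editor's addition -- a hand-checkable instance of the `limit` semantics
# exercised above: paths whose total weight exceeds the limit are left
# unexplored and reported as infinity (illustrative sketch).
def test_dijkstra_limit_small_example():
    G = np.array([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])
    SP = dijkstra(G, directed=False, limit=1.5)
    assert_array_almost_equal(SP[0], [0, 1, np.inf])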
def test_directed():
def check(method):
SP = shortest_path(directed_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP, directed_SP)
for method in methods:
check(method)
def test_undirected():
def check(method, directed_in):
if directed_in:
SP1 = shortest_path(directed_G, method=method, directed=False,
overwrite=False)
assert_array_almost_equal(SP1, undirected_SP)
else:
SP2 = shortest_path(undirected_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP2, undirected_SP)
for method in methods:
for directed_in in (True, False):
check(method, directed_in)
def test_shortest_path_indices():
indices = np.arange(4)
def check(func, indshape):
outshape = indshape + (5,)
SP = func(directed_G, directed=False,
indices=indices.reshape(indshape))
assert_array_almost_equal(SP, undirected_SP[indices].reshape(outshape))
for indshape in [(4,), (4, 1), (2, 2)]:
for func in (dijkstra, bellman_ford, johnson, shortest_path):
check(func, indshape)
assert_raises(ValueError, shortest_path, directed_G, method='FW',
indices=indices)
def test_predecessors():
SP_res = {True: directed_SP,
False: undirected_SP}
pred_res = {True: directed_pred,
False: undirected_pred}
def check(method, directed):
SP, pred = shortest_path(directed_G, method, directed=directed,
overwrite=False,
return_predecessors=True)
assert_array_almost_equal(SP, SP_res[directed])
assert_array_almost_equal(pred, pred_res[directed])
for method in methods:
for directed in (True, False):
check(method, directed)
def test_construct_shortest_path():
def check(method, directed):
SP1, pred = shortest_path(directed_G,
directed=directed,
overwrite=False,
return_predecessors=True)
SP2 = construct_dist_matrix(directed_G, pred, directed=directed)
assert_array_almost_equal(SP1, SP2)
for method in methods:
for directed in (True, False):
check(method, directed)
def test_unweighted_path():
def check(method, directed):
SP1 = shortest_path(directed_G,
directed=directed,
overwrite=False,
unweighted=True)
SP2 = shortest_path(unweighted_G,
directed=directed,
overwrite=False,
unweighted=False)
assert_array_almost_equal(SP1, SP2)
for method in methods:
for directed in (True, False):
check(method, directed)
def test_negative_cycles():
# create a small graph with a negative cycle
graph = np.ones([5, 5])
graph.flat[::6] = 0
graph[1, 2] = -2
def check(method, directed):
assert_raises(NegativeCycleError, shortest_path, graph, method,
directed)
for method in ['FW', 'J', 'BF']:
for directed in (True, False):
check(method, directed)
def test_masked_input():
G = np.ma.masked_equal(directed_G, 0)
    def check(method):
        # pass the masked matrix so the masked-input code path is exercised
        SP = shortest_path(G, method=method, directed=True,
                           overwrite=False)
assert_array_almost_equal(SP, directed_SP)
for method in methods:
check(method)
def test_overwrite():
G = np.array([[0, 3, 3, 1, 2],
[3, 0, 0, 2, 4],
[3, 0, 0, 0, 0],
[1, 2, 0, 0, 2],
[2, 4, 0, 2, 0]], dtype=float)
foo = G.copy()
shortest_path(foo, overwrite=False)
assert_array_equal(foo, G)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/csgraph/tests/__init__.py
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/tests/test_spfuncs.py
from __future__ import division, print_function, absolute_import
from numpy import array, kron, matrix, diag
from numpy.testing import assert_, assert_equal
from scipy.sparse import spfuncs
from scipy.sparse import csr_matrix, csc_matrix, bsr_matrix
from scipy.sparse._sparsetools import (csr_scale_rows, csr_scale_columns,
bsr_scale_rows, bsr_scale_columns)
class TestSparseFunctions(object):
def test_scale_rows_and_cols(self):
D = matrix([[1,0,0,2,3],
[0,4,0,5,0],
[0,0,6,7,0]])
#TODO expose through function
S = csr_matrix(D)
v = array([1,2,3])
csr_scale_rows(3,5,S.indptr,S.indices,S.data,v)
assert_equal(S.todense(), diag(v)*D)
S = csr_matrix(D)
v = array([1,2,3,4,5])
csr_scale_columns(3,5,S.indptr,S.indices,S.data,v)
assert_equal(S.todense(), D*diag(v))
# blocks
E = kron(D,[[1,2],[3,4]])
S = bsr_matrix(E,blocksize=(2,2))
v = array([1,2,3,4,5,6])
bsr_scale_rows(3,5,2,2,S.indptr,S.indices,S.data,v)
assert_equal(S.todense(), diag(v)*E)
S = bsr_matrix(E,blocksize=(2,2))
v = array([1,2,3,4,5,6,7,8,9,10])
bsr_scale_columns(3,5,2,2,S.indptr,S.indices,S.data,v)
assert_equal(S.todense(), E*diag(v))
E = kron(D,[[1,2,3],[4,5,6]])
S = bsr_matrix(E,blocksize=(2,3))
v = array([1,2,3,4,5,6])
bsr_scale_rows(3,5,2,3,S.indptr,S.indices,S.data,v)
assert_equal(S.todense(), diag(v)*E)
S = bsr_matrix(E,blocksize=(2,3))
v = array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
bsr_scale_columns(3,5,2,3,S.indptr,S.indices,S.data,v)
assert_equal(S.todense(), E*diag(v))
def test_estimate_blocksize(self):
mats = []
mats.append([[0,1],[1,0]])
mats.append([[1,1,0],[0,0,1],[1,0,1]])
mats.append([[0],[0],[1]])
mats = [array(x) for x in mats]
blks = []
blks.append([[1]])
blks.append([[1,1],[1,1]])
blks.append([[1,1],[0,1]])
blks.append([[1,1,0],[1,0,1],[1,1,1]])
blks = [array(x) for x in blks]
for A in mats:
for B in blks:
X = kron(A,B)
r,c = spfuncs.estimate_blocksize(X)
assert_(r >= B.shape[0])
assert_(c >= B.shape[1])
def test_count_blocks(self):
def gold(A,bs):
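            # reference implementation: map every nonzero (i, j) to its block
            # coordinate (i//R, j//C) and count the distinct blocks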
R,C = bs
I,J = A.nonzero()
return len(set(zip(I//R,J//C)))
mats = []
mats.append([[0]])
mats.append([[1]])
mats.append([[1,0]])
mats.append([[1,1]])
mats.append([[0,1],[1,0]])
mats.append([[1,1,0],[0,0,1],[1,0,1]])
mats.append([[0],[0],[1]])
for A in mats:
for B in mats:
X = kron(A,B)
Y = csr_matrix(X)
for R in range(1,6):
for C in range(1,6):
assert_equal(spfuncs.count_blocks(Y, (R, C)), gold(X, (R, C)))
X = kron([[1,1,0],[0,0,1],[1,0,1]],[[1,1]])
Y = csc_matrix(X)
assert_equal(spfuncs.count_blocks(X, (1, 2)), gold(X, (1, 2)))
assert_equal(spfuncs.count_blocks(Y, (1, 2)), gold(X, (1, 2)))
| 3,319 | 32.2 | 86 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/tests/test_sputils.py
|
"""unit tests for sparse utility functions"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal
from pytest import raises as assert_raises
from scipy.sparse import sputils
from scipy._lib._numpy_compat import suppress_warnings
class TestSparseUtils(object):
def test_upcast(self):
assert_equal(sputils.upcast('intc'), np.intc)
assert_equal(sputils.upcast('int32', 'float32'), np.float64)
assert_equal(sputils.upcast('bool', complex, float), np.complex128)
assert_equal(sputils.upcast('i', 'd'), np.float64)
def test_getdtype(self):
A = np.array([1], dtype='int8')
assert_equal(sputils.getdtype(None, default=float), float)
assert_equal(sputils.getdtype(None, a=A), np.int8)
def test_isscalarlike(self):
assert_equal(sputils.isscalarlike(3.0), True)
assert_equal(sputils.isscalarlike(-4), True)
assert_equal(sputils.isscalarlike(2.5), True)
assert_equal(sputils.isscalarlike(1 + 3j), True)
assert_equal(sputils.isscalarlike(np.array(3)), True)
assert_equal(sputils.isscalarlike("16"), True)
assert_equal(sputils.isscalarlike(np.array([3])), False)
assert_equal(sputils.isscalarlike([[3]]), False)
assert_equal(sputils.isscalarlike((1,)), False)
assert_equal(sputils.isscalarlike((1, 2)), False)
def test_isintlike(self):
assert_equal(sputils.isintlike(-4), True)
assert_equal(sputils.isintlike(np.array(3)), True)
assert_equal(sputils.isintlike(np.array([3])), False)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Inexact indices into sparse matrices are deprecated")
assert_equal(sputils.isintlike(3.0), True)
assert_equal(sputils.isintlike(2.5), False)
assert_equal(sputils.isintlike(1 + 3j), False)
assert_equal(sputils.isintlike((1,)), False)
assert_equal(sputils.isintlike((1, 2)), False)
def test_isshape(self):
assert_equal(sputils.isshape((1, 2)), True)
assert_equal(sputils.isshape((5, 2)), True)
assert_equal(sputils.isshape((1.5, 2)), False)
assert_equal(sputils.isshape((2, 2, 2)), False)
assert_equal(sputils.isshape(([2], 2)), False)
assert_equal(sputils.isshape((-1, 2), nonneg=False),True)
assert_equal(sputils.isshape((2, -1), nonneg=False),True)
assert_equal(sputils.isshape((-1, 2), nonneg=True),False)
assert_equal(sputils.isshape((2, -1), nonneg=True),False)
def test_issequence(self):
assert_equal(sputils.issequence((1,)), True)
assert_equal(sputils.issequence((1, 2, 3)), True)
assert_equal(sputils.issequence([1]), True)
assert_equal(sputils.issequence([1, 2, 3]), True)
assert_equal(sputils.issequence(np.array([1, 2, 3])), True)
assert_equal(sputils.issequence(np.array([[1], [2], [3]])), False)
assert_equal(sputils.issequence(3), False)
def test_ismatrix(self):
assert_equal(sputils.ismatrix(((),)), True)
assert_equal(sputils.ismatrix([[1], [2]]), True)
assert_equal(sputils.ismatrix(np.arange(3)[None]), True)
assert_equal(sputils.ismatrix([1, 2]), False)
assert_equal(sputils.ismatrix(np.arange(3)), False)
assert_equal(sputils.ismatrix([[[1]]]), False)
assert_equal(sputils.ismatrix(3), False)
def test_isdense(self):
assert_equal(sputils.isdense(np.array([1])), True)
assert_equal(sputils.isdense(np.matrix([1])), True)
def test_validateaxis(self):
assert_raises(TypeError, sputils.validateaxis, (0, 1))
assert_raises(TypeError, sputils.validateaxis, 1.5)
assert_raises(ValueError, sputils.validateaxis, 3)
# These function calls should not raise errors
for axis in (-2, -1, 0, 1, None):
sputils.validateaxis(axis)
def test_get_index_dtype(self):
imax = np.iinfo(np.int32).max
too_big = imax + 1
        # Check that uint32 arrays with no values too large don't
        # return int64
a1 = np.ones(90, dtype='uint32')
a2 = np.ones(90, dtype='uint32')
assert_equal(
np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
np.dtype('int32')
)
        # Check that if we cannot convert the dtype directly but all
        # values are at most int32's max, int32 is still used
a1[-1] = imax
assert_equal(
np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
np.dtype('int32')
)
        # Check that if we cannot convert directly and the contents
        # are too large, int64 is returned
a1[-1] = too_big
assert_equal(
np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
np.dtype('int64')
)
        # Test that if we cannot convert and check_contents was not
        # requested, int64 is returned
a1 = np.ones(89, dtype='uint32')
a2 = np.ones(89, dtype='uint32')
assert_equal(
np.dtype(sputils.get_index_dtype((a1, a2))),
np.dtype('int64')
)
        # Check that even if the arrays could be converted directly,
        # an explicitly specified maxval takes precedence
a1 = np.ones(12, dtype='uint32')
a2 = np.ones(12, dtype='uint32')
assert_equal(
np.dtype(sputils.get_index_dtype(
(a1, a2), maxval=too_big, check_contents=True
)),
np.dtype('int64')
)
        # Check that an array with contents too large and maxval set
        # still returns int64
a1[-1] = too_big
assert_equal(
np.dtype(sputils.get_index_dtype((a1, a2), maxval=too_big)),
np.dtype('int64')
)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/tests/test_extract.py
"""test sparse matrix construction functions"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_equal
from scipy.sparse import csr_matrix
import numpy as np
from scipy.sparse import extract
class TestExtract(object):
def setup_method(self):
self.cases = [
csr_matrix([[1,2]]),
csr_matrix([[1,0]]),
csr_matrix([[0,0]]),
csr_matrix([[1],[2]]),
csr_matrix([[1],[0]]),
csr_matrix([[0],[0]]),
csr_matrix([[1,2],[3,4]]),
csr_matrix([[0,1],[0,0]]),
csr_matrix([[0,0],[1,0]]),
csr_matrix([[0,0],[0,0]]),
csr_matrix([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]),
csr_matrix([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]).T,
]
    def test_find(self):
        for A in self.cases:
            I,J,V = extract.find(A)
            assert_equal(A.toarray(),
                         csr_matrix((V,(I,J)), shape=A.shape).toarray())
def test_tril(self):
for A in self.cases:
B = A.toarray()
for k in [-3,-2,-1,0,1,2,3]:
assert_equal(extract.tril(A,k=k).toarray(), np.tril(B,k=k))
def test_triu(self):
for A in self.cases:
B = A.toarray()
for k in [-3,-2,-1,0,1,2,3]:
assert_equal(extract.triu(A,k=k).toarray(), np.triu(B,k=k))
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/tests/test_csr.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_
from scipy.sparse import csr_matrix
def _check_csr_rowslice(i, sl, X, Xcsr):
np_slice = X[i, sl]
csr_slice = Xcsr[i, sl]
assert_array_almost_equal(np_slice, csr_slice.toarray()[0])
assert_(type(csr_slice) is csr_matrix)
def test_csr_rowslice():
N = 10
np.random.seed(0)
X = np.random.random((N, N))
X[X > 0.7] = 0
Xcsr = csr_matrix(X)
slices = [slice(None, None, None),
slice(None, None, -1),
slice(1, -2, 2),
slice(-2, 1, -2)]
for i in range(N):
for sl in slices:
_check_csr_rowslice(i, sl, X, Xcsr)
def test_csr_getrow():
N = 10
np.random.seed(0)
X = np.random.random((N, N))
X[X > 0.7] = 0
Xcsr = csr_matrix(X)
for i in range(N):
arr_row = X[i:i + 1, :]
csr_row = Xcsr.getrow(i)
assert_array_almost_equal(arr_row, csr_row.toarray())
assert_(type(csr_row) is csr_matrix)
def test_csr_getcol():
N = 10
np.random.seed(0)
X = np.random.random((N, N))
X[X > 0.7] = 0
Xcsr = csr_matrix(X)
for i in range(N):
arr_col = X[:, i:i + 1]
csr_col = Xcsr.getcol(i)
assert_array_almost_equal(arr_col, csr_col.toarray())
assert_(type(csr_col) is csr_matrix)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/tests/test_construct.py
"""test sparse matrix construction functions"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, matrix
from numpy.testing import (assert_equal, assert_,
assert_array_equal, assert_array_almost_equal_nulp)
import pytest
from pytest import raises as assert_raises
from scipy._lib._testutils import check_free_memory
from scipy.sparse import csr_matrix, coo_matrix
from scipy.sparse import construct
from scipy.sparse.construct import rand as sprand
sparse_formats = ['csr','csc','coo','bsr','dia','lil','dok']
#TODO check whether format=XXX is respected
def _sprandn(m, n, density=0.01, format="coo", dtype=None, random_state=None):
# Helper function for testing.
if random_state is None:
random_state = np.random
elif isinstance(random_state, (int, np.integer)):
random_state = np.random.RandomState(random_state)
data_rvs = random_state.randn
return construct.random(m, n, density, format, dtype,
random_state, data_rvs)
class TestConstructUtils(object):
def test_spdiags(self):
diags1 = array([[1, 2, 3, 4, 5]])
diags2 = array([[1, 2, 3, 4, 5],
[6, 7, 8, 9,10]])
diags3 = array([[1, 2, 3, 4, 5],
[6, 7, 8, 9,10],
[11,12,13,14,15]])
cases = []
cases.append((diags1, 0, 1, 1, [[1]]))
cases.append((diags1, [0], 1, 1, [[1]]))
cases.append((diags1, [0], 2, 1, [[1],[0]]))
cases.append((diags1, [0], 1, 2, [[1,0]]))
cases.append((diags1, [1], 1, 2, [[0,2]]))
cases.append((diags1,[-1], 1, 2, [[0,0]]))
cases.append((diags1, [0], 2, 2, [[1,0],[0,2]]))
cases.append((diags1,[-1], 2, 2, [[0,0],[1,0]]))
cases.append((diags1, [3], 2, 2, [[0,0],[0,0]]))
cases.append((diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))
cases.append((diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]]))
cases.append((diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]]))
cases.append((diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]]))
cases.append((diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))
cases.append((diags2, [2,-3], 6, 6, [[0,0,3,0,0,0],
[0,0,0,4,0,0],
[0,0,0,0,5,0],
[6,0,0,0,0,0],
[0,7,0,0,0,0],
[0,0,8,0,0,0]]))
cases.append((diags3, [-1,0,1], 6, 6, [[6,12, 0, 0, 0, 0],
[1, 7,13, 0, 0, 0],
[0, 2, 8,14, 0, 0],
[0, 0, 3, 9,15, 0],
[0, 0, 0, 4,10, 0],
[0, 0, 0, 0, 5, 0]]))
cases.append((diags3, [-4,2,-1], 6, 5, [[0, 0, 8, 0, 0],
[11, 0, 0, 9, 0],
[0,12, 0, 0,10],
[0, 0,13, 0, 0],
[1, 0, 0,14, 0],
[0, 2, 0, 0,15]]))
for d,o,m,n,result in cases:
assert_equal(construct.spdiags(d,o,m,n).todense(), result)
def test_diags(self):
a = array([1, 2, 3, 4, 5])
b = array([6, 7, 8, 9, 10])
c = array([11, 12, 13, 14, 15])
cases = []
cases.append((a[:1], 0, (1, 1), [[1]]))
cases.append(([a[:1]], [0], (1, 1), [[1]]))
cases.append(([a[:1]], [0], (2, 1), [[1],[0]]))
cases.append(([a[:1]], [0], (1, 2), [[1,0]]))
cases.append(([a[:1]], [1], (1, 2), [[0,1]]))
cases.append(([a[:2]], [0], (2, 2), [[1,0],[0,2]]))
cases.append(([a[:1]],[-1], (2, 2), [[0,0],[1,0]]))
cases.append(([a[:3]], [0], (3, 4), [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))
cases.append(([a[:3]], [1], (3, 4), [[0,1,0,0],[0,0,2,0],[0,0,0,3]]))
cases.append(([a[:1]], [-2], (3, 5), [[0,0,0,0,0],[0,0,0,0,0],[1,0,0,0,0]]))
cases.append(([a[:2]], [-1], (3, 5), [[0,0,0,0,0],[1,0,0,0,0],[0,2,0,0,0]]))
cases.append(([a[:3]], [0], (3, 5), [[1,0,0,0,0],[0,2,0,0,0],[0,0,3,0,0]]))
cases.append(([a[:3]], [1], (3, 5), [[0,1,0,0,0],[0,0,2,0,0],[0,0,0,3,0]]))
cases.append(([a[:3]], [2], (3, 5), [[0,0,1,0,0],[0,0,0,2,0],[0,0,0,0,3]]))
cases.append(([a[:2]], [3], (3, 5), [[0,0,0,1,0],[0,0,0,0,2],[0,0,0,0,0]]))
cases.append(([a[:1]], [4], (3, 5), [[0,0,0,0,1],[0,0,0,0,0],[0,0,0,0,0]]))
cases.append(([a[:1]], [-4], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[1,0,0]]))
cases.append(([a[:2]], [-3], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[1,0,0],[0,2,0]]))
cases.append(([a[:3]], [-2], (5, 3), [[0,0,0],[0,0,0],[1,0,0],[0,2,0],[0,0,3]]))
cases.append(([a[:3]], [-1], (5, 3), [[0,0,0],[1,0,0],[0,2,0],[0,0,3],[0,0,0]]))
cases.append(([a[:3]], [0], (5, 3), [[1,0,0],[0,2,0],[0,0,3],[0,0,0],[0,0,0]]))
cases.append(([a[:2]], [1], (5, 3), [[0,1,0],[0,0,2],[0,0,0],[0,0,0],[0,0,0]]))
cases.append(([a[:1]], [2], (5, 3), [[0,0,1],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]))
cases.append(([a[:3],b[:1]], [0,2], (3, 3), [[1,0,6],[0,2,0],[0,0,3]]))
cases.append(([a[:2],b[:3]], [-1,0], (3, 4), [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))
cases.append(([a[:4],b[:3]], [2,-3], (6, 6), [[0,0,1,0,0,0],
[0,0,0,2,0,0],
[0,0,0,0,3,0],
[6,0,0,0,0,4],
[0,7,0,0,0,0],
[0,0,8,0,0,0]]))
cases.append(([a[:4],b,c[:4]], [-1,0,1], (5, 5), [[6,11, 0, 0, 0],
[1, 7,12, 0, 0],
[0, 2, 8,13, 0],
[0, 0, 3, 9,14],
[0, 0, 0, 4,10]]))
cases.append(([a[:2],b[:3],c], [-4,2,-1], (6, 5), [[0, 0, 6, 0, 0],
[11, 0, 0, 7, 0],
[0,12, 0, 0, 8],
[0, 0,13, 0, 0],
[1, 0, 0,14, 0],
[0, 2, 0, 0,15]]))
# too long arrays are OK
cases.append(([a], [0], (1, 1), [[1]]))
cases.append(([a[:3],b], [0,2], (3, 3), [[1, 0, 6], [0, 2, 0], [0, 0, 3]]))
cases.append((np.array([[1, 2, 3], [4, 5, 6]]), [0,-1], (3, 3), [[1, 0, 0], [4, 2, 0], [0, 5, 3]]))
# scalar case: broadcasting
cases.append(([1,-2,1], [1,0,-1], (3, 3), [[-2, 1, 0],
[1, -2, 1],
[0, 1, -2]]))
for d, o, shape, result in cases:
try:
assert_equal(construct.diags(d, o, shape=shape).todense(),
result)
if shape[0] == shape[1] and hasattr(d[0], '__len__') and len(d[0]) <= max(shape):
# should be able to find the shape automatically
assert_equal(construct.diags(d, o).todense(), result)
            except Exception:
print("%r %r %r %r" % (d, o, shape, result))
raise
def test_diags_default(self):
a = array([1, 2, 3, 4, 5])
assert_equal(construct.diags(a).todense(), np.diag(a))
def test_diags_default_bad(self):
a = array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]])
assert_raises(ValueError, construct.diags, a)
def test_diags_bad(self):
a = array([1, 2, 3, 4, 5])
b = array([6, 7, 8, 9, 10])
c = array([11, 12, 13, 14, 15])
cases = []
cases.append(([a[:0]], 0, (1, 1)))
cases.append(([a[:4],b,c[:3]], [-1,0,1], (5, 5)))
cases.append(([a[:2],c,b[:3]], [-4,2,-1], (6, 5)))
cases.append(([a[:2],c,b[:3]], [-4,2,-1], None))
cases.append(([], [-4,2,-1], None))
cases.append(([1], [-5], (4, 4)))
cases.append(([a], 0, None))
for d, o, shape in cases:
try:
assert_raises(ValueError, construct.diags, d, o, shape)
            except Exception:
print("%r %r %r" % (d, o, shape))
raise
assert_raises(TypeError, construct.diags, [[None]], [0])
def test_diags_vs_diag(self):
# Check that
#
# diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ...
#
np.random.seed(1234)
for n_diags in [1, 2, 3, 4, 5, 10]:
n = 1 + n_diags//2 + np.random.randint(0, 10)
offsets = np.arange(-n+1, n-1)
np.random.shuffle(offsets)
offsets = offsets[:n_diags]
diagonals = [np.random.rand(n - abs(q)) for q in offsets]
mat = construct.diags(diagonals, offsets)
dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)])
assert_array_almost_equal_nulp(mat.todense(), dense_mat)
if len(offsets) == 1:
mat = construct.diags(diagonals[0], offsets[0])
dense_mat = np.diag(diagonals[0], offsets[0])
assert_array_almost_equal_nulp(mat.todense(), dense_mat)
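    def test_diags_vs_diag_small_example(self):
        # Editor's addition: one hand-checkable instance (assumed, not from
        # the original suite) of the identity verified randomly above:
        # diags([a, b], [i, j]) == diag(a, i) + diag(b, j)
        a = array([1, 2])
        b = array([3, 4, 5])
        mat = construct.diags([a, b], [1, 0])
        assert_array_equal(mat.toarray(), np.diag(a, 1) + np.diag(b, 0))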
def test_diags_dtype(self):
x = construct.diags([2.2], [0], shape=(2, 2), dtype=int)
assert_equal(x.dtype, int)
assert_equal(x.todense(), [[2, 0], [0, 2]])
def test_diags_one_diagonal(self):
d = list(range(5))
for k in range(-5, 6):
assert_equal(construct.diags(d, k).toarray(),
construct.diags([d], [k]).toarray())
def test_diags_empty(self):
x = construct.diags([])
assert_equal(x.shape, (0, 0))
def test_identity(self):
assert_equal(construct.identity(1).toarray(), [[1]])
assert_equal(construct.identity(2).toarray(), [[1,0],[0,1]])
I = construct.identity(3, dtype='int8', format='dia')
assert_equal(I.dtype, np.dtype('int8'))
assert_equal(I.format, 'dia')
for fmt in sparse_formats:
I = construct.identity(3, format=fmt)
assert_equal(I.format, fmt)
assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
def test_eye(self):
assert_equal(construct.eye(1,1).toarray(), [[1]])
assert_equal(construct.eye(2,3).toarray(), [[1,0,0],[0,1,0]])
assert_equal(construct.eye(3,2).toarray(), [[1,0],[0,1],[0,0]])
assert_equal(construct.eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]])
assert_equal(construct.eye(3,3,dtype='int16').dtype, np.dtype('int16'))
for m in [3, 5]:
for n in [3, 5]:
for k in range(-5,6):
assert_equal(construct.eye(m, n, k=k).toarray(), np.eye(m, n, k=k))
if m == n:
assert_equal(construct.eye(m, k=k).toarray(), np.eye(m, n, k=k))
def test_eye_one(self):
assert_equal(construct.eye(1).toarray(), [[1]])
assert_equal(construct.eye(2).toarray(), [[1,0],[0,1]])
I = construct.eye(3, dtype='int8', format='dia')
assert_equal(I.dtype, np.dtype('int8'))
assert_equal(I.format, 'dia')
for fmt in sparse_formats:
I = construct.eye(3, format=fmt)
assert_equal(I.format, fmt)
assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
def test_kron(self):
cases = []
cases.append(array([[0]]))
cases.append(array([[-1]]))
cases.append(array([[4]]))
cases.append(array([[10]]))
cases.append(array([[0],[0]]))
cases.append(array([[0,0]]))
cases.append(array([[1,2],[3,4]]))
cases.append(array([[0,2],[5,0]]))
cases.append(array([[0,2,-6],[8,0,14]]))
cases.append(array([[5,4],[0,0],[6,0]]))
cases.append(array([[5,4,4],[1,0,0],[6,0,8]]))
cases.append(array([[0,1,0,2,0,5,8]]))
cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]]))
for a in cases:
for b in cases:
result = construct.kron(csr_matrix(a),csr_matrix(b)).todense()
expected = np.kron(a,b)
assert_array_equal(result,expected)
def test_kronsum(self):
cases = []
cases.append(array([[0]]))
cases.append(array([[-1]]))
cases.append(array([[4]]))
cases.append(array([[10]]))
cases.append(array([[1,2],[3,4]]))
cases.append(array([[0,2],[5,0]]))
cases.append(array([[0,2,-6],[8,0,14],[0,3,0]]))
cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]]))
for a in cases:
for b in cases:
result = construct.kronsum(csr_matrix(a),csr_matrix(b)).todense()
expected = np.kron(np.eye(len(b)), a) + \
np.kron(b, np.eye(len(a)))
assert_array_equal(result,expected)
def test_vstack(self):
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5,6]])
expected = matrix([[1, 2],
[3, 4],
[5, 6]])
assert_equal(construct.vstack([A,B]).todense(), expected)
assert_equal(construct.vstack([A,B], dtype=np.float32).dtype, np.float32)
assert_equal(construct.vstack([A.tocsr(),B.tocsr()]).todense(),
expected)
assert_equal(construct.vstack([A.tocsr(),B.tocsr()], dtype=np.float32).dtype,
np.float32)
assert_equal(construct.vstack([A.tocsr(),B.tocsr()],
dtype=np.float32).indices.dtype, np.int32)
assert_equal(construct.vstack([A.tocsr(),B.tocsr()],
dtype=np.float32).indptr.dtype, np.int32)
def test_hstack(self):
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5],[6]])
expected = matrix([[1, 2, 5],
[3, 4, 6]])
assert_equal(construct.hstack([A,B]).todense(), expected)
assert_equal(construct.hstack([A,B], dtype=np.float32).dtype, np.float32)
assert_equal(construct.hstack([A.tocsc(),B.tocsc()]).todense(),
expected)
assert_equal(construct.hstack([A.tocsc(),B.tocsc()], dtype=np.float32).dtype,
np.float32)
def test_bmat(self):
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5],[6]])
C = coo_matrix([[7]])
D = coo_matrix((0,0))
expected = matrix([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]])
assert_equal(construct.bmat([[A,B],[None,C]]).todense(), expected)
expected = matrix([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
assert_equal(construct.bmat([[A,None],[None,C]]).todense(), expected)
expected = matrix([[0, 5],
[0, 6],
[7, 0]])
assert_equal(construct.bmat([[None,B],[C,None]]).todense(), expected)
expected = matrix(np.empty((0,0)))
assert_equal(construct.bmat([[None,None]]).todense(), expected)
assert_equal(construct.bmat([[None,D],[D,None]]).todense(), expected)
# test bug reported in gh-5976
expected = matrix([[7]])
assert_equal(construct.bmat([[None,D],[C,None]]).todense(), expected)
# test failure cases
with assert_raises(ValueError) as excinfo:
construct.bmat([[A], [B]])
excinfo.match(r'Got blocks\[1,0\]\.shape\[1\] == 1, expected 2')
with assert_raises(ValueError) as excinfo:
construct.bmat([[A, C]])
excinfo.match(r'Got blocks\[0,1\]\.shape\[0\] == 1, expected 2')
@pytest.mark.slow
def test_concatenate_int32_overflow(self):
""" test for indptr overflow when concatenating matrices """
check_free_memory(30000)
n = 33000
A = csr_matrix(np.ones((n, n), dtype=bool))
B = A.copy()
C = construct._compressed_sparse_stack((A,B), 0)
assert_(np.all(np.equal(np.diff(C.indptr), n)))
assert_equal(C.indices.dtype, np.int64)
assert_equal(C.indptr.dtype, np.int64)
def test_block_diag_basic(self):
""" basic test for block_diag """
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5],[6]])
C = coo_matrix([[7]])
expected = matrix([[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 6, 0],
[0, 0, 0, 7]])
assert_equal(construct.block_diag((A, B, C)).todense(), expected)
def test_block_diag_scalar_1d_args(self):
""" block_diag with scalar and 1d arguments """
# one 1d matrix and a scalar
assert_array_equal(construct.block_diag([[2,3], 4]).toarray(),
[[2, 3, 0], [0, 0, 4]])
def test_block_diag_1(self):
""" block_diag with one matrix """
assert_equal(construct.block_diag([[1, 0]]).todense(),
matrix([[1, 0]]))
assert_equal(construct.block_diag([[[1, 0]]]).todense(),
matrix([[1, 0]]))
assert_equal(construct.block_diag([[[1], [0]]]).todense(),
matrix([[1], [0]]))
        # just one scalar
assert_equal(construct.block_diag([1]).todense(),
matrix([[1]]))
def test_random_sampling(self):
# Simple sanity checks for sparse random sampling.
for f in sprand, _sprandn:
for t in [np.float32, np.float64, np.longdouble]:
x = f(5, 10, density=0.1, dtype=t)
assert_equal(x.dtype, t)
assert_equal(x.shape, (5, 10))
assert_equal(x.nonzero()[0].size, 5)
x1 = f(5, 10, density=0.1, random_state=4321)
assert_equal(x1.dtype, np.double)
x2 = f(5, 10, density=0.1, random_state=np.random.RandomState(4321))
assert_array_equal(x1.data, x2.data)
assert_array_equal(x1.row, x2.row)
assert_array_equal(x1.col, x2.col)
for density in [0.0, 0.1, 0.5, 1.0]:
x = f(5, 10, density=density)
assert_equal(x.nnz, int(density * np.prod(x.shape)))
for fmt in ['coo', 'csc', 'csr', 'lil']:
x = f(5, 10, format=fmt)
assert_equal(x.format, fmt)
assert_raises(ValueError, lambda: f(5, 10, 1.1))
assert_raises(ValueError, lambda: f(5, 10, -0.1))
def test_rand(self):
# Simple distributional checks for sparse.rand.
for random_state in None, 4321, np.random.RandomState():
x = sprand(10, 20, density=0.5, dtype=np.float64,
random_state=random_state)
assert_(np.all(np.less_equal(0, x.data)))
assert_(np.all(np.less_equal(x.data, 1)))
def test_randn(self):
# Simple distributional checks for sparse.randn.
# Statistically, some of these should be negative
# and some should be greater than 1.
for random_state in None, 4321, np.random.RandomState():
x = _sprandn(10, 20, density=0.5, dtype=np.float64,
random_state=random_state)
assert_(np.any(np.less(x.data, 0)))
assert_(np.any(np.less(1, x.data)))
def test_random_accept_str_dtype(self):
# anything that np.dtype can convert to a dtype should be accepted
# for the dtype
a = construct.random(10, 10, dtype='d')
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/tests/test_matrix_io.py
from __future__ import division, print_function, absolute_import
import sys
import os
import numpy as np
import tempfile
import pytest
from pytest import raises as assert_raises
from numpy.testing import assert_equal, assert_
from scipy._lib._version import NumpyVersion
from scipy.sparse import (csc_matrix, csr_matrix, bsr_matrix, dia_matrix,
coo_matrix, save_npz, load_npz, dok_matrix)
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
def _save_and_load(matrix):
fd, tmpfile = tempfile.mkstemp(suffix='.npz')
os.close(fd)
try:
save_npz(tmpfile, matrix)
loaded_matrix = load_npz(tmpfile)
finally:
os.remove(tmpfile)
return loaded_matrix
def _check_save_and_load(dense_matrix):
for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]:
matrix = matrix_class(dense_matrix)
loaded_matrix = _save_and_load(matrix)
assert_(type(loaded_matrix) is matrix_class)
assert_(loaded_matrix.shape == dense_matrix.shape)
assert_(loaded_matrix.dtype == dense_matrix.dtype)
assert_equal(loaded_matrix.toarray(), dense_matrix)
def test_save_and_load_random():
N = 10
np.random.seed(0)
dense_matrix = np.random.random((N, N))
dense_matrix[dense_matrix > 0.7] = 0
_check_save_and_load(dense_matrix)
def test_save_and_load_empty():
dense_matrix = np.zeros((4,6))
_check_save_and_load(dense_matrix)
def test_save_and_load_one_entry():
dense_matrix = np.zeros((4,6))
dense_matrix[1,2] = 1
_check_save_and_load(dense_matrix)
@pytest.mark.skipif(NumpyVersion(np.__version__) < '1.10.0',
reason='disabling unpickling requires numpy >= 1.10.0')
def test_malicious_load():
class Executor(object):
def __reduce__(self):
return (assert_, (False, 'unexpected code execution'))
fd, tmpfile = tempfile.mkstemp(suffix='.npz')
os.close(fd)
try:
np.savez(tmpfile, format=Executor())
# Should raise a ValueError, not execute code
assert_raises(ValueError, load_npz, tmpfile)
finally:
os.remove(tmpfile)
def test_py23_compatibility():
# Try loading files saved on Python 2 and Python 3. They are not
# the same, since files saved with Scipy versions < 1.0.0 may
# contain unicode.
a = load_npz(os.path.join(DATA_DIR, 'csc_py2.npz'))
b = load_npz(os.path.join(DATA_DIR, 'csc_py3.npz'))
c = csc_matrix([[0]])
assert_equal(a.toarray(), c.toarray())
assert_equal(b.toarray(), c.toarray())
def test_implemented_error():
    # Attempts to save an unsupported type and checks that a
    # NotImplementedError is raised.
x = dok_matrix((2,3))
x[0,1] = 1
assert_raises(NotImplementedError, save_npz, 'x.npz', x)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/tests/__init__.py
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/tests/test_sparsetools.py
from __future__ import division, print_function, absolute_import
import sys
import os
import gc
import re
import threading
import numpy as np
from numpy.testing import assert_equal, assert_, assert_allclose
from scipy.sparse import (_sparsetools, coo_matrix, csr_matrix, csc_matrix,
bsr_matrix, dia_matrix)
from scipy.sparse.sputils import supported_dtypes
from scipy._lib._testutils import check_free_memory
import pytest
from pytest import raises as assert_raises
def test_exception():
assert_raises(MemoryError, _sparsetools.test_throw_error)
def test_threads():
# Smoke test for parallel threaded execution; doesn't actually
# check that code runs in parallel, but just that it produces
# expected results.
nthreads = 10
niter = 100
n = 20
a = csr_matrix(np.ones([n, n]))
bres = []
class Worker(threading.Thread):
def run(self):
b = a.copy()
for j in range(niter):
_sparsetools.csr_plus_csr(n, n,
a.indptr, a.indices, a.data,
a.indptr, a.indices, a.data,
b.indptr, b.indices, b.data)
bres.append(b)
threads = [Worker() for _ in range(nthreads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for b in bres:
assert_(np.all(b.toarray() == 2))
def test_regression_std_vector_dtypes():
# Regression test for gh-3780, checking the std::vector typemaps
# in sparsetools.cxx are complete.
for dtype in supported_dtypes:
ad = np.matrix([[1, 2], [3, 4]]).astype(dtype)
a = csr_matrix(ad, dtype=dtype)
# getcol is one function using std::vector typemaps, and should not fail
assert_equal(a.getcol(0).todense(), ad[:,0])
def test_nnz_overflow():
# Regression test for gh-7230 / gh-7871, checking that coo_todense
# with nnz > int32max doesn't overflow.
nnz = np.iinfo(np.int32).max + 1
# Ensure ~20 GB of RAM is free to run this test.
check_free_memory((4 + 4 + 1) * nnz / 1e6 + 0.5)
# Use nnz duplicate entries to keep the dense version small.
row = np.zeros(nnz, dtype=np.int32)
col = np.zeros(nnz, dtype=np.int32)
data = np.zeros(nnz, dtype=np.int8)
data[-1] = 4
s = coo_matrix((data, (row, col)), shape=(1, 1), copy=False)
# Sums nnz duplicates to produce a 1x1 array containing 4.
d = s.toarray()
assert_allclose(d, [[4]])
@pytest.mark.skipif(not (sys.platform.startswith('linux') and np.dtype(np.intp).itemsize >= 8),
reason="test requires 64-bit Linux")
class TestInt32Overflow(object):
"""
Some of the sparsetools routines use dense 2D matrices whose
total size is not bounded by the nnz of the sparse matrix. These
routines used to suffer from int32 wraparounds; here, we try to
check that the wraparounds don't occur any more.
"""
# choose n large enough
n = 50000
def setup_method(self):
assert self.n**2 > np.iinfo(np.int32).max
# check there's enough memory even if everything is run at the
# same time
try:
parallel_count = int(os.environ.get('PYTEST_XDIST_WORKER_COUNT', '1'))
except ValueError:
parallel_count = np.inf
check_free_memory(3000 * parallel_count)
def teardown_method(self):
gc.collect()
def test_coo_todense(self):
# Check *_todense routines (cf. gh-2179)
#
# All of them in the end call coo_matrix.todense
n = self.n
i = np.array([0, n-1])
j = np.array([0, n-1])
data = np.array([1, 2], dtype=np.int8)
m = coo_matrix((data, (i, j)))
r = m.todense()
assert_equal(r[0,0], 1)
assert_equal(r[-1,-1], 2)
del r
gc.collect()
@pytest.mark.slow
def test_matvecs(self):
# Check *_matvecs routines
n = self.n
i = np.array([0, n-1])
j = np.array([0, n-1])
data = np.array([1, 2], dtype=np.int8)
m = coo_matrix((data, (i, j)))
b = np.ones((n, n), dtype=np.int8)
for sptype in (csr_matrix, csc_matrix, bsr_matrix):
m2 = sptype(m)
r = m2.dot(b)
assert_equal(r[0,0], 1)
assert_equal(r[-1,-1], 2)
del r
gc.collect()
del b
gc.collect()
@pytest.mark.slow
def test_dia_matvec(self):
# Check: huge dia_matrix _matvec
n = self.n
data = np.ones((n, n), dtype=np.int8)
offsets = np.arange(n)
m = dia_matrix((data, offsets), shape=(n, n))
v = np.ones(m.shape[1], dtype=np.int8)
r = m.dot(v)
assert_equal(r[0], np.int8(n))
del data, offsets, m, v, r
gc.collect()
_bsr_ops = [pytest.param("matmat", marks=pytest.mark.xslow),
pytest.param("matvecs", marks=pytest.mark.xslow),
"matvec",
"diagonal",
"sort_indices",
pytest.param("transpose", marks=pytest.mark.xslow)]
@pytest.mark.slow
@pytest.mark.parametrize("op", _bsr_ops)
def test_bsr_1_block(self, op):
# Check: huge bsr_matrix (1-block)
#
# The point here is that indices inside a block may overflow.
def get_matrix():
n = self.n
data = np.ones((1, n, n), dtype=np.int8)
indptr = np.array([0, 1], dtype=np.int32)
indices = np.array([0], dtype=np.int32)
m = bsr_matrix((data, indices, indptr), blocksize=(n, n), copy=False)
del data, indptr, indices
return m
gc.collect()
try:
getattr(self, "_check_bsr_" + op)(get_matrix)
finally:
gc.collect()
@pytest.mark.slow
@pytest.mark.parametrize("op", _bsr_ops)
def test_bsr_n_block(self, op):
# Check: huge bsr_matrix (n-block)
#
# The point here is that while indices within a block don't
        # overflow, accumulators across many blocks may.
def get_matrix():
n = self.n
data = np.ones((n, n, 1), dtype=np.int8)
indptr = np.array([0, n], dtype=np.int32)
indices = np.arange(n, dtype=np.int32)
m = bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False)
del data, indptr, indices
return m
gc.collect()
try:
getattr(self, "_check_bsr_" + op)(get_matrix)
finally:
gc.collect()
def _check_bsr_matvecs(self, m):
m = m()
n = self.n
# _matvecs
r = m.dot(np.ones((n, 2), dtype=np.int8))
assert_equal(r[0,0], np.int8(n))
def _check_bsr_matvec(self, m):
m = m()
n = self.n
# _matvec
r = m.dot(np.ones((n,), dtype=np.int8))
assert_equal(r[0], np.int8(n))
def _check_bsr_diagonal(self, m):
m = m()
n = self.n
# _diagonal
r = m.diagonal()
assert_equal(r, np.ones(n))
def _check_bsr_sort_indices(self, m):
# _sort_indices
m = m()
m.sort_indices()
def _check_bsr_transpose(self, m):
# _transpose
m = m()
m.transpose()
def _check_bsr_matmat(self, m):
m = m()
n = self.n
# _bsr_matmat
m2 = bsr_matrix(np.ones((n, 2), dtype=np.int8), blocksize=(m.blocksize[1], 2))
m.dot(m2) # shouldn't SIGSEGV
del m2
# _bsr_matmat
m2 = bsr_matrix(np.ones((2, n), dtype=np.int8), blocksize=(2, m.blocksize[0]))
m2.dot(m) # shouldn't SIGSEGV
@pytest.mark.skip(reason="64-bit indices in sparse matrices not available")
def test_csr_matmat_int64_overflow():
n = 3037000500
assert n**2 > np.iinfo(np.int64).max
# the test would take crazy amounts of memory
check_free_memory(n * (8*2 + 1) * 3 / 1e6)
# int64 overflow
data = np.ones((n,), dtype=np.int8)
indptr = np.arange(n+1, dtype=np.int64)
indices = np.zeros(n, dtype=np.int64)
a = csr_matrix((data, indices, indptr))
b = a.T
assert_raises(RuntimeError, a.dot, b)
def test_upcast():
a0 = csr_matrix([[np.pi, np.pi*1j], [3, 4]], dtype=complex)
b0 = np.array([256+1j, 2**32], dtype=complex)
for a_dtype in supported_dtypes:
for b_dtype in supported_dtypes:
msg = "(%r, %r)" % (a_dtype, b_dtype)
if np.issubdtype(a_dtype, np.complexfloating):
a = a0.copy().astype(a_dtype)
else:
a = a0.real.copy().astype(a_dtype)
if np.issubdtype(b_dtype, np.complexfloating):
b = b0.copy().astype(b_dtype)
else:
b = b0.real.copy().astype(b_dtype)
if not (a_dtype == np.bool_ and b_dtype == np.bool_):
c = np.zeros((2,), dtype=np.bool_)
assert_raises(ValueError, _sparsetools.csr_matvec,
2, 2, a.indptr, a.indices, a.data, b, c)
if ((np.issubdtype(a_dtype, np.complexfloating) and
not np.issubdtype(b_dtype, np.complexfloating)) or
(not np.issubdtype(a_dtype, np.complexfloating) and
np.issubdtype(b_dtype, np.complexfloating))):
c = np.zeros((2,), dtype=np.float64)
assert_raises(ValueError, _sparsetools.csr_matvec,
2, 2, a.indptr, a.indices, a.data, b, c)
c = np.zeros((2,), dtype=np.result_type(a_dtype, b_dtype))
_sparsetools.csr_matvec(2, 2, a.indptr, a.indices, a.data, b, c)
assert_allclose(c, np.dot(a.toarray(), b), err_msg=msg)
def test_endianness():
d = np.ones((3,4))
offsets = [-1,0,1]
a = dia_matrix((d.astype('<f8'), offsets), (4, 4))
b = dia_matrix((d.astype('>f8'), offsets), (4, 4))
v = np.arange(4)
assert_allclose(a.dot(v), [1, 3, 6, 5])
assert_allclose(b.dot(v), [1, 3, 6, 5])
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/tests/test_base.py
#
# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others
""" Test functions for sparse matrices. Each class in the "Matrix class
based tests" section become subclasses of the classes in the "Generic
tests" section. This is done by the functions in the "Tailored base
class for generic tests" section.
"""
from __future__ import division, print_function, absolute_import
__usage__ = """
Build sparse:
python setup.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.sparse.test()'
Run tests if sparse is not installed:
python tests/test_base.py
"""
import operator
import contextlib
import functools
import numpy as np
from scipy._lib.six import xrange, zip as izip
from numpy import (arange, zeros, array, dot, matrix, asmatrix, asarray,
vstack, ndarray, transpose, diag, kron, inf, conjugate,
int8, ComplexWarning)
import random
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal, assert_almost_equal, assert_,
assert_allclose)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
import scipy.linalg
import scipy.sparse as sparse
from scipy.sparse import (csc_matrix, csr_matrix, dok_matrix,
coo_matrix, lil_matrix, dia_matrix, bsr_matrix,
eye, isspmatrix, SparseEfficiencyWarning, issparse)
from scipy.sparse.sputils import supported_dtypes, isscalarlike, get_index_dtype
from scipy.sparse.linalg import splu, expm, inv
from scipy._lib._version import NumpyVersion
from scipy._lib.decorator import decorator
import pytest
def assert_in(member, collection, msg=None):
assert_(member in collection, msg=msg if msg is not None else "%r not found in %r" % (member, collection))
# Only test matmul operator (A @ B) when available (Python 3.5+)
TEST_MATMUL = hasattr(operator, 'matmul')
sup_complex = suppress_warnings()
sup_complex.filter(ComplexWarning)
def with_64bit_maxval_limit(maxval_limit=None, random=False, fixed_dtype=None,
downcast_maxval=None, assert_32bit=False):
"""
Monkeypatch the maxval threshold at which scipy.sparse switches to
64-bit index arrays, or make it (pseudo-)random.
"""
if maxval_limit is None:
maxval_limit = 10
if assert_32bit:
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
tp = get_index_dtype(arrays, maxval, check_contents)
assert_equal(np.iinfo(tp).max, np.iinfo(np.int32).max)
assert_(tp == np.int32 or tp == np.intc)
return tp
elif fixed_dtype is not None:
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
return fixed_dtype
elif random:
counter = np.random.RandomState(seed=1234)
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
return (np.int32, np.int64)[counter.randint(2)]
else:
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
dtype = np.int32
if maxval is not None:
if maxval > maxval_limit:
dtype = np.int64
for arr in arrays:
arr = np.asarray(arr)
if arr.dtype > np.int32:
if check_contents:
if arr.size == 0:
# a bigger type not needed
continue
elif np.issubdtype(arr.dtype, np.integer):
maxval = arr.max()
minval = arr.min()
if minval >= -maxval_limit and maxval <= maxval_limit:
# a bigger type not needed
continue
dtype = np.int64
return dtype
if downcast_maxval is not None:
def new_downcast_intp_index(arr):
if arr.max() > downcast_maxval:
raise AssertionError("downcast limited")
return arr.astype(np.intp)
@decorator
def deco(func, *a, **kw):
backup = []
modules = [scipy.sparse.bsr, scipy.sparse.coo, scipy.sparse.csc,
scipy.sparse.csr, scipy.sparse.dia, scipy.sparse.dok,
scipy.sparse.lil, scipy.sparse.sputils,
scipy.sparse.compressed, scipy.sparse.construct]
try:
for mod in modules:
backup.append((mod, 'get_index_dtype',
getattr(mod, 'get_index_dtype', None)))
setattr(mod, 'get_index_dtype', new_get_index_dtype)
if downcast_maxval is not None:
backup.append((mod, 'downcast_intp_index',
getattr(mod, 'downcast_intp_index', None)))
setattr(mod, 'downcast_intp_index', new_downcast_intp_index)
return func(*a, **kw)
finally:
for mod, name, oldfunc in backup:
if oldfunc is not None:
setattr(mod, name, oldfunc)
return deco
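# Editor's sketch of how the decorator above is meant to be applied (assumed
# usage example, not part of the original suite): with a fixed int64 index
# dtype, even a tiny matrix is built with 64-bit index arrays.
@with_64bit_maxval_limit(fixed_dtype=np.int64)
def _example_forced_int64_indices():
    m = csr_matrix(np.eye(3))
    assert_equal(m.indices.dtype, np.int64)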
def todense(a):
if isinstance(a, np.ndarray) or isscalarlike(a):
return a
return a.todense()
class BinopTester(object):
# Custom type to test binary operations on sparse matrices.
def __add__(self, mat):
return "matrix on the right"
def __mul__(self, mat):
return "matrix on the right"
def __sub__(self, mat):
return "matrix on the right"
def __radd__(self, mat):
return "matrix on the left"
def __rmul__(self, mat):
return "matrix on the left"
def __rsub__(self, mat):
return "matrix on the left"
def __matmul__(self, mat):
return "matrix on the right"
def __rmatmul__(self, mat):
return "matrix on the left"
class BinopTester_with_shape(object):
    # Custom type to test binary operations on sparse matrices
    # with an object that has a shape attribute.
def __init__(self,shape):
self._shape = shape
def shape(self):
return self._shape
def ndim(self):
return len(self._shape)
def __add__(self, mat):
return "matrix on the right"
def __mul__(self, mat):
return "matrix on the right"
def __sub__(self, mat):
return "matrix on the right"
def __radd__(self, mat):
return "matrix on the left"
def __rmul__(self, mat):
return "matrix on the left"
def __rsub__(self, mat):
return "matrix on the left"
def __matmul__(self, mat):
return "matrix on the right"
def __rmatmul__(self, mat):
return "matrix on the left"
#------------------------------------------------------------------------------
# Generic tests
#------------------------------------------------------------------------------
# TODO check that spmatrix( ... , copy=X ) is respected
# TODO test prune
# TODO test has_sorted_indices
class _TestCommon(object):
"""test common functionality shared by all sparse formats"""
math_dtypes = supported_dtypes
@classmethod
def init_class(cls):
# Canonical data.
cls.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d')
cls.datsp = cls.spmatrix(cls.dat)
# Some sparse and dense matrices with data for every supported
# dtype.
# This set union is a workaround for numpy#6295, which means that
# two np.int64 dtypes don't hash to the same value.
cls.checked_dtypes = set(supported_dtypes).union(cls.math_dtypes)
cls.dat_dtypes = {}
cls.datsp_dtypes = {}
for dtype in cls.checked_dtypes:
cls.dat_dtypes[dtype] = cls.dat.astype(dtype)
cls.datsp_dtypes[dtype] = cls.spmatrix(cls.dat.astype(dtype))
# Check that the original data is equivalent to the
# corresponding dat_dtypes & datsp_dtypes.
assert_equal(cls.dat, cls.dat_dtypes[np.float64])
assert_equal(cls.datsp.todense(),
cls.datsp_dtypes[np.float64].todense())
def test_bool(self):
def check(dtype):
datsp = self.datsp_dtypes[dtype]
assert_raises(ValueError, bool, datsp)
assert_(self.spmatrix([1]))
assert_(not self.spmatrix([0]))
if isinstance(self, TestDOK):
pytest.skip("Cannot create a rank <= 2 DOK matrix.")
for dtype in self.checked_dtypes:
check(dtype)
def test_bool_rollover(self):
        # bool is stored in a 1-byte dtype; check that repeated addition
        # does not roll over True -> False at 256.
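        # After ten doublings each True entry has been accumulated
        # 2**10 = 1024 times; a 1-byte sum would wrap past 256 and read False.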
dat = np.matrix([[True, False]])
datsp = self.spmatrix(dat)
for _ in range(10):
datsp = datsp + datsp
dat = dat + dat
assert_array_equal(dat, datsp.todense())
def test_eq(self):
sup = suppress_warnings()
sup.filter(SparseEfficiencyWarning)
@sup
@sup_complex
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datbsr = bsr_matrix(dat)
datcsr = csr_matrix(dat)
datcsc = csc_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat == dat2, (datsp == datsp2).todense())
# mix sparse types
assert_array_equal(dat == dat2, (datbsr == datsp2).todense())
assert_array_equal(dat == dat2, (datcsr == datsp2).todense())
assert_array_equal(dat == dat2, (datcsc == datsp2).todense())
assert_array_equal(dat == dat2, (datlil == datsp2).todense())
# sparse/dense
assert_array_equal(dat == datsp2, datsp2 == dat)
# sparse/scalar
assert_array_equal(dat == 0, (datsp == 0).todense())
assert_array_equal(dat == 1, (datsp == 1).todense())
assert_array_equal(dat == np.nan, (datsp == np.nan).todense())
if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
for dtype in self.checked_dtypes:
check(dtype)
def test_ne(self):
sup = suppress_warnings()
sup.filter(SparseEfficiencyWarning)
@sup
@sup_complex
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat != dat2, (datsp != datsp2).todense())
# mix sparse types
assert_array_equal(dat != dat2, (datbsr != datsp2).todense())
assert_array_equal(dat != dat2, (datcsc != datsp2).todense())
assert_array_equal(dat != dat2, (datcsr != datsp2).todense())
assert_array_equal(dat != dat2, (datlil != datsp2).todense())
# sparse/dense
assert_array_equal(dat != datsp2, datsp2 != dat)
# sparse/scalar
assert_array_equal(dat != 0, (datsp != 0).todense())
assert_array_equal(dat != 1, (datsp != 1).todense())
assert_array_equal(0 != dat, (0 != datsp).todense())
assert_array_equal(1 != dat, (1 != datsp).todense())
assert_array_equal(dat != np.nan, (datsp != np.nan).todense())
if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
for dtype in self.checked_dtypes:
check(dtype)
def test_lt(self):
sup = suppress_warnings()
sup.filter(SparseEfficiencyWarning)
@sup
@sup_complex
def check(dtype):
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat < dat2, (datsp < datsp2).todense())
assert_array_equal(datcomplex < dat2, (datspcomplex < datsp2).todense())
# mix sparse types
assert_array_equal(dat < dat2, (datbsr < datsp2).todense())
assert_array_equal(dat < dat2, (datcsc < datsp2).todense())
assert_array_equal(dat < dat2, (datcsr < datsp2).todense())
assert_array_equal(dat < dat2, (datlil < datsp2).todense())
assert_array_equal(dat2 < dat, (datsp2 < datbsr).todense())
assert_array_equal(dat2 < dat, (datsp2 < datcsc).todense())
assert_array_equal(dat2 < dat, (datsp2 < datcsr).todense())
assert_array_equal(dat2 < dat, (datsp2 < datlil).todense())
# sparse/dense
assert_array_equal(dat < dat2, datsp < dat2)
assert_array_equal(datcomplex < dat2, datspcomplex < dat2)
# sparse/scalar
assert_array_equal((datsp < 2).todense(), dat < 2)
assert_array_equal((datsp < 1).todense(), dat < 1)
assert_array_equal((datsp < 0).todense(), dat < 0)
assert_array_equal((datsp < -1).todense(), dat < -1)
assert_array_equal((datsp < -2).todense(), dat < -2)
with np.errstate(invalid='ignore'):
assert_array_equal((datsp < np.nan).todense(), dat < np.nan)
assert_array_equal((2 < datsp).todense(), 2 < dat)
assert_array_equal((1 < datsp).todense(), 1 < dat)
assert_array_equal((0 < datsp).todense(), 0 < dat)
assert_array_equal((-1 < datsp).todense(), -1 < dat)
assert_array_equal((-2 < datsp).todense(), -2 < dat)
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat < datsp2, datsp < dat2)
if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
for dtype in self.checked_dtypes:
check(dtype)
def test_gt(self):
sup = suppress_warnings()
sup.filter(SparseEfficiencyWarning)
@sup
@sup_complex
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat > dat2, (datsp > datsp2).todense())
assert_array_equal(datcomplex > dat2, (datspcomplex > datsp2).todense())
# mix sparse types
assert_array_equal(dat > dat2, (datbsr > datsp2).todense())
assert_array_equal(dat > dat2, (datcsc > datsp2).todense())
assert_array_equal(dat > dat2, (datcsr > datsp2).todense())
assert_array_equal(dat > dat2, (datlil > datsp2).todense())
assert_array_equal(dat2 > dat, (datsp2 > datbsr).todense())
assert_array_equal(dat2 > dat, (datsp2 > datcsc).todense())
assert_array_equal(dat2 > dat, (datsp2 > datcsr).todense())
assert_array_equal(dat2 > dat, (datsp2 > datlil).todense())
# sparse/dense
assert_array_equal(dat > dat2, datsp > dat2)
assert_array_equal(datcomplex > dat2, datspcomplex > dat2)
# sparse/scalar
assert_array_equal((datsp > 2).todense(), dat > 2)
assert_array_equal((datsp > 1).todense(), dat > 1)
assert_array_equal((datsp > 0).todense(), dat > 0)
assert_array_equal((datsp > -1).todense(), dat > -1)
assert_array_equal((datsp > -2).todense(), dat > -2)
with np.errstate(invalid='ignore'):
assert_array_equal((datsp > np.nan).todense(), dat > np.nan)
assert_array_equal((2 > datsp).todense(), 2 > dat)
assert_array_equal((1 > datsp).todense(), 1 > dat)
assert_array_equal((0 > datsp).todense(), 0 > dat)
assert_array_equal((-1 > datsp).todense(), -1 > dat)
assert_array_equal((-2 > datsp).todense(), -2 > dat)
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat > datsp2, datsp > dat2)
if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
for dtype in self.checked_dtypes:
check(dtype)
def test_le(self):
sup = suppress_warnings()
sup.filter(SparseEfficiencyWarning)
@sup
@sup_complex
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat <= dat2, (datsp <= datsp2).todense())
assert_array_equal(datcomplex <= dat2, (datspcomplex <= datsp2).todense())
# mix sparse types
assert_array_equal((datbsr <= datsp2).todense(), dat <= dat2)
assert_array_equal((datcsc <= datsp2).todense(), dat <= dat2)
assert_array_equal((datcsr <= datsp2).todense(), dat <= dat2)
assert_array_equal((datlil <= datsp2).todense(), dat <= dat2)
assert_array_equal((datsp2 <= datbsr).todense(), dat2 <= dat)
assert_array_equal((datsp2 <= datcsc).todense(), dat2 <= dat)
assert_array_equal((datsp2 <= datcsr).todense(), dat2 <= dat)
assert_array_equal((datsp2 <= datlil).todense(), dat2 <= dat)
# sparse/dense
assert_array_equal(datsp <= dat2, dat <= dat2)
assert_array_equal(datspcomplex <= dat2, datcomplex <= dat2)
# sparse/scalar
assert_array_equal((datsp <= 2).todense(), dat <= 2)
assert_array_equal((datsp <= 1).todense(), dat <= 1)
assert_array_equal((datsp <= -1).todense(), dat <= -1)
assert_array_equal((datsp <= -2).todense(), dat <= -2)
assert_array_equal((2 <= datsp).todense(), 2 <= dat)
assert_array_equal((1 <= datsp).todense(), 1 <= dat)
assert_array_equal((-1 <= datsp).todense(), -1 <= dat)
assert_array_equal((-2 <= datsp).todense(), -2 <= dat)
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat <= datsp2, datsp <= dat2)
if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
for dtype in self.checked_dtypes:
check(dtype)
def test_ge(self):
sup = suppress_warnings()
sup.filter(SparseEfficiencyWarning)
@sup
@sup_complex
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat >= dat2, (datsp >= datsp2).todense())
assert_array_equal(datcomplex >= dat2, (datspcomplex >= datsp2).todense())
# mix sparse types
assert_array_equal((datbsr >= datsp2).todense(), dat >= dat2)
assert_array_equal((datcsc >= datsp2).todense(), dat >= dat2)
assert_array_equal((datcsr >= datsp2).todense(), dat >= dat2)
assert_array_equal((datlil >= datsp2).todense(), dat >= dat2)
assert_array_equal((datsp2 >= datbsr).todense(), dat2 >= dat)
assert_array_equal((datsp2 >= datcsc).todense(), dat2 >= dat)
assert_array_equal((datsp2 >= datcsr).todense(), dat2 >= dat)
assert_array_equal((datsp2 >= datlil).todense(), dat2 >= dat)
# sparse/dense
assert_array_equal(datsp >= dat2, dat >= dat2)
assert_array_equal(datspcomplex >= dat2, datcomplex >= dat2)
# sparse/scalar
assert_array_equal((datsp >= 2).todense(), dat >= 2)
assert_array_equal((datsp >= 1).todense(), dat >= 1)
assert_array_equal((datsp >= -1).todense(), dat >= -1)
assert_array_equal((datsp >= -2).todense(), dat >= -2)
assert_array_equal((2 >= datsp).todense(), 2 >= dat)
assert_array_equal((1 >= datsp).todense(), 1 >= dat)
assert_array_equal((-1 >= datsp).todense(), -1 >= dat)
assert_array_equal((-2 >= datsp).todense(), -2 >= dat)
# dense data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat >= datsp2, datsp >= dat2)
if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
for dtype in self.checked_dtypes:
check(dtype)
def test_empty(self):
# create empty matrices
assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3)))
assert_equal(self.spmatrix((3,3)).nnz, 0)
assert_equal(self.spmatrix((3,3)).count_nonzero(), 0)
def test_count_nonzero(self):
expected = np.count_nonzero(self.datsp.toarray())
assert_equal(self.datsp.count_nonzero(), expected)
assert_equal(self.datsp.T.count_nonzero(), expected)
def test_invalid_shapes(self):
assert_raises(ValueError, self.spmatrix, (-1,3))
assert_raises(ValueError, self.spmatrix, (3,-1))
assert_raises(ValueError, self.spmatrix, (-1,-1))
def test_repr(self):
repr(self.datsp)
def test_str(self):
str(self.datsp)
def test_empty_arithmetic(self):
# Test manipulating empty matrices. Fails in SciPy SVN <= r1768
shape = (5, 5)
for mytype in [np.dtype('int32'), np.dtype('float32'),
np.dtype('float64'), np.dtype('complex64'),
np.dtype('complex128')]:
a = self.spmatrix(shape, dtype=mytype)
b = a + a
c = 2 * a
d = a * a.tocsc()
e = a * a.tocsr()
f = a * a.tocoo()
for m in [a,b,c,d,e,f]:
assert_equal(m.A, a.A*a.A)
# These fail in all revisions <= r1768:
assert_equal(m.dtype,mytype)
assert_equal(m.A.dtype,mytype)
def test_abs(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(abs(A),abs(self.spmatrix(A)).todense())
def test_elementwise_power(self):
A = matrix([[-4, -3, -2],[-1, 0, 1],[2, 3, 4]], 'd')
assert_equal(np.power(A, 2), self.spmatrix(A).power(2).todense())
        # It's an element-wise power function; the input has to be a scalar.
assert_raises(NotImplementedError, self.spmatrix(A).power, A)
def test_neg(self):
A = matrix([[-1, 0, 17], [0, -5, 0], [1, -4, 0], [0, 0, 0]], 'd')
assert_equal(-A, (-self.spmatrix(A)).todense())
# see gh-5843
A = matrix([[True, False, False], [False, False, True]])
assert_raises(NotImplementedError, self.spmatrix(A).__neg__)
def test_real(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.real.todense(),D.real)
def test_imag(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.imag.todense(),D.imag)
def test_diagonal(self):
# Does the matrix's .diagonal() method work?
mats = []
mats.append([[1,0,2]])
mats.append([[1],[0],[2]])
mats.append([[0,1],[0,2],[0,3]])
mats.append([[0,0,1],[0,0,2],[0,3,0]])
mats.append(kron(mats[0],[[1,2]]))
mats.append(kron(mats[0],[[1],[2]]))
mats.append(kron(mats[1],[[1,2],[3,4]]))
mats.append(kron(mats[2],[[1,2],[3,4]]))
mats.append(kron(mats[3],[[1,2],[3,4]]))
mats.append(kron(mats[3],[[1,2,3,4]]))
for m in mats:
rows, cols = array(m).shape
sparse_mat = self.spmatrix(m)
for k in range(-rows + 1, cols):
assert_equal(sparse_mat.diagonal(k=k), diag(m, k=k))
assert_raises(ValueError, sparse_mat.diagonal, -rows)
assert_raises(ValueError, sparse_mat.diagonal, cols)
# Test all-zero matrix.
assert_equal(self.spmatrix((40, 16130)).diagonal(), np.zeros(40))
def test_reshape(self):
# This first example is taken from the lil_matrix reshaping test.
x = self.spmatrix([[1, 0, 7], [0, 0, 0], [0, 3, 0], [0, 0, 5]])
for order in ['C', 'F']:
for s in [(12, 1), (1, 12)]:
assert_array_equal(x.reshape(s, order=order).todense(),
x.todense().reshape(s, order=order))
# This example is taken from the stackoverflow answer at
# http://stackoverflow.com/questions/16511879
x = self.spmatrix([[0, 10, 0, 0], [0, 0, 0, 0], [0, 20, 30, 40]])
y = x.reshape((2, 6)) # Default order is 'C'
desired = [[0, 10, 0, 0, 0, 0], [0, 0, 0, 20, 30, 40]]
assert_array_equal(y.A, desired)
# Reshape with negative indexes
y = x.reshape((2, -1))
assert_array_equal(y.A, desired)
y = x.reshape((-1, 6))
assert_array_equal(y.A, desired)
assert_raises(ValueError, x.reshape, (-1, -1))
# Reshape with star args
y = x.reshape(2, 6)
assert_array_equal(y.A, desired)
assert_raises(TypeError, x.reshape, 2, 6, not_an_arg=1)
# Reshape with same size is noop unless copy=True
y = x.reshape((3, 4))
assert_(y is x)
y = x.reshape((3, 4), copy=True)
assert_(y is not x)
# Ensure reshape did not alter original size
assert_array_equal(x.shape, (3, 4))
# Reshape in place
x.shape = (2, 6)
assert_array_equal(x.A, desired)
@pytest.mark.slow
def test_setdiag_comprehensive(self):
def dense_setdiag(a, v, k):
v = np.asarray(v)
if k >= 0:
n = min(a.shape[0], a.shape[1] - k)
if v.ndim != 0:
n = min(n, len(v))
v = v[:n]
i = np.arange(0, n)
j = np.arange(k, k + n)
a[i,j] = v
elif k < 0:
dense_setdiag(a.T, v, -k)
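        # For example, on a 3x3 zero array dense_setdiag(a, [1, 2], k=1) sets
        # a[0, 1] = 1 and a[1, 2] = 2, mirroring spmatrix.setdiag below.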
def check_setdiag(a, b, k):
# Check setting diagonal using a scalar, a vector of
# correct length, and too short or too long vectors
for r in [-1, len(np.diag(a, k)), 2, 30]:
if r < 0:
v = int(np.random.randint(1, 20, size=1))
else:
v = np.random.randint(1, 20, size=r)
dense_setdiag(a, v, k)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive")
b.setdiag(v, k)
# check that dense_setdiag worked
d = np.diag(a, k)
if np.asarray(v).ndim == 0:
assert_array_equal(d, v, err_msg="%s %d" % (msg, r))
else:
n = min(len(d), len(v))
assert_array_equal(d[:n], v[:n], err_msg="%s %d" % (msg, r))
# check that sparse setdiag worked
assert_array_equal(b.A, a, err_msg="%s %d" % (msg, r))
# comprehensive test
np.random.seed(1234)
shapes = [(0,5), (5,0), (1,5), (5,1), (5,5)]
for dtype in [np.int8, np.float64]:
for m,n in shapes:
ks = np.arange(-m+1, n-1)
for k in ks:
msg = repr((dtype, m, n, k))
a = np.zeros((m, n), dtype=dtype)
b = self.spmatrix((m, n), dtype=dtype)
check_setdiag(a, b, k)
# check overwriting etc
for k2 in np.random.choice(ks, size=min(len(ks), 5)):
check_setdiag(a, b, k2)
def test_setdiag(self):
# simple test cases
m = self.spmatrix(np.eye(3))
values = [3, 2, 1]
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
assert_raises(ValueError, m.setdiag, values, k=4)
m.setdiag(values)
assert_array_equal(m.diagonal(), values)
m.setdiag(values, k=1)
assert_array_equal(m.A, np.array([[3, 3, 0],
[0, 2, 2],
[0, 0, 1]]))
m.setdiag(values, k=-2)
assert_array_equal(m.A, np.array([[3, 3, 0],
[0, 2, 2],
[3, 0, 1]]))
m.setdiag((9,), k=2)
assert_array_equal(m.A[0,2], 9)
m.setdiag((9,), k=-2)
assert_array_equal(m.A[2,0], 9)
def test_nonzero(self):
A = array([[1, 0, 1],[0, 1, 1],[0, 0, 1]])
Asp = self.spmatrix(A)
A_nz = set([tuple(ij) for ij in transpose(A.nonzero())])
Asp_nz = set([tuple(ij) for ij in transpose(Asp.nonzero())])
assert_equal(A_nz, Asp_nz)
def test_numpy_nonzero(self):
# See gh-5987
A = array([[1, 0, 1], [0, 1, 1], [0, 0, 1]])
Asp = self.spmatrix(A)
A_nz = set([tuple(ij) for ij in transpose(np.nonzero(A))])
Asp_nz = set([tuple(ij) for ij in transpose(np.nonzero(Asp))])
assert_equal(A_nz, Asp_nz)
def test_getrow(self):
assert_array_equal(self.datsp.getrow(1).todense(), self.dat[1,:])
assert_array_equal(self.datsp.getrow(-1).todense(), self.dat[-1,:])
def test_getcol(self):
assert_array_equal(self.datsp.getcol(1).todense(), self.dat[:,1])
assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1])
def test_sum(self):
np.random.seed(1234)
dat_1 = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
dat_2 = np.random.rand(5, 5)
dat_3 = np.array([[]])
dat_4 = np.zeros((40, 40))
dat_5 = sparse.rand(5, 5, density=1e-2).A
matrices = [dat_1, dat_2, dat_3, dat_4, dat_5]
def check(dtype, j):
dat = np.matrix(matrices[j], dtype=dtype)
datsp = self.spmatrix(dat, dtype=dtype)
with np.errstate(over='ignore'):
assert_array_almost_equal(dat.sum(), datsp.sum())
assert_equal(dat.sum().dtype, datsp.sum().dtype)
assert_(np.isscalar(datsp.sum(axis=None)))
assert_array_almost_equal(dat.sum(axis=None),
datsp.sum(axis=None))
assert_equal(dat.sum(axis=None).dtype,
datsp.sum(axis=None).dtype)
assert_array_almost_equal(dat.sum(axis=0), datsp.sum(axis=0))
assert_equal(dat.sum(axis=0).dtype, datsp.sum(axis=0).dtype)
assert_array_almost_equal(dat.sum(axis=1), datsp.sum(axis=1))
assert_equal(dat.sum(axis=1).dtype, datsp.sum(axis=1).dtype)
assert_array_almost_equal(dat.sum(axis=-2), datsp.sum(axis=-2))
assert_equal(dat.sum(axis=-2).dtype, datsp.sum(axis=-2).dtype)
assert_array_almost_equal(dat.sum(axis=-1), datsp.sum(axis=-1))
assert_equal(dat.sum(axis=-1).dtype, datsp.sum(axis=-1).dtype)
for dtype in self.checked_dtypes:
for j in range(len(matrices)):
check(dtype, j)
def test_sum_invalid_params(self):
out = np.asmatrix(np.zeros((1, 3)))
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spmatrix(dat)
assert_raises(ValueError, datsp.sum, axis=3)
assert_raises(TypeError, datsp.sum, axis=(0, 1))
assert_raises(TypeError, datsp.sum, axis=1.5)
assert_raises(ValueError, datsp.sum, axis=1, out=out)
def test_sum_dtype(self):
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spmatrix(dat)
def check(dtype):
dat_mean = dat.mean(dtype=dtype)
datsp_mean = datsp.mean(dtype=dtype)
assert_array_almost_equal(dat_mean, datsp_mean)
assert_equal(dat_mean.dtype, datsp_mean.dtype)
for dtype in self.checked_dtypes:
check(dtype)
def test_sum_out(self):
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spmatrix(dat)
dat_out = np.matrix(0)
datsp_out = np.matrix(0)
dat.sum(out=dat_out)
datsp.sum(out=datsp_out)
assert_array_almost_equal(dat_out, datsp_out)
dat_out = np.asmatrix(np.zeros((3, 1)))
datsp_out = np.asmatrix(np.zeros((3, 1)))
dat.sum(axis=1, out=dat_out)
datsp.sum(axis=1, out=datsp_out)
assert_array_almost_equal(dat_out, datsp_out)
def test_numpy_sum(self):
# See gh-5987
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spmatrix(dat)
dat_mean = np.sum(dat)
datsp_mean = np.sum(datsp)
assert_array_almost_equal(dat_mean, datsp_mean)
assert_equal(dat_mean.dtype, datsp_mean.dtype)
def test_mean(self):
def check(dtype):
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]], dtype=dtype)
datsp = self.spmatrix(dat, dtype=dtype)
assert_array_almost_equal(dat.mean(), datsp.mean())
assert_equal(dat.mean().dtype, datsp.mean().dtype)
assert_(np.isscalar(datsp.mean(axis=None)))
assert_array_almost_equal(dat.mean(axis=None), datsp.mean(axis=None))
assert_equal(dat.mean(axis=None).dtype, datsp.mean(axis=None).dtype)
assert_array_almost_equal(dat.mean(axis=0), datsp.mean(axis=0))
assert_equal(dat.mean(axis=0).dtype, datsp.mean(axis=0).dtype)
assert_array_almost_equal(dat.mean(axis=1), datsp.mean(axis=1))
assert_equal(dat.mean(axis=1).dtype, datsp.mean(axis=1).dtype)
assert_array_almost_equal(dat.mean(axis=-2), datsp.mean(axis=-2))
assert_equal(dat.mean(axis=-2).dtype, datsp.mean(axis=-2).dtype)
assert_array_almost_equal(dat.mean(axis=-1), datsp.mean(axis=-1))
assert_equal(dat.mean(axis=-1).dtype, datsp.mean(axis=-1).dtype)
for dtype in self.checked_dtypes:
check(dtype)
def test_mean_invalid_params(self):
out = np.asmatrix(np.zeros((1, 3)))
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spmatrix(dat)
assert_raises(ValueError, datsp.mean, axis=3)
assert_raises(TypeError, datsp.mean, axis=(0, 1))
assert_raises(TypeError, datsp.mean, axis=1.5)
assert_raises(ValueError, datsp.mean, axis=1, out=out)
def test_mean_dtype(self):
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spmatrix(dat)
def check(dtype):
dat_mean = dat.mean(dtype=dtype)
datsp_mean = datsp.mean(dtype=dtype)
assert_array_almost_equal(dat_mean, datsp_mean)
assert_equal(dat_mean.dtype, datsp_mean.dtype)
for dtype in self.checked_dtypes:
check(dtype)
def test_mean_out(self):
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spmatrix(dat)
dat_out = np.matrix(0)
datsp_out = np.matrix(0)
dat.mean(out=dat_out)
datsp.mean(out=datsp_out)
assert_array_almost_equal(dat_out, datsp_out)
dat_out = np.asmatrix(np.zeros((3, 1)))
datsp_out = np.asmatrix(np.zeros((3, 1)))
dat.mean(axis=1, out=dat_out)
datsp.mean(axis=1, out=datsp_out)
assert_array_almost_equal(dat_out, datsp_out)
def test_numpy_mean(self):
# See gh-5987
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spmatrix(dat)
dat_mean = np.mean(dat)
datsp_mean = np.mean(datsp)
assert_array_almost_equal(dat_mean, datsp_mean)
assert_equal(dat_mean.dtype, datsp_mean.dtype)
def test_expm(self):
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float)
sM = self.spmatrix(M, shape=(3,3), dtype=float)
Mexp = scipy.linalg.expm(M)
N = array([[3., 0., 1.], [0., 2., 0.], [0., 0., 0.]])
sN = self.spmatrix(N, shape=(3,3), dtype=float)
Nexp = scipy.linalg.expm(N)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve is more efficient when sparse b is in the CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve requires A be CSC or CSR matrix format")
sMexp = expm(sM).todense()
sNexp = expm(sN).todense()
assert_array_almost_equal((sMexp - Mexp), zeros((3, 3)))
assert_array_almost_equal((sNexp - Nexp), zeros((3, 3)))
def test_inv(self):
def check(dtype):
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], dtype)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"spsolve requires A be CSC or CSR matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve is more efficient when sparse b is in the CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"splu requires CSC matrix format")
sM = self.spmatrix(M, shape=(3,3), dtype=dtype)
sMinv = inv(sM)
assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3))
assert_raises(TypeError, inv, M)
for dtype in [float]:
check(dtype)
@sup_complex
def test_from_array(self):
A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
A = array([[1.0 + 3j, 0, 0],
                   [0, 2.0 + 5j, 0],
[0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
@sup_complex
def test_from_matrix(self):
A = matrix([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).todense(), A)
A = matrix([[1.0 + 3j, 0, 0],
                    [0, 2.0 + 5j, 0],
[0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
@sup_complex
def test_from_list(self):
A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]]
assert_array_equal(self.spmatrix(A).todense(), A)
A = [[1.0 + 3j, 0, 0],
             [0, 2.0 + 5j, 0],
[0, 0, 0]]
assert_array_equal(self.spmatrix(A).toarray(), array(A))
assert_array_equal(self.spmatrix(A, dtype='int16').todense(), array(A).astype('int16'))
@sup_complex
def test_from_sparse(self):
D = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
D = array([[1.0 + 3j, 0, 0],
[0, 2.0 + 5, 0],
[0, 0, 0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
# def test_array(self):
# """test array(A) where A is in sparse format"""
# assert_equal( array(self.datsp), self.dat )
def test_todense(self):
# Check C- or F-contiguous (default).
chk = self.datsp.todense()
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous != chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.todense(order='C')
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.todense(order='F')
assert_array_equal(chk, self.dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with out argument (array).
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk.base is out)
# Check with out array (matrix).
out = np.asmatrix(np.zeros(self.datsp.shape, dtype=self.datsp.dtype))
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk is out)
a = matrix([1.,2.,3.])
dense_dot_dense = a * self.dat
check = a * self.datsp.todense()
assert_array_equal(dense_dot_dense, check)
b = matrix([1.,2.,3.,4.]).T
dense_dot_dense = self.dat * b
check2 = self.datsp.todense() * b
assert_array_equal(dense_dot_dense, check2)
# Check bool data works.
spbool = self.spmatrix(self.dat, dtype=bool)
matbool = self.dat.astype(bool)
assert_array_equal(spbool.todense(), matbool)
def test_toarray(self):
# Check C- or F-contiguous (default).
dat = asarray(self.dat)
chk = self.datsp.toarray()
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous != chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.toarray(order='C')
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.toarray(order='F')
assert_array_equal(chk, dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with output arg.
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
# Check that things are fine when we don't initialize with zeros.
out[...] = 1.
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
a = array([1.,2.,3.])
dense_dot_dense = dot(a, dat)
check = dot(a, self.datsp.toarray())
assert_array_equal(dense_dot_dense, check)
b = array([1.,2.,3.,4.])
dense_dot_dense = dot(dat, b)
check2 = dot(self.datsp.toarray(), b)
assert_array_equal(dense_dot_dense, check2)
# Check bool data works.
spbool = self.spmatrix(self.dat, dtype=bool)
arrbool = dat.astype(bool)
assert_array_equal(spbool.toarray(), arrbool)
@sup_complex
def test_astype(self):
D = array([[2.0 + 3j, 0, 0],
[0, 4.0 + 5j, 0],
[0, 0, 0]])
S = self.spmatrix(D)
for x in supported_dtypes:
# Check correctly casted
D_casted = D.astype(x)
for copy in (True, False):
S_casted = S.astype(x, copy=copy)
assert_equal(S_casted.dtype, D_casted.dtype) # correct type
assert_equal(S_casted.toarray(), D_casted) # correct values
assert_equal(S_casted.format, S.format) # format preserved
# Check correctly copied
assert_(S_casted.astype(x, copy=False) is S_casted)
S_copied = S_casted.astype(x, copy=True)
assert_(S_copied is not S_casted)
def check_equal_but_not_same_array_attribute(attribute):
a = getattr(S_casted, attribute)
b = getattr(S_copied, attribute)
assert_array_equal(a, b)
assert_(a is not b)
i = (0,) * b.ndim
b_i = b[i]
b[i] = not b[i]
assert_(a[i] != b[i])
b[i] = b_i
if S_casted.format in ('csr', 'csc', 'bsr'):
for attribute in ('indices', 'indptr', 'data'):
check_equal_but_not_same_array_attribute(attribute)
elif S_casted.format == 'coo':
for attribute in ('row', 'col', 'data'):
check_equal_but_not_same_array_attribute(attribute)
elif S_casted.format == 'dia':
for attribute in ('offsets', 'data'):
check_equal_but_not_same_array_attribute(attribute)
def test_asfptype(self):
A = self.spmatrix(arange(6,dtype='int32').reshape(2,3))
assert_equal(A.dtype, np.dtype('int32'))
assert_equal(A.asfptype().dtype, np.dtype('float64'))
assert_equal(A.asfptype().format, A.format)
assert_equal(A.astype('int16').asfptype().dtype, np.dtype('float32'))
assert_equal(A.astype('complex128').asfptype().dtype, np.dtype('complex128'))
B = A.asfptype()
C = B.asfptype()
assert_(B is C)
def test_mul_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal(dat*2,(datsp*2).todense())
assert_array_equal(dat*17.3,(datsp*17.3).todense())
for dtype in self.math_dtypes:
check(dtype)
def test_rmul_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal(2*dat,(2*datsp).todense())
assert_array_equal(17.3*dat,(17.3*datsp).todense())
for dtype in self.math_dtypes:
check(dtype)
def test_add(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
a = dat.copy()
a[0,2] = 2.0
b = datsp
c = b + a
assert_array_equal(c, b.todense() + a)
c = b + b.tocsr()
assert_array_equal(c.todense(),
b.todense() + b.todense())
# test broadcasting
c = b + a[0]
assert_array_equal(c, b.todense() + a[0])
for dtype in self.math_dtypes:
check(dtype)
def test_radd(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
a = dat.copy()
a[0,2] = 2.0
b = datsp
c = a + b
assert_array_equal(c, a + b.todense())
for dtype in self.math_dtypes:
check(dtype)
def test_sub(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal((datsp - datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((datsp - 0).todense(), dat)
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((datsp - A).todense(),dat - A.todense())
assert_array_equal((A - datsp).todense(),A.todense() - dat)
# test broadcasting
assert_array_equal(datsp - dat[0], dat - dat[0])
for dtype in self.math_dtypes:
if dtype == np.dtype('bool'):
# boolean array subtraction deprecated in 1.9.0
continue
check(dtype)
def test_rsub(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal((dat - datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((datsp - dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((0 - datsp).todense(), -dat)
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((dat - A),dat - A.todense())
assert_array_equal((A - dat),A.todense() - dat)
assert_array_equal(A.todense() - datsp,A.todense() - dat)
assert_array_equal(datsp - A.todense(),dat - A.todense())
# test broadcasting
assert_array_equal(dat[0] - datsp, dat[0] - dat)
for dtype in self.math_dtypes:
if dtype == np.dtype('bool'):
# boolean array subtraction deprecated in 1.9.0
continue
check(dtype)
def test_add0(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# Adding 0 to a sparse matrix
assert_array_equal((datsp + 0).todense(), dat)
# use sum (which takes 0 as a starting value)
sumS = sum([k * datsp for k in range(1, 3)])
sumD = sum([k * dat for k in range(1, 3)])
assert_almost_equal(sumS.todense(), sumD)
for dtype in self.math_dtypes:
check(dtype)
def test_elementwise_multiply(self):
# real/real
A = array([[4,0,9],[2,-3,5]])
B = array([[0,7,0],[0,-4,0]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal(Asp.multiply(Bsp).todense(), A*B) # sparse/sparse
assert_almost_equal(Asp.multiply(B).todense(), A*B) # sparse/dense
# complex/complex
C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Csp = self.spmatrix(C)
Dsp = self.spmatrix(D)
assert_almost_equal(Csp.multiply(Dsp).todense(), C*D) # sparse/sparse
assert_almost_equal(Csp.multiply(D).todense(), C*D) # sparse/dense
# real/complex
assert_almost_equal(Asp.multiply(Dsp).todense(), A*D) # sparse/sparse
assert_almost_equal(Asp.multiply(D).todense(), A*D) # sparse/dense
def test_elementwise_multiply_broadcast(self):
A = array([4])
B = array([[-9]])
C = array([1,-1,0])
D = array([[7,9,-9]])
E = array([[3],[2],[1]])
F = array([[8,6,3],[-4,3,2],[6,6,6]])
G = [1, 2, 3]
H = np.ones((3, 4))
J = H.T
K = array([[0]])
L = array([[[1,2],[0,1]]])
# Some arrays can't be cast as spmatrices (A,C,L) so leave
# them out.
Bsp = self.spmatrix(B)
Dsp = self.spmatrix(D)
Esp = self.spmatrix(E)
Fsp = self.spmatrix(F)
Hsp = self.spmatrix(H)
Hspp = self.spmatrix(H[0,None])
Jsp = self.spmatrix(J)
Jspp = self.spmatrix(J[:,0,None])
Ksp = self.spmatrix(K)
matrices = [A, B, C, D, E, F, G, H, J, K, L]
spmatrices = [Bsp, Dsp, Esp, Fsp, Hsp, Hspp, Jsp, Jspp, Ksp]
# sparse/sparse
for i in spmatrices:
for j in spmatrices:
try:
dense_mult = np.multiply(i.todense(), j.todense())
except ValueError:
assert_raises(ValueError, i.multiply, j)
continue
sp_mult = i.multiply(j)
assert_almost_equal(sp_mult.todense(), dense_mult)
# sparse/dense
for i in spmatrices:
for j in matrices:
try:
dense_mult = np.multiply(i.todense(), j)
except TypeError:
continue
except ValueError:
assert_raises(ValueError, i.multiply, j)
continue
sp_mult = i.multiply(j)
if isspmatrix(sp_mult):
assert_almost_equal(sp_mult.todense(), dense_mult)
else:
assert_almost_equal(sp_mult, dense_mult)
def test_elementwise_divide(self):
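        # datsp / datsp divides entry-wise: x/x == 1 on the shared support
        # and 0/0 == nan where both entries are zero.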
expected = [[1,np.nan,np.nan,1],
[1,np.nan,1,np.nan],
[np.nan,1,np.nan,np.nan]]
assert_array_equal(todense(self.datsp / self.datsp),expected)
denom = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
expected = [[1,np.nan,np.nan,0.5],
[-3,np.nan,inf,np.nan],
[np.nan,0.25,np.nan,0]]
assert_array_equal(todense(self.datsp / denom), expected)
# complex
A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal(todense(Asp / Bsp), A/B)
# integer
A = array([[1,2,3],[-3,2,1]])
B = array([[0,1,2],[0,-2,3]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
with np.errstate(divide='ignore'):
assert_array_equal(todense(Asp / Bsp), A / B)
# mismatching sparsity patterns
A = array([[0,1],[1,0]])
B = array([[1,0],[1,0]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
with np.errstate(divide='ignore', invalid='ignore'):
assert_array_equal(np.array(todense(Asp / Bsp)), A / B)
def test_pow(self):
A = matrix([[1,0,2,0],[0,3,4,0],[0,5,0,0],[0,6,7,8]])
B = self.spmatrix(A)
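        # ** on np.matrix and on spmatrix is matrix power (repeated matrix
        # multiplication), with exponent 0 giving the identity.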
for exponent in [0,1,2,3]:
assert_array_equal((B**exponent).todense(),A**exponent)
# invalid exponents
for exponent in [-1, 2.2, 1 + 3j]:
assert_raises(Exception, B.__pow__, exponent)
# nonsquare matrix
B = self.spmatrix(A[:3,:])
assert_raises(Exception, B.__pow__, 1)
def test_rmatvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
assert_array_almost_equal([1,2,3,4]*M, dot([1,2,3,4], M.toarray()))
row = matrix([[1,2,3,4]])
assert_array_almost_equal(row*M, row*M.todense())
def test_small_multiplication(self):
        # test that A*x works for x with shape (), (1,), (1,1) and (1,0)
A = self.spmatrix([[1],[2],[3]])
assert_(isspmatrix(A * array(1)))
assert_equal((A * array(1)).todense(), [[1],[2],[3]])
assert_equal(A * array([1]), array([1,2,3]))
assert_equal(A * array([[1]]), array([[1],[2],[3]]))
assert_equal(A * np.ones((1,0)), np.ones((3,0)))
def test_binop_custom_type(self):
# Non-regression test: previously, binary operations would raise
# NotImplementedError instead of returning NotImplemented
# (https://docs.python.org/library/constants.html#NotImplemented)
# so overloading Custom + matrix etc. didn't work.
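        # Returning NotImplemented lets the interpreter fall back to the
        # reflected method on the other operand (e.g. BinopTester.__radd__
        # for A + B).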
A = self.spmatrix([[1], [2], [3]])
B = BinopTester()
assert_equal(A + B, "matrix on the left")
assert_equal(A - B, "matrix on the left")
assert_equal(A * B, "matrix on the left")
assert_equal(B + A, "matrix on the right")
assert_equal(B - A, "matrix on the right")
assert_equal(B * A, "matrix on the right")
if TEST_MATMUL:
assert_equal(eval('A @ B'), "matrix on the left")
assert_equal(eval('B @ A'), "matrix on the right")
def test_binop_custom_type_with_shape(self):
A = self.spmatrix([[1], [2], [3]])
B = BinopTester_with_shape((3,1))
assert_equal(A + B, "matrix on the left")
assert_equal(A - B, "matrix on the left")
assert_equal(A * B, "matrix on the left")
assert_equal(B + A, "matrix on the right")
assert_equal(B - A, "matrix on the right")
assert_equal(B * A, "matrix on the right")
if TEST_MATMUL:
assert_equal(eval('A @ B'), "matrix on the left")
assert_equal(eval('B @ A'), "matrix on the right")
def test_matmul(self):
if not TEST_MATMUL:
pytest.skip("matmul is only tested in Python 3.5+")
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
B = self.spmatrix(matrix([[0,1],[1,0],[0,2]],'d'))
col = matrix([1,2,3]).T
# check matrix-vector
assert_array_almost_equal(operator.matmul(M, col),
M.todense() * col)
# check matrix-matrix
assert_array_almost_equal(operator.matmul(M, B).todense(),
(M * B).todense())
assert_array_almost_equal(operator.matmul(M.todense(), B),
(M * B).todense())
assert_array_almost_equal(operator.matmul(M, B.todense()),
(M * B).todense())
# check error on matrix-scalar
assert_raises(ValueError, operator.matmul, M, 1)
assert_raises(ValueError, operator.matmul, 1, M)
def test_matvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
col = matrix([1,2,3]).T
assert_array_almost_equal(M * col, M.todense() * col)
# check result dimensions (ticket #514)
assert_equal((M * array([1,2,3])).shape,(4,))
assert_equal((M * array([[1],[2],[3]])).shape,(4,1))
assert_equal((M * matrix([[1],[2],[3]])).shape,(4,1))
# check result type
assert_(isinstance(M * array([1,2,3]), ndarray))
assert_(isinstance(M * matrix([1,2,3]).T, matrix))
# ensure exception is raised for improper dimensions
bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]),
matrix([1,2,3]), matrix([[1],[2]])]
for x in bad_vecs:
assert_raises(ValueError, M.__mul__, x)
# Should this be supported or not?!
# flat = array([1,2,3])
# assert_array_almost_equal(M*flat, M.todense()*flat)
# Currently numpy dense matrices promote the result to a 1x3 matrix,
# whereas sparse matrices leave the result as a rank-1 array. Which
# is preferable?
# Note: the following command does not work. Both NumPy matrices
# and spmatrices should raise exceptions!
# assert_array_almost_equal(M*[1,2,3], M.todense()*[1,2,3])
# The current relationship between sparse matrix products and array
# products is as follows:
assert_array_almost_equal(M*array([1,2,3]), dot(M.A,[1,2,3]))
assert_array_almost_equal(M*[[1],[2],[3]], asmatrix(dot(M.A,[1,2,3])).T)
# Note that the result of M * x is dense if x has a singleton dimension.
# Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col)
# is rank-2. Is this desirable?
def test_matmat_sparse(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
b = matrix([[0,1],[1,0],[0,2]],'d')
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
assert_array_almost_equal((asp*bsp).todense(), a*b)
assert_array_almost_equal(asp*b, a*b)
assert_array_almost_equal(a*bsp, a*b)
assert_array_almost_equal(a2*bsp, a*b)
        # Now try performing cross-type multiplication:
csp = bsp.tocsc()
c = b
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal(asp*c, a*c)
assert_array_almost_equal(a*csp, a*c)
assert_array_almost_equal(a2*csp, a*c)
csp = bsp.tocsr()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal(asp*c, a*c)
assert_array_almost_equal(a*csp, a*c)
assert_array_almost_equal(a2*csp, a*c)
csp = bsp.tocoo()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal(asp*c, a*c)
assert_array_almost_equal(a*csp, a*c)
assert_array_almost_equal(a2*csp, a*c)
# Test provided by Andy Fraser, 2006-03-26
L = 30
frac = .3
random.seed(0) # make runs repeatable
A = zeros((L,2))
for i in xrange(L):
for j in xrange(2):
r = random.random()
if r < frac:
A[i,j] = r/frac
A = self.spmatrix(A)
B = A*A.T
assert_array_almost_equal(B.todense(), A.todense() * A.T.todense())
assert_array_almost_equal(B.todense(), A.todense() * A.todense().T)
# check dimension mismatch 2x2 times 3x2
A = self.spmatrix([[1,2],[3,4]])
B = self.spmatrix([[1,2],[3,4],[5,6]])
assert_raises(ValueError, A.__mul__, B)
def test_matmat_dense(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
asp = self.spmatrix(a)
# check both array and matrix types
bs = [array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]])]
for b in bs:
result = asp*b
assert_(isinstance(result, type(b)))
assert_equal(result.shape, (4,2))
assert_equal(result, dot(a,b))
def test_sparse_format_conversions(self):
A = sparse.kron([[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]])
D = A.todense()
A = self.spmatrix(A)
for format in ['bsr','coo','csc','csr','dia','dok','lil']:
a = A.asformat(format)
assert_equal(a.format,format)
assert_array_equal(a.todense(), D)
b = self.spmatrix(D+3j).asformat(format)
assert_equal(b.format,format)
assert_array_equal(b.todense(), D+3j)
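            # eval resolves e.g. 'csr_matrix' to the constructor imported at
            # module top, exercising direct cross-format construction.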
c = eval(format + '_matrix')(A)
assert_equal(c.format,format)
assert_array_equal(c.todense(), D)
def test_tobsr(self):
x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]])
y = array([[0,1,2],[3,0,5]])
A = kron(x,y)
Asp = self.spmatrix(A)
for format in ['bsr']:
fn = getattr(Asp, 'to' + format)
for X in [1, 2, 3, 6]:
for Y in [1, 2, 3, 4, 6, 12]:
assert_equal(fn(blocksize=(X,Y)).todense(), A)
def test_transpose(self):
dat_1 = self.dat
dat_2 = np.array([[]])
matrices = [dat_1, dat_2]
def check(dtype, j):
dat = np.matrix(matrices[j], dtype=dtype)
datsp = self.spmatrix(dat)
a = datsp.transpose()
b = dat.transpose()
assert_array_equal(a.todense(), b)
assert_array_equal(a.transpose().todense(), dat)
assert_equal(a.dtype, b.dtype)
# See gh-5987
empty = self.spmatrix((3, 4))
assert_array_equal(np.transpose(empty).todense(),
np.transpose(zeros((3, 4))))
assert_array_equal(empty.T.todense(), zeros((4, 3)))
assert_raises(ValueError, empty.transpose, axes=0)
for dtype in self.checked_dtypes:
for j in range(len(matrices)):
check(dtype, j)
def test_add_dense(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# adding a dense matrix to a sparse matrix
sum1 = dat + datsp
assert_array_equal(sum1, dat + dat)
sum2 = datsp + dat
assert_array_equal(sum2, dat + dat)
for dtype in self.math_dtypes:
check(dtype)
def test_sub_dense(self):
        # subtract a dense matrix from a sparse matrix, and vice versa
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# Behavior is different for bool.
if dat.dtype == bool:
sum1 = dat - datsp
assert_array_equal(sum1, dat - dat)
sum2 = datsp - dat
assert_array_equal(sum2, dat - dat)
else:
# Manually add to avoid upcasting from scalar
# multiplication.
sum1 = (dat + dat + dat) - datsp
assert_array_equal(sum1, dat + dat)
sum2 = (datsp + datsp + datsp) - dat
assert_array_equal(sum2, dat + dat)
for dtype in self.math_dtypes:
if (dtype == np.dtype('bool')) and (
NumpyVersion(np.__version__) >= '1.9.0.dev'):
# boolean array subtraction deprecated in 1.9.0
continue
check(dtype)
def test_maximum_minimum(self):
A_dense = np.array([[1, 0, 3], [0, 4, 5], [0, 0, 0]])
B_dense = np.array([[1, 1, 2], [0, 3, 6], [1, -1, 0]])
A_dense_cpx = np.array([[1, 0, 3], [0, 4+2j, 5], [0, 1j, -1j]])
def check(dtype, dtype2, btype):
if np.issubdtype(dtype, np.complexfloating):
A = self.spmatrix(A_dense_cpx.astype(dtype))
else:
A = self.spmatrix(A_dense.astype(dtype))
if btype == 'scalar':
B = dtype2.type(1)
elif btype == 'scalar2':
B = dtype2.type(-1)
elif btype == 'dense':
B = B_dense.astype(dtype2)
elif btype == 'sparse':
B = self.spmatrix(B_dense.astype(dtype2))
else:
raise ValueError()
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Taking maximum .minimum. with > 0 .< 0. number results to a dense matrix")
max_s = A.maximum(B)
min_s = A.minimum(B)
max_d = np.maximum(todense(A), todense(B))
assert_array_equal(todense(max_s), max_d)
assert_equal(max_s.dtype, max_d.dtype)
min_d = np.minimum(todense(A), todense(B))
assert_array_equal(todense(min_s), min_d)
assert_equal(min_s.dtype, min_d.dtype)
for dtype in self.math_dtypes:
for dtype2 in [np.int8, np.float_, np.complex_]:
for btype in ['scalar', 'scalar2', 'dense', 'sparse']:
check(np.dtype(dtype), np.dtype(dtype2), btype)
def test_copy(self):
# Check whether the copy=True and copy=False keywords work
A = self.datsp
# check that copy preserves format
assert_equal(A.copy().format, A.format)
assert_equal(A.__class__(A,copy=True).format, A.format)
assert_equal(A.__class__(A,copy=False).format, A.format)
assert_equal(A.copy().todense(), A.todense())
assert_equal(A.__class__(A,copy=True).todense(), A.todense())
assert_equal(A.__class__(A,copy=False).todense(), A.todense())
# check that XXX_matrix.toXXX() works
toself = getattr(A,'to' + A.format)
assert_equal(toself().format, A.format)
assert_equal(toself(copy=True).format, A.format)
assert_equal(toself(copy=False).format, A.format)
assert_equal(toself().todense(), A.todense())
assert_equal(toself(copy=True).todense(), A.todense())
assert_equal(toself(copy=False).todense(), A.todense())
        # check whether the data is actually copied
# TODO: deal with non-indexable types somehow
B = A.copy()
try:
B[0,0] += 1
assert_(B[0,0] != A[0,0])
except NotImplementedError:
# not all sparse matrices can be indexed
pass
except TypeError:
# not all sparse matrices can be indexed
pass
# test that __iter__ is compatible with NumPy matrix
def test_iterator(self):
B = np.matrix(np.arange(50).reshape(5, 10))
A = self.spmatrix(B)
for x, y in zip(A, B):
assert_equal(x.todense(), y)
def test_size_zero_matrix_arithmetic(self):
# Test basic matrix arithmetic with shapes like (0,0), (10,0),
# (0, 3), etc.
mat = np.matrix([])
a = mat.reshape((0, 0))
b = mat.reshape((0, 1))
c = mat.reshape((0, 5))
d = mat.reshape((1, 0))
e = mat.reshape((5, 0))
f = np.matrix(np.ones([5, 5]))
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
csp = self.spmatrix(c)
dsp = self.spmatrix(d)
esp = self.spmatrix(e)
fsp = self.spmatrix(f)
# matrix product.
assert_array_equal(asp.dot(asp).A, np.dot(a, a).A)
assert_array_equal(bsp.dot(dsp).A, np.dot(b, d).A)
assert_array_equal(dsp.dot(bsp).A, np.dot(d, b).A)
assert_array_equal(csp.dot(esp).A, np.dot(c, e).A)
assert_array_equal(csp.dot(fsp).A, np.dot(c, f).A)
assert_array_equal(esp.dot(csp).A, np.dot(e, c).A)
assert_array_equal(dsp.dot(csp).A, np.dot(d, c).A)
assert_array_equal(fsp.dot(esp).A, np.dot(f, e).A)
# bad matrix products
assert_raises(ValueError, dsp.dot, e)
assert_raises(ValueError, asp.dot, d)
        # element-wise multiplication
assert_array_equal(asp.multiply(asp).A, np.multiply(a, a).A)
assert_array_equal(bsp.multiply(bsp).A, np.multiply(b, b).A)
assert_array_equal(dsp.multiply(dsp).A, np.multiply(d, d).A)
assert_array_equal(asp.multiply(a).A, np.multiply(a, a).A)
assert_array_equal(bsp.multiply(b).A, np.multiply(b, b).A)
assert_array_equal(dsp.multiply(d).A, np.multiply(d, d).A)
assert_array_equal(asp.multiply(6).A, np.multiply(a, 6).A)
assert_array_equal(bsp.multiply(6).A, np.multiply(b, 6).A)
assert_array_equal(dsp.multiply(6).A, np.multiply(d, 6).A)
# bad element-wise multiplication
assert_raises(ValueError, asp.multiply, c)
assert_raises(ValueError, esp.multiply, c)
# Addition
assert_array_equal(asp.__add__(asp).A, a.__add__(a).A)
assert_array_equal(bsp.__add__(bsp).A, b.__add__(b).A)
assert_array_equal(dsp.__add__(dsp).A, d.__add__(d).A)
# bad addition
assert_raises(ValueError, asp.__add__, dsp)
assert_raises(ValueError, bsp.__add__, asp)
def test_size_zero_conversions(self):
mat = np.matrix([])
a = mat.reshape((0, 0))
b = mat.reshape((0, 5))
c = mat.reshape((5, 0))
for m in [a, b, c]:
spm = self.spmatrix(m)
assert_array_equal(spm.tocoo().A, m)
assert_array_equal(spm.tocsr().A, m)
assert_array_equal(spm.tocsc().A, m)
assert_array_equal(spm.tolil().A, m)
assert_array_equal(spm.todok().A, m)
assert_array_equal(spm.tobsr().A, m)
def test_pickle(self):
import pickle
sup = suppress_warnings()
sup.filter(SparseEfficiencyWarning)
@sup
def check():
datsp = self.datsp.copy()
for protocol in range(pickle.HIGHEST_PROTOCOL):
sploaded = pickle.loads(pickle.dumps(datsp, protocol=protocol))
assert_equal(datsp.shape, sploaded.shape)
assert_array_equal(datsp.toarray(), sploaded.toarray())
assert_equal(datsp.format, sploaded.format)
for key, val in datsp.__dict__.items():
if isinstance(val, np.ndarray):
assert_array_equal(val, sploaded.__dict__[key])
else:
assert_(val == sploaded.__dict__[key])
check()
def test_unary_ufunc_overrides(self):
def check(name):
if name == "sign":
pytest.skip("sign conflicts with comparison op "
"support on Numpy")
if self.spmatrix in (dok_matrix, lil_matrix):
pytest.skip("Unary ops not implemented for dok/lil")
ufunc = getattr(np, name)
X = self.spmatrix(np.arange(20).reshape(4, 5) / 20.)
X0 = ufunc(X.toarray())
X2 = ufunc(X)
assert_array_equal(X2.toarray(), X0)
for name in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt",
"abs"]:
check(name)
def test_resize(self):
# resize(shape) resizes the matrix in-place
D = np.array([[1, 0, 3, 4],
[2, 0, 0, 0],
[3, 0, 0, 0]])
S = self.spmatrix(D)
assert_(S.resize((3, 2)) is None)
assert_array_equal(S.A, [[1, 0],
[2, 0],
[3, 0]])
S.resize((2, 2))
assert_array_equal(S.A, [[1, 0],
[2, 0]])
S.resize((3, 2))
assert_array_equal(S.A, [[1, 0],
[2, 0],
[0, 0]])
S.resize((3, 3))
assert_array_equal(S.A, [[1, 0, 0],
[2, 0, 0],
[0, 0, 0]])
# test no-op
S.resize((3, 3))
assert_array_equal(S.A, [[1, 0, 0],
[2, 0, 0],
[0, 0, 0]])
# test *args
S.resize(3, 2)
assert_array_equal(S.A, [[1, 0],
[2, 0],
[0, 0]])
for bad_shape in [1, (-1, 2), (2, -1), (1, 2, 3)]:
assert_raises(ValueError, S.resize, bad_shape)
class _TestInplaceArithmetic(object):
@pytest.mark.skipif(NumpyVersion(np.__version__) < "1.13.0",
reason="numpy version doesn't respect array priority")
def test_inplace_dense(self):
a = np.ones((3, 4))
b = self.spmatrix(a)
x = a.copy()
y = a.copy()
x += a
y += b
assert_array_equal(x, y)
x = a.copy()
y = a.copy()
x -= a
y -= b
assert_array_equal(x, y)
# This is matrix product, from __rmul__
assert_raises(ValueError, operator.imul, x, b)
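        # With a sparse RHS, in-place y *= b.T falls back to y = y * b.T,
        # i.e. the matrix product, so it should match x.dot(a.T).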
x = a.copy()
y = a.copy()
x = x.dot(a.T)
y *= b.T
assert_array_equal(x, y)
# Matrix (non-elementwise) floor division is not defined
assert_raises(TypeError, operator.ifloordiv, x, b)
def test_imul_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# Avoid implicit casting.
if np.can_cast(type(2), dtype, casting='same_kind'):
a = datsp.copy()
a *= 2
b = dat.copy()
b *= 2
assert_array_equal(b, a.todense())
if np.can_cast(type(17.3), dtype, casting='same_kind'):
a = datsp.copy()
a *= 17.3
b = dat.copy()
b *= 17.3
assert_array_equal(b, a.todense())
for dtype in self.math_dtypes:
check(dtype)
def test_idiv_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
if np.can_cast(type(2), dtype, casting='same_kind'):
a = datsp.copy()
a /= 2
b = dat.copy()
b /= 2
assert_array_equal(b, a.todense())
if np.can_cast(type(17.3), dtype, casting='same_kind'):
a = datsp.copy()
a /= 17.3
b = dat.copy()
b /= 17.3
assert_array_equal(b, a.todense())
for dtype in self.math_dtypes:
# /= should only be used with float dtypes to avoid implicit
# casting.
if not np.can_cast(dtype, np.int_):
check(dtype)
def test_inplace_success(self):
# Inplace ops should work even if a specialized version is not
# implemented, falling back to x = x <op> y
a = self.spmatrix(np.eye(5))
b = self.spmatrix(np.eye(5))
bp = self.spmatrix(np.eye(5))
b += a
bp = bp + a
assert_allclose(b.A, bp.A)
b *= a
bp = bp * a
assert_allclose(b.A, bp.A)
b -= a
bp = bp - a
assert_allclose(b.A, bp.A)
assert_raises(TypeError, operator.ifloordiv, a, b)
class _TestGetSet(object):
def test_getelement(self):
def check(dtype):
D = array([[1,0,0],
[4,3,0],
[0,2,0],
[0,0,0]], dtype=dtype)
A = self.spmatrix(D)
M,N = D.shape
for i in range(-M, M):
for j in range(-N, N):
assert_equal(A[i,j], D[i,j])
for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]:
assert_raises((IndexError, TypeError), A.__getitem__, ij)
for dtype in supported_dtypes:
check(np.dtype(dtype))
def test_setelement(self):
def check(dtype):
A = self.spmatrix((3,4), dtype=dtype)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
A[0, 0] = dtype.type(0) # bug 870
A[1, 2] = dtype.type(4.0)
A[0, 1] = dtype.type(3)
A[2, 0] = dtype.type(2.0)
A[0,-1] = dtype.type(8)
A[-1,-2] = dtype.type(7)
A[0, 1] = dtype.type(5)
if dtype != np.bool_:
assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]])
for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]:
assert_raises(IndexError, A.__setitem__, ij, 123.0)
for v in [[1,2,3], array([1,2,3])]:
assert_raises(ValueError, A.__setitem__, (0,0), v)
if (not np.issubdtype(dtype, np.complexfloating) and
dtype != np.bool_):
for v in [3j]:
assert_raises(TypeError, A.__setitem__, (0,0), v)
for dtype in supported_dtypes:
check(np.dtype(dtype))
def test_negative_index_assignment(self):
# Regression test for github issue 4428.
def check(dtype):
A = self.spmatrix((3, 10), dtype=dtype)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
A[0, -4] = 1
assert_equal(A[0, -4], 1)
for dtype in self.math_dtypes:
check(np.dtype(dtype))
def test_scalar_assign_2(self):
n, m = (5, 10)
def _test_set(i, j, nitems):
msg = "%r ; %r ; %r" % (i, j, nitems)
A = self.spmatrix((n, m))
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
A[i, j] = 1
assert_almost_equal(A.sum(), nitems, err_msg=msg)
assert_almost_equal(A[i, j], 1, err_msg=msg)
# [i,j]
for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)),
(array(-1), array(-2))]:
_test_set(i, j, 1)
def test_index_scalar_assign(self):
A = self.spmatrix((5, 5))
B = np.zeros((5, 5))
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
for C in [A, B]:
C[0,1] = 1
C[3,0] = 4
C[3,0] = 9
assert_array_equal(A.toarray(), B)
class _TestSolve(object):
def test_solve(self):
# Test whether the lu_solve command segfaults, as reported by Nils
# Wagner for a 64-bit machine, 02 March 2005 (EJS)
n = 20
np.random.seed(0) # make tests repeatable
A = zeros((n,n), dtype=complex)
x = np.random.rand(n)
y = np.random.rand(n-1)+1j*np.random.rand(n-1)
r = np.random.rand(n)
for i in range(len(x)):
A[i,i] = x[i]
for i in range(len(y)):
A[i,i+1] = y[i]
A[i+1,i] = conjugate(y[i])
A = self.spmatrix(A)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format")
x = splu(A).solve(r)
assert_almost_equal(A*x,r)
class _TestSlicing(object):
def test_dtype_preservation(self):
assert_equal(self.spmatrix((1,10), dtype=np.int16)[0,1:5].dtype, np.int16)
assert_equal(self.spmatrix((1,10), dtype=np.int32)[0,1:5].dtype, np.int32)
assert_equal(self.spmatrix((1,10), dtype=np.float32)[0,1:5].dtype, np.float32)
assert_equal(self.spmatrix((1,10), dtype=np.float64)[0,1:5].dtype, np.float64)
def test_get_horiz_slice(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[1,:], A[1,:].todense())
assert_array_equal(B[1,2:5], A[1,2:5].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1, 1:3], D[1, 1:3].todense())
# Now test slicing when a row contains only zeros
E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1, 1:3], F[1, 1:3].todense())
assert_array_equal(E[2, -2:], F[2, -2:].A)
# The following should raise exceptions:
assert_raises(IndexError, A.__getitem__, (slice(None), 11))
assert_raises(IndexError, A.__getitem__, (6, slice(3, 7)))
def test_get_vert_slice(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[2:5,0], A[2:5,0].todense())
assert_array_equal(B[:,1], A[:,1].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1:3, 1], D[1:3, 1].todense())
assert_array_equal(C[:, 2], D[:, 2].todense())
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[:, 1], F[:, 1].todense())
assert_array_equal(E[-2:, 2], F[-2:, 2].todense())
# The following should raise exceptions:
assert_raises(IndexError, A.__getitem__, (slice(None), 11))
assert_raises(IndexError, A.__getitem__, (6, slice(3, 7)))
def test_get_slices(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(A[2:5,0:3].todense(), B[2:5,0:3])
assert_array_equal(A[1:,:-1].todense(), B[1:,:-1])
assert_array_equal(A[:-1,1:].todense(), B[:-1,1:])
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].todense())
assert_array_equal(E[:, 1:], F[:, 1:].todense())
def test_non_unit_stride_2d_indexing(self):
# Regression test -- used to silently ignore the stride.
v0 = np.random.rand(50, 50)
try:
v = self.spmatrix(v0)[0:25:2, 2:30:3]
except ValueError:
# if unsupported
            pytest.skip("feature not implemented")
assert_array_equal(v.todense(),
v0[0:25:2, 2:30:3])
def test_slicing_2(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
# [i,j]
assert_equal(A[2,3], B[2,3])
assert_equal(A[-1,8], B[-1,8])
assert_equal(A[-1,-2],B[-1,-2])
assert_equal(A[array(-1),-2],B[-1,-2])
assert_equal(A[-1,array(-2)],B[-1,-2])
assert_equal(A[array(-1),array(-2)],B[-1,-2])
# [i,1:2]
assert_equal(A[2,:].todense(), B[2,:])
assert_equal(A[2,5:-2].todense(),B[2,5:-2])
assert_equal(A[array(2),5:-2].todense(),B[2,5:-2])
# [1:2,j]
assert_equal(A[:,2].todense(), B[:,2])
assert_equal(A[3:4,9].todense(), B[3:4,9])
assert_equal(A[1:4,-5].todense(),B[1:4,-5])
assert_equal(A[2:-1,3].todense(),B[2:-1,3])
assert_equal(A[2:-1,array(3)].todense(),B[2:-1,3])
# [1:2,1:2]
assert_equal(A[1:2,1:2].todense(),B[1:2,1:2])
assert_equal(A[4:,3:].todense(), B[4:,3:])
assert_equal(A[:4,:5].todense(), B[:4,:5])
assert_equal(A[2:-1,:5].todense(),B[2:-1,:5])
# [i]
assert_equal(A[1,:].todense(), B[1,:])
assert_equal(A[-2,:].todense(),B[-2,:])
assert_equal(A[array(-2),:].todense(),B[-2,:])
# [1:2]
assert_equal(A[1:4].todense(), B[1:4])
assert_equal(A[1:-2].todense(),B[1:-2])
# Check bug reported by Robert Cimrman:
# http://thread.gmane.org/gmane.comp.python.scientific.devel/7986
s = slice(int8(2),int8(4),None)
assert_equal(A[s,:].todense(), B[2:4,:])
assert_equal(A[:,s].todense(), B[:,2:4])
def test_slicing_3(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
s_ = np.s_
slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
s_[8:3:-1], s_[4::-2], s_[:5:-1],
0, 1, s_[:], s_[1:5], -1, -2, -5,
array(-1), np.int8(-3)]
def check_1(a):
x = A[a]
y = B[a]
if y.shape == ():
assert_equal(x, y, repr(a))
else:
if x.size == 0 and y.size == 0:
pass
else:
assert_array_equal(x.todense(), y, repr(a))
for j, a in enumerate(slices):
check_1(a)
def check_2(a, b):
# Indexing np.matrix with 0-d arrays seems to be broken,
# as they seem not to be treated as scalars.
# https://github.com/numpy/numpy/issues/3110
if isinstance(a, np.ndarray):
ai = int(a)
else:
ai = a
if isinstance(b, np.ndarray):
bi = int(b)
else:
bi = b
x = A[a, b]
y = B[ai, bi]
if y.shape == ():
assert_equal(x, y, repr((a, b)))
else:
if x.size == 0 and y.size == 0:
pass
else:
assert_array_equal(x.todense(), y, repr((a, b)))
for i, a in enumerate(slices):
for j, b in enumerate(slices):
check_2(a, b)
def test_ellipsis_slicing(self):
b = asmatrix(arange(50).reshape(5,10))
a = self.spmatrix(b)
assert_array_equal(a[...].A, b[...].A)
assert_array_equal(a[...,].A, b[...,].A)
assert_array_equal(a[1, ...].A, b[1, ...].A)
assert_array_equal(a[..., 1].A, b[..., 1].A)
assert_array_equal(a[1:, ...].A, b[1:, ...].A)
assert_array_equal(a[..., 1:].A, b[..., 1:].A)
assert_array_equal(a[1:, 1, ...].A, b[1:, 1, ...].A)
assert_array_equal(a[1, ..., 1:].A, b[1, ..., 1:].A)
# These return ints
assert_equal(a[1, 1, ...], b[1, 1, ...])
assert_equal(a[1, ..., 1], b[1, ..., 1])
    @pytest.mark.skipif(NumpyVersion(np.__version__) >= '1.9.0.dev',
                        reason="numpy >= 1.9 rejects indexing with multiple ellipses")
def test_multiple_ellipsis_slicing(self):
b = asmatrix(arange(50).reshape(5,10))
a = self.spmatrix(b)
assert_array_equal(a[..., ...].A, b[..., ...].A)
assert_array_equal(a[..., ..., ...].A, b[..., ..., ...].A)
assert_array_equal(a[1, ..., ...].A, b[1, ..., ...].A)
assert_array_equal(a[1:, ..., ...].A, b[1:, ..., ...].A)
assert_array_equal(a[..., ..., 1:].A, b[..., ..., 1:].A)
# Bug in NumPy's slicing
assert_array_equal(a[..., ..., 1].A, b[..., ..., 1].A.reshape((5,1)))
class _TestSlicingAssign(object):
def test_slice_scalar_assign(self):
A = self.spmatrix((5, 5))
B = np.zeros((5, 5))
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
for C in [A, B]:
C[0:1,1] = 1
C[3:0,0] = 4
C[3:4,0] = 9
C[0,4:] = 1
C[3::-1,4:] = 9
assert_array_equal(A.toarray(), B)
def test_slice_assign_2(self):
n, m = (5, 10)
def _test_set(i, j):
msg = "i=%r; j=%r" % (i, j)
A = self.spmatrix((n, m))
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
A[i, j] = 1
B = np.zeros((n, m))
B[i, j] = 1
assert_array_almost_equal(A.todense(), B, err_msg=msg)
# [i,1:2]
for i, j in [(2, slice(3)), (2, slice(None, 10, 4)), (2, slice(5, -2)),
(array(2), slice(5, -2))]:
_test_set(i, j)
def test_self_self_assignment(self):
        # Tests whether a row of one sparse matrix can be assigned to
        # another.
B = self.spmatrix((4,3))
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
B[0,0] = 2
B[1,2] = 7
B[2,1] = 3
B[3,0] = 10
A = B / 10
B[0,:] = A[0,:]
assert_array_equal(A[0,:].A, B[0,:].A)
A = B / 10
B[:,:] = A[:1,:1]
assert_array_equal(np.zeros((4,3)) + A[0,0], B.A)
A = B / 10
B[:-1,0] = A[0,:].T
assert_array_equal(A[0,:].A.T, B[:-1,0].A)
def test_slice_assignment(self):
B = self.spmatrix((4,3))
expected = array([[10,0,0],
[0,0,6],
[0,14,0],
[0,0,0]])
block = [[1,0],[0,4]]
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
B[0,0] = 5
B[1,2] = 3
B[2,1] = 7
B[:,:] = B+B
assert_array_equal(B.todense(),expected)
B[:2,:2] = csc_matrix(array(block))
assert_array_equal(B.todense()[:2,:2],block)
def test_sparsity_modifying_assignment(self):
B = self.spmatrix((4,3))
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
B[0,0] = 5
B[1,2] = 3
B[2,1] = 7
B[3,0] = 10
B[:3] = csr_matrix(np.eye(3))
expected = array([[1,0,0],[0,1,0],[0,0,1],[10,0,0]])
assert_array_equal(B.toarray(), expected)
def test_set_slice(self):
A = self.spmatrix((5,10))
B = matrix(zeros((5,10), float))
s_ = np.s_
slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
s_[8:3:-1], s_[4::-2], s_[:5:-1],
0, 1, s_[:], s_[1:5], -1, -2, -5,
array(-1), np.int8(-3)]
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
for j, a in enumerate(slices):
A[a] = j
B[a] = j
assert_array_equal(A.todense(), B, repr(a))
for i, a in enumerate(slices):
for j, b in enumerate(slices):
A[a,b] = 10*i + 1000*(j+1)
B[a,b] = 10*i + 1000*(j+1)
assert_array_equal(A.todense(), B, repr((a, b)))
A[0, 1:10:2] = xrange(1,10,2)
B[0, 1:10:2] = xrange(1,10,2)
assert_array_equal(A.todense(), B)
A[1:5:2,0] = np.array(range(1,5,2))[:,None]
B[1:5:2,0] = np.array(range(1,5,2))[:,None]
assert_array_equal(A.todense(), B)
# The next commands should raise exceptions
assert_raises(ValueError, A.__setitem__, (0, 0), list(range(100)))
assert_raises(ValueError, A.__setitem__, (0, 0), arange(100))
assert_raises(ValueError, A.__setitem__, (0, slice(None)),
list(range(100)))
assert_raises(ValueError, A.__setitem__, (slice(None), 1),
list(range(100)))
assert_raises(ValueError, A.__setitem__, (slice(None), 1), A.copy())
assert_raises(ValueError, A.__setitem__,
([[1, 2, 3], [0, 3, 4]], [1, 2, 3]), [1, 2, 3, 4])
assert_raises(ValueError, A.__setitem__,
([[1, 2, 3], [0, 3, 4], [4, 1, 3]],
[[1, 2, 4], [0, 1, 3]]), [2, 3, 4])
class _TestFancyIndexing(object):
"""Tests fancy indexing features. The tests for any matrix formats
that implement these features should derive from this class.
"""
def test_bad_index(self):
A = self.spmatrix(np.zeros([5, 5]))
assert_raises((IndexError, ValueError, TypeError), A.__getitem__, "foo")
assert_raises((IndexError, ValueError, TypeError), A.__getitem__, (2, "foo"))
assert_raises((IndexError, ValueError), A.__getitem__,
([1, 2, 3], [1, 2, 3, 4]))
def test_fancy_indexing(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
# [i]
assert_equal(A[[1,3]].todense(), B[[1,3]])
# [i,[1,2]]
assert_equal(A[3,[1,3]].todense(), B[3,[1,3]])
assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[-1,array([2,-5])].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),array([2,-5])].todense(),B[-1,[2,-5]])
# [1:2,[1,2]]
assert_equal(A[:,[2,8,3,-1]].todense(),B[:,[2,8,3,-1]])
assert_equal(A[3:4,[9]].todense(), B[3:4,[9]])
assert_equal(A[1:4,[-1,-5]].todense(), B[1:4,[-1,-5]])
assert_equal(A[1:4,array([-1,-5])].todense(), B[1:4,[-1,-5]])
# [[1,2],j]
assert_equal(A[[1,3],3].todense(), B[[1,3],3])
assert_equal(A[[2,-5],-4].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),-4].todense(), B[[2,-5],-4])
assert_equal(A[[2,-5],array(-4)].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),array(-4)].todense(), B[[2,-5],-4])
# [[1,2],1:2]
assert_equal(A[[1,3],:].todense(), B[[1,3],:])
assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1])
assert_equal(A[array([2,-5]),8:-1].todense(),B[[2,-5],8:-1])
# [[1,2],[1,2]]
assert_equal(todense(A[[1,3],[2,4]]), B[[1,3],[2,4]])
assert_equal(todense(A[[-1,-3],[2,-4]]), B[[-1,-3],[2,-4]])
assert_equal(todense(A[array([-1,-3]),[2,-4]]), B[[-1,-3],[2,-4]])
assert_equal(todense(A[[-1,-3],array([2,-4])]), B[[-1,-3],[2,-4]])
assert_equal(todense(A[array([-1,-3]),array([2,-4])]), B[[-1,-3],[2,-4]])
# [[[1],[2]],[1,2]]
assert_equal(A[[[1],[3]],[2,4]].todense(), B[[[1],[3]],[2,4]])
assert_equal(A[[[-1],[-3],[-2]],[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[[[-1],[-3],[-2]],array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
# [[1,2]]
assert_equal(A[[1,3]].todense(), B[[1,3]])
assert_equal(A[[-1,-3]].todense(),B[[-1,-3]])
assert_equal(A[array([-1,-3])].todense(),B[[-1,-3]])
# [[1,2],:][:,[1,2]]
assert_equal(A[[1,3],:][:,[2,4]].todense(), B[[1,3],:][:,[2,4]])
assert_equal(A[[-1,-3],:][:,[2,-4]].todense(), B[[-1,-3],:][:,[2,-4]])
assert_equal(A[array([-1,-3]),:][:,array([2,-4])].todense(), B[[-1,-3],:][:,[2,-4]])
# [:,[1,2]][[1,2],:]
assert_equal(A[:,[1,3]][[2,4],:].todense(), B[:,[1,3]][[2,4],:])
assert_equal(A[:,[-1,-3]][[2,-4],:].todense(), B[:,[-1,-3]][[2,-4],:])
assert_equal(A[:,array([-1,-3])][array([2,-4]),:].todense(), B[:,[-1,-3]][[2,-4],:])
# Check bug reported by Robert Cimrman:
# http://thread.gmane.org/gmane.comp.python.scientific.devel/7986
s = slice(int8(2),int8(4),None)
assert_equal(A[s,:].todense(), B[2:4,:])
assert_equal(A[:,s].todense(), B[:,2:4])
# Regression for gh-4917: index with tuple of 2D arrays
i = np.array([[1]], dtype=int)
assert_equal(A[i,i].todense(), B[i,i])
# Regression for gh-4917: index with tuple of empty nested lists
assert_equal(A[[[]], [[]]].todense(), B[[[]], [[]]])
def test_fancy_indexing_randomized(self):
np.random.seed(1234) # make runs repeatable
NUM_SAMPLES = 50
M = 6
N = 4
D = np.asmatrix(np.random.rand(M,N))
D = np.multiply(D, D > 0.5)
I = np.random.randint(-M + 1, M, size=NUM_SAMPLES)
J = np.random.randint(-N + 1, N, size=NUM_SAMPLES)
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
I_bad = I + M
J_bad = J - N
assert_raises(IndexError, S.__getitem__, (I_bad,J))
assert_raises(IndexError, S.__getitem__, (I,J_bad))
def test_fancy_indexing_boolean(self):
np.random.seed(1234) # make runs repeatable
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
I = np.array(np.random.randint(0, 2, size=5), dtype=bool)
J = np.array(np.random.randint(0, 2, size=10), dtype=bool)
X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool)
assert_equal(todense(A[I]), B[I])
assert_equal(todense(A[:,J]), B[:, J])
assert_equal(todense(A[X]), B[X])
assert_equal(todense(A[B > 9]), B[B > 9])
I = np.array([True, False, True, True, False])
J = np.array([False, True, True, False, True,
False, False, False, False, False])
assert_equal(todense(A[I, J]), B[I, J])
Z1 = np.zeros((6, 11), dtype=bool)
Z2 = np.zeros((6, 11), dtype=bool)
Z2[0,-1] = True
Z3 = np.zeros((6, 11), dtype=bool)
Z3[-1,0] = True
assert_equal(A[Z1], np.array([]))
assert_raises(IndexError, A.__getitem__, Z2)
assert_raises(IndexError, A.__getitem__, Z3)
assert_raises((IndexError, ValueError), A.__getitem__, (X, 1))
def test_fancy_indexing_sparse_boolean(self):
np.random.seed(1234) # make runs repeatable
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool)
Xsp = csr_matrix(X)
assert_equal(todense(A[Xsp]), B[X])
assert_equal(todense(A[A > 9]), B[B > 9])
Z = np.array(np.random.randint(0, 2, size=(5, 11)), dtype=bool)
Y = np.array(np.random.randint(0, 2, size=(6, 10)), dtype=bool)
Zsp = csr_matrix(Z)
Ysp = csr_matrix(Y)
assert_raises(IndexError, A.__getitem__, Zsp)
assert_raises(IndexError, A.__getitem__, Ysp)
assert_raises((IndexError, ValueError), A.__getitem__, (Xsp, 1))
def test_fancy_indexing_regression_3087(self):
mat = self.spmatrix(array([[1, 0, 0], [0,1,0], [1,0,0]]))
desired_cols = np.ravel(mat.sum(0)) > 0
assert_equal(mat[:, desired_cols].A, [[1, 0], [0, 1], [1, 0]])
def test_fancy_indexing_seq_assign(self):
mat = self.spmatrix(array([[1, 0], [0, 1]]))
assert_raises(ValueError, mat.__setitem__, (0, 0), np.array([1,2]))
def test_fancy_indexing_empty(self):
B = asmatrix(arange(50).reshape(5,10))
B[1,:] = 0
B[:,2] = 0
B[3,6] = 0
A = self.spmatrix(B)
K = np.array([False, False, False, False, False])
assert_equal(todense(A[K]), B[K])
K = np.array([], dtype=int)
assert_equal(todense(A[K]), B[K])
assert_equal(todense(A[K,K]), B[K,K])
J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None]
assert_equal(todense(A[K,J]), B[K,J])
assert_equal(todense(A[J,K]), B[J,K])
@contextlib.contextmanager
def check_remains_sorted(X):
"""Checks that sorted indices property is retained through an operation
"""
if not hasattr(X, 'has_sorted_indices') or not X.has_sorted_indices:
yield
return
yield
indices = X.indices.copy()
X.has_sorted_indices = False
X.sort_indices()
assert_array_equal(indices, X.indices,
'Expected sorted indices, found unsorted')
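# Example usage (illustrative): the manager is a no-op for formats without
# ``has_sorted_indices`` (e.g. LIL); for CSR/CSC it snapshots the indices
# after the wrapped block, re-sorts, and asserts that nothing moved:
#
#     A = csr_matrix(np.eye(3))
#     with check_remains_sorted(A):
#         A[1, 1] = 7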
class _TestFancyIndexingAssign(object):
def test_bad_index_assign(self):
A = self.spmatrix(np.zeros([5, 5]))
assert_raises((IndexError, ValueError, TypeError), A.__setitem__, "foo", 2)
assert_raises((IndexError, ValueError, TypeError), A.__setitem__, (2, "foo"), 5)
def test_fancy_indexing_set(self):
n, m = (5, 10)
def _test_set_slice(i, j):
A = self.spmatrix((n, m))
B = asmatrix(np.zeros((n, m)))
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
B[i, j] = 1
with check_remains_sorted(A):
A[i, j] = 1
assert_array_almost_equal(A.todense(), B)
# [1:2,1:2]
for i, j in [((2, 3, 4), slice(None, 10, 4)),
(np.arange(3), slice(5, -2)),
(slice(2, 5), slice(5, -2))]:
_test_set_slice(i, j)
for i, j in [(np.arange(3), np.arange(3)), ((0, 3, 4), (1, 2, 4))]:
_test_set_slice(i, j)
def test_fancy_assignment_dtypes(self):
def check(dtype):
A = self.spmatrix((5, 5), dtype=dtype)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
A[[0,1],[0,1]] = dtype.type(1)
assert_equal(A.sum(), dtype.type(1)*2)
A[0:2,0:2] = dtype.type(1.0)
assert_equal(A.sum(), dtype.type(1)*4)
A[2,2] = dtype.type(1.0)
assert_equal(A.sum(), dtype.type(1)*4 + dtype.type(1))
for dtype in supported_dtypes:
check(np.dtype(dtype))
def test_sequence_assignment(self):
A = self.spmatrix((4,3))
B = self.spmatrix(eye(3,4))
i0 = [0,1,2]
i1 = (0,1,2)
i2 = array(i0)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
with check_remains_sorted(A):
A[0,i0] = B[i0,0].T
A[1,i1] = B[i1,1].T
A[2,i2] = B[i2,2].T
assert_array_equal(A.todense(),B.T.todense())
# column slice
A = self.spmatrix((2,3))
with check_remains_sorted(A):
A[1,1:3] = [10,20]
assert_array_equal(A.todense(), [[0,0,0],[0,10,20]])
# row slice
A = self.spmatrix((3,2))
with check_remains_sorted(A):
A[1:3,1] = [[10],[20]]
assert_array_equal(A.todense(), [[0,0],[0,10],[0,20]])
# both slices
A = self.spmatrix((3,3))
B = asmatrix(np.zeros((3,3)))
with check_remains_sorted(A):
for C in [A, B]:
C[[0,1,2], [0,1,2]] = [4,5,6]
assert_array_equal(A.toarray(), B)
# both slices (2)
A = self.spmatrix((4, 3))
with check_remains_sorted(A):
A[(1, 2, 3), (0, 1, 2)] = [1, 2, 3]
assert_almost_equal(A.sum(), 6)
B = asmatrix(np.zeros((4, 3)))
B[(1, 2, 3), (0, 1, 2)] = [1, 2, 3]
assert_array_equal(A.todense(), B)
def test_fancy_assign_empty(self):
B = asmatrix(arange(50).reshape(5,10))
B[1,:] = 0
B[:,2] = 0
B[3,6] = 0
A = self.spmatrix(B)
K = np.array([False, False, False, False, False])
A[K] = 42
assert_equal(todense(A), B)
K = np.array([], dtype=int)
A[K] = 42
assert_equal(todense(A), B)
A[K,K] = 42
assert_equal(todense(A), B)
J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None]
A[K,J] = 42
assert_equal(todense(A), B)
A[J,K] = 42
assert_equal(todense(A), B)
class _TestFancyMultidim(object):
def test_fancy_indexing_ndarray(self):
sets = [
(np.array([[1], [2], [3]]), np.array([3, 4, 2])),
(np.array([[1], [2], [3]]), np.array([[3, 4, 2]])),
(np.array([[1, 2, 3]]), np.array([[3], [4], [2]])),
(np.array([1, 2, 3]), np.array([[3], [4], [2]])),
(np.array([[1, 2, 3], [3, 4, 2]]),
np.array([[5, 6, 3], [2, 3, 1]]))
]
# These inputs generate 3-D outputs
# (np.array([[[1], [2], [3]], [[3], [4], [2]]]),
# np.array([[[5], [6], [3]], [[2], [3], [1]]])),
for I, J in sets:
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
I_bad = I + 5
J_bad = J + 7
assert_raises(IndexError, S.__getitem__, (I_bad,J))
assert_raises(IndexError, S.__getitem__, (I,J_bad))
# This would generate 3-D arrays -- not supported
assert_raises(IndexError, S.__getitem__, ([I, I], slice(None)))
assert_raises(IndexError, S.__getitem__, (slice(None), [J, J]))
class _TestFancyMultidimAssign(object):
def test_fancy_assign_ndarray(self):
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
X = np.random.rand(2, 3)
I = np.array([[1, 2, 3], [3, 4, 2]])
J = np.array([[5, 6, 3], [2, 3, 1]])
with check_remains_sorted(S):
S[I,J] = X
D[I,J] = X
assert_equal(S.todense(), D)
I_bad = I + 5
J_bad = J + 7
C = [1, 2, 3]
with check_remains_sorted(S):
S[I,J] = C
D[I,J] = C
assert_equal(S.todense(), D)
with check_remains_sorted(S):
S[I,J] = 3
D[I,J] = 3
assert_equal(S.todense(), D)
assert_raises(IndexError, S.__setitem__, (I_bad,J), C)
assert_raises(IndexError, S.__setitem__, (I,J_bad), C)
def test_fancy_indexing_multidim_set(self):
n, m = (5, 10)
def _test_set_slice(i, j):
A = self.spmatrix((n, m))
with check_remains_sorted(A), suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
A[i, j] = 1
B = asmatrix(np.zeros((n, m)))
B[i, j] = 1
assert_array_almost_equal(A.todense(), B)
# [[[1, 2], [1, 2]], [1, 2]]
for i, j in [(np.array([[1, 2], [1, 3]]), [1, 3]),
(np.array([0, 4]), [[0, 3], [1, 2]]),
([[1, 2, 3], [0, 2, 4]], [[0, 4, 3], [4, 1, 2]])]:
_test_set_slice(i, j)
def test_fancy_assign_list(self):
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
X = np.random.rand(2, 3)
I = [[1, 2, 3], [3, 4, 2]]
J = [[5, 6, 3], [2, 3, 1]]
S[I,J] = X
D[I,J] = X
assert_equal(S.todense(), D)
I_bad = [[ii + 5 for ii in i] for i in I]
J_bad = [[jj + 7 for jj in j] for j in J]
C = [1, 2, 3]
S[I,J] = C
D[I,J] = C
assert_equal(S.todense(), D)
S[I,J] = 3
D[I,J] = 3
assert_equal(S.todense(), D)
assert_raises(IndexError, S.__setitem__, (I_bad,J), C)
assert_raises(IndexError, S.__setitem__, (I,J_bad), C)
def test_fancy_assign_slice(self):
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
I = [[1, 2, 3], [3, 4, 2]]
J = [[5, 6, 3], [2, 3, 1]]
I_bad = [[ii + 5 for ii in i] for i in I]
J_bad = [[jj + 7 for jj in j] for j in J]
C = [1, 2, 3, 4, 5, 6, 7]
assert_raises(IndexError, S.__setitem__, (I_bad, slice(None)), C)
assert_raises(IndexError, S.__setitem__, (slice(None), J_bad), C)
class _TestArithmetic(object):
"""
Test real/complex arithmetic
"""
def __arith_init(self):
# these can be represented exactly in FP (so arithmetic should be exact)
self.__A = matrix([[-1.5, 6.5, 0, 2.25, 0, 0],
[3.125, -7.875, 0.625, 0, 0, 0],
[0, 0, -0.125, 1.0, 0, 0],
[0, 0, 8.375, 0, 0, 0]],'float64')
self.__B = matrix([[0.375, 0, 0, 0, -5, 2.5],
[14.25, -3.75, 0, 0, -0.125, 0],
[0, 7.25, 0, 0, 0, 0],
[18.5, -0.0625, 0, 0, 0, 0]],'complex128')
self.__B.imag = matrix([[1.25, 0, 0, 0, 6, -3.875],
[2.25, 4.125, 0, 0, 0, 2.75],
[0, 4.125, 0, 0, 0, 0],
[-0.0625, 0, 0, 0, 0, 0]],'float64')
# fractions are all x/16ths
assert_array_equal((self.__A*16).astype('int32'),16*self.__A)
assert_array_equal((self.__B.real*16).astype('int32'),16*self.__B.real)
assert_array_equal((self.__B.imag*16).astype('int32'),16*self.__B.imag)
self.__Asp = self.spmatrix(self.__A)
self.__Bsp = self.spmatrix(self.__B)
def test_add_sub(self):
self.__arith_init()
# basic tests
assert_array_equal((self.__Asp+self.__Bsp).todense(),self.__A+self.__B)
# check conversions
for x in supported_dtypes:
A = self.__A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
if not np.issubdtype(y, np.complexfloating):
B = self.__B.real.astype(y)
else:
B = self.__B.astype(y)
Bsp = self.spmatrix(B)
# addition
D1 = A + B
S1 = Asp + Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp + B,D1) # check sparse + dense
assert_array_equal(A + Bsp,D1) # check dense + sparse
# subtraction
if (np.dtype('bool') in [x, y]) and (
NumpyVersion(np.__version__) >= '1.9.0.dev'):
# boolean array subtraction deprecated in 1.9.0
continue
D1 = A - B
S1 = Asp - Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp - B,D1) # check sparse - dense
assert_array_equal(A - Bsp,D1) # check dense - sparse
def test_mu(self):
self.__arith_init()
# basic tests
assert_array_equal((self.__Asp*self.__Bsp.T).todense(),self.__A*self.__B.T)
for x in supported_dtypes:
A = self.__A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
if np.issubdtype(y, np.complexfloating):
B = self.__B.astype(y)
else:
B = self.__B.real.astype(y)
Bsp = self.spmatrix(B)
D1 = A * B.T
S1 = Asp * Bsp.T
assert_allclose(S1.todense(), D1,
atol=1e-14*abs(D1).max())
assert_equal(S1.dtype,D1.dtype)
class _TestMinMax(object):
def test_minmax(self):
for dtype in [np.float32, np.float64, np.int32, np.int64, np.complex128]:
D = np.arange(20, dtype=dtype).reshape(5,4)
X = self.spmatrix(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 19)
assert_equal(X.min().dtype, dtype)
assert_equal(X.max().dtype, dtype)
D *= -1
X = self.spmatrix(D)
assert_equal(X.min(), -19)
assert_equal(X.max(), 0)
D += 5
X = self.spmatrix(D)
assert_equal(X.min(), -14)
assert_equal(X.max(), 5)
# try a fully dense matrix
X = self.spmatrix(np.arange(1, 10).reshape(3, 3))
assert_equal(X.min(), 1)
assert_equal(X.min().dtype, X.dtype)
X = -X
assert_equal(X.max(), -1)
        # and an all-zero (fully sparse) matrix
Z = self.spmatrix(np.zeros(1))
assert_equal(Z.min(), 0)
assert_equal(Z.max(), 0)
assert_equal(Z.max().dtype, Z.dtype)
        # a matrix whose leading rows are entirely zero
D = np.arange(20, dtype=float).reshape(5,4)
D[0:2, :] = 0
X = self.spmatrix(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 19)
# zero-size matrices
for D in [np.zeros((0, 0)), np.zeros((0, 10)), np.zeros((10, 0))]:
X = self.spmatrix(D)
assert_raises(ValueError, X.min)
assert_raises(ValueError, X.max)
def test_minmax_axis(self):
D = np.matrix(np.arange(50).reshape(5,10))
# completely empty rows, leaving some completely full:
D[1, :] = 0
# empty at end for reduceat:
D[:, 9] = 0
# partial rows/cols:
D[3, 3] = 0
# entries on either side of 0:
D[2, 2] = -1
X = self.spmatrix(D)
axes = [-2, -1, 0, 1]
for axis in axes:
assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A)
assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A)
# full matrix
D = np.matrix(np.arange(1, 51).reshape(10, 5))
X = self.spmatrix(D)
for axis in axes:
assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A)
assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A)
# empty matrix
D = np.matrix(np.zeros((10, 5)))
X = self.spmatrix(D)
for axis in axes:
assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A)
assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A)
axes_even = [0, -2]
axes_odd = [1, -1]
# zero-size matrices
D = np.zeros((0, 10))
X = self.spmatrix(D)
for axis in axes_even:
assert_raises(ValueError, X.min, axis=axis)
assert_raises(ValueError, X.max, axis=axis)
for axis in axes_odd:
assert_array_equal(np.zeros((0, 1)), X.min(axis=axis).A)
assert_array_equal(np.zeros((0, 1)), X.max(axis=axis).A)
D = np.zeros((10, 0))
X = self.spmatrix(D)
for axis in axes_odd:
assert_raises(ValueError, X.min, axis=axis)
assert_raises(ValueError, X.max, axis=axis)
for axis in axes_even:
assert_array_equal(np.zeros((1, 0)), X.min(axis=axis).A)
assert_array_equal(np.zeros((1, 0)), X.max(axis=axis).A)
def test_minmax_invalid_params(self):
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spmatrix(dat)
for fname in ('min', 'max'):
func = getattr(datsp, fname)
assert_raises(ValueError, func, axis=3)
assert_raises(TypeError, func, axis=(0, 1))
assert_raises(TypeError, func, axis=1.5)
assert_raises(ValueError, func, axis=1, out=1)
def test_numpy_minmax(self):
# See gh-5987
# xref gh-7460 in 'numpy'
from scipy.sparse import data
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spmatrix(dat)
        # We only test sparse matrices that implement 'min' and 'max',
        # since those are the ones with compatibility issues against the
        # 'numpy' implementation.
if isinstance(datsp, data._minmax_mixin):
assert_array_equal(np.min(datsp), np.min(dat))
assert_array_equal(np.max(datsp), np.max(dat))
def test_argmax(self):
D1 = np.array([
[-1, 5, 2, 3],
[0, 0, -1, -2],
[-1, -2, -3, -4],
[1, 2, 3, 4],
[1, 2, 0, 0],
])
D2 = D1.transpose()
for D in [D1, D2]:
mat = csr_matrix(D)
assert_equal(mat.argmax(), np.argmax(D))
assert_equal(mat.argmin(), np.argmin(D))
assert_equal(mat.argmax(axis=0),
np.asmatrix(np.argmax(D, axis=0)))
assert_equal(mat.argmin(axis=0),
np.asmatrix(np.argmin(D, axis=0)))
assert_equal(mat.argmax(axis=1),
np.asmatrix(np.argmax(D, axis=1).reshape(-1, 1)))
assert_equal(mat.argmin(axis=1),
np.asmatrix(np.argmin(D, axis=1).reshape(-1, 1)))
D1 = np.empty((0, 5))
D2 = np.empty((5, 0))
for axis in [None, 0]:
mat = self.spmatrix(D1)
assert_raises(ValueError, mat.argmax, axis=axis)
assert_raises(ValueError, mat.argmin, axis=axis)
for axis in [None, 1]:
mat = self.spmatrix(D2)
assert_raises(ValueError, mat.argmax, axis=axis)
assert_raises(ValueError, mat.argmin, axis=axis)
class _TestGetNnzAxis(object):
def test_getnnz_axis(self):
dat = np.matrix([[0, 2],
[3, 5],
[-6, 9]])
bool_dat = dat.astype(bool).A
datsp = self.spmatrix(dat)
accepted_return_dtypes = (np.int32, np.int64)
assert_array_equal(bool_dat.sum(axis=None), datsp.getnnz(axis=None))
assert_array_equal(bool_dat.sum(), datsp.getnnz())
assert_array_equal(bool_dat.sum(axis=0), datsp.getnnz(axis=0))
assert_in(datsp.getnnz(axis=0).dtype, accepted_return_dtypes)
assert_array_equal(bool_dat.sum(axis=1), datsp.getnnz(axis=1))
assert_in(datsp.getnnz(axis=1).dtype, accepted_return_dtypes)
assert_array_equal(bool_dat.sum(axis=-2), datsp.getnnz(axis=-2))
assert_in(datsp.getnnz(axis=-2).dtype, accepted_return_dtypes)
assert_array_equal(bool_dat.sum(axis=-1), datsp.getnnz(axis=-1))
assert_in(datsp.getnnz(axis=-1).dtype, accepted_return_dtypes)
assert_raises(ValueError, datsp.getnnz, axis=2)
#------------------------------------------------------------------------------
# Tailored base class for generic tests
#------------------------------------------------------------------------------
def _possibly_unimplemented(cls, require=True):
"""
Construct a class that either runs tests as usual (require=True),
or each method skips if it encounters a common error.
"""
if require:
return cls
else:
def wrap(fc):
@functools.wraps(fc)
def wrapper(*a, **kw):
try:
return fc(*a, **kw)
except (NotImplementedError, TypeError, ValueError,
IndexError, AttributeError):
                    pytest.skip("feature not implemented")
return wrapper
new_dict = dict(cls.__dict__)
for name, func in cls.__dict__.items():
if name.startswith('test_'):
new_dict[name] = wrap(func)
return type(cls.__name__ + "NotImplemented",
cls.__bases__,
new_dict)
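# For instance (illustrative), ``_possibly_unimplemented(_TestGetSet, False)``
# returns a ``_TestGetSetNotImplemented`` class whose test methods report a
# skip, rather than a failure, when the format raises NotImplementedError,
# TypeError, etc. from __getitem__/__setitem__.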
def sparse_test_class(getset=True, slicing=True, slicing_assign=True,
fancy_indexing=True, fancy_assign=True,
fancy_multidim_indexing=True, fancy_multidim_assign=True,
minmax=True, nnz_axis=True):
"""
Construct a base class, optionally converting some of the tests in
the suite to check that the feature is not implemented.
"""
bases = (_TestCommon,
_possibly_unimplemented(_TestGetSet, getset),
_TestSolve,
_TestInplaceArithmetic,
_TestArithmetic,
_possibly_unimplemented(_TestSlicing, slicing),
_possibly_unimplemented(_TestSlicingAssign, slicing_assign),
_possibly_unimplemented(_TestFancyIndexing, fancy_indexing),
_possibly_unimplemented(_TestFancyIndexingAssign,
fancy_assign),
_possibly_unimplemented(_TestFancyMultidim,
fancy_indexing and fancy_multidim_indexing),
_possibly_unimplemented(_TestFancyMultidimAssign,
fancy_multidim_assign and fancy_assign),
_possibly_unimplemented(_TestMinMax, minmax),
_possibly_unimplemented(_TestGetNnzAxis, nnz_axis))
# check that test names do not clash
names = {}
for cls in bases:
for name in cls.__dict__:
if not name.startswith('test_'):
continue
old_cls = names.get(name)
if old_cls is not None:
raise ValueError("Test class %s overloads test %s defined in %s" % (
cls.__name__, name, old_cls.__name__))
names[name] = cls
return type("TestBase", bases, {})
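# For example, the COO/DIA/BSR classes below pass ``getset=False,
# slicing=False, ...``: the corresponding mixins get wrapped by
# _possibly_unimplemented, so missing __getitem__/__setitem__ support shows
# up as a skip rather than a hard failure.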
#------------------------------------------------------------------------------
# Matrix class based tests
#------------------------------------------------------------------------------
class TestCSR(sparse_test_class()):
@classmethod
def spmatrix(cls, *args, **kwargs):
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a csr_matrix is expensive")
return csr_matrix(*args, **kwargs)
math_dtypes = [np.bool_, np.int_, np.float_, np.complex_]
def test_constructor1(self):
b = matrix([[0,4,0],
[3,0,0],
[0,2,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[4,3,2])
assert_array_equal(bsp.indices,[1,0,1])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_equal(bsp.getnnz(),3)
assert_equal(bsp.getformat(),'csr')
assert_array_equal(bsp.todense(),b)
def test_constructor2(self):
b = zeros((6,6),'d')
b[3,4] = 5
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[4])
assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1])
assert_array_almost_equal(bsp.todense(),b)
def test_constructor3(self):
b = matrix([[1,0],
[0,2],
[3,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,3])
assert_array_equal(bsp.indices,[0,1,0])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_array_almost_equal(bsp.todense(),b)
### currently disabled
## def test_constructor4(self):
## """try using int64 indices"""
## data = arange( 6 ) + 1
## col = array( [1, 2, 1, 0, 0, 2], dtype='int64' )
## ptr = array( [0, 2, 4, 6], dtype='int64' )
##
## a = csr_matrix( (data, col, ptr), shape = (3,3) )
##
## b = matrix([[0,1,2],
## [4,3,0],
## [5,0,6]],'d')
##
## assert_equal(a.indptr.dtype,numpy.dtype('int64'))
## assert_equal(a.indices.dtype,numpy.dtype('int64'))
## assert_array_equal(a.todense(),b)
def test_constructor4(self):
# using (data, ij) format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csr = csr_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csr.todense())
def test_constructor5(self):
# infer dimensions from arrays
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csr = csr_matrix((data, indices, indptr))
assert_array_equal(csr.shape,(3,6))
def test_constructor6(self):
# infer dimensions and dtype from lists
indptr = [0, 1, 3, 3]
indices = [0, 5, 1, 2]
data = [1, 2, 3, 4]
csr = csr_matrix((data, indices, indptr))
assert_array_equal(csr.shape, (3,6))
assert_(np.issubdtype(csr.dtype, np.signedinteger))
def test_sort_indices(self):
data = arange(5)
indices = array([7, 2, 1, 5, 4])
indptr = array([0, 3, 5])
asp = csr_matrix((data, indices, indptr), shape=(2,10))
bsp = asp.copy()
asp.sort_indices()
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_eliminate_zeros(self):
data = array([1, 0, 0, 0, 2, 0, 3, 0])
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = csr_matrix((data, indices, indptr), shape=(2,10))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csr_matrix(np.arange(20).reshape(4, 5) / 20.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange(5)
indices = array([7, 2, 1, 5, 4])
indptr = array([0, 3, 5])
asp = csr_matrix((data, indices, indptr), shape=(2,10))
data = arange(6)
indices = array([8, 1, 5, 7, 2, 4])
indptr = array([0, 2, 6])
bsp = csr_matrix((data, indices, indptr), shape=(2,10))
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
def test_fancy_indexing_broadcast(self):
# broadcasting indexing mode is supported
I = np.array([[1], [2], [3]])
J = np.array([3, 4, 2])
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
def test_has_sorted_indices(self):
"Ensure has_sorted_indices memoizes sorted state for sort_indices"
sorted_inds = np.array([0, 1])
unsorted_inds = np.array([1, 0])
data = np.array([1, 1])
indptr = np.array([0, 2])
M = csr_matrix((data, sorted_inds, indptr)).copy()
assert_equal(True, M.has_sorted_indices)
M = csr_matrix((data, unsorted_inds, indptr)).copy()
assert_equal(False, M.has_sorted_indices)
# set by sorting
M.sort_indices()
assert_equal(True, M.has_sorted_indices)
assert_array_equal(M.indices, sorted_inds)
M = csr_matrix((data, unsorted_inds, indptr)).copy()
# set manually (although underlyingly unsorted)
M.has_sorted_indices = True
assert_equal(True, M.has_sorted_indices)
assert_array_equal(M.indices, unsorted_inds)
# ensure sort bypassed when has_sorted_indices == True
M.sort_indices()
assert_array_equal(M.indices, unsorted_inds)
def test_has_canonical_format(self):
"Ensure has_canonical_format memoizes state for sum_duplicates"
M = csr_matrix((np.array([2]), np.array([0]), np.array([0, 1])))
assert_equal(True, M.has_canonical_format)
indices = np.array([0, 0]) # contains duplicate
data = np.array([1, 1])
indptr = np.array([0, 2])
M = csr_matrix((data, indices, indptr)).copy()
assert_equal(False, M.has_canonical_format)
# set by deduplicating
M.sum_duplicates()
assert_equal(True, M.has_canonical_format)
assert_equal(1, len(M.indices))
M = csr_matrix((data, indices, indptr)).copy()
# set manually (although underlyingly duplicated)
M.has_canonical_format = True
assert_equal(True, M.has_canonical_format)
assert_equal(2, len(M.indices)) # unaffected content
# ensure deduplication bypassed when has_canonical_format == True
M.sum_duplicates()
assert_equal(2, len(M.indices)) # unaffected content
def test_scalar_idx_dtype(self):
        # Check that the index dtype takes into account all parameters
        # passed to sparsetools, including the scalar ones: np.int32 tops
        # out at 2**31 - 1, so any dimension beyond that must promote the
        # index arrays to np.int64.
indptr = np.zeros(2, dtype=np.int32)
indices = np.zeros(0, dtype=np.int32)
vals = np.zeros(0)
a = csr_matrix((vals, indices, indptr), shape=(1, 2**31-1))
b = csr_matrix((vals, indices, indptr), shape=(1, 2**31))
ij = np.zeros((2, 0), dtype=np.int32)
c = csr_matrix((vals, ij), shape=(1, 2**31-1))
d = csr_matrix((vals, ij), shape=(1, 2**31))
e = csr_matrix((1, 2**31-1))
f = csr_matrix((1, 2**31))
assert_equal(a.indptr.dtype, np.int32)
assert_equal(b.indptr.dtype, np.int64)
assert_equal(c.indptr.dtype, np.int32)
assert_equal(d.indptr.dtype, np.int64)
assert_equal(e.indptr.dtype, np.int32)
assert_equal(f.indptr.dtype, np.int64)
# These shouldn't fail
for x in [a, b, c, d, e, f]:
x + x
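        # The cutoff sketched above (illustrative; assumes the internal
        # helper scipy.sparse.sputils.get_index_dtype):
        #
        #     >>> from scipy.sparse.sputils import get_index_dtype
        #     >>> get_index_dtype(maxval=2**31 - 1) == np.int32
        #     True
        #     >>> get_index_dtype(maxval=2**31) == np.int64
        #     True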
TestCSR.init_class()
class TestCSC(sparse_test_class()):
@classmethod
def spmatrix(cls, *args, **kwargs):
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a csc_matrix is expensive")
return csc_matrix(*args, **kwargs)
math_dtypes = [np.bool_, np.int_, np.float_, np.complex_]
def test_constructor1(self):
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,1,3])
assert_array_equal(bsp.indices,[0,2,1,2])
assert_array_equal(bsp.indptr,[0,1,2,3,4])
assert_equal(bsp.getnnz(),4)
assert_equal(bsp.shape,b.shape)
assert_equal(bsp.getformat(),'csc')
def test_constructor2(self):
b = zeros((6,6),'d')
b[2,4] = 5
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[2])
assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1])
def test_constructor3(self):
b = matrix([[1,0],[0,0],[0,2]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2])
assert_array_equal(bsp.indices,[0,2])
assert_array_equal(bsp.indptr,[0,1,2])
def test_constructor4(self):
# using (data, ij) format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csc = csc_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csc.todense())
def test_constructor5(self):
# infer dimensions from arrays
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csc = csc_matrix((data, indices, indptr))
assert_array_equal(csc.shape,(6,3))
def test_constructor6(self):
# infer dimensions and dtype from lists
indptr = [0, 1, 3, 3]
indices = [0, 5, 1, 2]
data = [1, 2, 3, 4]
csc = csc_matrix((data, indices, indptr))
assert_array_equal(csc.shape,(6,3))
assert_(np.issubdtype(csc.dtype, np.signedinteger))
def test_eliminate_zeros(self):
data = array([1, 0, 0, 0, 2, 0, 3, 0])
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = csc_matrix((data, indices, indptr), shape=(10,2))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_sort_indices(self):
data = arange(5)
row = array([7, 2, 1, 5, 4])
ptr = [0, 3, 5]
asp = csc_matrix((data, row, ptr), shape=(10,2))
bsp = asp.copy()
asp.sort_indices()
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csc_matrix(np.arange(21).reshape(7, 3) / 21.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]:
            assert_equal(hasattr(csc_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange(5)
indices = array([7, 2, 1, 5, 4])
indptr = array([0, 3, 5])
asp = csc_matrix((data, indices, indptr), shape=(10,2))
data = arange(6)
indices = array([8, 1, 5, 7, 2, 4])
indptr = array([0, 2, 6])
bsp = csc_matrix((data, indices, indptr), shape=(10,2))
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
def test_fancy_indexing_broadcast(self):
# broadcasting indexing mode is supported
I = np.array([[1], [2], [3]])
J = np.array([3, 4, 2])
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
def test_scalar_idx_dtype(self):
# Check that index dtype takes into account all parameters
# passed to sparsetools, including the scalar ones
indptr = np.zeros(2, dtype=np.int32)
indices = np.zeros(0, dtype=np.int32)
vals = np.zeros(0)
a = csc_matrix((vals, indices, indptr), shape=(2**31-1, 1))
b = csc_matrix((vals, indices, indptr), shape=(2**31, 1))
ij = np.zeros((2, 0), dtype=np.int32)
c = csc_matrix((vals, ij), shape=(2**31-1, 1))
d = csc_matrix((vals, ij), shape=(2**31, 1))
        # shape-only constructor (shape transposed relative to the CSR test)
        e = csc_matrix((2**31-1, 1))
        f = csc_matrix((2**31, 1))
assert_equal(a.indptr.dtype, np.int32)
assert_equal(b.indptr.dtype, np.int64)
assert_equal(c.indptr.dtype, np.int32)
assert_equal(d.indptr.dtype, np.int64)
assert_equal(e.indptr.dtype, np.int32)
assert_equal(f.indptr.dtype, np.int64)
# These shouldn't fail
for x in [a, b, c, d, e, f]:
x + x
TestCSC.init_class()
class TestDOK(sparse_test_class(minmax=False, nnz_axis=False)):
spmatrix = dok_matrix
math_dtypes = [np.int_, np.float_, np.complex_]
def test_mult(self):
A = dok_matrix((10,10))
A[0,3] = 10
A[5,6] = 20
D = A*A.T
E = A*A.H
assert_array_equal(D.A, E.A)
def test_add_nonzero(self):
A = self.spmatrix((3,2))
A[0,1] = -10
A[2,0] = 20
A = A + 10
B = matrix([[10, 0], [10, 10], [30, 10]])
assert_array_equal(A.todense(), B)
A = A + 1j
B = B + 1j
assert_array_equal(A.todense(), B)
def test_dok_divide_scalar(self):
A = self.spmatrix((3,2))
A[0,1] = -10
A[2,0] = 20
assert_array_equal((A/1j).todense(), A.todense()/1j)
assert_array_equal((A/9).todense(), A.todense()/9)
def test_convert(self):
# Test provided by Andrew Straw. Fails in SciPy <= r1477.
(m, n) = (6, 7)
a = dok_matrix((m, n))
# set a few elements, but none in the last column
a[2,1] = 1
a[0,2] = 2
a[3,1] = 3
a[1,5] = 4
a[4,3] = 5
a[4,2] = 6
# assert that the last column is all zeros
assert_array_equal(a.toarray()[:,n-1], zeros(m,))
# make sure it still works for CSC format
csc = a.tocsc()
assert_array_equal(csc.toarray()[:,n-1], zeros(m,))
# now test CSR
(m, n) = (n, m)
b = a.transpose()
assert_equal(b.shape, (m, n))
# assert that the last row is all zeros
assert_array_equal(b.toarray()[m-1,:], zeros(n,))
# make sure it still works for CSR format
csr = b.tocsr()
assert_array_equal(csr.toarray()[m-1,:], zeros(n,))
def test_ctor(self):
# Empty ctor
assert_raises(TypeError, dok_matrix)
# Dense ctor
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
A = dok_matrix(b)
assert_equal(b.dtype, A.dtype)
assert_equal(A.todense(), b)
# Sparse ctor
c = csr_matrix(b)
assert_equal(A.todense(), c.todense())
data = [[0, 1, 2], [3, 0, 0]]
d = dok_matrix(data, dtype=np.float32)
assert_equal(d.dtype, np.float32)
da = d.toarray()
assert_equal(da.dtype, np.float32)
assert_array_equal(da, data)
def test_ticket1160(self):
# Regression test for ticket #1160.
a = dok_matrix((3,3))
a[0,0] = 0
        # This assert would fail, because the above assignment would
        # incorrectly call __setitem__ even though the value was 0.
assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys")
# Slice assignments were also affected.
b = dok_matrix((3,3))
b[:,0] = 0
assert_(len(b.keys()) == 0, "Unexpected entries in keys")
TestDOK.init_class()
class TestLIL(sparse_test_class(minmax=False)):
spmatrix = lil_matrix
math_dtypes = [np.int_, np.float_, np.complex_]
def test_dot(self):
A = matrix(zeros((10,10)))
A[0,3] = 10
A[5,6] = 20
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
assert_array_equal(A * A.T, (B * B.T).todense())
assert_array_equal(A * A.H, (B * B.H).todense())
def test_scalar_mul(self):
x = lil_matrix((3,3))
x[0,0] = 2
x = x*2
assert_equal(x[0,0],4)
x = x*0
assert_equal(x[0,0],0)
def test_inplace_ops(self):
A = lil_matrix([[0,2,3],[4,0,6]])
B = lil_matrix([[0,1,0],[0,2,3]])
data = {'add': (B,A + B),
'sub': (B,A - B),
'mul': (3,A * 3)}
for op,(other,expected) in data.items():
result = A.copy()
getattr(result, '__i%s__' % op)(other)
assert_array_equal(result.todense(), expected.todense())
# Ticket 1604.
A = lil_matrix((1,3), dtype=np.dtype('float64'))
B = array([0.1,0.1,0.1])
A[0,:] += B
assert_array_equal(A[0,:].toarray().squeeze(), B)
def test_lil_iteration(self):
row_data = [[1,2,3],[4,5,6]]
B = lil_matrix(array(row_data))
for r,row in enumerate(B):
assert_array_equal(row.todense(),array(row_data[r],ndmin=2))
def test_lil_from_csr(self):
# Tests whether a lil_matrix can be constructed from a
# csr_matrix.
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
B[8,3] = 30
B[3,8] = 40
B[8,9] = 50
C = B.tocsr()
D = lil_matrix(C)
assert_array_equal(C.A, D.A)
def test_fancy_indexing_lil(self):
M = asmatrix(arange(25).reshape(5,5))
A = lil_matrix(M)
assert_equal(A[array([1,2,3]),2:3].todense(), M[array([1,2,3]),2:3])
def test_point_wise_multiply(self):
l = lil_matrix((4,3))
l[0,0] = 1
l[1,1] = 2
l[2,2] = 3
l[3,1] = 4
m = lil_matrix((4,3))
m[0,0] = 1
m[0,1] = 2
m[2,2] = 3
m[3,1] = 4
m[3,2] = 4
assert_array_equal(l.multiply(m).todense(),
m.multiply(l).todense())
assert_array_equal(l.multiply(m).todense(),
[[1,0,0],
[0,0,0],
[0,0,9],
[0,16,0]])
def test_lil_multiply_removal(self):
# Ticket #1427.
a = lil_matrix(np.ones((3,3)))
a *= 2.
a[0, :] = 0
TestLIL.init_class()
class TestCOO(sparse_test_class(getset=False,
slicing=False, slicing_assign=False,
fancy_indexing=False, fancy_assign=False)):
spmatrix = coo_matrix
math_dtypes = [np.int_, np.float_, np.complex_]
def test_constructor1(self):
# unsorted triplet format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
coo = coo_matrix((data,(row,col)),(4,3))
assert_array_equal(arange(12).reshape(4,3),coo.todense())
def test_constructor2(self):
# unsorted triplet format with duplicates (which are summed)
row = array([0,1,2,2,2,2,0,0,2,2])
col = array([0,2,0,2,1,1,1,0,0,2])
data = array([2,9,-4,5,7,0,-1,2,1,-5])
coo = coo_matrix((data,(row,col)),(3,3))
mat = matrix([[4,-1,0],[0,0,9],[-3,7,0]])
assert_array_equal(mat,coo.todense())
def test_constructor3(self):
# empty matrix
coo = coo_matrix((4,3))
assert_array_equal(coo.shape,(4,3))
assert_array_equal(coo.row,[])
assert_array_equal(coo.col,[])
assert_array_equal(coo.data,[])
assert_array_equal(coo.todense(),zeros((4,3)))
def test_constructor4(self):
# from dense matrix
mat = array([[0,1,0,0],
[7,0,3,0],
[0,4,0,0]])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat)
# upgrade rank 1 arrays to row matrix
mat = array([0,1,0,0])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat.reshape(1,-1))
@pytest.mark.xfail(run=False, reason='COO does not have a __getitem__')
def test_iterator(self):
pass
def test_todia_all_zeros(self):
        zero_mat = [[0, 0]]  # avoid shadowing numpy's ``zeros``
        dia = coo_matrix(zero_mat).todia()
        assert_array_equal(dia.A, zero_mat)
def test_sum_duplicates(self):
coo = coo_matrix((4,3))
coo.sum_duplicates()
coo = coo_matrix(([1,2], ([1,0], [1,0])))
coo.sum_duplicates()
assert_array_equal(coo.A, [[2,0],[0,1]])
coo = coo_matrix(([1,2], ([1,1], [1,1])))
coo.sum_duplicates()
assert_array_equal(coo.A, [[0,0],[0,3]])
assert_array_equal(coo.row, [1])
assert_array_equal(coo.col, [1])
assert_array_equal(coo.data, [3])
def test_todok_duplicates(self):
coo = coo_matrix(([1,1,1,1], ([0,2,2,0], [0,1,1,0])))
dok = coo.todok()
assert_array_equal(dok.A, coo.A)
def test_eliminate_zeros(self):
data = array([1, 0, 0, 0, 2, 0, 3, 0])
row = array([0, 0, 0, 1, 1, 1, 1, 1])
col = array([1, 2, 3, 4, 5, 6, 7, 8])
asp = coo_matrix((data, (row, col)), shape=(2,10))
bsp = asp.copy()
asp.eliminate_zeros()
assert_((asp.data != 0).all())
assert_array_equal(asp.A, bsp.A)
def test_reshape_copy(self):
arr = [[0, 10, 0, 0], [0, 0, 0, 0], [0, 20, 30, 40]]
new_shape = (2, 6)
x = coo_matrix(arr)
y = x.reshape(new_shape)
assert_(y.data is x.data)
y = x.reshape(new_shape, copy=False)
assert_(y.data is x.data)
y = x.reshape(new_shape, copy=True)
assert_(not np.may_share_memory(y.data, x.data))
TestCOO.init_class()
class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=False,
fancy_indexing=False, fancy_assign=False,
minmax=False, nnz_axis=False)):
spmatrix = dia_matrix
math_dtypes = [np.int_, np.float_, np.complex_]
def test_constructor1(self):
D = matrix([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
data = np.array([[1,2,3,4]]).repeat(3,axis=0)
offsets = np.array([0,-1,2])
assert_equal(dia_matrix((data,offsets), shape=(4,4)).todense(), D)
@pytest.mark.xfail(run=False, reason='DIA does not have a __getitem__')
def test_iterator(self):
pass
@with_64bit_maxval_limit(3)
def test_setdiag_dtype(self):
m = dia_matrix(np.eye(3))
assert_equal(m.offsets.dtype, np.int32)
m.setdiag((3,), k=2)
assert_equal(m.offsets.dtype, np.int32)
m = dia_matrix(np.eye(4))
assert_equal(m.offsets.dtype, np.int64)
m.setdiag((3,), k=3)
assert_equal(m.offsets.dtype, np.int64)
@pytest.mark.skip(reason='DIA stores extra zeros')
def test_getnnz_axis(self):
pass
TestDIA.init_class()
class TestBSR(sparse_test_class(getset=False,
slicing=False, slicing_assign=False,
fancy_indexing=False, fancy_assign=False,
nnz_axis=False)):
spmatrix = bsr_matrix
math_dtypes = [np.int_, np.float_, np.complex_]
def test_constructor1(self):
# check native BSR format constructor
indptr = array([0,2,2,4])
indices = array([0,2,2,3])
data = zeros((4,2,3))
data[0] = array([[0, 1, 2],
[3, 0, 5]])
data[1] = array([[0, 2, 4],
[6, 0, 10]])
data[2] = array([[0, 4, 8],
[12, 0, 20]])
data[3] = array([[0, 5, 10],
[15, 0, 25]])
A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]])
Asp = bsr_matrix((data,indices,indptr),shape=(6,12))
assert_equal(Asp.todense(),A)
# infer shape from arrays
Asp = bsr_matrix((data,indices,indptr))
assert_equal(Asp.todense(),A)
def test_constructor2(self):
# construct from dense
# test zero mats
for shape in [(1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]:
A = zeros(shape)
assert_equal(bsr_matrix(A).todense(),A)
A = zeros((4,6))
assert_equal(bsr_matrix(A,blocksize=(2,2)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]])
assert_equal(bsr_matrix(A).todense(),A)
assert_equal(bsr_matrix(A,shape=(6,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(1,1)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,6)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(3,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(6,12)).todense(),A)
A = kron([[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]])
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
def test_constructor3(self):
# construct from coo-like (data,(row,col)) format
arg = ([1,2,3], ([0,1,1], [0,0,1]))
A = array([[1,0],[2,3]])
assert_equal(bsr_matrix(arg, blocksize=(2,2)).todense(), A)
def test_constructor4(self):
# regression test for gh-6292: bsr_matrix((data, indices, indptr)) was
# trying to compare an int to a None
n = 8
data = np.ones((n, n, 1), dtype=np.int8)
indptr = np.array([0, n], dtype=np.int32)
indices = np.arange(n, dtype=np.int32)
bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False)
def test_eliminate_zeros(self):
data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T
data = data.reshape(-1,2,2)
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = bsr_matrix((data, indices, indptr), shape=(4,20))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3*4)
assert_array_equal(asp.todense(),bsp.todense())
def test_bsr_matvec(self):
A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5))
x = arange(A.shape[1]).reshape(-1,1)
assert_equal(A*x, A.todense()*x)
def test_bsr_matvecs(self):
A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5))
x = arange(A.shape[1]*6).reshape(-1,6)
assert_equal(A*x, A.todense()*x)
@pytest.mark.xfail(run=False, reason='BSR does not have a __getitem__')
def test_iterator(self):
pass
@pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__')
def test_setdiag(self):
pass
def test_resize_blocked(self):
# test resize() with non-(1,1) blocksize
D = np.array([[1, 0, 3, 4],
[2, 0, 0, 0],
[3, 0, 0, 0]])
S = self.spmatrix(D, blocksize=(1, 2))
assert_(S.resize((3, 2)) is None)
assert_array_equal(S.A, [[1, 0],
[2, 0],
[3, 0]])
S.resize((2, 2))
assert_array_equal(S.A, [[1, 0],
[2, 0]])
S.resize((3, 2))
assert_array_equal(S.A, [[1, 0],
[2, 0],
[0, 0]])
S.resize((3, 4))
assert_array_equal(S.A, [[1, 0, 0, 0],
[2, 0, 0, 0],
[0, 0, 0, 0]])
assert_raises(ValueError, S.resize, (2, 3))
@pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__')
def test_setdiag_comprehensive(self):
pass
def test_scalar_idx_dtype(self):
# Check that index dtype takes into account all parameters
# passed to sparsetools, including the scalar ones
indptr = np.zeros(2, dtype=np.int32)
indices = np.zeros(0, dtype=np.int32)
vals = np.zeros((0, 1, 1))
a = bsr_matrix((vals, indices, indptr), shape=(1, 2**31-1))
b = bsr_matrix((vals, indices, indptr), shape=(1, 2**31))
c = bsr_matrix((1, 2**31-1))
d = bsr_matrix((1, 2**31))
assert_equal(a.indptr.dtype, np.int32)
assert_equal(b.indptr.dtype, np.int64)
assert_equal(c.indptr.dtype, np.int32)
assert_equal(d.indptr.dtype, np.int64)
try:
vals2 = np.zeros((0, 1, 2**31-1))
vals3 = np.zeros((0, 1, 2**31))
e = bsr_matrix((vals2, indices, indptr), shape=(1, 2**31-1))
f = bsr_matrix((vals3, indices, indptr), shape=(1, 2**31))
assert_equal(e.indptr.dtype, np.int32)
assert_equal(f.indptr.dtype, np.int64)
except (MemoryError, ValueError):
# May fail on 32-bit Python
e = 0
f = 0
# These shouldn't fail
for x in [a, b, c, d, e, f]:
x + x
TestBSR.init_class()
#------------------------------------------------------------------------------
# Tests for non-canonical representations (with duplicates, unsorted indices)
#------------------------------------------------------------------------------
def _same_sum_duplicate(data, *inds, **kwargs):
"""Duplicates entries to produce the same matrix"""
indptr = kwargs.pop('indptr', None)
if np.issubdtype(data.dtype, np.bool_) or \
np.issubdtype(data.dtype, np.unsignedinteger):
if indptr is None:
return (data,) + inds
else:
return (data,) + inds + (indptr,)
zeros_pos = (data == 0).nonzero()
# duplicate data
data = data.repeat(2, axis=0)
data[::2] -= 1
data[1::2] = 1
# don't spoil all explicit zeros
if zeros_pos[0].size > 0:
pos = tuple(p[0] for p in zeros_pos)
pos1 = (2*pos[0],) + pos[1:]
pos2 = (2*pos[0]+1,) + pos[1:]
data[pos1] = 0
data[pos2] = 0
inds = tuple(indices.repeat(2) for indices in inds)
if indptr is None:
return (data,) + inds
else:
return (data,) + inds + (indptr * 2,)
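# Illustrative sketch (not part of the original test suite; the _demo_* name
# is ours): _same_sum_duplicate splits each entry into two values that sum
# back to the original, so the duplicated arrays describe the same matrix.
def _demo_same_sum_duplicate():
    data = np.array([3., 7., 0.])
    row = np.array([0, 1, 2])
    col = np.array([0, 1, 2])
    new_data, new_row, new_col = _same_sum_duplicate(data, row, col)
    assert_array_equal(new_row, row.repeat(2))
    assert_array_equal(new_col, col.repeat(2))
    # paired values sum to the originals, and the explicit zero survives
    assert_array_equal(new_data.reshape(-1, 2).sum(axis=1), data)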
class _NonCanonicalMixin(object):
def spmatrix(self, D, sorted_indices=False, **kwargs):
"""Replace D with a non-canonical equivalent: containing
duplicate elements and explicit zeros"""
construct = super(_NonCanonicalMixin, self).spmatrix
M = construct(D, **kwargs)
zero_pos = (M.A == 0).nonzero()
has_zeros = (zero_pos[0].size > 0)
if has_zeros:
k = zero_pos[0].size//2
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
M = self._insert_explicit_zero(M, zero_pos[0][k], zero_pos[1][k])
arg1 = self._arg1_for_noncanonical(M, sorted_indices)
if 'shape' not in kwargs:
kwargs['shape'] = M.shape
NC = construct(arg1, **kwargs)
# check that result is valid
if NC.dtype in [np.float32, np.complex64]:
# For single-precision floats, the differences between M and NC
# that are introduced by the extra operations involved in the
# construction of NC necessitate a more lenient tolerance level
# than the default.
rtol = 1e-05
else:
rtol = 1e-07
assert_allclose(NC.A, M.A, rtol=rtol)
# check that at least one explicit zero is present
if has_zeros:
assert_((NC.data == 0).any())
# TODO check that NC has duplicates (which are not explicit zeros)
return NC
@pytest.mark.skip(reason='bool(matrix) counts explicit zeros')
def test_bool(self):
pass
@pytest.mark.skip(reason='getnnz-axis counts explicit zeros')
def test_getnnz_axis(self):
pass
@pytest.mark.skip(reason='nnz counts explicit zeros')
def test_empty(self):
pass
class _NonCanonicalCompressedMixin(_NonCanonicalMixin):
def _arg1_for_noncanonical(self, M, sorted_indices=False):
"""Return non-canonical constructor arg1 equivalent to M"""
data, indices, indptr = _same_sum_duplicate(M.data, M.indices,
indptr=M.indptr)
if not sorted_indices:
for start, stop in izip(indptr, indptr[1:]):
indices[start:stop] = indices[start:stop][::-1].copy()
data[start:stop] = data[start:stop][::-1].copy()
return data, indices, indptr
def _insert_explicit_zero(self, M, i, j):
M[i,j] = 0
return M
class _NonCanonicalCSMixin(_NonCanonicalCompressedMixin):
def test_getelement(self):
def check(dtype, sorted_indices):
D = array([[1,0,0],
[4,3,0],
[0,2,0],
[0,0,0]], dtype=dtype)
A = self.spmatrix(D, sorted_indices=sorted_indices)
M,N = D.shape
for i in range(-M, M):
for j in range(-N, N):
assert_equal(A[i,j], D[i,j])
for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]:
assert_raises((IndexError, TypeError), A.__getitem__, ij)
for dtype in supported_dtypes:
for sorted_indices in [False, True]:
check(np.dtype(dtype), sorted_indices)
def test_setitem_sparse(self):
D = np.eye(3)
A = self.spmatrix(D)
B = self.spmatrix([[1,2,3]])
D[1,:] = B.toarray()
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
A[1,:] = B
assert_array_equal(A.toarray(), D)
D[:,2] = B.toarray().ravel()
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a cs[cr]_matrix is expensive")
A[:,2] = B.T
assert_array_equal(A.toarray(), D)
@pytest.mark.xfail(run=False, reason='inverse broken with non-canonical matrix')
def test_inv(self):
pass
@pytest.mark.xfail(run=False, reason='solve broken with non-canonical matrix')
def test_solve(self):
pass
class TestCSRNonCanonical(_NonCanonicalCSMixin, TestCSR):
pass
class TestCSCNonCanonical(_NonCanonicalCSMixin, TestCSC):
pass
class TestBSRNonCanonical(_NonCanonicalCompressedMixin, TestBSR):
def _insert_explicit_zero(self, M, i, j):
x = M.tocsr()
x[i,j] = 0
return x.tobsr(blocksize=M.blocksize)
@pytest.mark.xfail(run=False, reason='diagonal broken with non-canonical BSR')
def test_diagonal(self):
pass
@pytest.mark.xfail(run=False, reason='expm broken with non-canonical BSR')
def test_expm(self):
pass
class TestCOONonCanonical(_NonCanonicalMixin, TestCOO):
def _arg1_for_noncanonical(self, M, sorted_indices=None):
"""Return non-canonical constructor arg1 equivalent to M"""
data, row, col = _same_sum_duplicate(M.data, M.row, M.col)
return data, (row, col)
def _insert_explicit_zero(self, M, i, j):
M.data = np.r_[M.data.dtype.type(0), M.data]
M.row = np.r_[M.row.dtype.type(i), M.row]
M.col = np.r_[M.col.dtype.type(j), M.col]
return M
def test_setdiag_noncanonical(self):
m = self.spmatrix(np.eye(3))
m.sum_duplicates()
m.setdiag([3, 2], k=1)
m.sum_duplicates()
assert_(np.all(np.diff(m.col) >= 0))
def cases_64bit():
TEST_CLASSES = [TestBSR, TestCOO, TestCSC, TestCSR, TestDIA,
# lil/dok->other conversion operations have get_index_dtype
TestDOK, TestLIL
]
# The following features are missing, so skip the tests:
SKIP_TESTS = {
'test_expm': 'expm for 64-bit indices not available',
'test_inv': 'linsolve for 64-bit indices not available',
'test_solve': 'linsolve for 64-bit indices not available',
'test_scalar_idx_dtype': 'test implemented in base class',
}
for cls in TEST_CLASSES:
for method_name in sorted(dir(cls)):
method = getattr(cls, method_name)
if (method_name.startswith('test_') and
not getattr(method, 'slow', False)):
marks = []
msg = SKIP_TESTS.get(method_name)
if msg:
marks += [pytest.mark.skip(reason=msg)]
for mname in ['skipif', 'skip', 'xfail', 'xslow']:
if hasattr(method, mname):
marks += [getattr(method, mname)]
yield pytest.param(cls, method_name, marks=marks)
class Test64Bit(object):
MAT_CLASSES = [bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dia_matrix]
def _create_some_matrix(self, mat_cls, m, n):
return mat_cls(np.random.rand(m, n))
def _compare_index_dtype(self, m, dtype):
dtype = np.dtype(dtype)
if isinstance(m, csc_matrix) or isinstance(m, csr_matrix) \
or isinstance(m, bsr_matrix):
return (m.indices.dtype == dtype) and (m.indptr.dtype == dtype)
elif isinstance(m, coo_matrix):
return (m.row.dtype == dtype) and (m.col.dtype == dtype)
elif isinstance(m, dia_matrix):
return (m.offsets.dtype == dtype)
else:
raise ValueError("matrix %r has no integer indices" % (m,))
def test_decorator_maxval_limit(self):
# Test that the with_64bit_maxval_limit decorator works
@with_64bit_maxval_limit(maxval_limit=10)
def check(mat_cls):
m = mat_cls(np.random.rand(10, 1))
assert_(self._compare_index_dtype(m, np.int32))
m = mat_cls(np.random.rand(11, 1))
assert_(self._compare_index_dtype(m, np.int64))
for mat_cls in self.MAT_CLASSES:
check(mat_cls)
def test_decorator_maxval_random(self):
# Test that the with_64bit_maxval_limit decorator works (2)
@with_64bit_maxval_limit(random=True)
def check(mat_cls):
seen_32 = False
seen_64 = False
for k in range(100):
m = self._create_some_matrix(mat_cls, 9, 9)
seen_32 = seen_32 or self._compare_index_dtype(m, np.int32)
seen_64 = seen_64 or self._compare_index_dtype(m, np.int64)
if seen_32 and seen_64:
break
else:
raise AssertionError("both 32 and 64 bit indices not seen")
for mat_cls in self.MAT_CLASSES:
check(mat_cls)
def _check_resiliency(self, cls, method_name, **kw):
# Resiliency test, to check that sparse matrices deal reasonably
# with varying index data types.
@with_64bit_maxval_limit(**kw)
def check(cls, method_name):
instance = cls()
if hasattr(instance, 'setup_method'):
instance.setup_method()
try:
getattr(instance, method_name)()
finally:
if hasattr(instance, 'teardown_method'):
instance.teardown_method()
check(cls, method_name)
@pytest.mark.parametrize('cls,method_name', cases_64bit())
def test_resiliency_limit_10(self, cls, method_name):
self._check_resiliency(cls, method_name, maxval_limit=10)
@pytest.mark.parametrize('cls,method_name', cases_64bit())
def test_resiliency_random(self, cls, method_name):
# bsr_matrix.eliminate_zeros relies on csr_matrix constructor
# not making copies of index arrays --- this is not
# necessarily true when we pick the index data type randomly
self._check_resiliency(cls, method_name, random=True)
@pytest.mark.parametrize('cls,method_name', cases_64bit())
def test_resiliency_all_32(self, cls, method_name):
self._check_resiliency(cls, method_name, fixed_dtype=np.int32)
@pytest.mark.parametrize('cls,method_name', cases_64bit())
def test_resiliency_all_64(self, cls, method_name):
self._check_resiliency(cls, method_name, fixed_dtype=np.int64)
@pytest.mark.parametrize('cls,method_name', cases_64bit())
def test_no_64(self, cls, method_name):
self._check_resiliency(cls, method_name, assert_32bit=True)
def test_downcast_intp(self):
# Check that bincount and ufunc.reduceat intp downcasts are
# dealt with. The point here is to trigger points in the code
# that can fail on 32-bit systems when using 64-bit indices,
# due to use of functions that only work with intp-size
# indices.
@with_64bit_maxval_limit(fixed_dtype=np.int64,
downcast_maxval=1)
def check_limited():
# These involve indices larger than `downcast_maxval`
a = csc_matrix([[1, 2], [3, 4], [5, 6]])
assert_raises(AssertionError, a.getnnz, axis=1)
assert_raises(AssertionError, a.sum, axis=0)
a = csr_matrix([[1, 2, 3], [3, 4, 6]])
assert_raises(AssertionError, a.getnnz, axis=0)
a = coo_matrix([[1, 2, 3], [3, 4, 5]])
assert_raises(AssertionError, a.getnnz, axis=0)
@with_64bit_maxval_limit(fixed_dtype=np.int64)
def check_unlimited():
# These involve indices larger than `downcast_maxval`
a = csc_matrix([[1, 2], [3, 4], [5, 6]])
a.getnnz(axis=1)
a.sum(axis=0)
a = csr_matrix([[1, 2, 3], [3, 4, 6]])
a.getnnz(axis=0)
a = coo_matrix([[1, 2, 3], [3, 4, 5]])
a.getnnz(axis=0)
check_limited()
check_unlimited()
| 169,470 | 35.666162 | 122 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/tests/test_csc.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_
from scipy.sparse import csr_matrix, csc_matrix
def test_csc_getrow():
N = 10
np.random.seed(0)
X = np.random.random((N, N))
X[X > 0.7] = 0
Xcsc = csc_matrix(X)
for i in range(N):
arr_row = X[i:i + 1, :]
csc_row = Xcsc.getrow(i)
assert_array_almost_equal(arr_row, csc_row.toarray())
assert_(type(csc_row) is csr_matrix)
def test_csc_getcol():
N = 10
np.random.seed(0)
X = np.random.random((N, N))
X[X > 0.7] = 0
Xcsc = csc_matrix(X)
for i in range(N):
arr_col = X[:, i:i + 1]
csc_col = Xcsc.getcol(i)
assert_array_almost_equal(arr_col, csc_col.toarray())
assert_(type(csc_col) is csc_matrix)
| 859 | 22.243243 | 64 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/_expm_multiply.py
|
"""Compute the action of the matrix exponential.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg import LinearOperator, aslinearoperator
__all__ = ['expm_multiply']
def _exact_inf_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=1).flat)
else:
return np.linalg.norm(A, np.inf)
def _exact_1_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=0).flat)
else:
return np.linalg.norm(A, 1)
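# Minimal sketch (not part of the original module; the _demo_* name is ours):
# both compatibility helpers should agree with numpy.linalg.norm applied to
# the dense equivalent of a sparse operand.
def _demo_exact_norms():
    M = np.array([[1., -2.], [3., 4.]])
    S = scipy.sparse.csr_matrix(M)
    assert _exact_inf_norm(S) == np.linalg.norm(M, np.inf)   # row sums -> 7
    assert _exact_1_norm(S) == np.linalg.norm(M, 1)          # col sums -> 6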
def _trace(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return A.diagonal().sum()
else:
return np.trace(A)
def _ident_like(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
dtype=A.dtype, format=A.format)
else:
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
def expm_multiply(A, B, start=None, stop=None, num=None, endpoint=None):
"""
Compute the action of the matrix exponential of A on B.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix or vector to be multiplied by the matrix exponential of A.
start : scalar, optional
The starting time point of the sequence.
stop : scalar, optional
The end time point of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced time points, so that `stop` is excluded.
Note that the step size changes when `endpoint` is False.
num : int, optional
Number of time points to use.
endpoint : bool, optional
If True, `stop` is the last time point. Otherwise, it is not included.
Returns
-------
expm_A_B : ndarray
The result of the action :math:`e^{t_k A} B`.
Notes
-----
The optional arguments defining the sequence of evenly spaced time points
are compatible with the arguments of `numpy.linspace`.
The output ndarray shape is somewhat complicated, so it is explained here.
The ndim of the output could be either 1, 2, or 3.
It would be 1 if you are computing the expm action on a single vector
at a single time point.
It would be 2 if you are computing the expm action on a vector
at multiple time points, or if you are computing the expm action
on a matrix at a single time point.
It would be 3 if you want the action on a matrix with multiple
columns at multiple time points.
If multiple time points are requested, expm_A_B[0] will always
be the action of the expm at the first time point,
regardless of whether the action is on a vector or a matrix.
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
"Computing the Action of the Matrix Exponential,
with an Application to Exponential Integrators."
SIAM Journal on Scientific Computing,
33 (2). pp. 488-511. ISSN 1064-8275
http://eprints.ma.man.ac.uk/1591/
.. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
"Computing Matrix Functions."
Acta Numerica,
19. 159-208. ISSN 0962-4929
http://eprints.ma.man.ac.uk/1451/
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import expm, expm_multiply
>>> A = csc_matrix([[1, 0], [0, 1]])
>>> A.todense()
matrix([[1, 0],
[0, 1]], dtype=int64)
>>> B = np.array([np.exp(-1.), np.exp(-2.)])
>>> B
array([ 0.36787944, 0.13533528])
>>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
array([[ 1. , 0.36787944],
[ 1.64872127, 0.60653066],
[ 2.71828183, 1. ]])
>>> expm(A).dot(B) # Verify 1st timestep
array([ 1. , 0.36787944])
>>> expm(1.5*A).dot(B) # Verify 2nd timestep
array([ 1.64872127, 0.60653066])
>>> expm(2*A).dot(B) # Verify 3rd timestep
array([ 2.71828183, 1. ])
"""
if all(arg is None for arg in (start, stop, num, endpoint)):
X = _expm_multiply_simple(A, B)
else:
X, status = _expm_multiply_interval(A, B, start, stop, num, endpoint)
return X
def _expm_multiply_simple(A, B, t=1.0, balance=False):
"""
Compute the action of the matrix exponential at a single time point.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix to be multiplied by the matrix exponential of A.
t : float
A time point.
balance : bool
Indicates whether or not to apply balancing.
Returns
-------
F : ndarray
:math:`e^{t A} B`
Notes
-----
This is algorithm (3.2) in Al-Mohy and Higham (2011).
"""
if balance:
raise NotImplementedError
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if A.shape[1] != B.shape[0]:
raise ValueError('the matrices A and B have incompatible shapes')
ident = _ident_like(A)
n = A.shape[0]
if len(B.shape) == 1:
n0 = 1
elif len(B.shape) == 2:
n0 = B.shape[1]
else:
raise ValueError('expected B to be like a matrix or a vector')
u_d = 2**-53
tol = u_d
mu = _trace(A) / float(n)
A = A - mu * ident
A_1_norm = _exact_1_norm(A)
if t*A_1_norm == 0:
m_star, s = 0, 1
else:
ell = 2
norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)
def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
"""
A helper function.
"""
if balance:
raise NotImplementedError
if tol is None:
u_d = 2 ** -53
tol = u_d
F = B
eta = np.exp(t*mu / float(s))
for i in range(s):
c1 = _exact_inf_norm(B)
for j in range(m_star):
coeff = t / float(s*(j+1))
B = coeff * A.dot(B)
c2 = _exact_inf_norm(B)
F = F + B
if c1 + c2 <= tol * _exact_inf_norm(F):
break
c1 = c2
F = eta * F
B = F
return F
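# Minimal sketch (not part of the original module; the _demo_* name is ours):
# for a small dense matrix the single-point action computed by the core above
# should match forming expm(A) explicitly and multiplying.
def _demo_expm_multiply_simple():
    A = np.array([[0., 1.], [-1., 0.]])
    b = np.array([1., 0.])
    expected = scipy.linalg.expm(A).dot(b)
    np.testing.assert_allclose(_expm_multiply_simple(A, b), expected)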
# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
_theta = {
# The first 30 values are from table A.3 of Computing Matrix Functions.
1: 2.29e-16,
2: 2.58e-8,
3: 1.39e-5,
4: 3.40e-4,
5: 2.40e-3,
6: 9.07e-3,
7: 2.38e-2,
8: 5.00e-2,
9: 8.96e-2,
10: 1.44e-1,
# 11
11: 2.14e-1,
12: 3.00e-1,
13: 4.00e-1,
14: 5.14e-1,
15: 6.41e-1,
16: 7.81e-1,
17: 9.31e-1,
18: 1.09,
19: 1.26,
20: 1.44,
# 21
21: 1.62,
22: 1.82,
23: 2.01,
24: 2.22,
25: 2.43,
26: 2.64,
27: 2.86,
28: 3.08,
29: 3.31,
30: 3.54,
# The rest are from table 3.1 of
# Computing the Action of the Matrix Exponential.
35: 4.7,
40: 6.0,
45: 7.2,
50: 8.5,
55: 9.9,
}
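# Illustrative sketch (not part of the original module; the _demo_* name is
# ours): theta grows with the Taylor degree m, and for a fixed m the scaling
# is s = ceil(||t*A||_1 / theta_m), as used by _fragment_3_1 below.
def _demo_theta_scaling(t_A_1_norm=3.2, m=20):
    assert _theta[5] < _theta[10] < _theta[20]
    return int(np.ceil(t_A_1_norm / _theta[m]))   # 3 for the defaults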
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
#XXX Eventually turn this into an API function in the _onenormest module,
#XXX and remove its underscore,
#XXX but wait until expm_multiply goes into scipy.
return scipy.sparse.linalg.onenormest(aslinearoperator(A) ** p)
class LazyOperatorNormInfo:
"""
Information about an operator is lazily computed.
The information includes the exact 1-norm of the operator,
in addition to estimates of 1-norms of powers of the operator.
This uses the notation of Computing the Action (2011).
This class is specialized enough to probably not be of general interest
outside of this module.
"""
def __init__(self, A, A_1_norm=None, ell=2, scale=1):
"""
Provide the operator and some norm-related information.
Parameters
----------
A : linear operator
The operator of interest.
A_1_norm : float, optional
The exact 1-norm of A.
ell : int, optional
A technical parameter controlling norm estimation quality.
scale : int, optional
If specified, return the norms of scale*A instead of A.
"""
self._A = A
self._A_1_norm = A_1_norm
self._ell = ell
self._d = {}
self._scale = scale
def set_scale(self,scale):
"""
Set the scale parameter.
"""
self._scale = scale
def onenorm(self):
"""
Compute the exact 1-norm.
"""
if self._A_1_norm is None:
self._A_1_norm = _exact_1_norm(self._A)
return self._scale*self._A_1_norm
def d(self, p):
"""
Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
"""
if p not in self._d:
est = _onenormest_matrix_power(self._A, p, self._ell)
self._d[p] = est ** (1.0 / p)
return self._scale*self._d[p]
def alpha(self, p):
"""
Lazily compute max(d(p), d(p+1)).
"""
return max(self.d(p), self.d(p+1))
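# Minimal sketch (not part of the original module; the _demo_* name is ours):
# the d(p) estimates are computed once and cached, so repeated queries do not
# re-run the 1-norm estimator.
def _demo_lazy_norm_info():
    info = LazyOperatorNormInfo(np.diag([1., 2., 3.]))
    assert info.onenorm() == 3.0
    assert info.d(2) == info.d(2)   # second call is served from the cache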
def _compute_cost_div_m(m, p, norm_info):
"""
A helper function for computing bounds.
This is equation (3.10).
It measures cost in terms of the number of required matrix products.
Parameters
----------
m : int
A valid key of _theta.
p : int
A matrix power.
norm_info : LazyOperatorNormInfo
Information about 1-norms of related operators.
Returns
-------
cost_div_m : int
Required number of matrix products divided by m.
"""
return int(np.ceil(norm_info.alpha(p) / _theta[m]))
def _compute_p_max(m_max):
"""
Compute the largest positive integer p such that p*(p-1) <= m_max + 1.
Do this in a slightly dumb way, but safe and not too slow.
Parameters
----------
m_max : int
A count related to bounds.
"""
sqrt_m_max = np.sqrt(m_max)
p_low = int(np.floor(sqrt_m_max))
p_high = int(np.ceil(sqrt_m_max + 1))
return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
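# Minimal sketch (not part of the original module; the _demo_* name is ours):
# for the default m_max = 55 the largest p with p*(p-1) <= 56 is p = 8.
def _demo_compute_p_max():
    assert _compute_p_max(55) == 8   # 8*7 = 56 <= 56, while 9*8 = 72 > 56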
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
"""
A helper function for the _expm_multiply_* functions.
Parameters
----------
norm_info : LazyOperatorNormInfo
Information about norms of certain linear operators of interest.
n0 : int
Number of columns in the _expm_multiply_* B matrix.
tol : float
Expected to be
:math:`2^{-24}` for single precision or
:math:`2^{-53}` for double precision.
m_max : int
A value related to a bound.
ell : int
The number of columns used in the 1-norm approximation.
This is usually taken to be small, maybe between 1 and 5.
Returns
-------
best_m : int
Related to bounds for error control.
best_s : int
Amount of scaling.
Notes
-----
This is code fragment (3.1) in Al-Mohy and Higham (2011).
The discussion of default values for m_max and ell
is given between the definitions of equation (3.11)
and the definition of equation (3.12).
"""
if ell < 1:
raise ValueError('expected ell to be a positive integer')
best_m = None
best_s = None
if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
for m, theta in _theta.items():
s = int(np.ceil(norm_info.onenorm() / theta))
if best_m is None or m * s < best_m * best_s:
best_m = m
best_s = s
else:
# Equation (3.11).
for p in range(2, _compute_p_max(m_max) + 1):
for m in range(p*(p-1)-1, m_max+1):
if m in _theta:
s = _compute_cost_div_m(m, p, norm_info)
if best_m is None or m * s < best_m * best_s:
best_m = m
best_s = s
best_s = max(best_s, 1)
return best_m, best_s
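# Minimal sketch (not part of the original module; the _demo_* name is ours):
# a tiny 1-norm satisfies condition (3.13), so the fragment picks the cheapest
# (m, s) straight from the _theta table without any norm estimation.
def _demo_fragment_3_1():
    info = LazyOperatorNormInfo(np.zeros((2, 2)), A_1_norm=1e-10)
    m_star, s = _fragment_3_1(info, n0=1, tol=2**-53)
    assert (m_star, s) == (2, 1)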
def _condition_3_13(A_1_norm, n0, m_max, ell):
"""
A helper function for the _expm_multiply_* functions.
Parameters
----------
A_1_norm : float
The precomputed 1-norm of A.
n0 : int
Number of columns in the _expm_multiply_* B matrix.
m_max : int
A value related to a bound.
ell : int
The number of columns used in the 1-norm approximation.
This is usually taken to be small, maybe between 1 and 5.
Returns
-------
value : bool
Indicates whether or not the condition has been met.
Notes
-----
This is condition (3.13) in Al-Mohy and Higham (2011).
"""
# This is the rhs of equation (3.12).
p_max = _compute_p_max(m_max)
a = 2 * ell * p_max * (p_max + 3)
# Evaluate the condition (3.13).
b = _theta[m_max] / float(n0 * m_max)
return A_1_norm <= a * b
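# Minimal sketch (not part of the original module; the _demo_* name is ours):
# with the default m_max = 55 and ell = 2 the right-hand side of (3.12) is
# about 63, so any operator with a smaller 1-norm meets condition (3.13).
def _demo_condition_3_13():
    assert _condition_3_13(A_1_norm=1e-8, n0=1, m_max=55, ell=2)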
def _expm_multiply_interval(A, B, start=None, stop=None,
num=None, endpoint=None, balance=False, status_only=False):
"""
Compute the action of the matrix exponential at multiple time points.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix to be multiplied by the matrix exponential of A.
start : scalar, optional
The starting time point of the sequence.
stop : scalar, optional
The end time point of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced time points, so that `stop` is excluded.
Note that the step size changes when `endpoint` is False.
num : int, optional
Number of time points to use.
endpoint : bool, optional
If True, `stop` is the last time point. Otherwise, it is not included.
balance : bool
Indicates whether or not to apply balancing.
status_only : bool
A flag that is set to True for some debugging and testing operations.
Returns
-------
F : ndarray
:math:`e^{t_k A} B`
status : int
An integer status for testing and debugging.
Notes
-----
This is algorithm (5.2) in Al-Mohy and Higham (2011).
There seems to be a typo, where line 15 of the algorithm should be
moved to line 6.5 (between lines 6 and 7).
"""
if balance:
raise NotImplementedError
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if A.shape[1] != B.shape[0]:
raise ValueError('the matrices A and B have incompatible shapes')
ident = _ident_like(A)
n = A.shape[0]
if len(B.shape) == 1:
n0 = 1
elif len(B.shape) == 2:
n0 = B.shape[1]
else:
raise ValueError('expected B to be like a matrix or a vector')
u_d = 2**-53
tol = u_d
mu = _trace(A) / float(n)
# Get the linspace samples, attempting to preserve the linspace defaults.
linspace_kwargs = {'retstep': True}
if num is not None:
linspace_kwargs['num'] = num
if endpoint is not None:
linspace_kwargs['endpoint'] = endpoint
samples, step = np.linspace(start, stop, **linspace_kwargs)
# Convert the linspace output to the notation used by the publication.
nsamples = len(samples)
if nsamples < 2:
raise ValueError('at least two time points are required')
q = nsamples - 1
h = step
t_0 = samples[0]
t_q = samples[q]
# Define the output ndarray.
# Use an ndim=3 shape, such that the last two indices
# are the ones that may be involved in level 3 BLAS operations.
X_shape = (nsamples,) + B.shape
X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
t = t_q - t_0
A = A - mu * ident
A_1_norm = _exact_1_norm(A)
ell = 2
norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
if t*A_1_norm == 0:
m_star, s = 0, 1
else:
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
# Compute the expm action up to the initial time point.
X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)
# Compute the expm action at the rest of the time points.
if q <= s:
if status_only:
return 0
else:
return _expm_multiply_interval_core_0(A, X,
h, mu, q, norm_info, tol, ell,n0)
elif not (q % s):
if status_only:
return 1
else:
return _expm_multiply_interval_core_1(A, X,
h, mu, m_star, s, q, tol)
elif (q % s):
if status_only:
return 2
else:
return _expm_multiply_interval_core_2(A, X,
h, mu, m_star, s, q, tol)
else:
raise Exception('internal error')
def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
"""
A helper function, for the case q <= s.
"""
# Compute the new values of m_star and s which should be applied
# over intervals of size t/q
if norm_info.onenorm() == 0:
m_star, s = 0, 1
else:
norm_info.set_scale(1./q)
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
norm_info.set_scale(1)
for k in range(q):
X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
return X, 0
def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
"""
A helper function, for the case q > s and q % s == 0.
"""
d = q // s
input_shape = X.shape[1:]
K_shape = (m_star + 1, ) + input_shape
K = np.empty(K_shape, dtype=X.dtype)
for i in range(s):
Z = X[i*d]
K[0] = Z
high_p = 0
for k in range(1, d+1):
F = K[0]
c1 = _exact_inf_norm(F)
for p in range(1, m_star+1):
if p > high_p:
K[p] = h * A.dot(K[p-1]) / float(p)
coeff = float(pow(k, p))
F = F + coeff * K[p]
inf_norm_K_p_1 = _exact_inf_norm(K[p])
c2 = coeff * inf_norm_K_p_1
if c1 + c2 <= tol * _exact_inf_norm(F):
break
c1 = c2
X[k + i*d] = np.exp(k*h*mu) * F
return X, 1
def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
"""
A helper function, for the case q > s and q % s > 0.
"""
d = q // s
j = q // d
r = q - d * j
input_shape = X.shape[1:]
K_shape = (m_star + 1, ) + input_shape
K = np.empty(K_shape, dtype=X.dtype)
for i in range(j + 1):
Z = X[i*d]
K[0] = Z
high_p = 0
if i < j:
effective_d = d
else:
effective_d = r
for k in range(1, effective_d+1):
F = K[0]
c1 = _exact_inf_norm(F)
for p in range(1, m_star+1):
if p == high_p + 1:
K[p] = h * A.dot(K[p-1]) / float(p)
high_p = p
coeff = float(pow(k, p))
F = F + coeff * K[p]
inf_norm_K_p_1 = _exact_inf_norm(K[p])
c2 = coeff * inf_norm_K_p_1
if c1 + c2 <= tol * _exact_inf_norm(F):
break
c1 = c2
X[k + i*d] = np.exp(k*h*mu) * F
return X, 2
| 21,565 | 29.633523 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/setup.py
|
from __future__ import division, print_function, absolute_import
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linalg',parent_package,top_path)
config.add_subpackage('isolve')
config.add_subpackage('dsolve')
config.add_subpackage('eigen')
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 525 | 24.047619 | 64 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/matfuncs.py
|
"""
Sparse matrix functions
"""
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
from __future__ import division, print_function, absolute_import
__all__ = ['expm', 'inv']
import math
import numpy as np
import scipy.special
from scipy.linalg.basic import solve, solve_triangular
from scipy.sparse.base import isspmatrix
from scipy.sparse.construct import eye as speye
from scipy.sparse.linalg import spsolve
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg.interface import LinearOperator
UPPER_TRIANGULAR = 'upper_triangular'
def inv(A):
"""
Compute the inverse of a sparse matrix
Parameters
----------
A : (M,M) ndarray or sparse matrix
square matrix to be inverted
Returns
-------
Ainv : (M,M) ndarray or sparse matrix
inverse of `A`
Notes
-----
This computes the sparse inverse of `A`. If the inverse of `A` is expected
to be non-sparse, it will likely be faster to convert `A` to dense and use
scipy.linalg.inv.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import inv
>>> A = csc_matrix([[1., 0.], [1., 2.]])
>>> Ainv = inv(A)
>>> Ainv
<2x2 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Column format>
>>> A.dot(Ainv)
<2x2 sparse matrix of type '<class 'numpy.float64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> A.dot(Ainv).todense()
matrix([[ 1., 0.],
[ 0., 1.]])
.. versionadded:: 0.12.0
"""
#check input
if not scipy.sparse.isspmatrix(A):
raise TypeError('Input must be a sparse matrix')
I = speye(A.shape[0], A.shape[1], dtype=A.dtype, format=A.format)
Ainv = spsolve(A, I)
return Ainv
def _onenorm_matrix_power_nnm(A, p):
"""
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
A : a square ndarray or matrix or sparse matrix
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
"""
# check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse matrix).
v = np.ones((A.shape[0], 1), dtype=float)
M = A.T
for i in range(p):
v = M.dot(v)
return np.max(v)
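# Minimal sketch (not part of the original module; the _demo_* name is ours):
# for a matrix with non-negative entries the bound is exact, matching the
# 1-norm of the explicitly formed matrix power.
def _demo_onenorm_matrix_power_nnm():
    A = np.array([[1., 2.], [3., 4.]])
    exact = np.linalg.norm(np.linalg.matrix_power(A, 2), 1)
    assert _onenorm_matrix_power_nnm(A, 2) == exact   # both give 32.0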
def _onenorm(A):
# A compatibility function which should eventually disappear.
# This is copypasted from expm_action.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=0).flat)
else:
return np.linalg.norm(A, 1)
def _ident_like(A):
# A compatibility function which should eventually disappear.
# This is copypasted from expm_action.
if scipy.sparse.isspmatrix(A):
return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
dtype=A.dtype, format=A.format)
else:
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
def _is_upper_triangular(A):
# This function could possibly be of wider interest.
if isspmatrix(A):
lower_part = scipy.sparse.tril(A, -1)
# Check structural upper triangularity,
# then coincidental upper triangularity if needed.
return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
else:
return not np.tril(A, -1).any()
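# Minimal sketch (not part of the original module; the _demo_* name is ours):
# the structure check accepts dense and sparse inputs alike.
def _demo_is_upper_triangular():
    assert _is_upper_triangular(np.triu(np.ones((3, 3))))
    assert not _is_upper_triangular(np.ones((3, 3)))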
def _smart_matrix_product(A, B, alpha=None, structure=None):
"""
A matrix product that knows about sparse and structured matrices.
Parameters
----------
A : 2d ndarray
First matrix.
B : 2d ndarray
Second matrix.
alpha : float
The matrix product will be scaled by this constant.
structure : str, optional
A string describing the structure of both matrices `A` and `B`.
Only `upper_triangular` is currently supported.
Returns
-------
M : 2d ndarray
Matrix product of A and B.
"""
if len(A.shape) != 2:
raise ValueError('expected A to be a rectangular matrix')
if len(B.shape) != 2:
raise ValueError('expected B to be a rectangular matrix')
f = None
if structure == UPPER_TRIANGULAR:
if not isspmatrix(A) and not isspmatrix(B):
f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
if f is not None:
if alpha is None:
alpha = 1.
out = f(alpha, A, B)
else:
if alpha is None:
out = A.dot(B)
else:
out = alpha * A.dot(B)
return out
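# Minimal sketch (not part of the original module; the _demo_* name is ours):
# for dense upper triangular operands the BLAS trmm fast path should agree
# with the plain scaled product alpha * A.dot(B).
def _demo_smart_matrix_product():
    A = np.triu(np.arange(1., 10.).reshape(3, 3))
    B = np.triu(np.ones((3, 3)))
    out = _smart_matrix_product(A, B, alpha=2.0, structure=UPPER_TRIANGULAR)
    np.testing.assert_allclose(out, 2.0 * A.dot(B))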
class MatrixPowerOperator(LinearOperator):
def __init__(self, A, p, structure=None):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0:
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self._structure = structure
self.dtype = A.dtype
self.ndim = A.ndim
self.shape = A.shape
def _matvec(self, x):
for i in range(self._p):
x = self._A.dot(x)
return x
def _rmatvec(self, x):
A_T = self._A.T
x = x.ravel()
for i in range(self._p):
x = A_T.dot(x)
return x
def _matmat(self, X):
for i in range(self._p):
X = _smart_matrix_product(self._A, X, structure=self._structure)
return X
@property
def T(self):
return MatrixPowerOperator(self._A.T, self._p)
class ProductOperator(LinearOperator):
"""
For now, this is limited to products of multiple square matrices.
"""
def __init__(self, *args, **kwargs):
self._structure = kwargs.get('structure', None)
for A in args:
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError(
'For now, the ProductOperator implementation is '
'limited to the product of multiple square matrices.')
if args:
n = args[0].shape[0]
for A in args:
for d in A.shape:
if d != n:
raise ValueError(
'The square matrices of the ProductOperator '
'must all have the same shape.')
self.shape = (n, n)
self.ndim = len(self.shape)
self.dtype = np.find_common_type([x.dtype for x in args], [])
self._operator_sequence = args
def _matvec(self, x):
for A in reversed(self._operator_sequence):
x = A.dot(x)
return x
def _rmatvec(self, x):
x = x.ravel()
for A in self._operator_sequence:
x = A.T.dot(x)
return x
def _matmat(self, X):
for A in reversed(self._operator_sequence):
X = _smart_matrix_product(A, X, structure=self._structure)
return X
@property
def T(self):
T_args = [A.T for A in reversed(self._operator_sequence)]
return ProductOperator(*T_args)
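# Minimal sketch (not part of the original module; the _demo_* name is ours):
# ProductOperator applies its factors right to left, matching the dense
# product A.dot(B) acting on a vector.
def _demo_product_operator():
    A = np.array([[1., 2.], [0., 1.]])
    B = np.array([[0., 1.], [1., 0.]])
    op = ProductOperator(A, B)
    x = np.array([1., 2.])
    np.testing.assert_allclose(op.matvec(x), A.dot(B).dot(x))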
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
MatrixPowerOperator(A, p, structure=structure))
def _onenormest_product(operator_seq,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of the matrix product of the args.
Parameters
----------
operator_seq : linear operator sequence
Matrices whose 1-norm of product is to be computed.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
structure : str, optional
A string describing the structure of all operators.
Only `upper_triangular` is currently supported.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
ProductOperator(*operator_seq, structure=structure))
class _ExpmPadeHelper(object):
"""
Help lazily evaluate a matrix exponential.
The idea is to not do more work than we need for high expm precision,
so we lazily compute matrix powers and store or precompute
other properties of the matrix.
"""
def __init__(self, A, structure=None, use_exact_onenorm=False):
"""
Initialize the object.
Parameters
----------
A : a dense or sparse square numpy matrix or ndarray
The matrix to be exponentiated.
structure : str, optional
A string describing the structure of matrix `A`.
Only `upper_triangular` is currently supported.
use_exact_onenorm : bool, optional
If True then only the exact one-norm of matrix powers and products
will be used. Otherwise, the one-norm of powers and products
may initially be estimated.
"""
self.A = A
self._A2 = None
self._A4 = None
self._A6 = None
self._A8 = None
self._A10 = None
self._d4_exact = None
self._d6_exact = None
self._d8_exact = None
self._d10_exact = None
self._d4_approx = None
self._d6_approx = None
self._d8_approx = None
self._d10_approx = None
self.ident = _ident_like(A)
self.structure = structure
self.use_exact_onenorm = use_exact_onenorm
@property
def A2(self):
if self._A2 is None:
self._A2 = _smart_matrix_product(
self.A, self.A, structure=self.structure)
return self._A2
@property
def A4(self):
if self._A4 is None:
self._A4 = _smart_matrix_product(
self.A2, self.A2, structure=self.structure)
return self._A4
@property
def A6(self):
if self._A6 is None:
self._A6 = _smart_matrix_product(
self.A4, self.A2, structure=self.structure)
return self._A6
@property
def A8(self):
if self._A8 is None:
self._A8 = _smart_matrix_product(
self.A6, self.A2, structure=self.structure)
return self._A8
@property
def A10(self):
if self._A10 is None:
self._A10 = _smart_matrix_product(
self.A4, self.A6, structure=self.structure)
return self._A10
@property
def d4_tight(self):
if self._d4_exact is None:
self._d4_exact = _onenorm(self.A4)**(1/4.)
return self._d4_exact
@property
def d6_tight(self):
if self._d6_exact is None:
self._d6_exact = _onenorm(self.A6)**(1/6.)
return self._d6_exact
@property
def d8_tight(self):
if self._d8_exact is None:
self._d8_exact = _onenorm(self.A8)**(1/8.)
return self._d8_exact
@property
def d10_tight(self):
if self._d10_exact is None:
self._d10_exact = _onenorm(self.A10)**(1/10.)
return self._d10_exact
@property
def d4_loose(self):
if self.use_exact_onenorm:
return self.d4_tight
if self._d4_exact is not None:
return self._d4_exact
else:
if self._d4_approx is None:
self._d4_approx = _onenormest_matrix_power(self.A2, 2,
structure=self.structure)**(1/4.)
return self._d4_approx
@property
def d6_loose(self):
if self.use_exact_onenorm:
return self.d6_tight
if self._d6_exact is not None:
return self._d6_exact
else:
if self._d6_approx is None:
self._d6_approx = _onenormest_matrix_power(self.A2, 3,
structure=self.structure)**(1/6.)
return self._d6_approx
@property
def d8_loose(self):
if self.use_exact_onenorm:
return self.d8_tight
if self._d8_exact is not None:
return self._d8_exact
else:
if self._d8_approx is None:
self._d8_approx = _onenormest_matrix_power(self.A4, 2,
structure=self.structure)**(1/8.)
return self._d8_approx
@property
def d10_loose(self):
if self.use_exact_onenorm:
return self.d10_tight
if self._d10_exact is not None:
return self._d10_exact
else:
if self._d10_approx is None:
self._d10_approx = _onenormest_product((self.A4, self.A6),
structure=self.structure)**(1/10.)
return self._d10_approx
def pade3(self):
b = (120., 60., 12., 1.)
U = _smart_matrix_product(self.A,
b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[2]*self.A2 + b[0]*self.ident
return U, V
def pade5(self):
b = (30240., 15120., 3360., 420., 30., 1.)
U = _smart_matrix_product(self.A,
b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade7(self):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
U = _smart_matrix_product(self.A,
b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade9(self):
b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
2162160., 110880., 3960., 90., 1.)
U = _smart_matrix_product(self.A,
(b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
b[3]*self.A2 + b[1]*self.ident),
structure=self.structure)
V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
b[2]*self.A2 + b[0]*self.ident)
return U, V
def pade13_scaled(self, s):
b = (64764752532480000., 32382376266240000., 7771770303897600.,
1187353796428800., 129060195264000., 10559470521600.,
670442572800., 33522128640., 1323241920., 40840800., 960960.,
16380., 182., 1.)
B = self.A * 2**-s
B2 = self.A2 * 2**(-2*s)
B4 = self.A4 * 2**(-4*s)
B6 = self.A6 * 2**(-6*s)
U2 = _smart_matrix_product(B6,
b[13]*B6 + b[11]*B4 + b[9]*B2,
structure=self.structure)
U = _smart_matrix_product(B,
(U2 + b[7]*B6 + b[5]*B4 +
b[3]*B2 + b[1]*self.ident),
structure=self.structure)
V2 = _smart_matrix_product(B6,
b[12]*B6 + b[10]*B4 + b[8]*B2,
structure=self.structure)
V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
return U, V
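# Minimal sketch (not part of the original module; the _demo_* name is ours):
# the helper forms each matrix power at most once; A2 is computed on first
# access and the cached object is returned thereafter.
def _demo_expm_pade_helper():
    h = _ExpmPadeHelper(np.array([[0., 1.], [0., 0.]]))
    assert h._A2 is None
    A2 = h.A2            # triggers one _smart_matrix_product call
    assert h.A2 is A2    # served from the cache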
def expm(A):
"""
Compute the matrix exponential using Pade approximation.
Parameters
----------
A : (M,M) array_like or sparse matrix
2D Array or Matrix (sparse or dense) to be exponentiated
Returns
-------
expA : (M,M) ndarray
Matrix exponential of `A`
Notes
-----
This is algorithm (6.1) which is a simplification of algorithm (5.1).
.. versionadded:: 0.12.0
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
"A New Scaling and Squaring Algorithm for the Matrix Exponential."
SIAM Journal on Matrix Analysis and Applications.
31 (3). pp. 970-989. ISSN 1095-7162
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import expm
>>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
>>> A.todense()
matrix([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]], dtype=int64)
>>> Aexp = expm(A)
>>> Aexp
<3x3 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Column format>
>>> Aexp.todense()
matrix([[ 2.71828183, 0. , 0. ],
[ 0. , 7.3890561 , 0. ],
[ 0. , 0. , 20.08553692]])
"""
return _expm(A, use_exact_onenorm='auto')
def _expm(A, use_exact_onenorm):
# Core of expm, separated to allow testing exact and approximate
# algorithms.
# Avoid indiscriminate asarray() to allow sparse or other strange arrays.
if isinstance(A, (list, tuple)):
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
# Trivial case
if A.shape == (1, 1):
out = [[np.exp(A[0, 0])]]
# Avoid indiscriminate casting to ndarray to
# allow for sparse or other strange arrays
if isspmatrix(A):
return A.__class__(out)
return np.array(out)
# Detect upper triangularity.
structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None
if use_exact_onenorm == "auto":
# Hardcode a matrix order threshold for exact vs. estimated one-norms.
use_exact_onenorm = A.shape[0] < 200
# Track functions of A to help compute the matrix exponential.
h = _ExpmPadeHelper(
A, structure=structure, use_exact_onenorm=use_exact_onenorm)
# Try Pade order 3.
eta_1 = max(h.d4_loose, h.d6_loose)
if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
U, V = h.pade3()
return _solve_P_Q(U, V, structure=structure)
# Try Pade order 5.
eta_2 = max(h.d4_tight, h.d6_loose)
if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
U, V = h.pade5()
return _solve_P_Q(U, V, structure=structure)
# Try Pade orders 7 and 9.
eta_3 = max(h.d6_tight, h.d8_loose)
if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
U, V = h.pade7()
return _solve_P_Q(U, V, structure=structure)
if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
U, V = h.pade9()
return _solve_P_Q(U, V, structure=structure)
# Use Pade order 13.
eta_4 = max(h.d8_loose, h.d10_loose)
eta_5 = min(eta_3, eta_4)
theta_13 = 4.25
# Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
if eta_5 == 0:
# Nilpotent special case
s = 0
else:
s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
s = s + _ell(2**-s * h.A, 13)
U, V = h.pade13_scaled(s)
X = _solve_P_Q(U, V, structure=structure)
if structure == UPPER_TRIANGULAR:
# Invoke Code Fragment 2.1.
X = _fragment_2_1(X, h.A, s)
else:
# X = r_13(A)^(2^s) by repeated squaring.
for i in range(s):
X = X.dot(X)
return X
def _solve_P_Q(U, V, structure=None):
"""
A helper function for expm_2009.
Parameters
----------
U : ndarray
Pade numerator.
V : ndarray
Pade denominator.
structure : str, optional
A string describing the structure of both matrices `U` and `V`.
Only `upper_triangular` is currently supported.
Notes
-----
The `structure` argument is inspired by similar args
for theano and cvxopt functions.
"""
P = U + V
Q = -U + V
if isspmatrix(U):
return spsolve(Q, P)
elif structure is None:
return solve(Q, P)
elif structure == UPPER_TRIANGULAR:
return solve_triangular(Q, P)
else:
raise ValueError('unsupported matrix structure: ' + str(structure))
def _sinch(x):
"""
Stably evaluate sinch.
Notes
-----
The strategy of falling back to a sixth order Taylor expansion
was suggested by the Spallation Neutron Source docs,
which were found on the internet by a Google search.
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
The details of the cutoff point and the Horner-like evaluation
were picked without reference to anything in particular.
Note that sinch is not currently implemented in scipy.special,
whereas the "engineer's" definition of sinc is implemented.
The implementation of sinc involves a scaling factor of pi
that distinguishes it from the "mathematician's" version of sinc.
"""
# If x is small then use sixth order Taylor expansion.
# How small is small? I am using the point where the relative error
# of the approximation is less than 1e-14.
# If x is large then directly evaluate sinh(x) / x.
x2 = x*x
if abs(x) < 0.0135:
return 1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))
else:
return np.sinh(x) / x
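# Minimal sketch (not part of the original module; the _demo_* name is ours):
# just below the cutoff the Taylor branch agrees with sinh(x)/x to well
# within the stated 1e-14 relative error.
def _demo_sinch():
    x = 0.0134
    assert abs(_sinch(x) - np.sinh(x) / x) < 1e-14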
def _eq_10_42(lam_1, lam_2, t_12):
"""
Equation (10.42) of Functions of Matrices: Theory and Computation.
Notes
-----
This is a helper function for _fragment_2_1 of expm_2009.
Equation (10.42) is on page 251 in the section on Schur algorithms.
In particular, section 10.4.3 explains the Schur-Parlett algorithm.
expm([[lam_1, t_12], [0, lam_2]])
=
[[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
[0, exp(lam_2)]]
"""
# The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
# apparently suffers from cancellation, according to Higham's textbook.
# A nice implementation of sinch, defined as sinh(x)/x,
# will apparently work around the cancellation.
a = 0.5 * (lam_1 + lam_2)
b = 0.5 * (lam_1 - lam_2)
return t_12 * np.exp(a) * _sinch(b)
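# Minimal sketch (not part of the original module; the _demo_* name is ours):
# for well separated eigenvalues the stable formula matches the naive
# divided difference it replaces.
def _demo_eq_10_42():
    lam_1, lam_2, t_12 = 0.5, -0.3, 2.0
    naive = t_12 * (np.exp(lam_2) - np.exp(lam_1)) / (lam_2 - lam_1)
    np.testing.assert_allclose(_eq_10_42(lam_1, lam_2, t_12), naive)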
def _fragment_2_1(X, T, s):
"""
A helper function for expm_2009.
Notes
-----
The argument X is modified in-place, but this modification is not the same
as the returned value of the function.
This function also takes pains to do things in ways that are compatible
with sparse matrices, for example by avoiding fancy indexing
and by using methods of the matrices whenever possible instead of
using functions of the numpy or scipy libraries themselves.
"""
# Form X = r_m(2^-s T)
n = X.shape[0]
diag_T = np.ravel(T.diagonal().copy())
# Replace diag(X) by exp(2^-s diag(T)).
scale = 2 ** -s
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
for i in range(s-1, -1, -1):
X = X.dot(X)
# Replace diag(X) by exp(2^-i diag(T)).
scale = 2 ** -i
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
# Replace (first) superdiagonal of X by explicit formula
# for superdiagonal of exp(2^-i T) from Eq (10.42) of
# the author's 2008 textbook
# Functions of Matrices: Theory and Computation.
for k in range(n-1):
lam_1 = scale * diag_T[k]
lam_2 = scale * diag_T[k+1]
t_12 = scale * T[k, k+1]
value = _eq_10_42(lam_1, lam_2, t_12)
X[k, k+1] = value
# Return the updated X matrix.
return X
def _ell(A, m):
"""
A helper function for expm_2009.
Parameters
----------
A : linear operator
A linear operator whose norm of power we care about.
m : int
The power of the linear operator
Returns
-------
value : int
A value related to a bound.
"""
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
p = 2*m + 1
# The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
# They are coefficients of terms of a generating function series expansion.
choose_2p_p = scipy.special.comb(2*p, p, exact=True)
abs_c_recip = float(choose_2p_p * math.factorial(2*p + 1))
# This is explained after Eq. (1.2) of the 2009 expm paper.
# It is the "unit roundoff" of IEEE double precision arithmetic.
u = 2**-53
# Compute the one-norm of matrix power p of abs(A).
A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), p)
# Treat zero norm as a special case.
if not A_abs_onenorm:
return 0
alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
log2_alpha_div_u = np.log2(alpha/u)
value = int(np.ceil(log2_alpha_div_u / (2 * m)))
return max(value, 0)
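# Minimal sketch (not part of the original module; the _demo_* name is ours):
# for a well behaved matrix such as the identity the correction from _ell is
# zero, so no extra squaring is requested.
def _demo_ell():
    assert _ell(np.eye(3), 13) == 0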
| 26,961 | 30.206019 | 93 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/_norm.py
|
"""Sparse matrix norms.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import issparse
from numpy.core import Inf, sqrt, abs
__all__ = ['norm']
def _sparse_frobenius_norm(x):
if np.issubdtype(x.dtype, np.complexfloating):
sqnorm = abs(x).power(2).sum()
else:
sqnorm = x.power(2).sum()
return sqrt(sqnorm)
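# Minimal sketch (not part of the original module; the _demo_* name is ours):
# the helper agrees with the dense Frobenius norm, including complex input.
def _demo_sparse_frobenius_norm():
    from scipy.sparse import csr_matrix
    M = np.array([[3.0, 0.0], [0.0, 4.0j]])
    assert abs(_sparse_frobenius_norm(csr_matrix(M)) - 5.0) < 1e-12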
def norm(x, ord=None, axis=None):
"""
Norm of a sparse matrix
This function is able to return one of seven different matrix norms,
depending on the value of the ``ord`` parameter.
Parameters
----------
x : a sparse matrix
Input sparse matrix.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
Returns
-------
n : float or ndarray
Notes
-----
Some of the ord values are not implemented because the associated
functions, such as _multi_svd_norm, are not yet available for sparse matrices.
This docstring is modified based on numpy.linalg.norm.
https://github.com/numpy/numpy/blob/master/numpy/linalg/linalg.py
The following norms can be calculated:
===== ============================
ord norm for sparse matrices
===== ============================
None Frobenius norm
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
0 abs(x).sum(axis=axis)
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 Not implemented
-2 Not implemented
other Not implemented
===== ============================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from scipy.sparse import *
>>> import numpy as np
>>> from scipy.sparse.linalg import norm
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> b = csr_matrix(b)
>>> norm(b)
7.745966692414834
>>> norm(b, 'fro')
7.745966692414834
>>> norm(b, np.inf)
9
>>> norm(b, -np.inf)
2
>>> norm(b, 1)
7
>>> norm(b, -1)
6
"""
if not issparse(x):
raise TypeError("input is not sparse. use numpy.linalg.norm")
# Check the default case first and handle it immediately.
if axis is None and ord in (None, 'fro', 'f'):
return _sparse_frobenius_norm(x)
# Some norms require functions that are not implemented for all types.
x = x.tocsr()
if axis is None:
axis = (0, 1)
elif not isinstance(axis, tuple):
msg = "'axis' must be None, an integer or a tuple of integers"
try:
int_axis = int(axis)
except TypeError:
raise TypeError(msg)
if axis != int_axis:
raise TypeError(msg)
axis = (int_axis,)
nd = 2
if len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
raise NotImplementedError
#return _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
raise NotImplementedError
#return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0]
elif ord == Inf:
return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0]
elif ord == -1:
return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0]
elif ord == -Inf:
return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0]
elif ord in (None, 'f', 'fro'):
# The axis order does not matter for this norm.
return _sparse_frobenius_norm(x)
else:
raise ValueError("Invalid norm order for matrices.")
elif len(axis) == 1:
a, = axis
if not (-nd <= a < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if ord == Inf:
M = abs(x).max(axis=a)
elif ord == -Inf:
M = abs(x).min(axis=a)
elif ord == 0:
# Zero norm
M = (x != 0).sum(axis=a)
elif ord == 1:
# special case for speedup
M = abs(x).sum(axis=a)
elif ord in (2, None):
M = sqrt(abs(x).power(2).sum(axis=a))
else:
try:
ord + 1
except TypeError:
raise ValueError('Invalid norm order for vectors.')
M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)
return M.A.ravel()
else:
raise ValueError("Improper number of dimensions to norm.")
| 5,867 | 30.718919 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/_onenormest.py
|
"""Sparse block 1-norm estimator.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse.linalg import aslinearoperator
__all__ = ['onenormest']
def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False):
"""
Compute a lower bound of the 1-norm of a sparse matrix.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can be transposed and that can
produce matrix products.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
Notes
-----
    This is algorithm 2.4 of [1]_.
    In [2]_ it is described as follows.
"This algorithm typically requires the evaluation of
about 4t matrix-vector products and almost invariably
produces a norm estimate (which is, in fact, a lower
bound on the norm) correct to within a factor 3."
.. versionadded:: 0.13.0
References
----------
.. [1] Nicholas J. Higham and Francoise Tisseur (2000),
"A Block Algorithm for Matrix 1-Norm Estimation,
with an Application to 1-Norm Pseudospectra."
SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201.
.. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009),
"A new scaling and squaring algorithm for the matrix exponential."
SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import onenormest
>>> A = csc_matrix([[1., 0., 0.], [5., 8., 2.], [0., -1., 0.]], dtype=float)
>>> A.todense()
matrix([[ 1., 0., 0.],
[ 5., 8., 2.],
[ 0., -1., 0.]])
>>> onenormest(A)
9.0
>>> np.linalg.norm(A.todense(), ord=1)
9.0
"""
# Check the input.
A = aslinearoperator(A)
if A.shape[0] != A.shape[1]:
raise ValueError('expected the operator to act like a square matrix')
# If the operator size is small compared to t,
# then it is easier to compute the exact norm.
# Otherwise estimate the norm.
n = A.shape[1]
if t >= n:
A_explicit = np.asarray(aslinearoperator(A).matmat(np.identity(n)))
if A_explicit.shape != (n, n):
raise Exception('internal error: ',
'unexpected shape ' + str(A_explicit.shape))
col_abs_sums = abs(A_explicit).sum(axis=0)
if col_abs_sums.shape != (n, ):
raise Exception('internal error: ',
'unexpected shape ' + str(col_abs_sums.shape))
argmax_j = np.argmax(col_abs_sums)
v = elementary_vector(n, argmax_j)
w = A_explicit[:, argmax_j]
est = col_abs_sums[argmax_j]
else:
est, v, w, nmults, nresamples = _onenormest_core(A, A.H, t, itmax)
# Report the norm estimate along with some certificates of the estimate.
if compute_v or compute_w:
result = (est,)
if compute_v:
result += (v,)
if compute_w:
result += (w,)
return result
else:
return est
def _blocked_elementwise(func):
"""
Decorator for an elementwise function, to apply it blockwise along
first dimension, to avoid excessive memory usage in temporaries.
"""
block_size = 2**20
def wrapper(x):
if x.shape[0] < block_size:
return func(x)
else:
y0 = func(x[:block_size])
y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype)
y[:block_size] = y0
del y0
for j in range(block_size, x.shape[0], block_size):
y[j:j+block_size] = func(x[j:j+block_size])
return y
return wrapper
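# For example (illustrative): applying a decorated function to an array with
# 3 * 2**20 rows evaluates it on three chunks of 2**20 rows each and stitches
# the results together, so peak temporary memory stays proportional to one
# block rather than to the whole input.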
@_blocked_elementwise
def sign_round_up(X):
"""
This should do the right thing for both real and complex matrices.
From Higham and Tisseur:
"Everything in this section remains valid for complex matrices
provided that sign(A) is redefined as the matrix (aij / |aij|)
(and sign(0) = 1) transposes are replaced by conjugate transposes."
"""
Y = X.copy()
Y[Y == 0] = 1
Y /= np.abs(Y)
return Y
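# For example (illustrative): sign_round_up(np.array([[0., -2.], [3., 0.5]]))
# yields [[1., -1.], [1., 1.]] -- zero entries map to +1 and every other
# entry is scaled to unit magnitude.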
@_blocked_elementwise
def _max_abs_axis1(X):
return np.max(np.abs(X), axis=1)
def _sum_abs_axis0(X):
block_size = 2**20
r = None
for j in range(0, X.shape[0], block_size):
y = np.sum(np.abs(X[j:j+block_size]), axis=0)
if r is None:
r = y
else:
r += y
return r
def elementary_vector(n, i):
v = np.zeros(n, dtype=float)
v[i] = 1
return v
def vectors_are_parallel(v, w):
# Columns are considered parallel when they are equal or negative.
# Entries are required to be in {-1, 1},
# which guarantees that the magnitudes of the vectors are identical.
if v.ndim != 1 or v.shape != w.shape:
raise ValueError('expected conformant vectors with entries in {-1,1}')
n = v.shape[0]
return np.dot(v, w) == n
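# Since the entries are restricted to {-1, 1}, np.dot(v, w) == n holds exactly
# when v and w agree in every entry; e.g. with n = 3, dot([1, -1, 1],
# [1, -1, 1]) is 3, while dot([1, -1, 1], [1, 1, 1]) is only 1.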
def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y):
for v in X.T:
if not any(vectors_are_parallel(v, w) for w in Y.T):
return False
return True
def column_needs_resampling(i, X, Y=None):
# column i of X needs resampling if either
# it is parallel to a previous column of X or
# it is parallel to a column of Y
n, t = X.shape
v = X[:, i]
if any(vectors_are_parallel(v, X[:, j]) for j in range(i)):
return True
if Y is not None:
if any(vectors_are_parallel(v, w) for w in Y.T):
return True
return False
def resample_column(i, X):
X[:, i] = np.random.randint(0, 2, size=X.shape[0])*2 - 1
def less_than_or_close(a, b):
return np.allclose(a, b) or (a < b)
def _algorithm_2_2(A, AT, t):
"""
This is Algorithm 2.2.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can produce matrix products.
AT : ndarray or other linear operator
The transpose of A.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Returns
-------
g : sequence
A non-negative decreasing vector
such that g[j] is a lower bound for the 1-norm
of the column of A of jth largest 1-norm.
The first entry of this vector is therefore a lower bound
on the 1-norm of the linear operator A.
This sequence has length t.
ind : sequence
The ith entry of ind is the index of the column A whose 1-norm
is given by g[i].
This sequence of indices has length t, and its entries are
chosen from range(n), possibly with repetition,
where n is the order of the operator A.
Notes
-----
This algorithm is mainly for testing.
It uses the 'ind' array in a way that is similar to
its usage in algorithm 2.4. This algorithm 2.2 may be easier to test,
so it gives a chance of uncovering bugs related to indexing
which could have propagated less noticeably to algorithm 2.4.
"""
A_linear_operator = aslinearoperator(A)
AT_linear_operator = aslinearoperator(AT)
n = A_linear_operator.shape[0]
# Initialize the X block with columns of unit 1-norm.
X = np.ones((n, t))
if t > 1:
X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1
X /= float(n)
# Iteratively improve the lower bounds.
# Track extra things, to assert invariants for debugging.
g_prev = None
h_prev = None
k = 1
ind = range(t)
while True:
Y = np.asarray(A_linear_operator.matmat(X))
g = _sum_abs_axis0(Y)
best_j = np.argmax(g)
g.sort()
g = g[::-1]
S = sign_round_up(Y)
Z = np.asarray(AT_linear_operator.matmat(S))
h = _max_abs_axis1(Z)
# If this algorithm runs for fewer than two iterations,
# then its return values do not have the properties indicated
# in the description of the algorithm.
# In particular, the entries of g are not 1-norms of any
# column of A until the second iteration.
# Therefore we will require the algorithm to run for at least
# two iterations, even though this requirement is not stated
# in the description of the algorithm.
if k >= 2:
if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])):
break
ind = np.argsort(h)[::-1][:t]
h = h[ind]
for j in range(t):
X[:, j] = elementary_vector(n, ind[j])
# Check invariant (2.2).
if k >= 2:
if not less_than_or_close(g_prev[0], h_prev[0]):
raise Exception('invariant (2.2) is violated')
if not less_than_or_close(h_prev[0], g[0]):
raise Exception('invariant (2.2) is violated')
# Check invariant (2.3).
if k >= 3:
for j in range(t):
if not less_than_or_close(g[j], g_prev[j]):
raise Exception('invariant (2.3) is violated')
# Update for the next iteration.
g_prev = g
h_prev = h
k += 1
# Return the lower bounds and the corresponding column indices.
return g, ind
def _onenormest_core(A, AT, t, itmax):
"""
Compute a lower bound of the 1-norm of a sparse matrix.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can produce matrix products.
AT : ndarray or other linear operator
The transpose of A.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
itmax : int, optional
Use at most this many iterations.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
nmults : int, optional
The number of matrix products that were computed.
nresamples : int, optional
The number of times a parallel column was observed,
necessitating a re-randomization of the column.
Notes
-----
This is algorithm 2.4.
"""
# This function is a more or less direct translation
# of Algorithm 2.4 from the Higham and Tisseur (2000) paper.
A_linear_operator = aslinearoperator(A)
AT_linear_operator = aslinearoperator(AT)
if itmax < 2:
raise ValueError('at least two iterations are required')
if t < 1:
raise ValueError('at least one column is required')
n = A.shape[0]
if t >= n:
raise ValueError('t should be smaller than the order of A')
# Track the number of big*small matrix multiplications
# and the number of resamplings.
nmults = 0
nresamples = 0
# "We now explain our choice of starting matrix. We take the first
# column of X to be the vector of 1s [...] This has the advantage that
# for a matrix with nonnegative elements the algorithm converges
# with an exact estimate on the second iteration, and such matrices
# arise in applications [...]"
X = np.ones((n, t), dtype=float)
# "The remaining columns are chosen as rand{-1,1},
# with a check for and correction of parallel columns,
# exactly as for S in the body of the algorithm."
if t > 1:
for i in range(1, t):
# These are technically initial samples, not resamples,
# so the resampling count is not incremented.
resample_column(i, X)
for i in range(t):
while column_needs_resampling(i, X):
resample_column(i, X)
nresamples += 1
# "Choose starting matrix X with columns of unit 1-norm."
X /= float(n)
# "indices of used unit vectors e_j"
ind_hist = np.zeros(0, dtype=np.intp)
est_old = 0
S = np.zeros((n, t), dtype=float)
k = 1
ind = None
while True:
Y = np.asarray(A_linear_operator.matmat(X))
nmults += 1
mags = _sum_abs_axis0(Y)
est = np.max(mags)
best_j = np.argmax(mags)
if est > est_old or k == 2:
if k >= 2:
ind_best = ind[best_j]
w = Y[:, best_j]
# (1)
if k >= 2 and est <= est_old:
est = est_old
break
est_old = est
S_old = S
if k > itmax:
break
S = sign_round_up(Y)
del Y
# (2)
if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old):
break
if t > 1:
# "Ensure that no column of S is parallel to another column of S
# or to a column of S_old by replacing columns of S by rand{-1,1}."
for i in range(t):
while column_needs_resampling(i, S, S_old):
resample_column(i, S)
nresamples += 1
del S_old
# (3)
Z = np.asarray(AT_linear_operator.matmat(S))
nmults += 1
h = _max_abs_axis1(Z)
del Z
# (4)
if k >= 2 and max(h) == h[ind_best]:
break
# "Sort h so that h_first >= ... >= h_last
# and re-order ind correspondingly."
#
# Later on, we will need at most t+len(ind_hist) largest
# entries, so drop the rest
ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy()
del h
if t > 1:
# (5)
# Break if the most promising t vectors have been visited already.
if np.in1d(ind[:t], ind_hist).all():
break
# Put the most promising unvisited vectors at the front of the list
# and put the visited vectors at the end of the list.
# Preserve the order of the indices induced by the ordering of h.
seen = np.in1d(ind, ind_hist)
ind = np.concatenate((ind[~seen], ind[seen]))
for j in range(t):
X[:, j] = elementary_vector(n, ind[j])
new_ind = ind[:t][~np.in1d(ind[:t], ind_hist)]
ind_hist = np.concatenate((ind_hist, new_ind))
k += 1
v = elementary_vector(n, ind_best)
return est, v, w, nmults, nresamples
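

if __name__ == "__main__":
    # A minimal usage sketch (illustrative only): estimate the 1-norm of an
    # operator given only matvec/rmatvec callables, then compare against the
    # exact value computed from the dense matrix.
    from scipy.sparse.linalg import LinearOperator
    rng = np.random.RandomState(0)
    M = rng.randn(60, 60)
    A = LinearOperator(M.shape, matvec=M.dot, rmatvec=M.T.dot)
    est = onenormest(A, t=2)
    exact = np.abs(M).sum(axis=0).max()   # true 1-norm: max column abs sum
    # The estimate is a lower bound, typically correct to within a factor 3.
    assert est <= exact + 1e-8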
| 15,529 | 32.113006 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/__init__.py
|
"""
==================================================
Sparse linear algebra (:mod:`scipy.sparse.linalg`)
==================================================
.. currentmodule:: scipy.sparse.linalg
Abstract linear operators
-------------------------
.. autosummary::
:toctree: generated/
LinearOperator -- abstract representation of a linear operator
aslinearoperator -- convert an object to an abstract linear operator
Matrix Operations
-----------------
.. autosummary::
:toctree: generated/
inv -- compute the sparse matrix inverse
expm -- compute the sparse matrix exponential
expm_multiply -- compute the product of a matrix exponential and a matrix
Matrix norms
------------
.. autosummary::
:toctree: generated/
norm -- Norm of a sparse matrix
onenormest -- Estimate the 1-norm of a sparse matrix
Solving linear problems
-----------------------
Direct methods for linear equation systems:
.. autosummary::
:toctree: generated/
spsolve -- Solve the sparse linear system Ax=b
spsolve_triangular -- Solve the sparse linear system Ax=b for a triangular matrix
factorized -- Pre-factorize matrix to a function solving a linear system
MatrixRankWarning -- Warning on exactly singular matrices
use_solver -- Select direct solver to use
Iterative methods for linear equation systems:
.. autosummary::
:toctree: generated/
bicg -- Use BIConjugate Gradient iteration to solve A x = b
bicgstab -- Use BIConjugate Gradient STABilized iteration to solve A x = b
cg -- Use Conjugate Gradient iteration to solve A x = b
cgs -- Use Conjugate Gradient Squared iteration to solve A x = b
gmres -- Use Generalized Minimal RESidual iteration to solve A x = b
lgmres -- Solve a matrix equation using the LGMRES algorithm
minres -- Use MINimum RESidual iteration to solve Ax = b
qmr -- Use Quasi-Minimal Residual iteration to solve A x = b
gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm
Iterative methods for least-squares problems:
.. autosummary::
:toctree: generated/
lsqr -- Find the least-squares solution to a sparse linear equation system
lsmr -- Find the least-squares solution to a sparse linear equation system
Matrix factorizations
---------------------
Eigenvalue problems:
.. autosummary::
:toctree: generated/
eigs -- Find k eigenvalues and eigenvectors of the square matrix A
eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix
lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning
Singular value problems:
.. autosummary::
:toctree: generated/
svds -- Compute k singular values/vectors for a sparse matrix
Complete or incomplete LU factorizations
.. autosummary::
:toctree: generated/
   splu -- Compute an LU decomposition for a sparse matrix
spilu -- Compute an incomplete LU decomposition for a sparse matrix
SuperLU -- Object representing an LU factorization
Exceptions
----------
.. autosummary::
:toctree: generated/
ArpackNoConvergence
ArpackError
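
Examples
--------
A minimal sketch (illustrative only): solve a small sparse system with the
direct solver and verify the residual.

>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import spsolve
>>> A = csc_matrix([[3., 2., 0.], [1., -1., 0.], [0., 5., 1.]])
>>> x = spsolve(A, np.array([2., 4., -1.]))
>>> np.allclose(A.dot(x), [2., 4., -1.])
True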
"""
from __future__ import division, print_function, absolute_import
from .isolve import *
from .dsolve import *
from .interface import *
from .eigen import *
from .matfuncs import *
from ._onenormest import *
from ._norm import *
from ._expm_multiply import *
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 3,482 | 26.210938 | 84 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/interface.py
|
"""Abstract linear algebra library.
This module defines a class hierarchy that implements a kind of "lazy"
matrix representation, called the ``LinearOperator``. It can be used to do
linear algebra with extremely large sparse or structured matrices, without
representing those explicitly in memory. Such matrices can be added,
multiplied, transposed, etc.
As a motivating example, suppose you have a matrix where almost all of
the elements have the value one. The standard sparse matrix representation
skips the storage of zeros, but not ones. By contrast, a LinearOperator is
able to represent such matrices efficiently. First, we need a compact way to
represent an all-ones matrix::
>>> import numpy as np
>>> class Ones(LinearOperator):
... def __init__(self, shape):
... super(Ones, self).__init__(dtype=None, shape=shape)
... def _matvec(self, x):
... return np.repeat(x.sum(), self.shape[0])
Instances of this class emulate ``np.ones(shape)``, but using a constant
amount of storage, independent of ``shape``. The ``_matvec`` method specifies
how this linear operator multiplies with (operates on) a vector. We can now
add this operator to a sparse matrix that stores only offsets from one::
>>> from scipy.sparse import csr_matrix
>>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]])
>>> A = aslinearoperator(offsets) + Ones(offsets.shape)
>>> A.dot([1, 2, 3])
array([13, 4, 15])
The result is the same as that given by its dense, explicitly-stored
counterpart::
>>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])
array([13, 4, 15])
Several algorithms in the ``scipy.sparse`` library are able to operate on
``LinearOperator`` instances.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import isspmatrix
from scipy.sparse.sputils import isshape, isintlike
__all__ = ['LinearOperator', 'aslinearoperator']
class LinearOperator(object):
"""Common interface for performing matrix vector products
Many iterative methods (e.g. cg, gmres) do not need to know the
individual entries of a matrix to solve a linear system A*x=b.
Such solvers only require the computation of matrix vector
products, A*v where v is a dense vector. This class serves as
an abstract interface between iterative solvers and matrix-like
objects.
To construct a concrete LinearOperator, either pass appropriate
callables to the constructor of this class, or subclass it.
A subclass must implement either one of the methods ``_matvec``
and ``_matmat``, and the attributes/properties ``shape`` (pair of
integers) and ``dtype`` (may be None). It may call the ``__init__``
on this class to have these attributes validated. Implementing
``_matvec`` automatically implements ``_matmat`` (using a naive
algorithm) and vice-versa.
Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
to implement the Hermitian adjoint (conjugate transpose). As with
``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
``_adjoint`` implements the other automatically. Implementing
``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
backwards compatibility.
Parameters
----------
shape : tuple
Matrix dimensions (M,N).
matvec : callable f(v)
        Returns A * v.
rmatvec : callable f(v)
Returns A^H * v, where A^H is the conjugate transpose of A.
matmat : callable f(V)
Returns A * V, where V is a dense matrix with dimensions (N,K).
dtype : dtype
Data type of the matrix.
Attributes
----------
args : tuple
For linear operators describing products etc. of other linear
operators, the operands of the binary operation.
See Also
--------
aslinearoperator : Construct LinearOperators
Notes
-----
The user-defined matvec() function must properly handle the case
where v has shape (N,) as well as the (N,1) case. The shape of
the return type is handled internally by LinearOperator.
LinearOperator instances can also be multiplied, added with each
other and exponentiated, all lazily: the result of these operations
is always a new, composite LinearOperator, that defers linear
operations to the original operators and combines the results.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import LinearOperator
>>> def mv(v):
... return np.array([2*v[0], 3*v[1]])
...
>>> A = LinearOperator((2,2), matvec=mv)
>>> A
<2x2 _CustomLinearOperator with dtype=float64>
>>> A.matvec(np.ones(2))
array([ 2., 3.])
>>> A * np.ones(2)
array([ 2., 3.])
"""
def __new__(cls, *args, **kwargs):
if cls is LinearOperator:
# Operate as _CustomLinearOperator factory.
return super(LinearOperator, cls).__new__(_CustomLinearOperator)
else:
obj = super(LinearOperator, cls).__new__(cls)
if (type(obj)._matvec == LinearOperator._matvec
and type(obj)._matmat == LinearOperator._matmat):
raise TypeError("LinearOperator subclass should implement"
" at least one of _matvec and _matmat.")
return obj
def __init__(self, dtype, shape):
"""Initialize this LinearOperator.
To be called by subclasses. ``dtype`` may be None; ``shape`` should
be convertible to a length-2 tuple.
"""
if dtype is not None:
dtype = np.dtype(dtype)
shape = tuple(shape)
if not isshape(shape):
raise ValueError("invalid shape %r (must be 2-d)" % (shape,))
self.dtype = dtype
self.shape = shape
def _init_dtype(self):
"""Called from subclasses at the end of the __init__ routine.
"""
if self.dtype is None:
v = np.zeros(self.shape[-1])
self.dtype = np.asarray(self.matvec(v)).dtype
def _matmat(self, X):
"""Default matrix-matrix multiplication handler.
Falls back on the user-defined _matvec method, so defining that will
define matrix multiplication (though in a very suboptimal way).
"""
return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T])
def _matvec(self, x):
"""Default matrix-vector multiplication handler.
If self is a linear operator of shape (M, N), then this method will
be called on a shape (N,) or (N, 1) ndarray, and should return a
shape (M,) or (M, 1) ndarray.
This default implementation falls back on _matmat, so defining that
will define matrix-vector multiplication as well.
"""
return self.matmat(x.reshape(-1, 1))
def matvec(self, x):
"""Matrix-vector multiplication.
Performs the operation y=A*x where A is an MxN linear
operator and x is a column vector or 1-d array.
Parameters
----------
x : {matrix, ndarray}
An array with shape (N,) or (N,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (M,) or (M,1) depending
on the type and shape of the x argument.
Notes
-----
This matvec wraps the user-specified matvec routine or overridden
_matvec method to ensure that y has the correct shape and type.
"""
x = np.asanyarray(x)
M,N = self.shape
if x.shape != (N,) and x.shape != (N,1):
raise ValueError('dimension mismatch')
y = self._matvec(x)
if isinstance(x, np.matrix):
y = np.asmatrix(y)
else:
y = np.asarray(y)
if x.ndim == 1:
y = y.reshape(M)
elif x.ndim == 2:
y = y.reshape(M,1)
else:
raise ValueError('invalid shape returned by user-defined matvec()')
return y
def rmatvec(self, x):
"""Adjoint matrix-vector multiplication.
Performs the operation y = A^H * x where A is an MxN linear
operator and x is a column vector or 1-d array.
Parameters
----------
x : {matrix, ndarray}
An array with shape (M,) or (M,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (N,) or (N,1) depending
on the type and shape of the x argument.
Notes
-----
This rmatvec wraps the user-specified rmatvec routine or overridden
_rmatvec method to ensure that y has the correct shape and type.
"""
x = np.asanyarray(x)
M,N = self.shape
if x.shape != (M,) and x.shape != (M,1):
raise ValueError('dimension mismatch')
y = self._rmatvec(x)
if isinstance(x, np.matrix):
y = np.asmatrix(y)
else:
y = np.asarray(y)
if x.ndim == 1:
y = y.reshape(N)
elif x.ndim == 2:
y = y.reshape(N,1)
else:
raise ValueError('invalid shape returned by user-defined rmatvec()')
return y
def _rmatvec(self, x):
"""Default implementation of _rmatvec; defers to adjoint."""
if type(self)._adjoint == LinearOperator._adjoint:
# _adjoint not overridden, prevent infinite recursion
raise NotImplementedError
else:
return self.H.matvec(x)
def matmat(self, X):
"""Matrix-matrix multiplication.
Performs the operation y=A*X where A is an MxN linear
        operator and X is a dense N*K matrix or ndarray.
Parameters
----------
X : {matrix, ndarray}
An array with shape (N,K).
Returns
-------
Y : {matrix, ndarray}
A matrix or ndarray with shape (M,K) depending on
the type of the X argument.
Notes
-----
This matmat wraps any user-specified matmat routine or overridden
_matmat method to ensure that y has the correct type.
"""
X = np.asanyarray(X)
if X.ndim != 2:
raise ValueError('expected 2-d ndarray or matrix, not %d-d'
% X.ndim)
M,N = self.shape
if X.shape[0] != N:
raise ValueError('dimension mismatch: %r, %r'
% (self.shape, X.shape))
Y = self._matmat(X)
if isinstance(Y, np.matrix):
Y = np.asmatrix(Y)
return Y
def __call__(self, x):
return self*x
def __mul__(self, x):
return self.dot(x)
def dot(self, x):
"""Matrix-matrix or matrix-vector multiplication.
Parameters
----------
x : array_like
1-d or 2-d array, representing a vector or matrix.
Returns
-------
Ax : array
1-d or 2-d array (depending on the shape of x) that represents
the result of applying this linear operator on x.
"""
if isinstance(x, LinearOperator):
return _ProductLinearOperator(self, x)
elif np.isscalar(x):
return _ScaledLinearOperator(self, x)
else:
x = np.asarray(x)
            if x.ndim == 1 or (x.ndim == 2 and x.shape[1] == 1):
return self.matvec(x)
elif x.ndim == 2:
return self.matmat(x)
else:
raise ValueError('expected 1-d or 2-d array or matrix, got %r'
% x)
def __matmul__(self, other):
if np.isscalar(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__mul__(other)
def __rmatmul__(self, other):
if np.isscalar(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__rmul__(other)
def __rmul__(self, x):
if np.isscalar(x):
return _ScaledLinearOperator(self, x)
else:
return NotImplemented
def __pow__(self, p):
if np.isscalar(p):
return _PowerLinearOperator(self, p)
else:
return NotImplemented
def __add__(self, x):
if isinstance(x, LinearOperator):
return _SumLinearOperator(self, x)
else:
return NotImplemented
def __neg__(self):
return _ScaledLinearOperator(self, -1)
def __sub__(self, x):
return self.__add__(-x)
def __repr__(self):
M,N = self.shape
if self.dtype is None:
dt = 'unspecified dtype'
else:
dt = 'dtype=' + str(self.dtype)
return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)
def adjoint(self):
"""Hermitian adjoint.
Returns the Hermitian adjoint of self, aka the Hermitian
conjugate or Hermitian transpose. For a complex matrix, the
Hermitian adjoint is equal to the conjugate transpose.
Can be abbreviated self.H instead of self.adjoint().
Returns
-------
A_H : LinearOperator
Hermitian adjoint of self.
"""
return self._adjoint()
H = property(adjoint)
def transpose(self):
"""Transpose this linear operator.
Returns a LinearOperator that represents the transpose of this one.
Can be abbreviated self.T instead of self.transpose().
"""
return self._transpose()
T = property(transpose)
def _adjoint(self):
"""Default implementation of _adjoint; defers to rmatvec."""
shape = (self.shape[1], self.shape[0])
return _CustomLinearOperator(shape, matvec=self.rmatvec,
rmatvec=self.matvec,
dtype=self.dtype)
class _CustomLinearOperator(LinearOperator):
"""Linear operator defined in terms of user-specified operations."""
def __init__(self, shape, matvec, rmatvec=None, matmat=None, dtype=None):
super(_CustomLinearOperator, self).__init__(dtype, shape)
self.args = ()
self.__matvec_impl = matvec
self.__rmatvec_impl = rmatvec
self.__matmat_impl = matmat
self._init_dtype()
def _matmat(self, X):
if self.__matmat_impl is not None:
return self.__matmat_impl(X)
else:
return super(_CustomLinearOperator, self)._matmat(X)
def _matvec(self, x):
return self.__matvec_impl(x)
def _rmatvec(self, x):
func = self.__rmatvec_impl
if func is None:
raise NotImplementedError("rmatvec is not defined")
return self.__rmatvec_impl(x)
def _adjoint(self):
return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),
matvec=self.__rmatvec_impl,
rmatvec=self.__matvec_impl,
dtype=self.dtype)
def _get_dtype(operators, dtypes=None):
if dtypes is None:
dtypes = []
for obj in operators:
if obj is not None and hasattr(obj, 'dtype'):
dtypes.append(obj.dtype)
return np.find_common_type(dtypes, [])
class _SumLinearOperator(LinearOperator):
def __init__(self, A, B):
if not isinstance(A, LinearOperator) or \
not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.shape != B.shape:
raise ValueError('cannot add %r and %r: shape mismatch'
% (A, B))
self.args = (A, B)
super(_SumLinearOperator, self).__init__(_get_dtype([A, B]), A.shape)
def _matvec(self, x):
return self.args[0].matvec(x) + self.args[1].matvec(x)
def _rmatvec(self, x):
return self.args[0].rmatvec(x) + self.args[1].rmatvec(x)
def _matmat(self, x):
return self.args[0].matmat(x) + self.args[1].matmat(x)
def _adjoint(self):
A, B = self.args
return A.H + B.H
class _ProductLinearOperator(LinearOperator):
def __init__(self, A, B):
if not isinstance(A, LinearOperator) or \
not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.shape[1] != B.shape[0]:
raise ValueError('cannot multiply %r and %r: shape mismatch'
% (A, B))
super(_ProductLinearOperator, self).__init__(_get_dtype([A, B]),
(A.shape[0], B.shape[1]))
self.args = (A, B)
def _matvec(self, x):
return self.args[0].matvec(self.args[1].matvec(x))
def _rmatvec(self, x):
return self.args[1].rmatvec(self.args[0].rmatvec(x))
def _matmat(self, x):
return self.args[0].matmat(self.args[1].matmat(x))
def _adjoint(self):
A, B = self.args
return B.H * A.H
class _ScaledLinearOperator(LinearOperator):
def __init__(self, A, alpha):
if not isinstance(A, LinearOperator):
raise ValueError('LinearOperator expected as A')
if not np.isscalar(alpha):
raise ValueError('scalar expected as alpha')
dtype = _get_dtype([A], [type(alpha)])
super(_ScaledLinearOperator, self).__init__(dtype, A.shape)
self.args = (A, alpha)
def _matvec(self, x):
return self.args[1] * self.args[0].matvec(x)
def _rmatvec(self, x):
return np.conj(self.args[1]) * self.args[0].rmatvec(x)
def _matmat(self, x):
return self.args[1] * self.args[0].matmat(x)
def _adjoint(self):
A, alpha = self.args
return A.H * alpha
class _PowerLinearOperator(LinearOperator):
def __init__(self, A, p):
if not isinstance(A, LinearOperator):
raise ValueError('LinearOperator expected as A')
if A.shape[0] != A.shape[1]:
raise ValueError('square LinearOperator expected, got %r' % A)
if not isintlike(p) or p < 0:
raise ValueError('non-negative integer expected as p')
super(_PowerLinearOperator, self).__init__(_get_dtype([A]), A.shape)
self.args = (A, p)
def _power(self, fun, x):
res = np.array(x, copy=True)
for i in range(self.args[1]):
res = fun(res)
return res
def _matvec(self, x):
return self._power(self.args[0].matvec, x)
def _rmatvec(self, x):
return self._power(self.args[0].rmatvec, x)
def _matmat(self, x):
return self._power(self.args[0].matmat, x)
def _adjoint(self):
A, p = self.args
return A.H ** p
class MatrixLinearOperator(LinearOperator):
def __init__(self, A):
super(MatrixLinearOperator, self).__init__(A.dtype, A.shape)
self.A = A
self.__adj = None
self.args = (A,)
def _matmat(self, X):
return self.A.dot(X)
def _adjoint(self):
if self.__adj is None:
self.__adj = _AdjointMatrixOperator(self)
return self.__adj
class _AdjointMatrixOperator(MatrixLinearOperator):
def __init__(self, adjoint):
self.A = adjoint.A.T.conj()
self.__adjoint = adjoint
self.args = (adjoint,)
self.shape = adjoint.shape[1], adjoint.shape[0]
@property
def dtype(self):
return self.__adjoint.dtype
def _adjoint(self):
return self.__adjoint
class IdentityOperator(LinearOperator):
def __init__(self, shape, dtype=None):
super(IdentityOperator, self).__init__(dtype, shape)
def _matvec(self, x):
return x
def _rmatvec(self, x):
return x
def _matmat(self, x):
return x
def _adjoint(self):
return self
def aslinearoperator(A):
"""Return A as a LinearOperator.
'A' may be any of the following types:
- ndarray
- matrix
- sparse matrix (e.g. csr_matrix, lil_matrix, etc.)
- LinearOperator
- An object with .shape and .matvec attributes
See the LinearOperator documentation for additional information.
Notes
-----
If 'A' has no .dtype attribute, the data type is determined by calling
:func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this
call upon the linear operator creation.
Examples
--------
>>> from scipy.sparse.linalg import aslinearoperator
>>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)
>>> aslinearoperator(M)
<2x3 MatrixLinearOperator with dtype=int32>
"""
if isinstance(A, LinearOperator):
return A
elif isinstance(A, np.ndarray) or isinstance(A, np.matrix):
if A.ndim > 2:
raise ValueError('array must have ndim <= 2')
A = np.atleast_2d(np.asarray(A))
return MatrixLinearOperator(A)
elif isspmatrix(A):
return MatrixLinearOperator(A)
else:
if hasattr(A, 'shape') and hasattr(A, 'matvec'):
rmatvec = None
dtype = None
if hasattr(A, 'rmatvec'):
rmatvec = A.rmatvec
if hasattr(A, 'dtype'):
dtype = A.dtype
return LinearOperator(A.shape, A.matvec,
rmatvec=rmatvec, dtype=dtype)
else:
raise TypeError('type not understood')
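

if __name__ == "__main__":
    # A minimal usage sketch (illustrative only): a LinearOperator subclass
    # implementing _matvec and _adjoint, composed lazily with a scaled
    # identity. The Diagonal class below is hypothetical, for demonstration.
    class Diagonal(LinearOperator):
        """Acts like np.diag(d) without storing the dense matrix."""
        def __init__(self, d):
            self.d = np.asarray(d)
            super(Diagonal, self).__init__(dtype=self.d.dtype,
                                           shape=(len(self.d),) * 2)
        def _matvec(self, x):
            return self.d * x.ravel()
        def _adjoint(self):
            return Diagonal(np.conj(self.d))
    D = Diagonal([1., 2., 3.])
    op = 2 * D + IdentityOperator(D.shape, dtype=D.dtype)   # lazy composite
    print(op.matvec(np.ones(3)))     # -> [ 3.  5.  7.]
    print(op.H.matvec(np.ones(3)))   # same result, since D is real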
| 21,661 | 29.901569 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/setup.py
|
from __future__ import division, print_function, absolute_import
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('eigen',parent_package,top_path)
    config.add_subpackage('arpack')
    config.add_subpackage('lobpcg')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 453 | 24.222222 | 64 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/__init__.py
|
"""
Sparse Eigenvalue Solvers
-------------------------
The submodules of sparse.linalg.eigen:
1. arpack: ARPACK-based eigenvalue solvers (eigs, eigsh, svds)
2. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method
"""
from __future__ import division, print_function, absolute_import
from .arpack import *
from .lobpcg import *
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 439 | 22.157895 | 77 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/arpack/setup.py
|
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
from scipy._build_utils import get_g77_abi_wrappers, get_sgemv_fix
lapack_opt = get_info('lapack_opt')
if not lapack_opt:
raise NotFoundError('no lapack/blas resources found')
config = Configuration('arpack', parent_package, top_path)
arpack_sources = [join('ARPACK','SRC', '*.f')]
arpack_sources.extend([join('ARPACK','UTIL', '*.f')])
arpack_sources += get_g77_abi_wrappers(lapack_opt)
config.add_library('arpack_scipy', sources=arpack_sources,
include_dirs=[join('ARPACK', 'SRC')])
ext_sources = ['arpack.pyf.src']
ext_sources += get_sgemv_fix(lapack_opt)
config.add_extension('_arpack',
sources=ext_sources,
libraries=['arpack_scipy'],
extra_info=lapack_opt,
depends=arpack_sources,
)
config.add_data_dir('tests')
# Add license files
config.add_data_files('ARPACK/COPYING')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 1,407 | 29.608696 | 70 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/arpack/__init__.py
|
"""
Eigenvalue solver using iterative methods.
Find k eigenvectors and eigenvalues of a matrix A using the
Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_.
These methods are most useful for large sparse matrices.
- eigs(A,k)
- eigsh(A,k)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
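
Examples
--------
A minimal sketch (illustrative only):

>>> from scipy.sparse import diags
>>> from scipy.sparse.linalg import eigsh
>>> A = diags([1., 2., 3., 4., 5.])
>>> values, vectors = eigsh(A, k=2)  # the two largest eigenvalues of A
>>> values.shape, vectors.shape
((2,), (5, 2))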
"""
from __future__ import division, print_function, absolute_import
from .arpack import *
| 628 | 26.347826 | 71 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/arpack/arpack.py
|
"""
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
from . import _arpack
import numpy as np
import warnings
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import eye, issparse, isspmatrix, isspmatrix_csr
from scipy.linalg import eig, eigh, lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
from scipy._lib._util import _aligned_zeros
from scipy._lib._threadsafety import ReentrancyLock
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error"
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"
}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
def choose_ncv(k):
"""
    Choose the number of Lanczos vectors based on the target number
of singular/eigen values and vectors to compute, k.
"""
return max(2 * k + 1, 20)
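# For example: choose_ncv(5) == 20 (the floor of 20 dominates for small k),
# while choose_ncv(12) == 25 (2*k + 1 takes over for larger k).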
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
# ARPACK will use a random initial vector.
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = choose_ncv(k)
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
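        #
        # For example (illustrative): a plain eigsh(A, k) call maps to
        # mode=1 with matvec = A.dot, while shift-invert eigsh(A, k,
        # sigma=sigma) maps to mode=3 with Minv_matvec backed by a
        # factorization of (A - sigma*M).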
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than ndim(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
self.workd = _aligned_zeros(3 * n, self.tp)
self.workl = _aligned_zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
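        #
        # For a real A in shift-invert mode the Ritz values returned by
        # ARPACK are
        #   w'[i] = 1/2  * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]  (mode 3)
        #   w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]  (mode 4)
        # and for a complex A, w'[i] = 1/(w[i]-sigma)
        # (see the eigs docstring below).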
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than ndim(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
self.workd = _aligned_zeros(3 * n, self.tp)
self.workl = _aligned_zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
self.rwork = _aligned_zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(return_eigenvectors,
howmny, sselect, sigmar, sigmai, workev,
self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr,
self.workd, self.workl, self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
                        # last eigenvalue is complex: the imaginary part of
                        # the eigenvector has not been returned
                        # this can only happen if nreturned > k, so we'll
                        # throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
                        # last eigenvalue is complex: the imaginary part of
                        # the eigenvector has not been returned
                        # this can only happen if nreturned > k, so we'll
                        # throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
            # we got at most as many eigenvalues as we asked for
d = d[:nreturned]
z = z[:, :nreturned]
else:
            # we got one extra eigenvalue (likely a complex-conjugate pair,
            # but which one?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
                # for LI,SI ARPACK returns largest,smallest
                # abs(imaginary) parts (the reason is not documented)
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(return_eigenvectors,
howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr,
self.workd, self.workl, self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
      using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
self.shape = M.shape
self.dtype = M.dtype
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
x = np.asarray(x)
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x).astype(self.dtype))
+ 1j * self.M_lu.solve(np.imag(x).astype(self.dtype)))
else:
return self.M_lu.solve(x.astype(self.dtype))
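# A minimal usage sketch (mirrors how get_inv_matvec below builds a solver
# for the mass matrix; illustrative only):
#
#     Minv_matvec = SpLuInv(M.tocsc()).matvec   # x = M^-1 * b via sparse LU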
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
self.shape = M.shape
self.dtype = M.dtype
def _matvec(self, x):
return lu_solve(self.M_lu, x)
def gmres_loose(A, b, tol):
"""
gmres with looser termination condition.
"""
b = np.asarray(b)
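    # Floor the tolerance at a multiple of machine epsilon (scaled by the
    # problem size) so that GMRES is never asked for more accuracy than the
    # floating point type can deliver.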
min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps
return gmres(A, b, tol=max(tol, min_tol), atol=0)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres_loose, tol=0):
self.M = M
if hasattr(M, 'dtype'):
self.dtype = M.dtype
else:
x = np.zeros(M.shape[1])
self.dtype = (M * x).dtype
self.shape = M.shape
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = 2 * np.finfo(self.dtype).eps
self.ifunc = ifunc
self.tol = tol
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres_loose, tol=0):
self.A = A
self.M = M
self.sigma = sigma
def mult_func(x):
return A.matvec(x) - sigma * M.matvec(x)
def mult_func_M_None(x):
return A.matvec(x) - sigma * x
x = np.zeros(A.shape[1])
if M is None:
dtype = mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
mult_func_M_None,
dtype=dtype)
else:
dtype = mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
mult_func,
dtype=dtype)
self.shape = A.shape
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = 2 * np.finfo(self.OP.dtype).eps
self.ifunc = ifunc
self.tol = tol
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
@property
def dtype(self):
return self.OP.dtype
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
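        # Transposing a CSR matrix gives a CSC matrix essentially for free;
        # for a symmetric M this is the same operator, and splu prefers
        # CSC input.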
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
        # M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * eye(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A),
M, sigma, tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M),
sigma, tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
# ARPACK is not threadsafe or reentrant (SAVE variables), so we need a
# lock and a re-entering check.
_ARPACK_LOCK = ReentrancyLock("Nested calls to eigs/eigsh not allowed: "
"ARPACK is not re-entrant")
def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None,
ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
Minv=None, OPinv=None, OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : ndarray, sparse matrix or LinearOperator
An array, sparse matrix, or LinearOperator representing
the operation ``A * x``, where A is a real or complex square matrix.
k : int, optional
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N-1. It is not possible to compute all
eigenvectors of a matrix.
M : ndarray, sparse matrix or LinearOperator, optional
An array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
A * x = w * M * x.
M must represent a real, symmetric matrix if A is real, and must
represent a complex, hermitian matrix if A is complex. For best
results, the data type of M should be the same as that of A.
Additionally:
If `sigma` is None, M is positive definite
If sigma is specified, M is positive semi-definite
If sigma is None, eigs requires an operator to compute the solution
of the linear equation ``M * x = b``. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
``x = Minv * b = M^-1 * b``.
sigma : real or complex, optional
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
``[A - sigma * M] * x = b``, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives ``x = OPinv * b = [A - sigma * M]^-1 * b``.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues ``w'[i]`` where:
If A is real and OPpart == 'r' (default),
``w'[i] = 1/2 * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]``.
If A is real and OPpart == 'i',
``w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]``.
If A is complex, ``w'[i] = 1/(w[i]-sigma)``.
v0 : ndarray, optional
Starting vector for iteration.
Default: random
ncv : int, optional
        The number of Lanczos vectors generated.
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
Default: ``min(n, max(2*k + 1, 20))``
which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional
Which `k` eigenvectors and eigenvalues to find:
'LM' : largest magnitude
'SM' : smallest magnitude
'LR' : largest real part
'SR' : smallest real part
'LI' : largest imaginary part
'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : int, optional
Maximum number of Arnoldi update iterations allowed
Default: ``n*10``
tol : float, optional
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : bool, optional
Return eigenvectors (True) in addition to eigenvalues
Minv : ndarray, sparse matrix or LinearOperator, optional
See notes in M, above.
OPinv : ndarray, sparse matrix or LinearOperator, optional
See notes in sigma, above.
OPpart : {'r' or 'i'}, optional
See notes in sigma, above
Returns
-------
w : ndarray
Array of k eigenvalues.
v : ndarray
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
Examples
--------
Find 6 eigenvectors of the identity matrix:
    >>> from scipy.sparse.linalg import eigs
    >>> id = np.eye(13)
    >>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
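
    The same eigenvalues found via shift-invert around ``sigma=0.5``
    (an illustrative sketch on the same matrix):

    >>> vals, vecs = eigs(id, k=6, sigma=0.5)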
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0:
raise ValueError("k=%d must be greater than 0." % k)
if k >= n - 1:
warnings.warn("k >= N - 1 for N * N square matrix. "
"Attempting to use scipy.linalg.eig instead.",
RuntimeWarning)
if issparse(A):
raise TypeError("Cannot use scipy.linalg.eig for sparse A with "
"k >= N - 1. Use scipy.linalg.eig(A.toarray()) or"
" reduce k.")
if isinstance(A, LinearOperator):
raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
"A with k >= N - 1.")
if isinstance(M, LinearOperator):
raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
"M with k >= N - 1.")
return eig(A, b=M, right=return_eigenvectors)
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
            # standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
            # general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
        # sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
with _ARPACK_LOCK:
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None,
ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
Minv=None, OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
    A : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation A * x, where A is a real symmetric matrix.
        For buckling mode (see below) A must additionally be positive-definite.
k : int, optional
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
Returns
-------
w : array
Array of k eigenvalues
v : array
An array representing the `k` eigenvectors. The column ``v[:, i]`` is
the eigenvector corresponding to the eigenvalue ``w[i]``.
Other Parameters
----------------
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
A * x = w * M * x.
M must represent a real, symmetric matrix if A is real, and must
represent a complex, hermitian matrix if A is complex. For best
results, the data type of M should be the same as that of A.
Additionally:
If sigma is None, M is symmetric positive definite
If sigma is specified, M is symmetric positive semi-definite
In buckling mode, M is symmetric indefinite.
If sigma is None, eigsh requires an operator to compute the solution
of the linear equation ``M * x = b``. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
``x = Minv * b = M^-1 * b``.
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives ``x = OPinv * b = [A - sigma * M]^-1 * b``.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues ``w'[i]`` where:
if mode == 'normal', ``w'[i] = 1 / (w[i] - sigma)``.
if mode == 'cayley', ``w'[i] = (w[i] + sigma) / (w[i] - sigma)``.
if mode == 'buckling', ``w'[i] = w[i] / (w[i] - sigma)``.
(see further discussion in 'mode' below)
v0 : ndarray, optional
Starting vector for iteration.
Default: random
ncv : int, optional
        The number of Lanczos vectors generated. ncv must be greater than k and
        smaller than n; it is recommended that ``ncv > 2*k``.
Default: ``min(n, max(2*k + 1, 20))``
which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find:
'LM' : Largest (in magnitude) eigenvalues
'SM' : Smallest (in magnitude) eigenvalues
'LA' : Largest (algebraic) eigenvalues
'SA' : Smallest (algebraic) eigenvalues
'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end.
When sigma != None, 'which' refers to the shifted eigenvalues ``w'[i]``
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : int, optional
Maximum number of Arnoldi update iterations allowed
Default: ``n*10``
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : bool
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
'normal' :
OP = [A - sigma * M]^-1 * M,
B = M,
w'[i] = 1 / (w[i] - sigma)
'buckling' :
OP = [A - sigma * M]^-1 * A,
B = A,
w'[i] = w[i] / (w[i] - sigma)
'cayley' :
OP = [A - sigma * M]^-1 * [A + sigma * M],
B = M,
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
        convergence (see [2]_ for a discussion).
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
Examples
--------
    >>> from scipy.sparse.linalg import eigsh
    >>> id = np.eye(13)
    >>> vals, vecs = eigsh(id, k=6)
    >>> vals
    array([ 1.,  1.,  1.,  1.,  1.,  1.])
>>> vecs.shape
(13, 6)
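
    A shift-invert call in buckling mode (an illustrative sketch; ``id``
    is positive definite, as buckling mode requires):

    >>> vals, vecs = eigsh(id, k=6, sigma=0.5, mode='buckling')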
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0:
raise ValueError("k must be greater than 0.")
if k >= n:
warnings.warn("k >= N for N * N square matrix. "
"Attempting to use scipy.linalg.eigh instead.",
RuntimeWarning)
if issparse(A):
raise TypeError("Cannot use scipy.linalg.eigh for sparse A with "
"k >= N. Use scipy.linalg.eigh(A.toarray()) or"
" reduce k.")
if isinstance(A, LinearOperator):
raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
"A with k >= N.")
if isinstance(M, LinearOperator):
raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
"M with k >= N.")
return eigh(A, b=M, eigvals_only=not return_eigenvectors)
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
            # standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
            # general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
with _ARPACK_LOCK:
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _augmented_orthonormal_cols(x, k):
# extract the shape of the x array
n, m = x.shape
# create the expanded array and copy x into it
y = np.empty((n, m+k), dtype=x.dtype)
y[:, :m] = x
# do some modified gram schmidt to add k random orthonormal vectors
for i in range(k):
# sample a random initial vector
v = np.random.randn(n)
if np.iscomplexobj(x):
v = v + 1j*np.random.randn(n)
# subtract projections onto the existing unit length vectors
for j in range(m+i):
u = y[:, j]
v -= (np.dot(v, u.conj()) / np.dot(u, u.conj())) * u
# normalize v
v /= np.sqrt(np.dot(v, v.conj()))
# add v into the output array
y[:, m+i] = v
# return the expanded array
return y
def _augmented_orthonormal_rows(x, k):
return _augmented_orthonormal_cols(x.T, k).T
def _herm(x):
return x.T.conj()
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
maxiter=None, return_singular_vectors=True):
"""Compute the largest k singular values/vectors for a sparse matrix.
Parameters
----------
A : {sparse matrix, LinearOperator}
Array to compute the SVD on, of shape (M, N)
k : int, optional
Number of singular values and vectors to compute.
Must be 1 <= k < min(A.shape).
ncv : int, optional
        The number of Lanczos vectors generated.
        ncv must be greater than k+1 and smaller than n;
        it is recommended that ncv > 2*k.
Default: ``min(n, max(2*k + 1, 20))``
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
which : str, ['LM' | 'SM'], optional
Which `k` singular values to find:
- 'LM' : largest singular values
- 'SM' : smallest singular values
.. versionadded:: 0.12.0
v0 : ndarray, optional
Starting vector for iteration, of length min(A.shape). Should be an
(approximate) left singular vector if N > M and a right singular
vector otherwise.
Default: random
.. versionadded:: 0.12.0
maxiter : int, optional
Maximum number of iterations.
.. versionadded:: 0.12.0
return_singular_vectors : bool or str, optional
- True: return singular vectors (True) in addition to singular values.
.. versionadded:: 0.12.0
- "u": only return the u matrix, without computing vh (if N > M).
- "vh": only return the vh matrix, without computing u (if N <= M).
.. versionadded:: 0.16.0
Returns
-------
u : ndarray, shape=(M, k)
Unitary matrix having left singular vectors as columns.
If `return_singular_vectors` is "vh", this variable is not computed,
and None is returned instead.
s : ndarray, shape=(k,)
The singular values.
vt : ndarray, shape=(k, N)
Unitary matrix having right singular vectors as rows.
If `return_singular_vectors` is "u", this variable is not computed,
and None is returned instead.
Notes
-----
This is a naive implementation using ARPACK as an eigensolver
on A.H * A or A * A.H, depending on which one is more efficient.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import svds, eigs
>>> A = csc_matrix([[1, 0, 0], [5, 0, 2], [0, -1, 0], [0, 0, 3]], dtype=float)
>>> u, s, vt = svds(A, k=2)
>>> s
array([ 2.75193379, 5.6059665 ])
>>> np.sqrt(eigs(A.dot(A.T), k=2)[0]).real
array([ 5.6059665 , 2.75193379])
"""
if not (isinstance(A, LinearOperator) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if k <= 0 or k >= min(n, m):
raise ValueError("k must be between 1 and min(A.shape), k=%d" % k)
if isinstance(A, LinearOperator):
if n > m:
X_dot = A.matvec
X_matmat = A.matmat
XH_dot = A.rmatvec
else:
X_dot = A.rmatvec
XH_dot = A.matvec
dtype = getattr(A, 'dtype', None)
if dtype is None:
dtype = A.dot(np.zeros([m,1])).dtype
# A^H * V; works around lack of LinearOperator.adjoint.
# XXX This can be slow!
def X_matmat(V):
out = np.empty((V.shape[1], m), dtype=dtype)
for i, col in enumerate(V.T):
out[i, :] = A.rmatvec(col.reshape(-1, 1)).T
return out.T
else:
if n > m:
X_dot = X_matmat = A.dot
XH_dot = _herm(A).dot
else:
XH_dot = A.dot
X_dot = X_matmat = _herm(A).dot
def matvec_XH_X(x):
return XH_dot(X_dot(x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
shape=(min(A.shape), min(A.shape)))
# Get a low rank approximation of the implicitly defined gramian matrix.
# This is not a stable way to approach the problem.
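    # (Squaring the problem squares its condition number: small singular
    # values are recovered as square roots of Gramian eigenvalues and lose
    # roughly half of the attainable digits.)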
eigvals, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter,
ncv=ncv, which=which, v0=v0)
# In 'LM' mode try to be clever about small eigenvalues.
# Otherwise in 'SM' mode do not try to be clever.
if which == 'LM':
# Gramian matrices have real non-negative eigenvalues.
eigvals = np.maximum(eigvals.real, 0)
# Use the sophisticated detection of small eigenvalues from pinvh.
t = eigvec.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
cutoff = cond * np.max(eigvals)
# Get a mask indicating which eigenpairs are not degenerately tiny,
# and create the re-ordered array of thresholded singular values.
above_cutoff = (eigvals > cutoff)
nlarge = above_cutoff.sum()
nsmall = k - nlarge
slarge = np.sqrt(eigvals[above_cutoff])
s = np.zeros_like(eigvals)
s[:nlarge] = slarge
if not return_singular_vectors:
return s
if n > m:
vlarge = eigvec[:, above_cutoff]
ularge = X_matmat(vlarge) / slarge if return_singular_vectors != 'vh' else None
vhlarge = _herm(vlarge)
else:
ularge = eigvec[:, above_cutoff]
vhlarge = _herm(X_matmat(ularge) / slarge) if return_singular_vectors != 'u' else None
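        # Singular vectors belonging to the degenerately small singular
        # values are not well determined; pad with random orthonormal
        # columns/rows so the requested k vectors are still returned.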
u = _augmented_orthonormal_cols(ularge, nsmall) if ularge is not None else None
vh = _augmented_orthonormal_rows(vhlarge, nsmall) if vhlarge is not None else None
elif which == 'SM':
s = np.sqrt(eigvals)
if not return_singular_vectors:
return s
if n > m:
v = eigvec
u = X_matmat(v) / s if return_singular_vectors != 'vh' else None
vh = _herm(v)
else:
u = eigvec
vh = _herm(X_matmat(u) / s) if return_singular_vectors != 'u' else None
else:
raise ValueError("which must be either 'LM' or 'SM'.")
return u, s, vh
cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py
from __future__ import division, print_function, absolute_import
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import threading
import numpy as np
from numpy.testing import (assert_allclose, assert_array_almost_equal_nulp,
assert_equal, assert_array_equal)
from pytest import raises as assert_raises
import pytest
from numpy import dot, conj, random
from scipy.linalg import eig, eigh, hilbert, svd
from scipy.sparse import csc_matrix, csr_matrix, isspmatrix, diags
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \
ArpackNoConvergence, arpack
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
from scipy._lib._numpy_compat import suppress_warnings
# precision for tests
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
    Return tolerance values suitable for a given test.
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
def generate_matrix(N, complex=False, hermitian=False,
pos_definite=False, sparse=False):
M = np.random.random((N,N))
if complex:
M = M + 1j * np.random.random((N,N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = np.random.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i,j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = np.random.randint(N, size=N * N // 4)
j = np.random.randint(N, size=N * N // 4)
ind = np.where(i == j)
j[ind] = (j[ind] + 1) % N
M[i,j] = 0
M[j,i] = 0
else:
if sparse:
i = np.random.randint(N, size=N * N // 2)
j = np.random.randint(N, size=N * N // 2)
M[i,j] = 0
return M
def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
M = np.random.random((N, N))
M = 0.5 * (M + M.T) # Make M symmetric
if pos_definite:
Id = N * np.eye(N)
if sparse:
M = csr_matrix(M)
M += Id
else:
if sparse:
M = csr_matrix(M)
return M
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
    except AssertionError:
assert_allclose(actual, conj(desired), **kw)
def argsort_which(eval, typ, k, which,
sigma=None, OPpart=None, mode=None):
"""Return sorted indices of eigenvalues using the "which" keyword
from eigs and eigsh"""
if sigma is None:
reval = np.round(eval, decimals=_ndigits[typ])
else:
if mode is None or mode == 'normal':
if OPpart is None:
reval = 1. / (eval - sigma)
elif OPpart == 'r':
reval = 0.5 * (1. / (eval - sigma)
+ 1. / (eval - np.conj(sigma)))
elif OPpart == 'i':
reval = -0.5j * (1. / (eval - sigma)
- 1. / (eval - np.conj(sigma)))
elif mode == 'cayley':
reval = (eval + sigma) / (eval - sigma)
elif mode == 'buckling':
reval = eval / (eval - sigma)
else:
raise ValueError("mode='%s' not recognized" % mode)
reval = np.round(reval, decimals=_ndigits[typ])
if which in ['LM', 'SM']:
ind = np.argsort(abs(reval))
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
ind = np.argsort(np.real(reval))
elif which in ['LI', 'SI']:
        # for LI,SI ARPACK returns largest,smallest abs(imaginary) parts
        # (the reason is not documented)
if typ.islower():
ind = np.argsort(abs(np.imag(reval)))
else:
ind = np.argsort(np.imag(reval))
else:
raise ValueError("which='%s' is unrecognized" % which)
if which in ['LM', 'LA', 'LR', 'LI']:
return ind[-k:]
elif which in ['SM', 'SA', 'SR', 'SI']:
return ind[:k]
elif which == 'BE':
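        # k//2 from the low end plus the remaining k - k//2 from the high
        # end (one extra from the high end when k is odd, matching the
        # eigsh docstring)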
return np.concatenate((ind[:k//2], ind[k//2-k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
mattype=np.asarray, OPpart=None, mode='normal'):
general = ('bmat' in d)
if symmetric:
eigs_func = eigsh
else:
eigs_func = eigs
if general:
err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
else:
err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
a = d['mat'].astype(typ)
ac = mattype(a)
if general:
b = d['bmat'].astype(typ.lower())
bc = mattype(b)
# get exact eigenvalues
exact_eval = d['eval'].astype(typ.upper())
ind = argsort_which(exact_eval, typ, k, which,
sigma, OPpart, mode)
exact_eval = exact_eval[ind]
# compute arpack eigenvalues
kwargs = dict(which=which, v0=v0, sigma=sigma)
if eigs_func is eigsh:
kwargs['mode'] = mode
else:
kwargs['OPpart'] = OPpart
# compute suitable tolerances
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
# on rare occasions, ARPACK routines return results that are proper
# eigenvalues and -vectors, but not necessarily the ones requested in
# the parameter which. This is inherent to the Krylov methods, and
# should not be treated as a failure. If such a rare situation
# occurs, the calculation is tried again (but at most a few times).
ntries = 0
while ntries < 5:
# solve
if general:
try:
eval, evec = eigs_func(ac, k, bc, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, bc, **kwargs)
else:
try:
eval, evec = eigs_func(ac, k, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, **kwargs)
ind = argsort_which(eval, typ, k, which,
sigma, OPpart, mode)
eval = eval[ind]
evec = evec[:,ind]
# check eigenvectors
LHS = np.dot(a, evec)
if general:
RHS = eval * np.dot(b, evec)
else:
RHS = eval * evec
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
try:
# check eigenvalues
assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol,
err_msg=err)
break
except AssertionError:
ntries += 1
# check eigenvalues
assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<%s>" % self.name
class SymmetricParams:
def __init__(self):
self.eigs = eigsh
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_modes = {None: ['normal'],
0.5: ['normal', 'buckling', 'cayley']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
complex=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard symmetric problem
SS = DictWithRepr("std-symmetric")
SS['mat'] = Ar
SS['v0'] = v0
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
# general symmetric problem
GS = DictWithRepr("gen-symmetric")
GS['mat'] = Ar
GS['bmat'] = M
GS['v0'] = v0
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
# standard hermitian problem
SH = DictWithRepr("std-hermitian")
SH['mat'] = Ac
SH['v0'] = v0
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
# general hermitian problem
GH = DictWithRepr("gen-hermitian")
GH['mat'] = Ac
GH['bmat'] = M
GH['v0'] = v0
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
self.real_test_cases = [SS, GS]
self.complex_test_cases = [SH, GH]
class NonSymmetricParams:
def __init__(self):
self.eigs = eigs
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_OPparts = {None: [None],
0.1: ['r'],
0.1 + 0.1j: ['r', 'i']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, complex=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard real nonsymmetric problem
SNR = DictWithRepr("std-real-nonsym")
SNR['mat'] = Ar
SNR['v0'] = v0
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
# general real nonsymmetric problem
GNR = DictWithRepr("gen-real-nonsym")
GNR['mat'] = Ar
GNR['bmat'] = M
GNR['v0'] = v0
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
# standard complex nonsymmetric problem
SNC = DictWithRepr("std-cmplx-nonsym")
SNC['mat'] = Ac
SNC['v0'] = v0
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
# general complex nonsymmetric problem
GNC = DictWithRepr("gen-cmplx-nonsym")
GNC['mat'] = Ac
GNC['bmat'] = M
GNC['v0'] = v0
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
self.real_test_cases = [SNR, GNR]
self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for (sigma, modes) in params.sigmas_modes.items():
for mode in modes:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype, None, mode)
def test_hermitian_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.complex_test_cases:
for typ in 'FD':
for which in params.which:
if which == 'BE':
continue # BE invalid for complex
for mattype in params.mattypes:
for sigma in params.sigmas_modes:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype)
def test_symmetric_starting_vector():
params = SymmetricParams()
symmetric = True
for k in [1, 2, 3, 4, 5]:
for D in params.real_test_cases:
for typ in 'fd':
v0 = random.rand(len(D['v0'])).astype(typ)
eval_evec(symmetric, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, hermitian=True, pos_definite=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case")
w, v = err.eigenvalues, err.eigenvectors
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
def test_real_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for sigma, OPparts in params.sigmas_OPparts.items():
for OPpart in OPparts:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.complex_test_cases:
for typ in 'DF':
for which in params.which:
for mattype in params.mattypes:
for sigma in params.sigmas_OPparts:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
def test_general_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
def test_standard_nonsymmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, complex=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case")
w, v = err.eigenvalues, err.eigenvectors
for ww, vv in zip(w, v.T):
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
# A is not square.
A = csc_matrix(np.zeros((2, 3)))
assert_raises(ValueError, eigs, A)
def test_eigen_bad_kwargs():
# Test eigen on wrong keyword argument
A = csc_matrix(np.zeros((8, 8)))
assert_raises(ValueError, eigs, A, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# XXX: this test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
#----------------------------------------------------------------------
# sparse SVD tests
def sorted_svd(m, k, which='LM'):
# Compute svd of a dense matrix m, and return singular vectors/values
# sorted.
if isspmatrix(m):
m = m.todense()
u, s, vh = svd(m)
if which == 'LM':
ii = np.argsort(s)[-k:]
elif which == 'SM':
ii = np.argsort(s)[:k]
else:
raise ValueError("unknown which=%r" % (which,))
return u[:, ii], s[ii], vh[ii]
def svd_estimate(u, s, vh):
return np.dot(u, np.dot(np.diag(s), vh))
def svd_test_input_check():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
assert_raises(ValueError, svds, x, k=-1)
assert_raises(ValueError, svds, x, k=0)
assert_raises(ValueError, svds, x, k=10)
assert_raises(ValueError, svds, x, k=x.shape[0])
assert_raises(ValueError, svds, x, k=x.shape[1])
assert_raises(ValueError, svds, x.T, k=x.shape[0])
assert_raises(ValueError, svds, x.T, k=x.shape[1])
def test_svd_simple_real():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
y = np.array([[1, 2, 3, 8],
[3, 4, 3, 5],
[1, 0, 2, 3],
[0, 0, 1, 0]], float)
z = csc_matrix(x)
for m in [x.T, x, y, z, z.T]:
for k in range(1, min(m.shape)):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_simple_complex():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1 + 1j, 0, 2],
[0, 0, 1]], complex)
y = np.array([[1, 2, 3, 8 + 5j],
[3 - 2j, 4, 3, 5],
[1, 0, 2, 3],
[0, 0, 1, 0]], complex)
z = csc_matrix(x)
for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]:
for k in range(1, min(m.shape) - 1):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_maxiter():
# check that maxiter works as expected
x = hilbert(6)
# ARPACK shouldn't converge on such an ill-conditioned matrix with just
# one iteration
assert_raises(ArpackNoConvergence, svds, x, 1, maxiter=1, ncv=3)
# but 100 iterations should be more than enough
u, s, vt = svds(x, 1, maxiter=100, ncv=3)
assert_allclose(s, [1.7], atol=0.5)
def test_svd_return():
# check that the return_singular_vectors parameter works as expected
x = hilbert(6)
_, s, _ = sorted_svd(x, 2)
ss = svds(x, 2, return_singular_vectors=False)
assert_allclose(s, ss)
def test_svd_which():
# check that the which parameter works as expected
x = hilbert(6)
for which in ['LM', 'SM']:
_, s, _ = sorted_svd(x, 2, which=which)
ss = svds(x, 2, which=which, return_singular_vectors=False)
ss.sort()
assert_allclose(s, ss, atol=np.sqrt(1e-15))
def test_svd_v0():
# check that the v0 parameter works as expected
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], float)
u, s, vh = svds(x, 1)
u2, s2, vh2 = svds(x, 1, v0=u[:,0])
assert_allclose(s, s2, atol=np.sqrt(1e-15))
def _check_svds(A, k, U, s, VH):
n, m = A.shape
# Check shapes.
assert_equal(U.shape, (n, k))
assert_equal(s.shape, (k,))
assert_equal(VH.shape, (k, m))
# Check that the original matrix can be reconstituted.
A_rebuilt = (U*s).dot(VH)
assert_equal(A_rebuilt.shape, A.shape)
assert_allclose(A_rebuilt, A)
# Check that U is a semi-orthogonal matrix.
UH_U = np.dot(U.T.conj(), U)
assert_equal(UH_U.shape, (k, k))
assert_allclose(UH_U, np.identity(k), atol=1e-12)
# Check that V is a semi-orthogonal matrix.
VH_V = np.dot(VH, VH.T.conj())
assert_equal(VH_V.shape, (k, k))
assert_allclose(VH_V, np.identity(k), atol=1e-12)
def test_svd_LM_ones_matrix():
# Check that svds can deal with matrix_rank less than k in LM mode.
k = 3
for n, m in (6, 5), (5, 5), (5, 6):
for t in float, complex:
A = np.ones((n, m), dtype=t)
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the largest singular value is near sqrt(n*m)
# and the other singular values have been forced to zero.
assert_allclose(np.max(s), np.sqrt(n*m))
assert_array_equal(sorted(s)[:-1], 0)
def test_svd_LM_zeros_matrix():
# Check that svds can deal with matrices containing only zeros.
k = 1
for n, m in (3, 4), (4, 4), (4, 3):
for t in float, complex:
A = np.zeros((n, m), dtype=t)
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
def test_svd_LM_zeros_matrix_gh_3452():
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/3452
    # Note that for complex dtype the size of this matrix is too small for k=1.
n, m, k = 4, 2, 1
A = np.zeros((n, m))
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
class CheckingLinearOperator(LinearOperator):
def __init__(self, A):
self.A = A
self.dtype = A.dtype
self.shape = A.shape
def _matvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.dot(x)
def _rmatvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.T.conjugate().dot(x)
def test_svd_linop():
nmks = [(6, 7, 3),
(9, 5, 4),
(10, 8, 5)]
def reorder(args):
U, s, VH = args
j = np.argsort(s)
return U[:,j], s[j], VH[j,:]
for n, m, k in nmks:
# Test svds on a LinearOperator.
A = np.random.RandomState(52).randn(n, m)
L = CheckingLinearOperator(A)
v0 = np.ones(min(A.shape))
U1, s1, VH1 = reorder(svds(A, k, v0=v0))
U2, s2, VH2 = reorder(svds(L, k, v0=v0))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
# Try again with which="SM".
A = np.random.RandomState(1909).randn(n, m)
L = CheckingLinearOperator(A)
U1, s1, VH1 = reorder(svds(A, k, which="SM"))
U2, s2, VH2 = reorder(svds(L, k, which="SM"))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
if k < min(n, m) - 1:
# Complex input and explicit which="LM".
for (dt, eps) in [(complex, 1e-7), (np.complex64, 1e-3)]:
rng = np.random.RandomState(1648)
A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
L = CheckingLinearOperator(A)
U1, s1, VH1 = reorder(svds(A, k, which="LM"))
U2, s2, VH2 = reorder(svds(L, k, which="LM"))
assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
assert_allclose(s1, s2, rtol=eps)
assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)), rtol=eps)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
# Check that the linear operators used by the Arpack wrappers are
# deallocatable by reference counting -- they are big objects, so
# Python's cyclic GC may not collect them fast enough before
# running out of memory if eigs/eigsh are called in a tight loop.
M_d = np.eye(10)
M_s = csc_matrix(M_d)
M_o = aslinearoperator(M_d)
with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
pass
with assert_deallocated(lambda: arpack.LuInv(M_d)):
pass
with assert_deallocated(lambda: arpack.IterInv(M_s)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
pass
def test_svds_partial_return():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
# test vertical matrix
z = csr_matrix(x)
vh_full = svds(z, 2)[-1]
vh_partial = svds(z, 2, return_singular_vectors='vh')[-1]
dvh = np.linalg.norm(np.abs(vh_full) - np.abs(vh_partial))
    if dvh > 1e-10:
        raise AssertionError('right singular vector matrices differ when using return_singular_vectors parameter')
    if svds(z, 2, return_singular_vectors='vh')[0] is not None:
        raise AssertionError('left singular vector matrix was computed when it should not have been')
# test horizontal matrix
z = csr_matrix(x.T)
u_full = svds(z, 2)[0]
u_partial = svds(z, 2, return_singular_vectors='vh')[0]
du = np.linalg.norm(np.abs(u_full) - np.abs(u_partial))
    if du > 1e-10:
        raise AssertionError('left singular vector matrices differ when using return_singular_vectors parameter')
    if svds(z, 2, return_singular_vectors='u')[-1] is not None:
        raise AssertionError('right singular vector matrix was computed when it should not have been')
def test_svds_wrong_eigen_type():
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/4590
# Function was not checking for eigenvalue type and unintended
# values could be returned.
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
assert_raises(ValueError, svds, x, 1, which='LA')
def test_parallel_threads():
results = []
v0 = np.random.rand(50)
def worker():
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=3, v0=v0)
results.append(w)
w, v = eigsh(x, k=3, v0=v0)
results.append(w)
    threads = [threading.Thread(target=worker) for _ in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
worker()
for r in results:
assert_allclose(r, results[-1])
def test_reentering():
# Just some linear operator that calls eigs recursively
def A_matvec(x):
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=1)
return v / w[0]
A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50))
# The Fortran code is not reentrant, so this fails (gracefully, not crashing)
assert_raises(RuntimeError, eigs, A, k=1)
assert_raises(RuntimeError, eigsh, A, k=1)
def test_regression_arpackng_1315():
# Check that issue arpack-ng/#1315 is not present.
# Adapted from arpack-ng/TESTS/bug_1315_single.c
# If this fails, then the installed ARPACK library is faulty.
for dtype in [np.float32, np.float64]:
np.random.seed(1234)
w0 = np.arange(1, 1000+1).astype(dtype)
A = diags([w0], [0], shape=(1000, 1000))
v0 = np.random.rand(1000).astype(dtype)
w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0)
assert_allclose(np.sort(w), np.sort(w0[-9:]),
rtol=1e-4)
def test_eigs_for_k_greater():
# Test eigs() for k beyond limits.
A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse
A = generate_matrix(4, sparse=False)
M_dense = np.random.random((4, 4))
M_sparse = generate_matrix(4, sparse=True)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eig(A, b=M_dense)
eig_tuple2 = eig(A, b=M_sparse)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigs, A, M=M_linop, k=3)
# Test 'A' for different types
assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
assert_raises(TypeError, eigs, A_sparse, k=3)
def test_eigsh_for_k_greater():
# Test eigsh() for k beyond limits.
A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse
A = generate_matrix(4, sparse=False)
M_dense = generate_matrix_symmetric(4, pos_definite=True)
M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eigh(A, b=M_dense)
eig_tuple2 = eigh(A, b=M_sparse)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigsh, A, M=M_linop, k=4)
# Test 'A' for different types
assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
| 31,498 | 31.60766 | 110 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/arpack/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py
|
"""
Pure SciPy implementation of Locally Optimal Block Preconditioned Conjugate
Gradient Method (LOBPCG), see
https://bitbucket.org/joseroman/blopex
License: BSD
Authors: Robert Cimrman, Andrew Knyazev
Examples in tests directory contributed by Nils Wagner.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from numpy.testing import assert_allclose
from scipy._lib.six import xrange
from scipy.linalg import inv, eigh, cho_factor, cho_solve, cholesky
from scipy.sparse.linalg import aslinearoperator, LinearOperator
__all__ = ['lobpcg']
def pause():
# Used only when verbosity level > 10.
input()
def save(ar, fileName):
# Used only when verbosity level > 10.
from numpy import savetxt
    savetxt(fileName, ar, fmt='%.8e')  # np.savetxt takes 'fmt', not 'precision'
def _assert_symmetric(M, rtol=1e-5, atol=1e-8):
assert_allclose(M.T.conj(), M, rtol=rtol, atol=atol)
def as2d(ar):
"""
If the input array is 2D return it, if it is 1D, append a dimension,
making it a column vector.
"""
if ar.ndim == 2:
return ar
    else:  # Assume a 1D array.
aux = np.array(ar, copy=False)
aux.shape = (ar.shape[0], 1)
return aux
def _makeOperator(operatorInput, expectedShape):
"""Takes a dense numpy array or a sparse matrix or
a function and makes an operator performing matrix * blockvector
products.
Examples
--------
    >>> A = _makeOperator(arrayA, (n, n))
    >>> vectorB = A(vectorX)
"""
if operatorInput is None:
def ident(x):
return x
operator = LinearOperator(expectedShape, ident, matmat=ident)
else:
operator = aslinearoperator(operatorInput)
if operator.shape != expectedShape:
raise ValueError('operator has invalid shape')
return operator
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
"""Changes blockVectorV in place."""
gramYBV = np.dot(blockVectorBY.T.conj(), blockVectorV)
tmp = cho_solve(factYBY, gramYBV)
blockVectorV -= np.dot(blockVectorY, tmp)
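# A minimal sketch (hypothetical helper, never called by lobpcg itself) of
# the invariant _applyConstraints enforces: afterwards Y^H B V == 0, i.e.
# the block V lies in the B-orthogonal complement of span(Y). Here B = I,
# so blockVectorBY is simply Y.
def _demo_apply_constraints():
    rng = np.random.RandomState(0)
    Y = rng.randn(8, 2)
    V = rng.randn(8, 3)
    factYBY = cho_factor(np.dot(Y.T, Y))
    _applyConstraints(V, factYBY, Y, Y)
    assert_allclose(np.dot(Y.T, V), np.zeros((2, 3)), atol=1e-12)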
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
if blockVectorBV is None:
if B is not None:
blockVectorBV = B(blockVectorV)
else:
blockVectorBV = blockVectorV # Shared data!!!
gramVBV = np.dot(blockVectorV.T.conj(), blockVectorBV)
gramVBV = cholesky(gramVBV)
gramVBV = inv(gramVBV, overwrite_a=True)
# gramVBV is now R^{-1}.
blockVectorV = np.dot(blockVectorV, gramVBV)
if B is not None:
blockVectorBV = np.dot(blockVectorBV, gramVBV)
if retInvR:
return blockVectorV, blockVectorBV, gramVBV
else:
return blockVectorV, blockVectorBV
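# A small sketch (hypothetical, for exposition only) of what
# _b_orthonormalize returns: it Cholesky-factors the Gram matrix
# V^H B V = R^H R and post-multiplies V by R^{-1}, so the resulting block
# is orthonormal in the B inner product. With B = None (i.e. B = I) the
# result is plainly orthonormal:
def _demo_b_orthonormalize():
    rng = np.random.RandomState(1)
    V, _ = _b_orthonormalize(None, rng.randn(6, 3))
    assert_allclose(np.dot(V.T, V), np.eye(3), atol=1e-12)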
def lobpcg(A, X,
B=None, M=None, Y=None,
tol=None, maxiter=20,
largest=True, verbosityLevel=0,
retLambdaHistory=False, retResidualNormsHistory=False):
"""Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive
definite (SPD) generalized eigenproblems.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The symmetric linear operator of the problem, usually a
sparse matrix. Often called the "stiffness matrix".
X : array_like
        Initial approximation to the k eigenvectors. If A has
        shape=(n,n) then X should have shape=(n,k).
    B : {dense matrix, sparse matrix, LinearOperator}, optional
        The right hand side operator in a generalized eigenproblem.
        By default, B = Identity. Often called the "mass matrix".
    M : {dense matrix, sparse matrix, LinearOperator}, optional
        Preconditioner to A; by default M = Identity.
        M should approximate the inverse of A.
Y : array_like, optional
        n-by-sizeY matrix of constraints, sizeY < n.
The iterations will be performed in the B-orthogonal complement
of the column-space of Y. Y must be full rank.
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors. V has the same shape as X.
Other Parameters
----------------
tol : scalar, optional
Solver tolerance (stopping criterion)
by default: tol=n*sqrt(eps)
maxiter : integer, optional
maximum number of iterations
by default: maxiter=min(n,20)
largest : bool, optional
when True, solve for the largest eigenvalues, otherwise the smallest
verbosityLevel : integer, optional
controls solver output. default: verbosityLevel = 0.
retLambdaHistory : boolean, optional
whether to return eigenvalue history
retResidualNormsHistory : boolean, optional
whether to return history of residual norms
Examples
--------
Solve A x = lambda B x with constraints and preconditioning.
    >>> import numpy as np
    >>> from scipy.sparse import spdiags, issparse
>>> from scipy.sparse.linalg import lobpcg, LinearOperator
>>> n = 100
>>> vals = [np.arange(n, dtype=np.float64) + 1]
>>> A = spdiags(vals, 0, n, n)
>>> A.toarray()
array([[ 1., 0., 0., ..., 0., 0., 0.],
[ 0., 2., 0., ..., 0., 0., 0.],
[ 0., 0., 3., ..., 0., 0., 0.],
...,
[ 0., 0., 0., ..., 98., 0., 0.],
[ 0., 0., 0., ..., 0., 99., 0.],
[ 0., 0., 0., ..., 0., 0., 100.]])
Constraints.
>>> Y = np.eye(n, 3)
Initial guess for eigenvectors, should have linearly independent
columns. Column dimension = number of requested eigenvalues.
>>> X = np.random.rand(n, 3)
Preconditioner -- inverse of A (as an abstract linear operator).
>>> invA = spdiags([1./vals[0]], 0, n, n)
>>> def precond( x ):
... return invA * x
>>> M = LinearOperator(matvec=precond, shape=(n, n), dtype=float)
Here, ``invA`` could of course have been used directly as a preconditioner.
Let us then solve the problem:
>>> eigs, vecs = lobpcg(A, X, Y=Y, M=M, tol=1e-4, maxiter=40, largest=False)
>>> eigs
array([ 4., 5., 6.])
    Note that the vectors passed in Y are the eigenvectors corresponding to
    the 3 smallest eigenvalues. The results returned are orthogonal to those.
Notes
-----
If both retLambdaHistory and retResidualNormsHistory are True,
the return tuple has the following format
(lambda, V, lambda history, residual norms history).
    In the following ``n`` denotes the matrix size and ``m`` the number
    of required eigenvalues (smallest or largest).
    The LOBPCG code internally solves eigenproblems of size ``3*m`` on every
    iteration by calling the "standard" dense eigensolver, so if ``m`` is not
    small enough compared to ``n``, it does not make sense to call the LOBPCG
    code, but rather one should use the "standard" eigensolver, e.g. a numpy
    or scipy function, in this case.
    If one calls the LOBPCG algorithm for ``5*m > n``, it will most likely
    break internally, so the code tries to call the standard function instead.
    It is not that ``n`` should be large for the LOBPCG to work, but rather
    the ratio ``n``/``m`` should be large. If you call LOBPCG with ``m=1``
    and ``n=10``, it should work, though ``n`` is small. The method is
    intended for extremely large ``n``/``m``, see e.g., reference [28] in
    http://arxiv.org/abs/0705.2626
The convergence speed depends basically on two factors:
    1. How well the sought eigenvalues are separated from the rest of
       the eigenvalues. One can try to vary ``m`` to make this better.
2. How well conditioned the problem is. This can be changed by using proper
preconditioning. For example, a rod vibration test problem (under tests
directory) is ill-conditioned for large ``n``, so convergence will be
slow, unless efficient preconditioning is used.
For this specific problem, a good simple preconditioner function would
be a linear solve for A, which is easy to code since A is tridiagonal.
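    For such a tridiagonal ``A``, a sketch of that idea (assuming ``A`` is
    available as a sparse matrix) could look like::

        from scipy.sparse.linalg import splu
        lu = splu(A.tocsc())
        M = LinearOperator(matvec=lu.solve, shape=A.shape, dtype=A.dtype)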
*Acknowledgements*
lobpcg.py code was written by Robert Cimrman.
Many thanks belong to Andrew Knyazev, the author of the algorithm,
for lots of advice and support.
References
----------
.. [1] A. V. Knyazev (2001),
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method.
SIAM Journal on Scientific Computing 23, no. 2,
pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
in hypre and PETSc. http://arxiv.org/abs/0705.2626
.. [3] A. V. Knyazev's C and MATLAB implementations:
https://bitbucket.org/joseroman/blopex
"""
blockVectorX = X
blockVectorY = Y
residualTolerance = tol
maxIterations = maxiter
if blockVectorY is not None:
sizeY = blockVectorY.shape[1]
else:
sizeY = 0
# Block size.
if len(blockVectorX.shape) != 2:
raise ValueError('expected rank-2 array for argument X')
n, sizeX = blockVectorX.shape
if sizeX > n:
raise ValueError('X column dimension exceeds the row dimension')
A = _makeOperator(A, (n,n))
B = _makeOperator(B, (n,n))
M = _makeOperator(M, (n,n))
if (n - sizeY) < (5 * sizeX):
# warn('The problem size is small compared to the block size.' \
# ' Using dense eigensolver instead of LOBPCG.')
if blockVectorY is not None:
raise NotImplementedError('The dense eigensolver '
'does not support constraints.')
# Define the closed range of indices of eigenvalues to return.
if largest:
eigvals = (n - sizeX, n-1)
else:
eigvals = (0, sizeX-1)
A_dense = A(np.eye(n))
B_dense = None if B is None else B(np.eye(n))
return eigh(A_dense, B_dense, eigvals=eigvals, check_finite=False)
if residualTolerance is None:
residualTolerance = np.sqrt(1e-15) * n
maxIterations = min(n, maxIterations)
if verbosityLevel:
aux = "Solving "
if B is None:
aux += "standard"
else:
aux += "generalized"
aux += " eigenvalue problem with"
if M is None:
aux += "out"
aux += " preconditioning\n\n"
aux += "matrix size %d\n" % n
aux += "block size %d\n\n" % sizeX
if blockVectorY is None:
aux += "No constraints\n\n"
else:
if sizeY > 1:
aux += "%d constraints\n\n" % sizeY
else:
aux += "%d constraint\n\n" % sizeY
print(aux)
##
# Apply constraints to X.
if blockVectorY is not None:
if B is not None:
blockVectorBY = B(blockVectorY)
else:
blockVectorBY = blockVectorY
# gramYBY is a dense array.
gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
try:
# gramYBY is a Cholesky factor from now on...
gramYBY = cho_factor(gramYBY)
        except np.linalg.LinAlgError:
raise ValueError('cannot handle linearly dependent constraints')
_applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
##
# B-orthonormalize X.
blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)
##
# Compute the initial Ritz vectors: solve the eigenproblem.
blockVectorAX = A(blockVectorX)
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
_lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
ii = np.argsort(_lambda)[:sizeX]
if largest:
ii = ii[::-1]
_lambda = _lambda[ii]
eigBlockVector = np.asarray(eigBlockVector[:,ii])
blockVectorX = np.dot(blockVectorX, eigBlockVector)
blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
if B is not None:
blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
##
# Active index set.
activeMask = np.ones((sizeX,), dtype=bool)
lambdaHistory = [_lambda]
residualNormsHistory = []
previousBlockSize = sizeX
ident = np.eye(sizeX, dtype=A.dtype)
ident0 = np.eye(sizeX, dtype=A.dtype)
##
# Main iteration loop.
blockVectorP = None # set during iteration
blockVectorAP = None
blockVectorBP = None
for iterationNumber in xrange(maxIterations):
if verbosityLevel > 0:
print('iteration %d' % iterationNumber)
aux = blockVectorBX * _lambda[np.newaxis,:]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)
residualNorms = np.sqrt(aux)
residualNormsHistory.append(residualNorms)
        ii = residualNorms > residualTolerance
activeMask = activeMask & ii
if verbosityLevel > 2:
print(activeMask)
currentBlockSize = activeMask.sum()
if currentBlockSize != previousBlockSize:
previousBlockSize = currentBlockSize
ident = np.eye(currentBlockSize, dtype=A.dtype)
if currentBlockSize == 0:
break
if verbosityLevel > 0:
print('current block size:', currentBlockSize)
print('eigenvalue:', _lambda)
print('residual norms:', residualNorms)
if verbosityLevel > 10:
print(eigBlockVector)
activeBlockVectorR = as2d(blockVectorR[:,activeMask])
if iterationNumber > 0:
activeBlockVectorP = as2d(blockVectorP[:,activeMask])
activeBlockVectorAP = as2d(blockVectorAP[:,activeMask])
activeBlockVectorBP = as2d(blockVectorBP[:,activeMask])
if M is not None:
# Apply preconditioner T to the active residuals.
activeBlockVectorR = M(activeBlockVectorR)
##
# Apply constraints to the preconditioned residuals.
if blockVectorY is not None:
_applyConstraints(activeBlockVectorR,
gramYBY, blockVectorBY, blockVectorY)
##
# B-orthonormalize the preconditioned residuals.
aux = _b_orthonormalize(B, activeBlockVectorR)
activeBlockVectorR, activeBlockVectorBR = aux
activeBlockVectorAR = A(activeBlockVectorR)
if iterationNumber > 0:
aux = _b_orthonormalize(B, activeBlockVectorP,
activeBlockVectorBP, retInvR=True)
activeBlockVectorP, activeBlockVectorBP, invR = aux
activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)
##
        # Perform the Rayleigh-Ritz procedure:
# Compute symmetric Gram matrices:
xaw = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
waw = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
xbw = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
if iterationNumber > 0:
xap = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
wap = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
pap = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
xbp = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
wbp = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
gramA = np.bmat([[np.diag(_lambda), xaw, xap],
[xaw.T.conj(), waw, wap],
[xap.T.conj(), wap.T.conj(), pap]])
gramB = np.bmat([[ident0, xbw, xbp],
[xbw.T.conj(), ident, wbp],
[xbp.T.conj(), wbp.T.conj(), ident]])
else:
gramA = np.bmat([[np.diag(_lambda), xaw],
[xaw.T.conj(), waw]])
gramB = np.bmat([[ident0, xbw],
[xbw.T.conj(), ident]])
_assert_symmetric(gramA)
_assert_symmetric(gramB)
if verbosityLevel > 10:
save(gramA, 'gramA')
save(gramB, 'gramB')
# Solve the generalized eigenvalue problem.
_lambda, eigBlockVector = eigh(gramA, gramB, check_finite=False)
ii = np.argsort(_lambda)[:sizeX]
if largest:
ii = ii[::-1]
if verbosityLevel > 10:
print(ii)
_lambda = _lambda[ii]
eigBlockVector = eigBlockVector[:,ii]
lambdaHistory.append(_lambda)
if verbosityLevel > 10:
print('lambda:', _lambda)
## # Normalize eigenvectors!
## aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 )
## eigVecNorms = np.sqrt( aux )
## eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis,:]
# eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector )
if verbosityLevel > 10:
print(eigBlockVector)
pause()
##
# Compute Ritz vectors.
if iterationNumber > 0:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
else:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
if verbosityLevel > 10:
print(pp)
print(app)
print(bpp)
pause()
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
aux = blockVectorBX * _lambda[np.newaxis,:]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)
residualNorms = np.sqrt(aux)
if verbosityLevel > 0:
print('final eigenvalue:', _lambda)
print('final residual norms:', residualNorms)
if retLambdaHistory:
if retResidualNormsHistory:
return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
else:
return _lambda, blockVectorX, lambdaHistory
else:
if retResidualNormsHistory:
return _lambda, blockVectorX, residualNormsHistory
else:
return _lambda, blockVectorX
| 19,289 | 32.901582 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/lobpcg/setup.py
|
from __future__ import division, print_function, absolute_import
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('lobpcg',parent_package,top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 410 | 24.6875 | 64 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/lobpcg/__init__.py
|
"""
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
Call the function lobpcg - see help for lobpcg.lobpcg.
"""
from __future__ import division, print_function, absolute_import
from .lobpcg import *
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 486 | 24.631579 | 76 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py
|
""" Test functions for the sparse.linalg.eigen.lobpcg module
"""
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less, assert_)
from scipy import ones, rand, r_, diag, linalg, eye
from scipy.linalg import eig, eigh, toeplitz
import scipy.sparse
from scipy.sparse.linalg.eigen.lobpcg import lobpcg
def ElasticRod(n):
# Fixed-free elastic rod
L = 1.0
le = L/n
rho = 7.85e3
S = 1.e-4
E = 2.1e11
mass = rho*S*le/6.
k = E*S/le
A = k*(diag(r_[2.*ones(n-1),1])-diag(ones(n-1),1)-diag(ones(n-1),-1))
B = mass*(diag(r_[4.*ones(n-1),2])+diag(ones(n-1),1)+diag(ones(n-1),-1))
return A,B
def MikotaPair(n):
    # Mikota pair acts as a nice test since the generalized eigenvalues
    # are the squares of the integers k, k = 1, 2, ...
x = np.arange(1,n+1)
B = diag(1./x)
y = np.arange(n-1,0,-1)
z = np.arange(2*n-1,0,-2)
A = diag(z)-diag(y,-1)-diag(y,1)
return A,B
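# A quick sketch (hypothetical, not collected by pytest) confirming the claim
# above with a dense solver: the generalized eigenvalues of the Mikota pair
# are exactly 1, 4, 9, ..., n**2.
def _demo_mikota_eigenvalues():
    A, B = MikotaPair(8)
    w = eigh(A, B, eigvals_only=True)
    assert_allclose(w, np.arange(1, 9)**2)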
def compare_solutions(A,B,m):
n = A.shape[0]
np.random.seed(0)
V = rand(n,m)
X = linalg.orth(V)
eigs,vecs = lobpcg(A, X, B=B, tol=1e-5, maxiter=30)
eigs.sort()
w,v = eig(A,b=B)
w.sort()
    assert_almost_equal(w[:m//2], eigs[:m//2], decimal=2)
def test_Small():
A,B = ElasticRod(10)
compare_solutions(A,B,10)
A,B = MikotaPair(10)
compare_solutions(A,B,10)
def test_ElasticRod():
A,B = ElasticRod(100)
compare_solutions(A,B,20)
def test_MikotaPair():
A,B = MikotaPair(100)
compare_solutions(A,B,20)
def test_trivial():
n = 5
X = ones((n, 1))
A = eye(n)
compare_solutions(A, None, n)
def test_regression():
# https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
n = 10
X = np.ones((n, 1))
A = np.identity(n)
w, V = lobpcg(A, X)
assert_allclose(w, [1])
def test_diagonal():
# This test was moved from '__main__' in lobpcg.py.
# Coincidentally or not, this is the same eigensystem
# required to reproduce arpack bug
# http://forge.scilab.org/index.php/p/arpack-ng/issues/1397/
# even using the same n=100.
np.random.seed(1234)
# The system of interest is of size n x n.
n = 100
# We care about only m eigenpairs.
m = 4
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# and where we choose A to be the diagonal matrix whose entries are 1..n
# and where B is chosen to be the identity matrix.
vals = np.arange(1, n+1, dtype=float)
A = scipy.sparse.diags([vals], [0], (n, n))
B = scipy.sparse.eye(n)
# Let the preconditioner M be the inverse of A.
M = scipy.sparse.diags([np.reciprocal(vals)], [0], (n, n))
# Pick random initial vectors.
X = np.random.rand(n, m)
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors.
m_excluded = 3
Y = np.eye(n, m_excluded)
eigs, vecs = lobpcg(A, X, B, M=M, Y=Y, tol=1e-4, maxiter=40, largest=False)
assert_allclose(eigs, np.arange(1+m_excluded, 1+m_excluded+m))
_check_eigen(A, eigs, vecs, rtol=1e-3, atol=1e-3)
def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
mult_wV = np.multiply(w, V)
dot_MV = M.dot(V)
assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
def _check_fiedler(n, p):
# This is not necessarily the recommended way to find the Fiedler vector.
np.random.seed(1234)
col = np.zeros(n)
col[1] = 1
A = toeplitz(col)
D = np.diag(A.sum(axis=1))
L = D - A
# Compute the full eigendecomposition using tricks, e.g.
# http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
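    # (For this path-graph Laplacian the eigenvalues are 2*(1 - cos(pi*k/n)),
    # k = 0, ..., n-1, with sampled-cosine eigenvectors; the two lines below
    # write that decomposition down directly.)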
tmp = np.pi * np.arange(n) / n
analytic_w = 2 * (1 - np.cos(tmp))
analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
_check_eigen(L, analytic_w, analytic_V)
# Compute the full eigendecomposition using eigh.
eigh_w, eigh_V = eigh(L)
_check_eigen(L, eigh_w, eigh_V)
# Check that the first eigenvalue is near zero and that the rest agree.
assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
assert_allclose(eigh_w[1:], analytic_w[1:])
# Check small lobpcg eigenvalues.
X = analytic_V[:, :p]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
# Check large lobpcg eigenvalues.
X = analytic_V[:, -p:]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
# Look for the Fiedler vector using good but not exactly correct guesses.
fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
X = np.vstack((np.ones(n), fiedler_guess)).T
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
# Mathematically, the smaller eigenvalue should be zero
# and the larger should be the algebraic connectivity.
lobpcg_w = np.sort(lobpcg_w)
assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
def test_fiedler_small_8():
# This triggers the dense path because 8 < 2*5.
_check_fiedler(8, 2)
def test_fiedler_large_12():
# This does not trigger the dense path, because 2*5 <= 12.
_check_fiedler(12, 2)
def test_hermitian():
np.random.seed(1234)
sizes = [3, 10, 50]
ks = [1, 3, 10, 50]
gens = [True, False]
for size, k, gen in itertools.product(sizes, ks, gens):
if k > size:
continue
H = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
H = 10 * np.eye(size) + H + H.T.conj()
X = np.random.rand(size, k)
if not gen:
B = np.eye(size)
w, v = lobpcg(H, X, maxiter=5000)
w0, v0 = eigh(H)
else:
B = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
B = 10 * np.eye(size) + B.dot(B.T.conj())
w, v = lobpcg(H, X, B, maxiter=5000)
w0, v0 = eigh(H, B)
for wx, vx in zip(w, v.T):
# Check eigenvector
assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx) / np.linalg.norm(H.dot(vx)),
0, atol=5e-4, rtol=0)
# Compare eigenvalues
j = np.argmin(abs(w0 - wx))
assert_allclose(wx, w0[j], rtol=1e-4)
| 6,709 | 28.173913 | 99 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/tests/test_norm.py
|
"""Test functions for the sparse.linalg.norm module
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm as npnorm
from numpy.testing import assert_equal, assert_allclose
from pytest import raises as assert_raises
from scipy._lib._version import NumpyVersion
import scipy.sparse
from scipy.sparse.linalg import norm as spnorm
class TestNorm(object):
def setup_method(self):
a = np.arange(9) - 4
b = a.reshape((3, 3))
self.b = scipy.sparse.csr_matrix(b)
def test_matrix_norm(self):
# Frobenius norm is the default
assert_allclose(spnorm(self.b), 7.745966692414834)
assert_allclose(spnorm(self.b, 'fro'), 7.745966692414834)
assert_allclose(spnorm(self.b, np.inf), 9)
assert_allclose(spnorm(self.b, -np.inf), 2)
assert_allclose(spnorm(self.b, 1), 7)
assert_allclose(spnorm(self.b, -1), 6)
# _multi_svd_norm is not implemented for sparse matrix
assert_raises(NotImplementedError, spnorm, self.b, 2)
assert_raises(NotImplementedError, spnorm, self.b, -2)
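    # For reference: with b = (arange(9) - 4).reshape(3, 3), the absolute row
    # sums are (9, 2, 9) and the absolute column sums are (7, 6, 7), which is
    # where the inf/-inf norms 9 and 2 and the 1/-1 norms 7 and 6 above come
    # from; the Frobenius norm is sqrt(sum of squares) = sqrt(60) = 7.7459...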
def test_matrix_norm_axis(self):
for m, axis in ((self.b, None), (self.b, (0, 1)), (self.b.T, (1, 0))):
assert_allclose(spnorm(m, axis=axis), 7.745966692414834)
assert_allclose(spnorm(m, 'fro', axis=axis), 7.745966692414834)
assert_allclose(spnorm(m, np.inf, axis=axis), 9)
assert_allclose(spnorm(m, -np.inf, axis=axis), 2)
assert_allclose(spnorm(m, 1, axis=axis), 7)
assert_allclose(spnorm(m, -1, axis=axis), 6)
def test_vector_norm(self):
v = [4.5825756949558398, 4.2426406871192848, 4.5825756949558398]
for m, a in (self.b, 0), (self.b.T, 1):
for axis in a, (a, ), a-2, (a-2, ):
assert_allclose(spnorm(m, 1, axis=axis), [7, 6, 7])
assert_allclose(spnorm(m, np.inf, axis=axis), [4, 3, 4])
assert_allclose(spnorm(m, axis=axis), v)
assert_allclose(spnorm(m, ord=2, axis=axis), v)
assert_allclose(spnorm(m, ord=None, axis=axis), v)
def test_norm_exceptions(self):
m = self.b
assert_raises(TypeError, spnorm, m, None, 1.5)
assert_raises(TypeError, spnorm, m, None, [2])
assert_raises(ValueError, spnorm, m, None, ())
assert_raises(ValueError, spnorm, m, None, (0, 1, 2))
assert_raises(ValueError, spnorm, m, None, (0, 0))
assert_raises(ValueError, spnorm, m, None, (0, 2))
assert_raises(ValueError, spnorm, m, None, (-3, 0))
assert_raises(ValueError, spnorm, m, None, 2)
assert_raises(ValueError, spnorm, m, None, -3)
assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', 0)
assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', (0, 1))
class TestVsNumpyNorm(object):
_sparse_types = (
scipy.sparse.bsr_matrix,
scipy.sparse.coo_matrix,
scipy.sparse.csc_matrix,
scipy.sparse.csr_matrix,
scipy.sparse.dia_matrix,
scipy.sparse.dok_matrix,
scipy.sparse.lil_matrix,
)
_test_matrices = (
(np.arange(9) - 4).reshape((3, 3)),
[
[1, 2, 3],
[-1, 1, 4]],
[
[1, 0, 3],
[-1, 1, 4j]],
)
def test_sparse_matrix_norms(self):
for sparse_type in self._sparse_types:
for M in self._test_matrices:
S = sparse_type(M)
assert_allclose(spnorm(S), npnorm(M))
assert_allclose(spnorm(S, 'fro'), npnorm(M, 'fro'))
assert_allclose(spnorm(S, np.inf), npnorm(M, np.inf))
assert_allclose(spnorm(S, -np.inf), npnorm(M, -np.inf))
assert_allclose(spnorm(S, 1), npnorm(M, 1))
assert_allclose(spnorm(S, -1), npnorm(M, -1))
def test_sparse_matrix_norms_with_axis(self):
for sparse_type in self._sparse_types:
for M in self._test_matrices:
S = sparse_type(M)
for axis in None, (0, 1), (1, 0):
assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
for ord in 'fro', np.inf, -np.inf, 1, -1:
assert_allclose(spnorm(S, ord, axis=axis),
npnorm(M, ord, axis=axis))
# Some numpy matrix norms are allergic to negative axes.
for axis in (-2, -1), (-1, -2), (1, -2):
assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
assert_allclose(spnorm(S, 'f', axis=axis),
npnorm(M, 'f', axis=axis))
assert_allclose(spnorm(S, 'fro', axis=axis),
npnorm(M, 'fro', axis=axis))
def test_sparse_vector_norms(self):
for sparse_type in self._sparse_types:
for M in self._test_matrices:
S = sparse_type(M)
for axis in (0, 1, -1, -2, (0, ), (1, ), (-1, ), (-2, )):
assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
for ord in None, 2, np.inf, -np.inf, 1, 0.5, 0.42:
assert_allclose(spnorm(S, ord, axis=axis),
npnorm(M, ord, axis=axis))
| 5,486 | 41.867188 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/tests/test_matfuncs.py
|
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for scipy.linalg.matfuncs module
"""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from numpy import array, eye, exp, random
from numpy.linalg import matrix_power
from numpy.testing import (
assert_allclose, assert_, assert_array_almost_equal, assert_equal,
assert_array_almost_equal_nulp)
from scipy._lib._numpy_compat import suppress_warnings
from scipy.sparse import csc_matrix, SparseEfficiencyWarning
from scipy.sparse.construct import eye as speye
from scipy.sparse.linalg.matfuncs import (expm, _expm,
ProductOperator, MatrixPowerOperator,
_onenorm_matrix_power_nnm)
from scipy.linalg import logm
from scipy.special import factorial, binom
import scipy.sparse
import scipy.sparse.linalg
def _burkardt_13_power(n, p):
"""
A helper function for testing matrix functions.
Parameters
----------
n : integer greater than 1
Order of the square matrix to be returned.
p : non-negative integer
Power of the matrix.
Returns
-------
out : ndarray representing a square matrix
A Forsythe matrix of order n, raised to the power p.
"""
# Input validation.
if n != int(n) or n < 2:
raise ValueError('n must be an integer greater than 1')
n = int(n)
if p != int(p) or p < 0:
raise ValueError('p must be a non-negative integer')
p = int(p)
# Construct the matrix explicitly.
a, b = divmod(p, n)
large = np.power(10.0, -n*a)
small = large * np.power(10.0, -n)
return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n)
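# A consistency sketch (hypothetical helper, not collected by pytest): the
# closed form above for the p-th power agrees with repeatedly multiplying
# the p = 1 Forsythe matrix.
def _demo_burkardt_13_power():
    for n, p in [(3, 0), (3, 2), (4, 5)]:
        assert_allclose(_burkardt_13_power(n, p),
                        matrix_power(_burkardt_13_power(n, 1), p))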
def test_onenorm_matrix_power_nnm():
np.random.seed(1234)
for n in range(1, 5):
for p in range(5):
M = np.random.random((n, n))
Mp = np.linalg.matrix_power(M, p)
observed = _onenorm_matrix_power_nnm(M, p)
expected = np.linalg.norm(Mp, 1)
assert_allclose(observed, expected)
class TestExpM(object):
def test_zero_ndarray(self):
a = array([[0.,0],[0,0]])
assert_array_almost_equal(expm(a),[[1,0],[0,1]])
def test_zero_sparse(self):
a = csc_matrix([[0.,0],[0,0]])
assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]])
def test_zero_matrix(self):
a = np.matrix([[0.,0],[0,0]])
assert_array_almost_equal(expm(a),[[1,0],[0,1]])
def test_misc_types(self):
A = expm(np.array([[1]]))
assert_allclose(expm(((1,),)), A)
assert_allclose(expm([[1]]), A)
assert_allclose(expm(np.matrix([[1]])), A)
assert_allclose(expm(np.array([[1]])), A)
assert_allclose(expm(csc_matrix([[1]])).A, A)
B = expm(np.array([[1j]]))
assert_allclose(expm(((1j,),)), B)
assert_allclose(expm([[1j]]), B)
assert_allclose(expm(np.matrix([[1j]])), B)
assert_allclose(expm(csc_matrix([[1j]])).A, B)
def test_bidiagonal_sparse(self):
A = csc_matrix([
[1, 3, 0],
[0, 1, 5],
[0, 0, 2]], dtype=float)
e1 = math.exp(1)
e2 = math.exp(2)
expected = np.array([
[e1, 3*e1, 15*(e2 - 2*e1)],
[0, e1, 5*(e2 - e1)],
[0, 0, e2]], dtype=float)
observed = expm(A).toarray()
assert_array_almost_equal(observed, expected)
def test_padecases_dtype_float(self):
for dtype in [np.float32, np.float64]:
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
A = scale * eye(3, dtype=dtype)
observed = expm(A)
expected = exp(scale) * eye(3, dtype=dtype)
assert_array_almost_equal_nulp(observed, expected, nulp=100)
def test_padecases_dtype_complex(self):
for dtype in [np.complex64, np.complex128]:
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
A = scale * eye(3, dtype=dtype)
observed = expm(A)
expected = exp(scale) * eye(3, dtype=dtype)
assert_array_almost_equal_nulp(observed, expected, nulp=100)
def test_padecases_dtype_sparse_float(self):
# float32 and complex64 lead to errors in spsolve/UMFpack
dtype = np.float64
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
a = scale * speye(3, 3, dtype=dtype, format='csc')
e = exp(scale) * eye(3, dtype=dtype)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a csc_matrix is expensive.")
exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()
inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()
assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)
assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)
def test_padecases_dtype_sparse_complex(self):
# float32 and complex64 lead to errors in spsolve/UMFpack
dtype = np.complex128
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
a = scale * speye(3, 3, dtype=dtype, format='csc')
e = exp(scale) * eye(3, dtype=dtype)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a csc_matrix is expensive.")
assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)
def test_logm_consistency(self):
random.seed(1234)
for dtype in [np.float64, np.complex128]:
for n in range(1, 10):
for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:
# make logm(A) be of a given scale
A = (eye(n) + random.rand(n, n) * scale).astype(dtype)
if np.iscomplexobj(A):
A = A + 1j * random.rand(n, n) * scale
assert_array_almost_equal(expm(logm(A)), A)
def test_integer_matrix(self):
Q = np.array([
[-3, 1, 1, 1],
[1, -3, 1, 1],
[1, 1, -3, 1],
[1, 1, 1, -3]])
assert_allclose(expm(Q), expm(1.0 * Q))
def test_triangularity_perturbation(self):
# Experiment (1) of
# Awad H. Al-Mohy and Nicholas J. Higham (2012)
# Improved Inverse Scaling and Squaring Algorithms
# for the Matrix Logarithm.
A = np.array([
[3.2346e-1, 3e4, 3e4, 3e4],
[0, 3.0089e-1, 3e4, 3e4],
[0, 0, 3.221e-1, 3e4],
[0, 0, 0, 3.0744e-1]],
dtype=float)
A_logm = np.array([
[-1.12867982029050462e+00, 9.61418377142025565e+04,
-4.52485573953179264e+09, 2.92496941103871812e+14],
[0.00000000000000000e+00, -1.20101052953082288e+00,
9.63469687211303099e+04, -4.68104828911105442e+09],
[0.00000000000000000e+00, 0.00000000000000000e+00,
-1.13289322264498393e+00, 9.53249183094775653e+04],
[0.00000000000000000e+00, 0.00000000000000000e+00,
0.00000000000000000e+00, -1.17947533272554850e+00]],
dtype=float)
assert_allclose(expm(A_logm), A, rtol=1e-4)
# Perturb the upper triangular matrix by tiny amounts,
# so that it becomes technically not upper triangular.
random.seed(1234)
tiny = 1e-17
A_logm_perturbed = A_logm.copy()
A_logm_perturbed[1, 0] = tiny
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"scipy.linalg.solve\nIll-conditioned.*")
A_expm_logm_perturbed = expm(A_logm_perturbed)
rtol = 1e-4
atol = 100 * tiny
assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol))
def test_burkardt_1(self):
# This matrix is diagonal.
# The calculation of the matrix exponential is simple.
#
# This is the first of a series of matrix exponential tests
# collected by John Burkardt from the following sources.
#
# Alan Laub,
# Review of "Linear System Theory" by Joao Hespanha,
# SIAM Review,
# Volume 52, Number 4, December 2010, pages 779--781.
#
# Cleve Moler and Charles Van Loan,
# Nineteen Dubious Ways to Compute the Exponential of a Matrix,
# Twenty-Five Years Later,
# SIAM Review,
# Volume 45, Number 1, March 2003, pages 3--49.
#
# Cleve Moler,
# Cleve's Corner: A Balancing Act for the Matrix Exponential,
# 23 July 2012.
#
# Robert Ward,
# Numerical computation of the matrix exponential
# with accuracy estimate,
# SIAM Journal on Numerical Analysis,
# Volume 14, Number 4, September 1977, pages 600--610.
exp1 = np.exp(1)
exp2 = np.exp(2)
A = np.array([
[1, 0],
[0, 2],
], dtype=float)
desired = np.array([
[exp1, 0],
[0, exp2],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_2(self):
# This matrix is symmetric.
# The calculation of the matrix exponential is straightforward.
A = np.array([
[1, 3],
[3, 2],
], dtype=float)
desired = np.array([
[39.322809708033859, 46.166301438885753],
[46.166301438885768, 54.711576854329110],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_3(self):
# This example is due to Laub.
# This matrix is ill-suited for the Taylor series approach.
# As powers of A are computed, the entries blow up too quickly.
exp1 = np.exp(1)
exp39 = np.exp(39)
A = np.array([
[0, 1],
[-39, -40],
], dtype=float)
desired = np.array([
[
39/(38*exp1) - 1/(38*exp39),
-np.expm1(-38) / (38*exp1)],
[
39*np.expm1(-38) / (38*exp1),
-1/(38*exp1) + 39/(38*exp39)],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_4(self):
# This example is due to Moler and Van Loan.
# The example will cause problems for the series summation approach,
# as well as for diagonal Pade approximations.
A = np.array([
[-49, 24],
[-64, 31],
], dtype=float)
U = np.array([[3, 1], [4, 2]], dtype=float)
V = np.array([[1, -1/2], [-2, 3/2]], dtype=float)
w = np.array([-17, -1], dtype=float)
desired = np.dot(U * np.exp(w), V)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_5(self):
# This example is due to Moler and Van Loan.
# This matrix is strictly upper triangular
# All powers of A are zero beyond some (low) limit.
# This example will cause problems for Pade approximations.
A = np.array([
[0, 6, 0, 0],
[0, 0, 6, 0],
[0, 0, 0, 6],
[0, 0, 0, 0],
], dtype=float)
desired = np.array([
[1, 6, 18, 36],
[0, 1, 6, 18],
[0, 0, 1, 6],
[0, 0, 0, 1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
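    # Because the matrix above is nilpotent (A**4 == 0), the exponential
    # series terminates after four terms. A small sketch (hypothetical,
    # not collected by pytest) cross-checking expm against the explicit
    # truncated Taylor sum:
    def _demo_burkardt_5_taylor(self):
        A = np.diag([6.0, 6.0, 6.0], 1)  # same strictly upper triangular A
        taylor = np.eye(4) + A + A.dot(A)/2.0 + A.dot(A).dot(A)/6.0
        assert_allclose(expm(A), taylor)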
def test_burkardt_6(self):
# This example is due to Moler and Van Loan.
# This matrix does not have a complete set of eigenvectors.
# That means the eigenvector approach will fail.
exp1 = np.exp(1)
A = np.array([
[1, 1],
[0, 1],
], dtype=float)
desired = np.array([
[exp1, exp1],
[0, exp1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_7(self):
# This example is due to Moler and Van Loan.
# This matrix is very close to example 5.
# Mathematically, it has a complete set of eigenvectors.
# Numerically, however, the calculation will be suspect.
exp1 = np.exp(1)
eps = np.spacing(1)
A = np.array([
[1 + eps, 1],
[0, 1 - eps],
], dtype=float)
desired = np.array([
[exp1, exp1],
[0, exp1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_8(self):
# This matrix was an example in Wikipedia.
exp4 = np.exp(4)
exp16 = np.exp(16)
A = np.array([
[21, 17, 6],
[-5, -1, -6],
[4, 4, 16],
], dtype=float)
desired = np.array([
[13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4],
[-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4],
[16*exp16, 16*exp16, 4*exp16],
], dtype=float) * 0.25
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_9(self):
# This matrix is due to the NAG Library.
# It is an example for function F01ECF.
A = np.array([
[1, 2, 2, 2],
[3, 1, 1, 2],
[3, 2, 1, 2],
[3, 3, 3, 1],
], dtype=float)
desired = np.array([
[740.7038, 610.8500, 542.2743, 549.1753],
[731.2510, 603.5524, 535.0884, 542.2743],
[823.7630, 679.4257, 603.5524, 610.8500],
[998.4355, 823.7630, 731.2510, 740.7038],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_10(self):
# This is Ward's example #1.
# It is defective and nonderogatory.
A = np.array([
[4, 2, 0],
[1, 4, 1],
[1, 1, 4],
], dtype=float)
assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6))
desired = np.array([
[147.8666224463699, 183.7651386463682, 71.79703239999647],
[127.7810855231823, 183.7651386463682, 91.88256932318415],
[127.7810855231824, 163.6796017231806, 111.9681062463718],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_11(self):
# This is Ward's example #2.
# It is a symmetric matrix.
A = np.array([
[29.87942128909879, 0.7815750847907159, -2.289519314033932],
[0.7815750847907159, 25.72656945571064, 8.680737820540137],
[-2.289519314033932, 8.680737820540137, 34.39400925519054],
], dtype=float)
assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40))
desired = np.array([
[
5.496313853692378E+15,
-1.823188097200898E+16,
-3.047577080858001E+16],
[
-1.823188097200899E+16,
6.060522870222108E+16,
1.012918429302482E+17],
[
-3.047577080858001E+16,
1.012918429302482E+17,
1.692944112408493E+17],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_12(self):
# This is Ward's example #3.
# Ward's algorithm has difficulty estimating the accuracy
# of its results.
A = np.array([
[-131, 19, 18],
[-390, 56, 54],
[-387, 57, 52],
], dtype=float)
assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1))
desired = np.array([
[-1.509644158793135, 0.3678794391096522, 0.1353352811751005],
[-5.632570799891469, 1.471517758499875, 0.4060058435250609],
[-4.934938326088363, 1.103638317328798, 0.5413411267617766],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_13(self):
# This is Ward's example #4.
# This is a version of the Forsythe matrix.
# The eigenvector problem is badly conditioned.
        # Ward's algorithm has difficulty estimating the accuracy
# of its results for this problem.
#
# Check the construction of one instance of this family of matrices.
A4_actual = _burkardt_13_power(4, 1)
A4_desired = [[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[1e-4, 0, 0, 0]]
assert_allclose(A4_actual, A4_desired)
# Check the expm for a few instances.
for n in (2, 3, 4, 10):
# Approximate expm using Taylor series.
# This works well for this matrix family
# because each matrix in the summation,
# even before dividing by the factorial,
# is entrywise positive with max entry 10**(-floor(p/n)*n).
k = max(1, int(np.ceil(16/n)))
desired = np.zeros((n, n), dtype=float)
for p in range(n*k):
Ap = _burkardt_13_power(n, p)
assert_equal(np.min(Ap), 0)
assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))
desired += Ap / factorial(p)
actual = expm(_burkardt_13_power(n, 1))
assert_allclose(actual, desired)
def test_burkardt_14(self):
# This is Moler's example.
# This badly scaled matrix caused problems for MATLAB's expm().
A = np.array([
[0, 1e-8, 0],
[-(2e10 + 4e8/6.), -3, 2e10],
[200./3., 0, -200./3.],
], dtype=float)
desired = np.array([
[0.446849468283175, 1.54044157383952e-09, 0.462811453558774],
[-5743067.77947947, -0.0152830038686819, -4526542.71278401],
[0.447722977849494, 1.54270484519591e-09, 0.463480648837651],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_pascal(self):
# Test pascal triangle.
# Nilpotent exponential, used to trigger a failure (gh-8029)
for scale in [1.0, 1e-3, 1e-6]:
for n in range(120):
A = np.diag(np.arange(1, n + 1), -1) * scale
B = expm(A)
sc = scale**np.arange(n, -1, -1)
if np.any(sc < 1e-300):
continue
got = B
expected = binom(np.arange(n + 1)[:,None],
np.arange(n + 1)[None,:]) * sc[None,:] / sc[:,None]
err = abs(expected - got).max()
atol = 1e-13 * abs(expected).max()
assert_allclose(got, expected, atol=atol)
class TestOperators(object):
def test_product_operator(self):
random.seed(1234)
n = 5
k = 2
nsamples = 10
for i in range(nsamples):
A = np.random.randn(n, n)
B = np.random.randn(n, n)
C = np.random.randn(n, n)
D = np.random.randn(n, k)
op = ProductOperator(A, B, C)
assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D))
assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D))
def test_matrix_power_operator(self):
random.seed(1234)
n = 5
k = 2
p = 3
nsamples = 10
for i in range(nsamples):
A = np.random.randn(n, n)
B = np.random.randn(n, k)
op = MatrixPowerOperator(A, p)
assert_allclose(op.matmat(B), matrix_power(A, p).dot(B))
assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B))
| 19,937 | 35.449726 | 91 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/tests/test_interface.py
|
"""Test functions for the sparse.linalg.interface module
"""
from __future__ import division, print_function, absolute_import
from functools import partial
from itertools import product
import operator
import pytest
from pytest import raises as assert_raises
from numpy.testing import assert_, assert_equal
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import interface
# Only test matmul operator (A @ B) when available (Python 3.5+)
TEST_MATMUL = hasattr(operator, 'matmul')
class TestLinearOperator(object):
def setup_method(self):
self.A = np.array([[1,2,3],
[4,5,6]])
self.B = np.array([[1,2],
[3,4],
[5,6]])
self.C = np.array([[1,2],
[3,4]])
def test_matvec(self):
def get_matvecs(A):
return [{
'shape': A.shape,
'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]),
'rmatvec': lambda x: np.dot(A.T.conj(),
x).reshape(A.shape[1])
},
{
'shape': A.shape,
'matvec': lambda x: np.dot(A, x),
'rmatvec': lambda x: np.dot(A.T.conj(), x),
'matmat': lambda x: np.dot(A, x)
}]
for matvecs in get_matvecs(self.A):
A = interface.LinearOperator(**matvecs)
assert_(A.args == ())
assert_equal(A.matvec(np.array([1,2,3])), [14,32])
assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.array([1,2,3]), [14,32])
assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
assert_equal(A.dot(np.array([1,2,3])), [14,32])
assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A.matvec(np.matrix([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.matrix([[1],[2],[3]]), [[14],[32]])
assert_equal(A.dot(np.matrix([[1],[2],[3]])), [[14],[32]])
assert_equal((2*A)*[1,1,1], [12,30])
assert_equal((2*A).rmatvec([1,1]), [10, 14, 18])
assert_equal((2*A).H.matvec([1,1]), [10, 14, 18])
assert_equal((2*A)*[[1],[1],[1]], [[12],[30]])
assert_equal((2*A).matmat([[1],[1],[1]]), [[12],[30]])
assert_equal((A*2)*[1,1,1], [12,30])
assert_equal((A*2)*[[1],[1],[1]], [[12],[30]])
assert_equal((2j*A)*[1,1,1], [12j,30j])
assert_equal((A+A)*[1,1,1], [12, 30])
assert_equal((A+A).rmatvec([1,1]), [10, 14, 18])
assert_equal((A+A).H.matvec([1,1]), [10, 14, 18])
assert_equal((A+A)*[[1],[1],[1]], [[12], [30]])
assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]])
assert_equal((-A)*[1,1,1], [-6,-15])
assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]])
assert_equal((A-A)*[1,1,1], [0,0])
assert_equal((A-A)*[[1],[1],[1]], [[0],[0]])
z = A+A
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A)
z = 2*A
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2)
assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray))
assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A * np.array([1,2,3]), np.ndarray))
assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray))
assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray))
assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A.matvec(np.matrix([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A * np.matrix([[1],[2],[3]]), np.ndarray))
assert_(isinstance(A.dot(np.matrix([[1],[2],[3]])), np.ndarray))
assert_(isinstance(2*A, interface._ScaledLinearOperator))
assert_(isinstance(2j*A, interface._ScaledLinearOperator))
assert_(isinstance(A+A, interface._SumLinearOperator))
assert_(isinstance(-A, interface._ScaledLinearOperator))
assert_(isinstance(A-A, interface._SumLinearOperator))
assert_((2j*A).dtype == np.complex_)
assert_raises(ValueError, A.matvec, np.array([1,2]))
assert_raises(ValueError, A.matvec, np.array([1,2,3,4]))
assert_raises(ValueError, A.matvec, np.array([[1],[2]]))
assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]]))
assert_raises(ValueError, lambda: A*A)
assert_raises(ValueError, lambda: A**2)
for matvecsA, matvecsB in product(get_matvecs(self.A),
get_matvecs(self.B)):
A = interface.LinearOperator(**matvecsA)
B = interface.LinearOperator(**matvecsB)
assert_equal((A*B)*[1,1], [50,113])
assert_equal((A*B)*[[1],[1]], [[50],[113]])
assert_equal((A*B).matmat([[1],[1]]), [[50],[113]])
assert_equal((A*B).rmatvec([1,1]), [71,92])
assert_equal((A*B).H.matvec([1,1]), [71,92])
assert_(isinstance(A*B, interface._ProductLinearOperator))
assert_raises(ValueError, lambda: A+B)
assert_raises(ValueError, lambda: A**2)
z = A*B
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B)
for matvecsC in get_matvecs(self.C):
C = interface.LinearOperator(**matvecsC)
assert_equal((C**2)*[1,1], [17,37])
assert_equal((C**2).rmatvec([1,1]), [22,32])
assert_equal((C**2).H.matvec([1,1]), [22,32])
assert_equal((C**2).matmat([[1],[1]]), [[17],[37]])
assert_(isinstance(C**2, interface._PowerLinearOperator))
def test_matmul(self):
if not TEST_MATMUL:
pytest.skip("matmul is only tested in Python 3.5+")
D = {'shape': self.A.shape,
'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),
'rmatvec': lambda x: np.dot(self.A.T.conj(),
x).reshape(self.A.shape[1]),
'matmat': lambda x: np.dot(self.A, x)}
A = interface.LinearOperator(**D)
B = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = B[0]
assert_equal(operator.matmul(A, b), A * b)
assert_equal(operator.matmul(A, B), A * B)
assert_raises(ValueError, operator.matmul, A, 2)
assert_raises(ValueError, operator.matmul, 2, A)
class TestAsLinearOperator(object):
def setup_method(self):
self.cases = []
def make_cases(dtype):
self.cases.append(np.matrix([[1,2,3],[4,5,6]], dtype=dtype))
self.cases.append(np.array([[1,2,3],[4,5,6]], dtype=dtype))
self.cases.append(sparse.csr_matrix([[1,2,3],[4,5,6]], dtype=dtype))
# Test default implementations of _adjoint and _rmatvec, which
# refer to each other.
def mv(x, dtype):
y = np.array([1 * x[0] + 2 * x[1] + 3 * x[2],
4 * x[0] + 5 * x[1] + 6 * x[2]], dtype=dtype)
if len(x.shape) == 2:
y = y.reshape(-1, 1)
return y
def rmv(x, dtype):
return np.array([1 * x[0] + 4 * x[1],
2 * x[0] + 5 * x[1],
3 * x[0] + 6 * x[1]], dtype=dtype)
class BaseMatlike(interface.LinearOperator):
def __init__(self, dtype):
self.dtype = np.dtype(dtype)
self.shape = (2,3)
def _matvec(self, x):
return mv(x, self.dtype)
class HasRmatvec(BaseMatlike):
def _rmatvec(self,x):
return rmv(x, self.dtype)
class HasAdjoint(BaseMatlike):
def _adjoint(self):
shape = self.shape[1], self.shape[0]
matvec = partial(rmv, dtype=self.dtype)
rmatvec = partial(mv, dtype=self.dtype)
return interface.LinearOperator(matvec=matvec,
rmatvec=rmatvec,
dtype=self.dtype,
shape=shape)
self.cases.append(HasRmatvec(dtype))
self.cases.append(HasAdjoint(dtype))
make_cases('int32')
make_cases('float32')
make_cases('float64')
def test_basic(self):
for M in self.cases:
A = interface.aslinearoperator(M)
M,N = A.shape
assert_equal(A.matvec(np.array([1,2,3])), [14,32])
assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.array([1,2,3]), [14,32])
assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
assert_equal(A.rmatvec(np.array([1,2])), [9,12,15])
assert_equal(A.rmatvec(np.array([[1],[2]])), [[9],[12],[15]])
assert_equal(A.H.matvec(np.array([1,2])), [9,12,15])
assert_equal(A.H.matvec(np.array([[1],[2]])), [[9],[12],[15]])
assert_equal(
A.matmat(np.array([[1,4],[2,5],[3,6]])),
[[14,32],[32,77]])
assert_equal(A * np.array([[1,4],[2,5],[3,6]]), [[14,32],[32,77]])
if hasattr(M,'dtype'):
assert_equal(A.dtype, M.dtype)
def test_dot(self):
for M in self.cases:
A = interface.aslinearoperator(M)
M,N = A.shape
assert_equal(A.dot(np.array([1,2,3])), [14,32])
assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(
A.dot(np.array([[1,4],[2,5],[3,6]])),
[[14,32],[32,77]])
def test_repr():
A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1)
repr_A = repr(A)
assert_('unspecified dtype' not in repr_A, repr_A)
def test_identity():
ident = interface.IdentityOperator((3, 3))
assert_equal(ident * [1, 2, 3], [1, 2, 3])
assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9))
assert_raises(ValueError, ident.matvec, [1, 2, 3, 4])
def test_attributes():
A = interface.aslinearoperator(np.arange(16).reshape(4, 4))
def always_four_ones(x):
x = np.asarray(x)
assert_(x.shape == (3,) or x.shape == (3, 1))
return np.ones(4)
B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)
for op in [A, B, A * B, A.H, A + A, B + B, A ** 4]:
assert_(hasattr(op, "dtype"))
assert_(hasattr(op, "shape"))
assert_(hasattr(op, "_matvec"))
def matvec(x):
""" Needed for test_pickle as local functions are not pickleable """
return np.zeros(3)
def test_pickle():
import pickle
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
A = interface.LinearOperator((3, 3), matvec)
s = pickle.dumps(A, protocol=protocol)
B = pickle.loads(s)
for k in A.__dict__:
assert_equal(getattr(A, k), getattr(B, k))
def test_inheritance():
class Empty(interface.LinearOperator):
pass
assert_raises(TypeError, Empty)
class Identity(interface.LinearOperator):
def __init__(self, n):
super(Identity, self).__init__(dtype=None, shape=(n, n))
def _matvec(self, x):
return x
id3 = Identity(3)
assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3])
assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6])
class MatmatOnly(interface.LinearOperator):
def __init__(self, A):
super(MatmatOnly, self).__init__(A.dtype, A.shape)
self.A = A
def _matmat(self, x):
return self.A.dot(x)
mm = MatmatOnly(np.random.randn(5, 3))
assert_equal(mm.matvec(np.random.randn(3)).shape, (5,))
def test_dtypes_of_operator_sum():
# gh-6078
mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2)
mat_real = np.random.rand(2,2)
complex_operator = interface.aslinearoperator(mat_complex)
real_operator = interface.aslinearoperator(mat_real)
sum_complex = complex_operator + complex_operator
sum_real = real_operator + real_operator
assert_equal(sum_real.dtype, np.float64)
assert_equal(sum_complex.dtype, np.complex128)
def test_no_double_init():
call_count = [0]
def matvec(v):
call_count[0] += 1
return v
# It should call matvec exactly once (in order to determine the
# operator dtype)
A = interface.LinearOperator((2, 2), matvec=matvec)
assert_equal(call_count[0], 1)
| 13,030 | 35.707042 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/tests/test_onenormest.py
|
"""Test functions for the sparse.linalg._onenormest module
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_
import pytest
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg._onenormest import _onenormest_core, _algorithm_2_2
class MatrixProductOperator(scipy.sparse.linalg.LinearOperator):
"""
This is purely for onenormest testing.
"""
def __init__(self, A, B):
if A.ndim != 2 or B.ndim != 2:
raise ValueError('expected ndarrays representing matrices')
if A.shape[1] != B.shape[0]:
raise ValueError('incompatible shapes')
self.A = A
self.B = B
self.ndim = 2
self.shape = (A.shape[0], B.shape[1])
def _matvec(self, x):
return np.dot(self.A, np.dot(self.B, x))
def _rmatvec(self, x):
return np.dot(np.dot(x, self.A), self.B)
def _matmat(self, X):
return np.dot(self.A, np.dot(self.B, X))
@property
def T(self):
return MatrixProductOperator(self.B.T, self.A.T)
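# Usage sketch (illustrative, not part of the tests): the operator applies
# A @ (B @ x) without ever forming the dense product A @ B, e.g.
#
#     op = MatrixProductOperator(np.random.randn(5, 3), np.random.randn(3, 5))
#     y = op.matvec(np.ones(5))   # same result as np.dot(np.dot(A, B), ones)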
class TestOnenormest(object):
@pytest.mark.xslow
def test_onenormest_table_3_t_2(self):
# This will take multiple seconds if your computer is slow like mine.
# It is stochastic, so the tolerance could be too strict.
np.random.seed(1234)
t = 2
n = 100
itmax = 5
nsamples = 5000
observed = []
expected = []
nmult_list = []
nresample_list = []
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
observed.append(est)
expected.append(scipy.linalg.norm(A, 1))
nmult_list.append(nmults)
nresample_list.append(nresamples)
observed = np.array(observed, dtype=float)
expected = np.array(expected, dtype=float)
relative_errors = np.abs(observed - expected) / expected
# check the mean underestimation ratio
underestimation_ratio = observed / expected
assert_(0.99 < np.mean(underestimation_ratio) < 1.0)
# check the max and mean required column resamples
assert_equal(np.max(nresample_list), 2)
assert_(0.05 < np.mean(nresample_list) < 0.2)
# check the proportion of norms computed exactly correctly
nexact = np.count_nonzero(relative_errors < 1e-14)
proportion_exact = nexact / float(nsamples)
assert_(0.9 < proportion_exact < 0.95)
# check the average number of matrix*vector multiplications
assert_(3.5 < np.mean(nmult_list) < 4.5)
@pytest.mark.xslow
def test_onenormest_table_4_t_7(self):
# This will take multiple seconds if your computer is slow like mine.
# It is stochastic, so the tolerance could be too strict.
np.random.seed(1234)
t = 7
n = 100
itmax = 5
nsamples = 5000
observed = []
expected = []
nmult_list = []
nresample_list = []
for i in range(nsamples):
A = np.random.randint(-1, 2, size=(n, n))
est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
observed.append(est)
expected.append(scipy.linalg.norm(A, 1))
nmult_list.append(nmults)
nresample_list.append(nresamples)
observed = np.array(observed, dtype=float)
expected = np.array(expected, dtype=float)
relative_errors = np.abs(observed - expected) / expected
# check the mean underestimation ratio
underestimation_ratio = observed / expected
assert_(0.90 < np.mean(underestimation_ratio) < 0.99)
# check the required column resamples
assert_equal(np.max(nresample_list), 0)
# check the proportion of norms computed exactly correctly
nexact = np.count_nonzero(relative_errors < 1e-14)
proportion_exact = nexact / float(nsamples)
assert_(0.15 < proportion_exact < 0.25)
# check the average number of matrix*vector multiplications
assert_(3.5 < np.mean(nmult_list) < 4.5)
def test_onenormest_table_5_t_1(self):
# "note that there is no randomness and hence only one estimate for t=1"
t = 1
n = 100
itmax = 5
alpha = 1 - 1e-6
A = -scipy.linalg.inv(np.identity(n) + alpha*np.eye(n, k=1))
first_col = np.array([1] + [0]*(n-1))
first_row = np.array([(-alpha)**i for i in range(n)])
B = -scipy.linalg.toeplitz(first_col, first_row)
assert_allclose(A, B)
est, v, w, nmults, nresamples = _onenormest_core(B, B.T, t, itmax)
exact_value = scipy.linalg.norm(B, 1)
underest_ratio = est / exact_value
assert_allclose(underest_ratio, 0.05, rtol=1e-4)
assert_equal(nmults, 11)
assert_equal(nresamples, 0)
# check the non-underscored version of onenormest
est_plain = scipy.sparse.linalg.onenormest(B, t=t, itmax=itmax)
assert_allclose(est, est_plain)
@pytest.mark.xslow
def test_onenormest_table_6_t_1(self):
#TODO this test seems to give estimates that match the table,
#TODO even though no attempt has been made to deal with
#TODO complex numbers in the one-norm estimation.
# This will take multiple seconds if your computer is slow like mine.
# It is stochastic, so the tolerance could be too strict.
np.random.seed(1234)
t = 1
n = 100
itmax = 5
nsamples = 5000
observed = []
expected = []
nmult_list = []
nresample_list = []
for i in range(nsamples):
A_inv = np.random.rand(n, n) + 1j * np.random.rand(n, n)
A = scipy.linalg.inv(A_inv)
est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
observed.append(est)
expected.append(scipy.linalg.norm(A, 1))
nmult_list.append(nmults)
nresample_list.append(nresamples)
observed = np.array(observed, dtype=float)
expected = np.array(expected, dtype=float)
relative_errors = np.abs(observed - expected) / expected
# check the mean underestimation ratio
underestimation_ratio = observed / expected
underestimation_ratio_mean = np.mean(underestimation_ratio)
assert_(0.90 < underestimation_ratio_mean < 0.99)
# check the required column resamples
max_nresamples = np.max(nresample_list)
assert_equal(max_nresamples, 0)
# check the proportion of norms computed exactly correctly
nexact = np.count_nonzero(relative_errors < 1e-14)
proportion_exact = nexact / float(nsamples)
assert_(0.7 < proportion_exact < 0.8)
# check the average number of matrix*vector multiplications
mean_nmult = np.mean(nmult_list)
assert_(4 < mean_nmult < 5)
def _help_product_norm_slow(self, A, B):
# for profiling
C = np.dot(A, B)
return scipy.linalg.norm(C, 1)
def _help_product_norm_fast(self, A, B):
# for profiling
t = 2
itmax = 5
D = MatrixProductOperator(A, B)
est, v, w, nmults, nresamples = _onenormest_core(D, D.T, t, itmax)
return est
@pytest.mark.slow
def test_onenormest_linear_operator(self):
# Define a matrix through its product A B.
# Depending on the shapes of A and B,
# it could be easy to multiply this product by a small matrix,
# but it could be annoying to look at all of
# the entries of the product explicitly.
np.random.seed(1234)
n = 6000
k = 3
A = np.random.randn(n, k)
B = np.random.randn(k, n)
fast_estimate = self._help_product_norm_fast(A, B)
exact_value = self._help_product_norm_slow(A, B)
assert_(fast_estimate <= exact_value <= 3*fast_estimate,
'fast: %g\nexact:%g' % (fast_estimate, exact_value))
def test_returns(self):
np.random.seed(1234)
A = scipy.sparse.rand(50, 50, 0.1)
s0 = scipy.linalg.norm(A.todense(), 1)
s1, v = scipy.sparse.linalg.onenormest(A, compute_v=True)
s2, w = scipy.sparse.linalg.onenormest(A, compute_w=True)
s3, v2, w2 = scipy.sparse.linalg.onenormest(A, compute_w=True, compute_v=True)
assert_allclose(s1, s0, rtol=1e-9)
assert_allclose(np.linalg.norm(A.dot(v), 1), s0*np.linalg.norm(v, 1), rtol=1e-9)
assert_allclose(A.dot(v), w, rtol=1e-9)
class TestAlgorithm_2_2(object):
def test_randn_inv(self):
np.random.seed(1234)
n = 20
nsamples = 100
for i in range(nsamples):
# Choose integer t uniformly between 1 and 3 inclusive.
t = np.random.randint(1, 4)
# Choose n uniformly between 10 and 40 inclusive.
n = np.random.randint(10, 41)
# Sample the inverse of a matrix with random normal entries.
A = scipy.linalg.inv(np.random.randn(n, n))
# Compute the 1-norm bounds.
g, ind = _algorithm_2_2(A, A.T, t)
| 9,311 | 35.517647 | 88 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/tests/__init__.py | | 0 | 0 | 0 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/tests/test_expm_multiply.py |
"""Test functions for the sparse.linalg._expm_multiply module
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_, assert_equal
from scipy._lib._numpy_compat import suppress_warnings
from scipy.sparse import SparseEfficiencyWarning
import scipy.linalg
from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max,
_onenormest_matrix_power, expm_multiply, _expm_multiply_simple,
_expm_multiply_interval)
def less_than_or_close(a, b):
return np.allclose(a, b) or (a < b)
class TestExpmActionSimple(object):
"""
These tests do not consider the case of multiple time steps in one call.
"""
def test_theta_monotonicity(self):
pairs = sorted(_theta.items())
for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]):
assert_(theta_a < theta_b)
def test_p_max_default(self):
m_max = 55
expected_p_max = 8
observed_p_max = _compute_p_max(m_max)
assert_equal(observed_p_max, expected_p_max)
def test_p_max_range(self):
for m_max in range(1, 55+1):
p_max = _compute_p_max(m_max)
assert_(p_max*(p_max - 1) <= m_max + 1)
p_too_big = p_max + 1
assert_(p_too_big*(p_too_big - 1) > m_max + 1)
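        # e.g. m_max = 55 gives p_max = 8: 8*7 = 56 <= 56, while 9*8 = 72 > 56.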
def test_onenormest_matrix_power(self):
np.random.seed(1234)
n = 40
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
for p in range(4):
if not p:
M = np.identity(n)
else:
M = np.dot(M, A)
estimated = _onenormest_matrix_power(A, p)
exact = np.linalg.norm(M, 1)
assert_(less_than_or_close(estimated, exact))
assert_(less_than_or_close(exact, 3*estimated))
def test_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
observed = expm_multiply(A, B)
expected = np.dot(scipy.linalg.expm(A), B)
assert_allclose(observed, expected)
def test_matrix_vector_multiply(self):
np.random.seed(1234)
n = 40
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
v = np.random.randn(n)
observed = expm_multiply(A, v)
expected = np.dot(scipy.linalg.expm(A), v)
assert_allclose(observed, expected)
def test_scaled_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
for t in (0.2, 1.0, 1.5):
with np.errstate(invalid='ignore'):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
observed = _expm_multiply_simple(A, B, t=t)
expected = np.dot(scipy.linalg.expm(t*A), B)
assert_allclose(observed, expected)
def test_scaled_expm_multiply_single_timepoint(self):
np.random.seed(1234)
t = 0.1
n = 5
k = 2
A = np.random.randn(n, n)
B = np.random.randn(n, k)
observed = _expm_multiply_simple(A, B, t=t)
expected = scipy.linalg.expm(t*A).dot(B)
assert_allclose(observed, expected)
def test_sparse_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
A = scipy.sparse.rand(n, n, density=0.05)
B = np.random.randn(n, k)
observed = expm_multiply(A, B)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"splu requires CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve is more efficient when sparse b is in the CSC matrix format")
expected = scipy.linalg.expm(A).dot(B)
assert_allclose(observed, expected)
def test_complex(self):
A = np.array([
[1j, 1j],
[0, 1j]], dtype=complex)
B = np.array([1j, 1j])
observed = expm_multiply(A, B)
expected = np.array([
1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)),
1j * np.exp(1j)], dtype=complex)
assert_allclose(observed, expected)
class TestExpmActionInterval(object):
def test_sparse_expm_multiply_interval(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
n = 40
k = 3
endpoint = True
for num in (14, 13, 2):
A = scipy.sparse.rand(n, n, density=0.05)
B = np.random.randn(n, k)
v = np.random.randn(n)
for target in (B, v):
X = expm_multiply(A, target,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"splu requires CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve is more efficient when sparse b is in the CSC matrix format")
for solution, t in zip(X, samples):
assert_allclose(solution,
scipy.linalg.expm(t*A).dot(target))
def test_expm_multiply_interval_vector(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
endpoint = True
for num in (14, 13, 2):
for n in (1, 2, 5, 20, 40):
A = scipy.linalg.inv(np.random.randn(n, n))
v = np.random.randn(n)
X = expm_multiply(A, v,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(v))
def test_expm_multiply_interval_matrix(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
endpoint = True
for num in (14, 13, 2):
for n in (1, 2, 5, 20, 40):
for k in (1, 2):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
X = expm_multiply(A, B,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(B))
def test_sparse_expm_multiply_interval_dtypes(self):
# Test A & B int
A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
B = np.ones(5, dtype=int)
Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
# Test A complex, B int
A = scipy.sparse.diags(-1j*np.arange(5),format='csr', dtype=complex)
B = np.ones(5, dtype=int)
Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)),format='csr')
assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
# Test A int, B complex
A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
B = 1j*np.ones(5, dtype=complex)
Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
def test_expm_multiply_interval_status_0(self):
self._help_test_specific_expm_interval_status(0)
def test_expm_multiply_interval_status_1(self):
self._help_test_specific_expm_interval_status(1)
def test_expm_multiply_interval_status_2(self):
self._help_test_specific_expm_interval_status(2)
def _help_test_specific_expm_interval_status(self, target_status):
np.random.seed(1234)
start = 0.1
stop = 3.2
num = 13
endpoint = True
n = 5
k = 2
nrepeats = 10
nsuccesses = 0
for num in [14, 13, 2] * nrepeats:
A = np.random.randn(n, n)
B = np.random.randn(n, k)
status = _expm_multiply_interval(A, B,
start=start, stop=stop, num=num, endpoint=endpoint,
status_only=True)
if status == target_status:
X, status = _expm_multiply_interval(A, B,
start=start, stop=stop, num=num, endpoint=endpoint,
status_only=False)
assert_equal(X.shape, (num, n, k))
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(B))
nsuccesses += 1
if not nsuccesses:
msg = 'failed to find a status-' + str(target_status) + ' interval'
raise Exception(msg)
| 9,658 | 36.878431 | 101 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/lsqr.py |
"""Sparse Equations and Least Squares.
The original Fortran code was written by C. C. Paige and M. A. Saunders as
described in
C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
equations and sparse least squares, TOMS 8(1), 43--71 (1982).
C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
equations and least-squares problems, TOMS 8(2), 195--209 (1982).
It is licensed under the following BSD license:
Copyright (c) 2006, Systems Optimization Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Stanford University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The Fortran code was translated to Python for use in CVXOPT by Jeffery
Kline with contributions by Mridul Aanjaneya and Bob Myhill.
Adapted for SciPy by Stefan van der Walt.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lsqr']
import numpy as np
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
eps = np.finfo(np.float64).eps
def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
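
    Examples
    --------
    Illustrative check (not from the original reference): a 3-4-5 rotation
    gives exact values.

    >>> c, s, r = _sym_ortho(3.0, 4.0)
    >>> np.allclose([c, s, r], [0.6, 0.8, 5.0])
    True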
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r
def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8,
iter_lim=None, show=False, calc_var=False, x0=None):
"""Find the least-squares solution to a large, sparse, linear system
of equations.
The function solves ``Ax = b`` or ``min ||b - Ax||^2`` or
``min ||Ax - b||^2 + d^2 ||x||^2``.
The matrix A may be square or rectangular (over-determined or
under-determined), and may have any rank.
::
1. Unsymmetric equations -- solve A*x = b
2. Linear least squares -- solve A*x = b
in the least-squares sense
3. Damped least squares -- solve ( A )*x = ( b )
( damp*I ) ( 0 )
in the least-squares sense
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}
Representation of an m-by-n matrix. It is required that
the linear operator can produce ``Ax`` and ``A^T x``.
b : array_like, shape (m,)
Right-hand side vector ``b``.
damp : float
Damping coefficient.
atol, btol : float, optional
Stopping tolerances. If both are 1.0e-9 (say), the final
residual norm should be accurate to about 9 digits. (The
final x will usually have fewer correct digits, depending on
cond(A) and the size of damp.)
conlim : float, optional
Another stopping tolerance. lsqr terminates if an estimate of
``cond(A)`` exceeds `conlim`. For compatible systems ``Ax =
b``, `conlim` could be as large as 1.0e+12 (say). For
least-squares problems, conlim should be less than 1.0e+8.
Maximum precision can be obtained by setting ``atol = btol =
conlim = zero``, but the number of iterations may then be
excessive.
iter_lim : int, optional
Explicit limitation on number of iterations (for safety).
show : bool, optional
Display an iteration log.
calc_var : bool, optional
Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
x0 : array_like, shape (n,), optional
Initial guess of x, if None zeros are used.
.. versionadded:: 1.0.0
Returns
-------
x : ndarray of float
The final solution.
istop : int
Gives the reason for termination.
1 means x is an approximate solution to Ax = b.
2 means x approximately solves the least-squares problem.
itn : int
Iteration number upon termination.
r1norm : float
``norm(r)``, where ``r = b - Ax``.
r2norm : float
``sqrt( norm(r)^2 + damp^2 * norm(x)^2 )``. Equal to `r1norm` if
``damp == 0``.
anorm : float
Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
acond : float
Estimate of ``cond(Abar)``.
arnorm : float
Estimate of ``norm(A'*r - damp^2*x)``.
xnorm : float
``norm(x)``
var : ndarray of float
If ``calc_var`` is True, estimates all diagonals of
``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
damp^2*I)^{-1}``. This is well defined if A has full column
rank or ``damp > 0``. (Not sure what var means if ``rank(A)
< n`` and ``damp = 0.``)
Notes
-----
LSQR uses an iterative method to approximate the solution. The
number of iterations required to reach a certain accuracy depends
strongly on the scaling of the problem. Poor scaling of the rows
or columns of A should therefore be avoided where possible.
For example, in problem 1 the solution is unaltered by
row-scaling. If a row of A is very small or large compared to
the other rows of A, the corresponding row of ( A b ) should be
scaled up or down.
In problems 1 and 2, the solution x is easily recovered
following column-scaling. Unless better information is known,
the nonzero columns of A should be scaled so that they all have
the same Euclidean norm (e.g., 1.0).
In problem 3, there is no freedom to re-scale if damp is
nonzero. However, the value of damp should be assigned only
after attention has been paid to the scaling of A.
The parameter damp is intended to help regularize
ill-conditioned systems, by preventing the true solution from
being very large. Another aid to regularization is provided by
the parameter acond, which may be used to terminate iterations
before the computed solution becomes very large.
If some initial estimate ``x0`` is known and if ``damp == 0``,
one could proceed as follows:
1. Compute a residual vector ``r0 = b - A*x0``.
2. Use LSQR to solve the system ``A*dx = r0``.
3. Add the correction dx to obtain a final solution ``x = x0 + dx``.
This requires that ``x0`` be available before and after the call
to LSQR. To judge the benefits, suppose LSQR takes k1 iterations
to solve A*x = b and k2 iterations to solve A*dx = r0.
If x0 is "good", norm(r0) will be smaller than norm(b).
If the same stopping tolerances atol and btol are used for each
system, k1 and k2 will be similar, but the final solution x0 + dx
should be more accurate. The only way to reduce the total work
is to use a larger stopping tolerance for the second system.
If some value btol is suitable for A*x = b, the larger value
btol*norm(b)/norm(r0) should be suitable for A*dx = r0.
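
    A minimal sketch of this warm-start procedure (illustrative only; ``A``,
    ``b`` and ``x0`` are assumed to be defined, and ``damp == 0``)::

        r0 = b - A.dot(x0)
        dx = lsqr(A, r0)[0]
        x = x0 + dx
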
Preconditioning is another way to reduce the number of iterations.
If it is possible to solve a related system ``M*x = b``
efficiently, where M approximates A in some helpful way (e.g. M -
A has low rank or its elements are small relative to those of A),
LSQR may converge more rapidly on the system ``A*M(inverse)*z =
b``, after which x can be recovered by solving M*x = z.
If A is symmetric, LSQR should not be used!
Alternatives are the symmetric conjugate-gradient method (cg)
and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that
applies to any symmetric A and will converge more rapidly than
LSQR. If A is positive definite, there are other implementations
of symmetric cg that require slightly less work per iteration than
SYMMLQ (but will take the same number of iterations).
References
----------
.. [1] C. C. Paige and M. A. Saunders (1982a).
"LSQR: An algorithm for sparse linear equations and
sparse least squares", ACM TOMS 8(1), 43-71.
.. [2] C. C. Paige and M. A. Saunders (1982b).
"Algorithm 583. LSQR: Sparse linear equations and least
squares problems", ACM TOMS 8(2), 195-209.
.. [3] M. A. Saunders (1995). "Solution of sparse rectangular
systems using LSQR and CRAIG", BIT 35, 588-604.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import lsqr
>>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
The first example has the trivial solution `[0, 0]`
>>> b = np.array([0., 0., 0.], dtype=float)
>>> x, istop, itn, normr = lsqr(A, b)[:4]
The exact solution is x = 0
>>> istop
0
>>> x
array([ 0., 0.])
    The returned stopping code `istop=0` indicates that a vector of zeros was
    found as a solution. The returned solution `x` indeed contains `[0., 0.]`.
The next example has a non-trivial solution:
>>> b = np.array([1., 0., -1.], dtype=float)
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
>>> istop
1
>>> x
array([ 1., -1.])
>>> itn
1
>>> r1norm
4.440892098500627e-16
As indicated by `istop=1`, `lsqr` found a solution obeying the tolerance
limits. The given solution `[1., -1.]` obviously solves the equation. The
    remaining return values include information about the number of iterations
    (`itn=1`) and the norm of the residual, i.e. the difference between the
    left- and right-hand sides of the solved equation.
The final example demonstrates the behavior in the case where there is no
solution for the equation:
>>> b = np.array([1., 0.01, -1.], dtype=float)
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
>>> istop
2
>>> x
array([ 1.00333333, -0.99666667])
>>> A.dot(x)-b
array([ 0.00333333, -0.00333333, 0.00333333])
>>> r1norm
0.005773502691896255
`istop` indicates that the system is inconsistent and thus `x` is rather an
approximate solution to the corresponding least-squares problem. `r1norm`
contains the norm of the minimal residual that was found.
"""
A = aslinearoperator(A)
b = np.atleast_1d(b)
if b.ndim > 1:
b = b.squeeze()
m, n = A.shape
if iter_lim is None:
iter_lim = 2 * n
var = np.zeros(n)
msg = ('The exact solution is x = 0 ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
if show:
print(' ')
print('LSQR Least-squares solution of Ax = b')
str1 = 'The matrix A has %8g rows and %8g cols' % (m, n)
str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var)
str3 = 'atol = %8.2e conlim = %8.2e' % (atol, conlim)
str4 = 'btol = %8.2e iter_lim = %8g' % (btol, iter_lim)
print(str1)
print(str2)
print(str3)
print(str4)
itn = 0
istop = 0
ctol = 0
if conlim > 0:
ctol = 1/conlim
anorm = 0
acond = 0
dampsq = damp**2
ddnorm = 0
res2 = 0
xnorm = 0
xxnorm = 0
z = 0
cs2 = -1
sn2 = 0
"""
Set up the first vectors u and v for the bidiagonalization.
These satisfy beta*u = b - A*x, alfa*v = A'*u.
"""
u = b
bnorm = np.linalg.norm(b)
if x0 is None:
x = np.zeros(n)
beta = bnorm.copy()
else:
x = np.asarray(x0)
u = u - A.matvec(x)
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
v = A.rmatvec(u)
alfa = np.linalg.norm(v)
else:
v = x.copy()
alfa = 0
if alfa > 0:
v = (1/alfa) * v
w = v.copy()
rhobar = alfa
phibar = beta
rnorm = beta
r1norm = rnorm
r2norm = rnorm
# Reverse the order here from the original matlab code because
# there was an error on return when arnorm==0
arnorm = alfa * beta
if arnorm == 0:
print(msg[0])
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
head1 = ' Itn x[0] r1norm r2norm '
head2 = ' Compatible LS Norm A Cond A'
if show:
print(' ')
print(head1, head2)
test1 = 1
test2 = alfa / beta
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
str3 = ' %8.1e %8.1e' % (test1, test2)
print(str1, str2, str3)
# Main iteration loop.
while itn < iter_lim:
itn = itn + 1
"""
% Perform the next step of the bidiagonalization to obtain the
% next beta, u, alfa, v. These satisfy the relations
        % beta*u = A*v - alfa*u,
% alfa*v = A'*u - beta*v.
"""
u = A.matvec(v) - alfa * u
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
v = A.rmatvec(u) - beta * v
alfa = np.linalg.norm(v)
if alfa > 0:
v = (1 / alfa) * v
# Use a plane rotation to eliminate the damping parameter.
# This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
rhobar1 = sqrt(rhobar**2 + damp**2)
cs1 = rhobar / rhobar1
sn1 = damp / rhobar1
psi = sn1 * phibar
phibar = cs1 * phibar
# Use a plane rotation to eliminate the subdiagonal element (beta)
# of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
cs, sn, rho = _sym_ortho(rhobar1, beta)
theta = sn * alfa
rhobar = -cs * alfa
phi = cs * phibar
phibar = sn * phibar
tau = sn * phi
# Update x and w.
t1 = phi / rho
t2 = -theta / rho
dk = (1 / rho) * w
x = x + t1 * w
w = v + t2 * w
ddnorm = ddnorm + np.linalg.norm(dk)**2
if calc_var:
var = var + dk**2
# Use a plane rotation on the right to eliminate the
# super-diagonal element (theta) of the upper-bidiagonal matrix.
# Then use the result to estimate norm(x).
delta = sn2 * rho
gambar = -cs2 * rho
rhs = phi - delta * z
zbar = rhs / gambar
xnorm = sqrt(xxnorm + zbar**2)
gamma = sqrt(gambar**2 + theta**2)
cs2 = gambar / gamma
sn2 = theta / gamma
z = rhs / gamma
xxnorm = xxnorm + z**2
# Test for convergence.
# First, estimate the condition of the matrix Abar,
# and the norms of rbar and Abar'rbar.
acond = anorm * sqrt(ddnorm)
res1 = phibar**2
res2 = res2 + psi**2
rnorm = sqrt(res1 + res2)
arnorm = alfa * abs(tau)
# Distinguish between
# r1norm = ||b - Ax|| and
# r2norm = rnorm in current code
# = sqrt(r1norm^2 + damp^2*||x||^2).
# Estimate r1norm from
# r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
# Although there is cancellation, it might be accurate enough.
r1sq = rnorm**2 - dampsq * xxnorm
r1norm = sqrt(abs(r1sq))
if r1sq < 0:
r1norm = -r1norm
r2norm = rnorm
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = rnorm / bnorm
test2 = arnorm / (anorm * rnorm + eps)
test3 = 1 / (acond + eps)
t1 = test1 / (1 + anorm * xnorm / bnorm)
rtol = btol + atol * anorm * xnorm / bnorm
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
# The effect is equivalent to the normal tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= iter_lim:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= iter_lim-10:
prnt = True
# if itn%10 == 0: prnt = True
if test3 <= 2*ctol:
prnt = True
if test2 <= 10*atol:
prnt = True
if test1 <= 10*rtol:
prnt = True
if istop != 0:
prnt = True
if prnt:
if show:
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
str3 = ' %8.1e %8.1e' % (test1, test2)
str4 = ' %8.1e %8.1e' % (anorm, acond)
print(str1, str2, str3, str4)
if istop != 0:
break
# End of iteration loop.
# Print the stopping condition.
if show:
print(' ')
print('LSQR finished')
print(msg[istop])
print(' ')
str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm)
str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm)
str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm)
str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm)
print(str1 + ' ' + str2)
print(str3 + ' ' + str4)
print(' ')
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
| 19,988 | 34.130053 | 84 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/setup.py |
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
from scipy._build_utils import get_g77_abi_wrappers
config = Configuration('isolve',parent_package,top_path)
lapack_opt = get_info('lapack_opt')
if not lapack_opt:
raise NotFoundError('no lapack/blas resources found')
# iterative methods
methods = ['BiCGREVCOM.f.src',
'BiCGSTABREVCOM.f.src',
'CGREVCOM.f.src',
'CGSREVCOM.f.src',
# 'ChebyREVCOM.f.src',
'GMRESREVCOM.f.src',
# 'JacobiREVCOM.f.src',
'QMRREVCOM.f.src',
# 'SORREVCOM.f.src'
]
Util = ['getbreak.f.src']
sources = Util + methods + ['_iterative.pyf.src']
sources = [join('iterative', x) for x in sources]
sources += get_g77_abi_wrappers(lapack_opt)
config.add_extension('_iterative',
sources=sources,
extra_info=lapack_opt)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 1,368 | 27.520833 | 67 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/iterative.py |
"""Iterative methods for solving linear systems"""
from __future__ import division, print_function, absolute_import
__all__ = ['bicg','bicgstab','cg','cgs','gmres','qmr']
import warnings
import numpy as np
from . import _iterative
from scipy.sparse.linalg.interface import LinearOperator
from scipy._lib.decorator import decorator
from .utils import make_system
from scipy._lib._util import _aligned_zeros
from scipy._lib._threadsafety import non_reentrant
_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'}
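# e.g. a float64 ('d') system dispatches to the 'd'-prefixed
# reverse-communication routine (dcgrevcom below), while a complex128 ('D')
# system dispatches to the 'z'-prefixed one.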
# Part of the docstring common to all iterative solvers
common_doc1 = \
"""
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}"""
common_doc2 = \
"""b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is ``'legacy'``, which emulates
a different legacy behavior.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
"""
def _stoptest(residual, atol):
"""
Successful termination condition for the solvers.
"""
resid = np.linalg.norm(residual)
if resid <= atol:
return resid, 1
else:
return resid, 0
def _get_atol(tol, atol, bnrm2, get_residual, routine_name):
"""
Parse arguments for absolute tolerance in termination condition.
Parameters
----------
tol, atol : object
The arguments passed into the solver routine by user.
bnrm2 : float
2-norm of the rhs vector.
get_residual : callable
Callable ``get_residual()`` that returns the initial value of
the residual.
routine_name : str
Name of the routine.
"""
if atol is None:
warnings.warn("scipy.sparse.linalg.{name} called without specifying `atol`. "
"The default value will be changed in a future release. "
"For compatibility, specify a value for `atol` explicitly, e.g., "
"``{name}(..., atol=0)``, or to retain the old behavior "
"``{name}(..., atol='legacy')``".format(name=routine_name),
category=DeprecationWarning, stacklevel=4)
atol = 'legacy'
tol = float(tol)
if atol == 'legacy':
# emulate old legacy behavior
resid = get_residual()
if resid <= tol:
return 'exit'
if bnrm2 == 0:
return tol
else:
return tol * float(bnrm2)
else:
return max(float(atol), tol * float(bnrm2))
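# Behavior sketch (illustrative numbers): with tol=1e-5, atol=1e-8 and
# norm(b) == 2.0, the effective tolerance is max(1e-8, 1e-5 * 2.0) == 2e-5,
# i.e. the documented condition norm(residual) <= max(tol*norm(b), atol).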
def set_docstring(header, Ainfo, footer='', atol_default='0'):
def combine(fn):
fn.__doc__ = '\n'.join((header, common_doc1,
' ' + Ainfo.replace('\n', '\n '),
common_doc2, footer))
return fn
return combine
@set_docstring('Use BIConjugate Gradient iteration to solve ``Ax = b``.',
'The real or complex N-by-N matrix of the linear system.\n'
'It is required that the linear operator can produce\n'
'``Ax`` and ``A^T x``.')
@non_reentrant()
def bicg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A,M,x,b,postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec, rmatvec = A.matvec, A.rmatvec
psolve, rpsolve = M.matvec, M.rmatvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'bicgrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicg')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(6*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice2] *= sclr2
work[slice2] += sclr1*rmatvec(work[slice1])
elif (ijob == 3):
work[slice1] = psolve(work[slice2])
elif (ijob == 4):
work[slice1] = rpsolve(work[slice2])
elif (ijob == 5):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 6):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@set_docstring('Use BIConjugate Gradient STABilized iteration to solve '
'``Ax = b``.',
'The real or complex N-by-N matrix of the linear system.')
@non_reentrant()
def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'bicgstabrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicgstab')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(7*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@set_docstring('Use Conjugate Gradient iteration to solve ``Ax = b``.',
'The real or complex N-by-N matrix of the linear system.\n'
'``A`` must represent a hermitian, positive definite matrix.')
@non_reentrant()
def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'cgrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cg')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(4*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
if info == 1 and iter_ > 1:
# recompute residual and recheck, to avoid
# accumulating rounding error
work[slice1] = b - matvec(x)
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@set_docstring('Use Conjugate Gradient Squared iteration to solve ``Ax = b``.',
'The real-valued N-by-N matrix of the linear system.')
@non_reentrant()
def cgs(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'cgsrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cgs')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(7*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
if info == 1 and iter_ > 1:
# recompute residual and recheck, to avoid
# accumulating rounding error
work[slice1] = b - matvec(x)
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info == -10:
# termination due to breakdown: check for convergence
resid, ok = _stoptest(b - matvec(x), atol)
if ok:
info = 0
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@non_reentrant()
def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None, callback=None,
restrt=None, atol=None):
"""
Use Generalized Minimal RESidual iteration to solve ``Ax = b``.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real or complex N-by-N matrix of the linear system.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : int
Provides convergence information:
* 0 : successful exit
* >0 : convergence to tolerance not achieved, number of iterations
* <0 : illegal input or breakdown
    Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution (a vector of zeros by default).
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is ``'legacy'``, which emulates
a different legacy behavior.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
restart : int, optional
Number of iterations between restarts. Larger values increase
iteration cost, but may be necessary for convergence.
Default is 20.
maxiter : int, optional
Maximum number of iterations (restart cycles). Iteration will stop
after maxiter steps even if the specified tolerance has not been
achieved.
M : {sparse matrix, dense matrix, LinearOperator}
Inverse of the preconditioner of A. M should approximate the
inverse of A and be easy to solve for (see Notes). Effective
preconditioning dramatically improves the rate of convergence,
which implies that fewer iterations are needed to reach a given
error tolerance. By default, no preconditioner is used.
callback : function
User-supplied function to call after each iteration. It is called
as callback(rk), where rk is the current residual vector.
restrt : int, optional
DEPRECATED - use `restart` instead.
See Also
--------
LinearOperator
Notes
-----
A preconditioner, P, is chosen such that P is close to A but easy to solve
for. The preconditioner parameter required by this routine is
``M = P^-1``. The inverse should preferably not be calculated
explicitly. Rather, use the following template to produce M::
# Construct a linear operator that computes P^-1 * x.
import scipy.sparse.linalg as spla
M_x = lambda x: spla.spsolve(P, x)
M = spla.LinearOperator((n, n), M_x)
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import gmres
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = gmres(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
"""
# Change 'restrt' keyword to 'restart'
if restrt is None:
restrt = restart
elif restart is not None:
raise ValueError("Cannot specify both restart and restrt keywords. "
"Preferably use 'restart' only.")
A, M, x, b,postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
if restrt is None:
restrt = 20
restrt = min(restrt, n)
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'gmresrevcom')
bnrm2 = np.linalg.norm(b)
Mb_nrm2 = np.linalg.norm(psolve(b))
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, bnrm2, get_residual, 'gmres')
if atol == 'exit':
return postprocess(x), 0
if bnrm2 == 0:
return postprocess(b), 0
# Tolerance passed to GMRESREVCOM applies to the inner iteration
# and deals with the left-preconditioned residual.
ptol_max_factor = 1.0
ptol = Mb_nrm2 * min(ptol_max_factor, atol / bnrm2)
resid = np.nan
presid = np.nan
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros((6+restrt)*n,dtype=x.dtype)
work2 = _aligned_zeros((restrt+1)*(2*restrt+2),dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
old_ijob = ijob
first_pass = True
resid_ready = False
iter_num = 1
while True:
x, iter_, presid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, restrt, work, work2, iter_, presid, info, ndx1, ndx2, ijob, ptol)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1): # gmres success, update last residual
if resid_ready and callback is not None:
callback(presid / bnrm2)
resid_ready = False
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
if not first_pass and old_ijob == 3:
resid_ready = True
first_pass = False
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
if resid_ready and callback is not None:
callback(presid / bnrm2)
resid_ready = False
iter_num = iter_num+1
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
# Inner loop tolerance control
if info or presid > ptol:
ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
else:
# Inner loop tolerance OK, but outer loop not.
ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
if resid != 0:
ptol = presid * min(ptol_max_factor, atol / resid)
else:
ptol = presid * ptol_max_factor
old_ijob = ijob
ijob = 2
if iter_num > maxiter:
info = maxiter
break
if info >= 0 and not (resid <= atol):
# info isn't set appropriately otherwise
info = maxiter
return postprocess(x), info
@non_reentrant()
def qmr(A, b, x0=None, tol=1e-5, maxiter=None, M1=None, M2=None, callback=None,
atol=None):
"""Use Quasi-Minimal Residual iteration to solve ``Ax = b``.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real-valued N-by-N matrix of the linear system.
It is required that the linear operator can produce
``Ax`` and ``A^T x``.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is ``'legacy'``, which emulates
a different legacy behavior.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M1 : {sparse matrix, dense matrix, LinearOperator}
Left preconditioner for A.
M2 : {sparse matrix, dense matrix, LinearOperator}
Right preconditioner for A. Used together with the left
        preconditioner M1. The matrix M1*A*M2 should be better
        conditioned than A alone.
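        Conceptually the pair solves the preconditioned system
        ``M1*A*M2*y = M1*b`` and recovers ``x = M2*y`` (an illustrative
        description rather than the literal implementation).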
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
See Also
--------
LinearOperator
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import qmr
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = qmr(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
"""
A_ = A
A, M, x, b, postprocess = make_system(A, None, x0, b)
if M1 is None and M2 is None:
if hasattr(A_,'psolve'):
def left_psolve(b):
return A_.psolve(b,'left')
def right_psolve(b):
return A_.psolve(b,'right')
def left_rpsolve(b):
return A_.rpsolve(b,'left')
def right_rpsolve(b):
return A_.rpsolve(b,'right')
M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve)
M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve)
else:
def id(b):
return b
M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)
n = len(b)
if maxiter is None:
maxiter = n*10
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'qmrrevcom')
get_residual = lambda: np.linalg.norm(A.matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(11*n,x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*A.matvec(work[slice1])
elif (ijob == 2):
work[slice2] *= sclr2
work[slice2] += sclr1*A.rmatvec(work[slice1])
elif (ijob == 3):
work[slice1] = M1.matvec(work[slice2])
elif (ijob == 4):
work[slice1] = M2.matvec(work[slice2])
elif (ijob == 5):
work[slice1] = M1.rmatvec(work[slice2])
elif (ijob == 6):
work[slice1] = M2.rmatvec(work[slice2])
elif (ijob == 7):
work[slice2] *= sclr2
work[slice2] += sclr1*A.matvec(x)
elif (ijob == 8):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
| 24,841 | 31.903311 | 89 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/utils.py |
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = []
from warnings import warn
from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator, \
IdentityOperator
_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',
('f','D'):'D', ('d','f'):'d', ('d','d'):'d',
('d','F'):'D', ('d','D'):'D', ('F','f'):'F',
('F','d'):'D', ('F','F'):'F', ('F','D'):'D',
('D','f'):'D', ('D','d'):'D', ('D','F'):'D',
('D','D'):'D'}
def coerce(x,y):
if x not in 'fdFD':
x = 'd'
if y not in 'fdFD':
y = 'd'
return _coerce_rules[x,y]
def id(x):
return x
def make_system(A, M, x0, b):
"""Make a linear system Ax=b
Parameters
----------
A : LinearOperator
sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
preconditioner
sparse or dense matrix (or any valid input to aslinearoperator)
x0 : {array_like, None}
initial guess to iterative method
b : array_like
right hand side
Returns
-------
(A, M, x, b, postprocess)
A : LinearOperator
matrix of the linear system
M : LinearOperator
preconditioner
x : rank 1 ndarray
initial guess
b : rank 1 ndarray
right hand side
postprocess : function
converts the solution vector to the appropriate
type and dimensions (e.g. (N,1) matrix)
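
    Examples
    --------
    An illustrative sketch (this helper is internal to the iterative
    solvers; the small dense system below is arbitrary):

    >>> import numpy as np
    >>> A = np.array([[2., 0.], [0., 3.]])
    >>> b = np.array([1., 1.])
    >>> A, M, x, b, postprocess = make_system(A, None, None, b)
    >>> x.shape, np.allclose(x, 0)
    ((2,), True)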
"""
A_ = A
A = aslinearoperator(A)
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix, but got shape=%s' % (A.shape,))
N = A.shape[0]
b = asanyarray(b)
if not (b.shape == (N,1) or b.shape == (N,)):
raise ValueError('A and b have incompatible dimensions')
if b.dtype.char not in 'fdFD':
b = b.astype('d') # upcast non-FP types to double
def postprocess(x):
if isinstance(b,matrix):
x = asmatrix(x)
return x.reshape(b.shape)
if hasattr(A,'dtype'):
xtype = A.dtype.char
else:
xtype = A.matvec(b).dtype.char
xtype = coerce(xtype, b.dtype.char)
b = asarray(b,dtype=xtype) # make b the same type as x
b = b.ravel()
if x0 is None:
x = zeros(N, dtype=xtype)
else:
x = array(x0, dtype=xtype)
if not (x.shape == (N,1) or x.shape == (N,)):
raise ValueError('A and x have incompatible dimensions')
x = x.ravel()
# process preconditioner
if M is None:
if hasattr(A_,'psolve'):
psolve = A_.psolve
else:
psolve = id
if hasattr(A_,'rpsolve'):
rpsolve = A_.rpsolve
else:
rpsolve = id
if psolve is id and rpsolve is id:
M = IdentityOperator(shape=A.shape, dtype=A.dtype)
else:
M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
dtype=A.dtype)
else:
M = aslinearoperator(M)
if A.shape != M.shape:
raise ValueError('matrix and preconditioner have different shapes')
return A, M, x, b, postprocess
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/minres.py
from __future__ import division, print_function, absolute_import
from numpy import sqrt, inner, finfo, zeros
from numpy.linalg import norm
from .utils import make_system
__all__ = ['minres']
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
M=None, callback=None, show=False, check=False):
"""
Use MINimum RESidual iteration to solve Ax=b
MINRES minimizes norm(A*x - b) for a real symmetric matrix A. Unlike
the Conjugate Gradient method, A can be indefinite or singular.
If shift != 0 then the method solves (A - shift*I)x = b
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real symmetric N-by-N matrix of the linear system
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution.
tol : float
Tolerance to achieve. The algorithm terminates when the relative
residual is below `tol`.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
References
----------
Solution of sparse indefinite systems of linear equations,
C. C. Paige and M. A. Saunders (1975),
SIAM J. Numer. Anal. 12(4), pp. 617-629.
https://web.stanford.edu/group/SOL/software/minres/
This file is a translation of the following MATLAB implementation:
https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip
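
    Examples
    --------
    A minimal sketch, analogous to the examples of the other solvers in
    this module (the symmetric test matrix below is arbitrary):

    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import minres
    >>> A = csc_matrix([[3, 2, 0], [2, -1, 0], [0, 0, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = minres(A, b, tol=1e-9)
    >>> print(exitCode) # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True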
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
matvec = A.matvec
psolve = M.matvec
first = 'Enter minres. '
last = 'Exit minres. '
n = A.shape[0]
if maxiter is None:
maxiter = 5 * n
msg = [' beta2 = 0. If M = I, b and x are eigenvectors ', # -1
' beta1 = 0. The exact solution is x = 0 ', # 0
' A solution to Ax = b was found, given rtol ', # 1
' A least-squares solution was found, given rtol ', # 2
' Reasonable accuracy achieved, given eps ', # 3
' x has converged to an eigenvector ', # 4
' acond has exceeded 0.1/eps ', # 5
' The iteration limit was reached ', # 6
' A does not define a symmetric matrix ', # 7
' M does not define a symmetric matrix ', # 8
' M does not define a pos-def preconditioner '] # 9
if show:
print(first + 'Solution of symmetric Ax = b')
print(first + 'n = %3g shift = %23.14e' % (n,shift))
print(first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol))
print()
istop = 0
itn = 0
Anorm = 0
Acond = 0
rnorm = 0
ynorm = 0
xtype = x.dtype
eps = finfo(xtype).eps
    # Set up y and v for the first Lanczos vector v1.
    # y = beta1 P' v1, where P = C**(-1).
    # v is really P' v1.
    # Start from the initial guess prepared by make_system (x0, or zeros
    # when no guess was given), so that a supplied x0 is actually used.
    r1 = b - matvec(x)
    y = psolve(r1)
    beta1 = inner(r1, y)
if beta1 < 0:
raise ValueError('indefinite preconditioner')
elif beta1 == 0:
return (postprocess(x), 0)
beta1 = sqrt(beta1)
if check:
# are these too strict?
# see if A is symmetric
w = matvec(y)
r2 = matvec(w)
s = inner(w,w)
t = inner(y,r2)
z = abs(s - t)
epsa = (s + eps) * eps**(1.0/3.0)
if z > epsa:
raise ValueError('non-symmetric matrix')
# see if M is symmetric
r2 = psolve(y)
s = inner(y,y)
t = inner(r1,r2)
z = abs(s - t)
epsa = (s + eps) * eps**(1.0/3.0)
if z > epsa:
raise ValueError('non-symmetric preconditioner')
# Initialize other quantities
oldb = 0
beta = beta1
dbar = 0
epsln = 0
qrnorm = beta1
phibar = beta1
rhs1 = beta1
rhs2 = 0
tnorm2 = 0
ynorm2 = 0
cs = -1
sn = 0
w = zeros(n, dtype=xtype)
w2 = zeros(n, dtype=xtype)
r2 = r1
if show:
print()
print()
print(' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|')
while itn < maxiter:
itn += 1
s = 1.0/beta
v = s*y
y = matvec(v)
y = y - shift * v
if itn >= 2:
y = y - (beta/oldb)*r1
alfa = inner(v,y)
y = y - (alfa/beta)*r2
r1 = r2
r2 = y
y = psolve(r2)
oldb = beta
beta = inner(r2,y)
if beta < 0:
raise ValueError('non-symmetric matrix')
beta = sqrt(beta)
tnorm2 += alfa**2 + oldb**2 + beta**2
if itn == 1:
if beta/beta1 <= 10*eps:
istop = -1 # Terminate later
# tnorm2 = alfa**2 ??
gmax = abs(alfa)
gmin = gmax
# Apply previous rotation Qk-1 to get
# [deltak epslnk+1] = [cs sn][dbark 0 ]
# [gbar k dbar k+1] [sn -cs][alfak betak+1].
oldeps = epsln
delta = cs * dbar + sn * alfa # delta1 = 0 deltak
gbar = sn * dbar - cs * alfa # gbar 1 = alfa1 gbar k
epsln = sn * beta # epsln2 = 0 epslnk+1
dbar = - cs * beta # dbar 2 = beta2 dbar k+1
root = norm([gbar, dbar])
Arnorm = phibar * root
# Compute the next plane rotation Qk
gamma = norm([gbar, beta]) # gammak
gamma = max(gamma, eps)
cs = gbar / gamma # ck
sn = beta / gamma # sk
phi = cs * phibar # phik
phibar = sn * phibar # phibark+1
# Update x.
denom = 1.0/gamma
w1 = w2
w2 = w
w = (v - oldeps*w1 - delta*w2) * denom
x = x + phi*w
# Go round again.
gmax = max(gmax, gamma)
gmin = min(gmin, gamma)
z = rhs1 / gamma
ynorm2 = z**2 + ynorm2
rhs1 = rhs2 - delta*z
rhs2 = - epsln*z
# Estimate various norms and test for convergence.
Anorm = sqrt(tnorm2)
ynorm = sqrt(ynorm2)
epsa = Anorm * eps
epsx = Anorm * ynorm * eps
epsr = Anorm * ynorm * tol
diag = gbar
if diag == 0:
diag = epsa
qrnorm = phibar
rnorm = qrnorm
test1 = rnorm / (Anorm*ynorm) # ||r|| / (||A|| ||x||)
test2 = root / Anorm # ||Ar|| / (||A|| ||r||)
# Estimate cond(A).
# In this version we look at the diagonals of R in the
# factorization of the lower Hessenberg matrix, Q * H = R,
# where H is the tridiagonal matrix from Lanczos with one
# extra row, beta(k+1) e_k^T.
Acond = gmax/gmin
# See if any of the stopping criteria are satisfied.
# In rare cases, istop is already -1 from above (Abar = const*I).
if istop == 0:
t1 = 1 + test1 # These tests work if tol < eps
t2 = 1 + test2
if t2 <= 1:
istop = 2
if t1 <= 1:
istop = 1
if itn >= maxiter:
istop = 6
if Acond >= 0.1/eps:
istop = 4
if epsx >= beta:
istop = 3
# if rnorm <= epsx : istop = 2
# if rnorm <= epsr : istop = 1
if test2 <= tol:
istop = 2
if test1 <= tol:
istop = 1
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= maxiter-10:
prnt = True
if itn % 10 == 0:
prnt = True
if qrnorm <= 10*epsx:
prnt = True
if qrnorm <= 10*epsr:
prnt = True
if Acond <= 1e-2/eps:
prnt = True
if istop != 0:
prnt = True
if show and prnt:
str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1)
str2 = ' %10.3e' % (test2,)
str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm)
print(str1 + str2 + str3)
if itn % 10 == 0:
print()
if callback is not None:
callback(x)
if istop != 0:
break # TODO check this
if show:
print()
print(last + ' istop = %3g itn =%5g' % (istop,itn))
print(last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond))
print(last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm))
print(last + ' Arnorm = %12.4e' % (Arnorm,))
print(last + msg[istop+1])
if istop == 6:
info = maxiter
else:
info = 0
return (postprocess(x),info)
if __name__ == '__main__':
    from numpy import ones, arange
    from scipy.linalg import norm
    from scipy.sparse import spdiags

    n = 10
    residuals = []

    def cb(x):
        residuals.append(norm(b - A*x))

    # A = poisson((10,),format='csr')
    # Diagonal test matrix with a matching diagonal (Jacobi) preconditioner.
    A = spdiags([arange(1, n+1, dtype=float)], [0], n, n, format='csr')
    M = spdiags([1.0/arange(1, n+1, dtype=float)], [0], n, n, format='csr')
    A.psolve = M.matvec
    # Nonzero right-hand side, so the iteration actually runs instead of
    # returning immediately on a zero residual.
    b = ones(A.shape[0])
    x, info = minres(A, b, tol=1e-12, maxiter=None, callback=cb)
    # x = cg(A, b, x0=b, tol=1e-12, maxiter=None, callback=cb)[0]
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/lgmres.py
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as Scipy.
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.linalg import LinAlgError
from scipy._lib.six import xrange
from scipy.linalg import get_blas_funcs, get_lapack_funcs
from .utils import make_system
from ._gcrotmk import _fgmres
__all__ = ['lgmres']
def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True,
prepend_outer_v=False, atol=None):
"""
Solve a matrix equation using the LGMRES algorithm.
The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems
in the convergence in restarted GMRES, and often converges in fewer
iterations.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real or complex N-by-N matrix of the linear system.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
x0 : {array, matrix}
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is `tol`.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
maxiter : int, optional
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}, optional
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function, optional
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
inner_m : int, optional
Number of inner GMRES iterations per each outer iteration.
outer_k : int, optional
Number of vectors to carry between inner GMRES iterations.
According to [1]_, good values are in the range of 1...3.
However, note that if you want to use the additional vectors to
accelerate solving multiple similar problems, larger values may
be beneficial.
outer_v : list of tuples, optional
List containing tuples ``(v, Av)`` of vectors and corresponding
matrix-vector products, used to augment the Krylov subspace, and
carried between inner GMRES iterations. The element ``Av`` can
be `None` if the matrix-vector product should be re-evaluated.
This parameter is modified in-place by `lgmres`, and can be used
to pass "guess" vectors in and out of the algorithm when solving
similar problems.
store_outer_Av : bool, optional
Whether LGMRES should store also A*v in addition to vectors `v`
in the `outer_v` list. Default is True.
prepend_outer_v : bool, optional
Whether to put outer_v augmentation vectors before Krylov iterates.
In standard LGMRES, prepend_outer_v=False.
Returns
-------
x : array or matrix
The converged solution.
info : int
Provides convergence information:
- 0 : successful exit
- >0 : convergence to tolerance not achieved, number of iterations
- <0 : illegal input or breakdown
Notes
-----
The LGMRES algorithm [1]_ [2]_ is designed to avoid the
slowing of convergence in restarted GMRES, due to alternating
    residual vectors. In practice it often outperforms GMRES(m) of
    comparable memory requirements by some measure, or at least is not
    much worse.
    Another advantage of this algorithm is that you can supply it with
'guess' vectors in the `outer_v` argument that augment the Krylov
subspace. If the solution lies close to the span of these vectors,
the algorithm converges faster. This can be useful if several very
similar matrices need to be inverted one after another, such as in
Newton-Krylov iteration where the Jacobian matrix often changes
little in the nonlinear steps.
References
----------
.. [1] A.H. Baker and E.R. Jessup and T. Manteuffel, "A Technique for
Accelerating the Convergence of Restarted GMRES", SIAM J. Matrix
Anal. Appl. 26, 962 (2005).
.. [2] A.H. Baker, "On Improving the Performance of the Linear Solver
restarted GMRES", PhD thesis, University of Colorado (2003).
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import lgmres
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = lgmres(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
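
    The ``outer_v`` list can be carried between calls, reusing the
    augmentation vectors when solving a sequence of similar systems.
    An illustrative sketch (the perturbation of ``b`` is arbitrary):

    >>> outer_v = []
    >>> x1, _ = lgmres(A, b, outer_v=outer_v, atol=1e-5)
    >>> x2, _ = lgmres(A, b + 0.1, outer_v=outer_v, atol=1e-5)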
"""
A,M,x,b,postprocess = make_system(A,M,x0,b)
if not np.isfinite(b).all():
raise ValueError("RHS must contain only finite numbers")
if atol is None:
warnings.warn("scipy.sparse.linalg.lgmres called without specifying `atol`. "
"The default value will change in the future. To preserve "
"current behavior, set ``atol=tol``.",
category=DeprecationWarning, stacklevel=2)
atol = tol
matvec = A.matvec
psolve = M.matvec
if outer_v is None:
outer_v = []
axpy, dot, scal = None, None, None
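    # The remaining BLAS routines are bound lazily in the first outer
    # iteration, once the dtype of the iterates (possibly complex) is known.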
nrm2 = get_blas_funcs('nrm2', [b])
b_norm = nrm2(b)
ptol_max_factor = 1.0
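    # ptol_max_factor caps how loose the inner-GMRES tolerance may be; it is
    # adapted after each outer iteration depending on whether the inner solve
    # reached its target (see "Inner loop tolerance control" below).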
for k_outer in xrange(maxiter):
r_outer = matvec(x) - b
# -- callback
if callback is not None:
callback(x)
# -- determine input type routines
if axpy is None:
if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
x = x.astype(r_outer.dtype)
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
(x, r_outer))
# -- check stopping condition
r_norm = nrm2(r_outer)
if r_norm <= max(atol, tol * b_norm):
break
# -- inner LGMRES iteration
v0 = -psolve(r_outer)
inner_res_0 = nrm2(v0)
if inner_res_0 == 0:
rnorm = nrm2(r_outer)
raise RuntimeError("Preconditioner returned a zero vector; "
"|v| ~ %.1g, |M v| = 0" % rnorm)
v0 = scal(1.0/inner_res_0, v0)
ptol = min(ptol_max_factor, max(atol, tol*b_norm)/r_norm)
try:
Q, R, B, vs, zs, y, pres = _fgmres(matvec,
v0,
inner_m,
lpsolve=psolve,
atol=ptol,
outer_v=outer_v,
prepend_outer_v=prepend_outer_v)
y *= inner_res_0
if not np.isfinite(y).all():
# Overflow etc. in computation. There's no way to
# recover from this, so we have to bail out.
raise LinAlgError()
except LinAlgError:
# Floating point over/underflow, non-finite result from
# matmul etc. -- report failure.
return postprocess(x), k_outer + 1
# Inner loop tolerance control
if pres > ptol:
ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
else:
ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
# -- GMRES terminated: eval solution
dx = zs[0]*y[0]
for w, yc in zip(zs[1:], y[1:]):
dx = axpy(w, dx, dx.shape[0], yc) # dx += w*yc
# -- Store LGMRES augmentation vectors
nx = nrm2(dx)
if nx > 0:
if store_outer_Av:
q = Q.dot(R.dot(y))
ax = vs[0]*q[0]
for v, qc in zip(vs[1:], q[1:]):
ax = axpy(v, ax, ax.shape[0], qc)
outer_v.append((dx/nx, ax/nx))
else:
outer_v.append((dx/nx, None))
# -- Retain only a finite number of augmentation vectors
while len(outer_v) > outer_k:
del outer_v[0]
# -- Apply step
x += dx
else:
# didn't converge ...
return postprocess(x), maxiter
return postprocess(x), 0