Dataset schema:

  column           type           range / values
  ---------------  -------------  ------------------
  repo             string         lengths 2 .. 152
  file             string         lengths 15 .. 239
  code             string         lengths 0 .. 58.4M
  file_length      int64          0 .. 58.4M
  avg_line_length  float64        0 .. 1.81M
  max_line_length  int64          0 .. 12.7M
  extension_type   string         364 classes
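As a rough sketch of how rows with these columns might be filtered once loaded, assuming the dump comes from a Hugging Face dataset (the load_dataset call is standard; the dataset name below is a placeholder, not taken from this page):

from datasets import load_dataset

# Placeholder identifier: substitute the actual dataset name.
ds = load_dataset("user/code-corpus", split="train")

# Keep only Python files with modest line lengths, using the
# extension_type and max_line_length columns from the schema above.
py_rows = ds.filter(
    lambda row: row["extension_type"] == "py" and row["max_line_length"] < 120
)
print(py_rows.num_rows)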
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/__init__.py
"Iterative Solvers for Sparse Linear Systems" from __future__ import division, print_function, absolute_import #from info import __doc__ from .iterative import * from .minres import minres from .lgmres import lgmres from .lsqr import lsqr from .lsmr import lsmr from ._gcrotmk import gcrotmk __all__ = [s for s in dir() if not s.startswith('_')] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
444
23.722222
64
py
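The __init__.py above only re-exports the solvers; a small sketch of reaching them through the public scipy.sparse.linalg namespace, the usual entry point (the toy system below is an illustrative choice):

import numpy as np
from scipy.sparse import identity
from scipy.sparse.linalg import lsmr, lgmres, minres, gcrotmk

# Trivial identity system, just to show the import surface in use.
A = identity(5, format="csr")
b = np.arange(1, 6, dtype=float)
x = lsmr(A, b)[0]          # least-squares solver re-exported from .lsmr
print(np.allclose(A.dot(x), b))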
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/lsmr.py
""" Copyright (C) 2010 David Fong and Michael Saunders LSMR uses an iterative method. 07 Jun 2010: Documentation updated 03 Jun 2010: First release version in Python David Chin-lung Fong clfong@stanford.edu Institute for Computational and Mathematical Engineering Stanford University Michael Saunders saunders@stanford.edu Systems Optimization Laboratory Dept of MS&E, Stanford University. """ from __future__ import division, print_function, absolute_import __all__ = ['lsmr'] from numpy import zeros, infty, atleast_1d from numpy.linalg import norm from math import sqrt from scipy.sparse.linalg.interface import aslinearoperator from .lsqr import _sym_ortho def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8, maxiter=None, show=False, x0=None): """Iterative solver for least-squares problems. lsmr solves the system of linear equations ``Ax = b``. If the system is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``. A is a rectangular matrix of dimension m-by-n, where all cases are allowed: m = n, m > n, or m < n. B is a vector of length m. The matrix A may be dense or sparse (usually sparse). Parameters ---------- A : {matrix, sparse matrix, ndarray, LinearOperator} Matrix A in the linear system. b : array_like, shape (m,) Vector b in the linear system. damp : float Damping factor for regularized least-squares. `lsmr` solves the regularized least-squares problem:: min ||(b) - ( A )x|| ||(0) (damp*I) ||_2 where damp is a scalar. If damp is None or 0, the system is solved without regularization. atol, btol : float, optional Stopping tolerances. `lsmr` continues iterations until a certain backward error estimate is smaller than some quantity depending on atol and btol. Let ``r = b - Ax`` be the residual vector for the current approximate solution ``x``. If ``Ax = b`` seems to be consistent, ``lsmr`` terminates when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``. Otherwise, lsmr terminates when ``norm(A^{T} r) <= atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say), the final ``norm(r)`` should be accurate to about 6 digits. (The final x will usually have fewer correct digits, depending on ``cond(A)`` and the size of LAMBDA.) If `atol` or `btol` is None, a default value of 1.0e-6 will be used. Ideally, they should be estimates of the relative error in the entries of A and B respectively. For example, if the entries of `A` have 7 correct digits, set atol = 1e-7. This prevents the algorithm from doing unnecessary work beyond the uncertainty of the input data. conlim : float, optional `lsmr` terminates if an estimate of ``cond(A)`` exceeds `conlim`. For compatible systems ``Ax = b``, conlim could be as large as 1.0e+12 (say). For least-squares problems, `conlim` should be less than 1.0e+8. If `conlim` is None, the default value is 1e+8. Maximum precision can be obtained by setting ``atol = btol = conlim = 0``, but the number of iterations may then be excessive. maxiter : int, optional `lsmr` terminates if the number of iterations reaches `maxiter`. The default is ``maxiter = min(m, n)``. For ill-conditioned systems, a larger value of `maxiter` may be needed. show : bool, optional Print iterations logs if ``show=True``. x0 : array_like, shape (n,), optional Initial guess of x, if None zeros are used. .. versionadded:: 1.0.0 Returns ------- x : ndarray of float Least-square solution returned. istop : int istop gives the reason for stopping:: istop = 0 means x=0 is a solution. If x0 was given, then x=x0 is a solution. 
= 1 means x is an approximate solution to A*x = B, according to atol and btol. = 2 means x approximately solves the least-squares problem according to atol. = 3 means COND(A) seems to be greater than CONLIM. = 4 is the same as 1 with atol = btol = eps (machine precision) = 5 is the same as 2 with atol = eps. = 6 is the same as 3 with CONLIM = 1/eps. = 7 means ITN reached maxiter before the other stopping conditions were satisfied. itn : int Number of iterations used. normr : float ``norm(b-Ax)`` normar : float ``norm(A^T (b - Ax))`` norma : float ``norm(A)`` conda : float Condition number of A. normx : float ``norm(x)`` Notes ----- .. versionadded:: 0.11.0 References ---------- .. [1] D. C.-L. Fong and M. A. Saunders, "LSMR: An iterative algorithm for sparse least-squares problems", SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011. http://arxiv.org/abs/1006.0758 .. [2] LSMR Software, http://web.stanford.edu/group/SOL/software/lsmr/ Examples -------- >>> from scipy.sparse import csc_matrix >>> from scipy.sparse.linalg import lsmr >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float) The first example has the trivial solution `[0, 0]` >>> b = np.array([0., 0., 0.], dtype=float) >>> x, istop, itn, normr = lsmr(A, b)[:4] >>> istop 0 >>> x array([ 0., 0.]) The stopping code `istop=0` returned indicates that a vector of zeros was found as a solution. The returned solution `x` indeed contains `[0., 0.]`. The next example has a non-trivial solution: >>> b = np.array([1., 0., -1.], dtype=float) >>> x, istop, itn, normr = lsmr(A, b)[:4] >>> istop 1 >>> x array([ 1., -1.]) >>> itn 1 >>> normr 4.440892098500627e-16 As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance limits. The given solution `[1., -1.]` obviously solves the equation. The remaining return values include information about the number of iterations (`itn=1`) and the remaining difference of left and right side of the solved equation. The final example demonstrates the behavior in the case where there is no solution for the equation: >>> b = np.array([1., 0.01, -1.], dtype=float) >>> x, istop, itn, normr = lsmr(A, b)[:4] >>> istop 2 >>> x array([ 1.00333333, -0.99666667]) >>> A.dot(x)-b array([ 0.00333333, -0.00333333, 0.00333333]) >>> normr 0.005773502691896255 `istop` indicates that the system is inconsistent and thus `x` is rather an approximate solution to the corresponding least-squares problem. `normr` contains the minimal distance that was found. 
""" A = aslinearoperator(A) b = atleast_1d(b) if b.ndim > 1: b = b.squeeze() msg = ('The exact solution is x = 0, or x = x0, if x0 was given ', 'Ax - b is small enough, given atol, btol ', 'The least-squares solution is good enough, given atol ', 'The estimate of cond(Abar) has exceeded conlim ', 'Ax - b is small enough for this machine ', 'The least-squares solution is good enough for this machine', 'Cond(Abar) seems to be too large for this machine ', 'The iteration limit has been reached ') hdg1 = ' itn x(1) norm r norm A''r' hdg2 = ' compatible LS norm A cond A' pfreq = 20 # print frequency (for repeating the heading) pcount = 0 # print counter m, n = A.shape # stores the num of singular values minDim = min([m, n]) if maxiter is None: maxiter = minDim if show: print(' ') print('LSMR Least-squares solution of Ax = b\n') print('The matrix A has %8g rows and %8g cols' % (m, n)) print('damp = %20.14e\n' % (damp)) print('atol = %8.2e conlim = %8.2e\n' % (atol, conlim)) print('btol = %8.2e maxiter = %8g\n' % (btol, maxiter)) u = b normb = norm(b) if x0 is None: x = zeros(n) beta = normb.copy() else: x = atleast_1d(x0) u = u - A.matvec(x) beta = norm(u) if beta > 0: u = (1 / beta) * u v = A.rmatvec(u) alpha = norm(v) else: v = zeros(n) alpha = 0 if alpha > 0: v = (1 / alpha) * v # Initialize variables for 1st iteration. itn = 0 zetabar = alpha * beta alphabar = alpha rho = 1 rhobar = 1 cbar = 1 sbar = 0 h = v.copy() hbar = zeros(n) # Initialize variables for estimation of ||r||. betadd = beta betad = 0 rhodold = 1 tautildeold = 0 thetatilde = 0 zeta = 0 d = 0 # Initialize variables for estimation of ||A|| and cond(A) normA2 = alpha * alpha maxrbar = 0 minrbar = 1e+100 normA = sqrt(normA2) condA = 1 normx = 0 # Items for use in stopping rules, normb set earlier istop = 0 ctol = 0 if conlim > 0: ctol = 1 / conlim normr = beta # Reverse the order here from the original matlab code because # there was an error on return when arnorm==0 normar = alpha * beta if normar == 0: if show: print(msg[0]) return x, istop, itn, normr, normar, normA, condA, normx if show: print(' ') print(hdg1, hdg2) test1 = 1 test2 = alpha / beta str1 = '%6g %12.5e' % (itn, x[0]) str2 = ' %10.3e %10.3e' % (normr, normar) str3 = ' %8.1e %8.1e' % (test1, test2) print(''.join([str1, str2, str3])) # Main iteration loop. while itn < maxiter: itn = itn + 1 # Perform the next step of the bidiagonalization to obtain the # next beta, u, alpha, v. These satisfy the relations # beta*u = a*v - alpha*u, # alpha*v = A'*u - beta*v. u = A.matvec(v) - alpha * u beta = norm(u) if beta > 0: u = (1 / beta) * u v = A.rmatvec(u) - beta * v alpha = norm(v) if alpha > 0: v = (1 / alpha) * v # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}. # Construct rotation Qhat_{k,2k+1}. chat, shat, alphahat = _sym_ortho(alphabar, damp) # Use a plane rotation (Q_i) to turn B_i to R_i rhoold = rho c, s, rho = _sym_ortho(alphahat, beta) thetanew = s*alpha alphabar = c*alpha # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar rhobarold = rhobar zetaold = zeta thetabar = sbar * rho rhotemp = cbar * rho cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew) zeta = cbar * zetabar zetabar = - sbar * zetabar # Update h, h_hat, x. hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar x = x + (zeta / (rho * rhobar)) * hbar h = v - (thetanew / rho) * h # Estimate of ||r||. # Apply rotation Qhat_{k,2k+1}. betaacute = chat * betadd betacheck = -shat * betadd # Apply rotation Q_{k,k+1}. 
betahat = c * betaacute betadd = -s * betaacute # Apply rotation Qtilde_{k-1}. # betad = betad_{k-1} here. thetatildeold = thetatilde ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar) thetatilde = stildeold * rhobar rhodold = ctildeold * rhobar betad = - stildeold * betad + ctildeold * betahat # betad = betad_k here. # rhodold = rhod_k here. tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold taud = (zeta - thetatilde * tautildeold) / rhodold d = d + betacheck * betacheck normr = sqrt(d + (betad - taud)**2 + betadd * betadd) # Estimate ||A||. normA2 = normA2 + beta * beta normA = sqrt(normA2) normA2 = normA2 + alpha * alpha # Estimate cond(A). maxrbar = max(maxrbar, rhobarold) if itn > 1: minrbar = min(minrbar, rhobarold) condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp) # Test for convergence. # Compute norms for convergence testing. normar = abs(zetabar) normx = norm(x) # Now use these norms to estimate certain other quantities, # some of which will be small near a solution. test1 = normr / normb if (normA * normr) != 0: test2 = normar / (normA * normr) else: test2 = infty test3 = 1 / condA t1 = test1 / (1 + normA * normx / normb) rtol = btol + atol * normA * normx / normb # The following tests guard against extremely small values of # atol, btol or ctol. (The user may have set any or all of # the parameters atol, btol, conlim to 0.) # The effect is equivalent to the normAl tests using # atol = eps, btol = eps, conlim = 1/eps. if itn >= maxiter: istop = 7 if 1 + test3 <= 1: istop = 6 if 1 + test2 <= 1: istop = 5 if 1 + t1 <= 1: istop = 4 # Allow for tolerances set by the user. if test3 <= ctol: istop = 3 if test2 <= atol: istop = 2 if test1 <= rtol: istop = 1 # See if it is time to print something. if show: if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \ (itn % 10 == 0) or (test3 <= 1.1 * ctol) or \ (test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \ (istop != 0): if pcount >= pfreq: pcount = 0 print(' ') print(hdg1, hdg2) pcount = pcount + 1 str1 = '%6g %12.5e' % (itn, x[0]) str2 = ' %10.3e %10.3e' % (normr, normar) str3 = ' %8.1e %8.1e' % (test1, test2) str4 = ' %8.1e %8.1e' % (normA, condA) print(''.join([str1, str2, str3, str4])) if istop > 0: break # Print the stopping condition. if show: print(' ') print('LSMR finished') print(msg[istop]) print('istop =%8g normr =%8.1e' % (istop, normr)) print(' normA =%8.1e normAr =%8.1e' % (normA, normar)) print('itn =%8g condA =%8.1e' % (itn, condA)) print(' normx =%8.1e' % (normx)) print(str1, str2) print(str3, str4) return x, istop, itn, normr, normar, normA, condA, normx
15,126
31.116773
79
py
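Beyond the docstring examples, a short sketch of the damp parameter described above in action; the matrix and damping value are illustrative choices, not taken from the source:

import numpy as np
from scipy.sparse.linalg import lsmr

# A nearly rank-deficient 2-column problem. With damp > 0, lsmr solves
# the regularized problem  min ||b - A x||^2 + damp^2 ||x||^2,
# which shrinks and stabilizes the solution.
A = np.array([[1.0, 1.0],
              [1.0, 1.000001],
              [0.0, 0.0]])
b = np.array([2.0, 2.0, 1.0])

x_plain = lsmr(A, b)[0]
x_damped = lsmr(A, b, damp=1e-3)[0]
# The damped solution has the smaller norm.
print(np.linalg.norm(x_plain), np.linalg.norm(x_damped))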
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/_gcrotmk.py
# Copyright (C) 2015, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as Scipy.

from __future__ import division, print_function, absolute_import

import warnings
import numpy as np
from numpy.linalg import LinAlgError
from scipy._lib.six import xrange
from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
from scipy.sparse.linalg.isolve.utils import make_system


__all__ = ['gcrotmk']


def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
            prepend_outer_v=False):
    """
    FGMRES Arnoldi process, with optional projection or augmentation

    Parameters
    ----------
    matvec : callable
        Operation A*x
    v0 : ndarray
        Initial vector, normalized to nrm2(v0) == 1
    m : int
        Number of GMRES rounds
    atol : float
        Absolute tolerance for early exit
    lpsolve : callable
        Left preconditioner L
    rpsolve : callable
        Right preconditioner R
    CU : list of (ndarray, ndarray)
        Columns of matrices C and U in GCROT
    outer_v : list of ndarrays
        Augmentation vectors in LGMRES
    prepend_outer_v : bool, optional
        Whether augmentation vectors come before or after Krylov iterates

    Raises
    ------
    LinAlgError
        If nans encountered

    Returns
    -------
    Q, R : ndarray
        QR decomposition of the upper Hessenberg H=QR
    B : ndarray
        Projections corresponding to matrix C
    vs : list of ndarray
        Columns of matrix V
    zs : list of ndarray
        Columns of matrix Z
    y : ndarray
        Solution to ||H y - e_1||_2 = min!
    res : float
        The final (preconditioned) residual norm

    """

    if lpsolve is None:
        lpsolve = lambda x: x
    if rpsolve is None:
        rpsolve = lambda x: x

    axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))

    vs = [v0]
    zs = []
    y = None
    res = np.nan

    m = m + len(outer_v)

    # Orthogonal projection coefficients
    B = np.zeros((len(cs), m), dtype=v0.dtype)

    # H is stored in QR factorized form
    Q = np.ones((1, 1), dtype=v0.dtype)
    R = np.zeros((1, 0), dtype=v0.dtype)

    eps = np.finfo(v0.dtype).eps

    breakdown = False

    # FGMRES Arnoldi process
    for j in xrange(m):
        # L A Z = C B + V H

        if prepend_outer_v and j < len(outer_v):
            z, w = outer_v[j]
        elif prepend_outer_v and j == len(outer_v):
            z = rpsolve(v0)
            w = None
        elif not prepend_outer_v and j >= m - len(outer_v):
            z, w = outer_v[j - (m - len(outer_v))]
        else:
            z = rpsolve(vs[-1])
            w = None

        if w is None:
            w = lpsolve(matvec(z))
        else:
            # w is clobbered below
            w = w.copy()

        w_norm = nrm2(w)

        # GCROT projection: L A -> (1 - C C^H) L A
        # i.e. orthogonalize against C
        for i, c in enumerate(cs):
            alpha = dot(c, w)
            B[i,j] = alpha
            w = axpy(c, w, c.shape[0], -alpha)  # w -= alpha*c

        # Orthogonalize against V
        hcur = np.zeros(j+2, dtype=Q.dtype)
        for i, v in enumerate(vs):
            alpha = dot(v, w)
            hcur[i] = alpha
            w = axpy(v, w, v.shape[0], -alpha)  # w -= alpha*v
        hcur[i+1] = nrm2(w)

        with np.errstate(over='ignore', divide='ignore'):
            # Careful with denormals
            alpha = 1/hcur[-1]

        if np.isfinite(alpha):
            w = scal(alpha, w)

        if not (hcur[-1] > eps * w_norm):
            # w essentially in the span of previous vectors,
            # or we have nans. Bail out after updating the QR
            # solution.
            breakdown = True

        vs.append(w)
        zs.append(z)

        # Arnoldi LSQ problem

        # Add new column to H=Q*R, padding other columns with zeros
        Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F')
        Q2[:j+1,:j+1] = Q
        Q2[j+1,j+1] = 1

        R2 = np.zeros((j+2, j), dtype=R.dtype, order='F')
        R2[:j+1,:] = R

        Q, R = qr_insert(Q2, R2, hcur, j, which='col',
                         overwrite_qru=True, check_finite=False)

        # Transformed least squares problem
        # || Q R y - inner_res_0 * e_1 ||_2 = min!
        # Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]

        # Residual is immediately known
        res = abs(Q[0,-1])

        # Check for termination
        if res < atol or breakdown:
            break

    if not np.isfinite(R[j,j]):
        # nans encountered, bail out
        raise LinAlgError()

    # -- Get the LSQ problem solution

    # The problem is triangular, but the condition number may be
    # bad (or in case of breakdown the last diagonal entry may be
    # zero), so use lstsq instead of trtrs.
    y, _, _, _, = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj())

    B = B[:,:j+1]

    return Q, R, B, vs, zs, y, res


def gcrotmk(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
            m=20, k=None, CU=None, discard_C=False, truncate='oldest',
            atol=None):
    """
    Solve a matrix equation using flexible GCROT(m,k) algorithm.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : {array, matrix}
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is `tol`.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : int, optional
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, dense matrix, LinearOperator}, optional
        Preconditioner for A.  The preconditioner should approximate the
        inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner
        can vary from iteration to iteration. Effective preconditioning
        dramatically improves the rate of convergence, which implies that
        fewer iterations are needed to reach a given error tolerance.
    callback : function, optional
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    m : int, optional
        Number of inner FGMRES iterations per each outer iteration.
        Default: 20
    k : int, optional
        Number of vectors to carry between inner FGMRES iterations.
        According to [2]_, good values are around m.
        Default: m
    CU : list of tuples, optional
        List of tuples ``(c, u)`` which contain the columns of the matrices
        C and U in the GCROT(m,k) algorithm. For details, see [2]_.
        The list given and vectors contained in it are modified in-place.
        If not given, start from empty matrices. The ``c`` elements in the
        tuples can be ``None``, in which case the vectors are recomputed
        via ``c = A u`` on start and orthogonalized as described in [3]_.
    discard_C : bool, optional
        Discard the C-vectors at the end. Useful if recycling Krylov subspaces
        for different linear systems.
    truncate : {'oldest', 'smallest'}, optional
        Truncation scheme to use. Drop: oldest vectors, or vectors with
        smallest singular values using the scheme discussed in [1,2].
        See [2]_ for detailed comparison.
        Default: 'oldest'

    Returns
    -------
    x : array or matrix
        The solution found.
    info : int
        Provides convergence information:

        * 0  : successful exit
        * >0 : convergence to tolerance not achieved, number of iterations

    References
    ----------
    .. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
           methods'', SIAM J. Numer. Anal. 36, 864 (1999).
    .. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
           of GCROT for solving nonsymmetric linear systems'',
           SIAM J. Sci. Comput. 32, 172 (2010).
    .. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
           ''Recycling Krylov subspaces for sequences of linear systems'',
           SIAM J. Sci. Comput. 28, 1651 (2006).

    """
    A,M,x,b,postprocess = make_system(A,M,x0,b)

    if not np.isfinite(b).all():
        raise ValueError("RHS must contain only finite numbers")

    if truncate not in ('oldest', 'smallest'):
        raise ValueError("Invalid value for 'truncate': %r" % (truncate,))

    if atol is None:
        warnings.warn("scipy.sparse.linalg.gcrotmk called without specifying `atol`. "
                      "The default value will change in the future. To preserve "
                      "current behavior, set ``atol=tol``.",
                      category=DeprecationWarning, stacklevel=2)
        atol = tol

    matvec = A.matvec
    psolve = M.matvec

    if CU is None:
        CU = []

    if k is None:
        k = m

    axpy, dot, scal = None, None, None

    r = b - matvec(x)

    axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r))

    b_norm = nrm2(b)

    if discard_C:
        CU[:] = [(None, u) for c, u in CU]

    # Reorthogonalize old vectors
    if CU:
        # Sort already existing vectors to the front
        CU.sort(key=lambda cu: cu[0] is not None)

        # Fill-in missing ones
        C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
        us = []
        j = 0
        while CU:
            # More memory-efficient: throw away old vectors as we go
            c, u = CU.pop(0)
            if c is None:
                c = matvec(u)
            C[:,j] = c
            j += 1
            us.append(u)

        # Orthogonalize
        Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
        del C

        # C := Q
        cs = list(Q.T)

        # U := U P R^-1,  back-substitution
        new_us = []
        for j in xrange(len(cs)):
            u = us[P[j]]
            for i in xrange(j):
                u = axpy(us[P[i]], u, u.shape[0], -R[i,j])
            if abs(R[j,j]) < 1e-12 * abs(R[0,0]):
                # discard rest of the vectors
                break
            u = scal(1.0/R[j,j], u)
            new_us.append(u)

        # Form the new CU lists
        CU[:] = list(zip(cs, new_us))[::-1]

    if CU:
        axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,))

        # Solve first the projection operation with respect to the CU
        # vectors. This corresponds to modifying the initial guess to
        # be
        #
        #     x' = x + U y
        #     y = argmin_y || b - A (x + U y) ||^2
        #
        # The solution is y = C^H (b - A x)
        for c, u in CU:
            yc = dot(c, r)
            x = axpy(u, x, x.shape[0], yc)
            r = axpy(c, r, r.shape[0], -yc)

    # GCROT main iteration
    for j_outer in xrange(maxiter):
        # -- callback
        if callback is not None:
            callback(x)

        beta = nrm2(r)

        # -- check stopping condition
        beta_tol = max(atol, tol * b_norm)

        if beta <= beta_tol and (j_outer > 0 or CU):
            # recompute residual to avoid rounding error
            r = b - matvec(x)
            beta = nrm2(r)

        if beta <= beta_tol:
            j_outer = -1
            break

        ml = m + max(k - len(CU), 0)

        cs = [c for c, u in CU]

        try:
            Q, R, B, vs, zs, y, pres = _fgmres(matvec,
                                               r/beta,
                                               ml,
                                               rpsolve=psolve,
                                               atol=max(atol, tol*b_norm)/beta,
                                               cs=cs)
            y *= beta
        except LinAlgError:
            # Floating point over/underflow, non-finite result from
            # matmul etc. -- report failure.
            break

        #
        # At this point,
        #
        #     [A U, A Z] = [C, V] G;   G =  [ I  B ]
        #                                   [ 0  H ]
        #
        # where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
        #
        #     || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
        #
        # from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
        #

        #
        # GCROT(m,k) update
        #

        # Define new outer vectors

        # ux := (Z - U B) y
        ux = zs[0]*y[0]
        for z, yc in zip(zs[1:], y[1:]):
            ux = axpy(z, ux, ux.shape[0], yc)  # ux += z*yc
        by = B.dot(y)
        for cu, byc in zip(CU, by):
            c, u = cu
            ux = axpy(u, ux, ux.shape[0], -byc)  # ux -= u*byc

        # cx := V H y
        hy = Q.dot(R.dot(y))
        cx = vs[0] * hy[0]
        for v, hyc in zip(vs[1:], hy[1:]):
            cx = axpy(v, cx, cx.shape[0], hyc)  # cx += v*hyc

        # Normalize cx, maintaining cx = A ux
        # This new cx is orthogonal to the previous C, by construction
        try:
            alpha = 1/nrm2(cx)
            if not np.isfinite(alpha):
                raise FloatingPointError()
        except (FloatingPointError, ZeroDivisionError):
            # Cannot update, so skip it
            continue

        cx = scal(alpha, cx)
        ux = scal(alpha, ux)

        # Update residual and solution
        gamma = dot(cx, r)
        r = axpy(cx, r, r.shape[0], -gamma)  # r -= gamma*cx
        x = axpy(ux, x, x.shape[0], gamma)  # x += gamma*ux

        # Truncate CU
        if truncate == 'oldest':
            while len(CU) >= k and CU:
                del CU[0]
        elif truncate == 'smallest':
            if len(CU) >= k and CU:
                # cf. [1,2]
                D = solve(R[:-1,:].T, B.T).T
                W, sigma, V = svd(D)

                # C := C W[:,:k-1],  U := U W[:,:k-1]
                new_CU = []
                for j, w in enumerate(W[:,:k-1].T):
                    c, u = CU[0]
                    c = c * w[0]
                    u = u * w[0]
                    for cup, wp in zip(CU[1:], w[1:]):
                        cp, up = cup
                        c = axpy(cp, c, c.shape[0], wp)
                        u = axpy(up, u, u.shape[0], wp)

                    # Reorthogonalize at the same time; not necessary
                    # in exact arithmetic, but floating point error
                    # tends to accumulate here
                    for cp, up in new_CU:
                        alpha = dot(cp, c)
                        c = axpy(cp, c, c.shape[0], -alpha)
                        u = axpy(up, u, u.shape[0], -alpha)
                    alpha = nrm2(c)
                    c = scal(1.0/alpha, c)
                    u = scal(1.0/alpha, u)

                    new_CU.append((c, u))
                CU[:] = new_CU

        # Add new vector to CU
        CU.append((cx, ux))

    # Include the solution vector to the span
    CU.append((None, x.copy()))
    if discard_C:
        CU[:] = [(None, uz) for cz, uz in CU]

    return postprocess(x), j_outer + 1
15,478
30.719262
86
py
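The CU recycling described in the gcrotmk docstring above is easiest to see end to end; here is a hedged sketch of reusing the subspace across two right-hand sides (the problem setup and tolerances are illustrative choices):

import numpy as np
from scipy.sparse import eye, rand
from scipy.sparse.linalg import gcrotmk

np.random.seed(0)
A = eye(100) + rand(100, 100, density=0.05, random_state=0)

CU = []  # filled in-place on the first solve, reused on the second
b1 = np.random.rand(100)
x1, info1 = gcrotmk(A, b1, CU=CU, tol=1e-8, atol=1e-8)

# The second solve starts from the recycled C, U vectors and typically
# needs fewer matrix-vector products.
b2 = np.random.rand(100)
x2, info2 = gcrotmk(A, b2, CU=CU, tol=1e-8, atol=1e-8)
print(info1, info2, len(CU))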
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py
#!/usr/bin/env python
"""Tests for the linalg.isolve.gcrotmk module
"""

from __future__ import division, print_function, absolute_import

from numpy.testing import assert_, assert_allclose, assert_equal
from scipy._lib._numpy_compat import suppress_warnings

import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand

from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg.isolve import gcrotmk, gmres


Am = csr_matrix(array([[-2,1,0,0,0,9],
                       [1,-2,1,0,5,0],
                       [0,1,-2,1,0,0],
                       [0,0,1,-2,1,0],
                       [0,3,0,1,-2,1],
                       [1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
count = [0]


def matvec(v):
    count[0] += 1
    return Am*v


A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)


def do_solve(**kw):
    count[0] = 0
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
    count_0 = count[0]
    assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
    return x0, count_0


class TestGCROTMK(object):
    def test_preconditioner(self):
        # Check that preconditioning works
        pc = splu(Am.tocsc())
        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)

        x0, count_0 = do_solve()
        x1, count_1 = do_solve(M=M)

        assert_equal(count_1, 3)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

    def test_arnoldi(self):
        np.random.seed(1234)

        A = eye(10000) + rand(10000,10000,density=1e-4)
        b = np.random.rand(10000)

        # The inner arnoldi should be equivalent to gmres
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
            x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)

        assert_equal(flag0, 1)
        assert_equal(flag1, 1)
        assert_(np.linalg.norm(A.dot(x0) - b) > 1e-3)

        assert_allclose(x0, x1)

    def test_cornercase(self):
        np.random.seed(1234)

        # Rounding error may prevent convergence with tol=0 --- ensure
        # that the return values in this case are correct, and no
        # exceptions are raised

        for n in [3, 5, 10, 100]:
            A = 2*eye(n)

            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                b = np.ones(n)
                x, info = gcrotmk(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                x, info = gcrotmk(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                b = np.random.rand(n)
                x, info = gcrotmk(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                x, info = gcrotmk(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

    def test_nans(self):
        A = eye(3, format='lil')
        A[1,1] = np.nan
        b = np.ones(3)

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, info = gcrotmk(A, b, tol=0, maxiter=10)
            assert_equal(info, 1)

    def test_truncate(self):
        np.random.seed(1234)
        A = np.random.rand(30, 30) + np.eye(30)
        b = np.random.rand(30)

        for truncate in ['oldest', 'smallest']:
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate,
                                  tol=1e-4, maxiter=200)
            assert_equal(info, 0)
            assert_allclose(A.dot(x) - b, 0, atol=1e-3)

    def test_CU(self):
        for discard_C in (True, False):
            # Check that C,U behave as expected
            CU = []
            x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
            assert_(len(CU) > 0)
            assert_(len(CU) <= 6)

            if discard_C:
                for c, u in CU:
                    assert_(c is None)

            # should converge immediately
            x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
            if discard_C:
                assert_equal(count_1, 2 + len(CU))
            else:
                assert_equal(count_1, 3)
            assert_(count_1 <= count_0/2)
            assert_allclose(x1, x0, atol=1e-14)

    def test_denormals(self):
        # Check that no warnings are emitted if the matrix contains
        # numbers for which 1/x has no float representation, and that
        # the solver behaves properly.
        A = np.array([[1, 2], [3, 4]], dtype=float)
        A *= 100 * np.nextafter(0, 1)

        b = np.array([1, 1])

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = gcrotmk(A, b)

        if info == 0:
            assert_allclose(A.dot(xp), b)
5,493
31.702381
81
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/tests/test_lsmr.py
""" Copyright (C) 2010 David Fong and Michael Saunders Distributed under the same license as Scipy Testing Code for LSMR. 03 Jun 2010: First version release with lsmr.py David Chin-lung Fong clfong@stanford.edu Institute for Computational and Mathematical Engineering Stanford University Michael Saunders saunders@stanford.edu Systems Optimization Laboratory Dept of MS&E, Stanford University. """ from __future__ import division, print_function, absolute_import from numpy import array, arange, eye, zeros, ones, sqrt, transpose, hstack from numpy.linalg import norm from numpy.testing import (assert_almost_equal, assert_array_almost_equal) from scipy.sparse import coo_matrix from scipy.sparse.linalg.interface import aslinearoperator from scipy.sparse.linalg import lsmr from .test_lsqr import G, b class TestLSMR: def setup_method(self): self.n = 10 self.m = 10 def assertCompatibleSystem(self, A, xtrue): Afun = aslinearoperator(A) b = Afun.matvec(xtrue) x = lsmr(A, b)[0] assert_almost_equal(norm(x - xtrue), 0, decimal=5) def testIdentityACase1(self): A = eye(self.n) xtrue = zeros((self.n, 1)) self.assertCompatibleSystem(A, xtrue) def testIdentityACase2(self): A = eye(self.n) xtrue = ones((self.n,1)) self.assertCompatibleSystem(A, xtrue) def testIdentityACase3(self): A = eye(self.n) xtrue = transpose(arange(self.n,0,-1)) self.assertCompatibleSystem(A, xtrue) def testBidiagonalA(self): A = lowerBidiagonalMatrix(20,self.n) xtrue = transpose(arange(self.n,0,-1)) self.assertCompatibleSystem(A,xtrue) def testScalarB(self): A = array([[1.0, 2.0]]) b = 3.0 x = lsmr(A, b)[0] assert_almost_equal(norm(A.dot(x) - b), 0) def testColumnB(self): A = eye(self.n) b = ones((self.n, 1)) x = lsmr(A, b)[0] assert_almost_equal(norm(A.dot(x) - b.ravel()), 0) def testInitialization(self): # Test that the default setting is not modified x_ref = lsmr(G, b)[0] x0 = zeros(b.shape) x = lsmr(G, b, x0=x0)[0] assert_array_almost_equal(x_ref, x) # Test warm-start with single iteration x0 = lsmr(G, b, maxiter=1)[0] x = lsmr(G, b, x0=x0)[0] assert_array_almost_equal(x_ref, x) class TestLSMRReturns: def setup_method(self): self.n = 10 self.A = lowerBidiagonalMatrix(20,self.n) self.xtrue = transpose(arange(self.n,0,-1)) self.Afun = aslinearoperator(self.A) self.b = self.Afun.matvec(self.xtrue) self.returnValues = lsmr(self.A,self.b) def testNormr(self): x, istop, itn, normr, normar, normA, condA, normx = self.returnValues assert_almost_equal(normr, norm(self.b - self.Afun.matvec(x))) def testNormar(self): x, istop, itn, normr, normar, normA, condA, normx = self.returnValues assert_almost_equal(normar, norm(self.Afun.rmatvec(self.b - self.Afun.matvec(x)))) def testNormx(self): x, istop, itn, normr, normar, normA, condA, normx = self.returnValues assert_almost_equal(normx, norm(x)) def lowerBidiagonalMatrix(m, n): # This is a simple example for testing LSMR. # It uses the leading m*n submatrix from # A = [ 1 # 1 2 # 2 3 # 3 4 # ... # n ] # suitably padded by zeros. 
# # 04 Jun 2010: First version for distribution with lsmr.py if m <= n: row = hstack((arange(m, dtype=int), arange(1, m, dtype=int))) col = hstack((arange(m, dtype=int), arange(m-1, dtype=int))) data = hstack((arange(1, m+1, dtype=float), arange(1,m, dtype=float))) return coo_matrix((data, (row, col)), shape=(m,n)) else: row = hstack((arange(n, dtype=int), arange(1, n+1, dtype=int))) col = hstack((arange(n, dtype=int), arange(n, dtype=int))) data = hstack((arange(1, n+1, dtype=float), arange(1,n+1, dtype=float))) return coo_matrix((data,(row, col)), shape=(m,n)) def lsmrtest(m, n, damp): """Verbose testing of lsmr""" A = lowerBidiagonalMatrix(m,n) xtrue = arange(n,0,-1, dtype=float) Afun = aslinearoperator(A) b = Afun.matvec(xtrue) atol = 1.0e-7 btol = 1.0e-7 conlim = 1.0e+10 itnlim = 10*n show = 1 x, istop, itn, normr, normar, norma, conda, normx \ = lsmr(A, b, damp, atol, btol, conlim, itnlim, show) j1 = min(n,5) j2 = max(n-4,1) print(' ') print('First elements of x:') str = ['%10.4f' % (xi) for xi in x[0:j1]] print(''.join(str)) print(' ') print('Last elements of x:') str = ['%10.4f' % (xi) for xi in x[j2-1:]] print(''.join(str)) r = b - Afun.matvec(x) r2 = sqrt(norm(r)**2 + (damp*norm(x))**2) print(' ') str = 'normr (est.) %17.10e' % (normr) str2 = 'normr (true) %17.10e' % (r2) print(str) print(str2) print(' ') if __name__ == "__main__": lsmrtest(20,10,0)
5,317
28.381215
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/tests/test_lgmres.py
"""Tests for the linalg.isolve.lgmres module """ from __future__ import division, print_function, absolute_import from numpy.testing import assert_, assert_allclose, assert_equal import numpy as np from numpy import zeros, array, allclose from scipy.linalg import norm from scipy.sparse import csr_matrix, eye, rand from scipy.sparse.linalg.interface import LinearOperator from scipy.sparse.linalg import splu from scipy.sparse.linalg.isolve import lgmres, gmres from scipy._lib._numpy_compat import suppress_warnings Am = csr_matrix(array([[-2,1,0,0,0,9], [1,-2,1,0,5,0], [0,1,-2,1,0,0], [0,0,1,-2,1,0], [0,3,0,1,-2,1], [1,0,0,0,1,-2]])) b = array([1,2,3,4,5,6]) count = [0] def matvec(v): count[0] += 1 return Am*v A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype) def do_solve(**kw): count[0] = 0 with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") x0, flag = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=6, tol=1e-14, **kw) count_0 = count[0] assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b)) return x0, count_0 class TestLGMRES(object): def test_preconditioner(self): # Check that preconditioning works pc = splu(Am.tocsc()) M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype) x0, count_0 = do_solve() x1, count_1 = do_solve(M=M) assert_(count_1 == 3) assert_(count_1 < count_0/2) assert_(allclose(x1, x0, rtol=1e-14)) def test_outer_v(self): # Check that the augmentation vectors behave as expected outer_v = [] x0, count_0 = do_solve(outer_k=6, outer_v=outer_v) assert_(len(outer_v) > 0) assert_(len(outer_v) <= 6) x1, count_1 = do_solve(outer_k=6, outer_v=outer_v, prepend_outer_v=True) assert_(count_1 == 2, count_1) assert_(count_1 < count_0/2) assert_(allclose(x1, x0, rtol=1e-14)) # --- outer_v = [] x0, count_0 = do_solve(outer_k=6, outer_v=outer_v, store_outer_Av=False) assert_(array([v[1] is None for v in outer_v]).all()) assert_(len(outer_v) > 0) assert_(len(outer_v) <= 6) x1, count_1 = do_solve(outer_k=6, outer_v=outer_v, prepend_outer_v=True) assert_(count_1 == 3, count_1) assert_(count_1 < count_0/2) assert_(allclose(x1, x0, rtol=1e-14)) def test_arnoldi(self): np.random.rand(1234) A = eye(10000) + rand(10000,10000,density=1e-4) b = np.random.rand(10000) # The inner arnoldi should be equivalent to gmres with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=15, maxiter=1) x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1) assert_equal(flag0, 1) assert_equal(flag1, 1) assert_(np.linalg.norm(A.dot(x0) - b) > 1e-3) assert_allclose(x0, x1) def test_cornercase(self): np.random.seed(1234) # Rounding error may prevent convergence with tol=0 --- ensure # that the return values in this case are correct, and no # exceptions are raised for n in [3, 5, 10, 100]: A = 2*eye(n) with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") b = np.ones(n) x, info = lgmres(A, b, maxiter=10) assert_equal(info, 0) assert_allclose(A.dot(x) - b, 0, atol=1e-14) x, info = lgmres(A, b, tol=0, maxiter=10) if info == 0: assert_allclose(A.dot(x) - b, 0, atol=1e-14) b = np.random.rand(n) x, info = lgmres(A, b, maxiter=10) assert_equal(info, 0) assert_allclose(A.dot(x) - b, 0, atol=1e-14) x, info = lgmres(A, b, tol=0, maxiter=10) if info == 0: assert_allclose(A.dot(x) - b, 0, atol=1e-14) def test_nans(self): A = eye(3, format='lil') A[1,1] = np.nan b = np.ones(3) with suppress_warnings() as sup: 
sup.filter(DeprecationWarning, ".*called without specifying.*") x, info = lgmres(A, b, tol=0, maxiter=10) assert_equal(info, 1) def test_breakdown_with_outer_v(self): A = np.array([[1, 2], [3, 4]], dtype=float) b = np.array([1, 2]) x = np.linalg.solve(A, b) v0 = np.array([1, 0]) # The inner iteration should converge to the correct solution, # since it's in the outer vector list with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") xp, info = lgmres(A, b, outer_v=[(v0, None), (x, None)], maxiter=1) assert_allclose(xp, x, atol=1e-12) def test_breakdown_underdetermined(self): # Should find LSQ solution in the Krylov span in one inner # iteration, despite solver breakdown from nilpotent A. A = np.array([[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]], dtype=float) bs = [ np.array([1, 1, 1, 1]), np.array([1, 1, 1, 0]), np.array([1, 1, 0, 0]), np.array([1, 0, 0, 0]), ] for b in bs: with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") xp, info = lgmres(A, b, maxiter=1) resp = np.linalg.norm(A.dot(xp) - b) K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))] y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1) x = K.dot(y) res = np.linalg.norm(A.dot(x) - b) assert_allclose(resp, res, err_msg=repr(b)) def test_denormals(self): # Check that no warnings are emitted if the matrix contains # numbers for which 1/x has no float representation, and that # the solver behaves properly. A = np.array([[1, 2], [3, 4]], dtype=float) A *= 100 * np.nextafter(0, 1) b = np.array([1, 1]) with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") xp, info = lgmres(A, b) if info == 0: assert_allclose(A.dot(xp), b)
6,722
31.955882
81
py
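test_outer_v above exercises lgmres augmentation-vector recycling; the same pattern outside the test harness might look like this sketch (the problem setup and tolerances are illustrative choices):

import numpy as np
from scipy.sparse import eye, rand
from scipy.sparse.linalg import lgmres

np.random.seed(1)
A = eye(200) + rand(200, 200, density=0.02, random_state=1)
b = np.random.rand(200)

outer_v = []  # populated in-place; carries approximate error vectors
x1, info1 = lgmres(A, b, outer_k=3, outer_v=outer_v, tol=1e-8, atol=1e-8)

# A nearby right-hand side can reuse the stored augmentation vectors.
x2, info2 = lgmres(A, b + 1e-3, outer_k=3, outer_v=outer_v,
                   tol=1e-8, atol=1e-8)
print(info1, info2, len(outer_v))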
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/tests/demo_lgmres.py
from __future__ import division, print_function, absolute_import

import scipy.sparse.linalg as la
import scipy.sparse as sp
import scipy.io as io
import numpy as np
import sys

#problem = "SPARSKIT/drivcav/e05r0100"
problem = "SPARSKIT/drivcav/e05r0200"
#problem = "Harwell-Boeing/sherman/sherman1"
#problem = "misc/hamm/add32"

mm = np.lib._datasource.Repository('ftp://math.nist.gov/pub/MatrixMarket2/')
f = mm.open('%s.mtx.gz' % problem)
Am = io.mmread(f).tocsr()
f.close()

f = mm.open('%s_rhs1.mtx.gz' % problem)
b = np.array(io.mmread(f)).ravel()
f.close()

count = [0]


def matvec(v):
    count[0] += 1
    sys.stderr.write('%d\r' % count[0])
    return Am*v


A = la.LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)

M = 100

print("MatrixMarket problem %s" % problem)
print("Invert %d x %d matrix; nnz = %d" % (Am.shape[0], Am.shape[1], Am.nnz))

count[0] = 0
x0, info = la.gmres(A, b, restrt=M, tol=1e-14)
count_0 = count[0]
err0 = np.linalg.norm(Am*x0 - b) / np.linalg.norm(b)
print("GMRES(%d):" % M, count_0, "matvecs, residual", err0)
if info != 0:
    print("Didn't converge")

count[0] = 0
x1, info = la.lgmres(A, b, inner_m=M-6*2, outer_k=6, tol=1e-14)
count_1 = count[0]
err1 = np.linalg.norm(Am*x1 - b) / np.linalg.norm(b)
print("LGMRES(%d,6) [same memory req.]:" % (M-2*6), count_1,
      "matvecs, residual:", err1)
if info != 0:
    print("Didn't converge")

count[0] = 0
x2, info = la.lgmres(A, b, inner_m=M-6, outer_k=6, tol=1e-14)
count_2 = count[0]
err2 = np.linalg.norm(Am*x2 - b) / np.linalg.norm(b)
print("LGMRES(%d,6) [same subspace size]:" % (M-6), count_2,
      "matvecs, residual:", err2)
if info != 0:
    print("Didn't converge")
1,680
25.265625
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/tests/test_lsqr.py
from __future__ import division, print_function, absolute_import

import numpy as np
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
                           assert_array_almost_equal)

from scipy._lib.six import xrange
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg import lsqr
from time import time

# Set up a test problem
n = 35
G = np.eye(n)
normal = np.random.normal
norm = np.linalg.norm

for jj in xrange(5):
    gg = normal(size=n)
    hh = gg * gg.T
    G += (hh + hh.T) * 0.5
    G += normal(size=n) * normal(size=n)

b = normal(size=n)

tol = 1e-10
show = False
maxit = None


def test_basic():
    b_copy = b.copy()
    X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
    assert_(np.all(b_copy == b))

    svx = np.linalg.solve(G, b)
    xo = X[0]
    assert_(norm(svx - xo) < 1e-5)


def test_gh_2466():
    row = np.array([0, 0])
    col = np.array([0, 1])
    val = np.array([1, -1])
    A = scipy.sparse.coo_matrix((val, (row, col)), shape=(1, 2))
    b = np.asarray([4])
    lsqr(A, b)


def test_well_conditioned_problems():
    # Test that the sparse lsqr solver returns the right solution
    # on various problems with different random seeds.
    # This is a non-regression test for a potential ZeroDivisionError
    # raised when computing the `test2` & `test3` convergence conditions.
    n = 10
    A_sparse = scipy.sparse.eye(n, n)
    A_dense = A_sparse.toarray()

    with np.errstate(invalid='raise'):
        for seed in range(30):
            rng = np.random.RandomState(seed + 10)
            beta = rng.rand(n)
            beta[beta == 0] = 0.00001  # ensure that all the betas are not null
            b = A_sparse * beta[:, np.newaxis]
            output = lsqr(A_sparse, b, show=show)

            # Check that the termination condition corresponds to an
            # approximate solution to Ax = b
            assert_equal(output[1], 1)
            solution = output[0]

            # Check that we recover the ground truth solution
            assert_array_almost_equal(solution, beta)

            # Sanity check: compare to the dense array solver
            reference_solution = np.linalg.solve(A_dense, b).ravel()
            assert_array_almost_equal(solution, reference_solution)


def test_b_shapes():
    # Test b being a scalar.
    A = np.array([[1.0, 2.0]])
    b = 3.0
    x = lsqr(A, b)[0]
    assert_almost_equal(norm(A.dot(x) - b), 0)

    # Test b being a column vector.
    A = np.eye(10)
    b = np.ones((10, 1))
    x = lsqr(A, b)[0]
    assert_almost_equal(norm(A.dot(x) - b.ravel()), 0)


def test_initialization():
    # Test the default setting is the same as zeros
    b_copy = b.copy()
    x_ref = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
    x0 = np.zeros(x_ref[0].shape)
    x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
    assert_(np.all(b_copy == b))
    assert_array_almost_equal(x_ref[0], x[0])

    # Test warm-start with single iteration
    x0 = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=1)[0]
    x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
    assert_array_almost_equal(x_ref[0], x[0])
    assert_(np.all(b_copy == b))


if __name__ == "__main__":
    svx = np.linalg.solve(G, b)

    tic = time()
    X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
    xo = X[0]
    phio = X[3]
    psio = X[7]
    k = X[2]
    chio = X[8]
    mg = np.amax(G - G.T)
    if mg > 1e-14:
        sym = 'No'
    else:
        sym = 'Yes'

    print('LSQR')
    print("Is linear operator symmetric? " + sym)
    print("n: %3g  iterations:   %3g" % (n, k))
    print("Norms computed in %.2fs by LSQR" % (time() - tic))
    print(" ||x||  %9.4e  ||r|| %9.4e  ||Ar||  %9.4e " % (chio, phio, psio))
    print("Residual norms computed directly:")
    print(" ||x||  %9.4e  ||r|| %9.4e  ||Ar||  %9.4e" % (norm(xo),
                                                          norm(G*xo - b),
                                                          norm(G.T*(G*xo-b))))
    print("Direct solution norms:")
    print(" ||x||  %9.4e  ||r|| %9.4e " % (norm(svx), norm(G*svx - b)))
    print("")
    print(" || x_{direct} - x_{LSQR}|| %9.4e " % norm(svx-xo))
    print("")
4,268
29.492857
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/tests/test_iterative.py
""" Test functions for the sparse.linalg.isolve module """ from __future__ import division, print_function, absolute_import import itertools import numpy as np from numpy.testing import (assert_equal, assert_array_equal, assert_, assert_allclose) import pytest from pytest import raises as assert_raises from scipy._lib._numpy_compat import suppress_warnings from numpy import zeros, arange, array, ones, eye, iscomplexobj from scipy.linalg import norm from scipy.sparse import spdiags, csr_matrix, SparseEfficiencyWarning from scipy.sparse.linalg import LinearOperator, aslinearoperator from scipy.sparse.linalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk # TODO check that method preserve shape and type # TODO test both preconditioner methods class Case(object): def __init__(self, name, A, skip=None): self.name = name self.A = A if skip is None: self.skip = [] else: self.skip = skip def __repr__(self): return "<%s>" % self.name class IterativeParams(object): def __init__(self): # list of tuples (solver, symmetric, positive_definite ) solvers = [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk] sym_solvers = [minres, cg] posdef_solvers = [cg] real_solvers = [minres] self.solvers = solvers # list of tuples (A, symmetric, positive_definite ) self.cases = [] # Symmetric and Positive Definite N = 40 data = ones((3,N)) data[0,:] = 2 data[1,:] = -1 data[2,:] = -1 Poisson1D = spdiags(data, [0,-1,1], N, N, format='csr') self.Poisson1D = Case("poisson1d", Poisson1D) self.cases.append(Case("poisson1d", Poisson1D)) # note: minres fails for single precision self.cases.append(Case("poisson1d", Poisson1D.astype('f'), skip=[minres])) # Symmetric and Negative Definite self.cases.append(Case("neg-poisson1d", -Poisson1D, skip=posdef_solvers)) # note: minres fails for single precision self.cases.append(Case("neg-poisson1d", (-Poisson1D).astype('f'), skip=posdef_solvers + [minres])) # Symmetric and Indefinite data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]],dtype='d') RandDiag = spdiags(data, [0], 10, 10, format='csr') self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers)) self.cases.append(Case("rand-diag", RandDiag.astype('f'), skip=posdef_solvers)) # Random real-valued np.random.seed(1234) data = np.random.rand(4, 4) self.cases.append(Case("rand", data, skip=posdef_solvers+sym_solvers)) self.cases.append(Case("rand", data.astype('f'), skip=posdef_solvers+sym_solvers)) # Random symmetric real-valued np.random.seed(1234) data = np.random.rand(4, 4) data = data + data.T self.cases.append(Case("rand-sym", data, skip=posdef_solvers)) self.cases.append(Case("rand-sym", data.astype('f'), skip=posdef_solvers)) # Random pos-def symmetric real np.random.seed(1234) data = np.random.rand(9, 9) data = np.dot(data.conj(), data.T) self.cases.append(Case("rand-sym-pd", data)) # note: minres fails for single precision self.cases.append(Case("rand-sym-pd", data.astype('f'), skip=[minres])) # Random complex-valued np.random.seed(1234) data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4) self.cases.append(Case("rand-cmplx", data, skip=posdef_solvers+sym_solvers+real_solvers)) self.cases.append(Case("rand-cmplx", data.astype('F'), skip=posdef_solvers+sym_solvers+real_solvers)) # Random hermitian complex-valued np.random.seed(1234) data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4) data = data + data.T.conj() self.cases.append(Case("rand-cmplx-herm", data, skip=posdef_solvers+real_solvers)) self.cases.append(Case("rand-cmplx-herm", data.astype('F'), 
skip=posdef_solvers+real_solvers)) # Random pos-def hermitian complex-valued np.random.seed(1234) data = np.random.rand(9, 9) + 1j*np.random.rand(9, 9) data = np.dot(data.conj(), data.T) self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers)) self.cases.append(Case("rand-cmplx-sym-pd", data.astype('F'), skip=real_solvers)) # Non-symmetric and Positive Definite # # cgs, qmr, and bicg fail to converge on this one # -- algorithmic limitation apparently data = ones((2,10)) data[0,:] = 2 data[1,:] = -1 A = spdiags(data, [0,-1], 10, 10, format='csr') self.cases.append(Case("nonsymposdef", A, skip=sym_solvers+[cgs, qmr, bicg])) self.cases.append(Case("nonsymposdef", A.astype('F'), skip=sym_solvers+[cgs, qmr, bicg])) params = IterativeParams() def check_maxiter(solver, case): A = case.A tol = 1e-12 b = arange(A.shape[0], dtype=float) x0 = 0*b residuals = [] def callback(x): residuals.append(norm(b - case.A*x)) x, info = solver(A, b, x0=x0, tol=tol, maxiter=1, callback=callback) assert_equal(len(residuals), 1) assert_equal(info, 1) def test_maxiter(): case = params.Poisson1D for solver in params.solvers: if solver in case.skip: continue with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") check_maxiter(solver, case) def assert_normclose(a, b, tol=1e-8): residual = norm(a - b) tolerance = tol*norm(b) msg = "residual (%g) not smaller than tolerance %g" % (residual, tolerance) assert_(residual < tolerance, msg=msg) def check_convergence(solver, case): A = case.A if A.dtype.char in "dD": tol = 1e-8 else: tol = 1e-2 b = arange(A.shape[0], dtype=A.dtype) x0 = 0*b x, info = solver(A, b, x0=x0, tol=tol) assert_array_equal(x0, 0*b) # ensure that x0 is not overwritten assert_equal(info,0) assert_normclose(A.dot(x), b, tol=tol) def test_convergence(): for solver in params.solvers: for case in params.cases: if solver in case.skip: continue with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") check_convergence(solver, case) def check_precond_dummy(solver, case): tol = 1e-8 def identity(b,which=None): """trivial preconditioner""" return b A = case.A M,N = A.shape D = spdiags([1.0/A.diagonal()], [0], M, N) b = arange(A.shape[0], dtype=float) x0 = 0*b precond = LinearOperator(A.shape, identity, rmatvec=identity) if solver is qmr: x, info = solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol) else: x, info = solver(A, b, M=precond, x0=x0, tol=tol) assert_equal(info,0) assert_normclose(A.dot(x), b, tol) A = aslinearoperator(A) A.psolve = identity A.rpsolve = identity x, info = solver(A, b, x0=x0, tol=tol) assert_equal(info,0) assert_normclose(A*x, b, tol=tol) def test_precond_dummy(): case = params.Poisson1D for solver in params.solvers: if solver in case.skip: continue with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") check_precond_dummy(solver, case) def check_precond_inverse(solver, case): tol = 1e-8 def inverse(b,which=None): """inverse preconditioner""" A = case.A if not isinstance(A, np.ndarray): A = A.todense() return np.linalg.solve(A, b) def rinverse(b,which=None): """inverse preconditioner""" A = case.A if not isinstance(A, np.ndarray): A = A.todense() return np.linalg.solve(A.T, b) matvec_count = [0] def matvec(b): matvec_count[0] += 1 return case.A.dot(b) def rmatvec(b): matvec_count[0] += 1 return case.A.T.dot(b) b = arange(case.A.shape[0], dtype=float) x0 = 0*b A = LinearOperator(case.A.shape, matvec, rmatvec=rmatvec) precond = LinearOperator(case.A.shape, 
inverse, rmatvec=rinverse)

    # Solve with preconditioner
    matvec_count = [0]
    x, info = solver(A, b, M=precond, x0=x0, tol=tol)

    assert_equal(info, 0)
    assert_normclose(case.A.dot(x), b, tol)

    # Solution should be nearly instant
    assert_(matvec_count[0] <= 3, repr(matvec_count))


def test_precond_inverse():
    case = params.Poisson1D
    for solver in params.solvers:
        if solver in case.skip:
            continue
        if solver is qmr:
            continue
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            check_precond_inverse(solver, case)


def test_gmres_basic():
    A = np.vander(np.arange(10) + 1)[:, ::-1]
    b = np.zeros(10)
    b[0] = 1
    x = np.linalg.solve(A, b)

    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x_gm, err = gmres(A, b, restart=5, maxiter=1)

    assert_allclose(x_gm[0], 0.359, rtol=1e-2)


def test_reentrancy():
    non_reentrant = [cg, cgs, bicg, bicgstab, gmres, qmr]
    reentrant = [lgmres, minres, gcrotmk]
    for solver in reentrant + non_reentrant:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            _check_reentrancy(solver, solver in reentrant)


def _check_reentrancy(solver, is_reentrant):
    def matvec(x):
        A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]])
        y, info = solver(A, x)
        assert_equal(info, 0)
        return y
    b = np.array([1, 1./2, 1./3])
    op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec,
                        dtype=b.dtype)

    if not is_reentrant:
        assert_raises(RuntimeError, solver, op, b)
    else:
        y, info = solver(op, b)
        assert_equal(info, 0)
        assert_allclose(y, [1, 1, 1])


@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr,
                                    lgmres, gcrotmk])
def test_atol(solver):
    # TODO: minres. It didn't historically use absolute tolerances, so
    # fixing it is less urgent.

    np.random.seed(1234)
    A = np.random.rand(10, 10)
    A = A.dot(A.T) + 10 * np.eye(10)
    b = 1e3 * np.random.rand(10)
    b_norm = np.linalg.norm(b)

    tols = np.r_[0, np.logspace(np.log10(1e-10), np.log10(1e2), 7), np.inf]

    # Check effect of badly scaled preconditioners
    M0 = np.random.randn(10, 10)
    M0 = M0.dot(M0.T)
    Ms = [None, 1e-6 * M0, 1e6 * M0]

    for M, tol, atol in itertools.product(Ms, tols, tols):
        if tol == 0 and atol == 0:
            continue

        if solver is qmr:
            if M is not None:
                M = aslinearoperator(M)
                M2 = aslinearoperator(np.eye(10))
            else:
                M2 = None
            x, info = solver(A, b, M1=M, M2=M2, tol=tol, atol=atol)
        else:
            x, info = solver(A, b, M=M, tol=tol, atol=atol)
        assert_equal(info, 0)

        residual = A.dot(x) - b
        err = np.linalg.norm(residual)
        atol2 = tol * b_norm
        assert_(err <= max(atol, atol2))


@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr,
                                    minres, lgmres, gcrotmk])
def test_zero_rhs(solver):
    np.random.seed(1234)
    A = np.random.rand(10, 10)
    A = A.dot(A.T) + 10 * np.eye(10)

    b = np.zeros(10)
    tols = np.r_[np.logspace(np.log10(1e-10), np.log10(1e2), 7)]

    for tol in tols:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")

            x, info = solver(A, b, tol=tol)
            assert_equal(info, 0)
            assert_allclose(x, 0, atol=1e-15)

            x, info = solver(A, b, tol=tol, x0=ones(10))
            assert_equal(info, 0)
            assert_allclose(x, 0, atol=tol)

            if solver is not minres:
                x, info = solver(A, b, tol=tol, atol=0, x0=ones(10))
                if info == 0:
                    assert_allclose(x, 0)

                x, info = solver(A, b, tol=tol, atol=tol)
                assert_equal(info, 0)
                assert_allclose(x, 0, atol=1e-300)

                x, info = solver(A, b, tol=tol, atol=0)
                assert_equal(info, 0)
                assert_allclose(x, 0, atol=1e-300)


#------------------------------------------------------------------------------

class TestQMR(object):
    def test_leftright_precond(self):
        """Check that QMR works with left and right preconditioners"""
        from scipy.sparse.linalg.dsolve import splu
        from scipy.sparse.linalg.interface import LinearOperator

        n = 100

        dat = ones(n)

        A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1], n, n)
        b = arange(n, dtype='d')

        L = spdiags([-dat/2, dat], [-1,0], n, n)
        U = spdiags([4*dat, -dat], [0,1], n, n)

        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format")
            L_solver = splu(L)
            U_solver = splu(U)

        def L_solve(b):
            return L_solver.solve(b)

        def U_solve(b):
            return U_solver.solve(b)

        def LT_solve(b):
            return L_solver.solve(b, 'T')

        def UT_solve(b):
            return U_solver.solve(b, 'T')

        M1 = LinearOperator((n, n), matvec=L_solve, rmatvec=LT_solve)
        M2 = LinearOperator((n, n), matvec=U_solve, rmatvec=UT_solve)

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)

        assert_equal(info, 0)
        assert_normclose(A*x, b, tol=1e-8)


class TestGMRES(object):
    def test_callback(self):

        def store_residual(r, rvec):
            rvec[rvec.nonzero()[0].max()+1] = r

        # Define, A,b
        A = csr_matrix(array([[-2,1,0,0,0,0],[1,-2,1,0,0,0],[0,1,-2,1,0,0],[0,0,1,-2,1,0],[0,0,0,1,-2,1],[0,0,0,0,1,-2]]))
        b = ones((A.shape[0],))
        maxiter = 1
        rvec = zeros(maxiter+1)
        rvec[0] = 1.0
        callback = lambda r: store_residual(r, rvec)
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16, maxiter=maxiter, callback=callback)

        # Expected output from Scipy 1.0.0
        assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10)

        # Test preconditioned callback
        M = 1e-3 * np.eye(A.shape[0])
        rvec = zeros(maxiter+1)
        rvec[0] = 1.0
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, flag = gmres(A, b, M=M, tol=1e-16, maxiter=maxiter, callback=callback)

        # Expected output from Scipy 1.0.0 (callback has preconditioned residual!)
        assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]), rtol=1e-10)

    def test_abi(self):
        # Check we don't segfault on gmres with complex argument
        A = eye(2)
        b = ones(2)
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            r_x, r_info = gmres(A, b)
            r_x = r_x.astype(complex)
            x, info = gmres(A.astype(complex), b.astype(complex))

        assert_(iscomplexobj(x))
        assert_allclose(r_x, x)
        assert_(r_info == info)

    def test_atol_legacy(self):
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")

            # Check the strange legacy behavior: the tolerance is interpreted
            # as atol, but only for the initial residual
            A = eye(2)
            b = 1e-6 * ones(2)
            x, info = gmres(A, b, tol=1e-5)
            assert_array_equal(x, np.zeros(2))

            A = eye(2)
            b = ones(2)
            x, info = gmres(A, b, tol=1e-5)
            assert_(np.linalg.norm(A.dot(x) - b) <= 1e-5*np.linalg.norm(b))
            assert_allclose(x, b, atol=0, rtol=1e-8)

            A = np.random.rand(30, 30)
            b = 1e-6 * ones(30)
            x, info = gmres(A, b, tol=1e-7, restart=20)
            assert_(np.linalg.norm(A.dot(x) - b) > 1e-7)

        A = eye(2)
        b = 1e-10 * ones(2)
        x, info = gmres(A, b, tol=1e-8, atol=0)
        assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))

    def test_defective_precond_breakdown(self):
        # Breakdown due to defective preconditioner
        M = np.eye(3)
        M[2,2] = 0

        b = np.array([0, 1, 1])
        x = np.array([1, 0, 0])
        A = np.diag([2, 3, 4])

        x, info = gmres(A, b, x0=x, M=M, tol=1e-15, atol=0)

        # Should not return nans, nor terminate with false success
        assert_(not np.isnan(x).any())
        if info == 0:
            assert_(np.linalg.norm(A.dot(x) - b) <= 1e-15*np.linalg.norm(b))

        # The solution should be OK outside null space of M
        assert_allclose(M.dot(A.dot(x)), M.dot(b))

    def test_defective_matrix_breakdown(self):
        # Breakdown due to defective matrix
        A = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
        b = np.array([1, 0, 1])
        x, info = gmres(A, b, tol=1e-8, atol=0)

        # Should not return nans, nor terminate with false success
        assert_(not np.isnan(x).any())
        if info == 0:
            assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))

        # The solution should be OK outside null space of A
        assert_allclose(A.dot(A.dot(x)), A.dot(b))
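# A minimal standalone sketch of the stopping rule exercised by test_atol
# above: the returned iterate is expected to satisfy
# ||b - A x|| <= max(atol, tol * ||b||).  Assumes a scipy version that
# accepts the `atol` keyword; the SPD matrix below is illustrative, not a
# fixture from this suite.
import numpy as np
from scipy.sparse.linalg import cg

rng = np.random.RandomState(0)
A = rng.rand(10, 10)
A = A.dot(A.T) + 10 * np.eye(10)   # symmetric positive definite by construction
b = rng.rand(10)
x, info = cg(A, b, tol=1e-8, atol=1e-10)
assert info == 0
assert np.linalg.norm(b - A.dot(x)) <= max(1e-10, 1e-8 * np.linalg.norm(b))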
18,459
31.272727
122
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/tests/test_minres.py
from __future__ import division, print_function, absolute_import

import numpy as np
from numpy.testing import assert_equal, assert_allclose, assert_
from scipy.sparse.linalg.isolve import minres

import pytest
from pytest import raises as assert_raises
from .test_iterative import assert_normclose


def get_sample_problem():
    # A random 10 x 10 symmetric matrix
    np.random.seed(1234)
    matrix = np.random.rand(10, 10)
    matrix = matrix + matrix.T
    # A random vector of length 10
    vector = np.random.rand(10)
    return matrix, vector


def test_singular():
    A, b = get_sample_problem()
    A[0, ] = 0
    b[0] = 0
    xp, info = minres(A, b)
    assert_equal(info, 0)
    assert_normclose(A.dot(xp), b, tol=1e-5)


@pytest.mark.skip(reason="Skip Until gh #6843 is fixed")
def test_gh_6843():
    """check if x0 is being used by tracing iterates"""
    A, b = get_sample_problem()
    # Random x0 to feed minres
    np.random.seed(12345)
    x0 = np.random.rand(10)
    trace = []

    def trace_iterates(xk):
        trace.append(xk)
    minres(A, b, x0=x0, callback=trace_iterates)
    trace_with_x0 = trace

    trace = []
    minres(A, b, callback=trace_iterates)
    assert_(not np.array_equal(trace_with_x0[0], trace[0]))


def test_shift():
    A, b = get_sample_problem()
    shift = 0.5
    shifted_A = A - shift * np.eye(10)
    x1, info1 = minres(A, b, shift=shift)
    x2, info2 = minres(shifted_A, b)
    assert_equal(info1, 0)
    assert_allclose(x1, x2, rtol=1e-5)


def test_asymmetric_fail():
    """Asymmetric matrix should raise `ValueError` when check=True"""
    A, b = get_sample_problem()
    A[1, 2] = 1
    A[2, 1] = 2
    with assert_raises(ValueError):
        xp, info = minres(A, b, check=True)
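# A small hedged illustration of the `shift` identity checked in test_shift
# above: minres(A, b, shift=s) solves (A - s*I) x = b.  The diagonal matrix
# is an illustrative assumption (any symmetric A would do).
import numpy as np
from scipy.sparse.linalg import minres

A = np.diag([2.0, 3.0, 4.0])
b = np.array([1.0, 1.0, 1.0])
s = 0.5
x, info = minres(A, b, shift=s, tol=1e-10)
assert info == 0
assert np.allclose((A - s * np.eye(3)).dot(x), b, atol=1e-7)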
1,743
25.424242
69
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/tests/test_utils.py
from __future__ import division, print_function, absolute_import

import numpy as np
from pytest import raises as assert_raises

from scipy.sparse.linalg import utils


def test_make_system_bad_shape():
    assert_raises(ValueError, utils.make_system, np.zeros((5,3)), None, np.zeros(4), np.zeros(4))
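# Hedged companion sketch: make_system is the internal helper the iterative
# solvers use to normalize (A, M, x0, b).  The 5-tuple return convention
# shown here is an assumption about this scipy version's private API and may
# change between releases.
import numpy as np
from scipy.sparse.linalg import utils

A, M, x0, b, postprocess = utils.make_system(np.eye(3), None, None, np.ones(3))
assert x0.shape == (3,)   # x0 defaults to the zero vector when not supplied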
301
26.454545
97
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/dsolve/setup.py
from __future__ import division, print_function, absolute_import

from os.path import join, dirname
import sys
import os
import glob


def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info
    from scipy._build_utils import get_sgemv_fix
    from scipy._build_utils import numpy_nodepr_api

    config = Configuration('dsolve',parent_package,top_path)
    config.add_data_dir('tests')

    lapack_opt = get_info('lapack_opt',notfound_action=2)
    if sys.platform == 'win32':
        superlu_defs = [('NO_TIMER',1)]
    else:
        superlu_defs = []
    superlu_defs.append(('USE_VENDOR_BLAS',1))

    superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')

    sources = list(glob.glob(join(superlu_src, '*.c')))
    headers = list(glob.glob(join(superlu_src, '*.h')))

    config.add_library('superlu_src',
                       sources=sources,
                       macros=superlu_defs,
                       include_dirs=[superlu_src],
                       )

    # Extension
    ext_sources = ['_superlumodule.c',
                   '_superlu_utils.c',
                   '_superluobject.c']
    ext_sources += get_sgemv_fix(lapack_opt)

    config.add_extension('_superlu',
                         sources=ext_sources,
                         libraries=['superlu_src'],
                         depends=(sources + headers),
                         extra_info=lapack_opt,
                         **numpy_nodepr_api
                         )

    # Add license files
    config.add_data_files('SuperLU/License.txt')

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
1,781
29.20339
64
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/dsolve/linsolve.py
from __future__ import division, print_function, absolute_import

from warnings import warn

import numpy as np
from numpy import asarray
from scipy.sparse import (isspmatrix_csc, isspmatrix_csr, isspmatrix,
                          SparseEfficiencyWarning, csc_matrix, csr_matrix)
from scipy.linalg import LinAlgError

from . import _superlu

noScikit = False
try:
    import scikits.umfpack as umfpack
except ImportError:
    noScikit = True

useUmfpack = not noScikit

__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
           'MatrixRankWarning', 'spsolve_triangular']


class MatrixRankWarning(UserWarning):
    pass


def use_solver(**kwargs):
    """
    Select default sparse direct solver to be used.

    Parameters
    ----------
    useUmfpack : bool, optional
        Use UMFPACK over SuperLU. Has effect only if scikits.umfpack is
        installed. Default: True
    assumeSortedIndices : bool, optional
        Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix.
        Has effect only if useUmfpack is True and scikits.umfpack is installed.
        Default: False

    Notes
    -----
    The default sparse solver is umfpack when available
    (scikits.umfpack is installed). This can be changed by passing
    useUmfpack = False, which then causes the always present SuperLU
    based solver to be used.

    Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If
    sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
    to gain some speed.

    """
    if 'useUmfpack' in kwargs:
        globals()['useUmfpack'] = kwargs['useUmfpack']
    if useUmfpack and 'assumeSortedIndices' in kwargs:
        umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])


def _get_umf_family(A):
    """Get umfpack family string given the sparse matrix dtype."""
    _families = {
        (np.float64, np.int32): 'di',
        (np.complex128, np.int32): 'zi',
        (np.float64, np.int64): 'dl',
        (np.complex128, np.int64): 'zl'
    }

    f_type = np.sctypeDict[A.dtype.name]
    i_type = np.sctypeDict[A.indices.dtype.name]

    try:
        family = _families[(f_type, i_type)]

    except KeyError:
        msg = 'only float64 or complex128 matrices with int32 or int64' \
            ' indices are supported! (got: matrix: %s, indices: %s)' \
            % (f_type, i_type)
        raise ValueError(msg)

    return family


def spsolve(A, b, permc_spec=None, use_umfpack=True):
    """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.

    Parameters
    ----------
    A : ndarray or sparse matrix
        The square matrix A will be converted into CSC or CSR form
    b : ndarray or sparse matrix
        The matrix or vector representing the right hand side of the equation.
        If a vector, b.shape must be (n,) or (n, 1).
    permc_spec : str, optional
        How to permute the columns of the matrix for sparsity preservation.
        (default: 'COLAMD')

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering
    use_umfpack : bool, optional
        if True (default) then use umfpack for the solution.  This is
        only referenced if b is a vector and ``scikit-umfpack`` is installed.

    Returns
    -------
    x : ndarray or sparse matrix
        the solution of the sparse linear equation.
        If b is a vector, then x is a vector of size A.shape[1]
        If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])

    Notes
    -----
    For solving the matrix expression AX = B, this solver assumes the resulting
    matrix X is sparse, as is often the case for very sparse inputs.  If the
    resulting X is dense, the construction of this sparse result will be
    relatively expensive.  In that case, consider converting A to a dense
    matrix and using scipy.linalg.solve or its variants.

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import spsolve
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve(A, B)
    >>> np.allclose(A.dot(x).todense(), B.todense())
    True
    """
    if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
        A = csc_matrix(A)
        warn('spsolve requires A be CSC or CSR matrix format',
             SparseEfficiencyWarning)

    # b is a vector only if b has shape (n,) or (n, 1)
    b_is_sparse = isspmatrix(b)
    if not b_is_sparse:
        b = asarray(b)
    b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))

    A.sort_indices()
    A = A.asfptype()  # upcast to a floating point format
    result_dtype = np.promote_types(A.dtype, b.dtype)
    if A.dtype != result_dtype:
        A = A.astype(result_dtype)
    if b.dtype != result_dtype:
        b = b.astype(result_dtype)

    # validate input shapes
    M, N = A.shape
    if (M != N):
        raise ValueError("matrix must be square (has shape %s)" % ((M, N),))

    if M != b.shape[0]:
        raise ValueError("matrix - rhs dimension mismatch (%s - %s)"
                         % (A.shape, b.shape[0]))

    use_umfpack = use_umfpack and useUmfpack

    if b_is_vector and use_umfpack:
        if b_is_sparse:
            b_vec = b.toarray()
        else:
            b_vec = b
        b_vec = asarray(b_vec, dtype=A.dtype).ravel()

        if noScikit:
            raise RuntimeError('Scikits.umfpack not installed.')

        if A.dtype.char not in 'dD':
            raise ValueError("convert matrix data to double, please, using"
                             " .astype(), or set linsolve.useUmfpack = False")

        umf = umfpack.UmfpackContext(_get_umf_family(A))
        x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
                         autoTranspose=True)
    else:
        if b_is_vector and b_is_sparse:
            b = b.toarray()
            b_is_sparse = False

        if not b_is_sparse:
            if isspmatrix_csc(A):
                flag = 1  # CSC format
            else:
                flag = 0  # CSR format

            options = dict(ColPerm=permc_spec)
            x, info = _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr,
                                    b, flag, options=options)
            if info != 0:
                warn("Matrix is exactly singular", MatrixRankWarning)
                x.fill(np.nan)
            if b_is_vector:
                x = x.ravel()
        else:
            # b is sparse
            Afactsolve = factorized(A)

            if not isspmatrix_csc(b):
                warn('spsolve is more efficient when sparse b '
                     'is in the CSC matrix format', SparseEfficiencyWarning)
                b = csc_matrix(b)

            # Create a sparse output matrix by repeatedly applying
            # the sparse factorization to solve columns of b.
            data_segs = []
            row_segs = []
            col_segs = []
            for j in range(b.shape[1]):
                bj = b[:, j].A.ravel()
                xj = Afactsolve(bj)
                w = np.flatnonzero(xj)
                segment_length = w.shape[0]
                row_segs.append(w)
                col_segs.append(np.ones(segment_length, dtype=int)*j)
                data_segs.append(np.asarray(xj[w], dtype=A.dtype))
            sparse_data = np.concatenate(data_segs)
            sparse_row = np.concatenate(row_segs)
            sparse_col = np.concatenate(col_segs)
            x = A.__class__((sparse_data, (sparse_row, sparse_col)),
                            shape=b.shape, dtype=A.dtype)

    return x


def splu(A, permc_spec=None, diag_pivot_thresh=None,
         relax=None, panel_size=None, options=dict()):
    """
    Compute the LU decomposition of a sparse, square matrix.

    Parameters
    ----------
    A : sparse matrix
        Sparse matrix to factorize. Should be in CSR or CSC format.
    permc_spec : str, optional
        How to permute the columns of the matrix for sparsity preservation.
        (default: 'COLAMD')

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering
    diag_pivot_thresh : float, optional
        Threshold used for a diagonal entry to be an acceptable pivot.
        See SuperLU user's guide for details [1]_
    relax : int, optional
        Expert option for customizing the degree of relaxing supernodes.
        See SuperLU user's guide for details [1]_
    panel_size : int, optional
        Expert option for customizing the panel size.
        See SuperLU user's guide for details [1]_
    options : dict, optional
        Dictionary containing additional expert options to SuperLU.
        See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
        for more details. For example, you can specify
        ``options=dict(Equil=False, IterRefine='SINGLE')``
        to turn equilibration off and perform a single iterative refinement.

    Returns
    -------
    invA : scipy.sparse.linalg.SuperLU
        Object, which has a ``solve`` method.

    See also
    --------
    spilu : incomplete LU decomposition

    Notes
    -----
    This function uses the SuperLU library.

    References
    ----------
    .. [1] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import splu
    >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
    >>> B = splu(A)
    >>> x = np.array([1., 2., 3.], dtype=float)
    >>> B.solve(x)
    array([ 1. , -3. , -1.5])
    >>> A.dot(B.solve(x))
    array([ 1.,  2.,  3.])
    >>> B.solve(A.dot(x))
    array([ 1.,  2.,  3.])
    """

    if not isspmatrix_csc(A):
        A = csc_matrix(A)
        warn('splu requires CSC matrix format', SparseEfficiencyWarning)

    A.sort_indices()
    A = A.asfptype()  # upcast to a floating point format

    M, N = A.shape
    if (M != N):
        raise ValueError("can only factor square matrices")  # is this true?

    _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
                    PanelSize=panel_size, Relax=relax)
    if options is not None:
        _options.update(options)
    return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
                          ilu=False, options=_options)


def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
          diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
    """
    Compute an incomplete LU decomposition for a sparse, square matrix.

    The resulting object is an approximation to the inverse of `A`.

    Parameters
    ----------
    A : (N, N) array_like
        Sparse matrix to factorize
    drop_tol : float, optional
        Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
        (default: 1e-4)
    fill_factor : float, optional
        Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
    drop_rule : str, optional
        Comma-separated string of drop rules to use.
        Available rules: ``basic``, ``prows``, ``column``, ``area``,
        ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)

        See SuperLU documentation for details.

    Remaining other options
        Same as for `splu`

    Returns
    -------
    invA_approx : scipy.sparse.linalg.SuperLU
        Object, which has a ``solve`` method.

    See also
    --------
    splu : complete LU decomposition

    Notes
    -----
    To improve the approximation to the inverse, you may need to
    increase `fill_factor` AND decrease `drop_tol`.

    This function uses the SuperLU library.

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import spilu
    >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
    >>> B = spilu(A)
    >>> x = np.array([1., 2., 3.], dtype=float)
    >>> B.solve(x)
    array([ 1. , -3. , -1.5])
    >>> A.dot(B.solve(x))
    array([ 1.,  2.,  3.])
    >>> B.solve(A.dot(x))
    array([ 1.,  2.,  3.])
    """
    if not isspmatrix_csc(A):
        A = csc_matrix(A)
        warn('splu requires CSC matrix format', SparseEfficiencyWarning)

    A.sort_indices()
    A = A.asfptype()  # upcast to a floating point format

    M, N = A.shape
    if (M != N):
        raise ValueError("can only factor square matrices")  # is this true?

    _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
                    ILU_FillFactor=fill_factor,
                    DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
                    PanelSize=panel_size, Relax=relax)
    if options is not None:
        _options.update(options)
    return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
                          ilu=True, options=_options)


def factorized(A):
    """
    Return a function for solving a sparse linear system, with A pre-factorized.

    Parameters
    ----------
    A : (N, N) array_like
        Input.

    Returns
    -------
    solve : callable
        To solve the linear system of equations given in `A`, the `solve`
        callable should be passed an ndarray of shape (N,).

    Examples
    --------
    >>> from scipy.sparse.linalg import factorized
    >>> A = np.array([[ 3. ,  2. , -1. ],
    ...               [ 2. , -2. ,  4. ],
    ...               [-1. ,  0.5, -1. ]])
    >>> solve = factorized(A)  # Makes LU decomposition.
    >>> rhs1 = np.array([1, -2, 0])
    >>> solve(rhs1)  # Uses the LU factors.
    array([ 1., -2., -2.])

    """
    if useUmfpack:
        if noScikit:
            raise RuntimeError('Scikits.umfpack not installed.')

        if not isspmatrix_csc(A):
            A = csc_matrix(A)
            warn('splu requires CSC matrix format', SparseEfficiencyWarning)

        A = A.asfptype()  # upcast to a floating point format

        if A.dtype.char not in 'dD':
            raise ValueError("convert matrix data to double, please, using"
                             " .astype(), or set linsolve.useUmfpack = False")

        umf = umfpack.UmfpackContext(_get_umf_family(A))

        # Make LU decomposition.
        umf.numeric(A)

        def solve(b):
            return umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)

        return solve
    else:
        return splu(A).solve


def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False):
    """
    Solve the equation `A x = b` for `x`, assuming A is a triangular matrix.

    Parameters
    ----------
    A : (M, M) sparse matrix
        A sparse square triangular matrix. Should be in CSR format.
    b : (M,) or (M, N) array_like
        Right-hand side matrix in `A x = b`
    lower : bool, optional
        Whether `A` is a lower or upper triangular matrix.
        Default is lower triangular matrix.
    overwrite_A : bool, optional
        Allow changing `A`. The indices of `A` are going to be sorted and zero
        entries are going to be removed.
        Enabling gives a performance gain. Default is False.
    overwrite_b : bool, optional
        Allow overwriting data in `b`.
        Enabling gives a performance gain. Default is False.
        If `overwrite_b` is True, it should be ensured that
        `b` has an appropriate dtype to be able to store the result.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system `A x = b`.  Shape of return matches shape of `b`.

    Raises
    ------
    LinAlgError
        If `A` is singular or not triangular.
    ValueError
        If shape of `A` or shape of `b` do not match the requirements.

    Notes
    -----
    .. versionadded:: 0.19.0

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from scipy.sparse.linalg import spsolve_triangular
    >>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
    >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve_triangular(A, B)
    >>> np.allclose(A.dot(x), B)
    True
    """

    # Check the input for correct type and format.
    if not isspmatrix_csr(A):
        warn('CSR matrix format is required. Converting to CSR matrix.',
             SparseEfficiencyWarning)
        A = csr_matrix(A)
    elif not overwrite_A:
        A = A.copy()

    if A.shape[0] != A.shape[1]:
        raise ValueError(
            'A must be a square matrix but its shape is {}.'.format(A.shape))

    A.eliminate_zeros()
    A.sort_indices()

    b = np.asanyarray(b)

    if b.ndim not in [1, 2]:
        raise ValueError(
            'b must have 1 or 2 dims but its shape is {}.'.format(b.shape))
    if A.shape[0] != b.shape[0]:
        raise ValueError(
            'The size of the dimensions of A must be equal to '
            'the size of the first dimension of b but the shape of A is '
            '{} and the shape of b is {}.'.format(A.shape, b.shape))

    # Init x as (a copy of) b.
    x_dtype = np.result_type(A.data, b, np.float)
    if overwrite_b:
        if np.can_cast(b.dtype, x_dtype, casting='same_kind'):
            x = b
        else:
            raise ValueError(
                'Cannot overwrite b (dtype {}) with result '
                'of type {}.'.format(b.dtype, x_dtype))
    else:
        x = b.astype(x_dtype, copy=True)

    # Choose forward or backward order.
    if lower:
        row_indices = range(len(b))
    else:
        row_indices = range(len(b) - 1, -1, -1)

    # Fill x iteratively.
    for i in row_indices:

        # Get indices for i-th row.
        indptr_start = A.indptr[i]
        indptr_stop = A.indptr[i + 1]
        if lower:
            A_diagonal_index_row_i = indptr_stop - 1
            A_off_diagonal_indices_row_i = slice(indptr_start, indptr_stop - 1)
        else:
            A_diagonal_index_row_i = indptr_start
            A_off_diagonal_indices_row_i = slice(indptr_start + 1, indptr_stop)

        # Check regularity and triangularity of A.
        if indptr_stop <= indptr_start or A.indices[A_diagonal_index_row_i] < i:
            raise LinAlgError(
                'A is singular: diagonal {} is zero.'.format(i))
        if A.indices[A_diagonal_index_row_i] > i:
            raise LinAlgError(
                'A is not triangular: A[{}, {}] is nonzero.'
                ''.format(i, A.indices[A_diagonal_index_row_i]))

        # Incorporate off-diagonal entries.
        A_column_indices_in_row_i = A.indices[A_off_diagonal_indices_row_i]
        A_values_in_row_i = A.data[A_off_diagonal_indices_row_i]
        x[i] -= np.dot(x[A_column_indices_in_row_i].T, A_values_in_row_i)

        # Compute i-th entry of x.
        x[i] /= A.data[A_diagonal_index_row_i]

    return x
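# Hedged usage sketch for the module above: `factorized` computes the LU
# factors once (via splu when umfpack is not in use) and returns a solve
# callable that can be reused for many right-hand sides.  The 3x3 matrix
# below is an illustrative assumption, not taken from this module.
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import factorized

A = csc_matrix([[3., 2., 0.], [1., -1., 0.], [0., 5., 1.]])
solve = factorized(A)                     # factorization happens once here
for rhs in (np.array([2., -1., 2.]), np.array([1., 0., 1.])):
    x = solve(rhs)                        # each call reuses the LU factors
    assert np.allclose(A.dot(x), rhs)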
19,106
32.639085
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/dsolve/_add_newdocs.py
from numpy.lib import add_newdoc

add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU',
    """
    LU factorization of a sparse matrix.

    Factorization is represented as::

        Pr * A * Pc = L * U

    To construct these `SuperLU` objects, call the `splu` and `spilu`
    functions.

    Attributes
    ----------
    shape
    nnz
    perm_c
    perm_r
    L
    U

    Methods
    -------
    solve

    Notes
    -----

    .. versionadded:: 0.14.0

    Examples
    --------
    The LU decomposition can be used to solve matrix equations. Consider:

    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix, linalg as sla
    >>> A = csc_matrix([[1,2,0,4],[1,0,0,1],[1,0,2,1],[2,2,1,0.]])

    This can be solved for a given right-hand side:

    >>> lu = sla.splu(A)
    >>> b = np.array([1, 2, 3, 4])
    >>> x = lu.solve(b)
    >>> A.dot(x)
    array([ 1.,  2.,  3.,  4.])

    The ``lu`` object also contains an explicit representation of the
    decomposition. The permutations are represented as mappings of
    indices:

    >>> lu.perm_r
    array([0, 2, 1, 3], dtype=int32)
    >>> lu.perm_c
    array([2, 0, 1, 3], dtype=int32)

    The L and U factors are sparse matrices in CSC format:

    >>> lu.L.A
    array([[ 1. ,  0. ,  0. ,  0. ],
           [ 0. ,  1. ,  0. ,  0. ],
           [ 0. ,  0. ,  1. ,  0. ],
           [ 1. ,  0.5,  0.5,  1. ]])
    >>> lu.U.A
    array([[ 2.,  0.,  1.,  4.],
           [ 0.,  2.,  1.,  1.],
           [ 0.,  0.,  1.,  1.],
           [ 0.,  0.,  0., -5.]])

    The permutation matrices can be constructed:

    >>> Pr = csc_matrix((4, 4))
    >>> Pr[lu.perm_r, np.arange(4)] = 1
    >>> Pc = csc_matrix((4, 4))
    >>> Pc[np.arange(4), lu.perm_c] = 1

    We can reassemble the original matrix:

    >>> (Pr.T * (lu.L * lu.U) * Pc.T).A
    array([[ 1.,  2.,  0.,  4.],
           [ 1.,  0.,  0.,  1.],
           [ 1.,  0.,  2.,  1.],
           [ 2.,  2.,  1.,  0.]])
    """)

add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('solve',
    """
    solve(rhs[, trans])

    Solves linear system of equations with one or several right-hand sides.

    Parameters
    ----------
    rhs : ndarray, shape (n,) or (n, k)
        Right hand side(s) of equation
    trans : {'N', 'T', 'H'}, optional
        Type of system to solve::

            'N':   A   * x == rhs  (default)
            'T':   A^T * x == rhs
            'H':   A^H * x == rhs

        i.e., normal, transposed, and hermitian conjugate.

    Returns
    -------
    x : ndarray, shape ``rhs.shape``
        Solution vector(s)
    """))

add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('L',
    """
    Lower triangular factor with unit diagonal as a
    `scipy.sparse.csc_matrix`.

    .. versionadded:: 0.14.0
    """))

add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('U',
    """
    Upper triangular factor as a `scipy.sparse.csc_matrix`.

    .. versionadded:: 0.14.0
    """))

add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('shape',
    """
    Shape of the original matrix as a tuple of ints.
    """))

add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('nnz',
    """
    Number of nonzero elements in the matrix.
    """))

add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('perm_c',
    """
    Permutation Pc represented as an array of indices.

    The column permutation matrix can be reconstructed via:

    >>> Pc = np.zeros((n, n))
    >>> Pc[np.arange(n), perm_c] = 1
    """))

add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('perm_r',
    """
    Permutation Pr represented as an array of indices.

    The row permutation matrix can be reconstructed via:

    >>> Pr = np.zeros((n, n))
    >>> Pr[perm_r, np.arange(n)] = 1
    """))
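# Hedged verification sketch of the identity documented above,
# Pr * A * Pc = L * U, using the same 4x4 example matrix as the docstring:
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import splu

A = csc_matrix([[1,2,0,4],[1,0,0,1],[1,0,2,1],[2,2,1,0.]])
lu = splu(A)
n = A.shape[0]
Pr = np.zeros((n, n))
Pr[lu.perm_r, np.arange(n)] = 1    # row permutation, as in the perm_r docs
Pc = np.zeros((n, n))
Pc[np.arange(n), lu.perm_c] = 1    # column permutation, as in the perm_c docs
assert np.allclose(Pr.dot(A.toarray()).dot(Pc), (lu.L * lu.U).toarray())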
3,801
23.529032
75
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/dsolve/__init__.py
""" Linear Solvers ============== The default solver is SuperLU (included in the scipy distribution), which can solve real or complex linear systems in both single and double precisions. It is automatically replaced by UMFPACK, if available. Note that UMFPACK works in double precision only, so switch it off by:: >>> use_solver(useUmfpack=False) to solve in the single precision. See also use_solver documentation. Example session:: >>> from scipy.sparse import csc_matrix, spdiags >>> from numpy import array >>> from scipy.sparse.linalg import spsolve, use_solver >>> >>> print("Inverting a sparse linear system:") >>> print("The sparse matrix (constructed from diagonals):") >>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) >>> b = array([1, 2, 3, 4, 5]) >>> print("Solve: single precision complex:") >>> use_solver( useUmfpack = False ) >>> a = a.astype('F') >>> x = spsolve(a, b) >>> print(x) >>> print("Error: ", a*x-b) >>> >>> print("Solve: double precision complex:") >>> use_solver( useUmfpack = True ) >>> a = a.astype('D') >>> x = spsolve(a, b) >>> print(x) >>> print("Error: ", a*x-b) >>> >>> print("Solve: double precision:") >>> a = a.astype('d') >>> x = spsolve(a, b) >>> print(x) >>> print("Error: ", a*x-b) >>> >>> print("Solve: single precision:") >>> use_solver( useUmfpack = False ) >>> a = a.astype('f') >>> x = spsolve(a, b.astype('f')) >>> print(x) >>> print("Error: ", a*x-b) """ from __future__ import division, print_function, absolute_import #import umfpack #__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) ) #del umfpack from .linsolve import * from ._superlu import SuperLU from . import _add_newdocs __all__ = [s for s in dir() if not s.startswith('_')] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
1,953
27.318841
70
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/dsolve/tests/test_linsolve.py
from __future__ import division, print_function, absolute_import import sys import threading import numpy as np from numpy import array, finfo, arange, eye, all, unique, ones, dot, matrix import numpy.random as random from numpy.testing import ( assert_array_almost_equal, assert_almost_equal, assert_equal, assert_array_equal, assert_, assert_allclose, assert_warns) import pytest from pytest import raises as assert_raises import scipy.linalg from scipy.linalg import norm, inv from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix, csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix) from scipy.sparse.linalg import SuperLU from scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu, MatrixRankWarning, _superlu, spsolve_triangular, factorized) from scipy._lib._numpy_compat import suppress_warnings sup_sparse_efficiency = suppress_warnings() sup_sparse_efficiency.filter(SparseEfficiencyWarning) # scikits.umfpack is not a SciPy dependency but it is optionally used in # dsolve, so check whether it's available try: import scikits.umfpack as umfpack has_umfpack = True except ImportError: has_umfpack = False def toarray(a): if isspmatrix(a): return a.toarray() else: return a class TestFactorized(object): def setup_method(self): n = 5 d = arange(n) + 1 self.n = n self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc() random.seed(1234) def _check_singular(self): A = csc_matrix((5,5), dtype='d') b = ones(5) assert_array_almost_equal(0. * b, factorized(A)(b)) def _check_non_singular(self): # Make a diagonal dominant, to make sure it is not singular n = 5 a = csc_matrix(random.rand(n, n)) b = ones(n) expected = splu(a).solve(b) assert_array_almost_equal(factorized(a)(b), expected) def test_singular_without_umfpack(self): use_solver(useUmfpack=False) with assert_raises(RuntimeError, message="Factor is exactly singular"): self._check_singular() @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_singular_with_umfpack(self): use_solver(useUmfpack=True) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars") assert_warns(umfpack.UmfpackWarning, self._check_singular) def test_non_singular_without_umfpack(self): use_solver(useUmfpack=False) self._check_non_singular() @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_non_singular_with_umfpack(self): use_solver(useUmfpack=True) self._check_non_singular() def test_cannot_factorize_nonsquare_matrix_without_umfpack(self): use_solver(useUmfpack=False) msg = "can only factor square matrices" with assert_raises(ValueError, message=msg): factorized(self.A[:, :4]) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_factorizes_nonsquare_matrix_with_umfpack(self): use_solver(useUmfpack=True) # does not raise factorized(self.A[:,:4]) def test_call_with_incorrectly_sized_matrix_without_umfpack(self): use_solver(useUmfpack=False) solve = factorized(self.A) b = random.rand(4) B = random.rand(4, 3) BB = random.rand(self.n, 3, 9) with assert_raises(ValueError, message="is of incompatible size"): solve(b) with assert_raises(ValueError, message="is of incompatible size"): solve(B) with assert_raises(ValueError, message="object too deep for desired array"): solve(BB) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_call_with_incorrectly_sized_matrix_with_umfpack(self): use_solver(useUmfpack=True) solve = factorized(self.A) b = random.rand(4) B = random.rand(4, 3) BB = 
random.rand(self.n, 3, 9) # does not raise solve(b) msg = "object too deep for desired array" with assert_raises(ValueError, message=msg): solve(B) with assert_raises(ValueError, message=msg): solve(BB) def test_call_with_cast_to_complex_without_umfpack(self): use_solver(useUmfpack=False) solve = factorized(self.A) b = random.rand(4) for t in [np.complex64, np.complex128]: with assert_raises(TypeError, message="Cannot cast array data"): solve(b.astype(t)) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_call_with_cast_to_complex_with_umfpack(self): use_solver(useUmfpack=True) solve = factorized(self.A) b = random.rand(4) for t in [np.complex64, np.complex128]: assert_warns(np.ComplexWarning, solve, b.astype(t)) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_assume_sorted_indices_flag(self): # a sparse matrix with unsorted indices unsorted_inds = np.array([2, 0, 1, 0]) data = np.array([10, 16, 5, 0.4]) indptr = np.array([0, 1, 2, 4]) A = csc_matrix((data, unsorted_inds, indptr), (3, 3)) b = ones(3) # should raise when incorrectly assuming indices are sorted use_solver(useUmfpack=True, assumeSortedIndices=True) with assert_raises(RuntimeError, message="UMFPACK_ERROR_invalid_matrix"): factorized(A) # should sort indices and succeed when not assuming indices are sorted use_solver(useUmfpack=True, assumeSortedIndices=False) expected = splu(A.copy()).solve(b) assert_equal(A.has_sorted_indices, 0) assert_array_almost_equal(factorized(A)(b), expected) assert_equal(A.has_sorted_indices, 1) class TestLinsolve(object): def setup_method(self): use_solver(useUmfpack=False) def test_singular(self): A = csc_matrix((5,5), dtype='d') b = array([1, 2, 3, 4, 5],dtype='d') with suppress_warnings() as sup: sup.filter(MatrixRankWarning, "Matrix is exactly singular") x = spsolve(A, b) assert_(not np.isfinite(x).any()) def test_singular_gh_3312(self): # "Bad" test case that leads SuperLU to call LAPACK with invalid # arguments. Check that it fails moderately gracefully. ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32) v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296]) A = csc_matrix((v, ij.T), shape=(20, 20)) b = np.arange(20) try: # should either raise a runtimeerror or return value # appropriate for singular input x = spsolve(A, b) assert_(not np.isfinite(x).any()) except RuntimeError: pass def test_twodiags(self): A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) b = array([1, 2, 3, 4, 5]) # condition number of A cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2) for t in ['f','d','F','D']: eps = finfo(t).eps # floating point epsilon b = b.astype(t) for format in ['csc','csr']: Asp = A.astype(t).asformat(format) x = spsolve(Asp,b) assert_(norm(b - Asp*x) < 10 * cond_A * eps) def test_bvector_smoketest(self): Adense = matrix([[0., 1., 1.], [1., 0., 1.], [0., 0., 1.]]) As = csc_matrix(Adense) random.seed(1234) x = random.randn(3) b = As*x x2 = spsolve(As, b) assert_array_almost_equal(x, x2) def test_bmatrix_smoketest(self): Adense = matrix([[0., 1., 1.], [1., 0., 1.], [0., 0., 1.]]) As = csc_matrix(Adense) random.seed(1234) x = random.randn(3, 4) Bdense = As.dot(x) Bs = csc_matrix(Bdense) x2 = spsolve(As, Bs) assert_array_almost_equal(x, x2.todense()) @sup_sparse_efficiency def test_non_square(self): # A is not square. A = ones((3, 4)) b = ones((4, 1)) assert_raises(ValueError, spsolve, A, b) # A2 and b2 have incompatible shapes. 
A2 = csc_matrix(eye(3)) b2 = array([1.0, 2.0]) assert_raises(ValueError, spsolve, A2, b2) @sup_sparse_efficiency def test_example_comparison(self): row = array([0,0,1,2,2,2]) col = array([0,2,2,0,1,2]) data = array([1,2,3,-4,5,6]) sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float) M = sM.todense() row = array([0,0,1,1,0,0]) col = array([0,2,1,1,0,0]) data = array([1,1,1,1,1,1]) sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float) N = sN.todense() sX = spsolve(sM, sN) X = scipy.linalg.solve(M, N) assert_array_almost_equal(X, sX.todense()) @sup_sparse_efficiency @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_shape_compatibility(self): use_solver(useUmfpack=True) A = csc_matrix([[1., 0], [0, 2]]) bs = [ [1, 6], array([1, 6]), [[1], [6]], array([[1], [6]]), csc_matrix([[1], [6]]), csr_matrix([[1], [6]]), dok_matrix([[1], [6]]), bsr_matrix([[1], [6]]), array([[1., 2., 3.], [6., 8., 10.]]), csc_matrix([[1., 2., 3.], [6., 8., 10.]]), csr_matrix([[1., 2., 3.], [6., 8., 10.]]), dok_matrix([[1., 2., 3.], [6., 8., 10.]]), bsr_matrix([[1., 2., 3.], [6., 8., 10.]]), ] for b in bs: x = np.linalg.solve(A.toarray(), toarray(b)) for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]: x1 = spsolve(spmattype(A), b, use_umfpack=True) x2 = spsolve(spmattype(A), b, use_umfpack=False) # check solution if x.ndim == 2 and x.shape[1] == 1: # interprets also these as "vectors" x = x.ravel() assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1))) assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2))) # dense vs. sparse output ("vectors" are always dense) if isspmatrix(b) and x.ndim > 1: assert_(isspmatrix(x1), repr((b, spmattype, 1))) assert_(isspmatrix(x2), repr((b, spmattype, 2))) else: assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1))) assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2))) # check output shape if x.ndim == 1: # "vector" assert_equal(x1.shape, (A.shape[1],)) assert_equal(x2.shape, (A.shape[1],)) else: # "matrix" assert_equal(x1.shape, x.shape) assert_equal(x2.shape, x.shape) A = csc_matrix((3, 3)) b = csc_matrix((1, 3)) assert_raises(ValueError, spsolve, A, b) @sup_sparse_efficiency def test_ndarray_support(self): A = array([[1., 2.], [2., 0.]]) x = array([[1., 1.], [0.5, -0.5]]) b = array([[2., 0.], [2., 2.]]) assert_array_almost_equal(x, spsolve(A, b)) def test_gssv_badinput(self): N = 10 d = arange(N) + 1.0 A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N) for spmatrix in (csc_matrix, csr_matrix): A = spmatrix(A) b = np.arange(N) def not_c_contig(x): return x.repeat(2)[::2] def not_1dim(x): return x[:,None] def bad_type(x): return x.astype(bool) def too_short(x): return x[:-1] badops = [not_c_contig, not_1dim, bad_type, too_short] for badop in badops: msg = "%r %r" % (spmatrix, badop) # Not C-contiguous assert_raises((ValueError, TypeError), _superlu.gssv, N, A.nnz, badop(A.data), A.indices, A.indptr, b, int(spmatrix == csc_matrix), err_msg=msg) assert_raises((ValueError, TypeError), _superlu.gssv, N, A.nnz, A.data, badop(A.indices), A.indptr, b, int(spmatrix == csc_matrix), err_msg=msg) assert_raises((ValueError, TypeError), _superlu.gssv, N, A.nnz, A.data, A.indices, badop(A.indptr), b, int(spmatrix == csc_matrix), err_msg=msg) def test_sparsity_preservation(self): ident = csc_matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]]) b = csc_matrix([ [0, 1], [1, 0], [0, 0]]) x = spsolve(ident, b) assert_equal(ident.nnz, 3) assert_equal(b.nnz, 2) assert_equal(x.nnz, 2) assert_allclose(x.A, b.A, 
atol=1e-12, rtol=1e-12) def test_dtype_cast(self): A_real = scipy.sparse.csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) A_complex = scipy.sparse.csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5 + 1j]]) b_real = np.array([1,1,1]) b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1]) x = spsolve(A_real, b_real) assert_(np.issubdtype(x.dtype, np.floating)) x = spsolve(A_real, b_complex) assert_(np.issubdtype(x.dtype, np.complexfloating)) x = spsolve(A_complex, b_real) assert_(np.issubdtype(x.dtype, np.complexfloating)) x = spsolve(A_complex, b_complex) assert_(np.issubdtype(x.dtype, np.complexfloating)) class TestSplu(object): def setup_method(self): use_solver(useUmfpack=False) n = 40 d = arange(n) + 1 self.n = n self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n) random.seed(1234) def _smoketest(self, spxlu, check, dtype): if np.issubdtype(dtype, np.complexfloating): A = self.A + 1j*self.A.T else: A = self.A A = A.astype(dtype) lu = spxlu(A) rng = random.RandomState(1234) # Input shapes for k in [None, 1, 2, self.n, self.n+2]: msg = "k=%r" % (k,) if k is None: b = rng.rand(self.n) else: b = rng.rand(self.n, k) if np.issubdtype(dtype, np.complexfloating): b = b + 1j*rng.rand(*b.shape) b = b.astype(dtype) x = lu.solve(b) check(A, b, x, msg) x = lu.solve(b, 'T') check(A.T, b, x, msg) x = lu.solve(b, 'H') check(A.T.conj(), b, x, msg) @sup_sparse_efficiency def test_splu_smoketest(self): self._internal_test_splu_smoketest() def _internal_test_splu_smoketest(self): # Check that splu works at all def check(A, b, x, msg=""): eps = np.finfo(A.dtype).eps r = A * x assert_(abs(r - b).max() < 1e3*eps, msg) self._smoketest(splu, check, np.float32) self._smoketest(splu, check, np.float64) self._smoketest(splu, check, np.complex64) self._smoketest(splu, check, np.complex128) @sup_sparse_efficiency def test_spilu_smoketest(self): self._internal_test_spilu_smoketest() def _internal_test_spilu_smoketest(self): errors = [] def check(A, b, x, msg=""): r = A * x err = abs(r - b).max() assert_(err < 1e-2, msg) if b.dtype in (np.float64, np.complex128): errors.append(err) self._smoketest(spilu, check, np.float32) self._smoketest(spilu, check, np.float64) self._smoketest(spilu, check, np.complex64) self._smoketest(spilu, check, np.complex128) assert_(max(errors) > 1e-5) @sup_sparse_efficiency def test_spilu_drop_rule(self): # Test passing in the drop_rule argument to spilu. A = identity(2) rules = [ b'basic,area'.decode('ascii'), # unicode b'basic,area', # ascii [b'basic', b'area'.decode('ascii')] ] for rule in rules: # Argument should be accepted assert_(isinstance(spilu(A, drop_rule=rule), SuperLU)) def test_splu_nnz0(self): A = csc_matrix((5,5), dtype='d') assert_raises(RuntimeError, splu, A) def test_spilu_nnz0(self): A = csc_matrix((5,5), dtype='d') assert_raises(RuntimeError, spilu, A) def test_splu_basic(self): # Test basic splu functionality. n = 30 rng = random.RandomState(12) a = rng.rand(n, n) a[a < 0.95] = 0 # First test with a singular matrix a[:, 0] = 0 a_ = csc_matrix(a) # Matrix is exactly singular assert_raises(RuntimeError, splu, a_) # Make a diagonal dominant, to make sure it is not singular a += 4*eye(n) a_ = csc_matrix(a) lu = splu(a_) b = ones(n) x = lu.solve(b) assert_almost_equal(dot(a, x), b) def test_splu_perm(self): # Test the permutation vectors exposed by splu. n = 30 a = random.random((n, n)) a[a < 0.95] = 0 # Make a diagonal dominant, to make sure it is not singular a += 4*eye(n) a_ = csc_matrix(a) lu = splu(a_) # Check that the permutation indices do belong to [0, n-1]. 
for perm in (lu.perm_r, lu.perm_c): assert_(all(perm > -1)) assert_(all(perm < n)) assert_equal(len(unique(perm)), len(perm)) # Now make a symmetric, and test that the two permutation vectors are # the same # Note: a += a.T relies on undefined behavior. a = a + a.T a_ = csc_matrix(a) lu = splu(a_) assert_array_equal(lu.perm_r, lu.perm_c) @pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount") def test_lu_refcount(self): # Test that we are keeping track of the reference count with splu. n = 30 a = random.random((n, n)) a[a < 0.95] = 0 # Make a diagonal dominant, to make sure it is not singular a += 4*eye(n) a_ = csc_matrix(a) lu = splu(a_) # And now test that we don't have a refcount bug rc = sys.getrefcount(lu) for attr in ('perm_r', 'perm_c'): perm = getattr(lu, attr) assert_equal(sys.getrefcount(lu), rc + 1) del perm assert_equal(sys.getrefcount(lu), rc) def test_bad_inputs(self): A = self.A.tocsc() assert_raises(ValueError, splu, A[:,:4]) assert_raises(ValueError, spilu, A[:,:4]) for lu in [splu(A), spilu(A)]: b = random.rand(42) B = random.rand(42, 3) BB = random.rand(self.n, 3, 9) assert_raises(ValueError, lu.solve, b) assert_raises(ValueError, lu.solve, B) assert_raises(ValueError, lu.solve, BB) assert_raises(TypeError, lu.solve, b.astype(np.complex64)) assert_raises(TypeError, lu.solve, b.astype(np.complex128)) @sup_sparse_efficiency def test_superlu_dlamch_i386_nan(self): # SuperLU 4.3 calls some functions returning floats without # declaring them. On i386@linux call convention, this fails to # clear floating point registers after call. As a result, NaN # can appear in the next floating point operation made. # # Here's a test case that triggered the issue. n = 8 d = np.arange(n) + 1 A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n) A = A.astype(np.float32) spilu(A) A = A + 1j*A B = A.A assert_(not np.isnan(B).any()) @sup_sparse_efficiency def test_lu_attr(self): def check(dtype, complex_2=False): A = self.A.astype(dtype) if complex_2: A = A + 1j*A.T n = A.shape[0] lu = splu(A) # Check that the decomposition is as advertized Pc = np.zeros((n, n)) Pc[np.arange(n), lu.perm_c] = 1 Pr = np.zeros((n, n)) Pr[lu.perm_r, np.arange(n)] = 1 Ad = A.toarray() lhs = Pr.dot(Ad).dot(Pc) rhs = (lu.L * lu.U).toarray() eps = np.finfo(dtype).eps assert_allclose(lhs, rhs, atol=100*eps) check(np.float32) check(np.float64) check(np.complex64) check(np.complex128) check(np.complex64, True) check(np.complex128, True) @sup_sparse_efficiency def test_threads_parallel(self): oks = [] def worker(): try: self.test_splu_basic() self._internal_test_splu_smoketest() self._internal_test_spilu_smoketest() oks.append(True) except: pass threads = [threading.Thread(target=worker) for k in range(20)] for t in threads: t.start() for t in threads: t.join() assert_equal(len(oks), 20) class TestSpsolveTriangular(object): def setup_method(self): use_solver(useUmfpack=False) def test_singular(self): n = 5 A = csr_matrix((n, n)) b = np.arange(n) for lower in (True, False): assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower) @sup_sparse_efficiency def test_bad_shape(self): # A is not square. A = np.zeros((3, 4)) b = ones((4, 1)) assert_raises(ValueError, spsolve_triangular, A, b) # A2 and b2 have incompatible shapes. 
A2 = csr_matrix(eye(3)) b2 = array([1.0, 2.0]) assert_raises(ValueError, spsolve_triangular, A2, b2) @sup_sparse_efficiency def test_input_types(self): A = array([[1., 0.], [1., 2.]]) b = array([[2., 0.], [2., 2.]]) for matrix_type in (array, csc_matrix, csr_matrix): x = spsolve_triangular(matrix_type(A), b, lower=True) assert_array_almost_equal(A.dot(x), b) @sup_sparse_efficiency def test_random(self): def random_triangle_matrix(n, lower=True): A = scipy.sparse.random(n, n, density=0.1, format='coo') if lower: A = scipy.sparse.tril(A) else: A = scipy.sparse.triu(A) A = A.tocsr(copy=False) for i in range(n): A[i, i] = np.random.rand() + 1 return A np.random.seed(1234) for lower in (True, False): for n in (10, 10**2, 10**3): A = random_triangle_matrix(n, lower=lower) for m in (1, 10): for b in (np.random.rand(n, m), np.random.randint(-9, 9, (n, m)), np.random.randint(-9, 9, (n, m)) + np.random.randint(-9, 9, (n, m)) * 1j): x = spsolve_triangular(A, b, lower=lower) assert_array_almost_equal(A.dot(x), b)
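# Hedged standalone illustration of the forward substitution exercised by
# TestSpsolveTriangular above, assuming a simple lower-triangular CSR matrix
# (the values below are illustrative, not a fixture from this suite):
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import spsolve_triangular

A = csr_matrix([[3., 0., 0.], [1., -1., 0.], [2., 0., 1.]])
b = np.array([6., 1., 5.])
x = spsolve_triangular(A, b, lower=True)   # solves row by row, top to bottom
assert np.allclose(A.dot(x), b)            # x == [2., 1., 1.]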
24,273
32.854951
90
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/sparse/linalg/dsolve/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/mmio.py
""" Matrix Market I/O in Python. See http://math.nist.gov/MatrixMarket/formats.html for information about the Matrix Market format. """ # # Author: Pearu Peterson <pearu@cens.ioc.ee> # Created: October, 2004 # # References: # http://math.nist.gov/MatrixMarket/ # from __future__ import division, print_function, absolute_import import os import sys from numpy import (asarray, real, imag, conj, zeros, ndarray, concatenate, ones, ascontiguousarray, vstack, savetxt, fromfile, fromstring, can_cast) from numpy.compat import asbytes, asstr from scipy._lib.six import string_types from scipy.sparse import coo_matrix, isspmatrix __all__ = ['mminfo', 'mmread', 'mmwrite', 'MMFile'] # ----------------------------------------------------------------------------- def mminfo(source): """ Return size and storage parameters from Matrix Market file-like 'source'. Parameters ---------- source : str or file-like Matrix Market filename (extension .mtx) or open file-like object Returns ------- rows : int Number of matrix rows. cols : int Number of matrix columns. entries : int Number of non-zero entries of a sparse matrix or rows*cols for a dense matrix. format : str Either 'coordinate' or 'array'. field : str Either 'real', 'complex', 'pattern', or 'integer'. symmetry : str Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. """ return MMFile.info(source) # ----------------------------------------------------------------------------- def mmread(source): """ Reads the contents of a Matrix Market file-like 'source' into a matrix. Parameters ---------- source : str or file-like Matrix Market filename (extensions .mtx, .mtz.gz) or open file-like object. Returns ------- a : ndarray or coo_matrix Dense or sparse matrix depending on the matrix format in the Matrix Market file. """ return MMFile().read(source) # ----------------------------------------------------------------------------- def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None): """ Writes the sparse or dense array `a` to Matrix Market file-like `target`. Parameters ---------- target : str or file-like Matrix Market filename (extension .mtx) or open file-like object. a : array like Sparse or dense 2D array. comment : str, optional Comments to be prepended to the Matrix Market file. field : None or str, optional Either 'real', 'complex', 'pattern', or 'integer'. precision : None or int, optional Number of digits to display for real or complex values. symmetry : None or str, optional Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. If symmetry is None the symmetry type of 'a' is determined by its values. 
""" MMFile().write(target, a, comment, field, precision, symmetry) ############################################################################### class MMFile (object): __slots__ = ('_rows', '_cols', '_entries', '_format', '_field', '_symmetry') @property def rows(self): return self._rows @property def cols(self): return self._cols @property def entries(self): return self._entries @property def format(self): return self._format @property def field(self): return self._field @property def symmetry(self): return self._symmetry @property def has_symmetry(self): return self._symmetry in (self.SYMMETRY_SYMMETRIC, self.SYMMETRY_SKEW_SYMMETRIC, self.SYMMETRY_HERMITIAN) # format values FORMAT_COORDINATE = 'coordinate' FORMAT_ARRAY = 'array' FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY) @classmethod def _validate_format(self, format): if format not in self.FORMAT_VALUES: raise ValueError('unknown format type %s, must be one of %s' % (format, self.FORMAT_VALUES)) # field values FIELD_INTEGER = 'integer' FIELD_UNSIGNED = 'unsigned-integer' FIELD_REAL = 'real' FIELD_COMPLEX = 'complex' FIELD_PATTERN = 'pattern' FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN) @classmethod def _validate_field(self, field): if field not in self.FIELD_VALUES: raise ValueError('unknown field type %s, must be one of %s' % (field, self.FIELD_VALUES)) # symmetry values SYMMETRY_GENERAL = 'general' SYMMETRY_SYMMETRIC = 'symmetric' SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric' SYMMETRY_HERMITIAN = 'hermitian' SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC, SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN) @classmethod def _validate_symmetry(self, symmetry): if symmetry not in self.SYMMETRY_VALUES: raise ValueError('unknown symmetry type %s, must be one of %s' % (symmetry, self.SYMMETRY_VALUES)) DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp', FIELD_UNSIGNED: 'uint64', FIELD_REAL: 'd', FIELD_COMPLEX: 'D', FIELD_PATTERN: 'd'} # ------------------------------------------------------------------------- @staticmethod def reader(): pass # ------------------------------------------------------------------------- @staticmethod def writer(): pass # ------------------------------------------------------------------------- @classmethod def info(self, source): """ Return size, storage parameters from Matrix Market file-like 'source'. Parameters ---------- source : str or file-like Matrix Market filename (extension .mtx) or open file-like object Returns ------- rows : int Number of matrix rows. cols : int Number of matrix columns. entries : int Number of non-zero entries of a sparse matrix or rows*cols for a dense matrix. format : str Either 'coordinate' or 'array'. field : str Either 'real', 'complex', 'pattern', or 'integer'. symmetry : str Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. 
""" stream, close_it = self._open(source) try: # read and validate header line line = stream.readline() mmid, matrix, format, field, symmetry = \ [asstr(part.strip()) for part in line.split()] if not mmid.startswith('%%MatrixMarket'): raise ValueError('source is not in Matrix Market format') if not matrix.lower() == 'matrix': raise ValueError("Problem reading file header: " + line) # http://math.nist.gov/MatrixMarket/formats.html if format.lower() == 'array': format = self.FORMAT_ARRAY elif format.lower() == 'coordinate': format = self.FORMAT_COORDINATE # skip comments while line.startswith(b'%'): line = stream.readline() line = line.split() if format == self.FORMAT_ARRAY: if not len(line) == 2: raise ValueError("Header line not of length 2: " + line) rows, cols = map(int, line) entries = rows * cols else: if not len(line) == 3: raise ValueError("Header line not of length 3: " + line) rows, cols, entries = map(int, line) return (rows, cols, entries, format, field.lower(), symmetry.lower()) finally: if close_it: stream.close() # ------------------------------------------------------------------------- @staticmethod def _open(filespec, mode='rb'): """ Return an open file stream for reading based on source. If source is a file name, open it (after trying to find it with mtx and gzipped mtx extensions). Otherwise, just return source. Parameters ---------- filespec : str or file-like String giving file name or file-like object mode : str, optional Mode with which to open file, if `filespec` is a file name. Returns ------- fobj : file-like Open file-like object. close_it : bool True if the calling function should close this file when done, false otherwise. """ close_it = False if isinstance(filespec, string_types): close_it = True # open for reading if mode[0] == 'r': # determine filename plus extension if not os.path.isfile(filespec): if os.path.isfile(filespec+'.mtx'): filespec = filespec + '.mtx' elif os.path.isfile(filespec+'.mtx.gz'): filespec = filespec + '.mtx.gz' elif os.path.isfile(filespec+'.mtx.bz2'): filespec = filespec + '.mtx.bz2' # open filename if filespec.endswith('.gz'): import gzip stream = gzip.open(filespec, mode) elif filespec.endswith('.bz2'): import bz2 stream = bz2.BZ2File(filespec, 'rb') else: stream = open(filespec, mode) # open for writing else: if filespec[-4:] != '.mtx': filespec = filespec + '.mtx' stream = open(filespec, mode) else: stream = filespec return stream, close_it # ------------------------------------------------------------------------- @staticmethod def _get_symmetry(a): m, n = a.shape if m != n: return MMFile.SYMMETRY_GENERAL issymm = True isskew = True isherm = a.dtype.char in 'FD' # sparse input if isspmatrix(a): # check if number of nonzero entries of lower and upper triangle # matrix are equal a = a.tocoo() (row, col) = a.nonzero() if (row < col).sum() != (row > col).sum(): return MMFile.SYMMETRY_GENERAL # define iterator over symmetric pair entries a = a.todok() def symm_iterator(): for ((i, j), aij) in a.items(): if i > j: aji = a[j, i] yield (aij, aji) # non-sparse input else: # define iterator over symmetric pair entries def symm_iterator(): for j in range(n): for i in range(j+1, n): aij, aji = a[i][j], a[j][i] yield (aij, aji) # check for symmetry for (aij, aji) in symm_iterator(): if issymm and aij != aji: issymm = False if isskew and aij != -aji: isskew = False if isherm and aij != conj(aji): isherm = False if not (issymm or isskew or isherm): break # return symmetry value if issymm: return MMFile.SYMMETRY_SYMMETRIC if isskew: return 
MMFile.SYMMETRY_SKEW_SYMMETRIC if isherm: return MMFile.SYMMETRY_HERMITIAN return MMFile.SYMMETRY_GENERAL # ------------------------------------------------------------------------- @staticmethod def _field_template(field, precision): return {MMFile.FIELD_REAL: '%%.%ie\n' % precision, MMFile.FIELD_INTEGER: '%i\n', MMFile.FIELD_UNSIGNED: '%u\n', MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' % (precision, precision) }.get(field, None) # ------------------------------------------------------------------------- def __init__(self, **kwargs): self._init_attrs(**kwargs) # ------------------------------------------------------------------------- def read(self, source): """ Reads the contents of a Matrix Market file-like 'source' into a matrix. Parameters ---------- source : str or file-like Matrix Market filename (extensions .mtx, .mtz.gz) or open file object. Returns ------- a : ndarray or coo_matrix Dense or sparse matrix depending on the matrix format in the Matrix Market file. """ stream, close_it = self._open(source) try: self._parse_header(stream) return self._parse_body(stream) finally: if close_it: stream.close() # ------------------------------------------------------------------------- def write(self, target, a, comment='', field=None, precision=None, symmetry=None): """ Writes sparse or dense array `a` to Matrix Market file-like `target`. Parameters ---------- target : str or file-like Matrix Market filename (extension .mtx) or open file-like object. a : array like Sparse or dense 2D array. comment : str, optional Comments to be prepended to the Matrix Market file. field : None or str, optional Either 'real', 'complex', 'pattern', or 'integer'. precision : None or int, optional Number of digits to display for real or complex values. symmetry : None or str, optional Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. If symmetry is None the symmetry type of 'a' is determined by its values. 
""" stream, close_it = self._open(target, 'wb') try: self._write(stream, a, comment, field, precision, symmetry) finally: if close_it: stream.close() else: stream.flush() # ------------------------------------------------------------------------- def _init_attrs(self, **kwargs): """ Initialize each attributes with the corresponding keyword arg value or a default of None """ attrs = self.__class__.__slots__ public_attrs = [attr[1:] for attr in attrs] invalid_keys = set(kwargs.keys()) - set(public_attrs) if invalid_keys: raise ValueError('''found %s invalid keyword arguments, please only use %s''' % (tuple(invalid_keys), public_attrs)) for attr in attrs: setattr(self, attr, kwargs.get(attr[1:], None)) # ------------------------------------------------------------------------- def _parse_header(self, stream): rows, cols, entries, format, field, symmetry = \ self.__class__.info(stream) self._init_attrs(rows=rows, cols=cols, entries=entries, format=format, field=field, symmetry=symmetry) # ------------------------------------------------------------------------- def _parse_body(self, stream): rows, cols, entries, format, field, symm = (self.rows, self.cols, self.entries, self.format, self.field, self.symmetry) try: from scipy.sparse import coo_matrix except ImportError: coo_matrix = None dtype = self.DTYPES_BY_FIELD.get(field, None) has_symmetry = self.has_symmetry is_integer = field == self.FIELD_INTEGER is_unsigned_integer = field == self.FIELD_UNSIGNED is_complex = field == self.FIELD_COMPLEX is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC is_herm = symm == self.SYMMETRY_HERMITIAN is_pattern = field == self.FIELD_PATTERN if format == self.FORMAT_ARRAY: a = zeros((rows, cols), dtype=dtype) line = 1 i, j = 0, 0 if is_skew: a[i, j] = 0 if i < rows - 1: i += 1 while line: line = stream.readline() if not line or line.startswith(b'%'): continue if is_integer: aij = int(line) elif is_unsigned_integer: aij = int(line) elif is_complex: aij = complex(*map(float, line.split())) else: aij = float(line) a[i, j] = aij if has_symmetry and i != j: if is_skew: a[j, i] = -aij elif is_herm: a[j, i] = conj(aij) else: a[j, i] = aij if i < rows-1: i = i + 1 else: j = j + 1 if not has_symmetry: i = 0 else: i = j if is_skew: a[i, j] = 0 if i < rows-1: i += 1 if is_skew: if not (i in [0, j] and j == cols - 1): raise ValueError("Parse error, did not read all lines.") else: if not (i in [0, j] and j == cols): raise ValueError("Parse error, did not read all lines.") elif format == self.FORMAT_COORDINATE and coo_matrix is None: # Read sparse matrix to dense when coo_matrix is not available. 
            a = zeros((rows, cols), dtype=dtype)
            line = 1
            k = 0
            while line:
                line = stream.readline()
                if not line or line.startswith(b'%'):
                    continue
                l = line.split()
                i, j = map(int, l[:2])
                i, j = i-1, j-1
                if is_integer:
                    aij = int(l[2])
                elif is_unsigned_integer:
                    aij = int(l[2])
                elif is_complex:
                    aij = complex(*map(float, l[2:]))
                else:
                    aij = float(l[2])
                a[i, j] = aij
                if has_symmetry and i != j:
                    if is_skew:
                        a[j, i] = -aij
                    elif is_herm:
                        a[j, i] = conj(aij)
                    else:
                        a[j, i] = aij
                k = k + 1
            if not k == entries:
                raise ValueError("Did not read all entries")

        elif format == self.FORMAT_COORDINATE:
            # Read sparse COOrdinate format
            if entries == 0:
                # empty matrix
                return coo_matrix((rows, cols), dtype=dtype)

            I = zeros(entries, dtype='intc')
            J = zeros(entries, dtype='intc')
            if is_pattern:
                V = ones(entries, dtype='int8')
            elif is_integer:
                V = zeros(entries, dtype='intp')
            elif is_unsigned_integer:
                V = zeros(entries, dtype='uint64')
            elif is_complex:
                V = zeros(entries, dtype='complex')
            else:
                V = zeros(entries, dtype='float')

            entry_number = 0
            for line in stream:
                if not line or line.startswith(b'%'):
                    continue

                if entry_number+1 > entries:
                    raise ValueError("'entries' in header is smaller than "
                                     "number of entries")
                l = line.split()
                I[entry_number], J[entry_number] = map(int, l[:2])

                if not is_pattern:
                    if is_integer:
                        V[entry_number] = int(l[2])
                    elif is_unsigned_integer:
                        V[entry_number] = int(l[2])
                    elif is_complex:
                        V[entry_number] = complex(*map(float, l[2:]))
                    else:
                        V[entry_number] = float(l[2])
                entry_number += 1
            if entry_number < entries:
                raise ValueError("'entries' in header is larger than "
                                 "number of entries")

            I -= 1  # adjust indices (base 1 -> base 0)
            J -= 1

            if has_symmetry:
                mask = (I != J)  # off diagonal mask
                od_I = I[mask]
                od_J = J[mask]
                od_V = V[mask]

                I = concatenate((I, od_J))
                J = concatenate((J, od_I))

                if is_skew:
                    od_V *= -1
                elif is_herm:
                    od_V = od_V.conjugate()

                V = concatenate((V, od_V))

            a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype)
        else:
            raise NotImplementedError(format)

        return a

    # -------------------------------------------------------------------------
    def _write(self, stream, a, comment='', field=None, precision=None,
               symmetry=None):
        if isinstance(a, list) or isinstance(a, ndarray) or \
           isinstance(a, tuple) or hasattr(a, '__array__'):
            rep = self.FORMAT_ARRAY
            a = asarray(a)
            if len(a.shape) != 2:
                raise ValueError('Expected 2 dimensional array')
            rows, cols = a.shape

            if field is not None:
                if field == self.FIELD_INTEGER:
                    if not can_cast(a.dtype, 'intp'):
                        raise OverflowError("mmwrite does not support integer "
                                            "dtypes larger than native 'intp'.")
                    a = a.astype('intp')
                elif field == self.FIELD_REAL:
                    if a.dtype.char not in 'fd':
                        a = a.astype('d')
                elif field == self.FIELD_COMPLEX:
                    if a.dtype.char not in 'FD':
                        a = a.astype('D')
        else:
            if not isspmatrix(a):
                raise ValueError('unknown matrix type: %s' % type(a))

            rep = 'coordinate'
            rows, cols = a.shape

        typecode = a.dtype.char

        if precision is None:
            if typecode in 'fF':
                precision = 8
            else:
                precision = 16
        if field is None:
            kind = a.dtype.kind
            if kind == 'i':
                if not can_cast(a.dtype, 'intp'):
                    raise OverflowError("mmwrite does not support integer "
                                        "dtypes larger than native 'intp'.")
                field = 'integer'
            elif kind == 'f':
                field = 'real'
            elif kind == 'c':
                field = 'complex'
            elif kind == 'u':
                field = 'unsigned-integer'
            else:
                raise TypeError('unexpected dtype kind ' + kind)

        if symmetry is None:
            symmetry = self._get_symmetry(a)

        # validate rep, field, and symmetry
        self.__class__._validate_format(rep)
        self.__class__._validate_field(field)
        self.__class__._validate_symmetry(symmetry)

        # write initial header line
        stream.write(asbytes('%%MatrixMarket matrix {0} {1} {2}\n'.format(rep, field, symmetry)))

        # write comments
        for line in comment.split('\n'):
            stream.write(asbytes('%%%s\n' % (line)))

        template = self._field_template(field, precision)

        # write dense format
        if rep == self.FORMAT_ARRAY:
            # write shape spec
            stream.write(asbytes('%i %i\n' % (rows, cols)))

            if field in (self.FIELD_INTEGER, self.FIELD_REAL,
                         self.FIELD_UNSIGNED):
                if symmetry == self.SYMMETRY_GENERAL:
                    for j in range(cols):
                        for i in range(rows):
                            stream.write(asbytes(template % a[i, j]))
                elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC:
                    for j in range(cols):
                        for i in range(j + 1, rows):
                            stream.write(asbytes(template % a[i, j]))
                else:
                    for j in range(cols):
                        for i in range(j, rows):
                            stream.write(asbytes(template % a[i, j]))
            elif field == self.FIELD_COMPLEX:
                if symmetry == self.SYMMETRY_GENERAL:
                    for j in range(cols):
                        for i in range(rows):
                            aij = a[i, j]
                            stream.write(asbytes(template % (real(aij),
                                                             imag(aij))))
                else:
                    for j in range(cols):
                        for i in range(j, rows):
                            aij = a[i, j]
                            stream.write(asbytes(template % (real(aij),
                                                             imag(aij))))
            elif field == self.FIELD_PATTERN:
                raise ValueError('pattern type inconsistent with dense format')
            else:
                raise TypeError('Unknown field type %s' % field)

        # write sparse format
        else:
            coo = a.tocoo()  # convert to COOrdinate format

            # if symmetry format used, remove values above main diagonal
            if symmetry != self.SYMMETRY_GENERAL:
                lower_triangle_mask = coo.row >= coo.col
                coo = coo_matrix((coo.data[lower_triangle_mask],
                                  (coo.row[lower_triangle_mask],
                                   coo.col[lower_triangle_mask])),
                                 shape=coo.shape)

            # write shape spec
            stream.write(asbytes('%i %i %i\n' % (rows, cols, coo.nnz)))

            template = self._field_template(field, precision-1)

            if field == self.FIELD_PATTERN:
                for r, c in zip(coo.row+1, coo.col+1):
                    stream.write(asbytes("%i %i\n" % (r, c)))
            elif field in (self.FIELD_INTEGER, self.FIELD_REAL,
                           self.FIELD_UNSIGNED):
                for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
                    stream.write(asbytes(("%i %i " % (r, c)) +
                                         (template % d)))
            elif field == self.FIELD_COMPLEX:
                for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
                    stream.write(asbytes(("%i %i " % (r, c)) +
                                         (template % (d.real, d.imag))))
            else:
                raise TypeError('Unknown field type %s' % field)


def _is_fromfile_compatible(stream):
    """
    Check whether `stream` is compatible with numpy.fromfile.

    Passing a gzipped file object to ``fromfile/fromstring`` doesn't work
    with Python3.
    """
    if sys.version_info[0] < 3:
        return True

    bad_cls = []
    try:
        import gzip
        bad_cls.append(gzip.GzipFile)
    except ImportError:
        pass
    try:
        import bz2
        bad_cls.append(bz2.BZ2File)
    except ImportError:
        pass

    bad_cls = tuple(bad_cls)
    return not isinstance(stream, bad_cls)


# -----------------------------------------------------------------------------
if __name__ == '__main__':
    import time

    for filename in sys.argv[1:]:
        print('Reading', filename, '...', end=' ')
        sys.stdout.flush()
        t = time.time()
        mmread(filename)
        print('took %s seconds' % (time.time() - t))
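The MMFile machinery above is normally reached through scipy's public wrappers. A minimal round-trip sketch, assuming scipy and numpy are installed; the file name is illustrative:

import numpy as np
from scipy.io import mmread, mmwrite
from scipy.sparse import coo_matrix

# Write a small symmetric sparse matrix; _get_symmetry() should detect
# 'symmetric', so only the lower triangle is stored on disk.
a = coo_matrix(np.array([[0.0, 2.5],
                         [2.5, 1.0]]))
mmwrite('example_symmetric', a)   # _open() appends '.mtx' automatically
b = mmread('example_symmetric')   # _parse_body() re-expands the symmetry
print(b.toarray())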
file_length: 28,897 | avg_line_length: 33.525687 | max_line_length: 92 | extension_type: py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/setup.py
from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('io', parent_package, top_path) config.add_extension('_test_fortran', sources=['_test_fortran.pyf', '_test_fortran.f']) config.add_data_dir('tests') config.add_subpackage('matlab') config.add_subpackage('arff') config.add_subpackage('harwell_boeing') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
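For context, a numpy.distutils configuration like this one is usually consumed by a parent package rather than run directly. A hypothetical parent hook (the 'scipy' package name and layout here are illustrative, not taken from this repository):

from numpy.distutils.misc_util import Configuration

def configuration(parent_package='', top_path=None):
    # The parent collects subpackages; add_subpackage('io') makes
    # numpy.distutils locate and execute the io/setup.py shown above.
    config = Configuration('scipy', parent_package, top_path)
    config.add_subpackage('io')
    return config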
file_length: 639 | avg_line_length: 29.47619 | max_line_length: 74 | extension_type: py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/netcdf.py
""" NetCDF reader/writer module. This module is used to read and create NetCDF files. NetCDF files are accessed through the `netcdf_file` object. Data written to and from NetCDF files are contained in `netcdf_variable` objects. Attributes are given as member variables of the `netcdf_file` and `netcdf_variable` objects. This module implements the Scientific.IO.NetCDF API to read and create NetCDF files. The same API is also used in the PyNIO and pynetcdf modules, allowing these modules to be used interchangeably when working with NetCDF files. Only NetCDF3 is supported here; for NetCDF4 see `netCDF4-python <http://unidata.github.io/netcdf4-python/>`__, which has a similar API. """ from __future__ import division, print_function, absolute_import # TODO: # * properly implement ``_FillValue``. # * fix character variables. # * implement PAGESIZE for Python 2.6? # The Scientific.IO.NetCDF API allows attributes to be added directly to # instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate # between user-set attributes and instance attributes, user-set attributes # are automatically stored in the ``_attributes`` attribute by overloading #``__setattr__``. This is the reason why the code sometimes uses #``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``; # otherwise the key would be inserted into userspace attributes. __all__ = ['netcdf_file'] import sys import warnings import weakref from operator import mul from collections import OrderedDict import mmap as mm import numpy as np from numpy.compat import asbytes, asstr from numpy import frombuffer, dtype, empty, array, asarray from numpy import little_endian as LITTLE_ENDIAN from functools import reduce from scipy._lib.six import integer_types, text_type, binary_type IS_PYPY = ('__pypy__' in sys.modules) ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00' ZERO = b'\x00\x00\x00\x00' NC_BYTE = b'\x00\x00\x00\x01' NC_CHAR = b'\x00\x00\x00\x02' NC_SHORT = b'\x00\x00\x00\x03' NC_INT = b'\x00\x00\x00\x04' NC_FLOAT = b'\x00\x00\x00\x05' NC_DOUBLE = b'\x00\x00\x00\x06' NC_DIMENSION = b'\x00\x00\x00\n' NC_VARIABLE = b'\x00\x00\x00\x0b' NC_ATTRIBUTE = b'\x00\x00\x00\x0c' FILL_BYTE = b'\x81' FILL_CHAR = b'\x00' FILL_SHORT = b'\x80\x01' FILL_INT = b'\x80\x00\x00\x01' FILL_FLOAT = b'\x7C\xF0\x00\x00' FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00' TYPEMAP = {NC_BYTE: ('b', 1), NC_CHAR: ('c', 1), NC_SHORT: ('h', 2), NC_INT: ('i', 4), NC_FLOAT: ('f', 4), NC_DOUBLE: ('d', 8)} FILLMAP = {NC_BYTE: FILL_BYTE, NC_CHAR: FILL_CHAR, NC_SHORT: FILL_SHORT, NC_INT: FILL_INT, NC_FLOAT: FILL_FLOAT, NC_DOUBLE: FILL_DOUBLE} REVERSE = {('b', 1): NC_BYTE, ('B', 1): NC_CHAR, ('c', 1): NC_CHAR, ('h', 2): NC_SHORT, ('i', 4): NC_INT, ('f', 4): NC_FLOAT, ('d', 8): NC_DOUBLE, # these come from asarray(1).dtype.char and asarray('foo').dtype.char, # used when getting the types from generic attributes. ('l', 4): NC_INT, ('S', 1): NC_CHAR} class netcdf_file(object): """ A file object for NetCDF data. A `netcdf_file` object has two standard attributes: `dimensions` and `variables`. The values of both are dictionaries, mapping dimension names to their associated lengths and variable names to variables, respectively. Application programs should never modify these dictionaries. All other attributes correspond to global attributes defined in the NetCDF file. Global file attributes are created by assigning to an attribute of the `netcdf_file` object. 
Parameters ---------- filename : string or file-like string -> filename mode : {'r', 'w', 'a'}, optional read-write-append mode, default is 'r' mmap : None or bool, optional Whether to mmap `filename` when reading. Default is True when `filename` is a file name, False when `filename` is a file-like object. Note that when mmap is in use, data arrays returned refer directly to the mmapped data on disk, and the file cannot be closed as long as references to it exist. version : {1, 2}, optional version of netcdf to read / write, where 1 means *Classic format* and 2 means *64-bit offset format*. Default is 1. See `here <https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_introduction.html#select_format>`__ for more info. maskandscale : bool, optional Whether to automatically scale and/or mask data based on attributes. Default is False. Notes ----- The major advantage of this module over other modules is that it doesn't require the code to be linked to the NetCDF libraries. This module is derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_. NetCDF files are a self-describing binary data format. The file contains metadata that describes the dimensions and variables in the file. More details about NetCDF files can be found `here <https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html>`__. There are three main sections to a NetCDF data structure: 1. Dimensions 2. Variables 3. Attributes The dimensions section records the name and length of each dimension used by the variables. The variables would then indicate which dimensions it uses and any attributes such as data units, along with containing the data values for the variable. It is good practice to include a variable that is the same name as a dimension to provide the values for that axes. Lastly, the attributes section would contain additional information such as the name of the file creator or the instrument used to collect the data. When writing data to a NetCDF file, there is often the need to indicate the 'record dimension'. A record dimension is the unbounded dimension for a variable. For example, a temperature variable may have dimensions of latitude, longitude and time. If one wants to add more temperature data to the NetCDF file as time progresses, then the temperature variable should have the time dimension flagged as the record dimension. In addition, the NetCDF file header contains the position of the data in the file, so access can be done in an efficient manner without loading unnecessary data into memory. It uses the ``mmap`` module to create Numpy arrays mapped to the data on disk, for the same purpose. Note that when `netcdf_file` is used to open a file with mmap=True (default for read-only), arrays returned by it refer to data directly on the disk. The file should not be closed, and cannot be cleanly closed when asked, if such arrays are alive. You may want to copy data arrays obtained from mmapped Netcdf file if they are to be processed after the file is closed, see the example below. Examples -------- To create a NetCDF file: >>> from scipy.io import netcdf >>> f = netcdf.netcdf_file('simple.nc', 'w') >>> f.history = 'Created for a test' >>> f.createDimension('time', 10) >>> time = f.createVariable('time', 'i', ('time',)) >>> time[:] = np.arange(10) >>> time.units = 'days since 2008-01-01' >>> f.close() Note the assignment of ``arange(10)`` to ``time[:]``. 
Exposing the slice of the time variable allows for the data to be set in the object, rather than letting ``arange(10)`` overwrite the ``time`` variable. To read the NetCDF file we just created: >>> from scipy.io import netcdf >>> f = netcdf.netcdf_file('simple.nc', 'r') >>> print(f.history) b'Created for a test' >>> time = f.variables['time'] >>> print(time.units) b'days since 2008-01-01' >>> print(time.shape) (10,) >>> print(time[-1]) 9 NetCDF files, when opened read-only, return arrays that refer directly to memory-mapped data on disk: >>> data = time[:] >>> data.base.base <mmap.mmap object at 0x7fe753763180> If the data is to be processed after the file is closed, it needs to be copied to main memory: >>> data = time[:].copy() >>> f.close() >>> data.mean() 4.5 A NetCDF file can also be used as context manager: >>> from scipy.io import netcdf >>> with netcdf.netcdf_file('simple.nc', 'r') as f: ... print(f.history) b'Created for a test' """ def __init__(self, filename, mode='r', mmap=None, version=1, maskandscale=False): """Initialize netcdf_file from fileobj (str or file-like).""" if mode not in 'rwa': raise ValueError("Mode must be either 'r', 'w' or 'a'.") if hasattr(filename, 'seek'): # file-like self.fp = filename self.filename = 'None' if mmap is None: mmap = False elif mmap and not hasattr(filename, 'fileno'): raise ValueError('Cannot use file object for mmap') else: # maybe it's a string self.filename = filename omode = 'r+' if mode == 'a' else mode self.fp = open(self.filename, '%sb' % omode) if mmap is None: # Mmapped files on PyPy cannot be usually closed # before the GC runs, so it's better to use mmap=False # as the default. mmap = (not IS_PYPY) if mode != 'r': # Cannot read write-only files mmap = False self.use_mmap = mmap self.mode = mode self.version_byte = version self.maskandscale = maskandscale self.dimensions = OrderedDict() self.variables = OrderedDict() self._dims = [] self._recs = 0 self._recsize = 0 self._mm = None self._mm_buf = None if self.use_mmap: self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ) self._mm_buf = np.frombuffer(self._mm, dtype=np.int8) self._attributes = OrderedDict() if mode in 'ra': self._read() def __setattr__(self, attr, value): # Store user defined attributes in a separate dict, # so we can save them to file later. try: self._attributes[attr] = value except AttributeError: pass self.__dict__[attr] = value def close(self): """Closes the NetCDF file.""" if hasattr(self, 'fp') and not self.fp.closed: try: self.flush() finally: self.variables = OrderedDict() if self._mm_buf is not None: ref = weakref.ref(self._mm_buf) self._mm_buf = None if ref() is None: # self._mm_buf is gc'd, and we can close the mmap self._mm.close() else: # we cannot close self._mm, since self._mm_buf is # alive and there may still be arrays referring to it warnings.warn(( "Cannot close a netcdf_file opened with mmap=True, when " "netcdf_variables or arrays referring to its data still exist. " "All data arrays obtained from such files refer directly to " "data on disk, and must be copied before the file can be cleanly " "closed. (See netcdf_file docstring for more information on mmap.)" ), category=RuntimeWarning) self._mm = None self.fp.close() __del__ = close def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def createDimension(self, name, length): """ Adds a dimension to the Dimension section of the NetCDF data structure. Note that this function merely adds a new dimension that the variables can reference. 
The values for the dimension, if desired, should be added as a variable using `createVariable`, referring to this dimension. Parameters ---------- name : str Name of the dimension (Eg, 'lat' or 'time'). length : int Length of the dimension. See Also -------- createVariable """ if length is None and self._dims: raise ValueError("Only first dimension may be unlimited!") self.dimensions[name] = length self._dims.append(name) def createVariable(self, name, type, dimensions): """ Create an empty variable for the `netcdf_file` object, specifying its data type and the dimensions it uses. Parameters ---------- name : str Name of the new variable. type : dtype or str Data type of the variable. dimensions : sequence of str List of the dimension names used by the variable, in the desired order. Returns ------- variable : netcdf_variable The newly created ``netcdf_variable`` object. This object has also been added to the `netcdf_file` object as well. See Also -------- createDimension Notes ----- Any dimensions to be used by the variable should already exist in the NetCDF data structure or should be created by `createDimension` prior to creating the NetCDF variable. """ shape = tuple([self.dimensions[dim] for dim in dimensions]) shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy type = dtype(type) typecode, size = type.char, type.itemsize if (typecode, size) not in REVERSE: raise ValueError("NetCDF 3 does not support type %s" % type) data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3 self.variables[name] = netcdf_variable( data, typecode, size, shape, dimensions, maskandscale=self.maskandscale) return self.variables[name] def flush(self): """ Perform a sync-to-disk flush if the `netcdf_file` object is in write mode. See Also -------- sync : Identical function """ if hasattr(self, 'mode') and self.mode in 'wa': self._write() sync = flush def _write(self): self.fp.seek(0) self.fp.write(b'CDF') self.fp.write(array(self.version_byte, '>b').tostring()) # Write headers and data. self._write_numrecs() self._write_dim_array() self._write_gatt_array() self._write_var_array() def _write_numrecs(self): # Get highest record count from all record variables. for var in self.variables.values(): if var.isrec and len(var.data) > self._recs: self.__dict__['_recs'] = len(var.data) self._pack_int(self._recs) def _write_dim_array(self): if self.dimensions: self.fp.write(NC_DIMENSION) self._pack_int(len(self.dimensions)) for name in self._dims: self._pack_string(name) length = self.dimensions[name] self._pack_int(length or 0) # replace None with 0 for record dimension else: self.fp.write(ABSENT) def _write_gatt_array(self): self._write_att_array(self._attributes) def _write_att_array(self, attributes): if attributes: self.fp.write(NC_ATTRIBUTE) self._pack_int(len(attributes)) for name, values in attributes.items(): self._pack_string(name) self._write_att_values(values) else: self.fp.write(ABSENT) def _write_var_array(self): if self.variables: self.fp.write(NC_VARIABLE) self._pack_int(len(self.variables)) # Sort variable names non-recs first, then recs. def sortkey(n): v = self.variables[n] if v.isrec: return (-1,) return v._shape variables = sorted(self.variables, key=sortkey, reverse=True) # Set the metadata for all variables. for name in variables: self._write_var_metadata(name) # Now that we have the metadata, we know the vsize of # each record variable, so we can calculate recsize. 
self.__dict__['_recsize'] = sum([ var._vsize for var in self.variables.values() if var.isrec]) # Set the data for all variables. for name in variables: self._write_var_data(name) else: self.fp.write(ABSENT) def _write_var_metadata(self, name): var = self.variables[name] self._pack_string(name) self._pack_int(len(var.dimensions)) for dimname in var.dimensions: dimid = self._dims.index(dimname) self._pack_int(dimid) self._write_att_array(var._attributes) nc_type = REVERSE[var.typecode(), var.itemsize()] self.fp.write(asbytes(nc_type)) if not var.isrec: vsize = var.data.size * var.data.itemsize vsize += -vsize % 4 else: # record variable try: vsize = var.data[0].size * var.data.itemsize except IndexError: vsize = 0 rec_vars = len([v for v in self.variables.values() if v.isrec]) if rec_vars > 1: vsize += -vsize % 4 self.variables[name].__dict__['_vsize'] = vsize self._pack_int(vsize) # Pack a bogus begin, and set the real value later. self.variables[name].__dict__['_begin'] = self.fp.tell() self._pack_begin(0) def _write_var_data(self, name): var = self.variables[name] # Set begin in file header. the_beguine = self.fp.tell() self.fp.seek(var._begin) self._pack_begin(the_beguine) self.fp.seek(the_beguine) # Write data. if not var.isrec: self.fp.write(var.data.tostring()) count = var.data.size * var.data.itemsize self._write_var_padding(var, var._vsize - count) else: # record variable # Handle rec vars with shape[0] < nrecs. if self._recs > len(var.data): shape = (self._recs,) + var.data.shape[1:] # Resize in-place does not always work since # the array might not be single-segment try: var.data.resize(shape) except ValueError: var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype) pos0 = pos = self.fp.tell() for rec in var.data: # Apparently scalars cannot be converted to big endian. If we # try to convert a ``=i4`` scalar to, say, '>i4' the dtype # will remain as ``=i4``. if not rec.shape and (rec.dtype.byteorder == '<' or (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)): rec = rec.byteswap() self.fp.write(rec.tostring()) # Padding count = rec.size * rec.itemsize self._write_var_padding(var, var._vsize - count) pos += self._recsize self.fp.seek(pos) self.fp.seek(pos0 + var._vsize) def _write_var_padding(self, var, size): encoded_fill_value = var._get_encoded_fill_value() num_fills = size // len(encoded_fill_value) self.fp.write(encoded_fill_value * num_fills) def _write_att_values(self, values): if hasattr(values, 'dtype'): nc_type = REVERSE[values.dtype.char, values.dtype.itemsize] else: types = [(t, NC_INT) for t in integer_types] types += [ (float, NC_FLOAT), (str, NC_CHAR) ] # bytes index into scalars in py3k. Check for "string" types if isinstance(values, text_type) or isinstance(values, binary_type): sample = values else: try: sample = values[0] # subscriptable? except TypeError: sample = values # scalar for class_, nc_type in types: if isinstance(sample, class_): break typecode, size = TYPEMAP[nc_type] dtype_ = '>%s' % typecode # asarray() dies with bytes and '>c' in py3k. 
Change to 'S' dtype_ = 'S' if dtype_ == '>c' else dtype_ values = asarray(values, dtype=dtype_) self.fp.write(asbytes(nc_type)) if values.dtype.char == 'S': nelems = values.itemsize else: nelems = values.size self._pack_int(nelems) if not values.shape and (values.dtype.byteorder == '<' or (values.dtype.byteorder == '=' and LITTLE_ENDIAN)): values = values.byteswap() self.fp.write(values.tostring()) count = values.size * values.itemsize self.fp.write(b'\x00' * (-count % 4)) # pad def _read(self): # Check magic bytes and version magic = self.fp.read(3) if not magic == b'CDF': raise TypeError("Error: %s is not a valid NetCDF 3 file" % self.filename) self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0] # Read file headers and set data. self._read_numrecs() self._read_dim_array() self._read_gatt_array() self._read_var_array() def _read_numrecs(self): self.__dict__['_recs'] = self._unpack_int() def _read_dim_array(self): header = self.fp.read(4) if header not in [ZERO, NC_DIMENSION]: raise ValueError("Unexpected header.") count = self._unpack_int() for dim in range(count): name = asstr(self._unpack_string()) length = self._unpack_int() or None # None for record dimension self.dimensions[name] = length self._dims.append(name) # preserve order def _read_gatt_array(self): for k, v in self._read_att_array().items(): self.__setattr__(k, v) def _read_att_array(self): header = self.fp.read(4) if header not in [ZERO, NC_ATTRIBUTE]: raise ValueError("Unexpected header.") count = self._unpack_int() attributes = OrderedDict() for attr in range(count): name = asstr(self._unpack_string()) attributes[name] = self._read_att_values() return attributes def _read_var_array(self): header = self.fp.read(4) if header not in [ZERO, NC_VARIABLE]: raise ValueError("Unexpected header.") begin = 0 dtypes = {'names': [], 'formats': []} rec_vars = [] count = self._unpack_int() for var in range(count): (name, dimensions, shape, attributes, typecode, size, dtype_, begin_, vsize) = self._read_var() # https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html # Note that vsize is the product of the dimension lengths # (omitting the record dimension) and the number of bytes # per value (determined from the type), increased to the # next multiple of 4, for each variable. If a record # variable, this is the amount of space per record. The # netCDF "record size" is calculated as the sum of the # vsize's of all the record variables. # # The vsize field is actually redundant, because its value # may be computed from other information in the header. The # 32-bit vsize field is not large enough to contain the size # of variables that require more than 2^32 - 4 bytes, so # 2^32 - 1 is used in the vsize field for such variables. if shape and shape[0] is None: # record variable rec_vars.append(name) # The netCDF "record size" is calculated as the sum of # the vsize's of all the record variables. self.__dict__['_recsize'] += vsize if begin == 0: begin = begin_ dtypes['names'].append(name) dtypes['formats'].append(str(shape[1:]) + dtype_) # Handle padding with a virtual variable. if typecode in 'bch': actual_size = reduce(mul, (1,) + shape[1:]) * size padding = -actual_size % 4 if padding: dtypes['names'].append('_padding_%d' % var) dtypes['formats'].append('(%d,)>b' % padding) # Data will be set later. 
data = None else: # not a record variable # Calculate size to avoid problems with vsize (above) a_size = reduce(mul, shape, 1) * size if self.use_mmap: data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_) data.shape = shape else: pos = self.fp.tell() self.fp.seek(begin_) data = frombuffer(self.fp.read(a_size), dtype=dtype_ ).copy() data.shape = shape self.fp.seek(pos) # Add variable. self.variables[name] = netcdf_variable( data, typecode, size, shape, dimensions, attributes, maskandscale=self.maskandscale) if rec_vars: # Remove padding when only one record variable. if len(rec_vars) == 1: dtypes['names'] = dtypes['names'][:1] dtypes['formats'] = dtypes['formats'][:1] # Build rec array. if self.use_mmap: rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes) rec_array.shape = (self._recs,) else: pos = self.fp.tell() self.fp.seek(begin) rec_array = frombuffer(self.fp.read(self._recs*self._recsize), dtype=dtypes).copy() rec_array.shape = (self._recs,) self.fp.seek(pos) for var in rec_vars: self.variables[var].__dict__['data'] = rec_array[var] def _read_var(self): name = asstr(self._unpack_string()) dimensions = [] shape = [] dims = self._unpack_int() for i in range(dims): dimid = self._unpack_int() dimname = self._dims[dimid] dimensions.append(dimname) dim = self.dimensions[dimname] shape.append(dim) dimensions = tuple(dimensions) shape = tuple(shape) attributes = self._read_att_array() nc_type = self.fp.read(4) vsize = self._unpack_int() begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]() typecode, size = TYPEMAP[nc_type] dtype_ = '>%s' % typecode return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize def _read_att_values(self): nc_type = self.fp.read(4) n = self._unpack_int() typecode, size = TYPEMAP[nc_type] count = n*size values = self.fp.read(int(count)) self.fp.read(-count % 4) # read padding if typecode is not 'c': values = frombuffer(values, dtype='>%s' % typecode).copy() if values.shape == (1,): values = values[0] else: values = values.rstrip(b'\x00') return values def _pack_begin(self, begin): if self.version_byte == 1: self._pack_int(begin) elif self.version_byte == 2: self._pack_int64(begin) def _pack_int(self, value): self.fp.write(array(value, '>i').tostring()) _pack_int32 = _pack_int def _unpack_int(self): return int(frombuffer(self.fp.read(4), '>i')[0]) _unpack_int32 = _unpack_int def _pack_int64(self, value): self.fp.write(array(value, '>q').tostring()) def _unpack_int64(self): return frombuffer(self.fp.read(8), '>q')[0] def _pack_string(self, s): count = len(s) self._pack_int(count) self.fp.write(asbytes(s)) self.fp.write(b'\x00' * (-count % 4)) # pad def _unpack_string(self): count = self._unpack_int() s = self.fp.read(count).rstrip(b'\x00') self.fp.read(-count % 4) # read padding return s class netcdf_variable(object): """ A data object for the `netcdf` module. `netcdf_variable` objects are constructed by calling the method `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable` objects behave much like array objects defined in numpy, except that their data resides in a file. Data is read by indexing and written by assigning to an indexed subset; the entire array can be accessed by the index ``[:]`` or (for scalars) by using the methods `getValue` and `assignValue`. `netcdf_variable` objects also have attribute `shape` with the same meaning as for arrays, but the shape cannot be modified. 
There is another read-only attribute `dimensions`, whose value is the tuple of dimension names. All other attributes correspond to variable attributes defined in the NetCDF file. Variable attributes are created by assigning to an attribute of the `netcdf_variable` object. Parameters ---------- data : array_like The data array that holds the values for the variable. Typically, this is initialized as empty, but with the proper shape. typecode : dtype character code Desired data-type for the data array. size : int Desired element size for the data array. shape : sequence of ints The shape of the array. This should match the lengths of the variable's dimensions. dimensions : sequence of strings The names of the dimensions used by the variable. Must be in the same order of the dimension lengths given by `shape`. attributes : dict, optional Attribute values (any type) keyed by string names. These attributes become attributes for the netcdf_variable object. maskandscale : bool, optional Whether to automatically scale and/or mask data based on attributes. Default is False. Attributes ---------- dimensions : list of str List of names of dimensions used by the variable object. isrec, shape Properties See also -------- isrec, shape """ def __init__(self, data, typecode, size, shape, dimensions, attributes=None, maskandscale=False): self.data = data self._typecode = typecode self._size = size self._shape = shape self.dimensions = dimensions self.maskandscale = maskandscale self._attributes = attributes or OrderedDict() for k, v in self._attributes.items(): self.__dict__[k] = v def __setattr__(self, attr, value): # Store user defined attributes in a separate dict, # so we can save them to file later. try: self._attributes[attr] = value except AttributeError: pass self.__dict__[attr] = value def isrec(self): """Returns whether the variable has a record dimension or not. A record dimension is a dimension along which additional data could be easily appended in the netcdf data structure without much rewriting of the data file. This attribute is a read-only property of the `netcdf_variable`. """ return bool(self.data.shape) and not self._shape[0] isrec = property(isrec) def shape(self): """Returns the shape tuple of the data variable. This is a read-only attribute and can not be modified in the same manner of other numpy arrays. """ return self.data.shape shape = property(shape) def getValue(self): """ Retrieve a scalar value from a `netcdf_variable` of length one. Raises ------ ValueError If the netcdf variable is an array of length greater than one, this exception will be raised. """ return self.data.item() def assignValue(self, value): """ Assign a scalar value to a `netcdf_variable` of length one. Parameters ---------- value : scalar Scalar value (of compatible type) to assign to a length-one netcdf variable. This value will be written to file. Raises ------ ValueError If the input is not a scalar, or if the destination is not a length-one netcdf variable. """ if not self.data.flags.writeable: # Work-around for a bug in NumPy. Calling itemset() on a read-only # memory-mapped array causes a seg. fault. # See NumPy ticket #1622, and SciPy ticket #1202. # This check for `writeable` can be removed when the oldest version # of numpy still supported by scipy contains the fix for #1622. raise RuntimeError("variable is not writeable") self.data.itemset(value) def typecode(self): """ Return the typecode of the variable. Returns ------- typecode : char The character typecode of the variable (eg, 'i' for int). 
""" return self._typecode def itemsize(self): """ Return the itemsize of the variable. Returns ------- itemsize : int The element size of the variable (eg, 8 for float64). """ return self._size def __getitem__(self, index): if not self.maskandscale: return self.data[index] data = self.data[index].copy() missing_value = self._get_missing_value() data = self._apply_missing_value(data, missing_value) scale_factor = self._attributes.get('scale_factor') add_offset = self._attributes.get('add_offset') if add_offset is not None or scale_factor is not None: data = data.astype(np.float64) if scale_factor is not None: data = data * scale_factor if add_offset is not None: data += add_offset return data def __setitem__(self, index, data): if self.maskandscale: missing_value = ( self._get_missing_value() or getattr(data, 'fill_value', 999999)) self._attributes.setdefault('missing_value', missing_value) self._attributes.setdefault('_FillValue', missing_value) data = ((data - self._attributes.get('add_offset', 0.0)) / self._attributes.get('scale_factor', 1.0)) data = np.ma.asarray(data).filled(missing_value) if self._typecode not in 'fd' and data.dtype.kind == 'f': data = np.round(data) # Expand data for record vars? if self.isrec: if isinstance(index, tuple): rec_index = index[0] else: rec_index = index if isinstance(rec_index, slice): recs = (rec_index.start or 0) + len(data) else: recs = rec_index + 1 if recs > len(self.data): shape = (recs,) + self._shape[1:] # Resize in-place does not always work since # the array might not be single-segment try: self.data.resize(shape) except ValueError: self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype) self.data[index] = data def _default_encoded_fill_value(self): """ The default encoded fill-value for this Variable's data type. """ nc_type = REVERSE[self.typecode(), self.itemsize()] return FILLMAP[nc_type] def _get_encoded_fill_value(self): """ Returns the encoded fill value for this variable as bytes. This is taken from either the _FillValue attribute, or the default fill value for this variable's data type. """ if '_FillValue' in self._attributes: fill_value = np.array(self._attributes['_FillValue'], dtype=self.data.dtype).tostring() if len(fill_value) == self.itemsize(): return fill_value else: return self._default_encoded_fill_value() else: return self._default_encoded_fill_value() def _get_missing_value(self): """ Returns the value denoting "no data" for this variable. If this variable does not have a missing/fill value, returns None. If both _FillValue and missing_value are given, give precedence to _FillValue. The netCDF standard gives special meaning to _FillValue; missing_value is just used for compatibility with old datasets. """ if '_FillValue' in self._attributes: missing_value = self._attributes['_FillValue'] elif 'missing_value' in self._attributes: missing_value = self._attributes['missing_value'] else: missing_value = None return missing_value @staticmethod def _apply_missing_value(data, missing_value): """ Applies the given missing value to the data array. Returns a numpy.ma array, with any value equal to missing_value masked out (unless missing_value is None, in which case the original array is returned). 
""" if missing_value is None: newdata = data else: try: missing_value_isnan = np.isnan(missing_value) except (TypeError, NotImplementedError): # some data types (e.g., characters) cannot be tested for NaN missing_value_isnan = False if missing_value_isnan: mymask = np.isnan(data) else: mymask = (data == missing_value) newdata = np.ma.masked_where(mymask, data) return newdata NetCDFFile = netcdf_file NetCDFVariable = netcdf_variable
file_length: 39,509 | avg_line_length: 34.950864 | max_line_length: 107 | extension_type: py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/_fortran.py
""" Module to read / write Fortran unformatted sequential files. This is in the spirit of code written by Neil Martinsen-Burrell and Joe Zuntz. """ from __future__ import division, print_function, absolute_import import warnings import numpy as np __all__ = ['FortranFile'] class FortranFile(object): """ A file object for unformatted sequential files from Fortran code. Parameters ---------- filename : file or str Open file object or filename. mode : {'r', 'w'}, optional Read-write mode, default is 'r'. header_dtype : dtype, optional Data type of the header. Size and endiness must match the input/output file. Notes ----- These files are broken up into records of unspecified types. The size of each record is given at the start (although the size of this header is not standard) and the data is written onto disk without any formatting. Fortran compilers supporting the BACKSPACE statement will write a second copy of the size to facilitate backwards seeking. This class only supports files written with both sizes for the record. It also does not support the subrecords used in Intel and gfortran compilers for records which are greater than 2GB with a 4-byte header. An example of an unformatted sequential file in Fortran would be written as:: OPEN(1, FILE=myfilename, FORM='unformatted') WRITE(1) myvariable Since this is a non-standard file format, whose contents depend on the compiler and the endianness of the machine, caution is advised. Files from gfortran 4.8.0 and gfortran 4.1.2 on x86_64 are known to work. Consider using Fortran direct-access files or files from the newer Stream I/O, which can be easily read by `numpy.fromfile`. Examples -------- To create an unformatted sequential Fortran file: >>> from scipy.io import FortranFile >>> f = FortranFile('test.unf', 'w') >>> f.write_record(np.array([1,2,3,4,5], dtype=np.int32)) >>> f.write_record(np.linspace(0,1,20).reshape((5,4)).T) >>> f.close() To read this file: >>> f = FortranFile('test.unf', 'r') >>> print(f.read_ints(np.int32)) [1 2 3 4 5] >>> print(f.read_reals(float).reshape((5,4), order="F")) [[0. 0.05263158 0.10526316 0.15789474] [0.21052632 0.26315789 0.31578947 0.36842105] [0.42105263 0.47368421 0.52631579 0.57894737] [0.63157895 0.68421053 0.73684211 0.78947368] [0.84210526 0.89473684 0.94736842 1. ]] >>> f.close() Or, in Fortran:: integer :: a(5), i double precision :: b(5,4) open(1, file='test.unf', form='unformatted') read(1) a read(1) b close(1) write(*,*) a do i = 1, 5 write(*,*) b(i,:) end do """ def __init__(self, filename, mode='r', header_dtype=np.uint32): if header_dtype is None: raise ValueError('Must specify dtype') header_dtype = np.dtype(header_dtype) if header_dtype.kind != 'u': warnings.warn("Given a dtype which is not unsigned.") if mode not in 'rw' or len(mode) != 1: raise ValueError('mode must be either r or w') if hasattr(filename, 'seek'): self._fp = filename else: self._fp = open(filename, '%sb' % mode) self._header_dtype = header_dtype def _read_size(self): return int(np.fromfile(self._fp, dtype=self._header_dtype, count=1)) def write_record(self, *items): """ Write a record (including sizes) to the file. Parameters ---------- *items : array_like The data arrays to write. Notes ----- Writes data items to a file:: write_record(a.T, b.T, c.T, ...) write(1) a, b, c, ... Note that data in multidimensional arrays is written in row-major order --- to make them read correctly by Fortran programs, you need to transpose the arrays yourself when writing them. 
""" items = tuple(np.asarray(item) for item in items) total_size = sum(item.nbytes for item in items) nb = np.array([total_size], dtype=self._header_dtype) nb.tofile(self._fp) for item in items: item.tofile(self._fp) nb.tofile(self._fp) def read_record(self, *dtypes, **kwargs): """ Reads a record of a given type from the file. Parameters ---------- *dtypes : dtypes, optional Data type(s) specifying the size and endiness of the data. Returns ------- data : ndarray A one-dimensional array object. Notes ----- If the record contains a multi-dimensional array, you can specify the size in the dtype. For example:: INTEGER var(5,4) can be read with:: read_record('(4,5)i4').T Note that this function does **not** assume the file data is in Fortran column major order, so you need to (i) swap the order of dimensions when reading and (ii) transpose the resulting array. Alternatively, you can read the data as a 1D array and handle the ordering yourself. For example:: read_record('i4').reshape(5, 4, order='F') For records that contain several variables or mixed types (as opposed to single scalar or array types), give them as separate arguments:: double precision :: a integer :: b write(1) a, b record = f.read_record('<f4', '<i4') a = record[0] # first number b = record[1] # second number and if any of the variables are arrays, the shape can be specified as the third item in the relevant dtype:: double precision :: a integer :: b(3,4) write(1) a, b record = f.read_record('<f4', np.dtype(('<i4', (4, 3)))) a = record[0] b = record[1].T Numpy also supports a short syntax for this kind of type:: record = f.read_record('<f4', '(3,3)<i4') See Also -------- read_reals read_ints """ dtype = kwargs.pop('dtype', None) if kwargs: raise ValueError("Unknown keyword arguments {}".format(tuple(kwargs.keys()))) if dtype is not None: dtypes = dtypes + (dtype,) elif not dtypes: raise ValueError('Must specify at least one dtype') first_size = self._read_size() dtypes = tuple(np.dtype(dtype) for dtype in dtypes) block_size = sum(dtype.itemsize for dtype in dtypes) num_blocks, remainder = divmod(first_size, block_size) if remainder != 0: raise ValueError('Size obtained ({0}) is not a multiple of the ' 'dtypes given ({1}).'.format(first_size, block_size)) if len(dtypes) != 1 and first_size != block_size: # Fortran does not write mixed type array items in interleaved order, # and it's not possible to guess the sizes of the arrays that were written. # The user must specify the exact sizes of each of the arrays. raise ValueError('Size obtained ({0}) does not match with the expected ' 'size ({1}) of multi-item record'.format(first_size, block_size)) data = [] for dtype in dtypes: r = np.fromfile(self._fp, dtype=dtype, count=num_blocks) if dtype.shape != (): # Squeeze outmost block dimension for array items if num_blocks == 1: assert r.shape == (1,) + dtype.shape r = r[0] data.append(r) second_size = self._read_size() if first_size != second_size: raise IOError('Sizes do not agree in the header and footer for ' 'this record - check header dtype') # Unpack result if len(dtypes) == 1: return data[0] else: return tuple(data) def read_ints(self, dtype='i4'): """ Reads a record of a given type from the file, defaulting to an integer type (``INTEGER*4`` in Fortran). Parameters ---------- dtype : dtype, optional Data type specifying the size and endiness of the data. Returns ------- data : ndarray A one-dimensional array object. 
See Also -------- read_reals read_record """ return self.read_record(dtype) def read_reals(self, dtype='f8'): """ Reads a record of a given type from the file, defaulting to a floating point number (``real*8`` in Fortran). Parameters ---------- dtype : dtype, optional Data type specifying the size and endiness of the data. Returns ------- data : ndarray A one-dimensional array object. See Also -------- read_ints read_record """ return self.read_record(dtype) def close(self): """ Closes the file. It is unsupported to call any other methods off this object after closing it. Note that this class supports the 'with' statement in modern versions of Python, to call this automatically """ self._fp.close() def __enter__(self): return self def __exit__(self, type, value, tb): self.close()
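The mixed-type record case described in read_record's docstring, as a runnable sketch. Here 'mixed.unf' is an assumed file written by Fortran as ``write(1) a, b`` with ``double precision :: a`` and ``integer :: b(3,4)``; note that double precision corresponds to 8-byte floats, i.e. '<f8' on a little-endian machine:

import numpy as np
from scipy.io import FortranFile

f = FortranFile('mixed.unf', 'r')
# A length-1 float64 array plus a (3,4) Fortran array, read with its
# dimensions swapped and transposed back to column-major order.
a, b = f.read_record('<f8', np.dtype(('<i4', (4, 3))))
f.close()
print(a[0], b.T)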
file_length: 9,737 | avg_line_length: 29.622642 | max_line_length: 94 | extension_type: py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/wavfile.py
""" Module to read / write wav files using numpy arrays Functions --------- `read`: Return the sample rate (in samples/sec) and data from a WAV file. `write`: Write a numpy array as a WAV file. """ from __future__ import division, print_function, absolute_import import sys import numpy import struct import warnings __all__ = [ 'WavFileWarning', 'read', 'write' ] class WavFileWarning(UserWarning): pass WAVE_FORMAT_PCM = 0x0001 WAVE_FORMAT_IEEE_FLOAT = 0x0003 WAVE_FORMAT_EXTENSIBLE = 0xfffe KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT) # assumes file pointer is immediately # after the 'fmt ' id def _read_fmt_chunk(fid, is_big_endian): """ Returns ------- size : int size of format subchunk in bytes (minus 8 for "fmt " and itself) format_tag : int PCM, float, or compressed format channels : int number of channels fs : int sampling frequency in samples per second bytes_per_second : int overall byte rate for the file block_align : int bytes per sample, including all channels bit_depth : int bits per sample """ if is_big_endian: fmt = '>' else: fmt = '<' size = res = struct.unpack(fmt+'I', fid.read(4))[0] bytes_read = 0 if size < 16: raise ValueError("Binary structure of wave file is not compliant") res = struct.unpack(fmt+'HHIIHH', fid.read(16)) bytes_read += 16 format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res if format_tag == WAVE_FORMAT_EXTENSIBLE and size >= (16+2): ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0] bytes_read += 2 if ext_chunk_size >= 22: extensible_chunk_data = fid.read(22) bytes_read += 22 raw_guid = extensible_chunk_data[2+4:2+4+16] # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361) # MS GUID byte order: first three groups are native byte order, # rest is Big Endian if is_big_endian: tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71' else: tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71' if raw_guid.endswith(tail): format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0] else: raise ValueError("Binary structure of wave file is not compliant") if format_tag not in KNOWN_WAVE_FORMATS: raise ValueError("Unknown wave file format") # move file pointer to next chunk if size > (bytes_read): fid.read(size - bytes_read) return (size, format_tag, channels, fs, bytes_per_second, block_align, bit_depth) # assumes file pointer is immediately after the 'data' id def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, mmap=False): if is_big_endian: fmt = '>I' else: fmt = '<I' # Size of the data subchunk in bytes size = struct.unpack(fmt, fid.read(4))[0] # Number of bytes per sample bytes_per_sample = bit_depth//8 if bit_depth == 8: dtype = 'u1' else: if is_big_endian: dtype = '>' else: dtype = '<' if format_tag == WAVE_FORMAT_PCM: dtype += 'i%d' % bytes_per_sample else: dtype += 'f%d' % bytes_per_sample if not mmap: data = numpy.frombuffer(fid.read(size), dtype=dtype) else: start = fid.tell() data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start, shape=(size//bytes_per_sample,)) fid.seek(start + size) if channels > 1: data = data.reshape(-1, channels) return data def _skip_unknown_chunk(fid, is_big_endian): if is_big_endian: fmt = '>I' else: fmt = '<I' data = fid.read(4) # call unpack() and seek() only if we have really read data from file # otherwise empty read at the end of the file would trigger # unnecessary exception at unpack() call # in case data equals somehow to 0, there is no need for seek() anyway if data: size = struct.unpack(fmt, data)[0] fid.seek(size, 1) def 
_read_riff_chunk(fid): str1 = fid.read(4) # File signature if str1 == b'RIFF': is_big_endian = False fmt = '<I' elif str1 == b'RIFX': is_big_endian = True fmt = '>I' else: # There are also .wav files with "FFIR" or "XFIR" signatures? raise ValueError("File format {}... not " "understood.".format(repr(str1))) # Size of entire file file_size = struct.unpack(fmt, fid.read(4))[0] + 8 str2 = fid.read(4) if str2 != b'WAVE': raise ValueError("Not a WAV file.") return file_size, is_big_endian def read(filename, mmap=False): """ Open a WAV file Return the sample rate (in samples/sec) and data from a WAV file. Parameters ---------- filename : string or open file handle Input wav file. mmap : bool, optional Whether to read data as memory-mapped. Only to be used on real files (Default: False). .. versionadded:: 0.12.0 Returns ------- rate : int Sample rate of wav file. data : numpy array Data read from wav file. Data-type is determined from the file; see Notes. Notes ----- This function cannot read wav files with 24-bit data. Common data types: [1]_ ===================== =========== =========== ============= WAV format Min Max NumPy dtype ===================== =========== =========== ============= 32-bit floating-point -1.0 +1.0 float32 32-bit PCM -2147483648 +2147483647 int32 16-bit PCM -32768 +32767 int16 8-bit PCM 0 255 uint8 ===================== =========== =========== ============= Note that 8-bit PCM is unsigned. References ---------- .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming Interface and Data Specifications 1.0", section "Data Format of the Samples", August 1991 http://www.tactilemedia.com/info/MCI_Control_Info.html """ if hasattr(filename, 'read'): fid = filename mmap = False else: fid = open(filename, 'rb') try: file_size, is_big_endian = _read_riff_chunk(fid) fmt_chunk_received = False channels = 1 bit_depth = 8 format_tag = WAVE_FORMAT_PCM while fid.tell() < file_size: # read the next chunk chunk_id = fid.read(4) if not chunk_id: raise ValueError("Unexpected end of file.") elif len(chunk_id) < 4: raise ValueError("Incomplete wav chunk.") if chunk_id == b'fmt ': fmt_chunk_received = True fmt_chunk = _read_fmt_chunk(fid, is_big_endian) format_tag, channels, fs = fmt_chunk[1:4] bit_depth = fmt_chunk[6] if bit_depth not in (8, 16, 32, 64, 96, 128): raise ValueError("Unsupported bit depth: the wav file " "has {}-bit data.".format(bit_depth)) elif chunk_id == b'fact': _skip_unknown_chunk(fid, is_big_endian) elif chunk_id == b'data': if not fmt_chunk_received: raise ValueError("No fmt chunk before data") data = _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, mmap) elif chunk_id == b'LIST': # Someday this could be handled properly but for now skip it _skip_unknown_chunk(fid, is_big_endian) elif chunk_id in (b'JUNK', b'Fake'): # Skip alignment chunks without warning _skip_unknown_chunk(fid, is_big_endian) else: warnings.warn("Chunk (non-data) not understood, skipping it.", WavFileWarning) _skip_unknown_chunk(fid, is_big_endian) finally: if not hasattr(filename, 'read'): fid.close() else: fid.seek(0) return fs, data def write(filename, rate, data): """ Write a numpy array as a WAV file. Parameters ---------- filename : string or open file handle Output wav file. rate : int The sample rate (in samples/sec). data : ndarray A 1-D or 2-D numpy array of either integer or float data-type. Notes ----- * Writes a simple uncompressed WAV file. * To write multiple-channels, use a 2-D array of shape (Nsamples, Nchannels). 
* The bits-per-sample and PCM/float will be determined by the data-type. Common data types: [1]_ ===================== =========== =========== ============= WAV format Min Max NumPy dtype ===================== =========== =========== ============= 32-bit floating-point -1.0 +1.0 float32 32-bit PCM -2147483648 +2147483647 int32 16-bit PCM -32768 +32767 int16 8-bit PCM 0 255 uint8 ===================== =========== =========== ============= Note that 8-bit PCM is unsigned. References ---------- .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming Interface and Data Specifications 1.0", section "Data Format of the Samples", August 1991 http://www.tactilemedia.com/info/MCI_Control_Info.html """ if hasattr(filename, 'write'): fid = filename else: fid = open(filename, 'wb') fs = rate try: dkind = data.dtype.kind if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and data.dtype.itemsize == 1)): raise ValueError("Unsupported data type '%s'" % data.dtype) header_data = b'' header_data += b'RIFF' header_data += b'\x00\x00\x00\x00' header_data += b'WAVE' # fmt chunk header_data += b'fmt ' if dkind == 'f': format_tag = WAVE_FORMAT_IEEE_FLOAT else: format_tag = WAVE_FORMAT_PCM if data.ndim == 1: channels = 1 else: channels = data.shape[1] bit_depth = data.dtype.itemsize * 8 bytes_per_second = fs*(bit_depth // 8)*channels block_align = channels * (bit_depth // 8) fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs, bytes_per_second, block_align, bit_depth) if not (dkind == 'i' or dkind == 'u'): # add cbSize field for non-PCM files fmt_chunk_data += b'\x00\x00' header_data += struct.pack('<I', len(fmt_chunk_data)) header_data += fmt_chunk_data # fact chunk (non-PCM files) if not (dkind == 'i' or dkind == 'u'): header_data += b'fact' header_data += struct.pack('<II', 4, data.shape[0]) # check data size (needs to be immediately before the data chunk) if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF: raise ValueError("Data exceeds wave file size limit") fid.write(header_data) # data chunk fid.write(b'data') fid.write(struct.pack('<I', data.nbytes)) if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'): data = data.byteswap() _array_tofile(fid, data) # Determine file size and place it in correct # position at start of the file. size = fid.tell() fid.seek(4) fid.write(struct.pack('<I', size-8)) finally: if not hasattr(filename, 'write'): fid.close() else: fid.seek(0) if sys.version_info[0] >= 3: def _array_tofile(fid, data): # ravel gives a c-contiguous buffer fid.write(data.ravel().view('b').data) else: def _array_tofile(fid, data): fid.write(data.tostring())
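A round-trip sketch for the read/write pair above (the file name is illustrative):

import numpy as np
from scipy.io import wavfile

rate = 44100
t = np.linspace(0., 1., rate, endpoint=False)
# 16-bit PCM mono: scale a 440 Hz sine into the int16 range.
data = (0.5 * np.sin(2 * np.pi * 440.0 * t) * 32767).astype(np.int16)
wavfile.write('tone.wav', rate, data)
rate2, data2 = wavfile.read('tone.wav')
assert rate2 == rate and np.array_equal(data2, data)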
12,547
29.906404
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/idl.py
# IDLSave - a python module to read IDL 'save' files # Copyright (c) 2010 Thomas P. Robitaille # Many thanks to Craig Markwardt for publishing the Unofficial Format # Specification for IDL .sav files, without which this Python module would not # exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt). # This code was developed by with permission from ITT Visual Information # Systems. IDL(r) is a registered trademark of ITT Visual Information Systems, # Inc. for their Interactive Data Language software. # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from __future__ import division, print_function, absolute_import import struct import numpy as np from numpy.compat import asstr import tempfile import zlib import warnings # Define the different data types that can be found in an IDL save file DTYPE_DICT = {1: '>u1', 2: '>i2', 3: '>i4', 4: '>f4', 5: '>f8', 6: '>c8', 7: '|O', 8: '|O', 9: '>c16', 10: '|O', 11: '|O', 12: '>u2', 13: '>u4', 14: '>i8', 15: '>u8'} # Define the different record types that can be found in an IDL save file RECTYPE_DICT = {0: "START_MARKER", 1: "COMMON_VARIABLE", 2: "VARIABLE", 3: "SYSTEM_VARIABLE", 6: "END_MARKER", 10: "TIMESTAMP", 12: "COMPILED", 13: "IDENTIFICATION", 14: "VERSION", 15: "HEAP_HEADER", 16: "HEAP_DATA", 17: "PROMOTE64", 19: "NOTICE", 20: "DESCRIPTION"} # Define a dictionary to contain structure definitions STRUCT_DICT = {} def _align_32(f): '''Align to the next 32-bit position in a file''' pos = f.tell() if pos % 4 != 0: f.seek(pos + 4 - pos % 4) return def _skip_bytes(f, n): '''Skip `n` bytes''' f.read(n) return def _read_bytes(f, n): '''Read the next `n` bytes''' return f.read(n) def _read_byte(f): '''Read a single byte''' return np.uint8(struct.unpack('>B', f.read(4)[:1])[0]) def _read_long(f): '''Read a signed 32-bit integer''' return np.int32(struct.unpack('>l', f.read(4))[0]) def _read_int16(f): '''Read a signed 16-bit integer''' return np.int16(struct.unpack('>h', f.read(4)[2:4])[0]) def _read_int32(f): '''Read a signed 32-bit integer''' return np.int32(struct.unpack('>i', f.read(4))[0]) def _read_int64(f): '''Read a signed 64-bit integer''' return np.int64(struct.unpack('>q', f.read(8))[0]) def _read_uint16(f): '''Read an unsigned 16-bit integer''' return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0]) def _read_uint32(f): '''Read an unsigned 32-bit integer''' return np.uint32(struct.unpack('>I', f.read(4))[0]) def _read_uint64(f): '''Read an unsigned 64-bit integer''' return np.uint64(struct.unpack('>Q', f.read(8))[0]) def _read_float32(f): '''Read a 32-bit 
float''' return np.float32(struct.unpack('>f', f.read(4))[0]) def _read_float64(f): '''Read a 64-bit float''' return np.float64(struct.unpack('>d', f.read(8))[0]) class Pointer(object): '''Class used to define pointers''' def __init__(self, index): self.index = index return class ObjectPointer(Pointer): '''Class used to define object pointers''' pass def _read_string(f): '''Read a string''' length = _read_long(f) if length > 0: chars = _read_bytes(f, length) _align_32(f) chars = asstr(chars) else: chars = '' return chars def _read_string_data(f): '''Read a data string (length is specified twice)''' length = _read_long(f) if length > 0: length = _read_long(f) string_data = _read_bytes(f, length) _align_32(f) else: string_data = '' return string_data def _read_data(f, dtype): '''Read a variable with a specified data type''' if dtype == 1: if _read_int32(f) != 1: raise Exception("Error occurred while reading byte variable") return _read_byte(f) elif dtype == 2: return _read_int16(f) elif dtype == 3: return _read_int32(f) elif dtype == 4: return _read_float32(f) elif dtype == 5: return _read_float64(f) elif dtype == 6: real = _read_float32(f) imag = _read_float32(f) return np.complex64(real + imag * 1j) elif dtype == 7: return _read_string_data(f) elif dtype == 8: raise Exception("Should not be here - please report this") elif dtype == 9: real = _read_float64(f) imag = _read_float64(f) return np.complex128(real + imag * 1j) elif dtype == 10: return Pointer(_read_int32(f)) elif dtype == 11: return ObjectPointer(_read_int32(f)) elif dtype == 12: return _read_uint16(f) elif dtype == 13: return _read_uint32(f) elif dtype == 14: return _read_int64(f) elif dtype == 15: return _read_uint64(f) else: raise Exception("Unknown IDL type: %i - please report this" % dtype) def _read_structure(f, array_desc, struct_desc): ''' Read a structure, with the array and structure descriptors given as `array_desc` and `structure_desc` respectively. ''' nrows = array_desc['nelements'] columns = struct_desc['tagtable'] dtype = [] for col in columns: if col['structure'] or col['array']: dtype.append(((col['name'].lower(), col['name']), np.object_)) else: if col['typecode'] in DTYPE_DICT: dtype.append(((col['name'].lower(), col['name']), DTYPE_DICT[col['typecode']])) else: raise Exception("Variable type %i not implemented" % col['typecode']) structure = np.recarray((nrows, ), dtype=dtype) for i in range(nrows): for col in columns: dtype = col['typecode'] if col['structure']: structure[col['name']][i] = _read_structure(f, struct_desc['arrtable'][col['name']], struct_desc['structtable'][col['name']]) elif col['array']: structure[col['name']][i] = _read_array(f, dtype, struct_desc['arrtable'][col['name']]) else: structure[col['name']][i] = _read_data(f, dtype) # Reshape structure if needed if array_desc['ndims'] > 1: dims = array_desc['dims'][:int(array_desc['ndims'])] dims.reverse() structure = structure.reshape(dims) return structure def _read_array(f, typecode, array_desc): ''' Read an array of type `typecode`, with the array descriptor given as `array_desc`. 
''' if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]: if typecode == 1: nbytes = _read_int32(f) if nbytes != array_desc['nbytes']: warnings.warn("Not able to verify number of bytes from header") # Read bytes as numpy array array = np.frombuffer(f.read(array_desc['nbytes']), dtype=DTYPE_DICT[typecode]) elif typecode in [2, 12]: # These are 2 byte types, need to skip every two as they are not packed array = np.frombuffer(f.read(array_desc['nbytes']*2), dtype=DTYPE_DICT[typecode])[1::2] else: # Read bytes into list array = [] for i in range(array_desc['nelements']): dtype = typecode data = _read_data(f, dtype) array.append(data) array = np.array(array, dtype=np.object_) # Reshape array if needed if array_desc['ndims'] > 1: dims = array_desc['dims'][:int(array_desc['ndims'])] dims.reverse() array = array.reshape(dims) # Go to next alignment position _align_32(f) return array def _read_record(f): '''Function to read in a full record''' record = {'rectype': _read_long(f)} nextrec = _read_uint32(f) nextrec += _read_uint32(f) * 2**32 _skip_bytes(f, 4) if not record['rectype'] in RECTYPE_DICT: raise Exception("Unknown RECTYPE: %i" % record['rectype']) record['rectype'] = RECTYPE_DICT[record['rectype']] if record['rectype'] in ["VARIABLE", "HEAP_DATA"]: if record['rectype'] == "VARIABLE": record['varname'] = _read_string(f) else: record['heap_index'] = _read_long(f) _skip_bytes(f, 4) rectypedesc = _read_typedesc(f) if rectypedesc['typecode'] == 0: if nextrec == f.tell(): record['data'] = None # Indicates NULL value else: raise ValueError("Unexpected type code: 0") else: varstart = _read_long(f) if varstart != 7: raise Exception("VARSTART is not 7") if rectypedesc['structure']: record['data'] = _read_structure(f, rectypedesc['array_desc'], rectypedesc['struct_desc']) elif rectypedesc['array']: record['data'] = _read_array(f, rectypedesc['typecode'], rectypedesc['array_desc']) else: dtype = rectypedesc['typecode'] record['data'] = _read_data(f, dtype) elif record['rectype'] == "TIMESTAMP": _skip_bytes(f, 4*256) record['date'] = _read_string(f) record['user'] = _read_string(f) record['host'] = _read_string(f) elif record['rectype'] == "VERSION": record['format'] = _read_long(f) record['arch'] = _read_string(f) record['os'] = _read_string(f) record['release'] = _read_string(f) elif record['rectype'] == "IDENTIFICATON": record['author'] = _read_string(f) record['title'] = _read_string(f) record['idcode'] = _read_string(f) elif record['rectype'] == "NOTICE": record['notice'] = _read_string(f) elif record['rectype'] == "DESCRIPTION": record['description'] = _read_string_data(f) elif record['rectype'] == "HEAP_HEADER": record['nvalues'] = _read_long(f) record['indices'] = [] for i in range(record['nvalues']): record['indices'].append(_read_long(f)) elif record['rectype'] == "COMMONBLOCK": record['nvars'] = _read_long(f) record['name'] = _read_string(f) record['varnames'] = [] for i in range(record['nvars']): record['varnames'].append(_read_string(f)) elif record['rectype'] == "END_MARKER": record['end'] = True elif record['rectype'] == "UNKNOWN": warnings.warn("Skipping UNKNOWN record") elif record['rectype'] == "SYSTEM_VARIABLE": warnings.warn("Skipping SYSTEM_VARIABLE record") else: raise Exception("record['rectype']=%s not implemented" % record['rectype']) f.seek(nextrec) return record def _read_typedesc(f): '''Function to read in a type descriptor''' typedesc = {'typecode': _read_long(f), 'varflags': _read_long(f)} if typedesc['varflags'] & 2 == 2: raise Exception("System variables not implemented") 
typedesc['array'] = typedesc['varflags'] & 4 == 4 typedesc['structure'] = typedesc['varflags'] & 32 == 32 if typedesc['structure']: typedesc['array_desc'] = _read_arraydesc(f) typedesc['struct_desc'] = _read_structdesc(f) elif typedesc['array']: typedesc['array_desc'] = _read_arraydesc(f) return typedesc def _read_arraydesc(f): '''Function to read in an array descriptor''' arraydesc = {'arrstart': _read_long(f)} if arraydesc['arrstart'] == 8: _skip_bytes(f, 4) arraydesc['nbytes'] = _read_long(f) arraydesc['nelements'] = _read_long(f) arraydesc['ndims'] = _read_long(f) _skip_bytes(f, 8) arraydesc['nmax'] = _read_long(f) arraydesc['dims'] = [] for d in range(arraydesc['nmax']): arraydesc['dims'].append(_read_long(f)) elif arraydesc['arrstart'] == 18: warnings.warn("Using experimental 64-bit array read") _skip_bytes(f, 8) arraydesc['nbytes'] = _read_uint64(f) arraydesc['nelements'] = _read_uint64(f) arraydesc['ndims'] = _read_long(f) _skip_bytes(f, 8) arraydesc['nmax'] = 8 arraydesc['dims'] = [] for d in range(arraydesc['nmax']): v = _read_long(f) if v != 0: raise Exception("Expected a zero in ARRAY_DESC") arraydesc['dims'].append(_read_long(f)) else: raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart']) return arraydesc def _read_structdesc(f): '''Function to read in a structure descriptor''' structdesc = {} structstart = _read_long(f) if structstart != 9: raise Exception("STRUCTSTART should be 9") structdesc['name'] = _read_string(f) predef = _read_long(f) structdesc['ntags'] = _read_long(f) structdesc['nbytes'] = _read_long(f) structdesc['predef'] = predef & 1 structdesc['inherits'] = predef & 2 structdesc['is_super'] = predef & 4 if not structdesc['predef']: structdesc['tagtable'] = [] for t in range(structdesc['ntags']): structdesc['tagtable'].append(_read_tagdesc(f)) for tag in structdesc['tagtable']: tag['name'] = _read_string(f) structdesc['arrtable'] = {} for tag in structdesc['tagtable']: if tag['array']: structdesc['arrtable'][tag['name']] = _read_arraydesc(f) structdesc['structtable'] = {} for tag in structdesc['tagtable']: if tag['structure']: structdesc['structtable'][tag['name']] = _read_structdesc(f) if structdesc['inherits'] or structdesc['is_super']: structdesc['classname'] = _read_string(f) structdesc['nsupclasses'] = _read_long(f) structdesc['supclassnames'] = [] for s in range(structdesc['nsupclasses']): structdesc['supclassnames'].append(_read_string(f)) structdesc['supclasstable'] = [] for s in range(structdesc['nsupclasses']): structdesc['supclasstable'].append(_read_structdesc(f)) STRUCT_DICT[structdesc['name']] = structdesc else: if not structdesc['name'] in STRUCT_DICT: raise Exception("PREDEF=1 but can't find definition") structdesc = STRUCT_DICT[structdesc['name']] return structdesc def _read_tagdesc(f): '''Function to read in a tag descriptor''' tagdesc = {'offset': _read_long(f)} if tagdesc['offset'] == -1: tagdesc['offset'] = _read_uint64(f) tagdesc['typecode'] = _read_long(f) tagflags = _read_long(f) tagdesc['array'] = tagflags & 4 == 4 tagdesc['structure'] = tagflags & 32 == 32 tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT # Assume '10'x is scalar return tagdesc def _replace_heap(variable, heap): if isinstance(variable, Pointer): while isinstance(variable, Pointer): if variable.index == 0: variable = None else: if variable.index in heap: variable = heap[variable.index] else: warnings.warn("Variable referenced by pointer not found " "in heap: variable will be set to None") variable = None replace, new = _replace_heap(variable, heap) if 
replace: variable = new return True, variable elif isinstance(variable, np.core.records.recarray): # Loop over records for ir, record in enumerate(variable): replace, new = _replace_heap(record, heap) if replace: variable[ir] = new return False, variable elif isinstance(variable, np.core.records.record): # Loop over values for iv, value in enumerate(variable): replace, new = _replace_heap(value, heap) if replace: variable[iv] = new return False, variable elif isinstance(variable, np.ndarray): # Loop over values if type is np.object_ if variable.dtype.type is np.object_: for iv in range(variable.size): replace, new = _replace_heap(variable.item(iv), heap) if replace: variable.itemset(iv, new) return False, variable else: return False, variable class AttrDict(dict): ''' A case-insensitive dictionary with access via item, attribute, and call notations: >>> d = AttrDict() >>> d['Variable'] = 123 >>> d['Variable'] 123 >>> d.Variable 123 >>> d.variable 123 >>> d('VARIABLE') 123 ''' def __init__(self, init={}): dict.__init__(self, init) def __getitem__(self, name): return super(AttrDict, self).__getitem__(name.lower()) def __setitem__(self, key, value): return super(AttrDict, self).__setitem__(key.lower(), value) __getattr__ = __getitem__ __setattr__ = __setitem__ __call__ = __getitem__ def readsav(file_name, idict=None, python_dict=False, uncompressed_file_name=None, verbose=False): """ Read an IDL .sav file. Parameters ---------- file_name : str Name of the IDL save file. idict : dict, optional Dictionary in which to insert .sav file variables. python_dict : bool, optional By default, the object return is not a Python dictionary, but a case-insensitive dictionary with item, attribute, and call access to variables. To get a standard Python dictionary, set this option to True. uncompressed_file_name : str, optional This option only has an effect for .sav files written with the /compress option. If a file name is specified, compressed .sav files are uncompressed to this file. Otherwise, readsav will use the `tempfile` module to determine a temporary filename automatically, and will remove the temporary file upon successfully reading it in. verbose : bool, optional Whether to print out information about the save file, including the records read, and available variables. Returns ------- idl_dict : AttrDict or dict If `python_dict` is set to False (default), this function returns a case-insensitive dictionary with item, attribute, and call access to variables. If `python_dict` is set to True, this function returns a Python dictionary with all variable names in lowercase. If `idict` was specified, then variables are written to the dictionary specified, and the updated dictionary is returned. """ # Initialize record and variable holders records = [] if python_dict or idict: variables = {} else: variables = AttrDict() # Open the IDL file f = open(file_name, 'rb') # Read the signature, which should be 'SR' signature = _read_bytes(f, 2) if signature != b'SR': raise Exception("Invalid SIGNATURE: %s" % signature) # Next, the record format, which is '\x00\x04' for normal .sav # files, and '\x00\x06' for compressed .sav files. 
recfmt = _read_bytes(f, 2) if recfmt == b'\x00\x04': pass elif recfmt == b'\x00\x06': if verbose: print("IDL Save file is compressed") if uncompressed_file_name: fout = open(uncompressed_file_name, 'w+b') else: fout = tempfile.NamedTemporaryFile(suffix='.sav') if verbose: print(" -> expanding to %s" % fout.name) # Write header fout.write(b'SR\x00\x04') # Cycle through records while True: # Read record type rectype = _read_long(f) fout.write(struct.pack('>l', int(rectype))) # Read position of next record and return as int nextrec = _read_uint32(f) nextrec += _read_uint32(f) * 2**32 # Read the unknown 4 bytes unknown = f.read(4) # Check if the end of the file has been reached if RECTYPE_DICT[rectype] == 'END_MARKER': fout.write(struct.pack('>I', int(nextrec) % 2**32)) fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32))) fout.write(unknown) break # Find current position pos = f.tell() # Decompress record rec_string = zlib.decompress(f.read(nextrec-pos)) # Find new position of next record nextrec = fout.tell() + len(rec_string) + 12 # Write out record fout.write(struct.pack('>I', int(nextrec % 2**32))) fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32))) fout.write(unknown) fout.write(rec_string) # Close the original compressed file f.close() # Set f to be the decompressed file, and skip the first four bytes f = fout f.seek(4) else: raise Exception("Invalid RECFMT: %s" % recfmt) # Loop through records, and add them to the list while True: r = _read_record(f) records.append(r) if 'end' in r: if r['end']: break # Close the file f.close() # Find heap data variables heap = {} for r in records: if r['rectype'] == "HEAP_DATA": heap[r['heap_index']] = r['data'] # Find all variables for r in records: if r['rectype'] == "VARIABLE": replace, new = _replace_heap(r['data'], heap) if replace: r['data'] = new variables[r['varname'].lower()] = r['data'] if verbose: # Print out timestamp info about the file for record in records: if record['rectype'] == "TIMESTAMP": print("-"*50) print("Date: %s" % record['date']) print("User: %s" % record['user']) print("Host: %s" % record['host']) break # Print out version info about the file for record in records: if record['rectype'] == "VERSION": print("-"*50) print("Format: %s" % record['format']) print("Architecture: %s" % record['arch']) print("Operating System: %s" % record['os']) print("IDL Version: %s" % record['release']) break # Print out identification info about the file for record in records: if record['rectype'] == "IDENTIFICATON": print("-"*50) print("Author: %s" % record['author']) print("Title: %s" % record['title']) print("ID Code: %s" % record['idcode']) break # Print out descriptions saved with the file for record in records: if record['rectype'] == "DESCRIPTION": print("-"*50) print("Description: %s" % record['description']) break print("-"*50) print("Successfully read %i records of which:" % (len(records))) # Create convenience list of record types rectypes = [r['rectype'] for r in records] for rt in set(rectypes): if rt != 'END_MARKER': print(" - %i are of type %s" % (rectypes.count(rt), rt)) print("-"*50) if 'VARIABLE' in rectypes: print("Available variables:") for var in variables: print(" - %s [%s]" % (var, type(variables[var]))) print("-"*50) if idict: for var in variables: idict[var] = variables[var] return idict else: return variables
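
# --- Hedged usage sketch (not part of the original module) -----------------
# Typical calls into readsav(); 'example.sav' is a hypothetical file name.
if __name__ == '__main__':
    s = readsav('example.sav', verbose=True)
    # AttrDict gives case-insensitive item, attribute and call access,
    # so s['myvar'], s.MYVAR and s('MyVar') would all hit the same entry.
    for name in s:
        print(name, type(s[name]))
    # A plain dict with lowercase keys, if preferred:
    d = readsav('example.sav', python_dict=True)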
25,814
28.235561
89
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/__init__.py
# -*- encoding:utf-8 -*- """ ================================== Input and output (:mod:`scipy.io`) ================================== .. currentmodule:: scipy.io SciPy has many modules, classes, and functions available to read data from and write data to a variety of file formats. .. seealso:: :ref:`numpy-reference.routines.io` (in Numpy) MATLAB® files ============= .. autosummary:: :toctree: generated/ loadmat - Read a MATLAB style mat file (version 4 through 7.1) savemat - Write a MATLAB style mat file (version 4 through 7.1) whosmat - List contents of a MATLAB style mat file (version 4 through 7.1) IDL® files ========== .. autosummary:: :toctree: generated/ readsav - Read an IDL 'save' file Matrix Market files =================== .. autosummary:: :toctree: generated/ mminfo - Query matrix info from Matrix Market formatted file mmread - Read matrix from Matrix Market formatted file mmwrite - Write matrix to Matrix Market formatted file Unformatted Fortran files =============================== .. autosummary:: :toctree: generated/ FortranFile - A file object for unformatted sequential Fortran files Netcdf ====== .. autosummary:: :toctree: generated/ netcdf_file - A file object for NetCDF data netcdf_variable - A data object for the netcdf module Harwell-Boeing files ==================== .. autosummary:: :toctree: generated/ hb_read -- read H-B file hb_write -- write H-B file Wav sound files (:mod:`scipy.io.wavfile`) ========================================= .. module:: scipy.io.wavfile .. autosummary:: :toctree: generated/ read write WavFileWarning Arff files (:mod:`scipy.io.arff`) ================================= .. module:: scipy.io.arff .. autosummary:: :toctree: generated/ loadarff MetaData ArffError ParseArffError """ from __future__ import division, print_function, absolute_import # matfile read and write from .matlab import loadmat, savemat, whosmat, byteordercodes # netCDF file support from .netcdf import netcdf_file, netcdf_variable # Fortran file support from ._fortran import FortranFile from .mmio import mminfo, mmread, mmwrite from .idl import readsav from .harwell_boeing import hb_read, hb_write __all__ = [s for s in dir() if not s.startswith('_')] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
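
# --- Hedged usage sketch (not part of the original module) -----------------
# A quick tour of three of the MATLAB entry points re-exported above;
# 'demo.mat' is a hypothetical file name used for illustration only.
if __name__ == '__main__':
    import numpy as np

    savemat('demo.mat', {'a': np.arange(4), 'b': 'a string'})
    print(whosmat('demo.mat'))     # (name, shape, class) triples
    contents = loadmat('demo.mat')
    print(sorted(k for k in contents if not k.startswith('__')))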
2,416
20.201754
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/byteordercodes.py
''' Byteorder utilities for system - numpy byteorder encoding

Converts a variety of string codes for little endian, big endian,
native byte order and swapped byte order to explicit numpy endian
codes - one of '<' (little endian) or '>' (big endian)

'''
from __future__ import division, print_function, absolute_import

import sys

sys_is_le = sys.byteorder == 'little'
native_code = sys_is_le and '<' or '>'
swapped_code = sys_is_le and '>' or '<'

aliases = {'little': ('little', '<', 'l', 'le'),
           'big': ('big', '>', 'b', 'be'),
           'native': ('native', '='),
           # lowercase 's' added so the lower-cased input accepted by
           # to_numpy_code can match, as the docstring promises
           'swapped': ('swapped', 'S', 's')}


def to_numpy_code(code):
    """
    Convert various order codings to numpy format.

    Parameters
    ----------
    code : str
        The code to convert. It is converted to lower case before parsing.
        Legal values are:
        'little', 'big', 'l', 'b', 'le', 'be', '<', '>', 'native', '=',
        'swapped', 's'.

    Returns
    -------
    out_code : {'<', '>'}
        Here '<' is the numpy dtype code for little endian,
        and '>' is the code for big endian.

    Examples
    --------
    >>> import sys
    >>> sys_is_le == (sys.byteorder == 'little')
    True
    >>> to_numpy_code('big')
    '>'
    >>> to_numpy_code('little')
    '<'
    >>> nc = to_numpy_code('native')
    >>> nc == '<' if sys_is_le else nc == '>'
    True
    >>> sc = to_numpy_code('swapped')
    >>> sc == '>' if sys_is_le else sc == '<'
    True

    """
    # Check for None before lowercasing; the original called code.lower()
    # first, which raised AttributeError for code=None instead of
    # returning the native code as documented.
    if code is None:
        return native_code
    code = code.lower()
    if code in aliases['little']:
        return '<'
    elif code in aliases['big']:
        return '>'
    elif code in aliases['native']:
        return native_code
    elif code in aliases['swapped']:
        return swapped_code
    else:
        raise ValueError(
            'We cannot handle byte order %s' % code)
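
# --- Hedged usage sketch (not part of the original module) -----------------
# to_numpy_code() normalizes user-supplied byte-order strings before they
# are spliced into numpy dtype strings (e.g. a mat-file reader handed
# byte_order='little'). A minimal illustration:
if __name__ == '__main__':
    import numpy as np

    order = to_numpy_code('le')                     # -> '<'
    print(np.dtype(order + 'i4'))                   # explicit LE int32
    print(to_numpy_code('native') == native_code)   # True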
1,874
25.408451
74
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/mio5.py
''' Classes for read / write of matlab (TM) 5 files

The matfile specification last found here:

http://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf

(as of December 5 2008)
'''
from __future__ import division, print_function, absolute_import

'''
=================================
 Note on functions and mat files
=================================

The document above does not give any hints as to the storage of matlab
function handles, or anonymous function handles. I had therefore to
guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and
``mxOPAQUE_CLASS`` by looking at example mat files.

``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to
contain a struct matrix with a set pattern of fields. For anonymous
functions, a sub-field of one of these fields seems to contain the
well-named ``mxOPAQUE_CLASS``. This seems to contain:

* array flags as for any matlab matrix
* 3 int8 strings
* a matrix

It seems that, whenever the mat file contains a ``mxOPAQUE_CLASS``
instance, there is also an un-named matrix (name == '') at the end of
the mat file. I'll call this the ``__function_workspace__`` matrix.

When I saved two anonymous functions in a mat file, or appended another
anonymous function to the mat file, there was still only one
``__function_workspace__`` un-named matrix at the end, but larger than
that for a mat file with a single anonymous function, suggesting that
the workspaces for the two functions had been merged.

The ``__function_workspace__`` matrix appears to be of double class
(``mxDOUBLE_CLASS``), but stored as uint8, the memory for which is in
the format of a mini .mat file, without the first 124 bytes of the file
header (the description and the subsystem_offset), but with the version
U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,
presumably for 8 byte padding, and then a series of ``miMATRIX``
entries, as in a standard mat file. The ``miMATRIX`` entries appear to
be series of un-named (name == '') matrices, and may also contain
arrays of this same mini-mat format.

I guess that:

* saving an anonymous function back to a mat file will need the
  associated ``__function_workspace__`` matrix saved as well for the
  anonymous function to work correctly.
* appending to a mat file that has a ``__function_workspace__`` would
  involve first pulling off this workspace, appending, checking whether
  there were any more anonymous functions appended, and then somehow
  merging the relevant workspaces, and saving at the end of the mat
  file.

The mat files I was playing with are in ``tests/data``:

* sqr.mat
* parabola.mat
* some_functions.mat

See ``tests/test_mio.py:test_mio_funcs.py`` for a debugging script I was
working with.
''' # Small fragments of current code adapted from matfile.py by Heiko # Henkelmann import os import time import sys import zlib from io import BytesIO import warnings import numpy as np from numpy.compat import asbytes, asstr import scipy.sparse from scipy._lib.six import string_types from .byteordercodes import native_code, swapped_code from .miobase import (MatFileReader, docfiller, matdims, read_dtype, arr_to_chars, arr_dtype_number, MatWriteError, MatReadError, MatReadWarning) # Reader object for matlab 5 format variables from .mio5_utils import VarReader5 # Constants and helper objects from .mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES, NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8, miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS, mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS, mxDOUBLE_CLASS, mclass_info) from .streams import ZlibInputStream class MatFile5Reader(MatFileReader): ''' Reader for Mat 5 mat files Adds the following attribute to base class uint16_codec - char codec to use for uint16 char arrays (defaults to system default codec) Uses variable reader that has the following stardard interface (see abstract class in ``miobase``:: __init__(self, file_reader) read_header(self) array_from_header(self) and added interface:: set_stream(self, stream) read_full_tag(self) ''' @docfiller def __init__(self, mat_stream, byte_order=None, mat_dtype=False, squeeze_me=False, chars_as_strings=True, matlab_compatible=False, struct_as_record=True, verify_compressed_data_integrity=True, uint16_codec=None ): '''Initializer for matlab 5 file format reader %(matstream_arg)s %(load_args)s %(struct_arg)s uint16_codec : {None, string} Set codec to use for uint16 char arrays (e.g. 'utf-8'). Use system default codec if None ''' super(MatFile5Reader, self).__init__( mat_stream, byte_order, mat_dtype, squeeze_me, chars_as_strings, matlab_compatible, struct_as_record, verify_compressed_data_integrity ) # Set uint16 codec if not uint16_codec: uint16_codec = sys.getdefaultencoding() self.uint16_codec = uint16_codec # placeholders for readers - see initialize_read method self._file_reader = None self._matrix_reader = None def guess_byte_order(self): ''' Guess byte order. Sets stream pointer to 0 ''' self.mat_stream.seek(126) mi = self.mat_stream.read(2) self.mat_stream.seek(0) return mi == b'IM' and '<' or '>' def read_file_header(self): ''' Read in mat 5 file header ''' hdict = {} hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header'] hdr = read_dtype(self.mat_stream, hdr_dtype) hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000') v_major = hdr['version'] >> 8 v_minor = hdr['version'] & 0xFF hdict['__version__'] = '%d.%d' % (v_major, v_minor) return hdict def initialize_read(self): ''' Run when beginning read of variables Sets up readers from parameters in `self` ''' # reader for top level stream. 
We need this extra top-level # reader because we use the matrix_reader object to contain # compressed matrices (so they have their own stream) self._file_reader = VarReader5(self) # reader for matrix streams self._matrix_reader = VarReader5(self) def read_var_header(self): ''' Read header, return header, next position Header has to define at least .name and .is_global Parameters ---------- None Returns ------- header : object object that can be passed to self.read_var_array, and that has attributes .name and .is_global next_position : int position in stream of next variable ''' mdtype, byte_count = self._file_reader.read_full_tag() if not byte_count > 0: raise ValueError("Did not read any bytes") next_pos = self.mat_stream.tell() + byte_count if mdtype == miCOMPRESSED: # Make new stream from compressed data stream = ZlibInputStream(self.mat_stream, byte_count) self._matrix_reader.set_stream(stream) check_stream_limit = self.verify_compressed_data_integrity mdtype, byte_count = self._matrix_reader.read_full_tag() else: check_stream_limit = False self._matrix_reader.set_stream(self.mat_stream) if not mdtype == miMATRIX: raise TypeError('Expecting miMATRIX type here, got %d' % mdtype) header = self._matrix_reader.read_header(check_stream_limit) return header, next_pos def read_var_array(self, header, process=True): ''' Read array, given `header` Parameters ---------- header : header object object with fields defining variable header process : {True, False} bool, optional If True, apply recursive post-processing during loading of array. Returns ------- arr : array array with post-processing applied or not according to `process`. ''' return self._matrix_reader.array_from_header(header, process) def get_variables(self, variable_names=None): ''' get variables from stream as dictionary variable_names - optional list of variable names to get If variable_names is None, then get all variables in file ''' if isinstance(variable_names, string_types): variable_names = [variable_names] elif variable_names is not None: variable_names = list(variable_names) self.mat_stream.seek(0) # Here we pass all the parameters in self to the reading objects self.initialize_read() mdict = self.read_file_header() mdict['__globals__'] = [] while not self.end_of_stream(): hdr, next_position = self.read_var_header() name = asstr(hdr.name) if name in mdict: warnings.warn('Duplicate variable name "%s" in stream' ' - replacing previous with new\n' 'Consider mio5.varmats_from_mat to split ' 'file into single variable files' % name, MatReadWarning, stacklevel=2) if name == '': # can only be a matlab 7 function workspace name = '__function_workspace__' # We want to keep this raw because mat_dtype processing # will break the format (uint8 as mxDOUBLE_CLASS) process = False else: process = True if variable_names is not None and name not in variable_names: self.mat_stream.seek(next_position) continue try: res = self.read_var_array(hdr, process) except MatReadError as err: warnings.warn( 'Unreadable variable "%s", because "%s"' % (name, err), Warning, stacklevel=2) res = "Read error: %s" % err self.mat_stream.seek(next_position) mdict[name] = res if hdr.is_global: mdict['__globals__'].append(name) if variable_names is not None: variable_names.remove(name) if len(variable_names) == 0: break return mdict def list_variables(self): ''' list variables from stream ''' self.mat_stream.seek(0) # Here we pass all the parameters in self to the reading objects self.initialize_read() self.read_file_header() vars = [] while not 
self.end_of_stream(): hdr, next_position = self.read_var_header() name = asstr(hdr.name) if name == '': # can only be a matlab 7 function workspace name = '__function_workspace__' shape = self._matrix_reader.shape_from_header(hdr) if hdr.is_logical: info = 'logical' else: info = mclass_info.get(hdr.mclass, 'unknown') vars.append((name, shape, info)) self.mat_stream.seek(next_position) return vars def varmats_from_mat(file_obj): """ Pull variables out of mat 5 file as a sequence of mat file objects This can be useful with a difficult mat file, containing unreadable variables. This routine pulls the variables out in raw form and puts them, unread, back into a file stream for saving or reading. Another use is the pathological case where there is more than one variable of the same name in the file; this routine returns the duplicates, whereas the standard reader will overwrite duplicates in the returned dictionary. The file pointer in `file_obj` will be undefined. File pointers for the returned file-like objects are set at 0. Parameters ---------- file_obj : file-like file object containing mat file Returns ------- named_mats : list list contains tuples of (name, BytesIO) where BytesIO is a file-like object containing mat file contents as for a single variable. The BytesIO contains a string with the original header and a single var. If ``var_file_obj`` is an individual BytesIO instance, then save as a mat file with something like ``open('test.mat', 'wb').write(var_file_obj.read())`` Examples -------- >>> import scipy.io BytesIO is from the ``io`` module in python 3, and is ``cStringIO`` for python < 3. >>> mat_fileobj = BytesIO() >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'}) >>> varmats = varmats_from_mat(mat_fileobj) >>> sorted([name for name, str_obj in varmats]) ['a', 'b'] """ rdr = MatFile5Reader(file_obj) file_obj.seek(0) # Raw read of top-level file header hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize raw_hdr = file_obj.read(hdr_len) # Initialize variable reading file_obj.seek(0) rdr.initialize_read() mdict = rdr.read_file_header() next_position = file_obj.tell() named_mats = [] while not rdr.end_of_stream(): start_position = next_position hdr, next_position = rdr.read_var_header() name = asstr(hdr.name) # Read raw variable string file_obj.seek(start_position) byte_count = next_position - start_position var_str = file_obj.read(byte_count) # write to stringio object out_obj = BytesIO() out_obj.write(raw_hdr) out_obj.write(var_str) out_obj.seek(0) named_mats.append((name, out_obj)) return named_mats class EmptyStructMarker(object): """ Class to indicate presence of empty matlab struct on output """ def to_writeable(source): ''' Convert input object ``source`` to something we can write Parameters ---------- source : object Returns ------- arr : None or ndarray or EmptyStructMarker If `source` cannot be converted to something we can write to a matfile, return None. If `source` is equivalent to an empty dictionary, return ``EmptyStructMarker``. Otherwise return `source` converted to an ndarray with contents for writing to matfile. 
''' if isinstance(source, np.ndarray): return source if source is None: return None # Objects that implement mappings is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and hasattr(source, 'items')) # Objects that don't implement mappings, but do have dicts if isinstance(source, np.generic): # Numpy scalars are never mappings (pypy issue workaround) pass elif not is_mapping and hasattr(source, '__dict__'): source = dict((key, value) for key, value in source.__dict__.items() if not key.startswith('_')) is_mapping = True if is_mapping: dtype = [] values = [] for field, value in source.items(): if (isinstance(field, string_types) and field[0] not in '_0123456789'): dtype.append((str(field), object)) values.append(value) if dtype: return np.array([tuple(values)], dtype) else: return EmptyStructMarker # Next try and convert to an array narr = np.asanyarray(source) if narr.dtype.type in (object, np.object_) and \ narr.shape == () and narr == source: # No interesting conversion possible return None return narr # Native byte ordered dtypes for convenience for writers NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header'] NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full'] NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata'] NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags'] class VarWriter5(object): ''' Generic matlab matrix writing class ''' mat_tag = np.zeros((), NDT_TAG_FULL) mat_tag['mdtype'] = miMATRIX def __init__(self, file_writer): self.file_stream = file_writer.file_stream self.unicode_strings = file_writer.unicode_strings self.long_field_names = file_writer.long_field_names self.oned_as = file_writer.oned_as # These are used for top level writes, and unset after self._var_name = None self._var_is_global = False def write_bytes(self, arr): self.file_stream.write(arr.tostring(order='F')) def write_string(self, s): self.file_stream.write(s) def write_element(self, arr, mdtype=None): ''' write tag and data ''' if mdtype is None: mdtype = NP_TO_MTYPES[arr.dtype.str[1:]] # Array needs to be in native byte order if arr.dtype.byteorder == swapped_code: arr = arr.byteswap().newbyteorder() byte_count = arr.size*arr.itemsize if byte_count <= 4: self.write_smalldata_element(arr, mdtype, byte_count) else: self.write_regular_element(arr, mdtype, byte_count) def write_smalldata_element(self, arr, mdtype, byte_count): # write tag with embedded data tag = np.zeros((), NDT_TAG_SMALL) tag['byte_count_mdtype'] = (byte_count << 16) + mdtype # if arr.tostring is < 4, the element will be zero-padded as needed. 
tag['data'] = arr.tostring(order='F') self.write_bytes(tag) def write_regular_element(self, arr, mdtype, byte_count): # write tag, data tag = np.zeros((), NDT_TAG_FULL) tag['mdtype'] = mdtype tag['byte_count'] = byte_count self.write_bytes(tag) self.write_bytes(arr) # pad to next 64-bit boundary bc_mod_8 = byte_count % 8 if bc_mod_8: self.file_stream.write(b'\x00' * (8-bc_mod_8)) def write_header(self, shape, mclass, is_complex=False, is_logical=False, nzmax=0): ''' Write header for given data options shape : sequence array shape mclass - mat5 matrix class is_complex - True if matrix is complex is_logical - True if matrix is logical nzmax - max non zero elements for sparse arrays We get the name and the global flag from the object, and reset them to defaults after we've used them ''' # get name and is_global from one-shot object store name = self._var_name is_global = self._var_is_global # initialize the top-level matrix tag, store position self._mat_tag_pos = self.file_stream.tell() self.write_bytes(self.mat_tag) # write array flags (complex, global, logical, class, nzmax) af = np.zeros((), NDT_ARRAY_FLAGS) af['data_type'] = miUINT32 af['byte_count'] = 8 flags = is_complex << 3 | is_global << 2 | is_logical << 1 af['flags_class'] = mclass | flags << 8 af['nzmax'] = nzmax self.write_bytes(af) # shape self.write_element(np.array(shape, dtype='i4')) # write name name = np.asarray(name) if name == '': # empty string zero-terminated self.write_smalldata_element(name, miINT8, 0) else: self.write_element(name, miINT8) # reset the one-shot store to defaults self._var_name = '' self._var_is_global = False def update_matrix_tag(self, start_pos): curr_pos = self.file_stream.tell() self.file_stream.seek(start_pos) byte_count = curr_pos - start_pos - 8 if byte_count >= 2**32: raise MatWriteError("Matrix too large to save with Matlab " "5 format") self.mat_tag['byte_count'] = byte_count self.write_bytes(self.mat_tag) self.file_stream.seek(curr_pos) def write_top(self, arr, name, is_global): """ Write variable at top level of mat file Parameters ---------- arr : array_like array-like object to create writer for name : str, optional name as it will appear in matlab workspace default is empty string is_global : {False, True}, optional whether variable will be global on load into matlab """ # these are set before the top-level header write, and unset at # the end of the same write, because they do not apply for lower levels self._var_is_global = is_global self._var_name = name # write the header and data self.write(arr) def write(self, arr): ''' Write `arr` to stream at top and sub levels Parameters ---------- arr : array_like array-like object to create writer for ''' # store position, so we can update the matrix tag mat_tag_pos = self.file_stream.tell() # First check if these are sparse if scipy.sparse.issparse(arr): self.write_sparse(arr) self.update_matrix_tag(mat_tag_pos) return # Try to convert things that aren't arrays narr = to_writeable(arr) if narr is None: raise TypeError('Could not convert %s (type %s) to array' % (arr, type(arr))) if isinstance(narr, MatlabObject): self.write_object(narr) elif isinstance(narr, MatlabFunction): raise MatWriteError('Cannot write matlab functions') elif narr is EmptyStructMarker: # empty struct array self.write_empty_struct() elif narr.dtype.fields: # struct array self.write_struct(narr) elif narr.dtype.hasobject: # cell array self.write_cells(narr) elif narr.dtype.kind in ('U', 'S'): if self.unicode_strings: codec = 'UTF8' else: codec = 'ascii' 
self.write_char(narr, codec) else: self.write_numeric(narr) self.update_matrix_tag(mat_tag_pos) def write_numeric(self, arr): imagf = arr.dtype.kind == 'c' logif = arr.dtype.kind == 'b' try: mclass = NP_TO_MXTYPES[arr.dtype.str[1:]] except KeyError: # No matching matlab type, probably complex256 / float128 / float96 # Cast data to complex128 / float64. if imagf: arr = arr.astype('c128') elif logif: arr = arr.astype('i1') # Should only contain 0/1 else: arr = arr.astype('f8') mclass = mxDOUBLE_CLASS self.write_header(matdims(arr, self.oned_as), mclass, is_complex=imagf, is_logical=logif) if imagf: self.write_element(arr.real) self.write_element(arr.imag) else: self.write_element(arr) def write_char(self, arr, codec='ascii'): ''' Write string array `arr` with given `codec` ''' if arr.size == 0 or np.all(arr == ''): # This an empty string array or a string array containing # only empty strings. Matlab cannot distiguish between a # string array that is empty, and a string array containing # only empty strings, because it stores strings as arrays of # char. There is no way of having an array of char that is # not empty, but contains an empty string. We have to # special-case the array-with-empty-strings because even # empty strings have zero padding, which would otherwise # appear in matlab as a string with a space. shape = (0,) * np.max([arr.ndim, 2]) self.write_header(shape, mxCHAR_CLASS) self.write_smalldata_element(arr, miUTF8, 0) return # non-empty string. # # Convert to char array arr = arr_to_chars(arr) # We have to write the shape directly, because we are going # recode the characters, and the resulting stream of chars # may have a different length shape = arr.shape self.write_header(shape, mxCHAR_CLASS) if arr.dtype.kind == 'U' and arr.size: # Make one long string from all the characters. We need to # transpose here, because we're flattening the array, before # we write the bytes. The bytes have to be written in # Fortran order. 
n_chars = np.product(shape) st_arr = np.ndarray(shape=(), dtype=arr_dtype_number(arr, n_chars), buffer=arr.T.copy()) # Fortran order # Recode with codec to give byte string st = st_arr.item().encode(codec) # Reconstruct as one-dimensional byte array arr = np.ndarray(shape=(len(st),), dtype='S1', buffer=st) self.write_element(arr, mdtype=miUTF8) def write_sparse(self, arr): ''' Sparse matrices are 2D ''' A = arr.tocsc() # convert to sparse CSC format A.sort_indices() # MATLAB expects sorted row indices is_complex = (A.dtype.kind == 'c') is_logical = (A.dtype.kind == 'b') nz = A.nnz self.write_header(matdims(arr, self.oned_as), mxSPARSE_CLASS, is_complex=is_complex, is_logical=is_logical, # matlab won't load file with 0 nzmax nzmax=1 if nz == 0 else nz) self.write_element(A.indices.astype('i4')) self.write_element(A.indptr.astype('i4')) self.write_element(A.data.real) if is_complex: self.write_element(A.data.imag) def write_cells(self, arr): self.write_header(matdims(arr, self.oned_as), mxCELL_CLASS) # loop over data, column major A = np.atleast_2d(arr).flatten('F') for el in A: self.write(el) def write_empty_struct(self): self.write_header((1, 1), mxSTRUCT_CLASS) # max field name length set to 1 in an example matlab struct self.write_element(np.array(1, dtype=np.int32)) # Field names element is empty self.write_element(np.array([], dtype=np.int8)) def write_struct(self, arr): self.write_header(matdims(arr, self.oned_as), mxSTRUCT_CLASS) self._write_items(arr) def _write_items(self, arr): # write fieldnames fieldnames = [f[0] for f in arr.dtype.descr] length = max([len(fieldname) for fieldname in fieldnames])+1 max_length = (self.long_field_names and 64) or 32 if length > max_length: raise ValueError("Field names are restricted to %d characters" % (max_length-1)) self.write_element(np.array([length], dtype='i4')) self.write_element( np.array(fieldnames, dtype='S%d' % (length)), mdtype=miINT8) A = np.atleast_2d(arr).flatten('F') for el in A: for f in fieldnames: self.write(el[f]) def write_object(self, arr): '''Same as writing structs, except different mx class, and extra classname element after header ''' self.write_header(matdims(arr, self.oned_as), mxOBJECT_CLASS) self.write_element(np.array(arr.classname, dtype='S'), mdtype=miINT8) self._write_items(arr) class MatFile5Writer(object): ''' Class for writing mat5 files ''' @docfiller def __init__(self, file_stream, do_compression=False, unicode_strings=False, global_vars=None, long_field_names=False, oned_as='row'): ''' Initialize writer for matlab 5 format files Parameters ---------- %(do_compression)s %(unicode_strings)s global_vars : None or sequence of strings, optional Names of variables to be marked as global for matlab %(long_fields)s %(oned_as)s ''' self.file_stream = file_stream self.do_compression = do_compression self.unicode_strings = unicode_strings if global_vars: self.global_vars = global_vars else: self.global_vars = [] self.long_field_names = long_field_names self.oned_as = oned_as self._matrix_writer = None def write_file_header(self): # write header hdr = np.zeros((), NDT_FILE_HDR) hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \ % (os.name,time.asctime()) hdr['version'] = 0x0100 hdr['endian_test'] = np.ndarray(shape=(), dtype='S2', buffer=np.uint16(0x4d49)) self.file_stream.write(hdr.tostring()) def put_variables(self, mdict, write_header=None): ''' Write variables in `mdict` to stream Parameters ---------- mdict : mapping mapping with method ``items`` returns name, contents pairs where ``name`` 
which will appear in the matlab workspace in file load, and ``contents`` is something writeable to a matlab file, such as a numpy array. write_header : {None, True, False}, optional If True, then write the matlab file header before writing the variables. If None (the default) then write the file header if we are at position 0 in the stream. By setting False here, and setting the stream position to the end of the file, you can append variables to a matlab file ''' # write header if requested, or None and start of file if write_header is None: write_header = self.file_stream.tell() == 0 if write_header: self.write_file_header() self._matrix_writer = VarWriter5(self) for name, var in mdict.items(): if name[0] == '_': continue is_global = name in self.global_vars if self.do_compression: stream = BytesIO() self._matrix_writer.file_stream = stream self._matrix_writer.write_top(var, asbytes(name), is_global) out_str = zlib.compress(stream.getvalue()) tag = np.empty((), NDT_TAG_FULL) tag['mdtype'] = miCOMPRESSED tag['byte_count'] = len(out_str) self.file_stream.write(tag.tostring()) self.file_stream.write(out_str) else: # not compressing self._matrix_writer.write_top(var, asbytes(name), is_global)
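
# --- Hedged usage sketch (not part of the original module) -----------------
# Round trip through the mat5 reader/writer classes defined above;
# normally one would go through scipy.io.savemat/loadmat instead.
# BytesIO and np are already imported at the top of this module.
if __name__ == '__main__':
    stream = BytesIO()
    MatFile5Writer(stream, do_compression=True).put_variables(
        {'x': np.eye(3)})
    stream.seek(0)
    rdr = MatFile5Reader(stream)
    print(rdr.list_variables())        # e.g. [('x', (3, 3), 'double')]
    print(rdr.get_variables()['x'])    # the 3x3 identity matrix back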
31,895
36.524706
81
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/setup.py
from __future__ import division, print_function, absolute_import


def configuration(parent_package='io', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('matlab', parent_package, top_path)
    config.add_extension('streams', sources=['streams.c'])
    config.add_extension('mio_utils', sources=['mio_utils.c'])
    config.add_extension('mio5_utils', sources=['mio5_utils.c'])
    config.add_data_dir('tests')
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
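
# --- Hedged usage note (not part of the original file) ----------------------
# This configuration is normally consumed by scipy's top-level setup via
# numpy.distutils; a hypothetical in-place build of just this subpackage
# would look roughly like:
#
#     python setup.py build_ext --inplace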
599
34.294118
64
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/mio5_params.py
''' Constants and classes for matlab 5 read and write See also mio5_utils.pyx where these same constants arise as c enums. If you make changes in this file, don't forget to change mio5_utils.pyx ''' from __future__ import division, print_function, absolute_import import numpy as np from .miobase import convert_dtypes miINT8 = 1 miUINT8 = 2 miINT16 = 3 miUINT16 = 4 miINT32 = 5 miUINT32 = 6 miSINGLE = 7 miDOUBLE = 9 miINT64 = 12 miUINT64 = 13 miMATRIX = 14 miCOMPRESSED = 15 miUTF8 = 16 miUTF16 = 17 miUTF32 = 18 mxCELL_CLASS = 1 mxSTRUCT_CLASS = 2 # The March 2008 edition of "Matlab 7 MAT-File Format" says that # mxOBJECT_CLASS = 3, whereas matrix.h says that mxLOGICAL = 3. # Matlab 2008a appears to save logicals as type 9, so we assume that # the document is correct. See type 18, below. mxOBJECT_CLASS = 3 mxCHAR_CLASS = 4 mxSPARSE_CLASS = 5 mxDOUBLE_CLASS = 6 mxSINGLE_CLASS = 7 mxINT8_CLASS = 8 mxUINT8_CLASS = 9 mxINT16_CLASS = 10 mxUINT16_CLASS = 11 mxINT32_CLASS = 12 mxUINT32_CLASS = 13 # The following are not in the March 2008 edition of "Matlab 7 # MAT-File Format," but were guessed from matrix.h. mxINT64_CLASS = 14 mxUINT64_CLASS = 15 mxFUNCTION_CLASS = 16 # Not doing anything with these at the moment. mxOPAQUE_CLASS = 17 # This appears to be a function workspace # Thread 'saveing/loading symbol table of annymous functions', octave-maintainers, April-May 2007 # https://lists.gnu.org/archive/html/octave-maintainers/2007-04/msg00031.html # https://lists.gnu.org/archive/html/octave-maintainers/2007-05/msg00032.html # (Was/Deprecated: https://www-old.cae.wisc.edu/pipermail/octave-maintainers/2007-May/002824.html) mxOBJECT_CLASS_FROM_MATRIX_H = 18 mdtypes_template = { miINT8: 'i1', miUINT8: 'u1', miINT16: 'i2', miUINT16: 'u2', miINT32: 'i4', miUINT32: 'u4', miSINGLE: 'f4', miDOUBLE: 'f8', miINT64: 'i8', miUINT64: 'u8', miUTF8: 'u1', miUTF16: 'u2', miUTF32: 'u4', 'file_header': [('description', 'S116'), ('subsystem_offset', 'i8'), ('version', 'u2'), ('endian_test', 'S2')], 'tag_full': [('mdtype', 'u4'), ('byte_count', 'u4')], 'tag_smalldata':[('byte_count_mdtype', 'u4'), ('data', 'S4')], 'array_flags': [('data_type', 'u4'), ('byte_count', 'u4'), ('flags_class','u4'), ('nzmax', 'u4')], 'U1': 'U1', } mclass_dtypes_template = { mxINT8_CLASS: 'i1', mxUINT8_CLASS: 'u1', mxINT16_CLASS: 'i2', mxUINT16_CLASS: 'u2', mxINT32_CLASS: 'i4', mxUINT32_CLASS: 'u4', mxINT64_CLASS: 'i8', mxUINT64_CLASS: 'u8', mxSINGLE_CLASS: 'f4', mxDOUBLE_CLASS: 'f8', } mclass_info = { mxINT8_CLASS: 'int8', mxUINT8_CLASS: 'uint8', mxINT16_CLASS: 'int16', mxUINT16_CLASS: 'uint16', mxINT32_CLASS: 'int32', mxUINT32_CLASS: 'uint32', mxINT64_CLASS: 'int64', mxUINT64_CLASS: 'uint64', mxSINGLE_CLASS: 'single', mxDOUBLE_CLASS: 'double', mxCELL_CLASS: 'cell', mxSTRUCT_CLASS: 'struct', mxOBJECT_CLASS: 'object', mxCHAR_CLASS: 'char', mxSPARSE_CLASS: 'sparse', mxFUNCTION_CLASS: 'function', mxOPAQUE_CLASS: 'opaque', } NP_TO_MTYPES = { 'f8': miDOUBLE, 'c32': miDOUBLE, 'c24': miDOUBLE, 'c16': miDOUBLE, 'f4': miSINGLE, 'c8': miSINGLE, 'i8': miINT64, 'i4': miINT32, 'i2': miINT16, 'i1': miINT8, 'u8': miUINT64, 'u4': miUINT32, 'u2': miUINT16, 'u1': miUINT8, 'S1': miUINT8, 'U1': miUTF16, 'b1': miUINT8, # not standard but seems MATLAB uses this (gh-4022) } NP_TO_MXTYPES = { 'f8': mxDOUBLE_CLASS, 'c32': mxDOUBLE_CLASS, 'c24': mxDOUBLE_CLASS, 'c16': mxDOUBLE_CLASS, 'f4': mxSINGLE_CLASS, 'c8': mxSINGLE_CLASS, 'i8': mxINT64_CLASS, 'i4': mxINT32_CLASS, 'i2': mxINT16_CLASS, 'i1': mxINT8_CLASS, 'u8': mxUINT64_CLASS, 'u4': mxUINT32_CLASS, 'u2': 
mxUINT16_CLASS, 'u1': mxUINT8_CLASS, 'S1': mxUINT8_CLASS, 'b1': mxUINT8_CLASS, # not standard but seems MATLAB uses this } ''' Before release v7.1 (release 14) matlab (TM) used the system default character encoding scheme padded out to 16-bits. Release 14 and later use Unicode. When saving character data, R14 checks if it can be encoded in 7-bit ascii, and saves in that format if so.''' codecs_template = { miUTF8: {'codec': 'utf_8', 'width': 1}, miUTF16: {'codec': 'utf_16', 'width': 2}, miUTF32: {'codec': 'utf_32','width': 4}, } def _convert_codecs(template, byte_order): ''' Convert codec template mapping to byte order Set codecs not on this system to None Parameters ---------- template : mapping key, value are respectively codec name, and root name for codec (without byte order suffix) byte_order : {'<', '>'} code for little or big endian Returns ------- codecs : dict key, value are name, codec (as in .encode(codec)) ''' codecs = {} postfix = byte_order == '<' and '_le' or '_be' for k, v in template.items(): codec = v['codec'] try: " ".encode(codec) except LookupError: codecs[k] = None continue if v['width'] > 1: codec += postfix codecs[k] = codec return codecs.copy() MDTYPES = {} for _bytecode in '<>': _def = {'dtypes': convert_dtypes(mdtypes_template, _bytecode), 'classes': convert_dtypes(mclass_dtypes_template, _bytecode), 'codecs': _convert_codecs(codecs_template, _bytecode)} MDTYPES[_bytecode] = _def class mat_struct(object): ''' Placeholder for holding read data from structs We use instances of this class when the user passes False as a value to the ``struct_as_record`` parameter of the :func:`scipy.io.matlab.loadmat` function. ''' pass class MatlabObject(np.ndarray): ''' ndarray Subclass to contain matlab object ''' def __new__(cls, input_array, classname=None): # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array).view(cls) # add the new attribute to the created instance obj.classname = classname # Finally, we must return the newly created object: return obj def __array_finalize__(self,obj): # reset the attribute from passed original object self.classname = getattr(obj, 'classname', None) # We do not need to return anything class MatlabFunction(np.ndarray): ''' Subclass to signal this is a matlab function ''' def __new__(cls, input_array): obj = np.asarray(input_array).view(cls) return obj class MatlabOpaque(np.ndarray): ''' Subclass to signal this is a matlab opaque matrix ''' def __new__(cls, input_array): obj = np.asarray(input_array).view(cls) return obj OPAQUE_DTYPE = np.dtype( [('s0', 'O'), ('s1', 'O'), ('s2', 'O'), ('arr', 'O')])
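
# --- Hedged usage sketch (not part of the original module) -----------------
# How the lookup tables above are used: map a numpy dtype key to the
# matlab storage/class codes, then fetch a byte-order-specific read
# dtype. np is already imported at the top of this module.
if __name__ == '__main__':
    key = np.dtype('float64').str[1:]              # 'f8'
    print(NP_TO_MTYPES[key], NP_TO_MXTYPES[key])   # 9 6, i.e. miDOUBLE,
                                                   # mxDOUBLE_CLASS
    print(MDTYPES['<']['dtypes'][miDOUBLE])        # little-endian float64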
7,079
26.764706
98
py
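A minimal sketch (not part of the module) of how the templates above are typically consumed, assuming scipy is importable; the '<' key and the 'dtypes' sub-table mirror the MDTYPES construction in this file:

import numpy as np
from scipy.io.matlab.mio5_params import MDTYPES, miINT32

# Look up the little-endian dtype table built by convert_dtypes above,
# then encode and decode one full 8-byte tag (mdtype, byte_count).
tag_dtype = MDTYPES['<']['dtypes']['tag_full']
raw = np.array([(miINT32, 4)], dtype=tag_dtype).tobytes()
tag = np.frombuffer(raw, dtype=tag_dtype)[0]
assert (tag['mdtype'], tag['byte_count']) == (miINT32, 4)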
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/mio.py
""" Module for reading and writing matlab (TM) .mat files """ # Authors: Travis Oliphant, Matthew Brett from __future__ import division, print_function, absolute_import import numpy as np from scipy._lib.six import string_types from .miobase import get_matfile_version, docfiller from .mio4 import MatFile4Reader, MatFile4Writer from .mio5 import MatFile5Reader, MatFile5Writer __all__ = ['mat_reader_factory', 'loadmat', 'savemat', 'whosmat'] def _open_file(file_like, appendmat): """ Open `file_like` and return as file-like object. First, check if object is already file-like; if so, return it as-is. Otherwise, try to pass it to open(). If that fails, and `file_like` is a string, and `appendmat` is true, append '.mat' and try again. """ try: file_like.read(0) return file_like, False except AttributeError: pass try: return open(file_like, 'rb'), True except IOError: # Probably "not found" if isinstance(file_like, string_types): if appendmat and not file_like.endswith('.mat'): file_like += '.mat' return open(file_like, 'rb'), True else: raise IOError('Reader needs file name or open file-like object') @docfiller def mat_reader_factory(file_name, appendmat=True, **kwargs): """ Create reader for matlab .mat format files. Parameters ---------- %(file_arg)s %(append_arg)s %(load_args)s %(struct_arg)s Returns ------- matreader : MatFileReader object Initialized instance of MatFileReader class matching the mat file type detected in `filename`. file_opened : bool Whether the file was opened by this routine. """ byte_stream, file_opened = _open_file(file_name, appendmat) mjv, mnv = get_matfile_version(byte_stream) if mjv == 0: return MatFile4Reader(byte_stream, **kwargs), file_opened elif mjv == 1: return MatFile5Reader(byte_stream, **kwargs), file_opened elif mjv == 2: raise NotImplementedError('Please use HDF reader for matlab v7.3 files') else: raise TypeError('Did not recognize version %s' % mjv) @docfiller def loadmat(file_name, mdict=None, appendmat=True, **kwargs): """ Load MATLAB file. Parameters ---------- file_name : str Name of the mat file (do not need .mat extension if appendmat==True). Can also pass open file-like object. mdict : dict, optional Dictionary in which to insert matfile variables. appendmat : bool, optional True to append the .mat extension to the end of the given filename, if not already present. byte_order : str or None, optional None by default, implying byte order guessed from mat file. Otherwise can be one of ('native', '=', 'little', '<', 'BIG', '>'). mat_dtype : bool, optional If True, return arrays in same dtype as would be loaded into MATLAB (instead of the dtype with which they are saved). squeeze_me : bool, optional Whether to squeeze unit matrix dimensions or not. chars_as_strings : bool, optional Whether to convert char arrays to string arrays. matlab_compatible : bool, optional Returns matrices as would be loaded by MATLAB (implies squeeze_me=False, chars_as_strings=False, mat_dtype=True, struct_as_record=True). struct_as_record : bool, optional Whether to load MATLAB structs as numpy record arrays, or as old-style numpy arrays with dtype=object. Setting this flag to False replicates the behavior of scipy version 0.7.x (returning numpy object arrays). The default setting is True, because it allows easier round-trip load and save of MATLAB files. verify_compressed_data_integrity : bool, optional Whether the length of compressed sequences in the MATLAB file should be checked, to ensure that they are not longer than we expect. 
It is advisable to enable this (the default) because overlong compressed sequences in MATLAB files generally indicate that the files have experienced some sort of corruption. variable_names : None or sequence If None (the default) - read all variables in file. Otherwise `variable_names` should be a sequence of strings, giving names of the matlab variables to read from the file. The reader will skip any variable with a name not in this sequence, possibly saving some read processing. Returns ------- mat_dict : dict dictionary with variable names as keys, and loaded matrices as values. Notes ----- v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported. You will need an HDF5 python library to read matlab 7.3 format mat files. Because scipy does not supply one, we do not implement the HDF5 / 7.3 interface here. """ variable_names = kwargs.pop('variable_names', None) MR, file_opened = mat_reader_factory(file_name, appendmat, **kwargs) matfile_dict = MR.get_variables(variable_names) if mdict is not None: mdict.update(matfile_dict) else: mdict = matfile_dict if file_opened: MR.mat_stream.close() return mdict @docfiller def savemat(file_name, mdict, appendmat=True, format='5', long_field_names=False, do_compression=False, oned_as='row'): """ Save a dictionary of names and arrays into a MATLAB-style .mat file. This saves the array objects in the given dictionary to a MATLAB- style .mat file. Parameters ---------- file_name : str or file-like object Name of the .mat file (.mat extension not needed if ``appendmat == True``). Can also pass open file_like object. mdict : dict Dictionary from which to save matfile variables. appendmat : bool, optional True (the default) to append the .mat extension to the end of the given filename, if not already present. format : {'5', '4'}, string, optional '5' (the default) for MATLAB 5 and up (to 7.2), '4' for MATLAB 4 .mat files. long_field_names : bool, optional False (the default) - maximum field name length in a structure is 31 characters which is the documented maximum length. True - maximum field name length in a structure is 63 characters which works for MATLAB 7.6+. do_compression : bool, optional Whether or not to compress matrices on write. Default is False. oned_as : {'row', 'column'}, optional If 'column', write 1-D numpy arrays as column vectors. If 'row', write 1-D numpy arrays as row vectors. See also -------- mio4.MatFile4Writer mio5.MatFile5Writer """ file_opened = False if hasattr(file_name, 'write'): # File-like object already; use as-is file_stream = file_name else: if isinstance(file_name, string_types): if appendmat and not file_name.endswith('.mat'): file_name = file_name + ".mat" file_stream = open(file_name, 'wb') file_opened = True if format == '4': if long_field_names: raise ValueError("Long field names are not available for version 4 files") MW = MatFile4Writer(file_stream, oned_as) elif format == '5': MW = MatFile5Writer(file_stream, do_compression=do_compression, unicode_strings=True, long_field_names=long_field_names, oned_as=oned_as) else: raise ValueError("Format should be '4' or '5'") MW.put_variables(mdict) if file_opened: file_stream.close() @docfiller def whosmat(file_name, appendmat=True, **kwargs): """ List variables inside a MATLAB file. Parameters ---------- %(file_arg)s %(append_arg)s %(load_args)s %(struct_arg)s Returns ------- variables : list of tuples A list of tuples, where each tuple holds the matrix name (a string), its shape (tuple of ints), and its data class (a string). 
Possible data classes are: int8, uint8, int16, uint16, int32, uint32, int64, uint64, single, double, cell, struct, object, char, sparse, function, opaque, logical, unknown. Notes ----- v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported. You will need an HDF5 python library to read matlab 7.3 format mat files. Because scipy does not supply one, we do not implement the HDF5 / 7.3 interface here. .. versionadded:: 0.12.0 """ ML, file_opened = mat_reader_factory(file_name, **kwargs) variables = ML.list_variables() if file_opened: ML.mat_stream.close() return variables
8,958
33.32567
86
py
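A small round-trip sketch of the loadmat/savemat API documented above; both functions accept open file-like objects, so an in-memory stream works:

import io
import numpy as np
from scipy.io.matlab.mio import savemat, loadmat

buf = io.BytesIO()
savemat(buf, {'a': np.arange(6.).reshape(2, 3)}, format='5')
buf.seek(0)
loaded = loadmat(buf)  # file-like objects are accepted, as documented
assert np.allclose(loaded['a'], np.arange(6.).reshape(2, 3))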
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/mio4.py
''' Classes for read / write of matlab (TM) 4 files ''' from __future__ import division, print_function, absolute_import import sys import warnings import numpy as np from numpy.compat import asbytes, asstr import scipy.sparse from scipy._lib.six import string_types from .miobase import (MatFileReader, docfiller, matdims, read_dtype, convert_dtypes, arr_to_chars, arr_dtype_number) from .mio_utils import squeeze_element, chars_to_strings from functools import reduce SYS_LITTLE_ENDIAN = sys.byteorder == 'little' miDOUBLE = 0 miSINGLE = 1 miINT32 = 2 miINT16 = 3 miUINT16 = 4 miUINT8 = 5 mdtypes_template = { miDOUBLE: 'f8', miSINGLE: 'f4', miINT32: 'i4', miINT16: 'i2', miUINT16: 'u2', miUINT8: 'u1', 'header': [('mopt', 'i4'), ('mrows', 'i4'), ('ncols', 'i4'), ('imagf', 'i4'), ('namlen', 'i4')], 'U1': 'U1', } np_to_mtypes = { 'f8': miDOUBLE, 'c32': miDOUBLE, 'c24': miDOUBLE, 'c16': miDOUBLE, 'f4': miSINGLE, 'c8': miSINGLE, 'i4': miINT32, 'i2': miINT16, 'u2': miUINT16, 'u1': miUINT8, 'S1': miUINT8, } # matrix classes mxFULL_CLASS = 0 mxCHAR_CLASS = 1 mxSPARSE_CLASS = 2 order_codes = { 0: '<', 1: '>', 2: 'VAX D-float', # ! 3: 'VAX G-float', 4: 'Cray', # !! } mclass_info = { mxFULL_CLASS: 'double', mxCHAR_CLASS: 'char', mxSPARSE_CLASS: 'sparse', } class VarHeader4(object): # Mat4 variables never logical or global is_logical = False is_global = False def __init__(self, name, dtype, mclass, dims, is_complex): self.name = name self.dtype = dtype self.mclass = mclass self.dims = dims self.is_complex = is_complex class VarReader4(object): ''' Class to read matlab 4 variables ''' def __init__(self, file_reader): self.file_reader = file_reader self.mat_stream = file_reader.mat_stream self.dtypes = file_reader.dtypes self.chars_as_strings = file_reader.chars_as_strings self.squeeze_me = file_reader.squeeze_me def read_header(self): ''' Read and return header for variable ''' data = read_dtype(self.mat_stream, self.dtypes['header']) name = self.mat_stream.read(int(data['namlen'])).strip(b'\x00') if data['mopt'] < 0 or data['mopt'] > 5000: raise ValueError('Mat 4 mopt wrong format, byteswapping problem?') M, rest = divmod(data['mopt'], 1000) # order code if M not in (0, 1): warnings.warn("We do not support byte ordering '%s'; returned " "data may be corrupt" % order_codes[M], UserWarning) O, rest = divmod(rest, 100) # unused, should be 0 if O != 0: raise ValueError('O in MOPT integer should be 0, wrong format?') P, rest = divmod(rest, 10) # data type code e.g miDOUBLE (see above) T = rest # matrix type code e.g. mxFULL_CLASS (see above) dims = (data['mrows'], data['ncols']) is_complex = data['imagf'] == 1 dtype = self.dtypes[P] return VarHeader4( name, dtype, T, dims, is_complex) def array_from_header(self, hdr, process=True): mclass = hdr.mclass if mclass == mxFULL_CLASS: arr = self.read_full_array(hdr) elif mclass == mxCHAR_CLASS: arr = self.read_char_array(hdr) if process and self.chars_as_strings: arr = chars_to_strings(arr) elif mclass == mxSPARSE_CLASS: # no current processing (below) makes sense for sparse return self.read_sparse_array(hdr) else: raise TypeError('No reader for class code %s' % mclass) if process and self.squeeze_me: return squeeze_element(arr) return arr def read_sub_array(self, hdr, copy=True): ''' Mat4 read using header `hdr` dtype and dims Parameters ---------- hdr : object object with attributes ``dtype``, ``dims``. 
dtype is assumed to be the correct endianness copy : bool, optional copies array before return if True (default True) (buffer is usually read only) Returns ------- arr : ndarray of dtype given by `hdr` ``dtype`` and shape given by `hdr` ``dims`` ''' dt = hdr.dtype dims = hdr.dims num_bytes = dt.itemsize for d in dims: num_bytes *= d buffer = self.mat_stream.read(int(num_bytes)) if len(buffer) != num_bytes: raise ValueError("Not enough bytes to read matrix '%s'; is this " "a badly-formed file? Consider listing matrices " "with `whosmat` and loading named matrices with " "`variable_names` kwarg to `loadmat`" % hdr.name) arr = np.ndarray(shape=dims, dtype=dt, buffer=buffer, order='F') if copy: arr = arr.copy() return arr def read_full_array(self, hdr): ''' Full (rather than sparse) matrix getter Read matrix (array); can be real or complex Parameters ---------- hdr : ``VarHeader4`` instance Returns ------- arr : ndarray complex array if ``hdr.is_complex`` is True, otherwise a real numeric array ''' if hdr.is_complex: # avoid array copy to save memory res = self.read_sub_array(hdr, copy=False) res_j = self.read_sub_array(hdr, copy=False) return res + (res_j * 1j) return self.read_sub_array(hdr) def read_char_array(self, hdr): ''' latin-1 text matrix (char matrix) reader Parameters ---------- hdr : ``VarHeader4`` instance Returns ------- arr : ndarray with dtype 'U1', shape given by `hdr` ``dims`` ''' arr = self.read_sub_array(hdr).astype(np.uint8) S = arr.tostring().decode('latin-1') return np.ndarray(shape=hdr.dims, dtype=np.dtype('U1'), buffer=np.array(S)).copy() def read_sparse_array(self, hdr): ''' Read and return sparse matrix type Parameters ---------- hdr : ``VarHeader4`` instance Returns ------- arr : ``scipy.sparse.coo_matrix`` with dtype ``float`` and shape read from the sparse matrix data Notes ----- MATLAB 4 real sparse arrays are saved in an N+1 by 3 array format, where N is the number of non-zero values. Column 1 values [0:N] are the (1-based) row indices of each non-zero value, column 2 [0:N] are the column indices, column 3 [0:N] are the (real) values. The last values [-1,0:2] of the rows, column indices are shape[0] and shape[1] respectively of the output matrix. The last value for the values column is a padding 0. mrows and ncols values from the header give the shape of the stored matrix, here [N+1, 3]. Complex data is saved as a 4 column matrix, where the fourth column contains the imaginary component; the last value is again 0. Complex sparse data do *not* have the header ``imagf`` field set to True; the fact that the data are complex is only detectable because there are 4 storage columns ''' res = self.read_sub_array(hdr) tmp = res[:-1,:] # All numbers are float64 in Matlab, but Scipy sparse expects int shape dims = (int(res[-1,0]), int(res[-1,1])) I = np.ascontiguousarray(tmp[:,0],dtype='intc') # fixes byte order also J = np.ascontiguousarray(tmp[:,1],dtype='intc') I -= 1 # for 1-based indexing J -= 1 if res.shape[1] == 3: V = np.ascontiguousarray(tmp[:,2],dtype='float') else: V = np.ascontiguousarray(tmp[:,2],dtype='complex') V.imag = tmp[:,3] return scipy.sparse.coo_matrix((V,(I,J)), dims) def shape_from_header(self, hdr): '''Read the shape of the array described by the header. The file position after this call is unspecified. 
''' mclass = hdr.mclass if mclass == mxFULL_CLASS: shape = tuple(map(int, hdr.dims)) elif mclass == mxCHAR_CLASS: shape = tuple(map(int, hdr.dims)) if self.chars_as_strings: shape = shape[:-1] elif mclass == mxSPARSE_CLASS: dt = hdr.dtype dims = hdr.dims if not (len(dims) == 2 and dims[0] >= 1 and dims[1] >= 1): return () # Read only the row and column counts self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1) rows = np.ndarray(shape=(1,), dtype=dt, buffer=self.mat_stream.read(dt.itemsize)) self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1) cols = np.ndarray(shape=(1,), dtype=dt, buffer=self.mat_stream.read(dt.itemsize)) shape = (int(rows), int(cols)) else: raise TypeError('No reader for class code %s' % mclass) if self.squeeze_me: shape = tuple([x for x in shape if x != 1]) return shape class MatFile4Reader(MatFileReader): ''' Reader for Mat4 files ''' @docfiller def __init__(self, mat_stream, *args, **kwargs): ''' Initialize matlab 4 file reader %(matstream_arg)s %(load_args)s ''' super(MatFile4Reader, self).__init__(mat_stream, *args, **kwargs) self._matrix_reader = None def guess_byte_order(self): self.mat_stream.seek(0) mopt = read_dtype(self.mat_stream, np.dtype('i4')) self.mat_stream.seek(0) if mopt == 0: return '<' if mopt < 0 or mopt > 5000: # Number must have been byteswapped return SYS_LITTLE_ENDIAN and '>' or '<' # Not byteswapped return SYS_LITTLE_ENDIAN and '<' or '>' def initialize_read(self): ''' Run when beginning read of variables Sets up readers from parameters in `self` ''' self.dtypes = convert_dtypes(mdtypes_template, self.byte_order) self._matrix_reader = VarReader4(self) def read_var_header(self): ''' Read and return header, next position Parameters ---------- None Returns ------- header : object object that can be passed to self.read_var_array, and that has attributes ``name`` and ``is_global`` next_position : int position in stream of next variable ''' hdr = self._matrix_reader.read_header() n = reduce(lambda x, y: x*y, hdr.dims, 1) # fast product remaining_bytes = hdr.dtype.itemsize * n if hdr.is_complex and not hdr.mclass == mxSPARSE_CLASS: remaining_bytes *= 2 next_position = self.mat_stream.tell() + remaining_bytes return hdr, next_position def read_var_array(self, header, process=True): ''' Read array, given `header` Parameters ---------- header : header object object with fields defining variable header process : {True, False}, optional If True, apply recursive post-processing during loading of array. Returns ------- arr : array array with post-processing applied or not according to `process`. ''' return self._matrix_reader.array_from_header(header, process) def get_variables(self, variable_names=None): ''' get variables from stream as dictionary Parameters ---------- variable_names : None or str or sequence of str, optional variable name, or sequence of variable names to get from Mat file / file stream. 
If None, then get all variables in file ''' if isinstance(variable_names, string_types): variable_names = [variable_names] elif variable_names is not None: variable_names = list(variable_names) self.mat_stream.seek(0) # set up variable reader self.initialize_read() mdict = {} while not self.end_of_stream(): hdr, next_position = self.read_var_header() name = asstr(hdr.name) if variable_names is not None and name not in variable_names: self.mat_stream.seek(next_position) continue mdict[name] = self.read_var_array(hdr) self.mat_stream.seek(next_position) if variable_names is not None: variable_names.remove(name) if len(variable_names) == 0: break return mdict def list_variables(self): ''' list variables from stream ''' self.mat_stream.seek(0) # set up variable reader self.initialize_read() vars = [] while not self.end_of_stream(): hdr, next_position = self.read_var_header() name = asstr(hdr.name) shape = self._matrix_reader.shape_from_header(hdr) info = mclass_info.get(hdr.mclass, 'unknown') vars.append((name, shape, info)) self.mat_stream.seek(next_position) return vars def arr_to_2d(arr, oned_as='row'): ''' Make ``arr`` exactly two dimensional If `arr` has more than 2 dimensions, raise a ValueError Parameters ---------- arr : array oned_as : {'row', 'column'}, optional Whether to reshape 1D vectors as row vectors or column vectors. See documentation for ``matdims`` for more detail Returns ------- arr2d : array 2D version of the array ''' dims = matdims(arr, oned_as) if len(dims) > 2: raise ValueError('Matlab 4 files cannot save arrays with more than ' '2 dimensions') return arr.reshape(dims) class VarWriter4(object): def __init__(self, file_writer): self.file_stream = file_writer.file_stream self.oned_as = file_writer.oned_as def write_bytes(self, arr): self.file_stream.write(arr.tostring(order='F')) def write_string(self, s): self.file_stream.write(s) def write_header(self, name, shape, P=miDOUBLE, T=mxFULL_CLASS, imagf=0): ''' Write header for given data options Parameters ---------- name : str name of variable shape : sequence Shape of array as it will be read in matlab P : int, optional code for mat4 data type, one of ``miDOUBLE, miSINGLE, miINT32, miINT16, miUINT16, miUINT8`` T : int, optional code for mat4 matrix class, one of ``mxFULL_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS`` imagf : int, optional flag indicating complex ''' header = np.empty((), mdtypes_template['header']) M = not SYS_LITTLE_ENDIAN O = 0 header['mopt'] = (M * 1000 + O * 100 + P * 10 + T) header['mrows'] = shape[0] header['ncols'] = shape[1] header['imagf'] = imagf header['namlen'] = len(name) + 1 self.write_bytes(header) self.write_string(asbytes(name + '\0')) def write(self, arr, name): ''' Write matrix `arr`, with name `name` Parameters ---------- arr : array_like array to write name : str name in matlab workspace ''' # we need to catch sparse first, because np.asarray returns # an object array for scipy.sparse if scipy.sparse.issparse(arr): self.write_sparse(arr, name) return arr = np.asarray(arr) dt = arr.dtype if not dt.isnative: arr = arr.astype(dt.newbyteorder('=')) dtt = dt.type if dtt is np.object_: raise TypeError('Cannot save object arrays in Mat4') elif dtt is np.void: raise TypeError('Cannot save void type arrays') elif dtt in (np.unicode_, np.string_): self.write_char(arr, name) return self.write_numeric(arr, name) def write_numeric(self, arr, name): arr = arr_to_2d(arr, self.oned_as) imagf = arr.dtype.kind == 'c' try: P = np_to_mtypes[arr.dtype.str[1:]] except KeyError: if imagf: arr = 
arr.astype('c128') else: arr = arr.astype('f8') P = miDOUBLE self.write_header(name, arr.shape, P=P, T=mxFULL_CLASS, imagf=imagf) if imagf: self.write_bytes(arr.real) self.write_bytes(arr.imag) else: self.write_bytes(arr) def write_char(self, arr, name): arr = arr_to_chars(arr) arr = arr_to_2d(arr, self.oned_as) dims = arr.shape self.write_header( name, dims, P=miUINT8, T=mxCHAR_CLASS) if arr.dtype.kind == 'U': # Recode unicode to latin1 n_chars = np.product(dims) st_arr = np.ndarray(shape=(), dtype=arr_dtype_number(arr, n_chars), buffer=arr) st = st_arr.item().encode('latin-1') arr = np.ndarray(shape=dims, dtype='S1', buffer=st) self.write_bytes(arr) def write_sparse(self, arr, name): ''' Sparse matrices are 2D See docstring for VarReader4.read_sparse_array ''' A = arr.tocoo() # convert to sparse COO format (ijv) imagf = A.dtype.kind == 'c' ijv = np.zeros((A.nnz + 1, 3+imagf), dtype='f8') ijv[:-1,0] = A.row ijv[:-1,1] = A.col ijv[:-1,0:2] += 1 # 1 based indexing if imagf: ijv[:-1,2] = A.data.real ijv[:-1,3] = A.data.imag else: ijv[:-1,2] = A.data ijv[-1,0:2] = A.shape self.write_header( name, ijv.shape, P=miDOUBLE, T=mxSPARSE_CLASS) self.write_bytes(ijv) class MatFile4Writer(object): ''' Class for writing matlab 4 format files ''' def __init__(self, file_stream, oned_as=None): self.file_stream = file_stream if oned_as is None: oned_as = 'row' self.oned_as = oned_as self._matrix_writer = None def put_variables(self, mdict, write_header=None): ''' Write variables in `mdict` to stream Parameters ---------- mdict : mapping mapping with method ``items`` returning name, contents pairs, where ``name`` will appear in the matlab workspace on file load, and ``contents`` is something writeable to a matlab file, such as a numpy array. write_header : {None, True, False} If True, then write the matlab file header before writing the variables. If None (the default) then write the file header if we are at position 0 in the stream. By setting False here, and setting the stream position to the end of the file, you can append variables to a matlab file ''' # there is no header for a matlab 4 mat file, so we ignore the # ``write_header`` input argument. It's there for compatibility # with the matlab 5 version of this method self._matrix_writer = VarWriter4(self) for name, var in mdict.items(): self._matrix_writer.write(var, name)
20,384
31.932149
80
py
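A sketch of the MOPT decode performed in VarReader4.read_header above; the value 52 is a hypothetical header field chosen for illustration, and the expected codes follow the constant tables at the top of the file:

mopt = 52                      # hypothetical MOPT value from a MAT-4 header
M, rest = divmod(mopt, 1000)   # byte order code: 0 means little endian
O, rest = divmod(rest, 100)    # unused, should be 0
P, T = divmod(rest, 10)        # P: data type code, T: matrix class code
assert (M, O, P, T) == (0, 0, 5, 2)  # miUINT8 data in an mxSPARSE_CLASS matrix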
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/__init__.py
""" Utilities for dealing with MATLAB(R) files Notes ----- MATLAB(R) is a registered trademark of The MathWorks, Inc., 3 Apple Hill Drive, Natick, MA 01760-2098, USA. """ from __future__ import division, print_function, absolute_import # Matlab file read and write utilities from .mio import loadmat, savemat, whosmat from . import byteordercodes __all__ = ['loadmat', 'savemat', 'whosmat', 'byteordercodes'] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
508
23.238095
72
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/miobase.py
# Authors: Travis Oliphant, Matthew Brett """ Base classes for MATLAB file stream reading. MATLAB is a registered trademark of the Mathworks inc. """ from __future__ import division, print_function, absolute_import import sys import operator from scipy._lib.six import reduce import numpy as np if sys.version_info[0] >= 3: byteord = int else: byteord = ord from scipy.misc import doccer from . import byteordercodes as boc class MatReadError(Exception): pass class MatWriteError(Exception): pass class MatReadWarning(UserWarning): pass doc_dict = \ {'file_arg': '''file_name : str Name of the mat file (do not need .mat extension if appendmat==True) Can also pass open file-like object.''', 'append_arg': '''appendmat : bool, optional True to append the .mat extension to the end of the given filename, if not already present.''', 'load_args': '''byte_order : str or None, optional None by default, implying byte order guessed from mat file. Otherwise can be one of ('native', '=', 'little', '<', 'BIG', '>'). mat_dtype : bool, optional If True, return arrays in same dtype as would be loaded into MATLAB (instead of the dtype with which they are saved). squeeze_me : bool, optional Whether to squeeze unit matrix dimensions or not. chars_as_strings : bool, optional Whether to convert char arrays to string arrays. matlab_compatible : bool, optional Returns matrices as would be loaded by MATLAB (implies squeeze_me=False, chars_as_strings=False, mat_dtype=True, struct_as_record=True).''', 'struct_arg': '''struct_as_record : bool, optional Whether to load MATLAB structs as numpy record arrays, or as old-style numpy arrays with dtype=object. Setting this flag to False replicates the behavior of scipy version 0.7.x (returning numpy object arrays). The default setting is True, because it allows easier round-trip load and save of MATLAB files.''', 'matstream_arg': '''mat_stream : file-like Object with file API, open for reading.''', 'long_fields': '''long_field_names : bool, optional * False - maximum field name length in a structure is 31 characters which is the documented maximum length. This is the default. * True - maximum field name length in a structure is 63 characters which works for MATLAB 7.6''', 'do_compression': '''do_compression : bool, optional Whether to compress matrices on write. Default is False.''', 'oned_as': '''oned_as : {'row', 'column'}, optional If 'column', write 1-D numpy arrays as column vectors. If 'row', write 1D numpy arrays as row vectors.''', 'unicode_strings': '''unicode_strings : bool, optional If True, write strings as Unicode, else MATLAB usual encoding.'''} docfiller = doccer.filldoc(doc_dict) ''' Note on architecture ====================== There are three sets of parameters relevant for reading files. The first are *file read parameters* - containing options that are common for reading the whole file, and therefore every variable within that file. At the moment these are: * mat_stream * dtypes (derived from byte code) * byte_order * chars_as_strings * squeeze_me * struct_as_record (MATLAB 5 files) * class_dtypes (derived from order code, MATLAB 5 files) * codecs (MATLAB 5 files) * uint16_codec (MATLAB 5 files) Another set of parameters are those that apply only to the current variable being read - the *header*: * header related variables (different for v4 and v5 mat files) * is_complex * mclass * var_stream With the header, we need ``next_position`` to tell us where the next variable in the stream is. Then, for each element in a matrix, there can be *element read parameters*. 
An element is, for example, one element in a MATLAB cell array. At the moment these are: * mat_dtype The file-reading object contains the *file read parameters*. The *header* is passed around as a data object, or may be read and discarded in a single function. The *element read parameters* - the mat_dtype in this instance, is passed into a general post-processing function - see ``mio_utils`` for details. ''' def convert_dtypes(dtype_template, order_code): ''' Convert dtypes in mapping to given order Parameters ---------- dtype_template : mapping mapping with values returning numpy dtype from ``np.dtype(val)`` order_code : str an order code suitable for using in ``dtype.newbyteorder()`` Returns ------- dtypes : mapping mapping where values have been replaced by ``np.dtype(val).newbyteorder(order_code)`` ''' dtypes = dtype_template.copy() for k in dtypes: dtypes[k] = np.dtype(dtypes[k]).newbyteorder(order_code) return dtypes def read_dtype(mat_stream, a_dtype): """ Generic get of byte stream data of known type Parameters ---------- mat_stream : file_like object MATLAB (tm) mat file stream a_dtype : dtype dtype of array to read. `a_dtype` is assumed to be correct endianness. Returns ------- arr : ndarray Array of dtype `a_dtype` read from stream. """ num_bytes = a_dtype.itemsize arr = np.ndarray(shape=(), dtype=a_dtype, buffer=mat_stream.read(num_bytes), order='F') return arr def get_matfile_version(fileobj): """ Return major, minor tuple depending on apparent mat file type Where: #. 0,x -> version 4 format mat files #. 1,x -> version 5 format mat files #. 2,x -> version 7.3 format mat files (HDF format) Parameters ---------- fileobj : file_like object implementing seek() and read() Returns ------- major_version : {0, 1, 2} major MATLAB File format version minor_version : int minor MATLAB file format version Raises ------ MatReadError If the file is empty. ValueError The matfile version is unknown. Notes ----- Has the side effect of setting the file read pointer to 0 """ # Mat4 files have a zero somewhere in first 4 bytes fileobj.seek(0) mopt_bytes = fileobj.read(4) if len(mopt_bytes) == 0: raise MatReadError("Mat file appears to be empty") mopt_ints = np.ndarray(shape=(4,), dtype=np.uint8, buffer=mopt_bytes) if 0 in mopt_ints: fileobj.seek(0) return (0,0) # For 5 format or 7.3 format we need to read an integer in the # header. Bytes 124 through 128 contain a version integer and an # endian test string fileobj.seek(124) tst_str = fileobj.read(4) fileobj.seek(0) maj_ind = int(tst_str[2] == b'I'[0]) maj_val = byteord(tst_str[maj_ind]) min_val = byteord(tst_str[1-maj_ind]) ret = (maj_val, min_val) if maj_val in (1, 2): return ret raise ValueError('Unknown mat file type, version %s, %s' % ret) def matdims(arr, oned_as='column'): """ Determine equivalent MATLAB dimensions for given array Parameters ---------- arr : ndarray Input array oned_as : {'column', 'row'}, optional Whether 1-D arrays are returned as MATLAB row or column matrices. Default is 'column'. Returns ------- dims : tuple Shape tuple, in the form MATLAB expects it. Notes ----- We had to decide what shape a 1 dimensional array would be by default. ``np.atleast_2d`` thinks it is a row vector. The default for a vector in MATLAB (e.g. ``>> 1:12``) is a row vector. Versions of scipy up to and including 0.11 resulted (accidentally) in 1-D arrays being read as column vectors. For the moment, we maintain the same tradition here. 
Examples -------- >>> matdims(np.array(1)) # numpy scalar (1, 1) >>> matdims(np.array([1])) # 1d array, 1 element (1, 1) >>> matdims(np.array([1,2])) # 1d array, 2 elements (2, 1) >>> matdims(np.array([[2],[3]])) # 2d array, column vector (2, 1) >>> matdims(np.array([[2,3]])) # 2d array, row vector (1, 2) >>> matdims(np.array([[[2,3]]])) # 3d array, rowish vector (1, 1, 2) >>> matdims(np.array([])) # empty 1d array (0, 0) >>> matdims(np.array([[]])) # empty 2d (0, 0) >>> matdims(np.array([[[]]])) # empty 3d (0, 0, 0) Optional argument flips 1-D shape behavior. >>> matdims(np.array([1,2]), 'row') # 1d array, 2 elements (1, 2) The argument has to make sense though >>> matdims(np.array([1,2]), 'bizarre') Traceback (most recent call last): ... ValueError: 1D option "bizarre" is strange """ shape = arr.shape if shape == (): # scalar return (1,1) if reduce(operator.mul, shape) == 0: # zero elements return (0,) * np.max([arr.ndim, 2]) if len(shape) == 1: # 1D if oned_as == 'column': return shape + (1,) elif oned_as == 'row': return (1,) + shape else: raise ValueError('1D option "%s" is strange' % oned_as) return shape class MatVarReader(object): ''' Abstract class defining required interface for var readers''' def __init__(self, file_reader): pass def read_header(self): ''' Returns header ''' pass def array_from_header(self, header): ''' Reads array given header ''' pass class MatFileReader(object): """ Base object for reading mat files To make this class functional, you will need to override the following methods: matrix_getter_factory - gives object to fetch next matrix from stream guess_byte_order - guesses file byte order from file """ @docfiller def __init__(self, mat_stream, byte_order=None, mat_dtype=False, squeeze_me=False, chars_as_strings=True, matlab_compatible=False, struct_as_record=True, verify_compressed_data_integrity=True ): ''' Initializer for mat file reader mat_stream : file-like object with file API, open for reading %(load_args)s ''' # Initialize stream self.mat_stream = mat_stream self.dtypes = {} if not byte_order: byte_order = self.guess_byte_order() else: byte_order = boc.to_numpy_code(byte_order) self.byte_order = byte_order self.struct_as_record = struct_as_record if matlab_compatible: self.set_matlab_compatible() else: self.squeeze_me = squeeze_me self.chars_as_strings = chars_as_strings self.mat_dtype = mat_dtype self.verify_compressed_data_integrity = verify_compressed_data_integrity def set_matlab_compatible(self): ''' Sets options to return arrays as MATLAB loads them ''' self.mat_dtype = True self.squeeze_me = False self.chars_as_strings = False def guess_byte_order(self): ''' As we do not know what file type we have, assume native ''' return boc.native_code def end_of_stream(self): b = self.mat_stream.read(1) curpos = self.mat_stream.tell() self.mat_stream.seek(curpos-1) return len(b) == 0 def arr_dtype_number(arr, num): ''' Return dtype for given number of items per element''' return np.dtype(arr.dtype.str[:2] + str(num)) def arr_to_chars(arr): ''' Convert string array to char array ''' dims = list(arr.shape) if not dims: dims = [1] dims.append(int(arr.dtype.str[2:])) arr = np.ndarray(shape=dims, dtype=arr_dtype_number(arr, 1), buffer=arr) empties = [arr == ''] if not np.any(empties): return arr arr = arr.copy() arr[empties] = ' ' return arr
12,083
28.048077
80
py
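Two one-line sketches of the helpers above, assuming scipy is importable; the four bytes passed to get_matfile_version are a hand-made stand-in for a MAT-4 header, not a real file:

import io
import numpy as np
from scipy.io.matlab.miobase import convert_dtypes, get_matfile_version

# convert_dtypes stamps a byte order onto every template entry.
assert convert_dtypes({'x': 'f8'}, '>')['x'] == np.dtype('>f8')
# MAT-4 detection: a zero among the first four bytes yields version (0, 0).
assert get_matfile_version(io.BytesIO(b'\x00\x01\x02\x03')) == (0, 0)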
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/test_byteordercodes.py
''' Tests for byteorder module ''' from __future__ import division, print_function, absolute_import import sys from numpy.testing import assert_ from pytest import raises as assert_raises import scipy.io.matlab.byteordercodes as sibc def test_native(): native_is_le = sys.byteorder == 'little' assert_(sibc.sys_is_le == native_is_le) def test_to_numpy(): if sys.byteorder == 'little': assert_(sibc.to_numpy_code('native') == '<') assert_(sibc.to_numpy_code('swapped') == '>') else: assert_(sibc.to_numpy_code('native') == '>') assert_(sibc.to_numpy_code('swapped') == '<') assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('=')) assert_(sibc.to_numpy_code('big') == '>') for code in ('little', '<', 'l', 'L', 'le'): assert_(sibc.to_numpy_code(code) == '<') for code in ('big', '>', 'b', 'B', 'be'): assert_(sibc.to_numpy_code(code) == '>') assert_raises(ValueError, sibc.to_numpy_code, 'silly string')
1,003
30.375
68
py
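The mapping exercised by the tests above, reduced to two asserts; assumes the same byteordercodes module the test imports:

import scipy.io.matlab.byteordercodes as boc

assert boc.to_numpy_code('be') == '>'        # aliases map to numpy codes
assert boc.to_numpy_code('=') in ('<', '>')  # '=' resolves to the native code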
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/test_streams.py
""" Testing """ from __future__ import division, print_function, absolute_import import os import sys import zlib from io import BytesIO if sys.version_info[0] >= 3: cStringIO = BytesIO else: from cStringIO import StringIO as cStringIO from tempfile import mkstemp from contextlib import contextmanager import numpy as np from numpy.testing import assert_, assert_equal from pytest import raises as assert_raises from scipy.io.matlab.streams import (make_stream, GenericStream, cStringStream, FileStream, ZlibInputStream, _read_into, _read_string) IS_PYPY = ('__pypy__' in sys.modules) @contextmanager def setup_test_file(): val = b'a\x00string' fd, fname = mkstemp() with os.fdopen(fd, 'wb') as fs: fs.write(val) with open(fname, 'rb') as fs: gs = BytesIO(val) cs = cStringIO(val) yield fs, gs, cs os.unlink(fname) def test_make_stream(): with setup_test_file() as (fs, gs, cs): # test stream initialization assert_(isinstance(make_stream(gs), GenericStream)) if sys.version_info[0] < 3 and not IS_PYPY: assert_(isinstance(make_stream(cs), cStringStream)) assert_(isinstance(make_stream(fs), FileStream)) def test_tell_seek(): with setup_test_file() as (fs, gs, cs): for s in (fs, gs, cs): st = make_stream(s) res = st.seek(0) assert_equal(res, 0) assert_equal(st.tell(), 0) res = st.seek(5) assert_equal(res, 0) assert_equal(st.tell(), 5) res = st.seek(2, 1) assert_equal(res, 0) assert_equal(st.tell(), 7) res = st.seek(-2, 2) assert_equal(res, 0) assert_equal(st.tell(), 6) def test_read(): with setup_test_file() as (fs, gs, cs): for s in (fs, gs, cs): st = make_stream(s) st.seek(0) res = st.read(-1) assert_equal(res, b'a\x00string') st.seek(0) res = st.read(4) assert_equal(res, b'a\x00st') # read into st.seek(0) res = _read_into(st, 4) assert_equal(res, b'a\x00st') res = _read_into(st, 4) assert_equal(res, b'ring') assert_raises(IOError, _read_into, st, 2) # read alloc st.seek(0) res = _read_string(st, 4) assert_equal(res, b'a\x00st') res = _read_string(st, 4) assert_equal(res, b'ring') assert_raises(IOError, _read_string, st, 2) class TestZlibInputStream(object): def _get_data(self, size): data = np.random.randint(0, 256, size).astype(np.uint8).tostring() compressed_data = zlib.compress(data) stream = BytesIO(compressed_data) return stream, len(compressed_data), data def test_read(self): block_size = 131072 SIZES = [0, 1, 10, block_size//2, block_size-1, block_size, block_size+1, 2*block_size-1] READ_SIZES = [block_size//2, block_size-1, block_size, block_size+1] def check(size, read_size): compressed_stream, compressed_data_len, data = self._get_data(size) stream = ZlibInputStream(compressed_stream, compressed_data_len) data2 = b'' so_far = 0 while True: block = stream.read(min(read_size, size - so_far)) if not block: break so_far += len(block) data2 += block assert_equal(data, data2) for size in SIZES: for read_size in READ_SIZES: check(size, read_size) def test_read_max_length(self): size = 1234 data = np.random.randint(0, 256, size).astype(np.uint8).tostring() compressed_data = zlib.compress(data) compressed_stream = BytesIO(compressed_data + b"abbacaca") stream = ZlibInputStream(compressed_stream, len(compressed_data)) stream.read(len(data)) assert_equal(compressed_stream.tell(), len(compressed_data)) assert_raises(IOError, stream.read, 1) def test_seek(self): compressed_stream, compressed_data_len, data = self._get_data(1024) stream = ZlibInputStream(compressed_stream, compressed_data_len) stream.seek(123) p = 123 assert_equal(stream.tell(), p) d1 = stream.read(11) assert_equal(d1, data[p:p+11]) 
stream.seek(321, 1) p = 123+11+321 assert_equal(stream.tell(), p) d2 = stream.read(21) assert_equal(d2, data[p:p+21]) stream.seek(641, 0) p = 641 assert_equal(stream.tell(), p) d3 = stream.read(11) assert_equal(d3, data[p:p+11]) assert_raises(IOError, stream.seek, 10, 2) assert_raises(IOError, stream.seek, -1, 1) assert_raises(ValueError, stream.seek, 1, 123) stream.seek(10000, 1) assert_raises(IOError, stream.read, 12) def test_all_data_read(self): compressed_stream, compressed_data_len, data = self._get_data(1024) stream = ZlibInputStream(compressed_stream, compressed_data_len) assert_(not stream.all_data_read()) stream.seek(512) assert_(not stream.all_data_read()) stream.seek(1024) assert_(stream.all_data_read())
5,515
28.816216
79
py
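A sketch of the ZlibInputStream usage the tests above exercise, with hand-made compressed data; the constructor signature (file object, compressed length) is taken from those tests:

import io
import zlib
from scipy.io.matlab.streams import ZlibInputStream

data = b'matlab stream' * 50
compressed = zlib.compress(data)
stream = ZlibInputStream(io.BytesIO(compressed), len(compressed))
assert stream.read(13) == data[:13]  # decompresses transparently
stream.seek(26)                      # forward seeks are supported
assert stream.tell() == 26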
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/afunc.m
function [a, b] = afunc(c, d) % A function a = c + 1; b = d + 10;
66
12.4
29
m
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/test_miobase.py
""" Testing miobase module """ import numpy as np from numpy.testing import assert_equal from pytest import raises as assert_raises from scipy.io.matlab.miobase import matdims def test_matdims(): # Test matdims dimension finder assert_equal(matdims(np.array(1)), (1, 1)) # numpy scalar assert_equal(matdims(np.array([1])), (1, 1)) # 1d array, 1 element assert_equal(matdims(np.array([1,2])), (2, 1)) # 1d array, 2 elements assert_equal(matdims(np.array([[2],[3]])), (2, 1)) # 2d array, column vector assert_equal(matdims(np.array([[2,3]])), (1, 2)) # 2d array, row vector # 3d array, rowish vector assert_equal(matdims(np.array([[[2,3]]])), (1, 1, 2)) assert_equal(matdims(np.array([])), (0, 0)) # empty 1d array assert_equal(matdims(np.array([[]])), (0, 0)) # empty 2d assert_equal(matdims(np.array([[[]]])), (0, 0, 0)) # empty 3d # Optional argument flips 1-D shape behavior. assert_equal(matdims(np.array([1,2]), 'row'), (1, 2)) # 1d array, 2 elements # The argument has to make sense though assert_raises(ValueError, matdims, np.array([1,2]), 'bizarre') # Check empty sparse matrices get their own shape from scipy.sparse import csr_matrix, csc_matrix assert_equal(matdims(csr_matrix(np.zeros((3, 3)))), (3, 3)) assert_equal(matdims(csc_matrix(np.zeros((2, 2)))), (2, 2))
1,366
41.71875
81
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/save_matfile.m
function save_matfile(test_name, v) % saves variable passed in as v, with filename built from prefix global FILEPREFIX FILESUFFIX eval([test_name ' = v;']); save([FILEPREFIX test_name FILESUFFIX], test_name)
200
32.5
54
m
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/gen_mat5files.m
% Generates mat files for loadmat unit tests % This is the version for matlab 5 and higher % Uses save_matfile.m function % work out matlab version and file suffix for test files global FILEPREFIX FILESUFFIX FILEPREFIX = [fullfile(pwd, 'data') filesep]; temp = ver('MATLAB'); mlv = temp.Version; FILESUFFIX = ['_' mlv '_' computer '.mat']; % basic double array theta = 0:pi/4:2*pi; save_matfile('testdouble', theta); % string save_matfile('teststring', '"Do nine men interpret?" "Nine men," I nod.') % complex save_matfile('testcomplex', cos(theta) + 1j*sin(theta)); % asymmetric array to check indexing a = zeros(3, 5); a(:,1) = [1:3]'; a(1,:) = 1:5; % 2D matrix save_matfile('testmatrix', a); % minus number - tests signed int save_matfile('testminus', -1); % single character save_matfile('testonechar', 'r'); % string array save_matfile('teststringarray', ['one '; 'two '; 'three']); % sparse array save_matfile('testsparse', sparse(a)); % sparse complex array b = sparse(a); b(1,1) = b(1,1) + j; save_matfile('testsparsecomplex', b); % Two variables in same file save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta') % struct save_matfile('teststruct', ... struct('stringfield','Rats live on no evil star.',... 'doublefield',[sqrt(2) exp(1) pi],... 'complexfield',(1+1j)*[sqrt(2) exp(1) pi])); % cell save_matfile('testcell', ... {['This cell contains this string and 3 arrays of increasing' ... ' length'], 1., 1.:2., 1.:3.}); % scalar cell save_matfile('testscalarcell', {1}) % Empty cells in two cell matrices save_matfile('testemptycell', {1, 2, [], [], 3}); % 3D matrix save_matfile('test3dmatrix', reshape(1:24,[2 3 4])) % nested cell array save_matfile('testcellnest', {1, {2, 3, {4, 5}}}); % nested struct save_matfile('teststructnest', struct('one', 1, 'two', ... struct('three', 'number 3'))); % array of struct save_matfile('teststructarr', [struct('one', 1, 'two', 2) ... struct('one', 'number 1', 'two', 'number 2')]); % matlab object save_matfile('testobject', inline('x')) % array of matlab objects %save_matfile('testobjarr', [inline('x') inline('x')]) % unicode test if str2num(mlv) > 7 % function added 7.0.1 fid = fopen([FILEPREFIX 'japanese_utf8.txt']); from_japan = fread(fid, 'uint8')'; fclose(fid); save_matfile('testunicode', native2unicode(from_japan, 'utf-8')); end % func if str2num(mlv) > 7 % function pointers added recently func = @afunc; save_matfile('testfunc', func); end
2,485
23.86
73
m
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/test_mio_funcs.py
''' Jottings to work out format for __function_workspace__ matrix at end of mat file. ''' from __future__ import division, print_function, absolute_import import os.path import sys import io from numpy.compat import asstr from scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader, MatlabFunction) test_data_path = os.path.join(os.path.dirname(__file__), 'data') def read_minimat_vars(rdr): rdr.initialize_read() mdict = {'__globals__': []} i = 0 while not rdr.end_of_stream(): hdr, next_position = rdr.read_var_header() name = asstr(hdr.name) if name == '': name = 'var_%d' % i i += 1 res = rdr.read_var_array(hdr, process=False) rdr.mat_stream.seek(next_position) mdict[name] = res if hdr.is_global: mdict['__globals__'].append(name) return mdict def read_workspace_vars(fname): fp = open(fname, 'rb') rdr = MatFile5Reader(fp, struct_as_record=True) vars = rdr.get_variables() fws = vars['__function_workspace__'] ws_bs = io.BytesIO(fws.tostring()) ws_bs.seek(2) rdr.mat_stream = ws_bs # Guess byte order. mi = rdr.mat_stream.read(2) rdr.byte_order = mi == b'IM' and '<' or '>' rdr.mat_stream.read(4) # presumably byte padding mdict = read_minimat_vars(rdr) fp.close() return mdict def test_jottings(): # example fname = os.path.join(test_data_path, 'parabola.mat') ws_vars = read_workspace_vars(fname)
1,551
25.758621
72
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/test_mio_utils.py
""" Testing """ from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_array_equal, assert_array_almost_equal, \ assert_ from scipy.io.matlab.mio_utils import squeeze_element, chars_to_strings def test_squeeze_element(): a = np.zeros((1,3)) assert_array_equal(np.squeeze(a), squeeze_element(a)) # 0d output from squeeze gives scalar sq_int = squeeze_element(np.zeros((1,1), dtype=float)) assert_(isinstance(sq_int, float)) # Unless it's a structured array sq_sa = squeeze_element(np.zeros((1,1),dtype=[('f1', 'f')])) assert_(isinstance(sq_sa, np.ndarray)) def test_chars_strings(): # chars as strings strings = ['learn ', 'python', 'fast ', 'here '] str_arr = np.array(strings, dtype='U6') # shape (4,) chars = [list(s) for s in strings] char_arr = np.array(chars, dtype='U1') # shape (4,6) assert_array_equal(chars_to_strings(char_arr), str_arr) ca2d = char_arr.reshape((2,2,6)) sa2d = str_arr.reshape((2,2)) assert_array_equal(chars_to_strings(ca2d), sa2d) ca3d = char_arr.reshape((1,2,2,6)) sa3d = str_arr.reshape((1,2,2)) assert_array_equal(chars_to_strings(ca3d), sa3d) # Fortran ordered arrays char_arrf = np.array(chars, dtype='U1', order='F') # shape (4,6) assert_array_equal(chars_to_strings(char_arrf), str_arr) # empty array arr = np.array([['']], dtype='U1') out_arr = np.array([''], dtype='U1') assert_array_equal(chars_to_strings(arr), out_arr)
1,549
31.978723
74
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/test_mio5_utils.py
""" Testing mio5_utils Cython module """ from __future__ import division, print_function, absolute_import import sys from io import BytesIO cStringIO = BytesIO import numpy as np from numpy.testing import assert_array_equal, assert_equal, assert_ from pytest import raises as assert_raises from scipy._lib.six import u import scipy.io.matlab.byteordercodes as boc import scipy.io.matlab.streams as streams import scipy.io.matlab.mio5_params as mio5p import scipy.io.matlab.mio5_utils as m5u def test_byteswap(): for val in ( 1, 0x100, 0x10000): a = np.array(val, dtype=np.uint32) b = a.byteswap() c = m5u.byteswap_u4(a) assert_equal(b.item(), c) d = m5u.byteswap_u4(c) assert_equal(a.item(), d) def _make_tag(base_dt, val, mdtype, sde=False): ''' Makes a simple matlab tag, full or sde ''' base_dt = np.dtype(base_dt) bo = boc.to_numpy_code(base_dt.byteorder) byte_count = base_dt.itemsize if not sde: udt = bo + 'u4' padding = 8 - (byte_count % 8) all_dt = [('mdtype', udt), ('byte_count', udt), ('val', base_dt)] if padding: all_dt.append(('padding', 'u1', padding)) else: # is sde udt = bo + 'u2' padding = 4-byte_count if bo == '<': # little endian all_dt = [('mdtype', udt), ('byte_count', udt), ('val', base_dt)] else: # big endian all_dt = [('byte_count', udt), ('mdtype', udt), ('val', base_dt)] if padding: all_dt.append(('padding', 'u1', padding)) tag = np.zeros((1,), dtype=all_dt) tag['mdtype'] = mdtype tag['byte_count'] = byte_count tag['val'] = val return tag def _write_stream(stream, *strings): stream.truncate(0) stream.seek(0) for s in strings: stream.write(s) stream.seek(0) def _make_readerlike(stream, byte_order=boc.native_code): class R(object): pass r = R() r.mat_stream = stream r.byte_order = byte_order r.struct_as_record = True r.uint16_codec = sys.getdefaultencoding() r.chars_as_strings = False r.mat_dtype = False r.squeeze_me = False return r def test_read_tag(): # mainly to test errors # make reader-like thing str_io = BytesIO() r = _make_readerlike(str_io) c_reader = m5u.VarReader5(r) # This works for StringIO but _not_ cStringIO assert_raises(IOError, c_reader.read_tag) # bad SDE tag = _make_tag('i4', 1, mio5p.miINT32, sde=True) tag['byte_count'] = 5 _write_stream(str_io, tag.tostring()) assert_raises(ValueError, c_reader.read_tag) def test_read_stream(): tag = _make_tag('i4', 1, mio5p.miINT32, sde=True) tag_str = tag.tostring() str_io = cStringIO(tag_str) st = streams.make_stream(str_io) s = streams._read_into(st, tag.itemsize) assert_equal(s, tag.tostring()) def test_read_numeric(): # make reader-like thing str_io = cStringIO() r = _make_readerlike(str_io) # check simplest of tags for base_dt, val, mdtype in (('u2', 30, mio5p.miUINT16), ('i4', 1, mio5p.miINT32), ('i2', -1, mio5p.miINT16)): for byte_code in ('<', '>'): r.byte_order = byte_code c_reader = m5u.VarReader5(r) assert_equal(c_reader.little_endian, byte_code == '<') assert_equal(c_reader.is_swapped, byte_code != boc.native_code) for sde_f in (False, True): dt = np.dtype(base_dt).newbyteorder(byte_code) a = _make_tag(dt, val, mdtype, sde_f) a_str = a.tostring() _write_stream(str_io, a_str) el = c_reader.read_numeric() assert_equal(el, val) # two sequential reads _write_stream(str_io, a_str, a_str) el = c_reader.read_numeric() assert_equal(el, val) el = c_reader.read_numeric() assert_equal(el, val) def test_read_numeric_writeable(): # make reader-like thing str_io = cStringIO() r = _make_readerlike(str_io, '<') c_reader = m5u.VarReader5(r) dt = np.dtype('<u2') a = _make_tag(dt, 30, mio5p.miUINT16, 0) a_str = a.tostring() 
_write_stream(str_io, a_str) el = c_reader.read_numeric() assert_(el.flags.writeable is True) def test_zero_byte_string(): # Tests hack to allow chars of non-zero length, but 0 bytes # make reader-like thing str_io = cStringIO() r = _make_readerlike(str_io, boc.native_code) c_reader = m5u.VarReader5(r) tag_dt = np.dtype([('mdtype', 'u4'), ('byte_count', 'u4')]) tag = np.zeros((1,), dtype=tag_dt) tag['mdtype'] = mio5p.miINT8 tag['byte_count'] = 1 hdr = m5u.VarHeader5() # Try when string is 1 length hdr.set_dims([1,]) _write_stream(str_io, tag.tostring() + b' ') str_io.seek(0) val = c_reader.read_char(hdr) assert_equal(val, u(' ')) # Now when string has 0 bytes 1 length tag['byte_count'] = 0 _write_stream(str_io, tag.tostring()) str_io.seek(0) val = c_reader.read_char(hdr) assert_equal(val, u(' ')) # Now when string has 0 bytes 4 length str_io.seek(0) hdr.set_dims([4,]) val = c_reader.read_char(hdr) assert_array_equal(val, [u(' ')] * 4)
5,536
28.768817
75
py
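The little-endian small-data-element layout that _make_tag builds above, shown as raw bytes; the dtype below mirrors the sde branch of that helper, with miINT8 copied from mio5_params:

import numpy as np

miINT8 = 1
sde = np.zeros((1,), dtype=[('mdtype', '<u2'), ('byte_count', '<u2'),
                            ('val', 'i1'), ('padding', 'u1', 3)])
sde['mdtype'] = miINT8
sde['byte_count'] = 1
sde['val'] = 7
# mdtype and byte_count share the first four bytes of the 8-byte tag.
assert sde.tobytes() == b'\x01\x00\x01\x00\x07\x00\x00\x00'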
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/test_mio.py
# -*- coding: latin-1 -*- ''' Nose test generators Need function load / save / roundtrip tests ''' from __future__ import division, print_function, absolute_import import os from collections import OrderedDict from os.path import join as pjoin, dirname from glob import glob from io import BytesIO from tempfile import mkdtemp from scipy._lib.six import u, text_type, string_types import warnings import shutil import gzip from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_equal, assert_) from pytest import raises as assert_raises from scipy._lib._numpy_compat import suppress_warnings import numpy as np from numpy import array import scipy.sparse as SP import scipy.io.matlab.byteordercodes as boc from scipy.io.matlab.miobase import matdims, MatWriteError, MatReadError from scipy.io.matlab.mio import (mat_reader_factory, loadmat, savemat, whosmat) from scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader, MatlabFunction, varmats_from_mat, to_writeable, EmptyStructMarker) from scipy.io.matlab import mio5_params as mio5p test_data_path = pjoin(dirname(__file__), 'data') def mlarr(*args, **kwargs): """Convenience function to return matlab-compatible 2D array.""" arr = np.array(*args, **kwargs) arr.shape = matdims(arr) return arr # Define cases to test theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9) case_table4 = [ {'name': 'double', 'classes': {'testdouble': 'double'}, 'expected': {'testdouble': theta} }] case_table4.append( {'name': 'string', 'classes': {'teststring': 'char'}, 'expected': {'teststring': array([u('"Do nine men interpret?" "Nine men," I nod.')])} }) case_table4.append( {'name': 'complex', 'classes': {'testcomplex': 'double'}, 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)} }) A = np.zeros((3,5)) A[0] = list(range(1,6)) A[:,0] = list(range(1,4)) case_table4.append( {'name': 'matrix', 'classes': {'testmatrix': 'double'}, 'expected': {'testmatrix': A}, }) case_table4.append( {'name': 'sparse', 'classes': {'testsparse': 'sparse'}, 'expected': {'testsparse': SP.coo_matrix(A)}, }) B = A.astype(complex) B[0,0] += 1j case_table4.append( {'name': 'sparsecomplex', 'classes': {'testsparsecomplex': 'sparse'}, 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, }) case_table4.append( {'name': 'multi', 'classes': {'theta': 'double', 'a': 'double'}, 'expected': {'theta': theta, 'a': A}, }) case_table4.append( {'name': 'minus', 'classes': {'testminus': 'double'}, 'expected': {'testminus': mlarr(-1)}, }) case_table4.append( {'name': 'onechar', 'classes': {'testonechar': 'char'}, 'expected': {'testonechar': array([u('r')])}, }) # Cell arrays stored as object arrays CA = mlarr(( # tuple for object array creation [], mlarr([1]), mlarr([[1,2]]), mlarr([[1,2,3]])), dtype=object).reshape(1,-1) CA[0,0] = array( [u('This cell contains this string and 3 arrays of increasing length')]) case_table5 = [ {'name': 'cell', 'classes': {'testcell': 'cell'}, 'expected': {'testcell': CA}}] CAE = mlarr(( # tuple for object array creation mlarr(1), mlarr(2), mlarr([]), mlarr([]), mlarr(3)), dtype=object).reshape(1,-1) objarr = np.empty((1,1),dtype=object) objarr[0,0] = mlarr(1) case_table5.append( {'name': 'scalarcell', 'classes': {'testscalarcell': 'cell'}, 'expected': {'testscalarcell': objarr} }) case_table5.append( {'name': 'emptycell', 'classes': {'testemptycell': 'cell'}, 'expected': {'testemptycell': CAE}}) case_table5.append( {'name': 'stringarray', 'classes': {'teststringarray': 'char'}, 'expected': {'teststringarray': array( [u('one '), 
u('two '), u('three')])}, }) case_table5.append( {'name': '3dmatrix', 'classes': {'test3dmatrix': 'double'}, 'expected': { 'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))} }) st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3) dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']] st1 = np.zeros((1,1), dtype) st1['stringfield'][0,0] = array([u('Rats live on no evil star.')]) st1['doublefield'][0,0] = st_sub_arr st1['complexfield'][0,0] = st_sub_arr * (1 + 1j) case_table5.append( {'name': 'struct', 'classes': {'teststruct': 'struct'}, 'expected': {'teststruct': st1} }) CN = np.zeros((1,2), dtype=object) CN[0,0] = mlarr(1) CN[0,1] = np.zeros((1,3), dtype=object) CN[0,1][0,0] = mlarr(2, dtype=np.uint8) CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8) CN[0,1][0,2] = np.zeros((1,2), dtype=object) CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8) CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8) case_table5.append( {'name': 'cellnest', 'classes': {'testcellnest': 'cell'}, 'expected': {'testcellnest': CN}, }) st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']]) st2[0,0]['one'] = mlarr(1) st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)]) st2[0,0]['two'][0,0]['three'] = array([u('number 3')]) case_table5.append( {'name': 'structnest', 'classes': {'teststructnest': 'struct'}, 'expected': {'teststructnest': st2} }) a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']]) a[0,0]['one'] = mlarr(1) a[0,0]['two'] = mlarr(2) a[0,1]['one'] = array([u('number 1')]) a[0,1]['two'] = array([u('number 2')]) case_table5.append( {'name': 'structarr', 'classes': {'teststructarr': 'struct'}, 'expected': {'teststructarr': a} }) ODT = np.dtype([(n, object) for n in ['expr', 'inputExpr', 'args', 'isEmpty', 'numArgs', 'version']]) MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline') m0 = MO[0,0] m0['expr'] = array([u('x')]) m0['inputExpr'] = array([u(' x = INLINE_INPUTS_{1};')]) m0['args'] = array([u('x')]) m0['isEmpty'] = mlarr(0) m0['numArgs'] = mlarr(1) m0['version'] = mlarr(1) case_table5.append( {'name': 'object', 'classes': {'testobject': 'object'}, 'expected': {'testobject': MO} }) fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb') u_str = fp_u_str.read().decode('utf-8') fp_u_str.close() case_table5.append( {'name': 'unicode', 'classes': {'testunicode': 'char'}, 'expected': {'testunicode': array([u_str])} }) case_table5.append( {'name': 'sparse', 'classes': {'testsparse': 'sparse'}, 'expected': {'testsparse': SP.coo_matrix(A)}, }) case_table5.append( {'name': 'sparsecomplex', 'classes': {'testsparsecomplex': 'sparse'}, 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, }) case_table5.append( {'name': 'bool', 'classes': {'testbools': 'logical'}, 'expected': {'testbools': array([[True], [False]])}, }) case_table5_rt = case_table5[:] # Inline functions can't be concatenated in matlab, so RT only case_table5_rt.append( {'name': 'objectarray', 'classes': {'testobjectarray': 'object'}, 'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}}) def types_compatible(var1, var2): """Check if types are same or compatible. 0-D numpy scalars are compatible with bare python scalars. 
""" type1 = type(var1) type2 = type(var2) if type1 is type2: return True if type1 is np.ndarray and var1.shape == (): return type(var1.item()) is type2 if type2 is np.ndarray and var2.shape == (): return type(var2.item()) is type1 return False def _check_level(label, expected, actual): """ Check one level of a potentially nested array """ if SP.issparse(expected): # allow different types of sparse matrices assert_(SP.issparse(actual)) assert_array_almost_equal(actual.todense(), expected.todense(), err_msg=label, decimal=5) return # Check types are as expected assert_(types_compatible(expected, actual), "Expected type %s, got %s at %s" % (type(expected), type(actual), label)) # A field in a record array may not be an ndarray # A scalar from a record array will be type np.void if not isinstance(expected, (np.void, np.ndarray, MatlabObject)): assert_equal(expected, actual) return # This is an ndarray-like thing assert_(expected.shape == actual.shape, msg='Expected shape %s, got %s at %s' % (expected.shape, actual.shape, label)) ex_dtype = expected.dtype if ex_dtype.hasobject: # array of objects if isinstance(expected, MatlabObject): assert_equal(expected.classname, actual.classname) for i, ev in enumerate(expected): level_label = "%s, [%d], " % (label, i) _check_level(level_label, ev, actual[i]) return if ex_dtype.fields: # probably recarray for fn in ex_dtype.fields: level_label = "%s, field %s, " % (label, fn) _check_level(level_label, expected[fn], actual[fn]) return if ex_dtype.type in (text_type, # string or bool np.unicode_, np.bool_): assert_equal(actual, expected, err_msg=label) return # Something numeric assert_array_almost_equal(actual, expected, err_msg=label, decimal=5) def _load_check_case(name, files, case): for file_name in files: matdict = loadmat(file_name, struct_as_record=True) label = "test %s; file %s" % (name, file_name) for k, expected in case.items(): k_label = "%s, variable %s" % (label, k) assert_(k in matdict, "Missing key at %s" % k_label) _check_level(k_label, expected, matdict[k]) def _whos_check_case(name, files, case, classes): for file_name in files: label = "test %s; file %s" % (name, file_name) whos = whosmat(file_name) expected_whos = [] for k, expected in case.items(): expected_whos.append((k, expected.shape, classes[k])) whos.sort() expected_whos.sort() assert_equal(whos, expected_whos, "%s: %r != %r" % (label, whos, expected_whos) ) # Round trip tests def _rt_check_case(name, expected, format): mat_stream = BytesIO() savemat(mat_stream, expected, format=format) mat_stream.seek(0) _load_check_case(name, [mat_stream], expected) # generator for load tests def test_load(): for case in case_table4 + case_table5: name = case['name'] expected = case['expected'] filt = pjoin(test_data_path, 'test%s_*.mat' % name) files = glob(filt) assert_(len(files) > 0, "No files for test %s using filter %s" % (name, filt)) _load_check_case(name, files, expected) # generator for whos tests def test_whos(): for case in case_table4 + case_table5: name = case['name'] expected = case['expected'] classes = case['classes'] filt = pjoin(test_data_path, 'test%s_*.mat' % name) files = glob(filt) assert_(len(files) > 0, "No files for test %s using filter %s" % (name, filt)) _whos_check_case(name, files, expected, classes) # generator for round trip tests def test_round_trip(): for case in case_table4 + case_table5_rt: case_table4_names = [case['name'] for case in case_table4] name = case['name'] + '_round_trip' expected = case['expected'] for format in (['4', '5'] if case['name'] in 
case_table4_names else ['5']): _rt_check_case(name, expected, format) def test_gzip_simple(): xdense = np.zeros((20,20)) xdense[2,3] = 2.3 xdense[4,5] = 4.5 x = SP.csc_matrix(xdense) name = 'gzip_test' expected = {'x':x} format = '4' tmpdir = mkdtemp() try: fname = pjoin(tmpdir,name) mat_stream = gzip.open(fname,mode='wb') savemat(mat_stream, expected, format=format) mat_stream.close() mat_stream = gzip.open(fname,mode='rb') actual = loadmat(mat_stream, struct_as_record=True) mat_stream.close() finally: shutil.rmtree(tmpdir) assert_array_almost_equal(actual['x'].todense(), expected['x'].todense(), err_msg=repr(actual)) def test_multiple_open(): # Ticket #1039, on Windows: check that files are not left open tmpdir = mkdtemp() try: x = dict(x=np.zeros((2, 2))) fname = pjoin(tmpdir, "a.mat") # Check that file is not left open savemat(fname, x) os.unlink(fname) savemat(fname, x) loadmat(fname) os.unlink(fname) # Check that stream is left open f = open(fname, 'wb') savemat(f, x) f.seek(0) f.close() f = open(fname, 'rb') loadmat(f) f.seek(0) f.close() finally: shutil.rmtree(tmpdir) def test_mat73(): # Check any hdf5 files raise an error filenames = glob( pjoin(test_data_path, 'testhdf5*.mat')) assert_(len(filenames) > 0) for filename in filenames: fp = open(filename, 'rb') assert_raises(NotImplementedError, loadmat, fp, struct_as_record=True) fp.close() def test_warnings(): # This test is an echo of the previous behavior, which was to raise a # warning if the user triggered a search for mat files on the Python system # path. We can remove the test in the next version after upcoming (0.13) fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat') with warnings.catch_warnings(): warnings.simplefilter('error') # This should not generate a warning mres = loadmat(fname, struct_as_record=True) # This neither mres = loadmat(fname, struct_as_record=False) def test_regression_653(): # Saving a dictionary with only invalid keys used to raise an error. Now we # save this as an empty struct in matlab space. 
sio = BytesIO() savemat(sio, {'d':{1:2}}, format='5') back = loadmat(sio)['d'] # Check we got an empty struct equivalent assert_equal(back.shape, (1,1)) assert_equal(back.dtype, np.dtype(object)) assert_(back[0,0] is None) def test_structname_len(): # Test limit for length of field names in structs lim = 31 fldname = 'a' * lim st1 = np.zeros((1,1), dtype=[(fldname, object)]) savemat(BytesIO(), {'longstruct': st1}, format='5') fldname = 'a' * (lim+1) st1 = np.zeros((1,1), dtype=[(fldname, object)]) assert_raises(ValueError, savemat, BytesIO(), {'longstruct': st1}, format='5') def test_4_and_long_field_names_incompatible(): # Long field names option not supported in 4 my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)]) assert_raises(ValueError, savemat, BytesIO(), {'my_struct':my_struct}, format='4', long_field_names=True) def test_long_field_names(): # Test limit for length of field names in structs lim = 63 fldname = 'a' * lim st1 = np.zeros((1,1), dtype=[(fldname, object)]) savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True) fldname = 'a' * (lim+1) st1 = np.zeros((1,1), dtype=[(fldname, object)]) assert_raises(ValueError, savemat, BytesIO(), {'longstruct': st1}, format='5',long_field_names=True) def test_long_field_names_in_struct(): # Regression test - long_field_names was erased if you passed a struct # within a struct lim = 63 fldname = 'a' * lim cell = np.ndarray((1,2),dtype=object) st1 = np.zeros((1,1), dtype=[(fldname, object)]) cell[0,0] = st1 cell[0,1] = st1 savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True) # # Check to make sure it fails with long field names off # assert_raises(ValueError, savemat, BytesIO(), {'longstruct': cell}, format='5', long_field_names=False) def test_cell_with_one_thing_in_it(): # Regression test - make a cell array that's 1 x 2 and put two # strings in it. It works. Make a cell array that's 1 x 1 and put # a string in it. It should work but, in the old days, it didn't. 
cells = np.ndarray((1,2),dtype=object) cells[0,0] = 'Hello' cells[0,1] = 'World' savemat(BytesIO(), {'x': cells}, format='5') cells = np.ndarray((1,1),dtype=object) cells[0,0] = 'Hello, world' savemat(BytesIO(), {'x': cells}, format='5') def test_writer_properties(): # Tests getting, setting of properties of matrix writer mfw = MatFile5Writer(BytesIO()) assert_equal(mfw.global_vars, []) mfw.global_vars = ['avar'] assert_equal(mfw.global_vars, ['avar']) assert_equal(mfw.unicode_strings, False) mfw.unicode_strings = True assert_equal(mfw.unicode_strings, True) assert_equal(mfw.long_field_names, False) mfw.long_field_names = True assert_equal(mfw.long_field_names, True) def test_use_small_element(): # Test whether we're using small data element or not sio = BytesIO() wtr = MatFile5Writer(sio) # First check size for no sde for name arr = np.zeros(10) wtr.put_variables({'aaaaa': arr}) w_sz = len(sio.getvalue()) # Check small name results in largish difference in size sio.truncate(0) sio.seek(0) wtr.put_variables({'aaaa': arr}) assert_(w_sz - len(sio.getvalue()) > 4) # Whereas increasing name size makes less difference sio.truncate(0) sio.seek(0) wtr.put_variables({'aaaaaa': arr}) assert_(len(sio.getvalue()) - w_sz < 4) def test_save_dict(): # Test that dict can be saved (as recarray), loaded as matstruct dict_types = ((dict, False), (OrderedDict, True),) ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)]) ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)]) for dict_type, is_ordered in dict_types: # Initialize with tuples to keep order for OrderedDict d = dict_type([('a', 1), ('b', 2)]) stream = BytesIO() savemat(stream, {'dict': d}) stream.seek(0) vals = loadmat(stream)['dict'] assert_equal(set(vals.dtype.names), set(['a', 'b'])) if is_ordered: # Input was ordered, output in ab order assert_array_equal(vals, ab_exp) else: # Not ordered input, either order output if vals.dtype.names[0] == 'a': assert_array_equal(vals, ab_exp) else: assert_array_equal(vals, ba_exp) def test_1d_shape(): # New 5 behavior is 1D -> row vector arr = np.arange(5) for format in ('4', '5'): # Column is the default stream = BytesIO() savemat(stream, {'oned': arr}, format=format) vals = loadmat(stream) assert_equal(vals['oned'].shape, (1, 5)) # can be explicitly 'column' for oned_as stream = BytesIO() savemat(stream, {'oned':arr}, format=format, oned_as='column') vals = loadmat(stream) assert_equal(vals['oned'].shape, (5,1)) # but different from 'row' stream = BytesIO() savemat(stream, {'oned':arr}, format=format, oned_as='row') vals = loadmat(stream) assert_equal(vals['oned'].shape, (1,5)) def test_compression(): arr = np.zeros(100).reshape((5,20)) arr[2,10] = 1 stream = BytesIO() savemat(stream, {'arr':arr}) raw_len = len(stream.getvalue()) vals = loadmat(stream) assert_array_equal(vals['arr'], arr) stream = BytesIO() savemat(stream, {'arr':arr}, do_compression=True) compressed_len = len(stream.getvalue()) vals = loadmat(stream) assert_array_equal(vals['arr'], arr) assert_(raw_len > compressed_len) # Concatenate, test later arr2 = arr.copy() arr2[0,0] = 1 stream = BytesIO() savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False) vals = loadmat(stream) assert_array_equal(vals['arr2'], arr2) stream = BytesIO() savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True) vals = loadmat(stream) assert_array_equal(vals['arr2'], arr2) def test_single_object(): stream = BytesIO() savemat(stream, {'A':np.array(1, dtype=object)}) def test_skip_variable(): # Test skipping over the first of 
two variables in a MAT file # using mat_reader_factory and put_variables to read them in. # # This is a regression test of a problem that's caused by # using the compressed file reader seek instead of the raw file # I/O seek when skipping over a compressed chunk. # # The problem arises when the chunk is large: this file has # a 256x256 array of random (uncompressible) doubles. # filename = pjoin(test_data_path,'test_skip_variable.mat') # # Prove that it loads with loadmat # d = loadmat(filename, struct_as_record=True) assert_('first' in d) assert_('second' in d) # # Make the factory # factory, file_opened = mat_reader_factory(filename, struct_as_record=True) # # This is where the factory breaks with an error in MatMatrixGetter.to_next # d = factory.get_variables('second') assert_('second' in d) factory.mat_stream.close() def test_empty_struct(): # ticket 885 filename = pjoin(test_data_path,'test_empty_struct.mat') # before ticket fix, this would crash with ValueError, empty data # type d = loadmat(filename, struct_as_record=True) a = d['a'] assert_equal(a.shape, (1,1)) assert_equal(a.dtype, np.dtype(object)) assert_(a[0,0] is None) stream = BytesIO() arr = np.array((), dtype='U') # before ticket fix, this used to give data type not understood savemat(stream, {'arr':arr}) d = loadmat(stream) a2 = d['arr'] assert_array_equal(a2, arr) def test_save_empty_dict(): # saving empty dict also gives empty struct stream = BytesIO() savemat(stream, {'arr': {}}) d = loadmat(stream) a = d['arr'] assert_equal(a.shape, (1,1)) assert_equal(a.dtype, np.dtype(object)) assert_(a[0,0] is None) def assert_any_equal(output, alternatives): """ Assert `output` is equal to at least one element in `alternatives` """ one_equal = False for expected in alternatives: if np.all(output == expected): one_equal = True break assert_(one_equal) def test_to_writeable(): # Test to_writeable function res = to_writeable(np.array([1])) # pass through ndarrays assert_equal(res.shape, (1,)) assert_array_equal(res, 1) # Dict fields can be written in any order expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')]) expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')]) alternatives = (expected1, expected2) assert_any_equal(to_writeable({'a':1,'b':2}), alternatives) # Fields with underscores discarded assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives) # Not-string fields discarded assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives) # String fields that are valid Python identifiers discarded assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives) # Object with field names is equivalent class klass(object): pass c = klass c.a = 1 c.b = 2 assert_any_equal(to_writeable(c), alternatives) # empty list and tuple go to empty array res = to_writeable([]) assert_equal(res.shape, (0,)) assert_equal(res.dtype.type, np.float64) res = to_writeable(()) assert_equal(res.shape, (0,)) assert_equal(res.dtype.type, np.float64) # None -> None assert_(to_writeable(None) is None) # String to strings assert_equal(to_writeable('a string').dtype.type, np.str_) # Scalars to numpy to numpy scalars res = to_writeable(1) assert_equal(res.shape, ()) assert_equal(res.dtype.type, np.array(1).dtype.type) assert_array_equal(res, 1) # Empty dict returns EmptyStructMarker assert_(to_writeable({}) is EmptyStructMarker) # Object does not have (even empty) __dict__ assert_(to_writeable(object()) is None) # Custom object does have empty __dict__, returns EmptyStructMarker class C(object): pass 
assert_(to_writeable(c()) is EmptyStructMarker) # dict keys with legal characters are convertible res = to_writeable({'a': 1})['a'] assert_equal(res.shape, (1,)) assert_equal(res.dtype.type, np.object_) # Only fields with illegal characters, falls back to EmptyStruct assert_(to_writeable({'1':1}) is EmptyStructMarker) assert_(to_writeable({'_a':1}) is EmptyStructMarker) # Unless there are valid fields, in which case structured array assert_equal(to_writeable({'1':1, 'f': 2}), np.array([(2,)], dtype=[('f', '|O8')])) def test_recarray(): # check roundtrip of structured array dt = [('f1', 'f8'), ('f2', 'S10')] arr = np.zeros((2,), dtype=dt) arr[0]['f1'] = 0.5 arr[0]['f2'] = 'python' arr[1]['f1'] = 99 arr[1]['f2'] = 'not perl' stream = BytesIO() savemat(stream, {'arr': arr}) d = loadmat(stream, struct_as_record=False) a20 = d['arr'][0,0] assert_equal(a20.f1, 0.5) assert_equal(a20.f2, 'python') d = loadmat(stream, struct_as_record=True) a20 = d['arr'][0,0] assert_equal(a20['f1'], 0.5) assert_equal(a20['f2'], 'python') # structs always come back as object types assert_equal(a20.dtype, np.dtype([('f1', 'O'), ('f2', 'O')])) a21 = d['arr'].flat[1] assert_equal(a21['f1'], 99) assert_equal(a21['f2'], 'not perl') def test_save_object(): class C(object): pass c = C() c.field1 = 1 c.field2 = 'a string' stream = BytesIO() savemat(stream, {'c': c}) d = loadmat(stream, struct_as_record=False) c2 = d['c'][0,0] assert_equal(c2.field1, 1) assert_equal(c2.field2, 'a string') d = loadmat(stream, struct_as_record=True) c2 = d['c'][0,0] assert_equal(c2['field1'], 1) assert_equal(c2['field2'], 'a string') def test_read_opts(): # tests if read is seeing option sets, at initialization and after # initialization arr = np.arange(6).reshape(1,6) stream = BytesIO() savemat(stream, {'a': arr}) rdr = MatFile5Reader(stream) back_dict = rdr.get_variables() rarr = back_dict['a'] assert_array_equal(rarr, arr) rdr = MatFile5Reader(stream, squeeze_me=True) assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,))) rdr.squeeze_me = False assert_array_equal(rarr, arr) rdr = MatFile5Reader(stream, byte_order=boc.native_code) assert_array_equal(rdr.get_variables()['a'], arr) # inverted byte code leads to error on read because of swapped # header etc rdr = MatFile5Reader(stream, byte_order=boc.swapped_code) assert_raises(Exception, rdr.get_variables) rdr.byte_order = boc.native_code assert_array_equal(rdr.get_variables()['a'], arr) arr = np.array(['a string']) stream.truncate(0) stream.seek(0) savemat(stream, {'a': arr}) rdr = MatFile5Reader(stream) assert_array_equal(rdr.get_variables()['a'], arr) rdr = MatFile5Reader(stream, chars_as_strings=False) carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1')) assert_array_equal(rdr.get_variables()['a'], carr) rdr.chars_as_strings = True assert_array_equal(rdr.get_variables()['a'], arr) def test_empty_string(): # make sure reading empty string does not raise error estring_fname = pjoin(test_data_path, 'single_empty_string.mat') fp = open(estring_fname, 'rb') rdr = MatFile5Reader(fp) d = rdr.get_variables() fp.close() assert_array_equal(d['a'], np.array([], dtype='U1')) # empty string round trip. Matlab cannot distiguish # between a string array that is empty, and a string array # containing a single empty string, because it stores strings as # arrays of char. There is no way of having an array of char that # is not empty, but contains an empty string. 
stream = BytesIO() savemat(stream, {'a': np.array([''])}) rdr = MatFile5Reader(stream) d = rdr.get_variables() assert_array_equal(d['a'], np.array([], dtype='U1')) stream.truncate(0) stream.seek(0) savemat(stream, {'a': np.array([], dtype='U1')}) rdr = MatFile5Reader(stream) d = rdr.get_variables() assert_array_equal(d['a'], np.array([], dtype='U1')) stream.close() def test_corrupted_data(): import zlib for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'), (zlib.error, 'corrupted_zlib_checksum.mat')]: with open(pjoin(test_data_path, fname), 'rb') as fp: rdr = MatFile5Reader(fp) assert_raises(exc, rdr.get_variables) def test_corrupted_data_check_can_be_disabled(): with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp: rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False) rdr.get_variables() def test_read_both_endian(): # make sure big- and little- endian data is read correctly for fname in ('big_endian.mat', 'little_endian.mat'): fp = open(pjoin(test_data_path, fname), 'rb') rdr = MatFile5Reader(fp) d = rdr.get_variables() fp.close() assert_array_equal(d['strings'], np.array([['hello'], ['world']], dtype=object)) assert_array_equal(d['floats'], np.array([[2., 3.], [3., 4.]], dtype=np.float32)) def test_write_opposite_endian(): # We don't support writing opposite endian .mat files, but we need to behave # correctly if the user supplies an other-endian numpy array to write out float_arr = np.array([[2., 3.], [3., 4.]]) int_arr = np.arange(6).reshape((2, 3)) uni_arr = np.array(['hello', 'world'], dtype='U') stream = BytesIO() savemat(stream, {'floats': float_arr.byteswap().newbyteorder(), 'ints': int_arr.byteswap().newbyteorder(), 'uni_arr': uni_arr.byteswap().newbyteorder()}) rdr = MatFile5Reader(stream) d = rdr.get_variables() assert_array_equal(d['floats'], float_arr) assert_array_equal(d['ints'], int_arr) assert_array_equal(d['uni_arr'], uni_arr) stream.close() def test_logical_array(): # The roundtrip test doesn't verify that we load the data up with the # correct (bool) dtype with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj: rdr = MatFile5Reader(fobj, mat_dtype=True) d = rdr.get_variables() x = np.array([[True], [False]], dtype=np.bool_) assert_array_equal(d['testbools'], x) assert_equal(d['testbools'].dtype, x.dtype) def test_logical_out_type(): # Confirm that bool type written as uint8, uint8 class # See gh-4022 stream = BytesIO() barr = np.array([False, True, False]) savemat(stream, {'barray': barr}) stream.seek(0) reader = MatFile5Reader(stream) reader.initialize_read() reader.read_file_header() hdr, _ = reader.read_var_header() assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS) assert_equal(hdr.is_logical, True) var = reader.read_var_array(hdr, False) assert_equal(var.dtype.type, np.uint8) def test_mat4_3d(): # test behavior when writing 3D arrays to matlab 4 files stream = BytesIO() arr = np.arange(24).reshape((2,3,4)) assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4') def test_func_read(): func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat') fp = open(func_eg, 'rb') rdr = MatFile5Reader(fp) d = rdr.get_variables() fp.close() assert_(isinstance(d['testfunc'], MatlabFunction)) stream = BytesIO() wtr = MatFile5Writer(stream) assert_raises(MatWriteError, wtr.put_variables, d) def test_mat_dtype(): double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat') fp = open(double_eg, 'rb') rdr = MatFile5Reader(fp, mat_dtype=False) d = rdr.get_variables() fp.close() assert_equal(d['testmatrix'].dtype.kind, 'u') fp = 
open(double_eg, 'rb')
    rdr = MatFile5Reader(fp, mat_dtype=True)
    d = rdr.get_variables()
    fp.close()
    assert_equal(d['testmatrix'].dtype.kind, 'f')


def test_sparse_in_struct():
    # reproduces bug found by DC where Cython code was insisting on
    # ndarray return type, but getting sparse matrix
    st = {'sparsefield': SP.coo_matrix(np.eye(4))}
    stream = BytesIO()
    savemat(stream, {'a':st})
    d = loadmat(stream, struct_as_record=True)
    assert_array_equal(d['a'][0,0]['sparsefield'].todense(), np.eye(4))


def test_mat_struct_squeeze():
    stream = BytesIO()
    in_d = {'st':{'one':1, 'two':2}}
    savemat(stream, in_d)
    # no error without squeeze
    out_d = loadmat(stream, struct_as_record=False)
    # previous error was with squeeze, with mat_struct
    out_d = loadmat(stream,
                    struct_as_record=False,
                    squeeze_me=True,
                    )


def test_scalar_squeeze():
    stream = BytesIO()
    in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}}
    savemat(stream, in_d)
    out_d = loadmat(stream, squeeze_me=True)
    assert_(isinstance(out_d['scalar'], float))
    assert_(isinstance(out_d['string'], string_types))
    assert_(isinstance(out_d['st'], np.ndarray))


def test_str_round():
    # from report by Angus McMorland on mailing list 3 May 2010
    stream = BytesIO()
    in_arr = np.array(['Hello', 'Foob'])
    out_arr = np.array(['Hello', 'Foob '])
    savemat(stream, dict(a=in_arr))
    res = loadmat(stream)
    # resulted in ['HloolFoa', 'elWrdobr']
    assert_array_equal(res['a'], out_arr)
    stream.truncate(0)
    stream.seek(0)
    # Make Fortran ordered version of string
    in_str = in_arr.tostring(order='F')
    in_from_str = np.ndarray(shape=in_arr.shape,
                             dtype=in_arr.dtype,
                             order='F',
                             buffer=in_str)
    savemat(stream, dict(a=in_from_str))
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr)
    # unicode save did lead to buffer too small error
    stream.truncate(0)
    stream.seek(0)
    in_arr_u = in_arr.astype('U')
    out_arr_u = out_arr.astype('U')
    savemat(stream, {'a': in_arr_u})
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr_u)


def test_fieldnames():
    # Check that field names are as expected
    stream = BytesIO()
    savemat(stream, {'a': {'a':1, 'b':2}})
    res = loadmat(stream)
    field_names = res['a'].dtype.names
    assert_equal(set(field_names), set(('a', 'b')))


def test_loadmat_varnames():
    # Test that we can get just one variable from a mat file using loadmat
    mat5_sys_names = ['__globals__', '__header__', '__version__']
    for eg_file, sys_v_names in (
            (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []),
            (pjoin(test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)):
        vars = loadmat(eg_file)
        assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names='a')
        assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=['a'])
        assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=['theta'])
        assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=('theta',))
        assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=[])
        assert_equal(set(vars.keys()), set(sys_v_names))
        vnames = ['theta']
        vars = loadmat(eg_file, variable_names=vnames)
        assert_equal(vnames, ['theta'])


def test_round_types():
    # Check that saving, loading preserves dtype in most cases
    arr = np.arange(10)
    stream = BytesIO()
    for dts in ('f8','f4','i8','i4','i2','i1',
                'u8','u4','u2','u1','c16','c8'):
        stream.truncate(0)
        stream.seek(0)  # needed for BytesIO in python 3
        savemat(stream, {'arr': arr.astype(dts)})
        vals = loadmat(stream)
        assert_equal(np.dtype(dts),
vars['arr'].dtype) def test_varmats_from_mat(): # Make a mat file with several variables, write it, read it back names_vars = (('arr', mlarr(np.arange(10))), ('mystr', mlarr('a string')), ('mynum', mlarr(10))) # Dict like thing to give variables in defined order class C(object): def items(self): return names_vars stream = BytesIO() savemat(stream, C()) varmats = varmats_from_mat(stream) assert_equal(len(varmats), 3) for i in range(3): name, var_stream = varmats[i] exp_name, exp_res = names_vars[i] assert_equal(name, exp_name) res = loadmat(var_stream) assert_array_equal(res[name], exp_res) def test_one_by_zero(): # Test 1x0 chars get read correctly func_eg = pjoin(test_data_path, 'one_by_zero_char.mat') fp = open(func_eg, 'rb') rdr = MatFile5Reader(fp) d = rdr.get_variables() fp.close() assert_equal(d['var'].shape, (0,)) def test_load_mat4_le(): # We were getting byte order wrong when reading little-endian floa64 dense # matrices on big-endian platforms mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat') vars = loadmat(mat4_fname) assert_array_equal(vars['a'], [[0.1, 1.2]]) def test_unicode_mat4(): # Mat4 should save unicode as latin1 bio = BytesIO() var = {'second_cat': u('Schrödinger')} savemat(bio, var, format='4') var_back = loadmat(bio) assert_equal(var_back['second_cat'], var['second_cat']) def test_logical_sparse(): # Test we can read logical sparse stored in mat file as bytes. # See https://github.com/scipy/scipy/issues/3539. # In some files saved by MATLAB, the sparse data elements (Real Part # Subelement in MATLAB speak) are stored with apparent type double # (miDOUBLE) but are in fact single bytes. filename = pjoin(test_data_path,'logical_sparse.mat') # Before fix, this would crash with: # ValueError: indices and data should have the same size d = loadmat(filename, struct_as_record=True) log_sp = d['sp_log_5_4'] assert_(isinstance(log_sp, SP.csc_matrix)) assert_equal(log_sp.dtype.type, np.bool_) assert_array_equal(log_sp.toarray(), [[True, True, True, False], [False, False, True, False], [False, False, True, False], [False, False, False, False], [False, False, False, False]]) def test_empty_sparse(): # Can we read empty sparse matrices? sio = BytesIO() import scipy.sparse empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]]) savemat(sio, dict(x=empty_sparse)) sio.seek(0) res = loadmat(sio) assert_array_equal(res['x'].shape, empty_sparse.shape) assert_array_equal(res['x'].todense(), 0) # Do empty sparse matrices get written with max nnz 1? 
# See https://github.com/scipy/scipy/issues/4208 sio.seek(0) reader = MatFile5Reader(sio) reader.initialize_read() reader.read_file_header() hdr, _ = reader.read_var_header() assert_equal(hdr.nzmax, 1) def test_empty_mat_error(): # Test we get a specific warning for an empty mat file sio = BytesIO() assert_raises(MatReadError, loadmat, sio) def test_miuint32_compromise(): # Reader should accept miUINT32 for miINT32, but check signs # mat file with miUINT32 for miINT32, but OK values filename = pjoin(test_data_path, 'miuint32_for_miint32.mat') res = loadmat(filename) assert_equal(res['an_array'], np.arange(10)[None, :]) # mat file with miUINT32 for miINT32, with negative value filename = pjoin(test_data_path, 'bad_miuint32.mat') with suppress_warnings() as sup: sup.filter(message="unclosed file") # Py3k ResourceWarning assert_raises(ValueError, loadmat, filename) def test_miutf8_for_miint8_compromise(): # Check reader accepts ascii as miUTF8 for array names filename = pjoin(test_data_path, 'miutf8_array_name.mat') res = loadmat(filename) assert_equal(res['array_name'], [[1]]) # mat file with non-ascii utf8 name raises error filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat') with suppress_warnings() as sup: sup.filter(message="unclosed file") # Py3k ResourceWarning assert_raises(ValueError, loadmat, filename) def test_bad_utf8(): # Check that reader reads bad UTF with 'replace' option filename = pjoin(test_data_path,'broken_utf8.mat') res = loadmat(filename) assert_equal(res['bad_string'], b'\x80 am broken'.decode('utf8', 'replace')) def test_save_unicode_field(tmpdir): filename = os.path.join(str(tmpdir), 'test.mat') test_dict = {u'a':{u'b':1,u'c':'test_str'}} savemat(filename, test_dict) def test_filenotfound(): # Check the correct error is thrown assert_raises(IOError, loadmat, "NotExistentFile00.mat") assert_raises(IOError, loadmat, "NotExistentFile00")
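Every test above follows the same round-trip pattern: write with savemat into a stream or file, read back with loadmat, and compare. A minimal standalone sketch of that pattern (the stream and variable names here are illustrative, not taken from the test file):

import numpy as np
from io import BytesIO
from scipy.io import savemat, loadmat

stream = BytesIO()
savemat(stream, {'a': np.arange(3)})   # MAT-5 format by default
stream.seek(0)
vals = loadmat(stream)
# 1-D input comes back as a row vector under the default oned_as handling
assert vals['a'].shape == (1, 3)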
file_length: 42,289 | avg_line_length: 33.132365 | max_line_length: 83 | extension_type: py

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/test_pathological.py
""" Test reading of files not conforming to matlab specification We try and read any file that matlab reads, these files included """ from __future__ import division, print_function, absolute_import from os.path import dirname, join as pjoin from numpy.testing import assert_ from pytest import raises as assert_raises from scipy.io.matlab.mio import loadmat TEST_DATA_PATH = pjoin(dirname(__file__), 'data') def test_multiple_fieldnames(): # Example provided by Dharhas Pothina # Extracted using mio5.varmats_from_mat multi_fname = pjoin(TEST_DATA_PATH, 'nasty_duplicate_fieldnames.mat') vars = loadmat(multi_fname) funny_names = vars['Summary'].dtype.names assert_(set(['_1_Station_Q', '_2_Station_Q', '_3_Station_Q']).issubset(funny_names)) def test_malformed1(): # Example from gh-6072 # Contains malformed header data, which previously resulted into a # buffer overflow. # # Should raise an exception, not segfault fname = pjoin(TEST_DATA_PATH, 'malformed1.mat') with open(fname, 'rb') as f: assert_raises(ValueError, loadmat, f)
file_length: 1,125 | avg_line_length: 30.277778 | max_line_length: 73 | extension_type: py

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/__init__.py
code: (empty file)
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/matlab/tests/gen_mat4files.m
% Generates mat files for loadmat unit tests % Uses save_matfile.m function % This is the version for matlab 4 % work out matlab version and file suffix for test files global FILEPREFIX FILESUFFIX sepchar = '/'; if strcmp(computer, 'PCWIN'), sepchar = '\'; end FILEPREFIX = [pwd sepchar 'data' sepchar]; mlv = version; FILESUFFIX = ['_' mlv '_' computer '.mat']; % basic double array theta = 0:pi/4:2*pi; save_matfile('testdouble', theta); % string save_matfile('teststring', '"Do nine men interpret?" "Nine men," I nod.') % complex save_matfile('testcomplex', cos(theta) + 1j*sin(theta)); % asymmetric array to check indexing a = zeros(3, 5); a(:,1) = [1:3]'; a(1,:) = 1:5; % 2D matrix save_matfile('testmatrix', a); % minus number - tests signed int save_matfile('testminus', -1); % single character save_matfile('testonechar', 'r'); % string array save_matfile('teststringarray', ['one '; 'two '; 'three']); % sparse array save_matfile('testsparse', sparse(a)); % sparse complex array b = sparse(a); b(1,1) = b(1,1) + j; save_matfile('testsparsecomplex', b); % Two variables in same file save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta')
file_length: 1,163 | avg_line_length: 21.823529 | max_line_length: 73 | extension_type: m

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/harwell_boeing/setup.py
from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('harwell_boeing',parent_package,top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
file_length: 417 | avg_line_length: 26.866667 | max_line_length: 68 | extension_type: py

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/harwell_boeing/hb.py
""" Implementation of Harwell-Boeing read/write. At the moment not the full Harwell-Boeing format is supported. Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format """ from __future__ import division, print_function, absolute_import # TODO: # - Add more support (symmetric/complex matrices, non-assembled matrices ?) # XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but # takes a lot of memory. Being faster would require compiled code. # write is not efficient. Although not a terribly exciting task, # having reusable facilities to efficiently read/write fortran-formatted files # would be useful outside this module. import warnings import numpy as np from scipy.sparse import csc_matrix from scipy.io.harwell_boeing._fortran_format_parser import \ FortranFormatParser, IntFormat, ExpFormat from scipy._lib.six import string_types __all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile", "HBMatrixType"] class MalformedHeader(Exception): pass class LineOverflow(Warning): pass def _nbytes_full(fmt, nlines): """Return the number of bytes to read to get every full lines for the given parsed fortran format.""" return (fmt.repeat * fmt.width + 1) * (nlines - 1) class HBInfo(object): @classmethod def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None): """Create a HBInfo instance from an existing sparse matrix. Parameters ---------- m : sparse matrix the HBInfo instance will derive its parameters from m title : str Title to put in the HB header key : str Key mxtype : HBMatrixType type of the input matrix fmt : dict not implemented Returns ------- hb_info : HBInfo instance """ pointer = m.indptr indices = m.indices values = m.data nrows, ncols = m.shape nnon_zeros = m.nnz if fmt is None: # +1 because HB use one-based indexing (Fortran), and we will write # the indices /pointer as such pointer_fmt = IntFormat.from_number(np.max(pointer+1)) indices_fmt = IntFormat.from_number(np.max(indices+1)) if values.dtype.kind in np.typecodes["AllFloat"]: values_fmt = ExpFormat.from_number(-np.max(np.abs(values))) elif values.dtype.kind in np.typecodes["AllInteger"]: values_fmt = IntFormat.from_number(-np.max(np.abs(values))) else: raise NotImplementedError("type %s not implemented yet" % values.dtype.kind) else: raise NotImplementedError("fmt argument not supported yet.") if mxtype is None: if not np.isrealobj(values): raise ValueError("Complex values not supported yet") if values.dtype.kind in np.typecodes["AllInteger"]: tp = "integer" elif values.dtype.kind in np.typecodes["AllFloat"]: tp = "real" else: raise NotImplementedError("type %s for values not implemented" % values.dtype) mxtype = HBMatrixType(tp, "unsymmetric", "assembled") else: raise ValueError("mxtype argument not handled yet.") def _nlines(fmt, size): nlines = size // fmt.repeat if nlines * fmt.repeat != size: nlines += 1 return nlines pointer_nlines = _nlines(pointer_fmt, pointer.size) indices_nlines = _nlines(indices_fmt, indices.size) values_nlines = _nlines(values_fmt, values.size) total_nlines = pointer_nlines + indices_nlines + values_nlines return cls(title, key, total_nlines, pointer_nlines, indices_nlines, values_nlines, mxtype, nrows, ncols, nnon_zeros, pointer_fmt.fortran_format, indices_fmt.fortran_format, values_fmt.fortran_format) @classmethod def from_file(cls, fid): """Create a HBInfo instance from a file object containing a matrix in the HB format. 
Parameters ---------- fid : file-like matrix File or file-like object containing a matrix in the HB format. Returns ------- hb_info : HBInfo instance """ # First line line = fid.readline().strip("\n") if not len(line) > 72: raise ValueError("Expected at least 72 characters for first line, " "got: \n%s" % line) title = line[:72] key = line[72:] # Second line line = fid.readline().strip("\n") if not len(line.rstrip()) >= 56: raise ValueError("Expected at least 56 characters for second line, " "got: \n%s" % line) total_nlines = _expect_int(line[:14]) pointer_nlines = _expect_int(line[14:28]) indices_nlines = _expect_int(line[28:42]) values_nlines = _expect_int(line[42:56]) rhs_nlines = line[56:72].strip() if rhs_nlines == '': rhs_nlines = 0 else: rhs_nlines = _expect_int(rhs_nlines) if not rhs_nlines == 0: raise ValueError("Only files without right hand side supported for " "now.") # Third line line = fid.readline().strip("\n") if not len(line) >= 70: raise ValueError("Expected at least 72 character for third line, got:\n" "%s" % line) mxtype_s = line[:3].upper() if not len(mxtype_s) == 3: raise ValueError("mxtype expected to be 3 characters long") mxtype = HBMatrixType.from_fortran(mxtype_s) if mxtype.value_type not in ["real", "integer"]: raise ValueError("Only real or integer matrices supported for " "now (detected %s)" % mxtype) if not mxtype.structure == "unsymmetric": raise ValueError("Only unsymmetric matrices supported for " "now (detected %s)" % mxtype) if not mxtype.storage == "assembled": raise ValueError("Only assembled matrices supported for now") if not line[3:14] == " " * 11: raise ValueError("Malformed data for third line: %s" % line) nrows = _expect_int(line[14:28]) ncols = _expect_int(line[28:42]) nnon_zeros = _expect_int(line[42:56]) nelementals = _expect_int(line[56:70]) if not nelementals == 0: raise ValueError("Unexpected value %d for nltvl (last entry of line 3)" % nelementals) # Fourth line line = fid.readline().strip("\n") ct = line.split() if not len(ct) == 3: raise ValueError("Expected 3 formats, got %s" % ct) return cls(title, key, total_nlines, pointer_nlines, indices_nlines, values_nlines, mxtype, nrows, ncols, nnon_zeros, ct[0], ct[1], ct[2], rhs_nlines, nelementals) def __init__(self, title, key, total_nlines, pointer_nlines, indices_nlines, values_nlines, mxtype, nrows, ncols, nnon_zeros, pointer_format_str, indices_format_str, values_format_str, right_hand_sides_nlines=0, nelementals=0): """Do not use this directly, but the class ctrs (from_* functions).""" self.title = title self.key = key if title is None: title = "No Title" if len(title) > 72: raise ValueError("title cannot be > 72 characters") if key is None: key = "|No Key" if len(key) > 8: warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow) self.total_nlines = total_nlines self.pointer_nlines = pointer_nlines self.indices_nlines = indices_nlines self.values_nlines = values_nlines parser = FortranFormatParser() pointer_format = parser.parse(pointer_format_str) if not isinstance(pointer_format, IntFormat): raise ValueError("Expected int format for pointer format, got %s" % pointer_format) indices_format = parser.parse(indices_format_str) if not isinstance(indices_format, IntFormat): raise ValueError("Expected int format for indices format, got %s" % indices_format) values_format = parser.parse(values_format_str) if isinstance(values_format, ExpFormat): if mxtype.value_type not in ["real", "complex"]: raise ValueError("Inconsistency between matrix type %s and " "value type %s" % (mxtype, 
values_format)) values_dtype = np.float64 elif isinstance(values_format, IntFormat): if mxtype.value_type not in ["integer"]: raise ValueError("Inconsistency between matrix type %s and " "value type %s" % (mxtype, values_format)) # XXX: fortran int -> dtype association ? values_dtype = int else: raise ValueError("Unsupported format for values %r" % (values_format,)) self.pointer_format = pointer_format self.indices_format = indices_format self.values_format = values_format self.pointer_dtype = np.int32 self.indices_dtype = np.int32 self.values_dtype = values_dtype self.pointer_nlines = pointer_nlines self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines) self.indices_nlines = indices_nlines self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines) self.values_nlines = values_nlines self.values_nbytes_full = _nbytes_full(values_format, values_nlines) self.nrows = nrows self.ncols = ncols self.nnon_zeros = nnon_zeros self.nelementals = nelementals self.mxtype = mxtype def dump(self): """Gives the header corresponding to this instance as a string.""" header = [self.title.ljust(72) + self.key.ljust(8)] header.append("%14d%14d%14d%14d" % (self.total_nlines, self.pointer_nlines, self.indices_nlines, self.values_nlines)) header.append("%14s%14d%14d%14d%14d" % (self.mxtype.fortran_format.ljust(14), self.nrows, self.ncols, self.nnon_zeros, 0)) pffmt = self.pointer_format.fortran_format iffmt = self.indices_format.fortran_format vffmt = self.values_format.fortran_format header.append("%16s%16s%20s" % (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20))) return "\n".join(header) def _expect_int(value, msg=None): try: return int(value) except ValueError: if msg is None: msg = "Expected an int, got %s" raise ValueError(msg % value) def _read_hb_data(content, header): # XXX: look at a way to reduce memory here (big string creation) ptr_string = "".join([content.read(header.pointer_nbytes_full), content.readline()]) ptr = np.fromstring(ptr_string, dtype=int, sep=' ') ind_string = "".join([content.read(header.indices_nbytes_full), content.readline()]) ind = np.fromstring(ind_string, dtype=int, sep=' ') val_string = "".join([content.read(header.values_nbytes_full), content.readline()]) val = np.fromstring(val_string, dtype=header.values_dtype, sep=' ') try: return csc_matrix((val, ind-1, ptr-1), shape=(header.nrows, header.ncols)) except ValueError as e: raise e def _write_data(m, fid, header): def write_array(f, ar, nlines, fmt): # ar_nlines is the number of full lines, n is the number of items per # line, ffmt the fortran format pyfmt = fmt.python_format pyfmt_full = pyfmt * fmt.repeat # for each array to write, we first write the full lines, and special # case for partial line full = ar[:(nlines - 1) * fmt.repeat] for row in full.reshape((nlines-1, fmt.repeat)): f.write(pyfmt_full % tuple(row) + "\n") nremain = ar.size - full.size if nremain > 0: f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n") fid.write(header.dump()) fid.write("\n") # +1 is for fortran one-based indexing write_array(fid, m.indptr+1, header.pointer_nlines, header.pointer_format) write_array(fid, m.indices+1, header.indices_nlines, header.indices_format) write_array(fid, m.data, header.values_nlines, header.values_format) class HBMatrixType(object): """Class to hold the matrix type.""" # q2f* translates qualified names to fortran character _q2f_type = { "real": "R", "complex": "C", "pattern": "P", "integer": "I", } _q2f_structure = { "symmetric": "S", "unsymmetric": "U", "hermitian": "H", 
"skewsymmetric": "Z", "rectangular": "R" } _q2f_storage = { "assembled": "A", "elemental": "E", } _f2q_type = dict([(j, i) for i, j in _q2f_type.items()]) _f2q_structure = dict([(j, i) for i, j in _q2f_structure.items()]) _f2q_storage = dict([(j, i) for i, j in _q2f_storage.items()]) @classmethod def from_fortran(cls, fmt): if not len(fmt) == 3: raise ValueError("Fortran format for matrix type should be 3 " "characters long") try: value_type = cls._f2q_type[fmt[0]] structure = cls._f2q_structure[fmt[1]] storage = cls._f2q_storage[fmt[2]] return cls(value_type, structure, storage) except KeyError: raise ValueError("Unrecognized format %s" % fmt) def __init__(self, value_type, structure, storage="assembled"): self.value_type = value_type self.structure = structure self.storage = storage if value_type not in self._q2f_type: raise ValueError("Unrecognized type %s" % value_type) if structure not in self._q2f_structure: raise ValueError("Unrecognized structure %s" % structure) if storage not in self._q2f_storage: raise ValueError("Unrecognized storage %s" % storage) @property def fortran_format(self): return self._q2f_type[self.value_type] + \ self._q2f_structure[self.structure] + \ self._q2f_storage[self.storage] def __repr__(self): return "HBMatrixType(%s, %s, %s)" % \ (self.value_type, self.structure, self.storage) class HBFile(object): def __init__(self, file, hb_info=None): """Create a HBFile instance. Parameters ---------- file : file-object StringIO work as well hb_info : HBInfo, optional Should be given as an argument for writing, in which case the file should be writable. """ self._fid = file if hb_info is None: self._hb_info = HBInfo.from_file(file) else: #raise IOError("file %s is not writable, and hb_info " # "was given." % file) self._hb_info = hb_info @property def title(self): return self._hb_info.title @property def key(self): return self._hb_info.key @property def type(self): return self._hb_info.mxtype.value_type @property def structure(self): return self._hb_info.mxtype.structure @property def storage(self): return self._hb_info.mxtype.storage def read_matrix(self): return _read_hb_data(self._fid, self._hb_info) def write_matrix(self, m): return _write_data(m, self._fid, self._hb_info) def hb_read(path_or_open_file): """Read HB-format file. Parameters ---------- path_or_open_file : path-like or file-like If a file-like object, it is used as-is. Otherwise it is opened before reading. Returns ------- data : scipy.sparse.csc_matrix instance The data read from the HB file as a sparse matrix. Notes ----- At the moment not the full Harwell-Boeing format is supported. Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format """ def _get_matrix(fid): hb = HBFile(fid) return hb.read_matrix() if hasattr(path_or_open_file, 'read'): return _get_matrix(path_or_open_file) else: with open(path_or_open_file) as f: return _get_matrix(f) def hb_write(path_or_open_file, m, hb_info=None): """Write HB-format file. Parameters ---------- path_or_open_file : path-like or file-like If a file-like object, it is used as-is. Otherwise it is opened before writing. m : sparse-matrix the sparse matrix to write hb_info : HBInfo contains the meta-data for write Returns ------- None Notes ----- At the moment not the full Harwell-Boeing format is supported. 
Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format """ if hb_info is None: hb_info = HBInfo.from_data(m) def _set_matrix(fid): hb = HBFile(fid, hb_info) return hb.write_matrix(m) if hasattr(path_or_open_file, 'write'): return _set_matrix(path_or_open_file) else: with open(path_or_open_file, 'w') as f: return _set_matrix(f)
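hb_read and hb_write, as documented above, only handle assembled, non-symmetric, real matrices. A minimal round-trip sketch under those constraints (the in-memory buffer and matrix are illustrative):

import numpy as np
from io import StringIO
from scipy.sparse import csc_matrix
from scipy.io import hb_read, hb_write

m = csc_matrix(np.eye(3))    # real values, assembled, unsymmetric storage
buf = StringIO()
hb_write(buf, m)             # header is derived internally via HBInfo.from_data(m)
buf.seek(0)
m2 = hb_read(buf)
assert np.allclose(m.toarray(), m2.toarray())   # values survive the round trip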
file_length: 18,422 | avg_line_length: 32.865809 | max_line_length: 92 | extension_type: py

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/harwell_boeing/_fortran_format_parser.py
""" Preliminary module to handle fortran formats for IO. Does not use this outside scipy.sparse io for now, until the API is deemed reasonable. The *Format classes handle conversion between fortran and python format, and FortranFormatParser can create *Format instances from raw fortran format strings (e.g. '(3I4)', '(10I3)', etc...) """ from __future__ import division, print_function, absolute_import import re import warnings import numpy as np __all__ = ["BadFortranFormat", "FortranFormatParser", "IntFormat", "ExpFormat"] TOKENS = { "LPAR": r"\(", "RPAR": r"\)", "INT_ID": r"I", "EXP_ID": r"E", "INT": r"\d+", "DOT": r"\.", } class BadFortranFormat(SyntaxError): pass def number_digits(n): return int(np.floor(np.log10(np.abs(n))) + 1) class IntFormat(object): @classmethod def from_number(cls, n, min=None): """Given an integer, returns a "reasonable" IntFormat instance to represent any number between 0 and n if n > 0, -n and n if n < 0 Parameters ---------- n : int max number one wants to be able to represent min : int minimum number of characters to use for the format Returns ------- res : IntFormat IntFormat instance with reasonable (see Notes) computed width Notes ----- Reasonable should be understood as the minimal string length necessary without losing precision. For example, IntFormat.from_number(1) will return an IntFormat instance of width 2, so that any 0 and 1 may be represented as 1-character strings without loss of information. """ width = number_digits(n) + 1 if n < 0: width += 1 repeat = 80 // width return cls(width, min, repeat=repeat) def __init__(self, width, min=None, repeat=None): self.width = width self.repeat = repeat self.min = min def __repr__(self): r = "IntFormat(" if self.repeat: r += "%d" % self.repeat r += "I%d" % self.width if self.min: r += ".%d" % self.min return r + ")" @property def fortran_format(self): r = "(" if self.repeat: r += "%d" % self.repeat r += "I%d" % self.width if self.min: r += ".%d" % self.min return r + ")" @property def python_format(self): return "%" + str(self.width) + "d" class ExpFormat(object): @classmethod def from_number(cls, n, min=None): """Given a float number, returns a "reasonable" ExpFormat instance to represent any number between -n and n. Parameters ---------- n : float max number one wants to be able to represent min : int minimum number of characters to use for the format Returns ------- res : ExpFormat ExpFormat instance with reasonable (see Notes) computed width Notes ----- Reasonable should be understood as the minimal string length necessary to avoid losing precision. """ # len of one number in exp format: sign + 1|0 + "." + # number of digit for fractional part + 'E' + sign of exponent + # len of exponent finfo = np.finfo(n.dtype) # Number of digits for fractional part n_prec = finfo.precision + 1 # Number of digits for exponential part n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp]))) width = 1 + 1 + n_prec + 1 + n_exp + 1 if n < 0: width += 1 repeat = int(np.floor(80 / width)) return cls(width, n_prec, min, repeat=repeat) def __init__(self, width, significand, min=None, repeat=None): """\ Parameters ---------- width : int number of characters taken by the string (includes space). 
""" self.width = width self.significand = significand self.repeat = repeat self.min = min def __repr__(self): r = "ExpFormat(" if self.repeat: r += "%d" % self.repeat r += "E%d.%d" % (self.width, self.significand) if self.min: r += "E%d" % self.min return r + ")" @property def fortran_format(self): r = "(" if self.repeat: r += "%d" % self.repeat r += "E%d.%d" % (self.width, self.significand) if self.min: r += "E%d" % self.min return r + ")" @property def python_format(self): return "%" + str(self.width-1) + "." + str(self.significand) + "E" class Token(object): def __init__(self, type, value, pos): self.type = type self.value = value self.pos = pos def __str__(self): return """Token('%s', "%s")""" % (self.type, self.value) def __repr__(self): return self.__str__() class Tokenizer(object): def __init__(self): self.tokens = list(TOKENS.keys()) self.res = [re.compile(TOKENS[i]) for i in self.tokens] def input(self, s): self.data = s self.curpos = 0 self.len = len(s) def next_token(self): curpos = self.curpos tokens = self.tokens while curpos < self.len: for i, r in enumerate(self.res): m = r.match(self.data, curpos) if m is None: continue else: self.curpos = m.end() return Token(self.tokens[i], m.group(), self.curpos) raise SyntaxError("Unknown character at position %d (%s)" % (self.curpos, self.data[curpos])) # Grammar for fortran format: # format : LPAR format_string RPAR # format_string : repeated | simple # repeated : repeat simple # simple : int_fmt | exp_fmt # int_fmt : INT_ID width # exp_fmt : simple_exp_fmt # simple_exp_fmt : EXP_ID width DOT significand # extended_exp_fmt : EXP_ID width DOT significand EXP_ID ndigits # repeat : INT # width : INT # significand : INT # ndigits : INT # Naive fortran formatter - parser is hand-made class FortranFormatParser(object): """Parser for fortran format strings. The parse method returns a *Format instance. Notes ----- Only ExpFormat (exponential format for floating values) and IntFormat (integer format) for now. 
""" def __init__(self): self.tokenizer = Tokenizer() def parse(self, s): self.tokenizer.input(s) tokens = [] try: while True: t = self.tokenizer.next_token() if t is None: break else: tokens.append(t) return self._parse_format(tokens) except SyntaxError as e: raise BadFortranFormat(str(e)) def _get_min(self, tokens): next = tokens.pop(0) if not next.type == "DOT": raise SyntaxError() next = tokens.pop(0) return next.value def _expect(self, token, tp): if not token.type == tp: raise SyntaxError() def _parse_format(self, tokens): if not tokens[0].type == "LPAR": raise SyntaxError("Expected left parenthesis at position " "%d (got '%s')" % (0, tokens[0].value)) elif not tokens[-1].type == "RPAR": raise SyntaxError("Expected right parenthesis at position " "%d (got '%s')" % (len(tokens), tokens[-1].value)) tokens = tokens[1:-1] types = [t.type for t in tokens] if types[0] == "INT": repeat = int(tokens.pop(0).value) else: repeat = None next = tokens.pop(0) if next.type == "INT_ID": next = self._next(tokens, "INT") width = int(next.value) if tokens: min = int(self._get_min(tokens)) else: min = None return IntFormat(width, min, repeat) elif next.type == "EXP_ID": next = self._next(tokens, "INT") width = int(next.value) next = self._next(tokens, "DOT") next = self._next(tokens, "INT") significand = int(next.value) if tokens: next = self._next(tokens, "EXP_ID") next = self._next(tokens, "INT") min = int(next.value) else: min = None return ExpFormat(width, significand, min, repeat) else: raise SyntaxError("Invalid formater type %s" % next.value) def _next(self, tokens, tp): if not len(tokens) > 0: raise SyntaxError() next = tokens.pop(0) self._expect(next, tp) return next
file_length: 9,066 | avg_line_length: 27.875796 | max_line_length: 83 | extension_type: py

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/harwell_boeing/__init__.py
from __future__ import division, print_function, absolute_import from scipy.io.harwell_boeing.hb import MalformedHeader, HBInfo, HBFile, \ HBMatrixType, hb_read, hb_write
file_length: 176 | avg_line_length: 34.4 | max_line_length: 73 | extension_type: py

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/harwell_boeing/tests/test_fortran_format.py
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_equal from pytest import raises as assert_raises from scipy.io.harwell_boeing._fortran_format_parser import ( FortranFormatParser, IntFormat, ExpFormat, BadFortranFormat, number_digits) class TestFortranFormatParser(object): def setup_method(self): self.parser = FortranFormatParser() def _test_equal(self, format, ref): ret = self.parser.parse(format) assert_equal(ret.__dict__, ref.__dict__) def test_simple_int(self): self._test_equal("(I4)", IntFormat(4)) def test_simple_repeated_int(self): self._test_equal("(3I4)", IntFormat(4, repeat=3)) def test_simple_exp(self): self._test_equal("(E4.3)", ExpFormat(4, 3)) def test_exp_exp(self): self._test_equal("(E8.3E3)", ExpFormat(8, 3, 3)) def test_repeat_exp(self): self._test_equal("(2E4.3)", ExpFormat(4, 3, repeat=2)) def test_repeat_exp_exp(self): self._test_equal("(2E8.3E3)", ExpFormat(8, 3, 3, repeat=2)) def test_wrong_formats(self): def _test_invalid(bad_format): assert_raises(BadFortranFormat, lambda: self.parser.parse(bad_format)) _test_invalid("I4") _test_invalid("(E4)") _test_invalid("(E4.)") _test_invalid("(E4.E3)") class TestIntFormat(object): def test_to_fortran(self): f = [IntFormat(10), IntFormat(12, 10), IntFormat(12, 10, 3)] res = ["(I10)", "(I12.10)", "(3I12.10)"] for i, j in zip(f, res): assert_equal(i.fortran_format, j) def test_from_number(self): f = [10, -12, 123456789] r_f = [IntFormat(3, repeat=26), IntFormat(4, repeat=20), IntFormat(10, repeat=8)] for i, j in zip(f, r_f): assert_equal(IntFormat.from_number(i).__dict__, j.__dict__) class TestExpFormat(object): def test_to_fortran(self): f = [ExpFormat(10, 5), ExpFormat(12, 10), ExpFormat(12, 10, min=3), ExpFormat(10, 5, repeat=3)] res = ["(E10.5)", "(E12.10)", "(E12.10E3)", "(3E10.5)"] for i, j in zip(f, res): assert_equal(i.fortran_format, j) def test_from_number(self): f = np.array([1.0, -1.2]) r_f = [ExpFormat(24, 16, repeat=3), ExpFormat(25, 16, repeat=3)] for i, j in zip(f, r_f): assert_equal(ExpFormat.from_number(i).__dict__, j.__dict__)
2,495
31
82
py
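The from_number tests above pin down how a format is chosen for given data; a brief sketch of using those classmethods directly (expected strings copied from the tests):

# Sketch: from_number picks a field width wide enough for the value,
# with a repeat count that fits on one line.
from scipy.io.harwell_boeing._fortran_format_parser import IntFormat, ExpFormat

print(IntFormat.from_number(-12).fortran_format)   # "(20I4)"
print(ExpFormat.from_number(1.0).fortran_format)   # "(3E24.16)"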
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/harwell_boeing/tests/test_hb.py
from __future__ import division, print_function, absolute_import import os import sys if sys.version_info[0] >= 3: from io import StringIO else: from StringIO import StringIO import tempfile import numpy as np from numpy.testing import assert_equal, \ assert_array_almost_equal_nulp from scipy.sparse import coo_matrix, csc_matrix, rand from scipy.io import hb_read, hb_write from scipy.io.harwell_boeing import HBFile, HBInfo SIMPLE = """\ No Title |No Key 9 4 1 4 RUA 100 100 10 0 (26I3) (26I3) (3E23.15) 1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11 37 71 89 18 30 45 70 19 25 52 2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01 6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01 4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01 6.912334991524289e-01 """ SIMPLE_MATRIX = coo_matrix( ( (0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799, 0.0661749042483, 0.887037034319, 0.419647859016, 0.564960307211, 0.993442388709, 0.691233499152,), (np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51], [0, 4, 58, 61, 61, 72, 72, 73, 99, 99]])))) def assert_csc_almost_equal(r, l): r = csc_matrix(r) l = csc_matrix(l) assert_equal(r.indptr, l.indptr) assert_equal(r.indices, l.indices) assert_array_almost_equal_nulp(r.data, l.data, 10000) class TestHBReader(object): def test_simple(self): m = hb_read(StringIO(SIMPLE)) assert_csc_almost_equal(m, SIMPLE_MATRIX) class TestRBRoundtrip(object): def test_simple(self): rm = rand(100, 1000, 0.05).tocsc() fd, filename = tempfile.mkstemp(suffix="rb") try: hb_write(filename, rm, HBInfo.from_data(rm)) m = hb_read(filename) finally: os.close(fd) os.remove(filename) assert_csc_almost_equal(m, rm)
2,375
31.547945
79
py
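TestRBRoundtrip above goes through a temporary file; TestHBReader shows hb_read accepting an open text buffer. A sketch of a fully in-memory round trip, assuming hb_write likewise accepts an open buffer and derives the header info from the matrix when none is given:

# Sketch only, under the assumptions stated above.
from io import StringIO
from scipy.io import hb_read, hb_write
from scipy.sparse import csc_matrix, rand

m = rand(20, 20, 0.1).tocsc()
buf = StringIO()
hb_write(buf, m)               # header info derived from the matrix
buf.seek(0)
m2 = csc_matrix(hb_read(buf))
assert (m2 != m).nnz == 0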
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/harwell_boeing/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/arff/arffread.py
# Last Change: Mon Aug 20 08:00 PM 2007 J
from __future__ import division, print_function, absolute_import

import re
import itertools
import datetime
from functools import partial

import numpy as np

from scipy._lib.six import next

"""A module to read arff files."""

__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']

# An Arff file is basically two parts:
#   - header
#   - data
#
# A header has each of its components starting by @META where META is one of
# the keywords (attribute or relation, for now).

# TODO:
#   - both integer and reals are treated as numeric -> the integer info
#     is lost!
#   - Replace ValueError by ParseError or something

# We can now handle the following:
#   - numeric and nominal attributes
#   - missing values for numeric attributes

r_meta = re.compile(r'^\s*@')
# Match a comment
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
# Match a header line, that is a line which starts by @ + a word
r_headerline = re.compile(r'^@\S*')
r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
r_attribute = re.compile(r'^@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')

# To get attribute names enclosed in ''
r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
# To get normal attributes
r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")

#-------------------------
# Module defined exception
#-------------------------


class ArffError(IOError):
    pass


class ParseArffError(ArffError):
    pass

#------------------
# Various utilities
#------------------

# An attribute is defined as @attribute name value


def parse_type(attrtype):
    """Given an arff attribute value (meta data), returns its type.

    Expects the value to be a name."""
    uattribute = attrtype.lower().strip()
    if uattribute[0] == '{':
        return 'nominal'
    elif uattribute[:len('real')] == 'real':
        return 'numeric'
    elif uattribute[:len('integer')] == 'integer':
        return 'numeric'
    elif uattribute[:len('numeric')] == 'numeric':
        return 'numeric'
    elif uattribute[:len('string')] == 'string':
        return 'string'
    elif uattribute[:len('relational')] == 'relational':
        return 'relational'
    elif uattribute[:len('date')] == 'date':
        return 'date'
    else:
        raise ParseArffError("unknown attribute %s" % uattribute)


def get_nominal(attribute):
    """If attribute is nominal, returns a list of the values."""
    return attribute.split(',')


def read_data_list(ofile):
    """Read each line of the iterable and put it in a list."""
    data = [next(ofile)]
    if data[0].strip()[0] == '{':
        raise ValueError("This looks like a sparse ARFF: not supported yet")
    data.extend([i for i in ofile])
    return data


def get_ndata(ofile):
    """Read the whole file to get the number of data rows."""
    data = [next(ofile)]
    loc = 1
    if data[0].strip()[0] == '{':
        raise ValueError("This looks like a sparse ARFF: not supported yet")
    for i in ofile:
        loc += 1
    return loc


def maxnomlen(atrv):
    """Given a string containing a nominal type definition, returns the
    string length of the biggest component.

    A nominal type is defined as something framed between braces ({}).

    Parameters
    ----------
    atrv : str
       Nominal type definition

    Returns
    -------
    slen : int
       length of longest component

    Examples
    --------
    maxnomlen("{floup, bouga, fl, ratata}") returns 6 (the size of
    ratata, the longest nominal value).

    >>> maxnomlen("{floup, bouga, fl, ratata}")
    6
    """
    nomtp = get_nom_val(atrv)
    return max(len(i) for i in nomtp)


def get_nom_val(atrv):
    """Given a string containing a nominal type, returns a tuple of the
    possible values.
    A nominal type is defined as something framed between braces ({}).

    Parameters
    ----------
    atrv : str
       Nominal type definition

    Returns
    -------
    poss_vals : tuple
       possible values

    Examples
    --------
    >>> get_nom_val("{floup, bouga, fl, ratata}")
    ('floup', 'bouga', 'fl', 'ratata')
    """
    r_nominal = re.compile('{(.+)}')
    m = r_nominal.match(atrv)
    if m:
        return tuple(i.strip() for i in m.group(1).split(','))
    else:
        raise ValueError("This does not look like a nominal string")


def get_date_format(atrv):
    r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
    m = r_date.match(atrv)
    if m:
        pattern = m.group(1).strip()
        # convert time pattern from Java's SimpleDateFormat to C's format
        datetime_unit = None
        if "yyyy" in pattern:
            pattern = pattern.replace("yyyy", "%Y")
            datetime_unit = "Y"
        elif "yy" in pattern:
            pattern = pattern.replace("yy", "%y")
            datetime_unit = "Y"
        if "MM" in pattern:
            pattern = pattern.replace("MM", "%m")
            datetime_unit = "M"
        if "dd" in pattern:
            pattern = pattern.replace("dd", "%d")
            datetime_unit = "D"
        if "HH" in pattern:
            pattern = pattern.replace("HH", "%H")
            datetime_unit = "h"
        if "mm" in pattern:
            pattern = pattern.replace("mm", "%M")
            datetime_unit = "m"
        if "ss" in pattern:
            pattern = pattern.replace("ss", "%S")
            datetime_unit = "s"
        if "z" in pattern or "Z" in pattern:
            raise ValueError("Date type attributes with time zone not "
                             "supported, yet")

        if datetime_unit is None:
            raise ValueError("Invalid or unsupported date format")

        return pattern, datetime_unit
    else:
        raise ValueError("Invalid or no date format")


def go_data(ofile):
    """Skip the header.

    The first next() call of the returned iterator will be the @data line."""
    return itertools.dropwhile(lambda x: not r_datameta.match(x), ofile)


#----------------
# Parsing header
#----------------
def tokenize_attribute(iterable, attribute):
    """Parse a raw string in the header (e.g. one starting with @attribute).

    Given a raw string attribute, try to get the name and type of the
    attribute. Constraints:

    * The first line must start with @attribute (case insensitive, and
      space-like characters before @attribute are allowed).
    * Works also if the attribute is spread over multiple lines.
    * Works if empty lines or comments are in between.

    Parameters
    ----------
    attribute : str
       the attribute string.

    Returns
    -------
    name : str
       name of the attribute
    value : str
       value of the attribute
    next : str
       next line to be parsed

    Examples
    --------
    If attribute is a string defined in python as r"floupi real", will
    return floupi as name, and real as value.

    >>> iterable = iter([0] * 10) # dummy iterator
    >>> tokenize_attribute(iterable, r"@attribute floupi real")
    ('floupi', 'real', 0)

    If attribute is r"'floupi 2' real", will return 'floupi 2' as name,
    and real as value.

    >>> tokenize_attribute(iterable, r"  @attribute 'floupi 2' real   ")
    ('floupi 2', 'real', 0)
    """
    sattr = attribute.strip()
    mattr = r_attribute.match(sattr)
    if mattr:
        # atrv is everything after @attribute
        atrv = mattr.group(1)
        if r_comattrval.match(atrv):
            name, type = tokenize_single_comma(atrv)
            next_item = next(iterable)
        elif r_wcomattrval.match(atrv):
            name, type = tokenize_single_wcomma(atrv)
            next_item = next(iterable)
        else:
            # Not sure we should support this, as it does not seem supported by
            # weka.
raise ValueError("multi line not supported yet") #name, type, next_item = tokenize_multilines(iterable, atrv) else: raise ValueError("First line unparsable: %s" % sattr) if type == 'relational': raise ValueError("relational attributes not supported yet") return name, type, next_item def tokenize_single_comma(val): # XXX we match twice the same string (here and at the caller level). It is # stupid, but it is easier for now... m = r_comattrval.match(val) if m: try: name = m.group(1).strip() type = m.group(2).strip() except IndexError: raise ValueError("Error while tokenizing attribute") else: raise ValueError("Error while tokenizing single %s" % val) return name, type def tokenize_single_wcomma(val): # XXX we match twice the same string (here and at the caller level). It is # stupid, but it is easier for now... m = r_wcomattrval.match(val) if m: try: name = m.group(1).strip() type = m.group(2).strip() except IndexError: raise ValueError("Error while tokenizing attribute") else: raise ValueError("Error while tokenizing single %s" % val) return name, type def read_header(ofile): """Read the header of the iterable ofile.""" i = next(ofile) # Pass first comments while r_comment.match(i): i = next(ofile) # Header is everything up to DATA attribute ? relation = None attributes = [] while not r_datameta.match(i): m = r_headerline.match(i) if m: isattr = r_attribute.match(i) if isattr: name, type, i = tokenize_attribute(ofile, i) attributes.append((name, type)) else: isrel = r_relation.match(i) if isrel: relation = isrel.group(1) else: raise ValueError("Error parsing line %s" % i) i = next(ofile) else: i = next(ofile) return relation, attributes #-------------------- # Parsing actual data #-------------------- def safe_float(x): """given a string x, convert it to a float. If the stripped string is a ?, return a Nan (missing value). Parameters ---------- x : str string to convert Returns ------- f : float where float can be nan Examples -------- >>> safe_float('1') 1.0 >>> safe_float('1\\n') 1.0 >>> safe_float('?\\n') nan """ if '?' in x: return np.nan else: return float(x) def safe_nominal(value, pvalue): svalue = value.strip() if svalue in pvalue: return svalue elif svalue == '?': return svalue else: raise ValueError("%s value not in %s" % (str(svalue), str(pvalue))) def safe_date(value, date_format, datetime_unit): date_str = value.strip().strip("'").strip('"') if date_str == '?': return np.datetime64('NaT', datetime_unit) else: dt = datetime.datetime.strptime(date_str, date_format) return np.datetime64(dt).astype("datetime64[%s]" % datetime_unit) class MetaData(object): """Small container to keep useful information on a ARFF dataset. Knows about attributes names and types. Examples -------- :: data, meta = loadarff('iris.arff') # This will print the attributes names of the iris.arff dataset for i in meta: print(i) # This works too meta.names() # Getting attribute type types = meta.types() Notes ----- Also maintains the list of attributes in order, i.e. doing for i in meta, where meta is an instance of MetaData, will return the different attribute names in the order they were defined. 
""" def __init__(self, rel, attr): self.name = rel # We need the dictionary to be ordered # XXX: may be better to implement an ordered dictionary self._attributes = {} self._attrnames = [] for name, value in attr: tp = parse_type(value) self._attrnames.append(name) if tp == 'nominal': self._attributes[name] = (tp, get_nom_val(value)) elif tp == 'date': self._attributes[name] = (tp, get_date_format(value)[0]) else: self._attributes[name] = (tp, None) def __repr__(self): msg = "" msg += "Dataset: %s\n" % self.name for i in self._attrnames: msg += "\t%s's type is %s" % (i, self._attributes[i][0]) if self._attributes[i][1]: msg += ", range is %s" % str(self._attributes[i][1]) msg += '\n' return msg def __iter__(self): return iter(self._attrnames) def __getitem__(self, key): return self._attributes[key] def names(self): """Return the list of attribute names.""" return self._attrnames def types(self): """Return the list of attribute types.""" attr_types = [self._attributes[name][0] for name in self._attrnames] return attr_types def loadarff(f): """ Read an arff file. The data is returned as a record array, which can be accessed much like a dictionary of numpy arrays. For example, if one of the attributes is called 'pressure', then its first 10 data points can be accessed from the ``data`` record array like so: ``data['pressure'][0:10]`` Parameters ---------- f : file-like or str File-like object to read from, or filename to open. Returns ------- data : record array The data of the arff file, accessible by attribute names. meta : `MetaData` Contains information about the arff file such as name and type of attributes, the relation (name of the dataset), etc... Raises ------ ParseArffError This is raised if the given file is not ARFF-formatted. NotImplementedError The ARFF file has an attribute which is not supported yet. Notes ----- This function should be able to read most arff files. Not implemented functionality include: * date type attributes * string type attributes It can read files with numeric and nominal attributes. It cannot read files with sparse data ({} in the file). However, this function can read files with missing data (? in the file), representing the data points as NaNs. Examples -------- >>> from scipy.io import arff >>> from io import StringIO >>> content = \"\"\" ... @relation foo ... @attribute width numeric ... @attribute height numeric ... @attribute color {red,green,blue,yellow,black} ... @data ... 5.0,3.25,blue ... 4.5,3.75,green ... 3.0,4.00,red ... 
\"\"\" >>> f = StringIO(content) >>> data, meta = arff.loadarff(f) >>> data array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')], dtype=[('width', '<f8'), ('height', '<f8'), ('color', '|S6')]) >>> meta Dataset: foo \twidth's type is numeric \theight's type is numeric \tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black') """ if hasattr(f, 'read'): ofile = f else: ofile = open(f, 'rt') try: return _loadarff(ofile) finally: if ofile is not f: # only close what we opened ofile.close() def _loadarff(ofile): # Parse the header file try: rel, attr = read_header(ofile) except ValueError as e: msg = "Error while parsing header, error was: " + str(e) raise ParseArffError(msg) # Check whether we have a string attribute (not supported yet) hasstr = False for name, value in attr: type = parse_type(value) if type == 'string': hasstr = True meta = MetaData(rel, attr) # XXX The following code is not great # Build the type descriptor descr and the list of convertors to convert # each attribute to the suitable type (which should match the one in # descr). # This can be used once we want to support integer as integer values and # not as numeric anymore (using masked arrays ?). acls2dtype = {'real': float, 'integer': float, 'numeric': float} acls2conv = {'real': safe_float, 'integer': safe_float, 'numeric': safe_float} descr = [] convertors = [] if not hasstr: for name, value in attr: type = parse_type(value) if type == 'date': date_format, datetime_unit = get_date_format(value) descr.append((name, "datetime64[%s]" % datetime_unit)) convertors.append(partial(safe_date, date_format=date_format, datetime_unit=datetime_unit)) elif type == 'nominal': n = maxnomlen(value) descr.append((name, 'S%d' % n)) pvalue = get_nom_val(value) convertors.append(partial(safe_nominal, pvalue=pvalue)) else: descr.append((name, acls2dtype[type])) convertors.append(safe_float) #dc.append(acls2conv[type]) #sdescr.append((name, acls2sdtype[type])) else: # How to support string efficiently ? Ideally, we should know the max # size of the string before allocating the numpy array. raise NotImplementedError("String attributes not supported yet, sorry") ni = len(convertors) def generator(row_iter, delim=','): # TODO: this is where we are spending times (~80%). I think things # could be made more efficiently: # - We could for example "compile" the function, because some values # do not change here. # - The function to convert a line to dtyped values could also be # generated on the fly from a string and be executed instead of # looping. # - The regex are overkill: for comments, checking that a line starts # by % should be enough and faster, and for empty lines, same thing # --> this does not seem to change anything. # 'compiling' the range since it does not change # Note, I have already tried zipping the converters and # row elements and got slightly worse performance. elems = list(range(ni)) for raw in row_iter: # We do not abstract skipping comments and empty lines for # performance reasons. if r_comment.match(raw) or r_empty.match(raw): continue row = raw.split(delim) yield tuple([convertors[i](row[i]) for i in elems]) a = generator(ofile) # No error should happen here: it is a bug otherwise data = np.fromiter(a, descr) return data, meta #----- # Misc #----- def basic_stats(data): nbfac = data.size * 1. 
/ (data.size - 1) return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac def print_attribute(name, tp, data): type = tp[0] if type == 'numeric' or type == 'real' or type == 'integer': min, max, mean, std = basic_stats(data) print("%s,%s,%f,%f,%f,%f" % (name, type, min, max, mean, std)) else: msg = name + ",{" for i in range(len(tp[1])-1): msg += tp[1][i] + "," msg += tp[1][-1] msg += "}" print(msg) def test_weka(filename): data, meta = loadarff(filename) print(len(data.dtype)) print(data.size) for i in meta: print_attribute(i, meta[i], data[i]) # make sure nose does not find this as a test test_weka.__test__ = False if __name__ == '__main__': import sys filename = sys.argv[1] test_weka(filename)
19,810
28.52459
83
py
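One step in arffread worth illustrating is the date handling: get_date_format translates a Java SimpleDateFormat pattern into a C strptime pattern plus the matching numpy datetime64 unit. A small sketch, importing the module-level helper directly (the same import style the test module below uses):

# Sketch: "yyyy-MM-dd HH:mm" becomes a strptime pattern and the finest
# datetime64 unit mentioned in the pattern.
from scipy.io.arff.arffread import get_date_format

print(get_date_format('date "yyyy-MM-dd HH:mm"'))   # ('%Y-%m-%d %H:%M', 'm')
print(get_date_format('date yyyy'))                 # ('%Y', 'Y')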
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/arff/setup.py
from __future__ import division, print_function, absolute_import def configuration(parent_package='io',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('arff', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
410
28.357143
64
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/arff/__init__.py
""" Module to read ARFF files, which are the standard data format for WEKA. ARFF is a text file format which support numerical, string and data values. The format can also represent missing data and sparse data. Notes ----- The ARFF support in ``scipy.io`` provides file reading functionality only. For more extensive ARFF functionality, see `liac-arff <https://github.com/renatopp/liac-arff>`_. See the `WEKA website <http://weka.wikispaces.com/ARFF>`_ for more details about the ARFF format and available datasets. """ from __future__ import division, print_function, absolute_import from .arffread import * from . import arffread __all__ = arffread.__all__ from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
761
27.222222
75
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/arff/tests/test_arffread.py
from __future__ import division, print_function, absolute_import import datetime import os import sys from os.path import join as pjoin if sys.version_info[0] >= 3: from io import StringIO else: from cStringIO import StringIO import numpy as np from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_equal, assert_) import pytest from pytest import raises as assert_raises from scipy.io.arff.arffread import loadarff from scipy.io.arff.arffread import read_header, parse_type, ParseArffError data_path = pjoin(os.path.dirname(__file__), 'data') test1 = pjoin(data_path, 'test1.arff') test2 = pjoin(data_path, 'test2.arff') test3 = pjoin(data_path, 'test3.arff') test4 = pjoin(data_path, 'test4.arff') test5 = pjoin(data_path, 'test5.arff') test6 = pjoin(data_path, 'test6.arff') test7 = pjoin(data_path, 'test7.arff') test8 = pjoin(data_path, 'test8.arff') expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'), (-0.1, -0.2, -0.3, -0.4, 'class2'), (1, 2, 3, 4, 'class3')] expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal'] missing = pjoin(data_path, 'missing.arff') expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) expect_missing = np.empty(3, [('yop', float), ('yap', float)]) expect_missing['yop'] = expect_missing_raw[:, 0] expect_missing['yap'] = expect_missing_raw[:, 1] class TestData(object): def test1(self): # Parsing trivial file with nothing. self._test(test4) def test2(self): # Parsing trivial file with some comments in the data section. self._test(test5) def test3(self): # Parsing trivial file with nominal attribute of 1 character. self._test(test6) def _test(self, test_file): data, meta = loadarff(test_file) for i in range(len(data)): for j in range(4): assert_array_almost_equal(expect4_data[i][j], data[i][j]) assert_equal(meta.types(), expected_types) def test_filelike(self): # Test reading from file-like object (StringIO) f1 = open(test1) data1, meta1 = loadarff(f1) f1.close() f2 = open(test1) data2, meta2 = loadarff(StringIO(f2.read())) f2.close() assert_(data1 == data2) assert_(repr(meta1) == repr(meta2)) @pytest.mark.skipif(sys.version_info < (3, 6), reason='Passing path-like objects to IO functions requires Python >= 3.6') def test_path(self): # Test reading from `pathlib.Path` object from pathlib import Path with open(test1) as f1: data1, meta1 = loadarff(f1) data2, meta2 = loadarff(Path(test1)) assert_(data1 == data2) assert_(repr(meta1) == repr(meta2)) class TestMissingData(object): def test_missing(self): data, meta = loadarff(missing) for i in ['yop', 'yap']: assert_array_almost_equal(data[i], expect_missing[i]) class TestNoData(object): def test_nodata(self): # The file nodata.arff has no data in the @DATA section. # Reading it should result in an array with length 0. nodata_filename = os.path.join(data_path, 'nodata.arff') data, meta = loadarff(nodata_filename) expected_dtype = np.dtype([('sepallength', '<f8'), ('sepalwidth', '<f8'), ('petallength', '<f8'), ('petalwidth', '<f8'), ('class', 'S15')]) assert_equal(data.dtype, expected_dtype) assert_equal(data.size, 0) class TestHeader(object): def test_type_parsing(self): # Test parsing type of attribute from their value. ofile = open(test2) rel, attrs = read_header(ofile) ofile.close() expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'string', 'string', 'nominal', 'nominal'] for i in range(len(attrs)): assert_(parse_type(attrs[i][1]) == expected[i]) def test_badtype_parsing(self): # Test parsing wrong type of attribute from their value. 
ofile = open(test3) rel, attrs = read_header(ofile) ofile.close() for name, value in attrs: assert_raises(ParseArffError, parse_type, value) def test_fullheader1(self): # Parsing trivial header with nothing. ofile = open(test1) rel, attrs = read_header(ofile) ofile.close() # Test relation assert_(rel == 'test1') # Test numerical attributes assert_(len(attrs) == 5) for i in range(4): assert_(attrs[i][0] == 'attr%d' % i) assert_(attrs[i][1] == 'REAL') # Test nominal attribute assert_(attrs[4][0] == 'class') assert_(attrs[4][1] == '{class0, class1, class2, class3}') def test_dateheader(self): ofile = open(test7) rel, attrs = read_header(ofile) ofile.close() assert_(rel == 'test7') assert_(len(attrs) == 5) assert_(attrs[0][0] == 'attr_year') assert_(attrs[0][1] == 'DATE yyyy') assert_(attrs[1][0] == 'attr_month') assert_(attrs[1][1] == 'DATE yyyy-MM') assert_(attrs[2][0] == 'attr_date') assert_(attrs[2][1] == 'DATE yyyy-MM-dd') assert_(attrs[3][0] == 'attr_datetime_local') assert_(attrs[3][1] == 'DATE "yyyy-MM-dd HH:mm"') assert_(attrs[4][0] == 'attr_datetime_missing') assert_(attrs[4][1] == 'DATE "yyyy-MM-dd HH:mm"') def test_dateheader_unsupported(self): ofile = open(test8) rel, attrs = read_header(ofile) ofile.close() assert_(rel == 'test8') assert_(len(attrs) == 2) assert_(attrs[0][0] == 'attr_datetime_utc') assert_(attrs[0][1] == 'DATE "yyyy-MM-dd HH:mm Z"') assert_(attrs[1][0] == 'attr_datetime_full') assert_(attrs[1][1] == 'DATE "yy-MM-dd HH:mm:ss z"') class TestDateAttribute(object): def setup_method(self): self.data, self.meta = loadarff(test7) def test_year_attribute(self): expected = np.array([ '1999', '2004', '1817', '2100', '2013', '1631' ], dtype='datetime64[Y]') assert_array_equal(self.data["attr_year"], expected) def test_month_attribute(self): expected = np.array([ '1999-01', '2004-12', '1817-04', '2100-09', '2013-11', '1631-10' ], dtype='datetime64[M]') assert_array_equal(self.data["attr_month"], expected) def test_date_attribute(self): expected = np.array([ '1999-01-31', '2004-12-01', '1817-04-28', '2100-09-10', '2013-11-30', '1631-10-15' ], dtype='datetime64[D]') assert_array_equal(self.data["attr_date"], expected) def test_datetime_local_attribute(self): expected = np.array([ datetime.datetime(year=1999, month=1, day=31, hour=0, minute=1), datetime.datetime(year=2004, month=12, day=1, hour=23, minute=59), datetime.datetime(year=1817, month=4, day=28, hour=13, minute=0), datetime.datetime(year=2100, month=9, day=10, hour=12, minute=0), datetime.datetime(year=2013, month=11, day=30, hour=4, minute=55), datetime.datetime(year=1631, month=10, day=15, hour=20, minute=4) ], dtype='datetime64[m]') assert_array_equal(self.data["attr_datetime_local"], expected) def test_datetime_missing(self): expected = np.array([ 'nat', '2004-12-01T23:59', 'nat', 'nat', '2013-11-30T04:55', '1631-10-15T20:04' ], dtype='datetime64[m]') assert_array_equal(self.data["attr_datetime_missing"], expected) def test_datetime_timezone(self): assert_raises(ValueError, loadarff, test8)
8,190
30.503846
98
py
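A sketch tying the date attributes exercised above back to loadarff, using hypothetical inline content instead of the test data directory (behaviour as pinned down by TestDateAttribute):

# Sketch: date attributes come back as numpy datetime64 columns.
from io import StringIO
from scipy.io import arff

content = """@relation dates
@attribute d date "yyyy-MM-dd"
@data
"1999-01-31"
"2004-12-01"
"""
data, meta = arff.loadarff(StringIO(content))
print(data['d'])   # datetime64[D]: ['1999-01-31' '2004-12-01']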
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/arff/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/tests/test_paths.py
""" Ensure that we can use pathlib.Path objects in all relevant IO functions. """ import sys try: from pathlib import Path except ImportError: # Not available. No fallback import, since we'll skip the entire # test suite for Python < 3.6. pass import numpy as np from numpy.testing import assert_ import pytest import scipy.io import scipy.io.wavfile from scipy._lib._tmpdirs import tempdir import scipy.sparse @pytest.mark.skipif(sys.version_info < (3, 6), reason='Passing path-like objects to IO functions requires Python >= 3.6') class TestPaths(object): data = np.arange(5).astype(np.int64) def test_savemat(self): with tempdir() as temp_dir: path = Path(temp_dir) / 'data.mat' scipy.io.savemat(path, {'data': self.data}) assert_(path.is_file()) def test_loadmat(self): # Save data with string path, load with pathlib.Path with tempdir() as temp_dir: path = Path(temp_dir) / 'data.mat' scipy.io.savemat(str(path), {'data': self.data}) mat_contents = scipy.io.loadmat(path) assert_((mat_contents['data'] == self.data).all()) def test_whosmat(self): # Save data with string path, load with pathlib.Path with tempdir() as temp_dir: path = Path(temp_dir) / 'data.mat' scipy.io.savemat(str(path), {'data': self.data}) contents = scipy.io.whosmat(path) assert_(contents[0] == ('data', (1, 5), 'int64')) def test_readsav(self): path = Path(__file__).parent / 'data/scalar_string.sav' scipy.io.readsav(path) def test_hb_read(self): # Save data with string path, load with pathlib.Path with tempdir() as temp_dir: data = scipy.sparse.csr_matrix(scipy.sparse.eye(3)) path = Path(temp_dir) / 'data.hb' scipy.io.harwell_boeing.hb_write(str(path), data) data_new = scipy.io.harwell_boeing.hb_read(path) assert_((data_new != data).nnz == 0) def test_hb_write(self): with tempdir() as temp_dir: data = scipy.sparse.csr_matrix(scipy.sparse.eye(3)) path = Path(temp_dir) / 'data.hb' scipy.io.harwell_boeing.hb_write(path, data) assert_(path.is_file()) def test_netcdf_file(self): path = Path(__file__).parent / 'data/example_1.nc' scipy.io.netcdf.netcdf_file(path) def test_wavfile_read(self): path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav' scipy.io.wavfile.read(path) def test_wavfile_write(self): # Read from str path, write to Path input_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav' rate, data = scipy.io.wavfile.read(str(input_path)) with tempdir() as temp_dir: output_path = Path(temp_dir) / input_path.name scipy.io.wavfile.write(output_path, rate, data)
2,994
32.651685
94
py
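The same pattern outside the test harness, as a sketch (hypothetical filename; requires Python >= 3.6, as the skip marker above notes):

# Sketch: scipy.io accepts pathlib.Path wherever it accepts str paths.
from pathlib import Path
import numpy as np
import scipy.io

path = Path('data.mat')   # hypothetical location
scipy.io.savemat(path, {'data': np.arange(5).astype(np.int64)})
print(scipy.io.whosmat(path))   # [('data', (1, 5), 'int64')] per the test above
mat = scipy.io.loadmat(path)
print(mat['data'])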
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/tests/test_netcdf.py
''' Tests for netcdf ''' from __future__ import division, print_function, absolute_import import os from os.path import join as pjoin, dirname import shutil import tempfile import warnings from io import BytesIO from glob import glob from contextlib import contextmanager import numpy as np from numpy.testing import assert_, assert_allclose, assert_equal from pytest import raises as assert_raises from scipy.io.netcdf import netcdf_file, IS_PYPY from scipy._lib._numpy_compat import suppress_warnings from scipy._lib._tmpdirs import in_tempdir TEST_DATA_PATH = pjoin(dirname(__file__), 'data') N_EG_ELS = 11 # number of elements for example variable VARTYPE_EG = 'b' # var type for example variable @contextmanager def make_simple(*args, **kwargs): f = netcdf_file(*args, **kwargs) f.history = 'Created for a test' f.createDimension('time', N_EG_ELS) time = f.createVariable('time', VARTYPE_EG, ('time',)) time[:] = np.arange(N_EG_ELS) time.units = 'days since 2008-01-01' f.flush() yield f f.close() def check_simple(ncfileobj): '''Example fileobj tests ''' assert_equal(ncfileobj.history, b'Created for a test') time = ncfileobj.variables['time'] assert_equal(time.units, b'days since 2008-01-01') assert_equal(time.shape, (N_EG_ELS,)) assert_equal(time[-1], N_EG_ELS-1) def assert_mask_matches(arr, expected_mask): ''' Asserts that the mask of arr is effectively the same as expected_mask. In contrast to numpy.ma.testutils.assert_mask_equal, this function allows testing the 'mask' of a standard numpy array (the mask in this case is treated as all False). Parameters ---------- arr: ndarray or MaskedArray Array to test. expected_mask: array_like of booleans A list giving the expected mask. ''' mask = np.ma.getmaskarray(arr) assert_equal(mask, expected_mask) def test_read_write_files(): # test round trip for example file cwd = os.getcwd() try: tmpdir = tempfile.mkdtemp() os.chdir(tmpdir) with make_simple('simple.nc', 'w') as f: pass # read the file we just created in 'a' mode with netcdf_file('simple.nc', 'a') as f: check_simple(f) # add something f._attributes['appendRan'] = 1 # To read the NetCDF file we just created:: with netcdf_file('simple.nc') as f: # Using mmap is the default (but not on pypy) assert_equal(f.use_mmap, not IS_PYPY) check_simple(f) assert_equal(f._attributes['appendRan'], 1) # Read it in append (and check mmap is off) with netcdf_file('simple.nc', 'a') as f: assert_(not f.use_mmap) check_simple(f) assert_equal(f._attributes['appendRan'], 1) # Now without mmap with netcdf_file('simple.nc', mmap=False) as f: # Using mmap is the default assert_(not f.use_mmap) check_simple(f) # To read the NetCDF file we just created, as file object, no # mmap. 
When n * n_bytes(var_type) is not divisible by 4, this # raised an error in pupynere 1.0.12 and scipy rev 5893, because # calculated vsize was rounding up in units of 4 - see # https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html with open('simple.nc', 'rb') as fobj: with netcdf_file(fobj) as f: # by default, don't use mmap for file-like assert_(not f.use_mmap) check_simple(f) # Read file from fileobj, with mmap with suppress_warnings() as sup: if IS_PYPY: sup.filter(RuntimeWarning, "Cannot close a netcdf_file opened with mmap=True.*") with open('simple.nc', 'rb') as fobj: with netcdf_file(fobj, mmap=True) as f: assert_(f.use_mmap) check_simple(f) # Again read it in append mode (adding another att) with open('simple.nc', 'r+b') as fobj: with netcdf_file(fobj, 'a') as f: assert_(not f.use_mmap) check_simple(f) f.createDimension('app_dim', 1) var = f.createVariable('app_var', 'i', ('app_dim',)) var[:] = 42 # And... check that app_var made it in... with netcdf_file('simple.nc') as f: check_simple(f) assert_equal(f.variables['app_var'][:], 42) except: os.chdir(cwd) shutil.rmtree(tmpdir) raise os.chdir(cwd) shutil.rmtree(tmpdir) def test_read_write_sio(): eg_sio1 = BytesIO() with make_simple(eg_sio1, 'w') as f1: str_val = eg_sio1.getvalue() eg_sio2 = BytesIO(str_val) with netcdf_file(eg_sio2) as f2: check_simple(f2) # Test that error is raised if attempting mmap for sio eg_sio3 = BytesIO(str_val) assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True) # Test 64-bit offset write / read eg_sio_64 = BytesIO() with make_simple(eg_sio_64, 'w', version=2) as f_64: str_val = eg_sio_64.getvalue() eg_sio_64 = BytesIO(str_val) with netcdf_file(eg_sio_64) as f_64: check_simple(f_64) assert_equal(f_64.version_byte, 2) # also when version 2 explicitly specified eg_sio_64 = BytesIO(str_val) with netcdf_file(eg_sio_64, version=2) as f_64: check_simple(f_64) assert_equal(f_64.version_byte, 2) def test_bytes(): raw_file = BytesIO() f = netcdf_file(raw_file, mode='w') # Dataset only has a single variable, dimension and attribute to avoid # any ambiguity related to order. f.a = 'b' f.createDimension('dim', 1) var = f.createVariable('var', np.int16, ('dim',)) var[0] = -9999 var.c = 'd' f.sync() actual = raw_file.getvalue() expected = (b'CDF\x01' b'\x00\x00\x00\x00' b'\x00\x00\x00\x0a' b'\x00\x00\x00\x01' b'\x00\x00\x00\x03' b'dim\x00' b'\x00\x00\x00\x01' b'\x00\x00\x00\x0c' b'\x00\x00\x00\x01' b'\x00\x00\x00\x01' b'a\x00\x00\x00' b'\x00\x00\x00\x02' b'\x00\x00\x00\x01' b'b\x00\x00\x00' b'\x00\x00\x00\x0b' b'\x00\x00\x00\x01' b'\x00\x00\x00\x03' b'var\x00' b'\x00\x00\x00\x01' b'\x00\x00\x00\x00' b'\x00\x00\x00\x0c' b'\x00\x00\x00\x01' b'\x00\x00\x00\x01' b'c\x00\x00\x00' b'\x00\x00\x00\x02' b'\x00\x00\x00\x01' b'd\x00\x00\x00' b'\x00\x00\x00\x03' b'\x00\x00\x00\x04' b'\x00\x00\x00\x78' b'\xd8\xf1\x80\x01') assert_equal(actual, expected) def test_encoded_fill_value(): with netcdf_file(BytesIO(), mode='w') as f: f.createDimension('x', 1) var = f.createVariable('var', 'S1', ('x',)) assert_equal(var._get_encoded_fill_value(), b'\x00') var._FillValue = b'\x01' assert_equal(var._get_encoded_fill_value(), b'\x01') var._FillValue = b'\x00\x00' # invalid, wrong size assert_equal(var._get_encoded_fill_value(), b'\x00') def test_read_example_data(): # read any example data files for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')): with netcdf_file(fname, 'r') as f: pass with netcdf_file(fname, 'r', mmap=False) as f: pass def test_itemset_no_segfault_on_readonly(): # Regression test for ticket #1202. 
# Open the test file in read-only mode. filename = pjoin(TEST_DATA_PATH, 'example_1.nc') with suppress_warnings() as sup: sup.filter(RuntimeWarning, "Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist") with netcdf_file(filename, 'r', mmap=True) as f: time_var = f.variables['time'] # time_var.assignValue(42) should raise a RuntimeError--not seg. fault! assert_raises(RuntimeError, time_var.assignValue, 42) def test_appending_issue_gh_8625(): stream = BytesIO() with make_simple(stream, mode='w') as f: f.createDimension('x', 2) f.createVariable('x', float, ('x',)) f.variables['x'][...] = 1 f.flush() contents = stream.getvalue() stream = BytesIO(contents) with netcdf_file(stream, mode='a') as f: f.variables['x'][...] = 2 def test_write_invalid_dtype(): dtypes = ['int64', 'uint64'] if np.dtype('int').itemsize == 8: # 64-bit machines dtypes.append('int') if np.dtype('uint').itemsize == 8: # 64-bit machines dtypes.append('uint') with netcdf_file(BytesIO(), 'w') as f: f.createDimension('time', N_EG_ELS) for dt in dtypes: assert_raises(ValueError, f.createVariable, 'time', dt, ('time',)) def test_flush_rewind(): stream = BytesIO() with make_simple(stream, mode='w') as f: x = f.createDimension('x',4) v = f.createVariable('v', 'i2', ['x']) v[:] = 1 f.flush() len_single = len(stream.getvalue()) f.flush() len_double = len(stream.getvalue()) assert_(len_single == len_double) def test_dtype_specifiers(): # Numpy 1.7.0-dev had a bug where 'i2' wouldn't work. # Specifying np.int16 or similar only works from the same commit as this # comment was made. with make_simple(BytesIO(), mode='w') as f: f.createDimension('x',4) f.createVariable('v1', 'i2', ['x']) f.createVariable('v2', np.int16, ['x']) f.createVariable('v3', np.dtype(np.int16), ['x']) def test_ticket_1720(): io = BytesIO() items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] with netcdf_file(io, 'w') as f: f.history = 'Created for a test' f.createDimension('float_var', 10) float_var = f.createVariable('float_var', 'f', ('float_var',)) float_var[:] = items float_var.units = 'metres' f.flush() contents = io.getvalue() io = BytesIO(contents) with netcdf_file(io, 'r') as f: assert_equal(f.history, b'Created for a test') float_var = f.variables['float_var'] assert_equal(float_var.units, b'metres') assert_equal(float_var.shape, (10,)) assert_allclose(float_var[:], items) def test_mmaps_segfault(): filename = pjoin(TEST_DATA_PATH, 'example_1.nc') if not IS_PYPY: with warnings.catch_warnings(): warnings.simplefilter("error") with netcdf_file(filename, mmap=True) as f: x = f.variables['lat'][:] # should not raise warnings del x def doit(): with netcdf_file(filename, mmap=True) as f: return f.variables['lat'][:] # should not crash with suppress_warnings() as sup: sup.filter(RuntimeWarning, "Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist") x = doit() x.sum() def test_zero_dimensional_var(): io = BytesIO() with make_simple(io, 'w') as f: v = f.createVariable('zerodim', 'i2', []) # This is checking that .isrec returns a boolean - don't simplify it # to 'assert not ...' 
        assert v.isrec is False, v.isrec
        f.flush()


def test_byte_gatts():
    # Check that global "string" atts work like they did before py3k
    # unicode and general bytes confusion
    with in_tempdir():
        filename = 'g_byte_atts.nc'
        f = netcdf_file(filename, 'w')
        f._attributes['holy'] = b'grail'
        f._attributes['witch'] = 'floats'
        f.close()
        f = netcdf_file(filename, 'r')
        assert_equal(f._attributes['holy'], b'grail')
        assert_equal(f._attributes['witch'], b'floats')
        f.close()


def test_open_append():
    # open 'w', put one attr
    with in_tempdir():
        filename = 'append_dat.nc'
        f = netcdf_file(filename, 'w')
        f._attributes['Kilroy'] = 'was here'
        f.close()

        # open again in 'a', read the att and add a new one
        f = netcdf_file(filename, 'a')
        assert_equal(f._attributes['Kilroy'], b'was here')
        f._attributes['naughty'] = b'Zoot'
        f.close()

        # open yet again in 'r' and check both atts
        f = netcdf_file(filename, 'r')
        assert_equal(f._attributes['Kilroy'], b'was here')
        assert_equal(f._attributes['naughty'], b'Zoot')
        f.close()


def test_append_recordDimension():
    dataSize = 100

    with in_tempdir():
        # Create file with record time dimension
        with netcdf_file('withRecordDimension.nc', 'w') as f:
            f.createDimension('time', None)
            f.createVariable('time', 'd', ('time',))
            f.createDimension('x', dataSize)
            x = f.createVariable('x', 'd', ('x',))
            x[:] = np.array(range(dataSize))
            f.createDimension('y', dataSize)
            y = f.createVariable('y', 'd', ('y',))
            y[:] = np.array(range(dataSize))
            f.createVariable('testData', 'i', ('time', 'x', 'y'))
            f.flush()
            f.close()

        for i in range(2):
            # Open the file in append mode and add data
            with netcdf_file('withRecordDimension.nc', 'a') as f:
                f.variables['time'].data = np.append(f.variables["time"].data, i)
                f.variables['testData'][i, :, :] = np.ones((dataSize, dataSize))*i
                f.flush()

            # Read the file and check that append worked
            with netcdf_file('withRecordDimension.nc') as f:
                assert_equal(f.variables['time'][-1], i)
                assert_equal(f.variables['testData'][-1, :, :].copy(), np.ones((dataSize, dataSize))*i)
                assert_equal(f.variables['time'].data.shape[0], i+1)
                assert_equal(f.variables['testData'].data.shape[0], i+1)

        # Read the file and check that 'data' was not saved as user defined
        # attribute of testData variable during append operation
        with netcdf_file('withRecordDimension.nc') as f:
            with assert_raises(KeyError) as ar:
                f.variables['testData']._attributes['data']
            ex = ar.value
            assert_equal(ex.args[0], 'data')


def test_maskandscale():
    t = np.linspace(20, 30, 15)
    t[3] = 100
    tm = np.ma.masked_greater(t, 99)
    fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        Temp = f.variables['Temperature']
        assert_equal(Temp.missing_value, 9999)
        assert_equal(Temp.add_offset, 20)
        assert_equal(Temp.scale_factor, np.float32(0.01))
        found = Temp[:].compressed()
        del Temp  # Remove ref to mmap, so file can be closed.
expected = np.round(tm.compressed(), 2) assert_allclose(found, expected) with in_tempdir(): newfname = 'ms.nc' f = netcdf_file(newfname, 'w', maskandscale=True) f.createDimension('Temperature', len(tm)) temp = f.createVariable('Temperature', 'i', ('Temperature',)) temp.missing_value = 9999 temp.scale_factor = 0.01 temp.add_offset = 20 temp[:] = tm f.close() with netcdf_file(newfname, maskandscale=True) as f: Temp = f.variables['Temperature'] assert_equal(Temp.missing_value, 9999) assert_equal(Temp.add_offset, 20) assert_equal(Temp.scale_factor, np.float32(0.01)) expected = np.round(tm.compressed(), 2) found = Temp[:].compressed() del Temp assert_allclose(found, expected) # ------------------------------------------------------------------------ # Test reading with masked values (_FillValue / missing_value) # ------------------------------------------------------------------------ def test_read_withValuesNearFillValue(): # Regression test for ticket #5626 fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') with netcdf_file(fname, maskandscale=True) as f: vardata = f.variables['var1_fillval0'][:] assert_mask_matches(vardata, [False, True, False]) def test_read_withNoFillValue(): # For a variable with no fill value, reading data with maskandscale=True # should return unmasked data fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') with netcdf_file(fname, maskandscale=True) as f: vardata = f.variables['var2_noFillval'][:] assert_mask_matches(vardata, [False, False, False]) assert_equal(vardata, [1,2,3]) def test_read_withFillValueAndMissingValue(): # For a variable with both _FillValue and missing_value, the _FillValue # should be used IRRELEVANT_VALUE = 9999 fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') with netcdf_file(fname, maskandscale=True) as f: vardata = f.variables['var3_fillvalAndMissingValue'][:] assert_mask_matches(vardata, [True, False, False]) assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3]) def test_read_withMissingValue(): # For a variable with missing_value but not _FillValue, the missing_value # should be used fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') with netcdf_file(fname, maskandscale=True) as f: vardata = f.variables['var4_missingValue'][:] assert_mask_matches(vardata, [False, True, False]) def test_read_withFillValNaN(): fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') with netcdf_file(fname, maskandscale=True) as f: vardata = f.variables['var5_fillvalNaN'][:] assert_mask_matches(vardata, [False, True, False]) def test_read_withChar(): fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') with netcdf_file(fname, maskandscale=True) as f: vardata = f.variables['var6_char'][:] assert_mask_matches(vardata, [False, True, False]) def test_read_with2dVar(): fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') with netcdf_file(fname, maskandscale=True) as f: vardata = f.variables['var7_2d'][:] assert_mask_matches(vardata, [[True, False], [False, False], [False, True]]) def test_read_withMaskAndScaleFalse(): # If a variable has a _FillValue (or missing_value) attribute, but is read # with maskandscale set to False, the result should be unmasked fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') # Open file with mmap=False to avoid problems with closing a mmap'ed file # when arrays referring to its data still exist: with netcdf_file(fname, maskandscale=False, mmap=False) as f: vardata = f.variables['var3_fillvalAndMissingValue'][:] assert_mask_matches(vardata, [False, False, False]) assert_equal(vardata, [1, 2, 3])
19,245
34.313761
137
py
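A sketch of the write/read cycle that make_simple and check_simple wrap, kept fully in memory via BytesIO as several of the tests above do:

# Sketch: build a tiny NetCDF file in memory, then read it back.
from io import BytesIO
import numpy as np
from scipy.io.netcdf import netcdf_file

buf = BytesIO()
f = netcdf_file(buf, 'w')
f.history = 'Created for a test'
f.createDimension('time', 11)
time = f.createVariable('time', 'b', ('time',))
time[:] = np.arange(11)
time.units = 'days since 2008-01-01'
f.flush()

f2 = netcdf_file(BytesIO(buf.getvalue()))
print(f2.variables['time'][-1])   # 10
print(f2.history)                 # b'Created for a test'
f2.close()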
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/tests/test_fortran.py
''' Tests for fortran sequential files ''' import tempfile import shutil from os import path, unlink from glob import iglob import re from numpy.testing import assert_equal, assert_allclose import numpy as np from scipy.io import FortranFile, _test_fortran DATA_PATH = path.join(path.dirname(__file__), 'data') def test_fortranfiles_read(): for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")): m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I) if not m: raise RuntimeError("Couldn't match %s filename to regex" % filename) dims = (int(m.group(2)), int(m.group(3)), int(m.group(4))) dtype = m.group(1).replace('s', '<') f = FortranFile(filename, 'r', '<u4') data = f.read_record(dtype=dtype).reshape(dims, order='F') f.close() expected = np.arange(np.prod(dims)).reshape(dims).astype(dtype) assert_equal(data, expected) def test_fortranfiles_mixed_record(): filename = path.join(DATA_PATH, "fortran-mixed.dat") with FortranFile(filename, 'r', '<u4') as f: record = f.read_record('<i4,<f4,<i8,(2)<f8') assert_equal(record['f0'][0], 1) assert_allclose(record['f1'][0], 2.3) assert_equal(record['f2'][0], 4) assert_allclose(record['f3'][0], [5.6, 7.8]) def test_fortranfiles_write(): for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")): m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I) if not m: raise RuntimeError("Couldn't match %s filename to regex" % filename) dims = (int(m.group(2)), int(m.group(3)), int(m.group(4))) dtype = m.group(1).replace('s', '<') data = np.arange(np.prod(dims)).reshape(dims).astype(dtype) tmpdir = tempfile.mkdtemp() try: testFile = path.join(tmpdir,path.basename(filename)) f = FortranFile(testFile, 'w','<u4') f.write_record(data.T) f.close() originalfile = open(filename, 'rb') newfile = open(testFile, 'rb') assert_equal(originalfile.read(), newfile.read(), err_msg=filename) originalfile.close() newfile.close() finally: shutil.rmtree(tmpdir) def test_fortranfile_read_mixed_record(): # The data file fortran-3x3d-2i.dat contains the program that # produced it at the end. # # double precision :: a(3,3) # integer :: b(2) # ... 
# open(1, file='fortran-3x3d-2i.dat', form='unformatted') # write(1) a, b # close(1) # filename = path.join(DATA_PATH, "fortran-3x3d-2i.dat") with FortranFile(filename, 'r', '<u4') as f: record = f.read_record('(3,3)f8', '2i4') ax = np.arange(3*3).reshape(3, 3).astype(np.double) bx = np.array([-1, -2], dtype=np.int32) assert_equal(record[0], ax.T) assert_equal(record[1], bx.T) def test_fortranfile_write_mixed_record(tmpdir): tf = path.join(str(tmpdir), 'test.dat') records = [ (('f4', 'f4', 'i4'), (np.float32(2), np.float32(3), np.int32(100))), (('4f4', '(3,3)f4', '8i4'), (np.random.randint(255, size=[4]).astype(np.float32), np.random.randint(255, size=[3, 3]).astype(np.float32), np.random.randint(255, size=[8]).astype(np.int32))) ] for dtype, a in records: with FortranFile(tf, 'w') as f: f.write_record(*a) with FortranFile(tf, 'r') as f: b = f.read_record(*dtype) assert_equal(len(a), len(b)) for aa, bb in zip(a, b): assert_equal(bb, aa) def test_fortran_roundtrip(tmpdir): filename = path.join(str(tmpdir), 'test.dat') np.random.seed(1) # double precision m, n, k = 5, 3, 2 a = np.random.randn(m, n, k) with FortranFile(filename, 'w') as f: f.write_record(a.T) a2 = _test_fortran.read_unformatted_double(m, n, k, filename) with FortranFile(filename, 'r') as f: a3 = f.read_record('(2,3,5)f8').T assert_equal(a2, a) assert_equal(a3, a) # integer m, n, k = 5, 3, 2 a = np.random.randn(m, n, k).astype(np.int32) with FortranFile(filename, 'w') as f: f.write_record(a.T) a2 = _test_fortran.read_unformatted_int(m, n, k, filename) with FortranFile(filename, 'r') as f: a3 = f.read_record('(2,3,5)i4').T assert_equal(a2, a) assert_equal(a3, a) # mixed m, n, k = 5, 3, 2 a = np.random.randn(m, n) b = np.random.randn(k).astype(np.intc) with FortranFile(filename, 'w') as f: f.write_record(a.T, b.T) a2, b2 = _test_fortran.read_unformatted_mixed(m, n, k, filename) with FortranFile(filename, 'r') as f: a3, b3 = f.read_record('(3,5)f8', '2i4') a3 = a3.T assert_equal(a2, a) assert_equal(a3, a) assert_equal(b2, b) assert_equal(b3, b)
4,975
30.1
92
py
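A compact sketch of the Fortran-order round trip that test_fortran_roundtrip exercises above (hypothetical filename; note the transpose on write and the reversed shape in the read dtype):

# Sketch: write a Fortran-ordered record, read it back, transpose.
import numpy as np
from scipy.io import FortranFile

a = np.arange(6, dtype=np.float64).reshape(2, 3)
with FortranFile('test.dat', 'w') as f:   # hypothetical filename
    f.write_record(a.T)
with FortranFile('test.dat', 'r') as f:
    b = f.read_record('(3,2)f8').T
assert np.array_equal(a, b)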
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/tests/test_wavfile.py
from __future__ import division, print_function, absolute_import import os import sys import tempfile from io import BytesIO import numpy as np from numpy.testing import assert_equal, assert_, assert_array_equal from pytest import raises as assert_raises from scipy._lib._numpy_compat import suppress_warnings from scipy.io import wavfile def datafile(fn): return os.path.join(os.path.dirname(__file__), 'data', fn) def test_read_1(): for mmap in [False, True]: rate, data = wavfile.read(datafile('test-44100Hz-le-1ch-4bytes.wav'), mmap=mmap) assert_equal(rate, 44100) assert_(np.issubdtype(data.dtype, np.int32)) assert_equal(data.shape, (4410,)) del data def test_read_2(): for mmap in [False, True]: rate, data = wavfile.read(datafile('test-8000Hz-le-2ch-1byteu.wav'), mmap=mmap) assert_equal(rate, 8000) assert_(np.issubdtype(data.dtype, np.uint8)) assert_equal(data.shape, (800, 2)) del data def test_read_3(): for mmap in [False, True]: rate, data = wavfile.read(datafile('test-44100Hz-2ch-32bit-float-le.wav'), mmap=mmap) assert_equal(rate, 44100) assert_(np.issubdtype(data.dtype, np.float32)) assert_equal(data.shape, (441, 2)) del data def test_read_4(): for mmap in [False, True]: with suppress_warnings() as sup: sup.filter(wavfile.WavFileWarning, "Chunk .non-data. not understood, skipping it") rate, data = wavfile.read(datafile('test-48000Hz-2ch-64bit-float-le-wavex.wav'), mmap=mmap) assert_equal(rate, 48000) assert_(np.issubdtype(data.dtype, np.float64)) assert_equal(data.shape, (480, 2)) del data def test_read_5(): for mmap in [False, True]: rate, data = wavfile.read(datafile('test-44100Hz-2ch-32bit-float-be.wav'), mmap=mmap) assert_equal(rate, 44100) assert_(np.issubdtype(data.dtype, np.float32)) assert_(data.dtype.byteorder == '>' or (sys.byteorder == 'big' and data.dtype.byteorder == '=')) assert_equal(data.shape, (441, 2)) del data def test_read_fail(): for mmap in [False, True]: fp = open(datafile('example_1.nc'), 'rb') assert_raises(ValueError, wavfile.read, fp, mmap=mmap) fp.close() def test_read_early_eof(): for mmap in [False, True]: fp = open(datafile('test-44100Hz-le-1ch-4bytes-early-eof.wav'), 'rb') assert_raises(ValueError, wavfile.read, fp, mmap=mmap) fp.close() def test_read_incomplete_chunk(): for mmap in [False, True]: fp = open(datafile('test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav'), 'rb') assert_raises(ValueError, wavfile.read, fp, mmap=mmap) fp.close() def _check_roundtrip(realfile, rate, dtype, channels): if realfile: fd, tmpfile = tempfile.mkstemp(suffix='.wav') os.close(fd) else: tmpfile = BytesIO() try: data = np.random.rand(100, channels) if channels == 1: data = data[:,0] if dtype.kind == 'f': # The range of the float type should be in [-1, 1] data = data.astype(dtype) else: data = (data*128).astype(dtype) wavfile.write(tmpfile, rate, data) for mmap in [False, True]: rate2, data2 = wavfile.read(tmpfile, mmap=mmap) assert_equal(rate, rate2) assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype) assert_array_equal(data, data2) del data2 finally: if realfile: os.unlink(tmpfile) def test_write_roundtrip(): for realfile in (False, True): for dtypechar in ('i', 'u', 'f', 'g', 'q'): for size in (1, 2, 4, 8): if size == 1 and dtypechar == 'i': # signed 8-bit integer PCM is not allowed continue if size > 1 and dtypechar == 'u': # unsigned > 8-bit integer PCM is not allowed continue if (size == 1 or size == 2) and dtypechar == 'f': # 8- or 16-bit float PCM is not expected continue if dtypechar in 'gq': # no size allowed for these types if size == 1: size = '' 
else: continue for endianness in ('>', '<'): if size == 1 and endianness == '<': continue for rate in (8000, 32000): for channels in (1, 2, 5): dt = np.dtype('%s%s%s' % (endianness, dtypechar, size)) _check_roundtrip(realfile, rate, dt, channels)
5,111
30.95
92
py
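The _check_roundtrip helper above, condensed into one self-contained sketch over an in-memory buffer:

# Sketch: WAV write/read round trip through BytesIO.
from io import BytesIO
import numpy as np
from scipy.io import wavfile

rate = 8000
data = (np.random.rand(100, 2) * 128).astype(np.int16)
buf = BytesIO()
wavfile.write(buf, rate, data)
buf.seek(0)
rate2, data2 = wavfile.read(buf)
assert rate2 == rate
assert np.array_equal(data, data2)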
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/tests/test_idl.py
from __future__ import division, print_function, absolute_import

from os import path
import warnings

DATA_PATH = path.join(path.dirname(__file__), 'data')

import numpy as np
from numpy.testing import (assert_equal, assert_array_equal, assert_)
from scipy._lib._numpy_compat import suppress_warnings

from scipy.io.idl import readsav


def object_array(*args):
    """Constructs a numpy array of objects"""
    array = np.empty(len(args), dtype=object)
    for i in range(len(args)):
        array[i] = args[i]
    return array


def assert_identical(a, b):
    """Assert whether value AND type are the same"""
    assert_equal(a, b)
    if type(b) is str:
        assert_equal(type(a), type(b))
    else:
        assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type)


def assert_array_identical(a, b):
    """Assert whether values AND type are the same"""
    assert_array_equal(a, b)
    assert_equal(a.dtype.type, b.dtype.type)


# Define vectorized ID function for pointer arrays
vect_id = np.vectorize(id)


class TestIdict:

    def test_idict(self):
        custom_dict = {'a': np.int16(999)}
        original_id = id(custom_dict)
        s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'),
                    idict=custom_dict, verbose=False)
        assert_equal(original_id, id(s))
        assert_('a' in s)
        assert_identical(s['a'], np.int16(999))
        assert_identical(s['i8u'], np.uint8(234))


class TestScalars:
    # Test that scalar values are read in with the correct value and type

    def test_byte(self):
        s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
        assert_identical(s.i8u, np.uint8(234))

    def test_int16(self):
        s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
        assert_identical(s.i16s, np.int16(-23456))

    def test_int32(self):
        s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
        assert_identical(s.i32s, np.int32(-1234567890))

    def test_float32(self):
        s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
        assert_identical(s.f32, np.float32(-3.1234567e+37))

    def test_float64(self):
        s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
        assert_identical(s.f64, np.float64(-1.1976931348623157e+307))

    def test_complex32(self):
        s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
        assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j))

    def test_bytes(self):
        s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
        assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python"))

    def test_structure(self):
        pass

    def test_complex64(self):
        s = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
        assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))

    def test_heap_pointer(self):
        pass

    def test_object_reference(self):
        pass

    def test_uint16(self):
        s = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
        assert_identical(s.i16u, np.uint16(65511))

    def test_uint32(self):
        s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
        assert_identical(s.i32u, np.uint32(4294967233))

    def test_int64(self):
        s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
        assert_identical(s.i64s, np.int64(-9223372036854774567))

    def test_uint64(self):
        s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
        assert_identical(s.i64u, np.uint64(18446744073709529285))


class TestCompressed(TestScalars):
    # Test that compressed .sav files can be read in

    def test_compressed(self):
        s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
        assert_identical(s.i8u, np.uint8(234))
        assert_identical(s.f32, np.float32(-3.1234567e+37))
        assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
        assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
        assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
        assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
        assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))


class TestArrayDimensions:
    # Test that multi-dimensional arrays are read in with the correct dimensions

    def test_1d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_1d.sav'), verbose=False)
        assert_equal(s.array1d.shape, (123, ))

    def test_2d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_2d.sav'), verbose=False)
        assert_equal(s.array2d.shape, (22, 12))

    def test_3d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_3d.sav'), verbose=False)
        assert_equal(s.array3d.shape, (11, 22, 12))

    def test_4d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_4d.sav'), verbose=False)
        assert_equal(s.array4d.shape, (4, 5, 8, 7))

    def test_5d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_5d.sav'), verbose=False)
        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))

    def test_6d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_6d.sav'), verbose=False)
        assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))

    def test_7d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_7d.sav'), verbose=False)
        assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))

    def test_8d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_8d.sav'), verbose=False)
        assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))


class TestStructures:

    def test_scalars(self):
        s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
        assert_identical(s.scalars.a, np.array(np.int16(1)))
        assert_identical(s.scalars.b, np.array(np.int32(2)))
        assert_identical(s.scalars.c, np.array(np.float32(3.)))
        assert_identical(s.scalars.d, np.array(np.float64(4.)))
        assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
        assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))

    def test_scalars_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'), verbose=False)
        assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
        assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
        assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5))
        assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5))
        assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
        assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5))

    def test_scalars_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
        assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
        assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))

    def test_arrays(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False)
        assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
        assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
        assert_array_identical(s.arrays.c[0],
                               np.array([np.complex64(1+2j), np.complex64(7+8j)]))
        assert_array_identical(s.arrays.d[0],
                               np.array([b"cheese", b"bacon", b"spam"], dtype=object))

    def test_arrays_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False)

        # Check column types
        assert_(s.arrays_rep.a.dtype.type is np.object_)
        assert_(s.arrays_rep.b.dtype.type is np.object_)
        assert_(s.arrays_rep.c.dtype.type is np.object_)
        assert_(s.arrays_rep.d.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (5, ))
        assert_equal(s.arrays_rep.b.shape, (5, ))
        assert_equal(s.arrays_rep.c.shape, (5, ))
        assert_equal(s.arrays_rep.d.shape, (5, ))

        # Check values
        for i in range(5):
            assert_array_identical(s.arrays_rep.a[i],
                                   np.array([1, 2, 3], dtype=np.int16))
            assert_array_identical(s.arrays_rep.b[i],
                                   np.array([4., 5., 6., 7.], dtype=np.float32))
            assert_array_identical(s.arrays_rep.c[i],
                                   np.array([np.complex64(1+2j), np.complex64(7+8j)]))
            assert_array_identical(s.arrays_rep.d[i],
                                   np.array([b"cheese", b"bacon", b"spam"], dtype=object))

    def test_arrays_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)

        # Check column types
        assert_(s.arrays_rep.a.dtype.type is np.object_)
        assert_(s.arrays_rep.b.dtype.type is np.object_)
        assert_(s.arrays_rep.c.dtype.type is np.object_)
        assert_(s.arrays_rep.d.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.d.shape, (4, 3, 2))

        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.a[i, j, k],
                                           np.array([1, 2, 3], dtype=np.int16))
                    assert_array_identical(s.arrays_rep.b[i, j, k],
                                           np.array([4., 5., 6., 7.], dtype=np.float32))
                    assert_array_identical(s.arrays_rep.c[i, j, k],
                                           np.array([np.complex64(1+2j), np.complex64(7+8j)]))
                    assert_array_identical(s.arrays_rep.d[i, j, k],
                                           np.array([b"cheese", b"bacon", b"spam"], dtype=object))

    def test_inheritance(self):
        s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False)
        assert_identical(s.fc.x, np.array([0], dtype=np.int16))
        assert_identical(s.fc.y, np.array([0], dtype=np.int16))
        assert_identical(s.fc.r, np.array([0], dtype=np.int16))
        assert_identical(s.fc.c, np.array([4], dtype=np.int16))

    def test_arrays_corrupt_idl80(self):
        # test byte arrays with missing nbyte information from IDL 8.0 .sav file
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "Not able to verify number of bytes from header")
            s = readsav(path.join(DATA_PATH, 'struct_arrays_byte_idl80.sav'),
                        verbose=False)

        assert_identical(s.y.x[0], np.array([55,66], dtype=np.uint8))


class TestPointers:
    # Check that pointers in .sav files produce references to the same object in Python

    def test_pointers(self):
        s = readsav(path.join(DATA_PATH, 'scalar_heap_pointer.sav'), verbose=False)
        assert_identical(s.c64_pointer1, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
        assert_identical(s.c64_pointer2, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
        assert_(s.c64_pointer1 is s.c64_pointer2)


class TestPointerArray:
    # Test that pointers in arrays are correctly read in

    def test_1d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_1d.sav'), verbose=False)
        assert_equal(s.array1d.shape, (123, ))
        assert_(np.all(s.array1d == np.float32(4.)))
        assert_(np.all(vect_id(s.array1d) == id(s.array1d[0])))

    def test_2d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_2d.sav'), verbose=False)
        assert_equal(s.array2d.shape, (22, 12))
        assert_(np.all(s.array2d == np.float32(4.)))
        assert_(np.all(vect_id(s.array2d) == id(s.array2d[0,0])))

    def test_3d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_3d.sav'), verbose=False)
        assert_equal(s.array3d.shape, (11, 22, 12))
        assert_(np.all(s.array3d == np.float32(4.)))
        assert_(np.all(vect_id(s.array3d) == id(s.array3d[0,0,0])))

    def test_4d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_4d.sav'), verbose=False)
        assert_equal(s.array4d.shape, (4, 5, 8, 7))
        assert_(np.all(s.array4d == np.float32(4.)))
        assert_(np.all(vect_id(s.array4d) == id(s.array4d[0,0,0,0])))

    def test_5d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_5d.sav'), verbose=False)
        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
        assert_(np.all(s.array5d == np.float32(4.)))
        assert_(np.all(vect_id(s.array5d) == id(s.array5d[0,0,0,0,0])))

    def test_6d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_6d.sav'), verbose=False)
        assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
        assert_(np.all(s.array6d == np.float32(4.)))
        assert_(np.all(vect_id(s.array6d) == id(s.array6d[0,0,0,0,0,0])))

    def test_7d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_7d.sav'), verbose=False)
        assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
        assert_(np.all(s.array7d == np.float32(4.)))
        assert_(np.all(vect_id(s.array7d) == id(s.array7d[0,0,0,0,0,0,0])))

    def test_8d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_8d.sav'), verbose=False)
        assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
        assert_(np.all(s.array8d == np.float32(4.)))
        assert_(np.all(vect_id(s.array8d) == id(s.array8d[0,0,0,0,0,0,0,0])))


class TestPointerStructures:
    # Test that structures are correctly read in

    def test_scalars(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False)
        assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_))
        assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_))
        assert_(id(s.pointers.g[0]) == id(s.pointers.h[0]))

    def test_pointers_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False)
        assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 5).astype(np.object_))
        assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 5).astype(np.object_))
        assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))

    def test_pointers_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False)
        s_expect = np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_)
        assert_identical(s.pointers_rep.g, s_expect)
        assert_identical(s.pointers_rep.h, s_expect)
        assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))

    def test_arrays(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
        assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
        assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
        assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
        assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
        assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))

    def test_arrays_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False)

        # Check column types
        assert_(s.arrays_rep.g.dtype.type is np.object_)
        assert_(s.arrays_rep.h.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.g.shape, (5, ))
        assert_equal(s.arrays_rep.h.shape, (5, ))

        # Check values
        for i in range(5):
            assert_array_identical(s.arrays_rep.g[i],
                                   np.repeat(np.float32(4.), 2).astype(np.object_))
            assert_array_identical(s.arrays_rep.h[i],
                                   np.repeat(np.float32(4.), 3).astype(np.object_))
            assert_(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0])))
            assert_(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0])))

    def test_arrays_replicated_3d(self):
        pth = path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav')
        s = readsav(pth, verbose=False)

        # Check column types
        assert_(s.arrays_rep.g.dtype.type is np.object_)
        assert_(s.arrays_rep.h.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.h.shape, (4, 3, 2))

        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.g[i, j, k],
                                           np.repeat(np.float32(4.), 2).astype(np.object_))
                    assert_array_identical(s.arrays_rep.h[i, j, k],
                                           np.repeat(np.float32(4.), 3).astype(np.object_))
                    assert_(np.all(vect_id(s.arrays_rep.g[i, j, k]) ==
                                   id(s.arrays_rep.g[0, 0, 0][0])))
                    assert_(np.all(vect_id(s.arrays_rep.h[i, j, k]) ==
                                   id(s.arrays_rep.h[0, 0, 0][0])))


class TestTags:
    '''Test that sav files with description tag read at all'''

    def test_description(self):
        s = readsav(path.join(DATA_PATH, 'scalar_byte_descr.sav'), verbose=False)
        assert_identical(s.i8u, np.uint8(234))


def test_null_pointer():
    # Regression test for null pointers.
    s = readsav(path.join(DATA_PATH, 'null_pointer.sav'), verbose=False)
    assert_identical(s.point, None)
    assert_identical(s.check, np.int16(5))


def test_invalid_pointer():
    # Regression test for invalid pointers (gh-4613).

    # In some files in the wild, pointers can sometimes refer to a heap
    # variable that does not exist. In that case, we now gracefully fail for
    # that variable and replace the variable with None and emit a warning.
    # Since it's difficult to artificially produce such files, the file used
    # here has been edited to force the pointer reference to be invalid.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        s = readsav(path.join(DATA_PATH, 'invalid_pointer.sav'), verbose=False)
    assert_(len(w) == 1)
    assert_(str(w[0].message) == ("Variable referenced by pointer not found in "
                                  "heap: variable will be set to None"))
    assert_identical(s['a'], np.array([None, None]))
file_length: 19,683 | avg_line_length: 43.433409 | max_line_length: 105 | extension_type: py
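The test classes above all exercise the same entry point, scipy.io.readsav. For orientation, a minimal sketch of that usage pattern outside the test suite (the file name 'example.sav' and the variable name 'arr' are hypothetical placeholders, not data files shipped with scipy):

from scipy.io import readsav

# readsav returns a dict-like object; saved IDL variables are exposed
# both as keys and as attributes, as the tests above rely on.
s = readsav('example.sav', verbose=False)   # hypothetical input file
arr = s['arr']                              # hypothetical saved variable
print(arr.dtype, getattr(arr, 'shape', ()))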
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/io/tests/test_mmio.py
from __future__ import division, print_function, absolute_import

from tempfile import mkdtemp, mktemp
import os
import shutil

import numpy as np
from numpy import array, transpose, pi
from numpy.testing import (assert_equal, assert_array_equal,
                           assert_array_almost_equal)
import pytest
from pytest import raises as assert_raises

import scipy.sparse
from scipy.io.mmio import mminfo, mmread, mmwrite

parametrize_args = [('integer', 'int'),
                    ('unsigned-integer', 'uint')]


class TestMMIOArray(object):
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def check(self, a, info):
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(a, b)

    def check_exact(self, a, info):
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_equal(a, b)

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_integer(self, typeval, dtype):
        self.check_exact(array([[1, 2], [3, 4]], dtype=dtype),
                         (2, 2, 4, 'array', typeval, 'general'))

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_32bit_integer(self, typeval, dtype):
        a = array([[2**31-1, 2**31-2], [2**31-3, 2**31-4]], dtype=dtype)
        self.check_exact(a, (2, 2, 4, 'array', typeval, 'general'))

    def test_64bit_integer(self):
        a = array([[2**31, 2**32], [2**63-2, 2**63-1]], dtype=np.int64)
        if (np.intp(0).itemsize < 8):
            assert_raises(OverflowError, mmwrite, self.fn, a)
        else:
            self.check_exact(a, (2, 2, 4, 'array', 'integer', 'general'))

    def test_64bit_unsigned_integer(self):
        a = array([[2**31, 2**32], [2**64-2, 2**64-1]], dtype=np.uint64)
        self.check_exact(a, (2, 2, 4, 'array', 'unsigned-integer', 'general'))

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_upper_triangle_integer(self, typeval, dtype):
        self.check_exact(array([[0, 1], [0, 0]], dtype=dtype),
                         (2, 2, 4, 'array', typeval, 'general'))

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_lower_triangle_integer(self, typeval, dtype):
        self.check_exact(array([[0, 0], [1, 0]], dtype=dtype),
                         (2, 2, 4, 'array', typeval, 'general'))

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_rectangular_integer(self, typeval, dtype):
        self.check_exact(array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
                         (2, 3, 6, 'array', typeval, 'general'))

    def test_simple_rectangular_float(self):
        self.check([[1, 2], [3.5, 4], [5, 6]],
                   (3, 2, 6, 'array', 'real', 'general'))

    def test_simple_float(self):
        self.check([[1, 2], [3, 4.0]],
                   (2, 2, 4, 'array', 'real', 'general'))

    def test_simple_complex(self):
        self.check([[1, 2], [3, 4j]],
                   (2, 2, 4, 'array', 'complex', 'general'))

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_symmetric_integer(self, typeval, dtype):
        self.check_exact(array([[1, 2], [2, 4]], dtype=dtype),
                         (2, 2, 4, 'array', typeval, 'symmetric'))

    def test_simple_skew_symmetric_integer(self):
        self.check_exact([[0, 2], [-2, 0]],
                         (2, 2, 4, 'array', 'integer', 'skew-symmetric'))

    def test_simple_skew_symmetric_float(self):
        self.check(array([[0, 2], [-2.0, 0.0]], 'f'),
                   (2, 2, 4, 'array', 'real', 'skew-symmetric'))

    def test_simple_hermitian_complex(self):
        self.check([[1, 2+3j], [2-3j, 4]],
                   (2, 2, 4, 'array', 'complex', 'hermitian'))

    def test_random_symmetric_float(self):
        sz = (20, 20)
        a = np.random.random(sz)
        a = a + transpose(a)
        self.check(a, (20, 20, 400, 'array', 'real', 'symmetric'))

    def test_random_rectangular_float(self):
        sz = (20, 15)
        a = np.random.random(sz)
        self.check(a, (20, 15, 300, 'array', 'real', 'general'))


class TestMMIOSparseCSR(TestMMIOArray):
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def check(self, a, info):
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(a.todense(), b.todense())

    def check_exact(self, a, info):
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_equal(a.todense(), b.todense())

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [3, 4]], dtype=dtype),
                         (2, 2, 4, 'coordinate', typeval, 'general'))

    def test_32bit_integer(self):
        a = scipy.sparse.csr_matrix(array([[2**31-1, -2**31+2],
                                           [2**31-3, 2**31-4]],
                                          dtype=np.int32))
        self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))

    def test_64bit_integer(self):
        a = scipy.sparse.csr_matrix(array([[2**32+1, 2**32+1],
                                           [-2**63+2, 2**63-2]],
                                          dtype=np.int64))
        if (np.intp(0).itemsize < 8):
            assert_raises(OverflowError, mmwrite, self.fn, a)
        else:
            self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))

    def test_32bit_unsigned_integer(self):
        a = scipy.sparse.csr_matrix(array([[2**31-1, 2**31-2],
                                           [2**31-3, 2**31-4]],
                                          dtype=np.uint32))
        self.check_exact(a, (2, 2, 4, 'coordinate', 'unsigned-integer', 'general'))

    def test_64bit_unsigned_integer(self):
        a = scipy.sparse.csr_matrix(array([[2**32+1, 2**32+1],
                                           [2**64-2, 2**64-1]],
                                          dtype=np.uint64))
        self.check_exact(a, (2, 2, 4, 'coordinate', 'unsigned-integer', 'general'))

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_upper_triangle_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[0, 1], [0, 0]], dtype=dtype),
                         (2, 2, 1, 'coordinate', typeval, 'general'))

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_lower_triangle_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[0, 0], [1, 0]], dtype=dtype),
                         (2, 2, 1, 'coordinate', typeval, 'general'))

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_rectangular_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2, 3], [4, 5, 6]], dtype=dtype),
                         (2, 3, 6, 'coordinate', typeval, 'general'))

    def test_simple_rectangular_float(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3.5, 4], [5, 6]]),
                   (3, 2, 6, 'coordinate', 'real', 'general'))

    def test_simple_float(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4.0]]),
                   (2, 2, 4, 'coordinate', 'real', 'general'))

    def test_simple_complex(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4j]]),
                   (2, 2, 4, 'coordinate', 'complex', 'general'))

    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_symmetric_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [2, 4]], dtype=dtype),
                         (2, 2, 3, 'coordinate', typeval, 'symmetric'))

    def test_simple_skew_symmetric_integer(self):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [-2, 4]]),
                         (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'))

    def test_simple_skew_symmetric_float(self):
        self.check(scipy.sparse.csr_matrix(array([[1, 2], [-2.0, 4]], 'f')),
                   (2, 2, 3, 'coordinate', 'real', 'skew-symmetric'))

    def test_simple_hermitian_complex(self):
        self.check(scipy.sparse.csr_matrix([[1, 2+3j], [2-3j, 4]]),
                   (2, 2, 3, 'coordinate', 'complex', 'hermitian'))

    def test_random_symmetric_float(self):
        sz = (20, 20)
        a = np.random.random(sz)
        a = a + transpose(a)
        a = scipy.sparse.csr_matrix(a)
        self.check(a, (20, 20, 210, 'coordinate', 'real', 'symmetric'))

    def test_random_rectangular_float(self):
        sz = (20, 15)
        a = np.random.random(sz)
        a = scipy.sparse.csr_matrix(a)
        self.check(a, (20, 15, 300, 'coordinate', 'real', 'general'))

    def test_simple_pattern(self):
        a = scipy.sparse.csr_matrix([[0, 1.5], [3.0, 2.5]])
        p = np.zeros_like(a.todense())
        p[a.todense() > 0] = 1
        info = (2, 2, 3, 'coordinate', 'pattern', 'general')
        mmwrite(self.fn, a, field='pattern')
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(p, b.todense())


_32bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
2147483647
2147483646
2147483647
2147483646
'''

_32bit_integer_sparse_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 2
1 1 2147483647
2 2 2147483646
'''

_64bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
2147483648
-9223372036854775806
-2147483648
9223372036854775807
'''

_64bit_integer_sparse_general_example = '''\
%%MatrixMarket matrix coordinate integer general
2 2 3
1 1 2147483648
1 2 9223372036854775807
2 2 9223372036854775807
'''

_64bit_integer_sparse_symmetric_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 3
1 1 2147483648
1 2 -9223372036854775807
2 2 9223372036854775807
'''

_64bit_integer_sparse_skew_example = '''\
%%MatrixMarket matrix coordinate integer skew-symmetric
2 2 3
1 1 2147483648
1 2 -9223372036854775807
2 2 9223372036854775807
'''

_over64bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
2147483648
9223372036854775807
2147483648
9223372036854775808
'''

_over64bit_integer_sparse_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 2
1 1 2147483648
2 2 19223372036854775808
'''


class TestMMIOReadLargeIntegers(object):
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def check_read(self, example, a, info, dense, over32, over64):
        with open(self.fn, 'w') as f:
            f.write(example)
        assert_equal(mminfo(self.fn), info)
        if (over32 and (np.intp(0).itemsize < 8)) or over64:
            assert_raises(OverflowError, mmread, self.fn)
        else:
            b = mmread(self.fn)
            if not dense:
                b = b.todense()
            assert_equal(a, b)

    def test_read_32bit_integer_dense(self):
        a = array([[2**31-1, 2**31-1],
                   [2**31-2, 2**31-2]], dtype=np.int64)
        self.check_read(_32bit_integer_dense_example,
                        a,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True, over32=False, over64=False)

    def test_read_32bit_integer_sparse(self):
        a = array([[2**31-1, 0],
                   [0, 2**31-2]], dtype=np.int64)
        self.check_read(_32bit_integer_sparse_example,
                        a,
                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
                        dense=False, over32=False, over64=False)

    def test_read_64bit_integer_dense(self):
        a = array([[2**31, -2**31],
                   [-2**63+2, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_dense_example,
                        a,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True, over32=True, over64=False)

    def test_read_64bit_integer_sparse_general(self):
        a = array([[2**31, 2**63-1],
                   [0, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_general_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'general'),
                        dense=False, over32=True, over64=False)

    def test_read_64bit_integer_sparse_symmetric(self):
        a = array([[2**31, -2**63+1],
                   [-2**63+1, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_symmetric_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'symmetric'),
                        dense=False, over32=True, over64=False)

    def test_read_64bit_integer_sparse_skew(self):
        a = array([[2**31, -2**63+1],
                   [2**63-1, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_skew_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'),
                        dense=False, over32=True, over64=False)

    def test_read_over64bit_integer_dense(self):
        self.check_read(_over64bit_integer_dense_example,
                        None,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True, over32=True, over64=True)

    def test_read_over64bit_integer_sparse(self):
        self.check_read(_over64bit_integer_sparse_example,
                        None,
                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
                        dense=False, over32=True, over64=True)


_general_example = '''\
%%MatrixMarket matrix coordinate real general
%=================================================================================
%
% This ASCII file represents a sparse MxN matrix with L
% nonzeros in the following Matrix Market format:
%
% +----------------------------------------------+
% |%%MatrixMarket matrix coordinate real general | <--- header line
% |%                                             | <--+
% |% comments                                    |    |-- 0 or more comment lines
% |%                                             | <--+
% |    M  N  L                                   | <--- rows, columns, entries
% |    I1  J1  A(I1, J1)                         | <--+
% |    I2  J2  A(I2, J2)                         |    |
% |    I3  J3  A(I3, J3)                         |    |-- L lines
% |        . . .                                 |    |
% |    IL JL  A(IL, JL)                          | <--+
% +----------------------------------------------+
%
% Indices are 1-based, i.e. A(1,1) is the first element.
%
%=================================================================================
5 5 8
1 1 1.000e+00
2 2 1.050e+01
3 3 1.500e-02
1 4 6.000e+00
4 2 2.505e+02
4 4 -2.800e+02
4 5 3.332e+01
5 5 1.200e+01
'''

_hermitian_example = '''\
%%MatrixMarket matrix coordinate complex hermitian
5 5 7
1 1 1.0 0
2 2 10.5 0
4 2 250.5 22.22
3 3 1.5e-2 0
4 4 -2.8e2 0
5 5 12. 0
5 4 0 33.32
'''

_skew_example = '''\
%%MatrixMarket matrix coordinate real skew-symmetric
5 5 7
1 1 1.0
2 2 10.5
4 2 250.5
3 3 1.5e-2
4 4 -2.8e2
5 5 12.
5 4 0
'''

_symmetric_example = '''\
%%MatrixMarket matrix coordinate real symmetric
5 5 7
1 1 1.0
2 2 10.5
4 2 250.5
3 3 1.5e-2
4 4 -2.8e2
5 5 12.
5 4 8
'''

_symmetric_pattern_example = '''\
%%MatrixMarket matrix coordinate pattern symmetric
5 5 7
1 1
2 2
4 2
3 3
4 4
5 5
5 4
'''


class TestMMIOCoordinate(object):
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def check_read(self, example, a, info):
        f = open(self.fn, 'w')
        f.write(example)
        f.close()
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn).todense()
        assert_array_almost_equal(a, b)

    def test_read_general(self):
        a = [[1, 0, 0, 6, 0],
             [0, 10.5, 0, 0, 0],
             [0, 0, .015, 0, 0],
             [0, 250.5, 0, -280, 33.32],
             [0, 0, 0, 0, 12]]
        self.check_read(_general_example, a,
                        (5, 5, 8, 'coordinate', 'real', 'general'))

    def test_read_hermitian(self):
        a = [[1, 0, 0, 0, 0],
             [0, 10.5, 0, 250.5 - 22.22j, 0],
             [0, 0, .015, 0, 0],
             [0, 250.5 + 22.22j, 0, -280, -33.32j],
             [0, 0, 0, 33.32j, 12]]
        self.check_read(_hermitian_example, a,
                        (5, 5, 7, 'coordinate', 'complex', 'hermitian'))

    def test_read_skew(self):
        a = [[1, 0, 0, 0, 0],
             [0, 10.5, 0, -250.5, 0],
             [0, 0, .015, 0, 0],
             [0, 250.5, 0, -280, 0],
             [0, 0, 0, 0, 12]]
        self.check_read(_skew_example, a,
                        (5, 5, 7, 'coordinate', 'real', 'skew-symmetric'))

    def test_read_symmetric(self):
        a = [[1, 0, 0, 0, 0],
             [0, 10.5, 0, 250.5, 0],
             [0, 0, .015, 0, 0],
             [0, 250.5, 0, -280, 8],
             [0, 0, 0, 8, 12]]
        self.check_read(_symmetric_example, a,
                        (5, 5, 7, 'coordinate', 'real', 'symmetric'))

    def test_read_symmetric_pattern(self):
        a = [[1, 0, 0, 0, 0],
             [0, 1, 0, 1, 0],
             [0, 0, 1, 0, 0],
             [0, 1, 0, 1, 1],
             [0, 0, 0, 1, 1]]
        self.check_read(_symmetric_pattern_example, a,
                        (5, 5, 7, 'coordinate', 'pattern', 'symmetric'))

    def test_empty_write_read(self):
        # http://projects.scipy.org/scipy/ticket/883
        b = scipy.sparse.coo_matrix((10, 10))
        mmwrite(self.fn, b)
        assert_equal(mminfo(self.fn),
                     (10, 10, 0, 'coordinate', 'real', 'symmetric'))
        a = b.todense()
        b = mmread(self.fn).todense()
        assert_array_almost_equal(a, b)

    def test_bzip2_py3(self):
        # test if fix for #2152 works
        try:
            # bz2 module isn't always built when building Python.
            import bz2
        except ImportError:
            return
        I = array([0, 0, 1, 2, 3, 3, 3, 4])
        J = array([0, 3, 1, 2, 1, 3, 4, 4])
        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])

        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))

        mmwrite(self.fn, b)

        fn_bzip2 = "%s.bz2" % self.fn
        with open(self.fn, 'rb') as f_in:
            f_out = bz2.BZ2File(fn_bzip2, 'wb')
            f_out.write(f_in.read())
            f_out.close()

        a = mmread(fn_bzip2).todense()
        assert_array_almost_equal(a, b.todense())

    def test_gzip_py3(self):
        # test if fix for #2152 works
        try:
            # gzip module can be missing from Python installation
            import gzip
        except ImportError:
            return
        I = array([0, 0, 1, 2, 3, 3, 3, 4])
        J = array([0, 3, 1, 2, 1, 3, 4, 4])
        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])

        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))

        mmwrite(self.fn, b)

        fn_gzip = "%s.gz" % self.fn
        with open(self.fn, 'rb') as f_in:
            f_out = gzip.open(fn_gzip, 'wb')
            f_out.write(f_in.read())
            f_out.close()

        a = mmread(fn_gzip).todense()
        assert_array_almost_equal(a, b.todense())

    def test_real_write_read(self):
        I = array([0, 0, 1, 2, 3, 3, 3, 4])
        J = array([0, 3, 1, 2, 1, 3, 4, 4])
        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])

        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))

        mmwrite(self.fn, b)
        assert_equal(mminfo(self.fn),
                     (5, 5, 8, 'coordinate', 'real', 'general'))
        a = b.todense()
        b = mmread(self.fn).todense()
        assert_array_almost_equal(a, b)

    def test_complex_write_read(self):
        I = array([0, 0, 1, 2, 3, 3, 3, 4])
        J = array([0, 3, 1, 2, 1, 3, 4, 4])
        V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
                   250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])

        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))

        mmwrite(self.fn, b)
        assert_equal(mminfo(self.fn),
                     (5, 5, 8, 'coordinate', 'complex', 'general'))
        a = b.todense()
        b = mmread(self.fn).todense()
        assert_array_almost_equal(a, b)

    def test_sparse_formats(self):
        mats = []

        I = array([0, 0, 1, 2, 3, 3, 3, 4])
        J = array([0, 3, 1, 2, 1, 3, 4, 4])

        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
        mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))

        V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
                   250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
        mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))

        for mat in mats:
            expected = mat.todense()
            for fmt in ['csr', 'csc', 'coo']:
                fn = mktemp(dir=self.tmpdir)  # safe, we own tmpdir
                mmwrite(fn, mat.asformat(fmt))
                result = mmread(fn).todense()
                assert_array_almost_equal(result, expected)

    def test_precision(self):
        test_values = [pi] + [10**(i) for i in range(0, -10, -1)]
        test_precisions = range(1, 10)
        for value in test_values:
            for precision in test_precisions:
                # construct sparse matrix with test value at last main diagonal
                n = 10**precision + 1
                A = scipy.sparse.dok_matrix((n, n))
                A[n-1, n-1] = value
                # write matrix with test precision and read again
                mmwrite(self.fn, A, precision=precision)
                A = scipy.io.mmread(self.fn)
                # check for right entries in matrix
                assert_array_equal(A.row, [n-1])
                assert_array_equal(A.col, [n-1])
                assert_array_almost_equal(A.data,
                                          [float('%%.%dg' % precision % value)])
file_length: 23,724 | avg_line_length: 34.200297 | max_line_length: 86 | extension_type: py
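Every check/check_exact helper above follows one write-inspect-read round trip through the Matrix Market routines. A standalone sketch of that round trip, assuming only a writable temporary directory (the matrix and file name are arbitrary):

import os
import tempfile

import numpy as np
import scipy.sparse
from scipy.io import mminfo, mmread, mmwrite

# Round-trip a small sparse matrix through Matrix Market format.
a = scipy.sparse.csr_matrix(np.array([[1.0, 0.0], [3.5, 2.5]]))
fn = os.path.join(tempfile.mkdtemp(), 'roundtrip.mtx')

mmwrite(fn, a)      # sparse input is written in 'coordinate' format
print(mminfo(fn))   # (rows, cols, entries, format, field, symmetry)
b = mmread(fn)      # sparse files are read back as COO matrices

assert np.allclose(a.todense(), b.todense())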
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_build_utils/_fortran.py
import re
import sys
import os
import glob

from distutils.dep_util import newer


__all__ = ['needs_g77_abi_wrapper', 'split_fortran_files',
           'get_g77_abi_wrappers', 'needs_sgemv_fix', 'get_sgemv_fix']


def uses_veclib(info):
    if sys.platform != "darwin":
        return False
    r_accelerate = re.compile("vecLib")
    extra_link_args = info.get('extra_link_args', '')
    for arg in extra_link_args:
        if r_accelerate.search(arg):
            return True
    return False


def uses_accelerate(info):
    if sys.platform != "darwin":
        return False
    r_accelerate = re.compile("Accelerate")
    extra_link_args = info.get('extra_link_args', '')
    for arg in extra_link_args:
        if r_accelerate.search(arg):
            return True
    return False


def uses_mkl(info):
    r_mkl = re.compile("mkl")
    libraries = info.get('libraries', '')
    for library in libraries:
        if r_mkl.search(library):
            return True
    return False


def needs_g77_abi_wrapper(info):
    """Returns True if g77 ABI wrapper must be used."""
    if uses_accelerate(info) or uses_veclib(info):
        return True
    elif uses_mkl(info):
        return True
    else:
        return False


def get_g77_abi_wrappers(info):
    """
    Returns file names of source files containing Fortran ABI wrapper
    routines.
    """
    wrapper_sources = []

    path = os.path.abspath(os.path.dirname(__file__))
    if needs_g77_abi_wrapper(info):
        wrapper_sources += [
            os.path.join(path, 'src', 'wrap_g77_abi_f.f'),
            os.path.join(path, 'src', 'wrap_g77_abi_c.c'),
        ]
        if uses_accelerate(info):
            wrapper_sources += [
                os.path.join(path, 'src', 'wrap_accelerate_c.c'),
                os.path.join(path, 'src', 'wrap_accelerate_f.f'),
            ]
        elif uses_mkl(info):
            wrapper_sources += [
                os.path.join(path, 'src', 'wrap_dummy_accelerate.f'),
            ]
        else:
            raise NotImplementedError("Do not know how to handle LAPACK %s on mac os x" % (info,))
    else:
        wrapper_sources += [
            os.path.join(path, 'src', 'wrap_dummy_g77_abi.f'),
            os.path.join(path, 'src', 'wrap_dummy_accelerate.f'),
        ]
    return wrapper_sources


def needs_sgemv_fix(info):
    """Returns True if SGEMV must be fixed."""
    if uses_accelerate(info):
        return True
    else:
        return False


def get_sgemv_fix(info):
    """ Returns source file needed to correct SGEMV """
    path = os.path.abspath(os.path.dirname(__file__))
    if needs_sgemv_fix(info):
        return [os.path.join(path, 'src', 'apple_sgemv_fix.c')]
    else:
        return []


def split_fortran_files(source_dir, subroutines=None):
    """Split each file in `source_dir` into separate files per subroutine.

    Parameters
    ----------
    source_dir : str
        Full path to directory in which sources to be split are located.
    subroutines : list of str, optional
        Subroutines to split. (Default: all)

    Returns
    -------
    fnames : list of str
        List of file names (not including any path) that were created
        in `source_dir`.

    Notes
    -----
    This function is useful for code that can't be compiled with g77 because
    of type casting errors which do work with gfortran.

    Created files are named: ``original_name + '_subr_i' + '.f'``, with ``i``
    starting at zero and ending at ``num_subroutines_in_file - 1``.

    """

    if subroutines is not None:
        subroutines = [x.lower() for x in subroutines]

    def split_file(fname):
        with open(fname, 'rb') as f:
            lines = f.readlines()
            subs = []
            need_split_next = True

            # find lines with SUBROUTINE statements
            for ix, line in enumerate(lines):
                m = re.match(b'^\\s+subroutine\\s+([a-z0-9_]+)\\s*\\(', line, re.I)
                if m and line[0] not in b'Cc!*':
                    if subroutines is not None:
                        subr_name = m.group(1).decode('ascii').lower()
                        subr_wanted = (subr_name in subroutines)
                    else:
                        subr_wanted = True
                    if subr_wanted or need_split_next:
                        need_split_next = subr_wanted
                        subs.append(ix)

            # check if no split needed
            if len(subs) <= 1:
                return [fname]

            # write out one file per subroutine
            new_fnames = []
            num_files = len(subs)
            for nfile in range(num_files):
                new_fname = fname[:-2] + '_subr_' + str(nfile) + '.f'
                new_fnames.append(new_fname)
                if not newer(fname, new_fname):
                    continue
                with open(new_fname, 'wb') as fn:
                    if nfile + 1 == num_files:
                        fn.writelines(lines[subs[nfile]:])
                    else:
                        fn.writelines(lines[subs[nfile]:subs[nfile+1]])

        return new_fnames

    exclude_pattern = re.compile('_subr_[0-9]')
    source_fnames = [f for f in glob.glob(os.path.join(source_dir, '*.f'))
                     if not exclude_pattern.search(os.path.basename(f))]
    fnames = []
    for source_fname in source_fnames:
        created_files = split_file(source_fname)
        if created_files is not None:
            for cfile in created_files:
                fnames.append(os.path.basename(cfile))

    return fnames
file_length: 5,612 | avg_line_length: 29.672131 | max_line_length: 98 | extension_type: py
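These helpers are meant to be called from numpy.distutils-based setup scripts, where a BLAS/LAPACK `info` dict decides which wrapper sources get appended to an extension. A hedged sketch of a typical call site (the module name and source list are illustrative, not a real scipy extension):

# Hypothetical setup.py fragment showing how the helpers are consumed.
from numpy.distutils.system_info import get_info
from scipy._build_utils import get_g77_abi_wrappers, get_sgemv_fix

lapack_opt = get_info('lapack_opt')

sources = ['_mymodule.c']                    # illustrative extension source
sources += get_g77_abi_wrappers(lapack_opt)  # g77 ABI shims, when needed
sources += get_sgemv_fix(lapack_opt)         # Accelerate SGEMV workaround

print(sources)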
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_build_utils/__init__.py
import numpy as np

from ._fortran import *
from scipy._lib._version import NumpyVersion


# Don't use deprecated Numpy C API. Define this to a fixed version instead of
# NPY_API_VERSION in order not to break compilation for released Scipy versions
# when Numpy introduces a new deprecation. Use in setup.py::
#
#   config.add_extension('_name', sources=['source_fname'], **numpy_nodepr_api)
#
if NumpyVersion(np.__version__) >= '1.10.0.dev':
    numpy_nodepr_api = dict(define_macros=[("NPY_NO_DEPRECATED_API",
                                            "NPY_1_9_API_VERSION")])
else:
    numpy_nodepr_api = dict()


from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
file_length: 714 | avg_line_length: 31.5 | max_line_length: 79 | extension_type: py
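The module's own comment already names the intended call site; as a runnable illustration, a hypothetical numpy.distutils configuration fragment that consumes the dict (the package and extension names are placeholders):

# Hypothetical configuration fragment; '_name' is a placeholder extension.
from numpy.distutils.misc_util import Configuration
from scipy._build_utils import numpy_nodepr_api

config = Configuration('mypkg', parent_package='', top_path=None)
# On new-enough numpy this expands to
# define_macros=[('NPY_NO_DEPRECATED_API', 'NPY_1_9_API_VERSION')];
# on older releases it expands to nothing.
config.add_extension('_name', sources=['_name.c'], **numpy_nodepr_api)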
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_differentialevolution.py
""" differential_evolution: The differential evolution global optimization algorithm Added by Andrew Nelson 2014 """ from __future__ import division, print_function, absolute_import import numpy as np from scipy.optimize import OptimizeResult, minimize from scipy.optimize.optimize import _status_message from scipy._lib._util import check_random_state from scipy._lib.six import xrange, string_types import warnings __all__ = ['differential_evolution'] _MACHEPS = np.finfo(np.float64).eps def differential_evolution(func, bounds, args=(), strategy='best1bin', maxiter=1000, popsize=15, tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None, callback=None, disp=False, polish=True, init='latinhypercube', atol=0): """Finds the global minimum of a multivariate function. Differential Evolution is stochastic in nature (does not use gradient methods) to find the minimium, and can search large areas of candidate space, but often requires larger numbers of function evaluations than conventional gradient based techniques. The algorithm is due to Storn and Price [1]_. Parameters ---------- func : callable The objective function to be minimized. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. bounds : sequence Bounds for variables. ``(min, max)`` pairs for each element in ``x``, defining the lower and upper bounds for the optimizing argument of `func`. It is required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used to determine the number of parameters in ``x``. args : tuple, optional Any additional fixed parameters needed to completely specify the objective function. strategy : str, optional The differential evolution strategy to use. Should be one of: - 'best1bin' - 'best1exp' - 'rand1exp' - 'randtobest1exp' - 'currenttobest1exp' - 'best2exp' - 'rand2exp' - 'randtobest1bin' - 'currenttobest1bin' - 'best2bin' - 'rand2bin' - 'rand1bin' The default is 'best1bin'. maxiter : int, optional The maximum number of generations over which the entire population is evolved. The maximum number of function evaluations (with no polishing) is: ``(maxiter + 1) * popsize * len(x)`` popsize : int, optional A multiplier for setting the total population size. The population has ``popsize * len(x)`` individuals (unless the initial population is supplied via the `init` keyword). tol : float, optional Relative tolerance for convergence, the solving stops when ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``, where and `atol` and `tol` are the absolute and relative tolerance respectively. mutation : float or tuple(float, float), optional The mutation constant. In the literature this is also known as differential weight, being denoted by F. If specified as a float it should be in the range [0, 2]. If specified as a tuple ``(min, max)`` dithering is employed. Dithering randomly changes the mutation constant on a generation by generation basis. The mutation constant for that generation is taken from ``U[min, max)``. Dithering can help speed convergence significantly. Increasing the mutation constant increases the search radius, but will slow down convergence. recombination : float, optional The recombination constant, should be in the range [0, 1]. In the literature this is also known as the crossover probability, being denoted by CR. 
Increasing this value allows a larger number of mutants to progress into the next generation, but at the risk of population stability. seed : int or `np.random.RandomState`, optional If `seed` is not specified the `np.RandomState` singleton is used. If `seed` is an int, a new `np.random.RandomState` instance is used, seeded with seed. If `seed` is already a `np.random.RandomState instance`, then that `np.random.RandomState` instance is used. Specify `seed` for repeatable minimizations. disp : bool, optional Display status messages callback : callable, `callback(xk, convergence=val)`, optional A function to follow the progress of the minimization. ``xk`` is the current value of ``x0``. ``val`` represents the fractional value of the population convergence. When ``val`` is greater than one the function halts. If callback returns `True`, then the minimization is halted (any polishing is still carried out). polish : bool, optional If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B` method is used to polish the best population member at the end, which can improve the minimization slightly. init : str or array-like, optional Specify which type of population initialization is performed. Should be one of: - 'latinhypercube' - 'random' - array specifying the initial population. The array should have shape ``(M, len(x))``, where len(x) is the number of parameters. `init` is clipped to `bounds` before use. The default is 'latinhypercube'. Latin Hypercube sampling tries to maximize coverage of the available parameter space. 'random' initializes the population randomly - this has the drawback that clustering can occur, preventing the whole of parameter space being covered. Use of an array to specify a population subset could be used, for example, to create a tight bunch of initial guesses in an location where the solution is known to exist, thereby reducing time for convergence. atol : float, optional Absolute tolerance for convergence, the solving stops when ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``, where and `atol` and `tol` are the absolute and relative tolerance respectively. Returns ------- res : OptimizeResult The optimization result represented as a `OptimizeResult` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. If `polish` was employed, and a lower minimum was obtained by the polishing, then OptimizeResult also contains the ``jac`` attribute. Notes ----- Differential evolution is a stochastic population based method that is useful for global optimization problems. At each pass through the population the algorithm mutates each candidate solution by mixing with other candidate solutions to create a trial candidate. There are several strategies [2]_ for creating trial candidates, which suit some problems more than others. The 'best1bin' strategy is a good starting point for many systems. In this strategy two members of the population are randomly chosen. Their difference is used to mutate the best member (the `best` in `best1bin`), :math:`b_0`, so far: .. math:: b' = b_0 + mutation * (population[rand0] - population[rand1]) A trial vector is then constructed. Starting with a randomly chosen 'i'th parameter the trial is sequentially filled (in modulo) with parameters from `b'` or the original candidate. 
The choice of whether to use `b'` or the original candidate is made with a binomial distribution (the 'bin' in 'best1bin') - a random number in [0, 1) is generated. If this number is less than the `recombination` constant then the parameter is loaded from `b'`, otherwise it is loaded from the original candidate. The final parameter is always loaded from `b'`. Once the trial candidate is built its fitness is assessed. If the trial is better than the original candidate then it takes its place. If it is also better than the best overall candidate it also replaces that. To improve your chances of finding a global minimum use higher `popsize` values, with higher `mutation` and (dithering), but lower `recombination` values. This has the effect of widening the search radius, but slowing convergence. .. versionadded:: 0.15.0 Examples -------- Let us consider the problem of minimizing the Rosenbrock function. This function is implemented in `rosen` in `scipy.optimize`. >>> from scipy.optimize import rosen, differential_evolution >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)] >>> result = differential_evolution(rosen, bounds) >>> result.x, result.fun (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19) Next find the minimum of the Ackley function (http://en.wikipedia.org/wiki/Test_functions_for_optimization). >>> from scipy.optimize import differential_evolution >>> import numpy as np >>> def ackley(x): ... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2)) ... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1])) ... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e >>> bounds = [(-5, 5), (-5, 5)] >>> result = differential_evolution(ackley, bounds) >>> result.x, result.fun (array([ 0., 0.]), 4.4408920985006262e-16) References ---------- .. [1] Storn, R and Price, K, Differential Evolution - a Simple and Efficient Heuristic for Global Optimization over Continuous Spaces, Journal of Global Optimization, 1997, 11, 341 - 359. .. [2] http://www1.icsi.berkeley.edu/~storn/code.html .. [3] http://en.wikipedia.org/wiki/Differential_evolution """ solver = DifferentialEvolutionSolver(func, bounds, args=args, strategy=strategy, maxiter=maxiter, popsize=popsize, tol=tol, mutation=mutation, recombination=recombination, seed=seed, polish=polish, callback=callback, disp=disp, init=init, atol=atol) return solver.solve() class DifferentialEvolutionSolver(object): """This class implements the differential evolution solver Parameters ---------- func : callable The objective function to be minimized. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. bounds : sequence Bounds for variables. ``(min, max)`` pairs for each element in ``x``, defining the lower and upper bounds for the optimizing argument of `func`. It is required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used to determine the number of parameters in ``x``. args : tuple, optional Any additional fixed parameters needed to completely specify the objective function. strategy : str, optional The differential evolution strategy to use. Should be one of: - 'best1bin' - 'best1exp' - 'rand1exp' - 'randtobest1exp' - 'currenttobest1exp' - 'best2exp' - 'rand2exp' - 'randtobest1bin' - 'currenttobest1bin' - 'best2bin' - 'rand2bin' - 'rand1bin' The default is 'best1bin' maxiter : int, optional The maximum number of generations over which the entire population is evolved. 
The maximum number of function evaluations (with no polishing) is: ``(maxiter + 1) * popsize * len(x)`` popsize : int, optional A multiplier for setting the total population size. The population has ``popsize * len(x)`` individuals (unless the initial population is supplied via the `init` keyword). tol : float, optional Relative tolerance for convergence, the solving stops when ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``, where and `atol` and `tol` are the absolute and relative tolerance respectively. mutation : float or tuple(float, float), optional The mutation constant. In the literature this is also known as differential weight, being denoted by F. If specified as a float it should be in the range [0, 2]. If specified as a tuple ``(min, max)`` dithering is employed. Dithering randomly changes the mutation constant on a generation by generation basis. The mutation constant for that generation is taken from U[min, max). Dithering can help speed convergence significantly. Increasing the mutation constant increases the search radius, but will slow down convergence. recombination : float, optional The recombination constant, should be in the range [0, 1]. In the literature this is also known as the crossover probability, being denoted by CR. Increasing this value allows a larger number of mutants to progress into the next generation, but at the risk of population stability. seed : int or `np.random.RandomState`, optional If `seed` is not specified the `np.random.RandomState` singleton is used. If `seed` is an int, a new `np.random.RandomState` instance is used, seeded with `seed`. If `seed` is already a `np.random.RandomState` instance, then that `np.random.RandomState` instance is used. Specify `seed` for repeatable minimizations. disp : bool, optional Display status messages callback : callable, `callback(xk, convergence=val)`, optional A function to follow the progress of the minimization. ``xk`` is the current value of ``x0``. ``val`` represents the fractional value of the population convergence. When ``val`` is greater than one the function halts. If callback returns `True`, then the minimization is halted (any polishing is still carried out). polish : bool, optional If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method is used to polish the best population member at the end. This requires a few more function evaluations. maxfun : int, optional Set the maximum number of function evaluations. However, it probably makes more sense to set `maxiter` instead. init : str or array-like, optional Specify which type of population initialization is performed. Should be one of: - 'latinhypercube' - 'random' - array specifying the initial population. The array should have shape ``(M, len(x))``, where len(x) is the number of parameters. `init` is clipped to `bounds` before use. The default is 'latinhypercube'. Latin Hypercube sampling tries to maximize coverage of the available parameter space. 'random' initializes the population randomly - this has the drawback that clustering can occur, preventing the whole of parameter space being covered. Use of an array to specify a population could be used, for example, to create a tight bunch of initial guesses in an location where the solution is known to exist, thereby reducing time for convergence. 
atol : float, optional Absolute tolerance for convergence, the solving stops when ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``, where and `atol` and `tol` are the absolute and relative tolerance respectively. """ # Dispatch of mutation strategy method (binomial or exponential). _binomial = {'best1bin': '_best1', 'randtobest1bin': '_randtobest1', 'currenttobest1bin': '_currenttobest1', 'best2bin': '_best2', 'rand2bin': '_rand2', 'rand1bin': '_rand1'} _exponential = {'best1exp': '_best1', 'rand1exp': '_rand1', 'randtobest1exp': '_randtobest1', 'currenttobest1exp': '_currenttobest1', 'best2exp': '_best2', 'rand2exp': '_rand2'} __init_error_msg = ("The population initialization method must be one of " "'latinhypercube' or 'random', or an array of shape " "(M, N) where N is the number of parameters and M>5") def __init__(self, func, bounds, args=(), strategy='best1bin', maxiter=1000, popsize=15, tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None, maxfun=np.inf, callback=None, disp=False, polish=True, init='latinhypercube', atol=0): if strategy in self._binomial: self.mutation_func = getattr(self, self._binomial[strategy]) elif strategy in self._exponential: self.mutation_func = getattr(self, self._exponential[strategy]) else: raise ValueError("Please select a valid mutation strategy") self.strategy = strategy self.callback = callback self.polish = polish # relative and absolute tolerances for convergence self.tol, self.atol = tol, atol # Mutation constant should be in [0, 2). If specified as a sequence # then dithering is performed. self.scale = mutation if (not np.all(np.isfinite(mutation)) or np.any(np.array(mutation) >= 2) or np.any(np.array(mutation) < 0)): raise ValueError('The mutation constant must be a float in ' 'U[0, 2), or specified as a tuple(min, max)' ' where min < max and min, max are in U[0, 2).') self.dither = None if hasattr(mutation, '__iter__') and len(mutation) > 1: self.dither = [mutation[0], mutation[1]] self.dither.sort() self.cross_over_probability = recombination self.func = func self.args = args # convert tuple of lower and upper bounds to limits # [(low_0, high_0), ..., (low_n, high_n] # -> [[low_0, ..., low_n], [high_0, ..., high_n]] self.limits = np.array(bounds, dtype='float').T if (np.size(self.limits, 0) != 2 or not np.all(np.isfinite(self.limits))): raise ValueError('bounds should be a sequence containing ' 'real valued (min, max) pairs for each value' ' in x') if maxiter is None: # the default used to be None maxiter = 1000 self.maxiter = maxiter if maxfun is None: # the default used to be None maxfun = np.inf self.maxfun = maxfun # population is scaled to between [0, 1]. # We have to scale between parameter <-> population # save these arguments for _scale_parameter and # _unscale_parameter. This is an optimization self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1]) self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1]) self.parameter_count = np.size(self.limits, 1) self.random_number_generator = check_random_state(seed) # default population initialization is a latin hypercube design, but # there are other population initializations possible. 
# the minimum is 5 because 'best2bin' requires a population that's at # least 5 long self.num_population_members = max(5, popsize * self.parameter_count) self.population_shape = (self.num_population_members, self.parameter_count) self._nfev = 0 if isinstance(init, string_types): if init == 'latinhypercube': self.init_population_lhs() elif init == 'random': self.init_population_random() else: raise ValueError(self.__init_error_msg) else: self.init_population_array(init) self.disp = disp def init_population_lhs(self): """ Initializes the population with Latin Hypercube Sampling. Latin Hypercube Sampling ensures that each parameter is uniformly sampled over its range. """ rng = self.random_number_generator # Each parameter range needs to be sampled uniformly. The scaled # parameter range ([0, 1)) needs to be split into # `self.num_population_members` segments, each of which has the following # size: segsize = 1.0 / self.num_population_members # Within each segment we sample from a uniform random distribution. # We need to do this sampling for each parameter. samples = (segsize * rng.random_sample(self.population_shape) # Offset each segment to cover the entire parameter range [0, 1) + np.linspace(0., 1., self.num_population_members, endpoint=False)[:, np.newaxis]) # Create an array for population of candidate solutions. self.population = np.zeros_like(samples) # Initialize population of candidate solutions by permutation of the # random samples. for j in range(self.parameter_count): order = rng.permutation(range(self.num_population_members)) self.population[:, j] = samples[order, j] # reset population energies self.population_energies = (np.ones(self.num_population_members) * np.inf) # reset number of function evaluations counter self._nfev = 0 def init_population_random(self): """ Initialises the population at random. This type of initialization can possess clustering, Latin Hypercube sampling is generally better. """ rng = self.random_number_generator self.population = rng.random_sample(self.population_shape) # reset population energies self.population_energies = (np.ones(self.num_population_members) * np.inf) # reset number of function evaluations counter self._nfev = 0 def init_population_array(self, init): """ Initialises the population with a user specified population. Parameters ---------- init : np.ndarray Array specifying subset of the initial population. The array should have shape (M, len(x)), where len(x) is the number of parameters. The population is clipped to the lower and upper `bounds`. """ # make sure you're using a float array popn = np.asfarray(init) if (np.size(popn, 0) < 5 or popn.shape[1] != self.parameter_count or len(popn.shape) != 2): raise ValueError("The population supplied needs to have shape" " (M, len(x)), where M > 4.") # scale values and clip to bounds, assigning to population self.population = np.clip(self._unscale_parameters(popn), 0, 1) self.num_population_members = np.size(self.population, 0) self.population_shape = (self.num_population_members, self.parameter_count) # reset population energies self.population_energies = (np.ones(self.num_population_members) * np.inf) # reset number of function evaluations counter self._nfev = 0 @property def x(self): """ The best solution from the solver Returns ------- x : ndarray The best solution from the solver. """ return self._scale_parameters(self.population[0]) @property def convergence(self): """ The standard deviation of the population energies divided by their mean. 
""" return (np.std(self.population_energies) / np.abs(np.mean(self.population_energies) + _MACHEPS)) def solve(self): """ Runs the DifferentialEvolutionSolver. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. If `polish` was employed, and a lower minimum was obtained by the polishing, then OptimizeResult also contains the ``jac`` attribute. """ nit, warning_flag = 0, False status_message = _status_message['success'] # The population may have just been initialized (all entries are # np.inf). If it has you have to calculate the initial energies. # Although this is also done in the evolve generator it's possible # that someone can set maxiter=0, at which point we still want the # initial energies to be calculated (the following loop isn't run). if np.all(np.isinf(self.population_energies)): self._calculate_population_energies() # do the optimisation. for nit in xrange(1, self.maxiter + 1): # evolve the population by a generation try: next(self) except StopIteration: warning_flag = True status_message = _status_message['maxfev'] break if self.disp: print("differential_evolution step %d: f(x)= %g" % (nit, self.population_energies[0])) # should the solver terminate? convergence = self.convergence if (self.callback and self.callback(self._scale_parameters(self.population[0]), convergence=self.tol / convergence) is True): warning_flag = True status_message = ('callback function requested stop early ' 'by returning True') break intol = (np.std(self.population_energies) <= self.atol + self.tol * np.abs(np.mean(self.population_energies))) if warning_flag or intol: break else: status_message = _status_message['maxiter'] warning_flag = True DE_result = OptimizeResult( x=self.x, fun=self.population_energies[0], nfev=self._nfev, nit=nit, message=status_message, success=(warning_flag is not True)) if self.polish: result = minimize(self.func, np.copy(DE_result.x), method='L-BFGS-B', bounds=self.limits.T, args=self.args) self._nfev += result.nfev DE_result.nfev = self._nfev if result.fun < DE_result.fun: DE_result.fun = result.fun DE_result.x = result.x DE_result.jac = result.jac # to keep internal state consistent self.population_energies[0] = result.fun self.population[0] = self._unscale_parameters(result.x) return DE_result def _calculate_population_energies(self): """ Calculate the energies of all the population members at the same time. Puts the best member in first place. Useful if the population has just been initialised. """ for index, candidate in enumerate(self.population): if self._nfev > self.maxfun: break parameters = self._scale_parameters(candidate) self.population_energies[index] = self.func(parameters, *self.args) self._nfev += 1 minval = np.argmin(self.population_energies) # put the lowest energy into the best solution position. lowest_energy = self.population_energies[minval] self.population_energies[minval] = self.population_energies[0] self.population_energies[0] = lowest_energy self.population[[0, minval], :] = self.population[[minval, 0], :] def __iter__(self): return self def __next__(self): """ Evolve the population by a single generation Returns ------- x : ndarray The best solution from the solver. fun : float Value of objective function obtained from the best solution. 
""" # the population may have just been initialized (all entries are # np.inf). If it has you have to calculate the initial energies if np.all(np.isinf(self.population_energies)): self._calculate_population_energies() if self.dither is not None: self.scale = (self.random_number_generator.rand() * (self.dither[1] - self.dither[0]) + self.dither[0]) for candidate in range(self.num_population_members): if self._nfev > self.maxfun: raise StopIteration # create a trial solution trial = self._mutate(candidate) # ensuring that it's in the range [0, 1) self._ensure_constraint(trial) # scale from [0, 1) to the actual parameter value parameters = self._scale_parameters(trial) # determine the energy of the objective function energy = self.func(parameters, *self.args) self._nfev += 1 # if the energy of the trial candidate is lower than the # original population member then replace it if energy < self.population_energies[candidate]: self.population[candidate] = trial self.population_energies[candidate] = energy # if the trial candidate also has a lower energy than the # best solution then replace that as well if energy < self.population_energies[0]: self.population_energies[0] = energy self.population[0] = trial return self.x, self.population_energies[0] def next(self): """ Evolve the population by a single generation Returns ------- x : ndarray The best solution from the solver. fun : float Value of objective function obtained from the best solution. """ # next() is required for compatibility with Python2.7. return self.__next__() def _scale_parameters(self, trial): """ scale from a number between 0 and 1 to parameters. """ return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2 def _unscale_parameters(self, parameters): """ scale from parameters to a number between 0 and 1. """ return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5 def _ensure_constraint(self, trial): """ make sure the parameters lie between the limits """ for index in np.where((trial < 0) | (trial > 1))[0]: trial[index] = self.random_number_generator.rand() def _mutate(self, candidate): """ create a trial vector based on a mutation strategy """ trial = np.copy(self.population[candidate]) rng = self.random_number_generator fill_point = rng.randint(0, self.parameter_count) if self.strategy in ['currenttobest1exp', 'currenttobest1bin']: bprime = self.mutation_func(candidate, self._select_samples(candidate, 5)) else: bprime = self.mutation_func(self._select_samples(candidate, 5)) if self.strategy in self._binomial: crossovers = rng.rand(self.parameter_count) crossovers = crossovers < self.cross_over_probability # the last one is always from the bprime vector for binomial # If you fill in modulo with a loop you have to set the last one to # true. If you don't use a loop then you can have any random entry # be True. 
crossovers[fill_point] = True trial = np.where(crossovers, bprime, trial) return trial elif self.strategy in self._exponential: i = 0 while (i < self.parameter_count and rng.rand() < self.cross_over_probability): trial[fill_point] = bprime[fill_point] fill_point = (fill_point + 1) % self.parameter_count i += 1 return trial def _best1(self, samples): """ best1bin, best1exp """ r0, r1 = samples[:2] return (self.population[0] + self.scale * (self.population[r0] - self.population[r1])) def _rand1(self, samples): """ rand1bin, rand1exp """ r0, r1, r2 = samples[:3] return (self.population[r0] + self.scale * (self.population[r1] - self.population[r2])) def _randtobest1(self, samples): """ randtobest1bin, randtobest1exp """ r0, r1, r2 = samples[:3] bprime = np.copy(self.population[r0]) bprime += self.scale * (self.population[0] - bprime) bprime += self.scale * (self.population[r1] - self.population[r2]) return bprime def _currenttobest1(self, candidate, samples): """ currenttobest1bin, currenttobest1exp """ r0, r1 = samples[:2] bprime = (self.population[candidate] + self.scale * (self.population[0] - self.population[candidate] + self.population[r0] - self.population[r1])) return bprime def _best2(self, samples): """ best2bin, best2exp """ r0, r1, r2, r3 = samples[:4] bprime = (self.population[0] + self.scale * (self.population[r0] + self.population[r1] - self.population[r2] - self.population[r3])) return bprime def _rand2(self, samples): """ rand2bin, rand2exp """ r0, r1, r2, r3, r4 = samples bprime = (self.population[r0] + self.scale * (self.population[r1] + self.population[r2] - self.population[r3] - self.population[r4])) return bprime def _select_samples(self, candidate, number_samples): """ obtain random integers from range(self.num_population_members), without replacement. You can't have the original candidate either. """ idxs = list(range(self.num_population_members)) idxs.remove(candidate) self.random_number_generator.shuffle(idxs) idxs = idxs[:number_samples] return idxs
36,263
40.82699
81
py
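The solver above is normally reached through its public wrapper, `scipy.optimize.differential_evolution`. A minimal usage sketch on scipy's built-in Rosenbrock test function; the bounds and seed are arbitrary choices for illustration:

from scipy.optimize import differential_evolution, rosen

# One (min, max) pair per parameter; DE needs only bounds, no gradient.
bounds = [(0, 2), (0, 2)]
result = differential_evolution(rosen, bounds, seed=1, tol=0.01)
print(result.x, result.fun)  # expected to land near [1, 1] with f ~ 0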
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py
""" Functions --------- .. autosummary:: :toctree: generated/ fmin_l_bfgs_b """ ## License for the Python wrapper ## ============================== ## Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca> ## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), ## to deal in the Software without restriction, including without limitation ## the rights to use, copy, modify, merge, publish, distribute, sublicense, ## and/or sell copies of the Software, and to permit persons to whom the ## Software is furnished to do so, subject to the following conditions: ## The above copyright notice and this permission notice shall be included in ## all copies or substantial portions of the Software. ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ## DEALINGS IN THE SOFTWARE. ## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy from __future__ import division, print_function, absolute_import import numpy as np from numpy import array, asarray, float64, int32, zeros from . import _lbfgsb from .optimize import (approx_fprime, MemoizeJac, OptimizeResult, _check_unknown_options, wrap_function, _approx_fprime_helper) from scipy.sparse.linalg import LinearOperator __all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct'] def fmin_l_bfgs_b(func, x0, fprime=None, args=(), approx_grad=0, bounds=None, m=10, factr=1e7, pgtol=1e-5, epsilon=1e-8, iprint=-1, maxfun=15000, maxiter=15000, disp=None, callback=None, maxls=20): """ Minimize a function func using the L-BFGS-B algorithm. Parameters ---------- func : callable f(x,*args) Function to minimise. x0 : ndarray Initial guess. fprime : callable fprime(x,*args), optional The gradient of `func`. If None, then `func` returns the function value and the gradient (``f, g = func(x, *args)``), unless `approx_grad` is True in which case `func` returns only ``f``. args : sequence, optional Arguments to pass to `func` and `fprime`. approx_grad : bool, optional Whether to approximate the gradient numerically (in which case `func` returns only the function value). bounds : list, optional ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None or +-inf for one of ``min`` or ``max`` when there is no bound in that direction. m : int, optional The maximum number of variable metric corrections used to define the limited memory matrix. (The limited memory BFGS method does not store the full hessian but uses this many terms in an approximation to it.) factr : float, optional The iteration stops when ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where ``eps`` is the machine precision, which is automatically generated by the code. Typical values for `factr` are: 1e12 for low accuracy; 1e7 for moderate accuracy; 10.0 for extremely high accuracy. See Notes for relationship to `ftol`, which is exposed (instead of `factr`) by the `scipy.optimize.minimize` interface to L-BFGS-B. 
pgtol : float, optional The iteration will stop when ``max{|proj g_i | i = 1, ..., n} <= pgtol`` where ``pg_i`` is the i-th component of the projected gradient. epsilon : float, optional Step size used when `approx_grad` is True, for numerically calculating the gradient iprint : int, optional Controls the frequency of output. ``iprint < 0`` means no output; ``iprint = 0`` print only one line at the last iteration; ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; ``iprint = 99`` print details of every iteration except n-vectors; ``iprint = 100`` print also the changes of active set and final x; ``iprint > 100`` print details of every iteration including x and g. disp : int, optional If zero, then no output. If a positive number, then this over-rides `iprint` (i.e., `iprint` gets the value of `disp`). maxfun : int, optional Maximum number of function evaluations. maxiter : int, optional Maximum number of iterations. callback : callable, optional Called after each iteration, as ``callback(xk)``, where ``xk`` is the current parameter vector. maxls : int, optional Maximum number of line search steps (per iteration). Default is 20. Returns ------- x : array_like Estimated position of the minimum. f : float Value of `func` at the minimum. d : dict Information dictionary. * d['warnflag'] is - 0 if converged, - 1 if too many function evaluations or too many iterations, - 2 if stopped for another reason, given in d['task'] * d['grad'] is the gradient at the minimum (should be 0 ish) * d['funcalls'] is the number of function calls made. * d['nit'] is the number of iterations. See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'L-BFGS-B' `method` in particular. Note that the `ftol` option is made available via that interface, while `factr` is provided via this interface, where `factr` is the factor multiplying the default machine floating-point precision to arrive at `ftol`: ``ftol = factr * numpy.finfo(float).eps``. Notes ----- License of L-BFGS-B (FORTRAN code): The version included here (in fortran code) is 3.0 (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal <nocedal@ece.nwu.edu>. It carries the following condition for use: This software is freely available, but we expect that all publications describing work using this software, or all commercial products using it, quote at least one of the references given below. This software is released under the BSD License. References ---------- * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound Constrained Optimization, (1995), SIAM Journal on Scientific and Statistical Computing, 16, 5, pp. 1190-1208. * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560. * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (2011), ACM Transactions on Mathematical Software, 38, 1. 
""" # handle fprime/approx_grad if approx_grad: fun = func jac = None elif fprime is None: fun = MemoizeJac(func) jac = fun.derivative else: fun = func jac = fprime # build options if disp is None: disp = iprint opts = {'disp': disp, 'iprint': iprint, 'maxcor': m, 'ftol': factr * np.finfo(float).eps, 'gtol': pgtol, 'eps': epsilon, 'maxfun': maxfun, 'maxiter': maxiter, 'callback': callback, 'maxls': maxls} res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds, **opts) d = {'grad': res['jac'], 'task': res['message'], 'funcalls': res['nfev'], 'nit': res['nit'], 'warnflag': res['status']} f = res['fun'] x = res['x'] return x, f, d def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, disp=None, maxcor=10, ftol=2.2204460492503131e-09, gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000, iprint=-1, callback=None, maxls=20, **unknown_options): """ Minimize a scalar function of one or more variables using the L-BFGS-B algorithm. Options ------- disp : bool Set to True to print convergence messages. maxcor : int The maximum number of variable metric corrections used to define the limited memory matrix. (The limited memory BFGS method does not store the full hessian but uses this many terms in an approximation to it.) ftol : float The iteration stops when ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``. gtol : float The iteration will stop when ``max{|proj g_i | i = 1, ..., n} <= gtol`` where ``pg_i`` is the i-th component of the projected gradient. eps : float Step size used for numerical approximation of the jacobian. disp : int Set to True to print convergence messages. maxfun : int Maximum number of function evaluations. maxiter : int Maximum number of iterations. maxls : int, optional Maximum number of line search steps (per iteration). Default is 20. Notes ----- The option `ftol` is exposed via the `scipy.optimize.minimize` interface, but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The relationship between the two is ``ftol = factr * numpy.finfo(float).eps``. I.e., `factr` multiplies the default machine floating-point precision to arrive at `ftol`. 
""" _check_unknown_options(unknown_options) m = maxcor epsilon = eps pgtol = gtol factr = ftol / np.finfo(float).eps x0 = asarray(x0).ravel() n, = x0.shape if bounds is None: bounds = [(None, None)] * n if len(bounds) != n: raise ValueError('length of x0 != length of bounds') # unbounded variables must use None, not +-inf, for optimizer to work properly bounds = [(None if l == -np.inf else l, None if u == np.inf else u) for l, u in bounds] if disp is not None: if disp == 0: iprint = -1 else: iprint = disp n_function_evals, fun = wrap_function(fun, ()) if jac is None: def func_and_grad(x): f = fun(x, *args) g = _approx_fprime_helper(x, fun, epsilon, args=args, f0=f) return f, g else: def func_and_grad(x): f = fun(x, *args) g = jac(x, *args) return f, g nbd = zeros(n, int32) low_bnd = zeros(n, float64) upper_bnd = zeros(n, float64) bounds_map = {(None, None): 0, (1, None): 1, (1, 1): 2, (None, 1): 3} for i in range(0, n): l, u = bounds[i] if l is not None: low_bnd[i] = l l = 1 if u is not None: upper_bnd[i] = u u = 1 nbd[i] = bounds_map[l, u] if not maxls > 0: raise ValueError('maxls must be positive.') x = array(x0, float64) f = array(0.0, float64) g = zeros((n,), float64) wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64) iwa = zeros(3*n, int32) task = zeros(1, 'S60') csave = zeros(1, 'S60') lsave = zeros(4, int32) isave = zeros(44, int32) dsave = zeros(29, float64) task[:] = 'START' n_iterations = 0 while 1: # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \ _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, pgtol, wa, iwa, task, iprint, csave, lsave, isave, dsave, maxls) task_str = task.tostring() if task_str.startswith(b'FG'): # The minimization routine wants f and g at the current x. # Note that interruptions due to maxfun are postponed # until the completion of the current minimization iteration. # Overwrite f and g: f, g = func_and_grad(x) elif task_str.startswith(b'NEW_X'): # new iteration n_iterations += 1 if callback is not None: callback(np.copy(x)) if n_iterations >= maxiter: task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT' elif n_function_evals[0] > maxfun: task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS ' 'EXCEEDS LIMIT') else: break task_str = task.tostring().strip(b'\x00').strip() if task_str.startswith(b'CONV'): warnflag = 0 elif n_function_evals[0] > maxfun or n_iterations >= maxiter: warnflag = 1 else: warnflag = 2 # These two portions of the workspace are described in the mainlb # subroutine in lbfgsb.f. See line 363. s = wa[0: m*n].reshape(m, n) y = wa[m*n: 2*m*n].reshape(m, n) # See lbfgsb.f line 160 for this portion of the workspace. # isave(31) = the total number of BFGS updates prior the current iteration; n_bfgs_updates = isave[30] n_corrs = min(n_bfgs_updates, maxcor) hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs]) return OptimizeResult(fun=f, jac=g, nfev=n_function_evals[0], nit=n_iterations, status=warnflag, message=task_str, x=x, success=(warnflag == 0), hess_inv=hess_inv) class LbfgsInvHessProduct(LinearOperator): """Linear operator for the L-BFGS approximate inverse Hessian. This operator computes the product of a vector with the approximate inverse of the Hessian of the objective function, using the L-BFGS limited memory approximation to the inverse Hessian, accumulated during the optimization. Objects of this class implement the ``scipy.sparse.linalg.LinearOperator`` interface. Parameters ---------- sk : array_like, shape=(n_corr, n) Array of `n_corr` most recent updates to the solution vector. (See [1]). 
yk : array_like, shape=(n_corr, n) Array of `n_corr` most recent updates to the gradient. (See [1]). References ---------- .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited storage." Mathematics of computation 35.151 (1980): 773-782. """ def __init__(self, sk, yk): """Construct the operator.""" if sk.shape != yk.shape or sk.ndim != 2: raise ValueError('sk and yk must have matching shape, (n_corrs, n)') n_corrs, n = sk.shape super(LbfgsInvHessProduct, self).__init__( dtype=np.float64, shape=(n, n)) self.sk = sk self.yk = yk self.n_corrs = n_corrs self.rho = 1 / np.einsum('ij,ij->i', sk, yk) def _matvec(self, x): """Efficient matrix-vector multiply with the BFGS matrices. This calculation is described in Section (4) of [1]. Parameters ---------- x : ndarray An array with shape (n,) or (n,1). Returns ------- y : ndarray The matrix-vector product """ s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho q = np.array(x, dtype=self.dtype, copy=True) if q.ndim == 2 and q.shape[1] == 1: q = q.reshape(-1) alpha = np.zeros(n_corrs) for i in range(n_corrs-1, -1, -1): alpha[i] = rho[i] * np.dot(s[i], q) q = q - alpha[i]*y[i] r = q for i in range(n_corrs): beta = rho[i] * np.dot(y[i], r) r = r + s[i] * (alpha[i] - beta) return r def todense(self): """Return a dense array representation of this operator. Returns ------- arr : ndarray, shape=(n, n) An array with the same shape and containing the same data represented by this `LinearOperator`. """ s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho I = np.eye(*self.shape, dtype=self.dtype) Hk = I for i in range(n_corrs): A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i] A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i] Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] * s[i][np.newaxis, :]) return Hk
16,922
35.083156
91
py
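A minimal usage sketch for the `fmin_l_bfgs_b` wrapper defined above, with a hand-written quadratic objective and gradient (both hypothetical examples); the bounds clip the second coordinate away from its unconstrained optimum:

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def f(x):
    # simple separable quadratic with unconstrained minimum at (3, -1)
    return (x[0] - 3.0)**2 + (x[1] + 1.0)**2

def grad(x):
    return np.array([2.0 * (x[0] - 3.0), 2.0 * (x[1] + 1.0)])

x, fval, info = fmin_l_bfgs_b(f, x0=np.zeros(2), fprime=grad,
                              bounds=[(0, None), (0, None)])
# x should be close to [3, 0]: the y-coordinate stops at its lower bound,
# and info['warnflag'] should be 0 on successful convergence.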
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_dogleg.py
"""Dog-leg trust-region optimization.""" from __future__ import division, print_function, absolute_import import numpy as np import scipy.linalg from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) __all__ = [] def _minimize_dogleg(fun, x0, args=(), jac=None, hess=None, **trust_region_options): """ Minimization of scalar function of one or more variables using the dog-leg trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than `gtol` before successful termination. """ if jac is None: raise ValueError('Jacobian is required for dogleg minimization') if hess is None: raise ValueError('Hessian is required for dogleg minimization') return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, subproblem=DoglegSubproblem, **trust_region_options) class DoglegSubproblem(BaseQuadraticSubproblem): """Quadratic subproblem solved by the dogleg method""" def cauchy_point(self): """ The Cauchy point is minimal along the direction of steepest descent. """ if self._cauchy_point is None: g = self.jac Bg = self.hessp(g) self._cauchy_point = -(np.dot(g, g) / np.dot(g, Bg)) * g return self._cauchy_point def newton_point(self): """ The Newton point is a global minimum of the approximate function. """ if self._newton_point is None: g = self.jac B = self.hess cho_info = scipy.linalg.cho_factor(B) self._newton_point = -scipy.linalg.cho_solve(cho_info, g) return self._newton_point def solve(self, trust_radius): """ Minimize a function using the dog-leg trust-region algorithm. This algorithm requires function values and first and second derivatives. It also performs a costly Hessian decomposition for most iterations, and the Hessian is required to be positive definite. Parameters ---------- trust_radius : float We are allowed to wander only this far away from the origin. Returns ------- p : ndarray The proposed step. hits_boundary : bool True if the proposed step is on the boundary of the trust region. Notes ----- The Hessian is required to be positive definite. References ---------- .. [1] Jorge Nocedal and Stephen Wright, Numerical Optimization, second edition, Springer-Verlag, 2006, page 73. """ # Compute the Newton point. # This is the optimum for the quadratic model function. # If it is inside the trust radius then return this point. p_best = self.newton_point() if scipy.linalg.norm(p_best) < trust_radius: hits_boundary = False return p_best, hits_boundary # Compute the Cauchy point. # This is the predicted optimum along the direction of steepest descent. p_u = self.cauchy_point() # If the Cauchy point is outside the trust region, # then return the point where the path intersects the boundary. p_u_norm = scipy.linalg.norm(p_u) if p_u_norm >= trust_radius: p_boundary = p_u * (trust_radius / p_u_norm) hits_boundary = True return p_boundary, hits_boundary # Compute the intersection of the trust region boundary # and the line segment connecting the Cauchy and Newton points. # This requires solving a quadratic equation. # ||p_u + t*(p_best - p_u)||**2 == trust_radius**2 # Solve this for positive time t using the quadratic formula. _, tb = self.get_boundaries_intersections(p_u, p_best - p_u, trust_radius) p_boundary = p_u + tb * (p_best - p_u) hits_boundary = True return p_boundary, hits_boundary
4,449
34.6
81
py
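The dogleg subproblem requires a positive definite Hessian, so a convex quadratic is a safe illustration. A minimal sketch reaching the solver through `scipy.optimize.minimize` with `method='dogleg'` (the matrix `A` and vector `b` are arbitrary example data):

import numpy as np
from scipy.optimize import minimize

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])   # symmetric positive definite
b = np.array([1.0, -2.0])

res = minimize(lambda x: 0.5 * np.dot(x, np.dot(A, x)) - np.dot(b, x),
               x0=np.zeros(2), method='dogleg',
               jac=lambda x: np.dot(A, x) - b,
               hess=lambda x: A)
# res.x should agree with the direct solve np.linalg.solve(A, b)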
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/setup.py
from __future__ import division, print_function, absolute_import from os.path import join from scipy._build_utils import numpy_nodepr_api def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info config = Configuration('optimize',parent_package, top_path) minpack_src = [join('minpack','*f')] config.add_library('minpack',sources=minpack_src) config.add_extension('_minpack', sources=['_minpackmodule.c'], libraries=['minpack'], depends=(["minpack.h","__minpack.h"] + minpack_src), **numpy_nodepr_api) rootfind_src = [join('Zeros','*.c')] rootfind_hdr = [join('Zeros','zeros.h')] config.add_library('rootfind', sources=rootfind_src, headers=rootfind_hdr, **numpy_nodepr_api) config.add_extension('_zeros', sources=['zeros.c'], libraries=['rootfind'], depends=(rootfind_src + rootfind_hdr), **numpy_nodepr_api) lapack = get_info('lapack_opt') if 'define_macros' in numpy_nodepr_api: if ('define_macros' in lapack) and (lapack['define_macros'] is not None): lapack['define_macros'] = (lapack['define_macros'] + numpy_nodepr_api['define_macros']) else: lapack['define_macros'] = numpy_nodepr_api['define_macros'] sources = ['lbfgsb.pyf', 'lbfgsb.f', 'linpack.f', 'timer.f'] config.add_extension('_lbfgsb', sources=[join('lbfgsb',x) for x in sources], **lapack) sources = ['moduleTNC.c','tnc.c'] config.add_extension('moduleTNC', sources=[join('tnc',x) for x in sources], depends=[join('tnc','tnc.h')], **numpy_nodepr_api) config.add_extension('_cobyla', sources=[join('cobyla',x) for x in ['cobyla.pyf', 'cobyla2.f', 'trstlp.f']], **numpy_nodepr_api) sources = ['minpack2.pyf', 'dcsrch.f', 'dcstep.f'] config.add_extension('minpack2', sources=[join('minpack2',x) for x in sources], **numpy_nodepr_api) sources = ['slsqp.pyf', 'slsqp_optmz.f'] config.add_extension('_slsqp', sources=[join('slsqp', x) for x in sources], **numpy_nodepr_api) config.add_extension('_nnls', sources=[join('nnls', x) for x in ["nnls.f","nnls.pyf"]], **numpy_nodepr_api) config.add_extension('_group_columns', sources=['_group_columns.c'],) config.add_subpackage('_lsq') config.add_subpackage('_trlib') config.add_subpackage('_trustregion_constr') config.add_data_dir('tests') # Add license files config.add_data_files('lbfgsb/README') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
3,405
36.844444
81
py
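For reference, a minimal sketch of the `numpy.distutils` pattern this setup script relies on: a compiled library plus an f2py-wrapped extension linked against it. All names here (`mypkg`, `mylib`, `myext`, the source paths) are hypothetical placeholders, not part of scipy's build:

from os.path import join
from numpy.distutils.misc_util import Configuration

def configuration(parent_package='', top_path=None):
    config = Configuration('mypkg', parent_package, top_path)
    # compile a helper library from Fortran sources (hypothetical paths)
    config.add_library('mylib', sources=[join('src', '*.f')])
    # build an extension that links against that library
    config.add_extension('myext',
                         sources=[join('src', 'myext.pyf')],
                         libraries=['mylib'])
    return config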
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_exact.py
"""Nearly exact trust-region optimization subproblem.""" from __future__ import division, print_function, absolute_import import numpy as np from scipy.linalg import (norm, get_lapack_funcs, solve_triangular, cho_solve) from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) __all__ = ['_minimize_trustregion_exact', 'estimate_smallest_singular_value', 'singular_leading_submatrix', 'IterativeSubproblem'] def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None, **trust_region_options): """ Minimization of scalar function of one or more variables using a nearly exact trust-region algorithm. Options ------- initial_tr_radius : float Initial trust-region radius. max_tr_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than ``gtol`` before successful termination. """ if jac is None: raise ValueError('Jacobian is required for trust region ' 'exact minimization.') if hess is None: raise ValueError('Hessian matrix is required for trust region ' 'exact minimization.') return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, subproblem=IterativeSubproblem, **trust_region_options) def estimate_smallest_singular_value(U): """Given upper triangular matrix ``U`` estimate the smallest singular value and the correspondent right singular vector in O(n**2) operations. Parameters ---------- U : ndarray Square upper triangular matrix. Returns ------- s_min : float Estimated smallest singular value of the provided matrix. z_min : ndarray Estimatied right singular vector. Notes ----- The procedure is based on [1]_ and is done in two steps. First it finds a vector ``e`` with components selected from {+1, -1} such that the solution ``w`` from the system ``U.T w = e`` is as large as possible. Next it estimate ``U v = w``. The smallest singular value is close to ``norm(w)/norm(v)`` and the right singular vector is close to ``v/norm(v)``. The estimation will be better more ill-conditioned is the matrix. References ---------- .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H. An estimate for the condition number of a matrix. 1979. SIAM Journal on Numerical Analysis, 16(2), 368-375. """ U = np.atleast_2d(U) m, n = U.shape if m != n: raise ValueError("A square triangular matrix should be provided.") # A vector `e` with components selected from {+1, -1} # is selected so that the solution `w` to the system # `U.T w = e` is as large as possible. Implementation # based on algorithm 3.5.1, p. 142, from reference [2] # adapted for lower triangular matrix. p = np.zeros(n) w = np.empty(n) # Implemented according to: Golub, G. H., Van Loan, C. F. (2013). # "Matrix computations". Forth Edition. JHU press. pp. 140-142. for k in range(n): wp = (1-p[k]) / U.T[k, k] wm = (-1-p[k]) / U.T[k, k] pp = p[k+1:] + U.T[k+1:, k]*wp pm = p[k+1:] + U.T[k+1:, k]*wm if abs(wp) + norm(pp, 1) >= abs(wm) + norm(pm, 1): w[k] = wp p[k+1:] = pp else: w[k] = wm p[k+1:] = pm # The system `U v = w` is solved using backward substitution. v = solve_triangular(U, w) v_norm = norm(v) w_norm = norm(w) # Smallest singular value s_min = w_norm / v_norm # Associated vector z_min = v / v_norm return s_min, z_min def gershgorin_bounds(H): """ Given a square matrix ``H`` compute upper and lower bounds for its eigenvalues (Gregoshgorin Bounds). Defined ref. [1]. References ---------- .. [1] Conn, A. R., Gould, N. 
           I., & Toint, P. L.
           Trust region methods. 2000. Siam. pp. 19.
    """
    H_diag = np.diag(H)
    H_diag_abs = np.abs(H_diag)
    H_row_sums = np.sum(np.abs(H), axis=1)
    lb = np.min(H_diag + H_diag_abs - H_row_sums)
    ub = np.max(H_diag - H_diag_abs + H_row_sums)

    return lb, ub


def singular_leading_submatrix(A, U, k):
    """
    Compute term that makes the leading ``k`` by ``k``
    submatrix from ``A`` singular.

    Parameters
    ----------
    A : ndarray
        Symmetric matrix that is not positive definite.
    U : ndarray
        Upper triangular matrix resulting from an incomplete Cholesky
        decomposition of matrix ``A``.
    k : int
        Positive integer such that the leading k by k submatrix from
        `A` is the first non-positive definite leading submatrix.

    Returns
    -------
    delta : float
        Amount that should be added to the element (k, k) of the
        leading k by k submatrix of ``A`` to make it singular.
    v : ndarray
        A vector such that ``v.T B v = 0``. Where B is the matrix A after
        ``delta`` is added to its element (k, k).
    """

    # Compute delta
    delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1]

    n = len(A)

    # Initialize v
    v = np.zeros(n)
    v[k-1] = 1

    # Compute the remaining values of v by solving a triangular system.
    if k != 1:
        v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1])

    return delta, v


class IterativeSubproblem(BaseQuadraticSubproblem):
    """Quadratic subproblem solved by nearly exact iterative method.

    Notes
    -----
    This subproblem solver was based on [1]_, [2]_ and [3]_,
    which implement similar algorithms. The algorithm is basically
    that of [1]_ but ideas from [2]_ and [3]_ were also used.

    References
    ----------
    .. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods",
           Siam, pp. 169-200, 2000.
    .. [2] J. Nocedal and S. Wright, "Numerical optimization",
           Springer Science & Business Media. pp. 83-91, 2006.
    .. [3] J.J. More and D.C. Sorensen, "Computing a trust region step",
           SIAM Journal on Scientific and Statistical Computing, vol. 4(3),
           pp. 553-572, 1983.
    """

    # UPDATE_COEFF appears in reference [1]_
    # in formula 7.3.14 (p. 190) named as "theta".
    # As recommended there, its value is fixed at 0.01.
    UPDATE_COEFF = 0.01

    EPS = np.finfo(float).eps

    def __init__(self, x, fun, jac, hess, hessp=None,
                 k_easy=0.1, k_hard=0.2):

        super(IterativeSubproblem, self).__init__(x, fun, jac, hess)

        # When the trust-region shrinks in two consecutive
        # calculations (``tr_radius < previous_tr_radius``)
        # the lower bound ``lambda_lb`` may be reused,
        # facilitating the convergence. To indicate no
        # previous value is known at first ``previous_tr_radius``
        # is set to -1 and ``lambda_lb`` to None.
        self.previous_tr_radius = -1
        self.lambda_lb = None

        self.niter = 0

        # ``k_easy`` and ``k_hard`` are parameters used
        # to determine the stop criteria to the iterative
        # subproblem solver. Take a look at pp. 194-197
        # from reference [1]_ for a more detailed description.
        self.k_easy = k_easy
        self.k_hard = k_hard

        # Get Lapack function for cholesky decomposition.
        # The implemented Scipy wrapper does not return
        # the incomplete factorization needed by the method.
        self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))

        # Get info about Hessian
        self.dimension = len(self.hess)
        self.hess_gershgorin_lb,\
            self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
        self.hess_inf = norm(self.hess, np.Inf)
        self.hess_fro = norm(self.hess, 'fro')

        # A constant such that for vectors smaller than that
        # backward substitution is not reliable. It was established
        # based on Golub, G. H., Van Loan, C. F. (2013).
        # "Matrix computations". Fourth Edition. JHU press., p.165.
self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf

    def _initial_values(self, tr_radius):
        """Given a trust radius, return a good initial guess for
        the damping factor, the lower bound and the upper bound.
        The values were chosen according to the guidelines in
        section 7.3.8 (p. 192) of [1]_.
        """

        # Upper bound for the damping factor
        lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb,
                                                        self.hess_fro,
                                                        self.hess_inf))

        # Lower bound for the damping factor
        lambda_lb = max(0, -min(self.hess.diagonal()),
                        self.jac_mag/tr_radius - min(self.hess_gershgorin_ub,
                                                     self.hess_fro,
                                                     self.hess_inf))

        # Improve bounds with previous info
        if tr_radius < self.previous_tr_radius:
            lambda_lb = max(self.lambda_lb, lambda_lb)

        # Initial guess for the damping factor
        if lambda_lb == 0:
            lambda_initial = 0
        else:
            lambda_initial = max(np.sqrt(lambda_lb * lambda_ub),
                                 lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

        return lambda_initial, lambda_lb, lambda_ub

    def solve(self, tr_radius):
        """Solve quadratic subproblem"""

        lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
        n = self.dimension
        hits_boundary = True
        already_factorized = False
        self.niter = 0

        while True:

            # Compute Cholesky factorization
            if already_factorized:
                already_factorized = False
            else:
                H = self.hess+lambda_current*np.eye(n)
                U, info = self.cholesky(H, lower=False,
                                        overwrite_a=False,
                                        clean=True)

            self.niter += 1

            # Check if factorization succeeded
            if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
                # Successful factorization

                # Solve `U.T U p = -g`
                p = cho_solve((U, False), -self.jac)

                p_norm = norm(p)

                # Check for interior convergence
                if p_norm <= tr_radius and lambda_current == 0:
                    hits_boundary = False
                    break

                # Solve `U.T w = p`
                w = solve_triangular(U, p, trans='T')

                w_norm = norm(w)

                # Compute Newton step according to
                # formula (4.44), p. 87, from ref [2]_.
                delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
                lambda_new = lambda_current + delta_lambda

                if p_norm < tr_radius:  # Inside boundary
                    s_min, z_min = estimate_smallest_singular_value(U)

                    ta, tb = self.get_boundaries_intersections(p, z_min,
                                                               tr_radius)

                    # Choose `step_len` with the smallest magnitude.
                    # The reason for this choice is explained at
                    # ref [3]_, p. 6 (Immediately before the formula
                    # for `tau`).
step_len = min([ta, tb], key=abs)

                    # Compute the quadratic term (p.T*H*p)
                    quadratic_term = np.dot(p, np.dot(H, p))

                    # Check stop criteria
                    relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambda_current*tr_radius**2)
                    if relative_error <= self.k_hard:
                        p += step_len * z_min
                        break

                    # Update uncertainty bounds
                    lambda_ub = lambda_current
                    lambda_lb = max(lambda_lb, lambda_current - s_min**2)

                    # Compute Cholesky factorization
                    H = self.hess + lambda_new*np.eye(n)
                    c, info = self.cholesky(H, lower=False,
                                            overwrite_a=False,
                                            clean=True)

                    # Check if the factorization has succeeded
                    if info == 0:  # Successful factorization
                        # Update damping factor
                        lambda_current = lambda_new
                        already_factorized = True
                    else:  # Unsuccessful factorization
                        # Update uncertainty bounds
                        lambda_lb = max(lambda_lb, lambda_new)

                        # Update damping factor
                        lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                             lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

                else:  # Outside boundary
                    # Check stop criteria
                    relative_error = abs(p_norm - tr_radius) / tr_radius
                    if relative_error <= self.k_easy:
                        break

                    # Update uncertainty bounds
                    lambda_lb = lambda_current

                    # Update damping factor
                    lambda_current = lambda_new

            elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
                # jac_mag very close to zero

                # Check for interior convergence
                if lambda_current == 0:
                    p = np.zeros(n)
                    hits_boundary = False
                    break

                s_min, z_min = estimate_smallest_singular_value(U)
                step_len = tr_radius

                # Check stop criteria
                if step_len**2 * s_min**2 <= self.k_hard * lambda_current * tr_radius**2:
                    p = step_len * z_min
                    break

                # Update uncertainty bounds
                lambda_ub = lambda_current
                lambda_lb = max(lambda_lb, lambda_current - s_min**2)

                # Update damping factor
                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

            else:  # Unsuccessful factorization

                # Compute auxiliary terms
                delta, v = singular_leading_submatrix(H, U, info)
                v_norm = norm(v)

                # Update uncertainty interval
                lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)

                # Update damping factor
                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

        self.lambda_lb = lambda_lb
        self.lambda_current = lambda_current
        self.previous_tr_radius = tr_radius

        return p, hits_boundary
15,492
34.7806
110
py
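`IterativeSubproblem` is reached through `scipy.optimize.minimize` with `method='trust-exact'`, which needs the gradient and the full Hessian; indefinite Hessians are handled, which is the point of the nearly exact solver. A minimal sketch on scipy's built-in Rosenbrock helpers, with an arbitrary starting point:

from scipy.optimize import minimize, rosen, rosen_der, rosen_hess

res = minimize(rosen, x0=[-1.2, 1.0], method='trust-exact',
               jac=rosen_der, hess=rosen_hess)
print(res.x)  # expected to converge to approximately [1, 1]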
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_tstutils.py
''' Parameters used in test and benchmark methods '''
from __future__ import division, print_function, absolute_import

from random import random

from scipy.optimize import zeros as cc


def f1(x):
    return x*(x-1.)


def f2(x):
    return x**2 - 1


def f3(x):
    return x*(x-1.)*(x-2.)*(x-3.)


def f4(x):
    if x > 1:
        return 1.0 + .1*x
    if x < 1:
        return -1.0 + .1*x
    return 0


def f5(x):
    if x != 1:
        return 1.0/(1. - x)
    return 0


def f6(x):
    if x > 1:
        return random()
    elif x < 1:
        return -random()
    else:
        return 0


description = """
f2 is a symmetric parabola, x**2 - 1
f3 is a quartic polynomial with large hump in interval
f4 is step function with a discontinuity at 1
f5 is a hyperbola with vertical asymptote at 1
f6 has random values positive to left of 1, negative to right

Of course, these are not real problems. They just test how the
'good' solvers behave in bad circumstances where bisection is
really the best. A good solver should not be much worse than
bisection in such circumstances, while being faster for smooth
monotone sorts of functions.
"""

methods = [cc.bisect, cc.ridder, cc.brenth, cc.brentq]
mstrings = ['cc.bisect', 'cc.ridder', 'cc.brenth', 'cc.brentq']
functions = [f2, f3, f4, f5, f6]
fstrings = ['f2', 'f3', 'f4', 'f5', 'f6']
1,323
20.354839
64
py
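A minimal sketch of how these fixtures are exercised: each bracketing solver is applied to f2 (root at x = 1) on an interval where the function changes sign.

from scipy.optimize import zeros as cc

def f2(x):
    return x**2 - 1  # sign change on [0.5, 1.5], root at x = 1

for solver in (cc.bisect, cc.ridder, cc.brenth, cc.brentq):
    print(solver.__name__, solver(f2, 0.5, 1.5))  # each should print ~1.0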
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_ncg.py
"""Newton-CG trust-region optimization.""" from __future__ import division, print_function, absolute_import import math import numpy as np import scipy.linalg from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) __all__ = [] def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None, **trust_region_options): """ Minimization of scalar function of one or more variables using the Newton conjugate gradient trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than `gtol` before successful termination. """ if jac is None: raise ValueError('Jacobian is required for Newton-CG trust-region ' 'minimization') if hess is None and hessp is None: raise ValueError('Either the Hessian or the Hessian-vector product ' 'is required for Newton-CG trust-region minimization') return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, subproblem=CGSteihaugSubproblem, **trust_region_options) class CGSteihaugSubproblem(BaseQuadraticSubproblem): """Quadratic subproblem solved by a conjugate gradient method""" def solve(self, trust_radius): """ Solve the subproblem using a conjugate gradient method. Parameters ---------- trust_radius : float We are allowed to wander only this far away from the origin. Returns ------- p : ndarray The proposed step. hits_boundary : bool True if the proposed step is on the boundary of the trust region. Notes ----- This is algorithm (7.2) of Nocedal and Wright 2nd edition. Only the function that computes the Hessian-vector product is required. The Hessian itself is not required, and the Hessian does not need to be positive semidefinite. """ # get the norm of jacobian and define the origin p_origin = np.zeros_like(self.jac) # define a default tolerance tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag # Stop the method if the search direction # is a direction of nonpositive curvature. if self.jac_mag < tolerance: hits_boundary = False return p_origin, hits_boundary # init the state for the first iteration z = p_origin r = self.jac d = -r # Search for the min of the approximation of the objective function. while True: # do an iteration Bd = self.hessp(d) dBd = np.dot(d, Bd) if dBd <= 0: # Look at the two boundary points. # Find both values of t to get the boundary points such that # ||z + t d|| == trust_radius # and then choose the one with the predicted min value. ta, tb = self.get_boundaries_intersections(z, d, trust_radius) pa = z + ta * d pb = z + tb * d if self(pa) < self(pb): p_boundary = pa else: p_boundary = pb hits_boundary = True return p_boundary, hits_boundary r_squared = np.dot(r, r) alpha = r_squared / dBd z_next = z + alpha * d if scipy.linalg.norm(z_next) >= trust_radius: # Find t >= 0 to get the boundary point such that # ||z + t d|| == trust_radius ta, tb = self.get_boundaries_intersections(z, d, trust_radius) p_boundary = z + tb * d hits_boundary = True return p_boundary, hits_boundary r_next = r + alpha * Bd r_next_squared = np.dot(r_next, r_next) if math.sqrt(r_next_squared) < tolerance: hits_boundary = False return z_next, hits_boundary beta_next = r_next_squared / r_squared d_next = -r_next + beta_next * d # update the state for the next iteration z = z_next r = r_next d = d_next
4,646
35.023256
79
py
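The CG-Steihaug subproblem above is reached through `scipy.optimize.minimize` with `method='trust-ncg'`; only a Hessian-vector product is needed, supplied here via scipy's built-in `rosen_hess_prod`. A minimal sketch with an arbitrary starting point:

from scipy.optimize import minimize, rosen, rosen_der, rosen_hess_prod

res = minimize(rosen, x0=[-1.2, 1.0], method='trust-ncg',
               jac=rosen_der, hessp=rosen_hess_prod)
print(res.x)  # expected to converge to approximately [1, 1]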
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_root.py
""" Unified interfaces to root finding algorithms. Functions --------- - root : find a root of a vector function. """ from __future__ import division, print_function, absolute_import __all__ = ['root'] import numpy as np from scipy._lib.six import callable from warnings import warn from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options from .minpack import _root_hybr, leastsq from ._spectral import _root_df_sane from . import nonlin def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, options=None): """ Find a root of a vector function. Parameters ---------- fun : callable A vector function to find a root of. x0 : ndarray Initial guess. args : tuple, optional Extra arguments passed to the objective function and its Jacobian. method : str, optional Type of solver. Should be one of - 'hybr' :ref:`(see here) <optimize.root-hybr>` - 'lm' :ref:`(see here) <optimize.root-lm>` - 'broyden1' :ref:`(see here) <optimize.root-broyden1>` - 'broyden2' :ref:`(see here) <optimize.root-broyden2>` - 'anderson' :ref:`(see here) <optimize.root-anderson>` - 'linearmixing' :ref:`(see here) <optimize.root-linearmixing>` - 'diagbroyden' :ref:`(see here) <optimize.root-diagbroyden>` - 'excitingmixing' :ref:`(see here) <optimize.root-excitingmixing>` - 'krylov' :ref:`(see here) <optimize.root-krylov>` - 'df-sane' :ref:`(see here) <optimize.root-dfsane>` jac : bool or callable, optional If `jac` is a Boolean and is True, `fun` is assumed to return the value of Jacobian along with the objective function. If False, the Jacobian will be estimated numerically. `jac` can also be a callable returning the Jacobian of `fun`. In this case, it must accept the same arguments as `fun`. tol : float, optional Tolerance for termination. For detailed control, use solver-specific options. callback : function, optional Optional callback function. It is called on every iteration as ``callback(x, f)`` where `x` is the current solution and `f` the corresponding residual. For all methods but 'hybr' and 'lm'. options : dict, optional A dictionary of solver options. E.g. `xtol` or `maxiter`, see :obj:`show_options()` for details. Returns ------- sol : OptimizeResult The solution represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the algorithm exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *hybr*. Method *hybr* uses a modification of the Powell hybrid method as implemented in MINPACK [1]_. Method *lm* solves the system of nonlinear equations in a least squares sense using a modification of the Levenberg-Marquardt algorithm as implemented in MINPACK [1]_. Method *df-sane* is a derivative-free spectral method. [3]_ Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*, *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods, with backtracking or full line searches [2]_. Each method corresponds to a particular Jacobian approximations. See `nonlin` for details. - Method *broyden1* uses Broyden's first Jacobian approximation, it is known as Broyden's good method. - Method *broyden2* uses Broyden's second Jacobian approximation, it is known as Broyden's bad method. 
- Method *anderson* uses (extended) Anderson mixing.
    - Method *krylov* uses a Krylov approximation for the inverse Jacobian.
      It is suitable for large-scale problems.
    - Method *diagbroyden* uses diagonal Broyden Jacobian approximation.
    - Method *linearmixing* uses a scalar Jacobian approximation.
    - Method *excitingmixing* uses a tuned diagonal Jacobian
      approximation.

    .. warning::

        The algorithms implemented for methods *diagbroyden*,
        *linearmixing* and *excitingmixing* may be useful for specific
        problems, but whether they will work may depend strongly on the
        problem.

    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom.
       1980. User Guide for MINPACK-1.
    .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear
        Equations. Society for Industrial and Applied Mathematics.
        <http://www.siam.org/books/kelley/fr16/index.php>
    .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006).

    Examples
    --------
    The following functions define a system of nonlinear equations and its
    jacobian.

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    >>> def jac(x):
    ...     return np.array([[1 + 1.5 * (x[0] - x[1])**2,
    ...                       -1.5 * (x[0] - x[1])**2],
    ...                      [-1.5 * (x[1] - x[0])**2,
    ...                       1 + 1.5 * (x[1] - x[0])**2]])

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr')
    >>> sol.x
    array([ 0.8411639,  0.1588361])

    """
    if not isinstance(args, tuple):
        args = (args,)

    meth = method.lower()
    if options is None:
        options = {}

    if callback is not None and meth in ('hybr', 'lm'):
        warn('Method %s does not accept callback.' % method,
             RuntimeWarning)

    # fun also returns the jacobian
    if not callable(jac) and meth in ('hybr', 'lm'):
        if bool(jac):
            fun = MemoizeJac(fun)
            jac = fun.derivative
        else:
            jac = None

    # set default tolerances
    if tol is not None:
        options = dict(options)
        if meth in ('hybr', 'lm'):
            options.setdefault('xtol', tol)
        elif meth in ('df-sane',):
            options.setdefault('ftol', tol)
        elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
                      'diagbroyden', 'excitingmixing', 'krylov'):
            options.setdefault('xtol', tol)
            options.setdefault('xatol', np.inf)
            options.setdefault('ftol', np.inf)
            options.setdefault('fatol', np.inf)

    if meth == 'hybr':
        sol = _root_hybr(fun, x0, args=args, jac=jac, **options)
    elif meth == 'lm':
        sol = _root_leastsq(fun, x0, args=args, jac=jac, **options)
    elif meth == 'df-sane':
        _warn_jac_unused(jac, method)
        sol = _root_df_sane(fun, x0, args=args, callback=callback,
                            **options)
    elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
                  'diagbroyden', 'excitingmixing', 'krylov'):
        _warn_jac_unused(jac, method)
        sol = _root_nonlin_solve(fun, x0, args=args, jac=jac,
                                 _method=meth, _callback=callback,
                                 **options)
    else:
        raise ValueError('Unknown solver %s' % method)

    return sol


def _warn_jac_unused(jac, method):
    if jac is not None:
        warn('Method %s does not use the jacobian (jac).' % (method,),
             RuntimeWarning)


def _root_leastsq(func, x0, args=(), jac=None,
                  col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08,
                  gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None,
                  **unknown_options):
    """
    Solve for least squares with Levenberg-Marquardt

    Options
    -------
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
gtol : float Orthogonality desired between the function vector and the columns of the Jacobian. maxiter : int The maximum number of calls to the function. If zero, then 100*(N+1) is the maximum where N is the number of elements in x0. epsfcn : float A suitable step length for the forward-difference approximation of the Jacobian (for Dfun=None). If epsfcn is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float A parameter determining the initial step bound (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. diag : sequence N positive entries that serve as a scale factors for the variables. """ _check_unknown_options(unknown_options) x, cov_x, info, msg, ier = leastsq(func, x0, args=args, Dfun=jac, full_output=True, col_deriv=col_deriv, xtol=xtol, ftol=ftol, gtol=gtol, maxfev=maxiter, epsfcn=eps, factor=factor, diag=diag) sol = OptimizeResult(x=x, message=msg, status=ier, success=ier in (1, 2, 3, 4), cov_x=cov_x, fun=info.pop('fvec')) sol.update(info) return sol def _root_nonlin_solve(func, x0, args=(), jac=None, _callback=None, _method=None, nit=None, disp=False, maxiter=None, ftol=None, fatol=None, xtol=None, xatol=None, tol_norm=None, line_search='armijo', jac_options=None, **unknown_options): _check_unknown_options(unknown_options) f_tol = fatol f_rtol = ftol x_tol = xatol x_rtol = xtol verbose = disp if jac_options is None: jac_options = dict() jacobian = {'broyden1': nonlin.BroydenFirst, 'broyden2': nonlin.BroydenSecond, 'anderson': nonlin.Anderson, 'linearmixing': nonlin.LinearMixing, 'diagbroyden': nonlin.DiagBroyden, 'excitingmixing': nonlin.ExcitingMixing, 'krylov': nonlin.KrylovJacobian }[_method] if args: if jac: def f(x): return func(x, *args)[0] else: def f(x): return func(x, *args) else: f = func x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options), iter=nit, verbose=verbose, maxiter=maxiter, f_tol=f_tol, f_rtol=f_rtol, x_tol=x_tol, x_rtol=x_rtol, tol_norm=tol_norm, line_search=line_search, callback=_callback, full_output=True, raise_exception=False) sol = OptimizeResult(x=x) sol.update(info) return sol def _root_broyden1_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). 
reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart``: drop all matrix columns. Has no extra parameters. - ``simple``: drop oldest matrix column. Has no extra parameters. - ``svd``: keep only the most significant SVD components. Extra parameters: - ``to_retain``: number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (ie., no rank reduction). """ pass def _root_broyden2_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart``: drop all matrix columns. Has no extra parameters. - ``simple``: drop oldest matrix column. Has no extra parameters. - ``svd``: keep only the most significant SVD components. Extra parameters: - ``to_retain``: number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (ie., no rank reduction). """ pass def _root_anderson_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. 
If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). M : float, optional Number of previous vectors to retain. Defaults to 5. w0 : float, optional Regularization parameter for numerical stability. Compared to unity, good values are of the order of 0.01. """ pass def _root_linearmixing_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, ``NoConvergence`` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). """ pass def _root_diagbroyden_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). """ pass def _root_excitingmixing_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances.
disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial Jacobian approximation is (-1/alpha). alphamax : float, optional The entries of the diagonal Jacobian are kept in the range ``[alpha, alphamax]``. """ pass def _root_krylov_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. rdiff : float, optional Relative step size to use in numerical differentiation. method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function Krylov method to use to approximate the Jacobian. Can be a string, or a function implementing the same interface as the iterative solvers in `scipy.sparse.linalg`. The default is `scipy.sparse.linalg.lgmres`. inner_M : LinearOperator or InverseJacobian Preconditioner for the inner Krylov iteration. Note that you can also use inverse Jacobians as (adaptive) preconditioners. For example, >>> jac = BroydenFirst() >>> kjac = KrylovJacobian(inner_M=jac.inverse) If the preconditioner has a method named 'update', it will be called as ``update(x, f)`` after each nonlinear step, with ``x`` giving the current point, and ``f`` the current function value. inner_tol, inner_maxiter, ... Parameters to pass on to the "inner" Krylov solver. See `scipy.sparse.linalg.gmres` for details. outer_k : int, optional Size of the subspace kept across LGMRES nonlinear iterations. See `scipy.sparse.linalg.lgmres` for details. """ pass
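# A minimal usage sketch: the option docstrings above are consumed through
# the public `scipy.optimize.root` front end, which dispatches to
# `_root_nonlin_solve`. The small coupled cubic test system below is an
# illustrative assumption, not part of the solver itself.
if __name__ == '__main__':
    import numpy as np
    from scipy.optimize import root

    def _demo_residual(x):
        # F(x) = 0 for a 2-variable coupled cubic system.
        return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
                0.5 * (x[1] - x[0])**3 + x[1]]

    # `fatol` and `jac_options` are exactly the options documented above.
    demo = root(_demo_residual, np.zeros(2), method='broyden1',
                options={'fatol': 6e-6, 'jac_options': {'alpha': 0.5}})
    print(demo.x, demo.success)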
26,020
39.594384
81
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_linprog.py
""" A top-level linear programming interface. Currently this interface only solves linear programming problems via the Simplex Method. .. versionadded:: 0.15.0 Functions --------- .. autosummary:: :toctree: generated/ linprog linprog_verbose_callback linprog_terse_callback """ from __future__ import division, print_function, absolute_import import numpy as np from .optimize import OptimizeResult, _check_unknown_options from ._linprog_ip import _linprog_ip __all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback'] __docformat__ = "restructuredtext en" def linprog_verbose_callback(xk, **kwargs): """ A sample callback function demonstrating the linprog callback interface. This callback produces detailed output to sys.stdout before each iteration and after the final iteration of the simplex algorithm. Parameters ---------- xk : array_like The current solution vector. **kwargs : dict A dictionary containing the following parameters: tableau : array_like The current tableau of the simplex algorithm. Its structure is defined in _solve_simplex. phase : int The current Phase of the simplex algorithm (1 or 2) nit : int The current iteration number. pivot : tuple(int, int) The index of the tableau selected as the next pivot, or nan if no pivot exists basis : array(int) A list of the current basic variables. Each element contains the name of a basic variable and its value. complete : bool True if the simplex algorithm has completed (and this is the final call to callback), otherwise False. """ tableau = kwargs["tableau"] nit = kwargs["nit"] pivrow, pivcol = kwargs["pivot"] phase = kwargs["phase"] basis = kwargs["basis"] complete = kwargs["complete"] saved_printoptions = np.get_printoptions() np.set_printoptions(linewidth=500, formatter={'float': lambda x: "{0: 12.4f}".format(x)}) if complete: print("--------- Iteration Complete - Phase {0:d} -------\n".format(phase)) print("Tableau:") elif nit == 0: print("--------- Initial Tableau - Phase {0:d} ----------\n".format(phase)) else: print("--------- Iteration {0:d} - Phase {1:d} --------\n".format(nit, phase)) print("Tableau:") if nit >= 0: print("" + str(tableau) + "\n") if not complete: print("Pivot Element: T[{0:.0f}, {1:.0f}]\n".format(pivrow, pivcol)) print("Basic Variables:", basis) print() print("Current Solution:") print("x = ", xk) print() print("Current Objective Value:") print("f = ", -tableau[-1, -1]) print() np.set_printoptions(**saved_printoptions) def linprog_terse_callback(xk, **kwargs): """ A sample callback function demonstrating the linprog callback interface. This callback produces brief output to sys.stdout before each iteration and after the final iteration of the simplex algorithm. Parameters ---------- xk : array_like The current solution vector. **kwargs : dict A dictionary containing the following parameters: tableau : array_like The current tableau of the simplex algorithm. Its structure is defined in _solve_simplex. vars : tuple(str, ...) Column headers for each column in tableau. "x[i]" for actual variables, "s[i]" for slack surplus variables, "a[i]" for artificial variables, and "RHS" for the constraint RHS vector. phase : int The current Phase of the simplex algorithm (1 or 2) nit : int The current iteration number. pivot : tuple(int, int) The index of the tableau selected as the next pivot, or nan if no pivot exists basics : list[tuple(int, float)] A list of the current basic variables. Each element contains the index of a basic variable and its value. 
complete : bool True if the simplex algorithm has completed (and this is the final call to callback), otherwise False. """ nit = kwargs["nit"] if nit == 0: print("Iter: X:") print("{0: <5d} ".format(nit), end="") print(xk) def _pivot_col(T, tol=1.0E-12, bland=False): """ Given a linear programming simplex tableau, determine the column of the variable to enter the basis. Parameters ---------- T : 2D ndarray The simplex tableau. tol : float Elements in the objective row larger than -tol will not be considered for pivoting. Nominally this value is zero, but numerical issues cause a tolerance about zero to be necessary. bland : bool If True, use Bland's rule for selection of the column (select the first column with a negative coefficient in the objective row, regardless of magnitude). Returns ------- status: bool True if a suitable pivot column was found, otherwise False. A return of False indicates that the linear programming simplex algorithm is complete. col: int The index of the column of the pivot element. If status is False, col will be returned as nan. """ ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False) if ma.count() == 0: return False, np.nan if bland: return True, np.where(ma.mask == False)[0][0] return True, np.ma.where(ma == ma.min())[0][0] def _pivot_row(T, basis, pivcol, phase, tol=1.0E-12, bland=False): """ Given a linear programming simplex tableau, determine the row for the pivot operation. Parameters ---------- T : 2D ndarray The simplex tableau. basis : array A list of the current basic variables. pivcol : int The index of the pivot column. phase : int The phase of the simplex algorithm (1 or 2). tol : float Elements in the pivot column smaller than tol will not be considered for pivoting. Nominally this value is zero, but numerical issues cause a tolerance about zero to be necessary. bland : bool If True, use Bland's rule for selection of the row (if more than one row can be used, choose the one with the lowest variable index). Returns ------- status: bool True if a suitable pivot row was found, otherwise False. A return of False indicates that the linear programming problem is unbounded. row: int The index of the row of the pivot element. If status is False, row will be returned as nan. """ if phase == 1: k = 2 else: k = 1 ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False) if ma.count() == 0: return False, np.nan mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False) q = mb / ma min_rows = np.ma.where(q == q.min())[0] if bland: return True, min_rows[np.argmin(np.take(basis, min_rows))] return True, min_rows[0] def _solve_simplex(T, n, basis, maxiter=1000, phase=2, callback=None, tol=1.0E-12, nit0=0, bland=False): """ Solve a linear programming problem in "standard form" using the Simplex Method. Minimize :math:`f = c^T x` subject to .. math:: Ax = b x_i >= 0 b_j >= 0 Parameters ---------- T : array_like A 2-D array representing the simplex T corresponding to the minimization problem. It should have the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0]] for a Phase 2 problem, or the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . .
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0], [c'[0], c'[1], ..., c'[n_total], 0]] for a Phase 1 problem (a problem in which a basic feasible solution is sought prior to optimizing the actual objective). T is modified in place by _solve_simplex. n : int The number of true variables in the problem. basis : array An array of the indices of the basic variables, such that basis[i] contains the column corresponding to the basic variable for row i. Basis is modified in place by _solve_simplex. maxiter : int The maximum number of iterations to perform before aborting the optimization. phase : int The phase of the optimization being executed. In phase 1 a basic feasible solution is sought and the T has an additional row representing an alternate objective function. callback : callable, optional If a callback function is provided, it will be called within each iteration of the simplex algorithm. The callback must have the signature `callback(xk, **kwargs)` where xk is the current solution vector and kwargs is a dictionary containing the following:: "T" : The current Simplex algorithm T "nit" : The current iteration. "pivot" : The pivot (row, column) used for the next iteration. "phase" : Whether the algorithm is in Phase 1 or Phase 2. "basis" : The indices of the columns of the basic variables. tol : float The tolerance which determines when a solution is "close enough" to zero in Phase 1 to be considered a basic feasible solution or close enough to positive to serve as an optimal solution. nit0 : int The initial iteration number used to keep an accurate iteration total in a two-phase problem. bland : bool If True, choose pivots using Bland's rule [3]. In problems which fail to converge due to cycling, using Bland's rule can provide convergence at the expense of a less optimal path about the simplex. Returns ------- nit : int The total number of iterations performed, including those from previous phases (``nit0``). status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded """ nit = nit0 complete = False if phase == 1: m = T.shape[0]-2 elif phase == 2: m = T.shape[0]-1 else: raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2") if phase == 2: # Check if any artificial variables are still in the basis. # If yes, check if any coefficients from this row and a column # corresponding to one of the non-artificial variables is non-zero. # If found, pivot at this term. If not, start phase 2. # Do this for all artificial variables in the basis. # Ref: "An Introduction to Linear Programming and Game Theory" # by Paul R. Thie, Gerard E.
Keough, 3rd Ed, # Chapter 3.7 Redundant Systems (page 102) for pivrow in [row for row in range(basis.size) if basis[row] > T.shape[1] - 2]: non_zero_row = [col for col in range(T.shape[1] - 1) if T[pivrow, col] != 0] if len(non_zero_row) > 0: pivcol = non_zero_row[0] # variable represented by pivcol enters # variable in basis[pivrow] leaves basis[pivrow] = pivcol pivval = T[pivrow][pivcol] T[pivrow, :] = T[pivrow, :] / pivval for irow in range(T.shape[0]): if irow != pivrow: T[irow, :] = T[irow, :] - T[pivrow, :]*T[irow, pivcol] nit += 1 if len(basis[:m]) == 0: solution = np.zeros(T.shape[1] - 1, dtype=np.float64) else: solution = np.zeros(max(T.shape[1] - 1, max(basis[:m]) + 1), dtype=np.float64) while not complete: # Find the pivot column pivcol_found, pivcol = _pivot_col(T, tol, bland) if not pivcol_found: pivcol = np.nan pivrow = np.nan status = 0 complete = True else: # Find the pivot row pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland) if not pivrow_found: status = 3 complete = True if callback is not None: solution[:] = 0 solution[basis[:m]] = T[:m, -1] callback(solution[:n], **{"tableau": T, "phase": phase, "nit": nit, "pivot": (pivrow, pivcol), "basis": basis, "complete": complete and phase == 2}) if not complete: if nit >= maxiter: # Iteration limit exceeded status = 1 complete = True else: # variable represented by pivcol enters # variable in basis[pivrow] leaves basis[pivrow] = pivcol pivval = T[pivrow][pivcol] T[pivrow, :] = T[pivrow, :] / pivval for irow in range(T.shape[0]): if irow != pivrow: T[irow, :] = T[irow, :] - T[pivrow, :]*T[irow, pivcol] nit += 1 return nit, status def _linprog_simplex(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, maxiter=1000, disp=False, callback=None, tol=1.0E-12, bland=False, **unknown_options): """ Solve the following linear programming problem via a two-phase simplex algorithm:: minimize: c^T * x subject to: A_ub * x <= b_ub A_eq * x == b_eq Parameters ---------- c : array_like Coefficients of the linear objective function to be minimized. A_ub : array_like 2-D array which, when matrix-multiplied by ``x``, gives the values of the upper-bound inequality constraints at ``x``. b_ub : array_like 1-D array of values representing the upper-bound of each inequality constraint (row) in ``A_ub``. A_eq : array_like 2-D array which, when matrix-multiplied by ``x``, gives the values of the equality constraints at ``x``. b_eq : array_like 1-D array of values representing the RHS of each equality constraint (row) in ``A_eq``. bounds : array_like The bounds for each independent variable in the solution, which can take one of three forms:: None : The default bounds, all variables are non-negative. (lb, ub) : If a 2-element sequence is provided, the same lower bound (lb) and upper bound (ub) will be applied to all variables. [(lb_0, ub_0), (lb_1, ub_1), ...] : If an n x 2 sequence is provided, each variable x_i will be bounded by lb[i] and ub[i]. Infinite bounds are specified using -np.inf (negative) or np.inf (positive). callback : callable If a callback function is provided, it will be called within each iteration of the simplex algorithm. The callback must have the signature ``callback(xk, **kwargs)`` where ``xk`` is the current solution vector and kwargs is a dictionary containing the following::
"bv" : A structured array containing a string representation of each basic variable and its current value. Options ------- maxiter : int The maximum number of iterations to perform. disp : bool If True, print exit status message to sys.stdout tol : float The tolerance which determines when a solution is "close enough" to zero in Phase 1 to be considered a basic feasible solution or close enough to positive to serve as an optimal solution. bland : bool If True, use Bland's anti-cycling rule [3] to choose pivots to prevent cycling. If False, choose pivots which should lead to a converged solution more quickly. The latter method is subject to cycling (non-convergence) in rare instances. Returns ------- A `scipy.optimize.OptimizeResult` consisting of the following fields: x : ndarray The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. slack : ndarray The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, then the corresponding constraint is active. success : bool Returns True if the algorithm succeeded in finding an optimal solution. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. Examples -------- Consider the following problem: Minimize: f = -1*x[0] + 4*x[1] Subject to: -3*x[0] + 1*x[1] <= 6 1*x[0] + 2*x[1] <= 4 x[1] >= -3 where: -inf <= x[0] <= inf This problem deviates from the standard linear programming problem. In standard form, linear programming problems assume the variables x are non-negative. Since the variables don't have standard bounds where 0 <= x <= inf, the bounds of the variables must be explicitly set. There are two upper-bound constraints, which can be expressed as dot(A_ub, x) <= b_ub The input for this problem is as follows: >>> from scipy.optimize import linprog >>> c = [-1, 4] >>> A = [[-3, 1], [1, 2]] >>> b = [6, 4] >>> x0_bnds = (None, None) >>> x1_bnds = (-3, None) >>> res = linprog(c, A, b, bounds=(x0_bnds, x1_bnds)) >>> print(res) fun: -22.0 message: 'Optimization terminated successfully.' nit: 1 slack: array([ 39., 0.]) status: 0 success: True x: array([ 10., -3.]) References ---------- .. [1] Dantzig, George B., Linear programming and extensions. Rand Corporation Research Study Princeton Univ. Press, Princeton, NJ, 1963 .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to Mathematical Programming", McGraw-Hill, Chapter 4. .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. Mathematics of Operations Research (2), 1977: pp. 103-107. """ _check_unknown_options(unknown_options) status = 0 messages = {0: "Optimization terminated successfully.", 1: "Iteration limit reached.", 2: "Optimization failed. Unable to find a feasible" " starting point.", 3: "Optimization failed. The problem appears to be unbounded.", 4: "Optimization failed. 
Singular matrix encountered."} have_floor_variable = False cc = np.asarray(c) # The initial value of the objective function element in the tableau f0 = 0 # The number of variables as given by c n = len(c) # Convert the input arguments to arrays (sized to zero if not provided) Aeq = np.asarray(A_eq) if A_eq is not None else np.empty([0, len(cc)]) Aub = np.asarray(A_ub) if A_ub is not None else np.empty([0, len(cc)]) beq = np.ravel(np.asarray(b_eq)) if b_eq is not None else np.empty([0]) bub = np.ravel(np.asarray(b_ub)) if b_ub is not None else np.empty([0]) # Analyze the bounds and determine what modifications are to be made to # the constraints in order to accommodate them. L = np.zeros(n, dtype=np.float64) U = np.ones(n, dtype=np.float64)*np.inf if bounds is None or len(bounds) == 0: pass elif len(bounds) == 2 and not hasattr(bounds[0], '__len__'): # All bounds are the same a = bounds[0] if bounds[0] is not None else -np.inf b = bounds[1] if bounds[1] is not None else np.inf L = np.asarray(n*[a], dtype=np.float64) U = np.asarray(n*[b], dtype=np.float64) else: if len(bounds) != n: status = -1 message = ("Invalid input for linprog with method = 'simplex'. " "Length of bounds is inconsistent with the length of c") else: try: for i in range(n): if len(bounds[i]) != 2: raise IndexError() L[i] = bounds[i][0] if bounds[i][0] is not None else -np.inf U[i] = bounds[i][1] if bounds[i][1] is not None else np.inf except IndexError: status = -1 message = ("Invalid input for linprog with " "method = 'simplex'. bounds must be an n x 2 " "sequence/array where n = len(c).") if np.any(L == -np.inf): # If any lower-bound constraint is a free variable # add the first column variable as the "floor" variable which # accommodates the most negative variable in the problem. n = n + 1 L = np.concatenate([np.array([0]), L]) U = np.concatenate([np.array([np.inf]), U]) cc = np.concatenate([np.array([0]), cc]) Aeq = np.hstack([np.zeros([Aeq.shape[0], 1]), Aeq]) Aub = np.hstack([np.zeros([Aub.shape[0], 1]), Aub]) have_floor_variable = True # Now before we deal with any variables with lower bounds < 0, # deal with finite bounds which can be simply added as new constraints. # Also validate bounds inputs here. for i in range(n): if L[i] > U[i]: status = -1 message = ("Invalid input for linprog with method = 'simplex'. " "Lower bound %d is greater than upper bound %d" % (i, i)) if np.isinf(L[i]) and L[i] > 0: status = -1 message = ("Invalid input for linprog with method = 'simplex'. " "Lower bound may not be +infinity") if np.isinf(U[i]) and U[i] < 0: status = -1 message = ("Invalid input for linprog with method = 'simplex'.
" "Upper bound may not be -infinity") if np.isfinite(L[i]) and L[i] > 0: # Add a new lower-bound (negative upper-bound) constraint Aub = np.vstack([Aub, np.zeros(n)]) Aub[-1, i] = -1 bub = np.concatenate([bub, np.array([-L[i]])]) L[i] = 0 if np.isfinite(U[i]): # Add a new upper-bound constraint Aub = np.vstack([Aub, np.zeros(n)]) Aub[-1, i] = 1 bub = np.concatenate([bub, np.array([U[i]])]) U[i] = np.inf # Now find negative lower bounds (finite or infinite) which require a # change of variables or free variables and handle them appropriately for i in range(0, n): if L[i] < 0: if np.isfinite(L[i]) and L[i] < 0: # Add a change of variables for x[i] # For each row in the constraint matrices, we take the # coefficient from column i in A, # and subtract the product of that and L[i] to the RHS b beq = beq - Aeq[:, i] * L[i] bub = bub - Aub[:, i] * L[i] # We now have a nonzero initial value for the objective # function as well. f0 = f0 - cc[i] * L[i] else: # This is an unrestricted variable, let x[i] = u[i] - v[0] # where v is the first column in all matrices. Aeq[:, 0] = Aeq[:, 0] - Aeq[:, i] Aub[:, 0] = Aub[:, 0] - Aub[:, i] cc[0] = cc[0] - cc[i] if np.isinf(U[i]): if U[i] < 0: status = -1 message = ("Invalid input for linprog with " "method = 'simplex'. Upper bound may not be -inf.") # The number of upper bound constraints (rows in A_ub and elements in b_ub) mub = len(bub) # The number of equality constraints (rows in A_eq and elements in b_eq) meq = len(beq) # The total number of constraints m = mub+meq # The number of slack variables (one for each upper-bound constraints) n_slack = mub # The number of artificial variables (one for each lower-bound and equality # constraint) n_artificial = meq + np.count_nonzero(bub < 0) try: Aub_rows, Aub_cols = Aub.shape except ValueError: raise ValueError("Invalid input. A_ub must be two-dimensional") try: Aeq_rows, Aeq_cols = Aeq.shape except ValueError: raise ValueError("Invalid input. A_eq must be two-dimensional") if Aeq_rows != meq: status = -1 message = ("Invalid input for linprog with method = 'simplex'. " "The number of rows in A_eq must be equal " "to the number of values in b_eq") if Aub_rows != mub: status = -1 message = ("Invalid input for linprog with method = 'simplex'. " "The number of rows in A_ub must be equal " "to the number of values in b_ub") if Aeq_cols > 0 and Aeq_cols != n: status = -1 message = ("Invalid input for linprog with method = 'simplex'. " "Number of columns in A_eq must be equal " "to the size of c") if Aub_cols > 0 and Aub_cols != n: status = -1 message = ("Invalid input for linprog with method = 'simplex'. " "Number of columns in A_ub must be equal to the size of c") if status != 0: # Invalid inputs provided raise ValueError(message) # Create the tableau T = np.zeros([m+2, n+n_slack+n_artificial+1]) # Insert objective into tableau T[-2, :n] = cc T[-2, -1] = f0 b = T[:-2, -1] if meq > 0: # Add Aeq to the tableau T[:meq, :n] = Aeq # Add beq to the tableau b[:meq] = beq if mub > 0: # Add Aub to the tableau T[meq:meq+mub, :n] = Aub # At bub to the tableau b[meq:meq+mub] = bub # Add the slack variables to the tableau np.fill_diagonal(T[meq:m, n:n+n_slack], 1) # Further set up the tableau. # If a row corresponds to an equality constraint or a negative b (a lower # bound constraint), then an artificial variable is added for that row. # Also, if b is negative, first flip the signs in that constraint. 
slcount = 0 avcount = 0 basis = np.zeros(m, dtype=int) r_artificial = np.zeros(n_artificial, dtype=int) for i in range(m): if i < meq or b[i] < 0: # basic variable i is in column n+n_slack+avcount basis[i] = n+n_slack+avcount r_artificial[avcount] = i avcount += 1 if b[i] < 0: b[i] *= -1 T[i, :-1] *= -1 T[i, basis[i]] = 1 T[-1, basis[i]] = 1 else: # basic variable i is in column n+slcount basis[i] = n+slcount slcount += 1 # Make the artificial variables basic feasible variables by subtracting # each row with an artificial variable from the Phase 1 objective for r in r_artificial: T[-1, :] = T[-1, :] - T[r, :] nit1, status = _solve_simplex(T, n, basis, phase=1, callback=callback, maxiter=maxiter, tol=tol, bland=bland) # if pseudo objective is zero, remove the last row from the tableau and # proceed to phase 2 if abs(T[-1, -1]) < tol: # Remove the pseudo-objective row from the tableau T = T[:-1, :] # Remove the artificial variable columns from the tableau T = np.delete(T, np.s_[n+n_slack:n+n_slack+n_artificial], 1) else: # Failure to find a feasible starting point status = 2 if status != 0: message = messages[status] if disp: print(message) return OptimizeResult(x=np.nan, fun=-T[-1, -1], nit=nit1, status=status, message=message, success=False) # Phase 2 nit2, status = _solve_simplex(T, n, basis, maxiter=maxiter-nit1, phase=2, callback=callback, tol=tol, nit0=nit1, bland=bland) solution = np.zeros(n+n_slack+n_artificial) solution[basis[:m]] = T[:m, -1] x = solution[:n] slack = solution[n:n+n_slack] # For those variables with finite negative lower bounds, # reverse the change of variables masked_L = np.ma.array(L, mask=np.isinf(L), fill_value=0.0).filled() x = x + masked_L # For those variables with infinite negative lower bounds, # take x[i] as the difference between x[i] and the floor variable. if have_floor_variable: for i in range(1, n): if np.isinf(L[i]): x[i] -= x[0] x = x[1:] # Optimization complete at this point obj = -T[-1, -1] if status in (0, 1): if disp: print(messages[status]) print(" Current function value: {0: <12.6f}".format(obj)) print(" Iterations: {0:d}".format(nit2)) else: if disp: print(messages[status]) print(" Iterations: {0:d}".format(nit2)) return OptimizeResult(x=x, fun=obj, nit=int(nit2), status=status, slack=slack, message=messages[status], success=(status == 0)) def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='simplex', callback=None, options=None): """ Minimize a linear objective function subject to linear equality and inequality constraints. Linear Programming is intended to solve the following problem form:: Minimize: c^T * x Subject to: A_ub * x <= b_ub A_eq * x == b_eq Parameters ---------- c : array_like Coefficients of the linear objective function to be minimized. A_ub : array_like, optional 2-D array which, when matrix-multiplied by ``x``, gives the values of the upper-bound inequality constraints at ``x``. b_ub : array_like, optional 1-D array of values representing the upper-bound of each inequality constraint (row) in ``A_ub``. A_eq : array_like, optional 2-D array which, when matrix-multiplied by ``x``, gives the values of the equality constraints at ``x``. b_eq : array_like, optional 1-D array of values representing the RHS of each equality constraint (row) in ``A_eq``. bounds : sequence, optional ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None for one of ``min`` or ``max`` when there is no bound in that direction. 
By default bounds are ``(0, None)`` (non-negative). If a sequence containing a single tuple is provided, then ``min`` and ``max`` will be applied to all variables in the problem. method : str, optional Type of solver. :ref:`'simplex' <optimize.linprog-simplex>` and :ref:`'interior-point' <optimize.linprog-interior-point>` are supported. callback : callable, optional (simplex only) If a callback function is provided, it will be called within each iteration of the simplex algorithm. The callback must have the signature ``callback(xk, **kwargs)`` where ``xk`` is the current solution vector and ``kwargs`` is a dictionary containing the following:: "tableau" : The current Simplex algorithm tableau "nit" : The current iteration. "pivot" : The pivot (row, column) used for the next iteration. "phase" : Whether the algorithm is in Phase 1 or Phase 2. "basis" : The indices of the columns of the basic variables. options : dict, optional A dictionary of solver options. All methods accept the following generic options: maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. For method-specific options, see :func:`show_options('linprog')`. Returns ------- A `scipy.optimize.OptimizeResult` consisting of the following fields: x : ndarray The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. slack : ndarray The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, then the corresponding constraint is active. success : bool Returns True if the algorithm succeeded in finding an optimal solution. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. See Also -------- show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is :ref:`Simplex <optimize.linprog-simplex>`. :ref:`Interior point <optimize.linprog-interior-point>` is also available. Method *simplex* uses the simplex algorithm (as it relates to linear programming, NOT the Nelder-Mead simplex) [1]_, [2]_. This algorithm should be reasonably reliable and fast for small problems. .. versionadded:: 0.15.0 Method *interior-point* uses the primal-dual path following algorithm as outlined in [4]_. This algorithm is intended to provide a faster and more reliable alternative to *simplex*, especially for large, sparse problems. Note, however, that the solution returned may be slightly less accurate than that of the simplex method and may not correspond with a vertex of the polytope defined by the constraints. References ---------- .. [1] Dantzig, George B., Linear programming and extensions. Rand Corporation Research Study Princeton Univ. Press, Princeton, NJ, 1963 .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to Mathematical Programming", McGraw-Hill, Chapter 4. .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. Mathematics of Operations Research (2), 1977: pp. 103-107. .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm."
High performance optimization. Springer US, 2000. 197-232. .. [5] Andersen, Erling D. "Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear Programming based on Newton's Method." Unpublished Course Notes, March 2004. Available 2/25/2017 at https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods." Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at http://www.4er.org/CourseNotes/Book%20B/B-III.pdf .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear programming." Mathematical Programming 71.2 (1995): 221-245. .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear programming." Athena Scientific 1 (1997): 997. .. [10] Andersen, Erling D., et al. Implementation of interior point methods for large scale linear programming. HEC/Universite de Geneve, 1996. Examples -------- Consider the following problem: Minimize: f = -1*x[0] + 4*x[1] Subject to: -3*x[0] + 1*x[1] <= 6 1*x[0] + 2*x[1] <= 4 x[1] >= -3 where: -inf <= x[0] <= inf This problem deviates from the standard linear programming problem. In standard form, linear programming problems assume the variables x are non-negative. Since the variables don't have standard bounds where 0 <= x <= inf, the bounds of the variables must be explicitly set. There are two upper-bound constraints, which can be expressed as dot(A_ub, x) <= b_ub The input for this problem is as follows: >>> c = [-1, 4] >>> A = [[-3, 1], [1, 2]] >>> b = [6, 4] >>> x0_bounds = (None, None) >>> x1_bounds = (-3, None) >>> from scipy.optimize import linprog >>> res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), ... options={"disp": True}) Optimization terminated successfully. Current function value: -22.000000 Iterations: 1 >>> print(res) fun: -22.0 message: 'Optimization terminated successfully.' nit: 1 slack: array([39., 0.]) status: 0 success: True x: array([10., -3.]) Note that ``linprog`` minimizes the objective directly, so the reported value ``fun = -22.0`` is already the minimum of f; no sign change is needed when reading the result. """ meth = method.lower() if options is None: options = {} if meth == 'simplex': return _linprog_simplex(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, callback=callback, **options) elif meth == 'interior-point': return _linprog_ip(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, callback=callback, **options) else: raise ValueError('Unknown solver %s' % method)
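# A minimal usage sketch of the callback hook documented above: the LP data
# below mirrors the docstring example and is illustrative only; any
# feasible problem would do.
if __name__ == '__main__':
    demo_res = linprog(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
                       bounds=[(None, None), (-3, None)],
                       callback=linprog_terse_callback)  # prints xk per iteration
    print(demo_res.status, demo_res.fun)  # expect 0 and -22.0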
40,117
37.244042
143
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_hungarian.py
# Hungarian algorithm (Kuhn-Munkres) for solving the linear sum assignment # problem. Taken from scikit-learn. Based on original code by Brian Clapper, # adapted to NumPy by Gael Varoquaux. # Further improvements by Ben Root, Vlad Niculae and Lars Buitinck. # # Copyright (c) 2008 Brian M. Clapper <bmc@clapper.org>, Gael Varoquaux # Author: Brian M. Clapper, Gael Varoquaux # License: 3-clause BSD import numpy as np def linear_sum_assignment(cost_matrix): """Solve the linear sum assignment problem. The linear sum assignment problem is also known as minimum weight matching in bipartite graphs. A problem instance is described by a matrix C, where each C[i,j] is the cost of matching vertex i of the first partite set (a "worker") and vertex j of the second set (a "job"). The goal is to find a complete assignment of workers to jobs of minimal cost. Formally, let X be a boolean matrix where :math:`X[i,j] = 1` iff row i is assigned to column j. Then the optimal assignment has cost .. math:: \\min \\sum_i \\sum_j C_{i,j} X_{i,j} s.t. each row is assigned to at most one column, and each column to at most one row. This function can also solve a generalization of the classic assignment problem where the cost matrix is rectangular. If it has more rows than columns, then not every row needs to be assigned to a column, and vice versa. The method used is the Hungarian algorithm, also known as the Munkres or Kuhn-Munkres algorithm. Parameters ---------- cost_matrix : array The cost matrix of the bipartite graph. Returns ------- row_ind, col_ind : array An array of row indices and one of corresponding column indices giving the optimal assignment. The cost of the assignment can be computed as ``cost_matrix[row_ind, col_ind].sum()``. The row indices will be sorted; in the case of a square cost matrix they will be equal to ``numpy.arange(cost_matrix.shape[0])``. Notes ----- .. versionadded:: 0.17.0 Examples -------- >>> cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]]) >>> from scipy.optimize import linear_sum_assignment >>> row_ind, col_ind = linear_sum_assignment(cost) >>> col_ind array([1, 0, 2]) >>> cost[row_ind, col_ind].sum() 5 References ---------- 1. http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html 2. Harold W. Kuhn. The Hungarian Method for the assignment problem. *Naval Research Logistics Quarterly*, 2:83-97, 1955. 3. Harold W. Kuhn. Variants of the Hungarian method for assignment problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956. 4. Munkres, J. Algorithms for the Assignment and Transportation Problems. *J. SIAM*, 5(1):32-38, March, 1957. 5. https://en.wikipedia.org/wiki/Hungarian_algorithm """ cost_matrix = np.asarray(cost_matrix) if len(cost_matrix.shape) != 2: raise ValueError("expected a matrix (2-d array), got a %r array" % (cost_matrix.shape,)) if not (np.issubdtype(cost_matrix.dtype, np.number) or cost_matrix.dtype == np.dtype(np.bool)): raise ValueError("expected a matrix containing numerical entries, got %s" % (cost_matrix.dtype,)) if np.any(np.isinf(cost_matrix) | np.isnan(cost_matrix)): raise ValueError("matrix contains invalid numeric entries") if cost_matrix.dtype == np.dtype(np.bool): cost_matrix = cost_matrix.astype(np.int) # The algorithm expects more columns than rows in the cost matrix. if cost_matrix.shape[1] < cost_matrix.shape[0]: cost_matrix = cost_matrix.T transposed = True else: transposed = False state = _Hungary(cost_matrix) # No need to bother with assignments if one of the dimensions # of the cost matrix is zero-length.
step = None if 0 in cost_matrix.shape else _step1 while step is not None: step = step(state) if transposed: marked = state.marked.T else: marked = state.marked return np.where(marked == 1) class _Hungary(object): """State of the Hungarian algorithm. Parameters ---------- cost_matrix : 2D matrix The cost matrix. Must have shape[1] >= shape[0]. """ def __init__(self, cost_matrix): self.C = cost_matrix.copy() n, m = self.C.shape self.row_uncovered = np.ones(n, dtype=bool) self.col_uncovered = np.ones(m, dtype=bool) self.Z0_r = 0 self.Z0_c = 0 self.path = np.zeros((n + m, 2), dtype=int) self.marked = np.zeros((n, m), dtype=int) def _clear_covers(self): """Clear all covered matrix cells""" self.row_uncovered[:] = True self.col_uncovered[:] = True # Individual steps of the algorithm follow, as a state machine: they return # the next step to be taken (function to be called), if any. def _step1(state): """Steps 1 and 2 in the Wikipedia page.""" # Step 1: For each row of the matrix, find the smallest element and # subtract it from every element in its row. state.C -= state.C.min(axis=1)[:, np.newaxis] # Step 2: Find a zero (Z) in the resulting matrix. If there is no # starred zero in its row or column, star Z. Repeat for each element # in the matrix. for i, j in zip(*np.where(state.C == 0)): if state.col_uncovered[j] and state.row_uncovered[i]: state.marked[i, j] = 1 state.col_uncovered[j] = False state.row_uncovered[i] = False state._clear_covers() return _step3 def _step3(state): """ Cover each column containing a starred zero. If n columns are covered, the starred zeros describe a complete set of unique assignments. In this case, Go to DONE, otherwise, Go to Step 4. """ marked = (state.marked == 1) state.col_uncovered[np.any(marked, axis=0)] = False if marked.sum() < state.C.shape[0]: return _step4 def _step4(state): """ Find a noncovered zero and prime it. If there is no starred zero in the row containing this primed zero, Go to Step 5. Otherwise, cover this row and uncover the column containing the starred zero. Continue in this manner until there are no uncovered zeros left. Save the smallest uncovered value and Go to Step 6. """ # We convert to int as numpy operations are faster on int C = (state.C == 0).astype(int) covered_C = C * state.row_uncovered[:, np.newaxis] covered_C *= np.asarray(state.col_uncovered, dtype=int) n = state.C.shape[0] m = state.C.shape[1] while True: # Find an uncovered zero row, col = np.unravel_index(np.argmax(covered_C), (n, m)) if covered_C[row, col] == 0: return _step6 else: state.marked[row, col] = 2 # Find the first starred element in the row star_col = np.argmax(state.marked[row] == 1) if state.marked[row, star_col] != 1: # Could not find one state.Z0_r = row state.Z0_c = col return _step5 else: col = star_col state.row_uncovered[row] = False state.col_uncovered[col] = True covered_C[:, col] = C[:, col] * ( np.asarray(state.row_uncovered, dtype=int)) covered_C[row] = 0 def _step5(state): """ Construct a series of alternating primed and starred zeros as follows. Let Z0 represent the uncovered primed zero found in Step 4. Let Z1 denote the starred zero in the column of Z0 (if any). Let Z2 denote the primed zero in the row of Z1 (there will always be one). Continue until the series terminates at a primed zero that has no starred zero in its column. Unstar each starred zero of the series, star each primed zero of the series, erase all primes and uncover every line in the matrix. 
Return to Step 3 """ count = 0 path = state.path path[count, 0] = state.Z0_r path[count, 1] = state.Z0_c while True: # Find the first starred element in the col defined by # the path. row = np.argmax(state.marked[:, path[count, 1]] == 1) if state.marked[row, path[count, 1]] != 1: # Could not find one break else: count += 1 path[count, 0] = row path[count, 1] = path[count - 1, 1] # Find the first prime element in the row defined by the # first path step col = np.argmax(state.marked[path[count, 0]] == 2) if state.marked[row, col] != 2: col = -1 count += 1 path[count, 0] = path[count - 1, 0] path[count, 1] = col # Convert paths for i in range(count + 1): if state.marked[path[i, 0], path[i, 1]] == 1: state.marked[path[i, 0], path[i, 1]] = 0 else: state.marked[path[i, 0], path[i, 1]] = 1 state._clear_covers() # Erase all prime markings state.marked[state.marked == 2] = 0 return _step3 def _step6(state): """ Add the value found in Step 4 to every element of each covered row, and subtract it from every element of each uncovered column. Return to Step 4 without altering any stars, primes, or covered lines. """ # the smallest uncovered value in the matrix if np.any(state.row_uncovered) and np.any(state.col_uncovered): minval = np.min(state.C[state.row_uncovered], axis=0) minval = np.min(minval[state.col_uncovered]) state.C[~state.row_uncovered] += minval state.C[:, state.col_uncovered] -= minval return _step4
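# A minimal usage sketch of the rectangular case mentioned in the
# docstring: three "workers" but only two "jobs", so one row goes
# unassigned. The cost values are illustrative assumptions.
if __name__ == '__main__':
    demo_cost = np.array([[4, 1], [2, 0], [3, 2]])
    rows, cols = linear_sum_assignment(demo_cost)
    # Two (row, col) pairs come back; their summed cost (3) is minimal.
    print(rows, cols, demo_cost[rows, cols].sum())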
9,854
33.823322
81
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tnc.py
# TNC Python interface # @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $ # Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org) # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ TNC: A python interface to the TNC non-linear optimizer TNC is a non-linear optimizer. To use it, you must provide a function to minimize. The function must take one argument: the list of coordinates at which to evaluate the function; and it must return either a tuple, whose first element is the value of the function, and whose second element is the gradient of the function (as a list of values); or None, to abort the minimization. """ from __future__ import division, print_function, absolute_import from scipy.optimize import moduleTNC, approx_fprime from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options from numpy import inf, array, zeros, asfarray __all__ = ['fmin_tnc'] MSG_NONE = 0 # No messages MSG_ITER = 1 # One line per iteration MSG_INFO = 2 # Informational messages MSG_VERS = 4 # Version info MSG_EXIT = 8 # Exit reasons MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT MSGS = { MSG_NONE: "No messages", MSG_ITER: "One line per iteration", MSG_INFO: "Informational messages", MSG_VERS: "Version info", MSG_EXIT: "Exit reasons", MSG_ALL: "All messages" } INFEASIBLE = -1 # Infeasible (lower bound > upper bound) LOCALMINIMUM = 0 # Local minimum reached (|pg| ~= 0) FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0) XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0) MAXFUN = 3 # Max. number of function evaluations reached LSFAIL = 4 # Linear search failed CONSTANT = 5 # All lower bounds are equal to the upper bounds NOPROGRESS = 6 # Unable to progress USERABORT = 7 # User requested end of minimization RCSTRINGS = { INFEASIBLE: "Infeasible (lower bound > upper bound)", LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)", FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)", XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)", MAXFUN: "Max. number of function evaluations reached", LSFAIL: "Linear search failed", CONSTANT: "All lower bounds are equal to the upper bounds", NOPROGRESS: "Unable to progress", USERABORT: "User requested end of minimization" } # Changes to interface made by Travis Oliphant, Apr.
2004 for inclusion in # SciPy def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, bounds=None, epsilon=1e-8, scale=None, offset=None, messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1, stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1, rescale=-1, disp=None, callback=None): """ Minimize a function with variables subject to bounds, using gradient information in a truncated Newton algorithm. This method wraps a C implementation of the algorithm. Parameters ---------- func : callable ``func(x, *args)`` Function to minimize. Must do one of: 1. Return f and g, where f is the value of the function and g its gradient (a list of floats). 2. Return the function value but supply gradient function separately as `fprime`. 3. Return the function value and set ``approx_grad=True``. If the function returns None, the minimization is aborted. x0 : array_like Initial estimate of minimum. fprime : callable ``fprime(x, *args)``, optional Gradient of `func`. If None, then either `func` must return the function value and the gradient (``f,g = func(x, *args)``) or `approx_grad` must be True. args : tuple, optional Arguments to pass to function. approx_grad : bool, optional If true, approximate the gradient numerically. bounds : list, optional (min, max) pairs for each element in x0, defining the bounds on that parameter. Use None or +/-inf for one of min or max when there is no bound in that direction. epsilon : float, optional Used if approx_grad is True. The stepsize in a finite difference approximation for fprime. scale : array_like, optional Scaling factors to apply to each variable. If None, the factors are up-low for interval bounded variables and 1+|x| for the others. Defaults to None. offset : array_like, optional Value to subtract from each variable. If None, the offsets are (up+low)/2 for interval bounded variables and x for the others. messages : int, optional Bit mask used to select messages displayed during minimization; values are defined in the MSGS dict. Defaults to MSG_ALL. disp : int, optional Integer interface to messages. 0 = no message, 5 = all messages maxCGit : int, optional Maximum number of hessian*vector evaluations per main iteration. If maxCGit == 0, the direction chosen is -gradient; if maxCGit < 0, maxCGit is set to max(1,min(50,n/2)). Defaults to -1. maxfun : int, optional Maximum number of function evaluations. If None, maxfun is set to max(100, 10*len(x0)). Defaults to None. eta : float, optional Severity of the line search. If < 0 or > 1, set to 0.25. Defaults to -1. stepmx : float, optional Maximum step for the line search. May be increased during call. If too small, it will be set to 10.0. Defaults to 0. accuracy : float, optional Relative precision for finite difference calculations. If <= machine_precision, set to sqrt(machine_precision). Defaults to 0. fmin : float, optional Minimum function value estimate. Defaults to 0. ftol : float, optional Precision goal for the value of f in the stopping criterion. If ftol < 0.0, ftol is set to 0.0. Defaults to -1. xtol : float, optional Precision goal for the value of x in the stopping criterion (after applying x scaling factors). If xtol < 0.0, xtol is set to sqrt(machine_precision). Defaults to -1. pgtol : float, optional Precision goal for the value of the projected gradient in the stopping criterion (after applying x scaling factors). If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy). Setting it to 0.0 is not recommended. Defaults to -1.
rescale : float, optional Scaling factor (in log10) used to trigger f value rescaling. If 0, rescale at each iteration. If a large value, never rescale. If < 0, rescale is set to 1.3. callback : callable, optional Called after each iteration, as callback(xk), where xk is the current parameter vector. Returns ------- x : ndarray The solution. nfeval : int The number of function evaluations. rc : int Return code, see below See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'TNC' `method` in particular. Notes ----- The underlying algorithm is truncated Newton, also called Newton Conjugate-Gradient. This method differs from scipy.optimize.fmin_ncg in that 1. It wraps a C implementation of the algorithm 2. It allows each variable to be given an upper and lower bound. The algorithm incorporates the bound constraints by determining the descent direction as in an unconstrained truncated Newton, but never taking a step-size large enough to leave the space of feasible x's. The algorithm keeps track of a set of currently active constraints, and ignores them when computing the minimum allowable step size. (The x's associated with the active constraint are kept fixed.) If the maximum allowable step size is zero then a new constraint is added. At the end of each iteration one of the constraints may be deemed no longer active and removed. A constraint is considered no longer active if it is currently active but the gradient for that variable points inward from the constraint. The specific constraint removed is the one associated with the variable of largest index whose constraint is no longer active. Return codes are defined as follows:: -1 : Infeasible (lower bound > upper bound) 0 : Local minimum reached (|pg| ~= 0) 1 : Converged (|f_n-f_(n-1)| ~= 0) 2 : Converged (|x_n-x_(n-1)| ~= 0) 3 : Max. number of function evaluations reached 4 : Linear search failed 5 : All lower bounds are equal to the upper bounds 6 : Unable to progress 7 : User requested end of minimization References ---------- Wright S., Nocedal J. (2006), 'Numerical Optimization' Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method", SIAM Journal of Numerical Analysis 21, pp. 770-778 """ # handle fprime/approx_grad if approx_grad: fun = func jac = None elif fprime is None: fun = MemoizeJac(func) jac = fun.derivative else: fun = func jac = fprime if disp is not None: # disp takes precedence over messages mesg_num = disp else: mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, 4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL) # build options opts = {'eps': epsilon, 'scale': scale, 'offset': offset, 'mesg_num': mesg_num, 'maxCGit': maxCGit, 'maxiter': maxfun, 'eta': eta, 'stepmx': stepmx, 'accuracy': accuracy, 'minfev': fmin, 'ftol': ftol, 'xtol': xtol, 'gtol': pgtol, 'rescale': rescale, 'disp': False} res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts) return res['x'], res['nfev'], res['status'] def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None, eps=1e-8, scale=None, offset=None, mesg_num=None, maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0, minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False, callback=None, **unknown_options): """ Minimize a scalar function of one or more variables using a truncated Newton (TNC) algorithm. Options ------- eps : float Step size used for numerical approximation of the Jacobian. scale : list of floats Scaling factors to apply to each variable.
If None, the factors are up-low for interval bounded variables and 1+|x| for the others. Defaults to None. offset : float Value to subtract from each variable. If None, the offsets are (up+low)/2 for interval bounded variables and x for the others. disp : bool Set to True to print convergence messages. maxCGit : int Maximum number of hessian*vector evaluations per main iteration. If maxCGit == 0, the direction chosen is -gradient. If maxCGit < 0, maxCGit is set to max(1,min(50,n/2)). Defaults to -1. maxiter : int Maximum number of function evaluations. If None, `maxiter` is set to max(100, 10*len(x0)). Defaults to None. eta : float Severity of the line search. If < 0 or > 1, set to 0.25. Defaults to -1. stepmx : float Maximum step for the line search. May be increased during call. If too small, it will be set to 10.0. Defaults to 0. accuracy : float Relative precision for finite difference calculations. If <= machine_precision, set to sqrt(machine_precision). Defaults to 0. minfev : float Minimum function value estimate. Defaults to 0. ftol : float Precision goal for the value of f in the stopping criterion. If ftol < 0.0, ftol is set to 0.0. Defaults to -1. xtol : float Precision goal for the value of x in the stopping criterion (after applying x scaling factors). If xtol < 0.0, xtol is set to sqrt(machine_precision). Defaults to -1. gtol : float Precision goal for the value of the projected gradient in the stopping criterion (after applying x scaling factors). If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy). Setting it to 0.0 is not recommended. Defaults to -1. rescale : float Scaling factor (in log10) used to trigger f value rescaling. If 0, rescale at each iteration. If a large value, never rescale. If < 0, rescale is set to 1.3. """ _check_unknown_options(unknown_options) epsilon = eps maxfun = maxiter fmin = minfev pgtol = gtol x0 = asfarray(x0).flatten() n = len(x0) if bounds is None: bounds = [(None,None)] * n if len(bounds) != n: raise ValueError('length of x0 != length of bounds') if mesg_num is not None: messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, 4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL) elif disp: messages = MSG_ALL else: messages = MSG_NONE if jac is None: def func_and_grad(x): f = fun(x, *args) g = approx_fprime(x, fun, epsilon, *args) return f, g else: def func_and_grad(x): f = fun(x, *args) g = jac(x, *args) return f, g """ low, up : the bounds (lists of floats) If low is None, the lower bounds are removed. If up is None, the upper bounds are removed.
low and up default to None. """ low = zeros(n) up = zeros(n) for i in range(n): if bounds[i] is None: l, u = -inf, inf else: l,u = bounds[i] if l is None: low[i] = -inf else: low[i] = l if u is None: up[i] = inf else: up[i] = u if scale is None: scale = array([]) if offset is None: offset = array([]) if maxfun is None: maxfun = max(100, 10*len(x0)) rc, nf, nit, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale, offset, messages, maxCGit, maxfun, eta, stepmx, accuracy, fmin, ftol, xtol, pgtol, rescale, callback) funv, jacv = func_and_grad(x) return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=nf, nit=nit, status=rc, message=RCSTRINGS[rc], success=(-1 < rc < 3)) if __name__ == '__main__': # Examples for TNC def example(): print("Example") # A function to minimize def function(x): f = pow(x[0],2.0)+pow(abs(x[1]),3.0) g = [0,0] g[0] = 2.0*x[0] g[1] = 3.0*pow(abs(x[1]),2.0) if x[1] < 0: g[1] = -g[1] return f, g # Optimizer call (bounds are (min, max) pairs per variable: # x0 in [-10, 10], x1 in [1, 10], so the constrained minimum of # x0**2 + |x1|**3 is at [0, 1]) x, nf, rc = fmin_tnc(function, [-7, 3], bounds=([-10, 10], [1, 10])) print("After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc]) print("x =", x) print("exact value = [0, 1]") print() example()
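    # A second, minimal sketch (added for illustration, not part of the
    # original example): the same kind of bound-constrained problem, but
    # with the gradient approximated numerically via approx_grad=True.
    # The objective, starting point and bounds below are arbitrary choices.
    def example_approx_grad():
        def f(x):
            return x[0]**2 + abs(x[1])**3

        # x0 in [-10, 10], x1 in [1, 10]; no analytic gradient supplied,
        # so TNC builds a finite-difference approximation internally
        x, nf, rc = fmin_tnc(f, [-7, 3], approx_grad=True,
                             bounds=[(-10, 10), (1, 10)])
        print("approx_grad:", RCSTRINGS[rc], "x =", x)

    example_approx_grad()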
16,537
36.41629
84
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/optimize.py
#__docformat__ = "restructuredtext en" # ******NOTICE*************** # optimize.py module by Travis E. Oliphant # # You may copy and use this module as you see fit with no # guarantee implied provided you keep this notice in all copies. # *****END NOTICE************ # A collection of optimization algorithms. Version 0.5 # CHANGES # Added fminbound (July 2001) # Added brute (Aug. 2002) # Finished line search satisfying strong Wolfe conditions (Mar. 2004) # Updated strong Wolfe conditions line search to use # cubic-interpolation (Mar. 2004) from __future__ import division, print_function, absolute_import # Minimization routines __all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg', 'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der', 'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime', 'line_search', 'check_grad', 'OptimizeResult', 'show_options', 'OptimizeWarning'] __docformat__ = "restructuredtext en" import warnings import sys import numpy from scipy._lib.six import callable, xrange from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze, vectorize, asarray, sqrt, Inf, asfarray, isinf) import numpy as np from .linesearch import (line_search_wolfe1, line_search_wolfe2, line_search_wolfe2 as line_search, LineSearchWarning) from scipy._lib._util import getargspec_no_self as _getargspec # standard status messages of optimizers _status_message = {'success': 'Optimization terminated successfully.', 'maxfev': 'Maximum number of function evaluations has ' 'been exceeded.', 'maxiter': 'Maximum number of iterations has been ' 'exceeded.', 'pr_loss': 'Desired error not necessarily achieved due ' 'to precision loss.'} class MemoizeJac(object): """ Decorator that caches the value and gradient of the function each time it is called. """ def __init__(self, fun): self.fun = fun self.jac = None self.x = None def __call__(self, x, *args): self.x = numpy.asarray(x).copy() fg = self.fun(x, *args) self.jac = fg[1] return fg[0] def derivative(self, x, *args): if self.jac is not None and numpy.alltrue(x == self.x): return self.jac else: self(x, *args) return self.jac class OptimizeResult(dict): """ Represents the optimization result. Attributes ---------- x : ndarray The solution of the optimization. success : bool Whether or not the optimizer exited successfully. status : int Termination status of the optimizer. Its value depends on the underlying solver. Refer to `message` for details. message : str Description of the cause of the termination. fun, jac, hess : ndarray Values of objective function, its Jacobian and its Hessian (if available). The Hessians may be approximations, see the documentation of the function in question. hess_inv : object Inverse of the objective function's Hessian; may be an approximation. Not available for all solvers. The type of this attribute may be either np.ndarray or scipy.sparse.linalg.LinearOperator. nfev, njev, nhev : int Number of evaluations of the objective functions and of its Jacobian and Hessian. nit : int Number of iterations performed by the optimizer. maxcv : float The maximum constraint violation. Notes ----- There may be additional attributes not listed above depending on the specific solver. Since this class is essentially a subclass of dict with attribute accessors, one can see which attributes are available using the `keys()` method.
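    Examples
    --------
    A minimal illustration of attribute-style access (the objective
    function and starting point here are arbitrary):

    >>> from scipy import optimize
    >>> res = optimize.minimize(lambda x: (x[0] - 2)**2, x0=0)
    >>> res.success
    True
    >>> res.x.shape
    (1,)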
""" def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ def __repr__(self): if self.keys(): m = max(map(len, list(self.keys()))) + 1 return '\n'.join([k.rjust(m) + ': ' + repr(v) for k, v in sorted(self.items())]) else: return self.__class__.__name__ + "()" def __dir__(self): return list(self.keys()) class OptimizeWarning(UserWarning): pass def _check_unknown_options(unknown_options): if unknown_options: msg = ", ".join(map(str, unknown_options.keys())) # Stack level 4: this is called from _minimize_*, which is # called from another function in Scipy. Level 4 is the first # level in user code. warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4) def is_array_scalar(x): """Test whether `x` is either a scalar or an array scalar. """ return np.size(x) == 1 _epsilon = sqrt(numpy.finfo(float).eps) def vecnorm(x, ord=2): if ord == Inf: return numpy.amax(numpy.abs(x)) elif ord == -Inf: return numpy.amin(numpy.abs(x)) else: return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord) def rosen(x): """ The Rosenbrock function. The function computed is:: sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) Parameters ---------- x : array_like 1-D array of points at which the Rosenbrock function is to be computed. Returns ------- f : float The value of the Rosenbrock function. See Also -------- rosen_der, rosen_hess, rosen_hess_prod """ x = asarray(x) r = numpy.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, axis=0) return r def rosen_der(x): """ The derivative (i.e. gradient) of the Rosenbrock function. Parameters ---------- x : array_like 1-D array of points at which the derivative is to be computed. Returns ------- rosen_der : (N,) ndarray The gradient of the Rosenbrock function at `x`. See Also -------- rosen, rosen_hess, rosen_hess_prod """ x = asarray(x) xm = x[1:-1] xm_m1 = x[:-2] xm_p1 = x[2:] der = numpy.zeros_like(x) der[1:-1] = (200 * (xm - xm_m1**2) - 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) der[-1] = 200 * (x[-1] - x[-2]**2) return der def rosen_hess(x): """ The Hessian matrix of the Rosenbrock function. Parameters ---------- x : array_like 1-D array of points at which the Hessian matrix is to be computed. Returns ------- rosen_hess : ndarray The Hessian matrix of the Rosenbrock function at `x`. See Also -------- rosen, rosen_der, rosen_hess_prod """ x = atleast_1d(x) H = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1) diagonal = numpy.zeros(len(x), dtype=x.dtype) diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 diagonal[-1] = 200 diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] H = H + numpy.diag(diagonal) return H def rosen_hess_prod(x, p): """ Product of the Hessian matrix of the Rosenbrock function with a vector. Parameters ---------- x : array_like 1-D array of points at which the Hessian matrix is to be computed. p : array_like 1-D array, the vector to be multiplied by the Hessian matrix. Returns ------- rosen_hess_prod : ndarray The Hessian matrix of the Rosenbrock function at `x` multiplied by the vector `p`. 
See Also -------- rosen, rosen_der, rosen_hess """ x = atleast_1d(x) Hp = numpy.zeros(len(x), dtype=x.dtype) Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1] Hp[1:-1] = (-400 * x[:-2] * p[:-2] + (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] - 400 * x[1:-1] * p[2:]) Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1] return Hp def wrap_function(function, args): ncalls = [0] if function is None: return ncalls, None def function_wrapper(*wrapper_args): ncalls[0] += 1 return function(*(wrapper_args + args)) return ncalls, function_wrapper def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None, initial_simplex=None): """ Minimize a function using the downhill simplex algorithm. This algorithm only uses function values, not derivatives or second derivatives. Parameters ---------- func : callable func(x,*args) The objective function to be minimized. x0 : ndarray Initial guess. args : tuple, optional Extra arguments passed to func, i.e. ``f(x,*args)``. xtol : float, optional Absolute error in xopt between iterations that is acceptable for convergence. ftol : number, optional Absolute error in func(xopt) between iterations that is acceptable for convergence. maxiter : int, optional Maximum number of iterations to perform. maxfun : number, optional Maximum number of function evaluations to make. full_output : bool, optional Set to True if fopt and warnflag outputs are desired. disp : bool, optional Set to True to print convergence messages. retall : bool, optional Set to True to return list of solutions at each iteration. callback : callable, optional Called after each iteration, as callback(xk), where xk is the current parameter vector. initial_simplex : array_like of shape (N + 1, N), optional Initial simplex. If given, overrides `x0`. ``initial_simplex[j,:]`` should contain the coordinates of the j-th vertex of the ``N+1`` vertices in the simplex, where ``N`` is the dimension. Returns ------- xopt : ndarray Parameter that minimizes function. fopt : float Value of function at minimum: ``fopt = func(xopt)``. iter : int Number of iterations performed. funcalls : int Number of function calls made. warnflag : int 1 : Maximum number of function evaluations made. 2 : Maximum number of iterations reached. allvecs : list Solution at each iteration. See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'Nelder-Mead' `method` in particular. Notes ----- Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. This algorithm has a long history of successful use in applications. But it will usually be slower than an algorithm that uses first or second derivative information. In practice it can have poor performance in high-dimensional problems and is not robust to minimizing complicated functions. Additionally, there currently is no complete theory describing when the algorithm will successfully converge to the minimum, or how fast it will if it does. Both the ftol and xtol criteria must be met for convergence. Examples -------- >>> def f(x): ... return x**2 >>> from scipy import optimize >>> minimum = optimize.fmin(f, 1) Optimization terminated successfully. Current function value: 0.000000 Iterations: 17 Function evaluations: 34 >>> minimum[0] -8.8817841970012523e-16 References ---------- .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function minimization", The Computer Journal, 7, pp. 308-313 .. [2] Wright, M.H. 
(1996), "Direct Search Methods: Once Scorned, Now Respectable", in Numerical Analysis 1995, Proceedings of the 1995 Dundee Biennial Conference in Numerical Analysis, D.F. Griffiths and G.A. Watson (Eds.), Addison Wesley Longman, Harlow, UK, pp. 191-208. """ opts = {'xatol': xtol, 'fatol': ftol, 'maxiter': maxiter, 'maxfev': maxfun, 'disp': disp, 'return_all': retall, 'initial_simplex': initial_simplex} res = _minimize_neldermead(func, x0, args, callback=callback, **opts) if full_output: retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status'] if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_neldermead(func, x0, args=(), callback=None, maxiter=None, maxfev=None, disp=False, return_all=False, initial_simplex=None, xatol=1e-4, fatol=1e-4, adaptive=False, **unknown_options): """ Minimization of scalar function of one or more variables using the Nelder-Mead algorithm. Options ------- disp : bool Set to True to print convergence messages. maxiter, maxfev : int Maximum allowed number of iterations and function evaluations. Will default to ``N*200``, where ``N`` is the number of variables, if neither `maxiter` nor `maxfev` is set. If both `maxiter` and `maxfev` are set, minimization will stop at whichever limit is reached first. initial_simplex : array_like of shape (N + 1, N) Initial simplex. If given, overrides `x0`. ``initial_simplex[j,:]`` should contain the coordinates of the j-th vertex of the ``N+1`` vertices in the simplex, where ``N`` is the dimension. xatol : float, optional Absolute error in xopt between iterations that is acceptable for convergence. fatol : number, optional Absolute error in func(xopt) between iterations that is acceptable for convergence. adaptive : bool, optional Adapt algorithm parameters to dimensionality of problem. Useful for high-dimensional minimization [1]_. References ---------- .. [1] Gao, F. and Han, L. Implementing the Nelder-Mead simplex algorithm with adaptive parameters. 2012. Computational Optimization and Applications. 51:1, pp. 259-277 """ if 'ftol' in unknown_options: warnings.warn("ftol is deprecated for Nelder-Mead," " use fatol instead. If you specified both, only" " fatol is used.", DeprecationWarning) if (np.isclose(fatol, 1e-4) and not np.isclose(unknown_options['ftol'], 1e-4)): # only ftol was probably specified, use it. fatol = unknown_options['ftol'] unknown_options.pop('ftol') if 'xtol' in unknown_options: warnings.warn("xtol is deprecated for Nelder-Mead," " use xatol instead. If you specified both, only" " xatol is used.", DeprecationWarning) if (np.isclose(xatol, 1e-4) and not np.isclose(unknown_options['xtol'], 1e-4)): # only xtol was probably specified, use it.
xatol = unknown_options['xtol'] unknown_options.pop('xtol') _check_unknown_options(unknown_options) maxfun = maxfev retall = return_all fcalls, func = wrap_function(func, args) if adaptive: dim = float(len(x0)) rho = 1 chi = 1 + 2/dim psi = 0.75 - 1/(2*dim) sigma = 1 - 1/dim else: rho = 1 chi = 2 psi = 0.5 sigma = 0.5 nonzdelt = 0.05 zdelt = 0.00025 x0 = asfarray(x0).flatten() if initial_simplex is None: N = len(x0) sim = numpy.zeros((N + 1, N), dtype=x0.dtype) sim[0] = x0 for k in range(N): y = numpy.array(x0, copy=True) if y[k] != 0: y[k] = (1 + nonzdelt)*y[k] else: y[k] = zdelt sim[k + 1] = y else: sim = np.asfarray(initial_simplex).copy() if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1: raise ValueError("`initial_simplex` should be an array of shape (N+1,N)") if len(x0) != sim.shape[1]: raise ValueError("Size of `initial_simplex` is not consistent with `x0`") N = sim.shape[1] if retall: allvecs = [sim[0]] # If neither are set, then set both to default if maxiter is None and maxfun is None: maxiter = N * 200 maxfun = N * 200 elif maxiter is None: # Convert remaining Nones, to np.inf, unless the other is np.inf, in # which case use the default to avoid unbounded iteration if maxfun == np.inf: maxiter = N * 200 else: maxiter = np.inf elif maxfun is None: if maxiter == np.inf: maxfun = N * 200 else: maxfun = np.inf one2np1 = list(range(1, N + 1)) fsim = numpy.zeros((N + 1,), float) for k in range(N + 1): fsim[k] = func(sim[k]) ind = numpy.argsort(fsim) fsim = numpy.take(fsim, ind, 0) # sort so sim[0,:] has the lowest function value sim = numpy.take(sim, ind, 0) iterations = 1 while (fcalls[0] < maxfun and iterations < maxiter): if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol): break xbar = numpy.add.reduce(sim[:-1], 0) / N xr = (1 + rho) * xbar - rho * sim[-1] fxr = func(xr) doshrink = 0 if fxr < fsim[0]: xe = (1 + rho * chi) * xbar - rho * chi * sim[-1] fxe = func(xe) if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1 + psi * rho) * xbar - psi * rho * sim[-1] fxc = func(xc) if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink = 1 else: # Perform an inside contraction xcc = (1 - psi) * xbar + psi * sim[-1] fxcc = func(xcc) if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma * (sim[j] - sim[0]) fsim[j] = func(sim[j]) ind = numpy.argsort(fsim) sim = numpy.take(sim, ind, 0) fsim = numpy.take(fsim, ind, 0) if callback is not None: callback(sim[0]) iterations += 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = numpy.min(fsim) warnflag = 0 if fcalls[0] >= maxfun: warnflag = 1 msg = _status_message['maxfev'] if disp: print('Warning: ' + msg) elif iterations >= maxiter: warnflag = 2 msg = _status_message['maxiter'] if disp: print('Warning: ' + msg) else: msg = _status_message['success'] if disp: print(msg) print(" Current function value: %f" % fval) print(" Iterations: %d" % iterations) print(" Function evaluations: %d" % fcalls[0]) result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0], status=warnflag, success=(warnflag == 0), message=msg, x=x, final_simplex=(sim, fsim)) if retall: result['allvecs'] = allvecs return result def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None): """ See ``approx_fprime``. An optional initial function value arg is added. 
""" if f0 is None: f0 = f(*((xk,) + args)) grad = numpy.zeros((len(xk),), float) ei = numpy.zeros((len(xk),), float) for k in range(len(xk)): ei[k] = 1.0 d = epsilon * ei grad[k] = (f(*((xk + d,) + args)) - f0) / d[k] ei[k] = 0.0 return grad def approx_fprime(xk, f, epsilon, *args): """Finite-difference approximation of the gradient of a scalar function. Parameters ---------- xk : array_like The coordinate vector at which to determine the gradient of `f`. f : callable The function of which to determine the gradient (partial derivatives). Should take `xk` as first argument, other arguments to `f` can be supplied in ``*args``. Should return a scalar, the value of the function at `xk`. epsilon : array_like Increment to `xk` to use for determining the function gradient. If a scalar, uses the same finite difference delta for all partial derivatives. If an array, should contain one value per element of `xk`. \\*args : args, optional Any other arguments that are to be passed to `f`. Returns ------- grad : ndarray The partial derivatives of `f` to `xk`. See Also -------- check_grad : Check correctness of gradient function against approx_fprime. Notes ----- The function gradient is determined by the forward finite difference formula:: f(xk[i] + epsilon[i]) - f(xk[i]) f'[i] = --------------------------------- epsilon[i] The main use of `approx_fprime` is in scalar function optimizers like `fmin_bfgs`, to determine numerically the Jacobian of a function. Examples -------- >>> from scipy import optimize >>> def func(x, c0, c1): ... "Coordinate vector `x` should be an array of size two." ... return c0 * x[0]**2 + c1*x[1]**2 >>> x = np.ones(2) >>> c0, c1 = (1, 200) >>> eps = np.sqrt(np.finfo(float).eps) >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1) array([ 2. , 400.00004198]) """ return _approx_fprime_helper(xk, f, epsilon, args=args) def check_grad(func, grad, x0, *args, **kwargs): """Check the correctness of a gradient function by comparing it against a (forward) finite-difference approximation of the gradient. Parameters ---------- func : callable ``func(x0, *args)`` Function whose derivative is to be checked. grad : callable ``grad(x0, *args)`` Gradient of `func`. x0 : ndarray Points to check `grad` against forward difference approximation of grad using `func`. args : \\*args, optional Extra arguments passed to `func` and `grad`. epsilon : float, optional Step size used for the finite difference approximation. It defaults to ``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08. Returns ------- err : float The square root of the sum of squares (i.e. the 2-norm) of the difference between ``grad(x0, *args)`` and the finite difference approximation of `grad` using func at the points `x0`. See Also -------- approx_fprime Examples -------- >>> def func(x): ... return x[0]**2 - 0.5 * x[1]**3 >>> def grad(x): ... 
return [2 * x[0], -1.5 * x[1]**2] >>> from scipy.optimize import check_grad >>> check_grad(func, grad, [1.5, -1.5]) 2.9802322387695312e-08 """ step = kwargs.pop('epsilon', _epsilon) if kwargs: raise ValueError("Unknown keyword arguments: %r" % (list(kwargs.keys()),)) return sqrt(sum((grad(x0, *args) - approx_fprime(x0, func, step, *args))**2)) def approx_fhess_p(x0, p, fprime, epsilon, *args): f2 = fprime(*((x0 + epsilon*p,) + args)) f1 = fprime(*((x0,) + args)) return (f2 - f1) / epsilon class _LineSearchError(RuntimeError): pass def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs): """ Same as line_search_wolfe1, but falls back to line_search_wolfe2 if a suitable step length is not found, and raises an exception if neither method finds one. Raises ------ _LineSearchError If no suitable step size is found """ extra_condition = kwargs.pop('extra_condition', None) ret = line_search_wolfe1(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs) if ret[0] is not None and extra_condition is not None: xp1 = xk + ret[0] * pk if not extra_condition(ret[0], xp1, ret[3], ret[5]): # Reject step if extra_condition fails ret = (None,) if ret[0] is None: # line search failed: try different one. with warnings.catch_warnings(): warnings.simplefilter('ignore', LineSearchWarning) kwargs2 = {} for key in ('c1', 'c2', 'amax'): if key in kwargs: kwargs2[key] = kwargs[key] ret = line_search_wolfe2(f, fprime, xk, pk, gfk, old_fval, old_old_fval, extra_condition=extra_condition, **kwargs2) if ret[0] is None: raise _LineSearchError() return ret def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): """ Minimize a function using the BFGS algorithm. Parameters ---------- f : callable f(x,*args) Objective function to be minimized. x0 : ndarray Initial guess. fprime : callable f'(x,*args), optional Gradient of f. args : tuple, optional Extra arguments passed to f and fprime. gtol : float, optional Gradient norm must be less than gtol before successful termination. norm : float, optional Order of norm (Inf is max, -Inf is min). epsilon : int or ndarray, optional If fprime is approximated, use this value for the step size. callback : callable, optional An optional user-supplied function to call after each iteration. Called as callback(xk), where xk is the current parameter vector. maxiter : int, optional Maximum number of iterations to perform. full_output : bool, optional If True, return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp : bool, optional Print convergence message if True. retall : bool, optional Return a list of results at each iteration if True. Returns ------- xopt : ndarray Parameters which minimize f, i.e. f(xopt) == fopt. fopt : float Minimum value. gopt : ndarray Value of gradient at minimum, f'(xopt), which should be near 0. Bopt : ndarray Value of 1/f''(xopt), i.e. the inverse hessian matrix. func_calls : int Number of function_calls made. grad_calls : int Number of gradient calls made. warnflag : int 1 : Maximum number of iterations exceeded. 2 : Gradient and/or function calls not changing. allvecs : list The value of xopt at each iteration. Only returned if retall is True. See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'BFGS' `method` in particular.
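    Examples
    --------
    A minimal sketch (the quadratic objective and starting point are
    arbitrary; the exact minimizer is [1.0, 2.5]):

    >>> from scipy import optimize
    >>> def f(x):
    ...     return (x[0] - 1)**2 + (x[1] - 2.5)**2
    >>> xopt = optimize.fmin_bfgs(f, [2, 2], disp=False)
    >>> np.allclose(xopt, [1.0, 2.5], atol=1e-4)
    True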
Notes ----- Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS). References ---------- Wright and Nocedal, 'Numerical Optimization', 1999, pg. 198. """ opts = {'gtol': gtol, 'norm': norm, 'eps': epsilon, 'disp': disp, 'maxiter': maxiter, 'return_all': retall} res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts) if full_output: retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'], res['nfev'], res['njev'], res['status']) if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None, gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None, disp=False, return_all=False, **unknown_options): """ Minimization of scalar function of one or more variables using the BFGS algorithm. Options ------- disp : bool Set to True to print convergence messages. maxiter : int Maximum number of iterations to perform. gtol : float Gradient norm must be less than `gtol` before successful termination. norm : float Order of norm (Inf is max, -Inf is min). eps : float or ndarray If `jac` is approximated, use this value for the step size. """ _check_unknown_options(unknown_options) f = fun fprime = jac epsilon = eps retall = return_all x0 = asarray(x0).flatten() if x0.ndim == 0: x0.shape = (1,) if maxiter is None: maxiter = len(x0) * 200 func_calls, f = wrap_function(f, args) if fprime is None: grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) else: grad_calls, myfprime = wrap_function(fprime, args) gfk = myfprime(x0) k = 0 N = len(x0) I = numpy.eye(N, dtype=int) Hk = I # Sets the initial step guess to dx ~ 1 old_fval = f(x0) old_old_fval = old_fval + np.linalg.norm(gfk) / 2 xk = x0 if retall: allvecs = [x0] warnflag = 0 gnorm = vecnorm(gfk, ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -numpy.dot(Hk, gfk) try: alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval, old_old_fval, amin=1e-100, amax=1e100) except _LineSearchError: # Line search failed to find a better solution. warnflag = 2 break xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: gfkp1 = myfprime(xkp1) yk = gfkp1 - gfk gfk = gfkp1 if callback is not None: callback(xk) k += 1 gnorm = vecnorm(gfk, ord=norm) if (gnorm <= gtol): break if not numpy.isfinite(old_fval): # We correctly found +-Inf as optimal value, or something went # wrong. warnflag = 2 break try: # this was handled in numeric, let it remain for extra safety rhok = 1.0 / (numpy.dot(yk, sk)) except ZeroDivisionError: rhok = 1000.0 if disp: print("Divide-by-zero encountered: rhok assumed large") if isinf(rhok): # this is a patch for numpy rhok = 1000.0 if disp: print("Divide-by-zero encountered: rhok assumed large") A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] * sk[numpy.newaxis, :]) fval = old_fval if np.isnan(fval): # This can happen if the first call to f returned NaN; # the loop is then never entered.
warnflag = 2 if warnflag == 2: msg = _status_message['pr_loss'] elif k >= maxiter: warnflag = 1 msg = _status_message['maxiter'] else: msg = _status_message['success'] if disp: print("%s%s" % ("Warning: " if warnflag != 0 else "", msg)) print(" Current function value: %f" % fval) print(" Iterations: %d" % k) print(" Function evaluations: %d" % func_calls[0]) print(" Gradient evaluations: %d" % grad_calls[0]) result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0], njev=grad_calls[0], status=warnflag, success=(warnflag == 0), message=msg, x=xk, nit=k) if retall: result['allvecs'] = allvecs return result def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): """ Minimize a function using a nonlinear conjugate gradient algorithm. Parameters ---------- f : callable, ``f(x, *args)`` Objective function to be minimized. Here `x` must be a 1-D array of the variables that are to be changed in the search for a minimum, and `args` are the other (fixed) parameters of `f`. x0 : ndarray A user-supplied initial estimate of `xopt`, the optimal value of `x`. It must be a 1-D array of values. fprime : callable, ``fprime(x, *args)``, optional A function that returns the gradient of `f` at `x`. Here `x` and `args` are as described above for `f`. The returned value must be a 1-D array. Defaults to None, in which case the gradient is approximated numerically (see `epsilon`, below). args : tuple, optional Parameter values passed to `f` and `fprime`. Must be supplied whenever additional fixed parameters are needed to completely specify the functions `f` and `fprime`. gtol : float, optional Stop when the norm of the gradient is less than `gtol`. norm : float, optional Order to use for the norm of the gradient (``-np.Inf`` is min, ``np.Inf`` is max). epsilon : float or ndarray, optional Step size(s) to use when `fprime` is approximated numerically. Can be a scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the floating point machine precision. Usually ``sqrt(eps)`` is about 1.5e-8. maxiter : int, optional Maximum number of iterations to perform. Default is ``200 * len(x0)``. full_output : bool, optional If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in addition to `xopt`. See the Returns section below for additional information on optional return values. disp : bool, optional If True, print a convergence message, followed by `xopt`. retall : bool, optional If True, add to the returned values the results of each iteration. callback : callable, optional An optional user-supplied function, called after each iteration. Called as ``callback(xk)``, where ``xk`` is the current value of `x0`. Returns ------- xopt : ndarray Parameters which minimize f, i.e. ``f(xopt) == fopt``. fopt : float, optional Minimum value found, f(xopt). Only returned if `full_output` is True. func_calls : int, optional The number of function_calls made. Only returned if `full_output` is True. grad_calls : int, optional The number of gradient calls made. Only returned if `full_output` is True. warnflag : int, optional Integer value with warning status, only returned if `full_output` is True. 0 : Success. 1 : The maximum number of iterations was exceeded. 2 : Gradient and/or function calls were not changing. May indicate that precision was lost, i.e., the routine did not converge. allvecs : list of ndarray, optional List of arrays, containing the results at each iteration. Only returned if `retall` is True.
See Also -------- minimize : common interface to all `scipy.optimize` algorithms for unconstrained and constrained minimization of multivariate functions. It provides an alternative way to call ``fmin_cg``, by specifying ``method='CG'``. Notes ----- This conjugate gradient algorithm is based on that of Polak and Ribiere [1]_. Conjugate gradient methods tend to work better when: 1. `f` has a unique global minimizing point, and no local minima or other stationary points, 2. `f` is, at least locally, reasonably well approximated by a quadratic function of the variables, 3. `f` is continuous and has a continuous gradient, 4. `fprime` is not too large, e.g., has a norm less than 1000, 5. The initial guess, `x0`, is reasonably close to `f` 's global minimizing point, `xopt`. References ---------- .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122. Examples -------- Example 1: seek the minimum value of the expression ``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values of the parameters and an initial guess ``(u, v) = (0, 0)``. >>> args = (2, 3, 7, 8, 9, 10) # parameter values >>> def f(x, *args): ... u, v = x ... a, b, c, d, e, f = args ... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f >>> def gradf(x, *args): ... u, v = x ... a, b, c, d, e, f = args ... gu = 2*a*u + b*v + d # u-component of the gradient ... gv = b*u + 2*c*v + e # v-component of the gradient ... return np.asarray((gu, gv)) >>> x0 = np.asarray((0, 0)) # Initial guess. >>> from scipy import optimize >>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args) Optimization terminated successfully. Current function value: 1.617021 Iterations: 4 Function evaluations: 8 Gradient evaluations: 8 >>> res1 array([-1.80851064, -0.25531915]) Example 2: solve the same problem using the `minimize` function. (This `myopts` dictionary shows all of the available options, although in practice only non-default values would be needed. The returned value will be a dictionary.) >>> opts = {'maxiter' : None, # default value. ... 'disp' : True, # non-default value. ... 'gtol' : 1e-5, # default value. ... 'norm' : np.inf, # default value. ... 'eps' : 1.4901161193847656e-08} # default value. >>> res2 = optimize.minimize(f, x0, jac=gradf, args=args, ... method='CG', options=opts) Optimization terminated successfully. Current function value: 1.617021 Iterations: 4 Function evaluations: 8 Gradient evaluations: 8 >>> res2.x # minimum found array([-1.80851064, -0.25531915]) """ opts = {'gtol': gtol, 'norm': norm, 'eps': epsilon, 'disp': disp, 'maxiter': maxiter, 'return_all': retall} res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts) if full_output: retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status'] if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_cg(fun, x0, args=(), jac=None, callback=None, gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None, disp=False, return_all=False, **unknown_options): """ Minimization of scalar function of one or more variables using the conjugate gradient algorithm. Options ------- disp : bool Set to True to print convergence messages. maxiter : int Maximum number of iterations to perform. gtol : float Gradient norm must be less than `gtol` before successful termination. norm : float Order of norm (Inf is max, -Inf is min). eps : float or ndarray If `jac` is approximated, use this value for the step size. 
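    Notes
    -----
    This implementation uses the Polak-Ribiere+ update: with gradient
    ``g_k`` at step ``k`` and ``y_k = g_{k+1} - g_k``, the conjugation
    coefficient is ``beta_k = max(0, dot(y_k, g_{k+1}) / dot(g_k, g_k))``,
    and the line search additionally enforces an explicit sufficient
    descent condition (see the inline ``descent_condition`` helper below).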
""" _check_unknown_options(unknown_options) f = fun fprime = jac epsilon = eps retall = return_all x0 = asarray(x0).flatten() if maxiter is None: maxiter = len(x0) * 200 func_calls, f = wrap_function(f, args) if fprime is None: grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) else: grad_calls, myfprime = wrap_function(fprime, args) gfk = myfprime(x0) k = 0 xk = x0 # Sets the initial step guess to dx ~ 1 old_fval = f(xk) old_old_fval = old_fval + np.linalg.norm(gfk) / 2 if retall: allvecs = [xk] warnflag = 0 pk = -gfk gnorm = vecnorm(gfk, ord=norm) sigma_3 = 0.01 while (gnorm > gtol) and (k < maxiter): deltak = numpy.dot(gfk, gfk) cached_step = [None] def polak_ribiere_powell_step(alpha, gfkp1=None): xkp1 = xk + alpha * pk if gfkp1 is None: gfkp1 = myfprime(xkp1) yk = gfkp1 - gfk beta_k = max(0, numpy.dot(yk, gfkp1) / deltak) pkp1 = -gfkp1 + beta_k * pk gnorm = vecnorm(gfkp1, ord=norm) return (alpha, xkp1, pkp1, gfkp1, gnorm) def descent_condition(alpha, xkp1, fp1, gfkp1): # Polak-Ribiere+ needs an explicit check of a sufficient # descent condition, which is not guaranteed by strong Wolfe. # # See Gilbert & Nocedal, "Global convergence properties of # conjugate gradient methods for optimization", # SIAM J. Optimization 2, 21 (1992). cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1) alpha, xk, pk, gfk, gnorm = cached_step # Accept step if it leads to convergence. if gnorm <= gtol: return True # Accept step if sufficient descent condition applies. return numpy.dot(pk, gfk) <= -sigma_3 * numpy.dot(gfk, gfk) try: alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval, old_old_fval, c2=0.4, amin=1e-100, amax=1e100, extra_condition=descent_condition) except _LineSearchError: # Line search failed to find a better solution. warnflag = 2 break # Reuse already computed results if possible if alpha_k == cached_step[0]: alpha_k, xk, pk, gfk, gnorm = cached_step else: alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1) if retall: allvecs.append(xk) if callback is not None: callback(xk) k += 1 fval = old_fval if warnflag == 2: msg = _status_message['pr_loss'] elif k >= maxiter: warnflag = 1 msg = _status_message['maxiter'] else: msg = _status_message['success'] if disp: print("%s%s" % ("Warning: " if warnflag != 0 else "", msg)) print(" Current function value: %f" % fval) print(" Iterations: %d" % k) print(" Function evaluations: %d" % func_calls[0]) print(" Gradient evaluations: %d" % grad_calls[0]) result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0], njev=grad_calls[0], status=warnflag, success=(warnflag == 0), message=msg, x=xk, nit=k) if retall: result['allvecs'] = allvecs return result def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): """ Unconstrained minimization of a function using the Newton-CG method. Parameters ---------- f : callable ``f(x, *args)`` Objective function to be minimized. x0 : ndarray Initial guess. fprime : callable ``f'(x, *args)`` Gradient of f. fhess_p : callable ``fhess_p(x, p, *args)``, optional Function which computes the Hessian of f times an arbitrary vector, p. fhess : callable ``fhess(x, *args)``, optional Function to compute the Hessian matrix of f. args : tuple, optional Extra arguments passed to f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). 
epsilon : float or ndarray, optional If fhess is approximated, use this value for the step size. callback : callable, optional An optional user-supplied function which is called after each iteration. Called as callback(xk), where xk is the current parameter vector. avextol : float, optional Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter : int, optional Maximum number of iterations to perform. full_output : bool, optional If True, return the optional outputs. disp : bool, optional If True, print convergence message. retall : bool, optional If True, return a list of results at each iteration. Returns ------- xopt : ndarray Parameters which minimize f, i.e. ``f(xopt) == fopt``. fopt : float Value of the function at xopt, i.e. ``fopt = f(xopt)``. fcalls : int Number of function calls made. gcalls : int Number of gradient calls made. hcalls : int Number of hessian calls made. warnflag : int Warnings generated by the algorithm. 1 : Maximum number of iterations exceeded. allvecs : list The result at each iteration, if retall is True (see below). See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'Newton-CG' `method` in particular. Notes ----- Only one of `fhess_p` or `fhess` needs to be given. If `fhess` is provided, then `fhess_p` will be ignored. `fhess_p` must compute the Hessian times an arbitrary vector; if neither `fhess` nor `fhess_p` is provided, the Hessian-vector product is approximated using finite differences on `fprime`. Newton-CG methods are also called truncated Newton methods. This function differs from scipy.optimize.fmin_tnc because 1. scipy.optimize.fmin_ncg is written purely in Python using numpy and scipy while scipy.optimize.fmin_tnc calls a C function. 2. scipy.optimize.fmin_ncg is only for unconstrained minimization while scipy.optimize.fmin_tnc is for unconstrained minimization or box constrained minimization. (Box constraints give lower and upper bounds for each variable separately.) References ---------- Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140. """ opts = {'xtol': avextol, 'eps': epsilon, 'maxiter': maxiter, 'disp': disp, 'return_all': retall} res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, callback=callback, **opts) if full_output: retlist = (res['x'], res['fun'], res['nfev'], res['njev'], res['nhev'], res['status']) if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None, callback=None, xtol=1e-5, eps=_epsilon, maxiter=None, disp=False, return_all=False, **unknown_options): """ Minimization of scalar function of one or more variables using the Newton-CG algorithm. Note that the `jac` parameter (Jacobian) is required. Options ------- disp : bool Set to True to print convergence messages. xtol : float Average relative error in solution `xopt` acceptable for convergence. maxiter : int Maximum number of iterations to perform. eps : float or ndarray If `jac` is approximated, use this value for the step size.
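    Examples
    --------
    A minimal sketch via the public interface, using this module's
    Rosenbrock helpers and a Hessian-vector product instead of a full
    Hessian (the starting point is arbitrary):

    >>> from scipy import optimize
    >>> xopt = optimize.fmin_ncg(optimize.rosen, [2., 2.], optimize.rosen_der,
    ...                          fhess_p=optimize.rosen_hess_prod, disp=False)
    >>> np.allclose(xopt, [1.0, 1.0], atol=1e-4)
    True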
""" _check_unknown_options(unknown_options) if jac is None: raise ValueError('Jacobian is required for Newton-CG method') f = fun fprime = jac fhess_p = hessp fhess = hess avextol = xtol epsilon = eps retall = return_all def terminate(warnflag, msg): if disp: print(msg) print(" Current function value: %f" % old_fval) print(" Iterations: %d" % k) print(" Function evaluations: %d" % fcalls[0]) print(" Gradient evaluations: %d" % gcalls[0]) print(" Hessian evaluations: %d" % hcalls) fval = old_fval result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0], njev=gcalls[0], nhev=hcalls, status=warnflag, success=(warnflag == 0), message=msg, x=xk, nit=k) if retall: result['allvecs'] = allvecs return result x0 = asarray(x0).flatten() fcalls, f = wrap_function(f, args) gcalls, fprime = wrap_function(fprime, args) hcalls = 0 if maxiter is None: maxiter = len(x0)*200 cg_maxiter = 20*len(x0) xtol = len(x0) * avextol update = [2 * xtol] xk = x0 if retall: allvecs = [xk] k = 0 gfk = None old_fval = f(x0) old_old_fval = None float64eps = numpy.finfo(numpy.float64).eps while numpy.add.reduce(numpy.abs(update)) > xtol: if k >= maxiter: msg = "Warning: " + _status_message['maxiter'] return terminate(1, msg) # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -fprime(xk) maggrad = numpy.add.reduce(numpy.abs(b)) eta = numpy.min([0.5, numpy.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), dtype=x0.dtype) ri = -b psupi = -ri i = 0 dri0 = numpy.dot(ri, ri) if fhess is not None: # you want to compute hessian once. A = fhess(*(xk,) + args) hcalls = hcalls + 1 for k2 in xrange(cg_maxiter): if numpy.add.reduce(numpy.abs(ri)) <= termcond: break if fhess is None: if fhess_p is None: Ap = approx_fhess_p(xk, psupi, fprime, epsilon) else: Ap = fhess_p(xk, psupi, *args) hcalls = hcalls + 1 else: Ap = numpy.dot(A, psupi) # check curvature Ap = asarray(Ap).squeeze() # get rid of matrices... curv = numpy.dot(psupi, Ap) if 0 <= curv <= 3 * float64eps: break elif curv < 0: if (i > 0): break else: # fall back to steepest descent direction xsupi = dri0 / (-curv) * b break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = numpy.dot(ri, ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update numpy.dot(ri,ri) for next time. else: # curvature keeps increasing, bail out msg = ("Warning: CG iterations didn't converge. The Hessian is not " "positive definite.") return terminate(3, msg) pk = xsupi # search direction is solution to system. gfk = -b # gradient at xk try: alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \ _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval) except _LineSearchError: # Line search failed to find a better solution. msg = "Warning: " + _status_message['pr_loss'] return terminate(2, msg) update = alphak * pk xk = xk + update # upcast if necessary if callback is not None: callback(xk) if retall: allvecs.append(xk) k += 1 else: msg = _status_message['success'] return terminate(0, msg) def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, full_output=0, disp=1): """Bounded minimization for scalar functions. Parameters ---------- func : callable f(x,*args) Objective function to be minimized (must accept and return scalars). x1, x2 : float or array scalar The optimization bounds. args : tuple, optional Extra arguments passed to function. xtol : float, optional The convergence tolerance. maxfun : int, optional Maximum number of function evaluations allowed. 
full_output : bool, optional If True, return optional outputs. disp : int, optional If non-zero, print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. Returns ------- xopt : ndarray Parameters (over given interval) which minimize the objective function. fval : number The function value at the minimum point. ierr : int An error flag (0 if converged, 1 if maximum number of function calls reached). numfunc : int The number of function calls made. See also -------- minimize_scalar: Interface to minimization algorithms for scalar univariate functions. See the 'Bounded' `method` in particular. Notes ----- Finds a local minimizer of the scalar function `func` in the interval x1 < xopt < x2 using Brent's method. (See `brent` for auto-bracketing). Examples -------- `fminbound` finds the minimum of the function in the given range. The following examples illustrate this behaviour: >>> def f(x): ... return x**2 >>> from scipy import optimize >>> minimum = optimize.fminbound(f, -1, 2) >>> minimum 0.0 >>> minimum = optimize.fminbound(f, 1, 2) >>> minimum 1.0000059608609866 """ options = {'xatol': xtol, 'maxiter': maxfun, 'disp': disp} res = _minimize_scalar_bounded(func, (x1, x2), args, **options) if full_output: return res['x'], res['fun'], res['status'], res['nfev'] else: return res['x'] def _minimize_scalar_bounded(func, bounds, args=(), xatol=1e-5, maxiter=500, disp=0, **unknown_options): """ Options ------- maxiter : int Maximum number of iterations to perform. disp : int, optional If non-zero, print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. xatol : float Absolute error in solution `xopt` acceptable for convergence.
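    Examples
    --------
    A minimal sketch via the public `minimize_scalar` interface (the
    objective and bounds are arbitrary):

    >>> from scipy.optimize import minimize_scalar
    >>> res = minimize_scalar(lambda x: (x - 1.5)**2, bounds=(1, 2),
    ...                       method='bounded')
    >>> res.success
    True
    >>> abs(res.x - 1.5) < 1e-4
    True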
""" _check_unknown_options(unknown_options) maxfun = maxiter # Test bounds are of correct form if len(bounds) != 2: raise ValueError('bounds must have two elements.') x1, x2 = bounds if not (is_array_scalar(x1) and is_array_scalar(x2)): raise ValueError("Optimisation bounds must be scalars" " or array scalars.") if x1 > x2: raise ValueError("The lower bound exceeds the upper bound.") flag = 0 header = ' Func-count x f(x) Procedure' step = ' initial' sqrt_eps = sqrt(2.2e-16) golden_mean = 0.5 * (3.0 - sqrt(5.0)) a, b = x1, x2 fulc = a + golden_mean * (b - a) nfc, xf = fulc, fulc rat = e = 0.0 x = xf fx = func(x, *args) num = 1 fmin_data = (1, xf, fx) ffulc = fnfc = fx xm = 0.5 * (a + b) tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0 tol2 = 2.0 * tol1 if disp > 2: print(" ") print(header) print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))): golden = 1 # Check for parabolic fit if numpy.abs(e) > tol1: golden = 0 r = (xf - nfc) * (fx - ffulc) q = (xf - fulc) * (fx - fnfc) p = (xf - fulc) * q - (xf - nfc) * r q = 2.0 * (q - r) if q > 0.0: p = -p q = numpy.abs(q) r = e e = rat # Check for acceptability of parabola if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and (p < q * (b - xf))): rat = (p + 0.0) / q x = xf + rat step = ' parabolic' if ((x - a) < tol2) or ((b - x) < tol2): si = numpy.sign(xm - xf) + ((xm - xf) == 0) rat = tol1 * si else: # do a golden section step golden = 1 if golden: # Do a golden-section step if xf >= xm: e = a - xf else: e = b - xf rat = golden_mean*e step = ' golden' si = numpy.sign(rat) + (rat == 0) x = xf + si * numpy.max([numpy.abs(rat), tol1]) fu = func(x, *args) num += 1 fmin_data = (num, x, fu) if disp > 2: print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) if fu <= fx: if x >= xf: a = xf else: b = xf fulc, ffulc = nfc, fnfc nfc, fnfc = xf, fx xf, fx = x, fu else: if x < xf: a = x else: b = x if (fu <= fnfc) or (nfc == xf): fulc, ffulc = nfc, fnfc nfc, fnfc = x, fu elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): fulc, ffulc = x, fu xm = 0.5 * (a + b) tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0 tol2 = 2.0 * tol1 if num >= maxfun: flag = 1 break fval = fx if disp > 0: _endprint(x, flag, fval, maxfun, xatol, disp) result = OptimizeResult(fun=fval, status=flag, success=(flag == 0), message={0: 'Solution found.', 1: 'Maximum number of function calls ' 'reached.'}.get(flag, ''), x=xf, nfev=num) return result class Brent: #need to rethink design of __init__ def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, full_output=0): self.func = func self.args = args self.tol = tol self.maxiter = maxiter self._mintol = 1.0e-11 self._cg = 0.3819660 self.xmin = None self.fval = None self.iter = 0 self.funcalls = 0 # need to rethink design of set_bracket (new options, etc) def set_bracket(self, brack=None): self.brack = brack def get_bracket_info(self): #set up func = self.func args = self.args brack = self.brack ### BEGIN core bracket_info code ### ### carefully DOCUMENT any CHANGES in core ## if brack is None: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) elif len(brack) == 2: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args) elif len(brack) == 3: xa, xb, xc = brack if (xa > xc): # swap so xa < xc can be assumed xc, xa = xa, xc if not ((xa < xb) and (xb < xc)): raise ValueError("Not a bracketing interval.") fa = func(*((xa,) + args)) fb = func(*((xb,) + args)) fc = func(*((xc,) + args)) if not ((fb < fa) and (fb < fc)): raise ValueError("Not a 
bracketing interval.") funcalls = 3 else: raise ValueError("Bracketing interval must be " "length 2 or 3 sequence.") ### END core bracket_info code ### return xa, xb, xc, fa, fb, fc, funcalls def optimize(self): # set up for optimization func = self.func xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info() _mintol = self._mintol _cg = self._cg ################################# #BEGIN CORE ALGORITHM ################################# x = w = v = xb fw = fv = fx = func(*((x,) + self.args)) if (xa < xc): a = xa b = xc else: a = xc b = xa deltax = 0.0 funcalls += 1 iter = 0 while (iter < self.maxiter): tol1 = self.tol * numpy.abs(x) + _mintol tol2 = 2.0 * tol1 xmid = 0.5 * (a + b) # check for convergence if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)): break # XXX In the first iteration, rat is only bound in the true case # of this conditional. This used to cause an UnboundLocalError # (gh-4140). It should be set before the if (but to what?). if (numpy.abs(deltax) <= tol1): if (x >= xmid): deltax = a - x # do a golden section step else: deltax = b - x rat = _cg * deltax else: # do a parabolic step tmp1 = (x - w) * (fx - fv) tmp2 = (x - v) * (fx - fw) p = (x - v) * tmp2 - (x - w) * tmp1 tmp2 = 2.0 * (tmp2 - tmp1) if (tmp2 > 0.0): p = -p tmp2 = numpy.abs(tmp2) dx_temp = deltax deltax = rat # check parabolic fit if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and (numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))): rat = p * 1.0 / tmp2 # if parabolic step is useful. u = x + rat if ((u - a) < tol2 or (b - u) < tol2): if xmid - x >= 0: rat = tol1 else: rat = -tol1 else: if (x >= xmid): deltax = a - x # if it's not do a golden section step else: deltax = b - x rat = _cg * deltax if (numpy.abs(rat) < tol1): # update by at least tol1 if rat >= 0: u = x + tol1 else: u = x - tol1 else: u = x + rat fu = func(*((u,) + self.args)) # calculate new output value funcalls += 1 if (fu > fx): # if it's bigger than current if (u < x): a = u else: b = u if (fu <= fw) or (w == x): v = w w = u fv = fw fw = fu elif (fu <= fv) or (v == x) or (v == w): v = u fv = fu else: if (u >= x): a = x else: b = x v = w w = x x = u fv = fw fw = fx fx = fu iter += 1 ################################# #END CORE ALGORITHM ################################# self.xmin = x self.fval = fx self.iter = iter self.funcalls = funcalls def get_result(self, full_output=False): if full_output: return self.xmin, self.fval, self.iter, self.funcalls else: return self.xmin def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): """ Given a function of one-variable and a possible bracket, return the local minimum of the function isolated to a fractional precision of tol. Parameters ---------- func : callable f(x,*args) Objective function. args : tuple, optional Additional arguments (if present). brack : tuple, optional Either a triple (xa,xb,xc) where xa<xb<xc and func(xb) < func(xa), func(xc) or a pair (xa,xb) which are used as a starting interval for a downhill bracket search (see `bracket`). Providing the pair (xa,xb) does not always mean the obtained solution will satisfy xa<=x<=xb. tol : float, optional Stop if between iteration change is less than `tol`. full_output : bool, optional If True, return all output args (xmin, fval, iter, funcalls). maxiter : int, optional Maximum number of iterations in solution. Returns ------- xmin : ndarray Optimum point. fval : float Optimum value. iter : int Number of iterations. funcalls : int Number of objective function evaluations made. 
See also -------- minimize_scalar: Interface to minimization algorithms for scalar univariate functions. See the 'Brent' `method` in particular. Notes ----- Uses inverse parabolic interpolation when possible to speed up convergence of golden section method. Does not ensure that the minimum lies in the range specified by `brack`. See `fminbound`. Examples -------- We illustrate the behaviour of the function when `brack` is of size 2 and 3 respectively. In the case where `brack` is of the form (xa,xb), we can see for the given values, the output need not necessarily lie in the range (xa,xb). >>> def f(x): ... return x**2 >>> from scipy import optimize >>> minimum = optimize.brent(f,brack=(1,2)) >>> minimum 0.0 >>> minimum = optimize.brent(f,brack=(-1,0.5,2)) >>> minimum -2.7755575615628914e-17 """ options = {'xtol': tol, 'maxiter': maxiter} res = _minimize_scalar_brent(func, brack, args, **options) if full_output: return res['x'], res['fun'], res['nit'], res['nfev'] else: return res['x'] def _minimize_scalar_brent(func, brack=None, args=(), xtol=1.48e-8, maxiter=500, **unknown_options): """ Options ------- maxiter : int Maximum number of iterations to perform. xtol : float Relative error in solution `xopt` acceptable for convergence. Notes ----- Uses inverse parabolic interpolation when possible to speed up convergence of golden section method. """ _check_unknown_options(unknown_options) tol = xtol if tol < 0: raise ValueError('tolerance should be >= 0, got %r' % tol) brent = Brent(func=func, args=args, tol=tol, full_output=True, maxiter=maxiter) brent.set_bracket(brack) brent.optimize() x, fval, nit, nfev = brent.get_result(full_output=True) return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev, success=nit < maxiter) def golden(func, args=(), brack=None, tol=_epsilon, full_output=0, maxiter=5000): """ Return the minimum of a function of one variable using golden section method. Given a function of one variable and a possible bracketing interval, return the minimum of the function isolated to a fractional precision of tol. Parameters ---------- func : callable func(x,*args) Objective function to minimize. args : tuple, optional Additional arguments (if present), passed to func. brack : tuple, optional Triple (a,b,c), where (a<b<c) and func(b) < func(a),func(c). If bracket consists of two numbers (a, c), then they are assumed to be a starting interval for a downhill bracket search (see `bracket`); it doesn't always mean that obtained solution will satisfy a<=x<=c. tol : float, optional x tolerance stop criterion full_output : bool, optional If True, return optional outputs. maxiter : int Maximum number of iterations to perform. See also -------- minimize_scalar: Interface to minimization algorithms for scalar univariate functions. See the 'Golden' `method` in particular. Notes ----- Uses analog of bisection method to decrease the bracketed interval. Examples -------- We illustrate the behaviour of the function when `brack` is of size 2 and 3 respectively. In the case where `brack` is of the form (xa,xb), we can see for the given values, the output need not necessarily lie in the range ``(xa, xb)``. >>> def f(x): ... 
    ...     return x**2

    >>> from scipy import optimize

    >>> minimum = optimize.golden(f, brack=(1, 2))
    >>> minimum
    1.5717277788484873e-162
    >>> minimum = optimize.golden(f, brack=(-1, 0.5, 2))
    >>> minimum
    -1.5717277788484873e-162

    """
    options = {'xtol': tol, 'maxiter': maxiter}
    res = _minimize_scalar_golden(func, brack, args, **options)
    if full_output:
        return res['x'], res['fun'], res['nfev']
    else:
        return res['x']


def _minimize_scalar_golden(func, brack=None, args=(),
                            xtol=_epsilon, maxiter=5000, **unknown_options):
    """
    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.

    """
    _check_unknown_options(unknown_options)
    tol = xtol
    if brack is None:
        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
    elif len(brack) == 2:
        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
                                                   xb=brack[1], args=args)
    elif len(brack) == 3:
        xa, xb, xc = brack
        if (xa > xc):  # swap so xa < xc can be assumed
            xc, xa = xa, xc
        if not ((xa < xb) and (xb < xc)):
            raise ValueError("Not a bracketing interval.")
        fa = func(*((xa,) + args))
        fb = func(*((xb,) + args))
        fc = func(*((xc,) + args))
        if not ((fb < fa) and (fb < fc)):
            raise ValueError("Not a bracketing interval.")
        funcalls = 3
    else:
        raise ValueError("Bracketing interval must be length 2 or 3 sequence.")

    _gR = 0.61803399  # golden ratio conjugate: 2.0/(1.0+sqrt(5.0))
    _gC = 1.0 - _gR
    x3 = xc
    x0 = xa
    if (numpy.abs(xc - xb) > numpy.abs(xb - xa)):
        x1 = xb
        x2 = xb + _gC * (xc - xb)
    else:
        x2 = xb
        x1 = xb - _gC * (xb - xa)
    f1 = func(*((x1,) + args))
    f2 = func(*((x2,) + args))
    funcalls += 2
    nit = 0
    for i in xrange(maxiter):
        if numpy.abs(x3 - x0) <= tol * (numpy.abs(x1) + numpy.abs(x2)):
            break
        if (f2 < f1):
            x0 = x1
            x1 = x2
            x2 = _gR * x1 + _gC * x3
            f1 = f2
            f2 = func(*((x2,) + args))
        else:
            x3 = x2
            x2 = x1
            x1 = _gR * x2 + _gC * x0
            f2 = f1
            f1 = func(*((x1,) + args))
        funcalls += 1
        nit += 1
    if (f1 < f2):
        xmin = x1
        fval = f1
    else:
        xmin = x2
        fval = f2

    return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit,
                          success=nit < maxiter)


def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
    """
    Bracket the minimum of the function.

    Given a function and distinct initial points, search in the
    downhill direction (as defined by the initial points) and return
    new points xa, xb, xc that bracket the minimum of the function
    f(xa) > f(xb) < f(xc). It doesn't always mean that the obtained
    solution will satisfy xa<=x<=xb.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to minimize.
    xa, xb : float, optional
        Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
    args : tuple, optional
        Additional arguments (if present), passed to `func`.
    grow_limit : float, optional
        Maximum grow limit. Defaults to 110.0.
    maxiter : int, optional
        Maximum number of iterations to perform. Defaults to 1000.

    Returns
    -------
    xa, xb, xc : float
        Bracket.
    fa, fb, fc : float
        Objective function values in bracket.
    funcalls : int
        Number of function evaluations made.
""" _gold = 1.618034 # golden ratio: (1.0+sqrt(5.0))/2.0 _verysmall_num = 1e-21 fa = func(*(xa,) + args) fb = func(*(xb,) + args) if (fa < fb): # Switch so fa > fb xa, xb = xb, xa fa, fb = fb, fa xc = xb + _gold * (xb - xa) fc = func(*((xc,) + args)) funcalls = 3 iter = 0 while (fc < fb): tmp1 = (xb - xa) * (fb - fc) tmp2 = (xb - xc) * (fb - fa) val = tmp2 - tmp1 if numpy.abs(val) < _verysmall_num: denom = 2.0 * _verysmall_num else: denom = 2.0 * val w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom wlim = xb + grow_limit * (xc - xb) if iter > maxiter: raise RuntimeError("Too many iterations.") iter += 1 if (w - xc) * (xb - w) > 0.0: fw = func(*((w,) + args)) funcalls += 1 if (fw < fc): xa = xb xb = w fa = fb fb = fw return xa, xb, xc, fa, fb, fc, funcalls elif (fw > fb): xc = w fc = fw return xa, xb, xc, fa, fb, fc, funcalls w = xc + _gold * (xc - xb) fw = func(*((w,) + args)) funcalls += 1 elif (w - wlim)*(wlim - xc) >= 0.0: w = wlim fw = func(*((w,) + args)) funcalls += 1 elif (w - wlim)*(xc - w) > 0.0: fw = func(*((w,) + args)) funcalls += 1 if (fw < fc): xb = xc xc = w w = xc + _gold * (xc - xb) fb = fc fc = fw fw = func(*((w,) + args)) funcalls += 1 else: w = xc + _gold * (xc - xb) fw = func(*((w,) + args)) funcalls += 1 xa = xb xb = xc xc = w fa = fb fb = fc fc = fw return xa, xb, xc, fa, fb, fc, funcalls def _linesearch_powell(func, p, xi, tol=1e-3): """Line-search algorithm using fminbound. Find the minimium of the function ``func(x0+ alpha*direc)``. """ def myfunc(alpha): return func(p + alpha*xi) alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol) xi = alpha_min*xi return squeeze(fret), p + xi, xi def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None, direc=None): """ Minimize a function using modified Powell's method. This method only uses function values, not derivatives. Parameters ---------- func : callable f(x,*args) Objective function to be minimized. x0 : ndarray Initial guess. args : tuple, optional Extra arguments passed to func. callback : callable, optional An optional user-supplied function, called after each iteration. Called as ``callback(xk)``, where ``xk`` is the current parameter vector. direc : ndarray, optional Initial direction set. xtol : float, optional Line-search error tolerance. ftol : float, optional Relative error in ``func(xopt)`` acceptable for convergence. maxiter : int, optional Maximum number of iterations to perform. maxfun : int, optional Maximum number of function evaluations to make. full_output : bool, optional If True, fopt, xi, direc, iter, funcalls, and warnflag are returned. disp : bool, optional If True, print convergence messages. retall : bool, optional If True, return a list of the solution at each iteration. Returns ------- xopt : ndarray Parameter which minimizes `func`. fopt : number Value of function at minimum: ``fopt = func(xopt)``. direc : ndarray Current direction set. iter : int Number of iterations. funcalls : int Number of function calls made. warnflag : int Integer warning flag: 1 : Maximum number of function evaluations. 2 : Maximum number of iterations. allvecs : list List of solutions at each iteration. See also -------- minimize: Interface to unconstrained minimization algorithms for multivariate functions. See the 'Powell' `method` in particular. Notes ----- Uses a modification of Powell's method to find the minimum of a function of N variables. Powell's method is a conjugate direction method. The algorithm has two loops. 
    The outer loop merely
    iterates over the inner loop. The inner loop minimizes over each
    current direction in the direction set. At the end of the inner
    loop, if certain conditions are met, the direction that gave the
    largest decrease is dropped and replaced with the difference
    between the current estimated x and the estimated x from the
    beginning of the inner-loop.

    The technical conditions for replacing the direction of greatest
    increase amount to checking that

    1. No further gain can be made along the direction of greatest increase
       from that iteration.
    2. The direction of greatest increase accounted for a sufficiently large
       fraction of the decrease in the function value from that iteration of
       the inner loop.

    Examples
    --------
    >>> def f(x):
    ...     return x**2

    >>> from scipy import optimize

    >>> minimum = optimize.fmin_powell(f, -1)
    Optimization terminated successfully.
             Current function value: 0.000000
             Iterations: 2
             Function evaluations: 18
    >>> minimum
    array(0.0)

    References
    ----------
    Powell M.J.D. (1964) An efficient method for finding the minimum of a
    function of several variables without calculating derivatives,
    Computer Journal, 7 (2):155-162.

    Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
    Numerical Recipes (any edition), Cambridge University Press

    """
    opts = {'xtol': xtol,
            'ftol': ftol,
            'maxiter': maxiter,
            'maxfev': maxfun,
            'disp': disp,
            'direc': direc,
            'return_all': retall}

    res = _minimize_powell(func, x0, args, callback=callback, **opts)

    if full_output:
        retlist = (res['x'], res['fun'], res['direc'], res['nit'],
                   res['nfev'], res['status'])
        if retall:
            retlist += (res['allvecs'], )
        return retlist
    else:
        if retall:
            return res['x'], res['allvecs']
        else:
            return res['x']


def _minimize_powell(func, x0, args=(), callback=None,
                     xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
                     disp=False, direc=None, return_all=False,
                     **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    modified Powell algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    ftol : float
        Relative error in ``fun(xopt)`` acceptable for convergence.
    maxiter, maxfev : int
        Maximum allowed number of iterations and function evaluations.
        Will default to ``N*1000``, where ``N`` is the number of
        variables, if neither `maxiter` nor `maxfev` is set. If both
        `maxiter` and `maxfev` are set, minimization will stop at the
        first reached.
    direc : ndarray
        Initial set of direction vectors for the Powell method.
""" _check_unknown_options(unknown_options) maxfun = maxfev retall = return_all # we need to use a mutable object here that we can update in the # wrapper function fcalls, func = wrap_function(func, args) x = asarray(x0).flatten() if retall: allvecs = [x] N = len(x) # If neither are set, then set both to default if maxiter is None and maxfun is None: maxiter = N * 1000 maxfun = N * 1000 elif maxiter is None: # Convert remaining Nones, to np.inf, unless the other is np.inf, in # which case use the default to avoid unbounded iteration if maxfun == np.inf: maxiter = N * 1000 else: maxiter = np.inf elif maxfun is None: if maxiter == np.inf: maxfun = N * 1000 else: maxfun = np.inf if direc is None: direc = eye(N, dtype=float) else: direc = asarray(direc, dtype=float) fval = squeeze(func(x)) x1 = x.copy() iter = 0 ilist = list(range(N)) while True: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol * 100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if callback is not None: callback(x) if retall: allvecs.append(x) bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20 if 2.0 * (fx - fval) <= bnd: break if fcalls[0] >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(func(x2)) if (fx > fx2): t = 2.0*(fx + fx2 - 2.0*fval) temp = (fx - fval - delta) t *= temp*temp temp = fx - fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if fcalls[0] >= maxfun: warnflag = 1 msg = _status_message['maxfev'] if disp: print("Warning: " + msg) elif iter >= maxiter: warnflag = 2 msg = _status_message['maxiter'] if disp: print("Warning: " + msg) else: msg = _status_message['success'] if disp: print(msg) print(" Current function value: %f" % fval) print(" Iterations: %d" % iter) print(" Function evaluations: %d" % fcalls[0]) x = squeeze(x) result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0], status=warnflag, success=(warnflag == 0), message=msg, x=x) if retall: result['allvecs'] = allvecs return result def _endprint(x, flag, fval, maxfun, xtol, disp): if flag == 0: if disp > 1: print("\nOptimization terminated successfully;\n" "The returned value satisfies the termination criteria\n" "(using xtol = ", xtol, ")") if flag == 1: if disp: print("\nMaximum number of function evaluations exceeded --- " "increase maxfun argument.\n") return def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin, disp=False): """Minimize a function over a given range by brute force. Uses the "brute force" method, i.e. computes the function's value at each point of a multidimensional grid of points, to find the global minimum of the function. The function is evaluated everywhere in the range with the datatype of the first call to the function, as enforced by the ``vectorize`` NumPy function. The value and type of the function evaluation returned when ``full_output=True`` are affected in addition by the ``finish`` argument (see Notes). The brute force approach is inefficient because the number of grid points increases exponentially - the number of grid points to evaluate is ``Ns ** len(x)``. Consequently, even with coarse grid spacing, even moderately sized problems can take a long time to run, and/or run into memory limitations. Parameters ---------- func : callable The objective function to be minimized. 
        Must be in the form ``f(x, *args)``, where ``x`` is the argument in
        the form of a 1-D array and ``args`` is a tuple of any additional
        fixed parameters needed to completely specify the function.
    ranges : tuple
        Each component of the `ranges` tuple must be either a
        "slice object" or a range tuple of the form ``(low, high)``.
        The program uses these to create the grid of points on which
        the objective function will be computed. See `Note 2` for
        more detail.
    args : tuple, optional
        Any additional fixed parameters needed to completely specify
        the function.
    Ns : int, optional
        Number of grid points along the axes, if not otherwise
        specified. See `Note 2`.
    full_output : bool, optional
        If True, return the evaluation grid and the objective function's
        values on it.
    finish : callable, optional
        An optimization function that is called with the result of brute force
        minimization as initial guess. `finish` should take `func` and
        the initial guess as positional arguments, and take `args` as
        keyword arguments. It may additionally take `full_output`
        and/or `disp` as keyword arguments. Use None if no "polishing"
        function is to be used. See Notes for more details.
    disp : bool, optional
        Set to True to print convergence messages.

    Returns
    -------
    x0 : ndarray
        A 1-D array containing the coordinates of a point at which the
        objective function had its minimum value. (See `Note 1` for
        which point is returned.)
    fval : float
        Function value at the point `x0`. (Returned when `full_output` is
        True.)
    grid : tuple
        Representation of the evaluation grid. It has the same
        length as `x0`. (Returned when `full_output` is True.)
    Jout : ndarray
        Function values at each point of the evaluation
        grid, `i.e.`, ``Jout = func(*grid)``. (Returned
        when `full_output` is True.)

    See Also
    --------
    basinhopping, differential_evolution

    Notes
    -----
    *Note 1*: The program finds the gridpoint at which the lowest value
    of the objective function occurs. If `finish` is None, that is the
    point returned. When the global minimum occurs within (or not very far
    outside) the grid's boundaries, and the grid is fine enough, that
    point will be in the neighborhood of the global minimum.

    However, users often employ some other optimization program to
    "polish" the gridpoint values, `i.e.`, to seek a more precise
    (local) minimum near `brute's` best gridpoint. The `brute` function's
    `finish` option provides a convenient way to do that. Any polishing
    program used must take `brute's` output as its initial guess as a
    positional argument, and take `brute's` input values for `args` as
    keyword arguments, otherwise an error will be raised. It may
    additionally take `full_output` and/or `disp` as keyword arguments.

    `brute` assumes that the `finish` function returns either an
    `OptimizeResult` object or a tuple in the form:
    ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing
    value of the argument, ``Jmin`` is the minimum value of the objective
    function, "..." may be some other returned values (which are not used
    by `brute`), and ``statuscode`` is the status code of the `finish`
    program.

    Note that when `finish` is not None, the values returned are those
    of the `finish` program, *not* the gridpoint ones. Consequently,
    while `brute` confines its search to the input grid points,
    the `finish` program's results usually will not coincide with any
    gridpoint, and may fall outside the grid's boundary. Thus, if a
    minimum only needs to be found over the provided grid points, make
    sure to pass in `finish=None`.

    *Note 2*: The grid of points is a `numpy.mgrid` object.
    For `brute` the `ranges` and `Ns` inputs have the following effect.
    Each component of the `ranges` tuple can be either a slice object or a
    two-tuple giving a range of values, such as (0, 5). If the component is a
    slice object, `brute` uses it directly. If the component is a two-tuple
    range, `brute` internally converts it to a slice object that interpolates
    `Ns` points from its low-value to its high-value, inclusive.

    Examples
    --------
    We illustrate the use of `brute` to seek the global minimum of a function
    of two variables that is given as the sum of a positive-definite
    quadratic and two deep "Gaussian-shaped" craters. Specifically, define
    the objective function `f` as the sum of three other functions,
    ``f = f1 + f2 + f3``. We suppose each of these has a signature
    ``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions
    are as defined below.

    >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
    >>> def f1(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)

    >>> def f2(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))

    >>> def f3(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))

    >>> def f(z, *params):
    ...     return f1(z, *params) + f2(z, *params) + f3(z, *params)

    Thus, the objective function may have local minima near the minimum
    of each of the three functions of which it is composed. To
    use `fmin` to polish its gridpoint result, we may then continue as
    follows:

    >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
    >>> from scipy import optimize
    >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
    ...                           finish=optimize.fmin)
    >>> resbrute[0]  # global minimum
    array([-1.05665192,  1.80834843])
    >>> resbrute[1]  # function value at global minimum
    -3.4085818767

    Note that if `finish` had been set to None, we would have gotten the
    gridpoint [-1.0 1.75] where the rounded function value is -2.892.

    """
    N = len(ranges)
    if N > 40:
        raise ValueError("Brute Force not possible with more "
                         "than 40 variables.")
    lrange = list(ranges)
    for k in range(N):
        if type(lrange[k]) is not type(slice(None)):
            if len(lrange[k]) < 3:
                lrange[k] = tuple(lrange[k]) + (complex(Ns),)
            lrange[k] = slice(*lrange[k])
    if (N == 1):
        lrange = lrange[0]

    def _scalarfunc(*params):
        params = asarray(params).flatten()
        return func(params, *args)

    vecfunc = vectorize(_scalarfunc)
    grid = mgrid[lrange]
    if (N == 1):
        grid = (grid,)
    Jout = vecfunc(*grid)
    Nshape = shape(Jout)
    indx = argmin(Jout.ravel(), axis=-1)
    Nindx = zeros(N, int)
    xmin = zeros(N, float)
    for k in range(N - 1, -1, -1):
        thisN = Nshape[k]
        Nindx[k] = indx % Nshape[k]
        indx = indx // thisN
    for k in range(N):
        xmin[k] = grid[k][tuple(Nindx)]

    Jmin = Jout[tuple(Nindx)]
    if (N == 1):
        grid = grid[0]
        xmin = xmin[0]

    if callable(finish):
        # set up kwargs for `finish` function
        finish_args = _getargspec(finish).args
        finish_kwargs = dict()
        if 'full_output' in finish_args:
            finish_kwargs['full_output'] = 1
        if 'disp' in finish_args:
            finish_kwargs['disp'] = disp
        elif 'options' in finish_args:
            # pass 'disp' as `options`
            # (e.g. if `finish` is `minimize`)
            finish_kwargs['options'] = {'disp': disp}

        # run minimizer
        res = finish(func, xmin, args=args, **finish_kwargs)

        if isinstance(res, OptimizeResult):
            xmin = res.x
            Jmin = res.fun
            success = res.success
        else:
            xmin = res[0]
            Jmin = res[1]
            success = res[-1] == 0
        if not success:
            if disp:
                print("Warning: Either final optimization did not succeed "
                      "or `finish` does not return `statuscode` as its last "
                      "argument.")

    if full_output:
        return xmin, Jmin, grid, Jout
    else:
        return xmin


def show_options(solver=None, method=None, disp=True):
    """
    Show documentation for additional options of optimization solvers.

    These are method-specific options that can be supplied through the
    ``options`` dict.

    Parameters
    ----------
    solver : str
        Type of optimization solver. One of 'minimize', 'minimize_scalar',
        'root', or 'linprog'.
    method : str, optional
        If not given, shows all methods of the specified solver. Otherwise,
        show only the options for the specified method. Valid values
        correspond to the method names of the respective solver (e.g. 'BFGS'
        for 'minimize').
    disp : bool, optional
        Whether to print the result rather than returning it.

    Returns
    -------
    text
        Either None (for disp=False) or the text string (disp=True)

    Notes
    -----
    The solver-specific methods are:

    `scipy.optimize.minimize`

    - :ref:`Nelder-Mead <optimize.minimize-neldermead>`
    - :ref:`Powell <optimize.minimize-powell>`
    - :ref:`CG <optimize.minimize-cg>`
    - :ref:`BFGS <optimize.minimize-bfgs>`
    - :ref:`Newton-CG <optimize.minimize-newtoncg>`
    - :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`
    - :ref:`TNC <optimize.minimize-tnc>`
    - :ref:`COBYLA <optimize.minimize-cobyla>`
    - :ref:`SLSQP <optimize.minimize-slsqp>`
    - :ref:`dogleg <optimize.minimize-dogleg>`
    - :ref:`trust-ncg <optimize.minimize-trustncg>`

    `scipy.optimize.root`

    - :ref:`hybr <optimize.root-hybr>`
    - :ref:`lm <optimize.root-lm>`
    - :ref:`broyden1 <optimize.root-broyden1>`
    - :ref:`broyden2 <optimize.root-broyden2>`
    - :ref:`anderson <optimize.root-anderson>`
    - :ref:`linearmixing <optimize.root-linearmixing>`
    - :ref:`diagbroyden <optimize.root-diagbroyden>`
    - :ref:`excitingmixing <optimize.root-excitingmixing>`
    - :ref:`krylov <optimize.root-krylov>`
    - :ref:`df-sane <optimize.root-dfsane>`

    `scipy.optimize.minimize_scalar`

    - :ref:`brent <optimize.minimize_scalar-brent>`
    - :ref:`golden <optimize.minimize_scalar-golden>`
    - :ref:`bounded <optimize.minimize_scalar-bounded>`

    `scipy.optimize.linprog`

    - :ref:`simplex <optimize.linprog-simplex>`
    - :ref:`interior-point <optimize.linprog-interior-point>`

    """
    import textwrap

    doc_routines = {
        'minimize': (
            ('bfgs', 'scipy.optimize.optimize._minimize_bfgs'),
            ('cg', 'scipy.optimize.optimize._minimize_cg'),
            ('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'),
            ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
            ('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'),
            ('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'),
            ('newton-cg', 'scipy.optimize.optimize._minimize_newtoncg'),
            ('powell', 'scipy.optimize.optimize._minimize_powell'),
            ('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'),
            ('tnc', 'scipy.optimize.tnc._minimize_tnc'),
            ('trust-ncg',
             'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
        ),
        'root': (
            ('hybr', 'scipy.optimize.minpack._root_hybr'),
            ('lm', 'scipy.optimize._root._root_leastsq'),
            ('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
            ('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
            ('anderson', 'scipy.optimize._root._root_anderson_doc'),
            ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
            ('excitingmixing',
             'scipy.optimize._root._root_excitingmixing_doc'),
            ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
            ('krylov', 'scipy.optimize._root._root_krylov_doc'),
            ('df-sane', 'scipy.optimize._spectral._root_df_sane'),
        ),
        'linprog': (
            ('simplex', 'scipy.optimize._linprog._linprog_simplex'),
            ('interior-point', 'scipy.optimize._linprog._linprog_ip'),
        ),
        'minimize_scalar': (
            ('brent', 'scipy.optimize.optimize._minimize_scalar_brent'),
            ('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'),
            ('golden', 'scipy.optimize.optimize._minimize_scalar_golden'),
        ),
    }

    if solver is None:
        text = ["\n\n\n========\n", "minimize\n", "========\n"]
        text.append(show_options('minimize', disp=False))
        text.extend(["\n\n===============\n", "minimize_scalar\n",
                     "===============\n"])
        text.append(show_options('minimize_scalar', disp=False))
        text.extend(["\n\n\n====\n", "root\n", "====\n"])
        text.append(show_options('root', disp=False))
        text.extend(['\n\n\n=======\n', 'linprog\n', '=======\n'])
        text.append(show_options('linprog', disp=False))
        text = "".join(text)
    else:
        solver = solver.lower()
        if solver not in doc_routines:
            raise ValueError('Unknown solver %r' % (solver,))

        if method is None:
            text = []
            for name, _ in doc_routines[solver]:
                text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"])
                text.append(show_options(solver, name, disp=False))
            text = "".join(text)
        else:
            method = method.lower()
            methods = dict(doc_routines[solver])
            if method not in methods:
                raise ValueError("Unknown method %r" % (method,))
            name = methods[method]

            # Import function object
            parts = name.split('.')
            mod_name = ".".join(parts[:-1])
            __import__(mod_name)
            obj = getattr(sys.modules[mod_name], parts[-1])

            # Get doc
            doc = obj.__doc__
            if doc is not None:
                text = textwrap.dedent(doc).strip()
            else:
                text = ""

    if disp:
        print(text)
        return
    else:
        return text


def main():
    import time

    times = []
    algor = []
    x0 = [0.8, 1.2, 0.7]
    print("Nelder-Mead Simplex")
    print("===================")
    start = time.time()
    x = fmin(rosen, x0)
    print(x)
    times.append(time.time() - start)
    algor.append('Nelder-Mead Simplex\t')

    print()
    print("Powell Direction Set Method")
    print("===========================")
    start = time.time()
    x = fmin_powell(rosen, x0)
    print(x)
    times.append(time.time() - start)
    algor.append('Powell Direction Set Method.')

    print()
    print("Nonlinear CG")
    print("============")
    start = time.time()
    x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
    print(x)
    times.append(time.time() - start)
    algor.append('Nonlinear CG \t')

    print()
    print("BFGS Quasi-Newton")
    print("=================")
    start = time.time()
    x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
    print(x)
    times.append(time.time() - start)
    algor.append('BFGS Quasi-Newton\t')

    print()
    print("BFGS approximate gradient")
    print("=========================")
    start = time.time()
    x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
    print(x)
    times.append(time.time() - start)
    algor.append('BFGS without gradient\t')

    print()
    print("Newton-CG with Hessian product")
    print("==============================")
    start = time.time()
    x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80)
    print(x)
    times.append(time.time() - start)
    algor.append('Newton-CG with hessian product')

    print()
    print("Newton-CG with full Hessian")
    print("===========================")
    start = time.time()
    x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)
    print(x)
    times.append(time.time() - start)
    algor.append('Newton-CG with full hessian')

    print()
    print("\nMinimizing the Rosenbrock function of order 3\n")
print(" Algorithm \t\t\t Seconds") print("===========\t\t\t =========") for k in range(len(algor)): print(algor[k], "\t -- ", times[k]) if __name__ == "__main__": main()
104,490
32.501443
90
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion.py
"""Trust-region optimization.""" from __future__ import division, print_function, absolute_import import math import numpy as np import scipy.linalg from .optimize import (_check_unknown_options, wrap_function, _status_message, OptimizeResult) __all__ = [] class BaseQuadraticSubproblem(object): """ Base/abstract class defining the quadratic model for trust-region minimization. Child classes must implement the ``solve`` method. Values of the objective function, jacobian and hessian (if provided) at the current iterate ``x`` are evaluated on demand and then stored as attributes ``fun``, ``jac``, ``hess``. """ def __init__(self, x, fun, jac, hess=None, hessp=None): self._x = x self._f = None self._g = None self._h = None self._g_mag = None self._cauchy_point = None self._newton_point = None self._fun = fun self._jac = jac self._hess = hess self._hessp = hessp def __call__(self, p): return self.fun + np.dot(self.jac, p) + 0.5 * np.dot(p, self.hessp(p)) @property def fun(self): """Value of objective function at current iteration.""" if self._f is None: self._f = self._fun(self._x) return self._f @property def jac(self): """Value of jacobian of objective function at current iteration.""" if self._g is None: self._g = self._jac(self._x) return self._g @property def hess(self): """Value of hessian of objective function at current iteration.""" if self._h is None: self._h = self._hess(self._x) return self._h def hessp(self, p): if self._hessp is not None: return self._hessp(self._x, p) else: return np.dot(self.hess, p) @property def jac_mag(self): """Magniture of jacobian of objective function at current iteration.""" if self._g_mag is None: self._g_mag = scipy.linalg.norm(self.jac) return self._g_mag def get_boundaries_intersections(self, z, d, trust_radius): """ Solve the scalar quadratic equation ||z + t d|| == trust_radius. This is like a line-sphere intersection. Return the two values of t, sorted from low to high. """ a = np.dot(d, d) b = 2 * np.dot(z, d) c = np.dot(z, z) - trust_radius**2 sqrt_discriminant = math.sqrt(b*b - 4*a*c) # The following calculation is mathematically # equivalent to: # ta = (-b - sqrt_discriminant) / (2*a) # tb = (-b + sqrt_discriminant) / (2*a) # but produce smaller round off errors. # Look at Matrix Computation p.97 # for a better justification. aux = b + math.copysign(sqrt_discriminant, b) ta = -aux / (2*a) tb = -2*c / aux return sorted([ta, tb]) def solve(self, trust_radius): raise NotImplementedError('The solve method should be implemented by ' 'the child class') def _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None, subproblem=None, initial_trust_radius=1.0, max_trust_radius=1000.0, eta=0.15, gtol=1e-4, maxiter=None, disp=False, return_all=False, callback=None, inexact=True, **unknown_options): """ Minimization of scalar function of one or more variables using a trust-region algorithm. Options for the trust-region algorithm are: initial_trust_radius : float Initial trust radius. max_trust_radius : float Never propose steps that are longer than this value. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than `gtol` before successful termination. maxiter : int Maximum number of iterations to perform. disp : bool If True, print convergence message. inexact : bool Accuracy to solve subproblems. If True requires less nonlinear iterations, but more vector products. Only effective for method trust-krylov. This function is called by the `minimize` function. 
    It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)

    if jac is None:
        raise ValueError('Jacobian is currently required for trust-region '
                         'methods')
    if hess is None and hessp is None:
        raise ValueError('Either the Hessian or the Hessian-vector product '
                         'is currently required for trust-region methods')
    if subproblem is None:
        raise ValueError('A subproblem solving strategy is required for '
                         'trust-region methods')
    if not (0 <= eta < 0.25):
        raise Exception('invalid acceptance stringency')
    if max_trust_radius <= 0:
        raise Exception('the max trust radius must be positive')
    if initial_trust_radius <= 0:
        raise ValueError('the initial trust radius must be positive')
    if initial_trust_radius >= max_trust_radius:
        raise ValueError('the initial trust radius must be less than the '
                         'max trust radius')

    # force the initial guess into a nice format
    x0 = np.asarray(x0).flatten()

    # Wrap the functions, for a couple reasons.
    # This tracks how many times they have been called
    # and it automatically passes the args.
    nfun, fun = wrap_function(fun, args)
    njac, jac = wrap_function(jac, args)
    nhess, hess = wrap_function(hess, args)
    nhessp, hessp = wrap_function(hessp, args)

    # limit the number of iterations
    if maxiter is None:
        maxiter = len(x0)*200

    # init the search status
    warnflag = 0

    # initialize the search
    trust_radius = initial_trust_radius
    x = x0
    if return_all:
        allvecs = [x]
    m = subproblem(x, fun, jac, hess, hessp)
    k = 0

    # search for the function min
    # do not even start if the gradient is small enough
    while m.jac_mag >= gtol:

        # Solve the sub-problem.
        # This gives us the proposed step relative to the current position
        # and it tells us whether the proposed step
        # has reached the trust region boundary or not.
        try:
            p, hits_boundary = m.solve(trust_radius)
        except np.linalg.linalg.LinAlgError as e:
            warnflag = 3
            break

        # calculate the predicted value at the proposed point
        predicted_value = m(p)

        # define the local approximation at the proposed point
        x_proposed = x + p
        m_proposed = subproblem(x_proposed, fun, jac, hess, hessp)

        # evaluate the ratio defined in equation (4.4)
        actual_reduction = m.fun - m_proposed.fun
        predicted_reduction = m.fun - predicted_value
        if predicted_reduction <= 0:
            warnflag = 2
            break
        rho = actual_reduction / predicted_reduction

        # update the trust radius according to the actual/predicted ratio
        if rho < 0.25:
            trust_radius *= 0.25
        elif rho > 0.75 and hits_boundary:
            trust_radius = min(2*trust_radius, max_trust_radius)

        # if the ratio is high enough then accept the proposed step
        if rho > eta:
            x = x_proposed
            m = m_proposed

        # append the best guess, call back, increment the iteration count
        if return_all:
            allvecs.append(np.copy(x))
        if callback is not None:
            callback(np.copy(x))
        k += 1

        # check if the gradient is small enough to stop
        if m.jac_mag < gtol:
            warnflag = 0
            break

        # check if we have looked at enough iterations
        if k >= maxiter:
            warnflag = 1
            break

    # print some stuff if requested
    status_messages = (
            _status_message['success'],
            _status_message['maxiter'],
            'A bad approximation caused failure to predict improvement.',
            'A linalg error occurred, such as a non-psd Hessian.',
    )
    if disp:
        if warnflag == 0:
            print(status_messages[warnflag])
        else:
            print('Warning: ' + status_messages[warnflag])
        print("         Current function value: %f" % m.fun)
        print("         Iterations: %d" % k)
        print("         Function evaluations: %d" % nfun[0])
        print("         Gradient evaluations: %d" % njac[0])
        print("         Hessian evaluations: %d" % nhess[0])

    result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag,
                            fun=m.fun,
                            jac=m.jac, nfev=nfun[0], njev=njac[0],
                            nhev=nhess[0], nit=k,
                            message=status_messages[warnflag])

    if hess is not None:
        result['hess'] = m.hess

    if return_all:
        result['allvecs'] = allvecs

    return result
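# --- Illustrative usage sketch (not part of the original scipy module) ---
# BaseQuadraticSubproblem above is abstract: child classes must supply
# solve(trust_radius) -> (p, hits_boundary). The hypothetical child below
# returns the Cauchy point -- the minimizer of the quadratic model along the
# steepest-descent direction, clipped to the trust radius (Nocedal & Wright,
# eqs. 4.11-4.12). scipy's real subproblem solvers (dogleg, CG-Steihaug,
# GLTR, ...) are more sophisticated; this only illustrates the interface.
class _CauchyPointSubproblem(BaseQuadraticSubproblem):
    def solve(self, trust_radius):
        g = self.jac
        g_norm = self.jac_mag  # > 0 here: solve() is only called while
                               # the gradient norm is at least gtol
        gBg = np.dot(g, self.hessp(g))
        if gBg > 0:
            # model is convex along -g: step to its 1-D minimizer,
            # but never past the trust-region boundary
            tau = min(g_norm ** 3 / (trust_radius * gBg), 1.0)
        else:
            # non-positive curvature along -g: go straight to the boundary
            tau = 1.0
        p = -tau * (trust_radius / g_norm) * g
        hits_boundary = (tau == 1.0)
        return p, hits_boundary

# A caller could then plug it into the driver defined above, e.g.
# _minimize_trust_region(fun, x0, jac=grad, hess=hessian,
#                        subproblem=_CauchyPointSubproblem)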
9,226
33.558052
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_minimize.py
""" Unified interfaces to minimization algorithms. Functions --------- - minimize : minimization of a function of several variables. - minimize_scalar : minimization of a function of one variable. """ from __future__ import division, print_function, absolute_import __all__ = ['minimize', 'minimize_scalar'] from warnings import warn import numpy as np from scipy._lib.six import callable from scipy.sparse.linalg import LinearOperator # unconstrained minimization from .optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg, _minimize_bfgs, _minimize_newtoncg, _minimize_scalar_brent, _minimize_scalar_bounded, _minimize_scalar_golden, MemoizeJac) from ._trustregion_dogleg import _minimize_dogleg from ._trustregion_ncg import _minimize_trust_ncg from ._trustregion_krylov import _minimize_trust_krylov from ._trustregion_exact import _minimize_trustregion_exact from ._trustregion_constr import _minimize_trustregion_constr from ._constraints import Bounds, new_bounds_to_old, old_bound_to_new # constrained minimization from .lbfgsb import _minimize_lbfgsb from .tnc import _minimize_tnc from .cobyla import _minimize_cobyla from .slsqp import _minimize_slsqp def minimize(fun, x0, args=(), method=None, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=None, callback=None, options=None): """Minimization of scalar function of one or more variables. Parameters ---------- fun : callable The objective function to be minimized. ``fun(x, *args) -> float`` where x is an 1-D array with shape (n,) and `args` is a tuple of the fixed parameters needed to completely specify the function. x0 : ndarray, shape (n,) Initial guess. Array of real elements of size (n,), where 'n' is the number of independent variables. args : tuple, optional Extra arguments passed to the objective function and its derivatives (`fun`, `jac` and `hess` functions). method : str or callable, optional Type of solver. Should be one of - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>` - 'Powell' :ref:`(see here) <optimize.minimize-powell>` - 'CG' :ref:`(see here) <optimize.minimize-cg>` - 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>` - 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>` - 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>` - 'TNC' :ref:`(see here) <optimize.minimize-tnc>` - 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>` - 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>` - 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>` - 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>` - 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>` - 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>` - 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>` - custom - a callable object (added in version 0.14.0), see below for description. If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``, depending if the problem has constraints or bounds. jac : {callable, '2-point', '3-point', 'cs', bool}, optional Method for computing the gradient vector. Only for CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov, trust-exact and trust-constr. If it is a callable, it should be a function that returns the gradient vector: ``jac(x, *args) -> array_like, shape (n,)`` where x is an array with shape (n,) and `args` is a tuple with the fixed parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'} select a finite difference scheme for numerical estimation of the gradient. 
        Options '3-point' and 'cs' are available only to 'trust-constr'.
        If `jac` is a Boolean and is True, `fun` is assumed to return the
        gradient along with the objective function. If False, the gradient
        will be estimated using '2-point' finite difference estimation.
    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
        Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
        trust-ncg, trust-krylov, trust-exact and trust-constr. If it is
        callable, it should return the Hessian matrix:

            ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``

        where x is a (n,) ndarray and `args` is a tuple with the fixed
        parameters. LinearOperator and sparse matrix returns are
        allowed only for 'trust-constr' method. Alternatively, the keywords
        {'2-point', '3-point', 'cs'} select a finite difference scheme
        for numerical estimation. Or, objects implementing the
        `HessianUpdateStrategy` interface can be used to approximate
        the Hessian. Available quasi-Newton methods implementing
        this interface are:

            - `BFGS`;
            - `SR1`.

        Whenever the gradient is estimated via finite-differences,
        the Hessian cannot be estimated with options
        {'2-point', '3-point', 'cs'} and needs to be
        estimated using one of the quasi-Newton strategies.
        Finite-difference options {'2-point', '3-point', 'cs'} and
        `HessianUpdateStrategy` are available only for 'trust-constr' method.
    hessp : callable, optional
        Hessian of objective function times an arbitrary vector p. Only for
        Newton-CG, trust-ncg, trust-krylov, trust-constr.
        Only one of `hessp` or `hess` needs to be given. If `hess` is
        provided, then `hessp` will be ignored. `hessp` must compute the
        Hessian times an arbitrary vector:

            ``hessp(x, p, *args) -> ndarray shape (n,)``

        where x is a (n,) ndarray, p is an arbitrary vector with
        dimension (n,) and `args` is a tuple with the fixed
        parameters.
    bounds : sequence or `Bounds`, optional
        Bounds on variables for L-BFGS-B, TNC, SLSQP and
        trust-constr methods. There are two ways to specify the bounds:

            1. Instance of `Bounds` class.
            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
               is used to specify no bound.

    constraints : {Constraint, dict} or List of {Constraint, dict}, optional
        Constraints definition (only for COBYLA, SLSQP and trust-constr).
        Constraints for 'trust-constr' are defined as a single object or a
        list of objects specifying constraints to the optimization problem.
        Available constraints are:

            - `LinearConstraint`
            - `NonlinearConstraint`

        Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
        Each dictionary with fields:

            type : str
                Constraint type: 'eq' for equality, 'ineq' for inequality.
            fun : callable
                The function defining the constraint.
            jac : callable, optional
                The Jacobian of `fun` (only for SLSQP).
            args : sequence, optional
                Extra arguments to be passed to the function and Jacobian.

        Equality constraint means that the constraint function result is to
        be zero whereas inequality means that it is to be non-negative.
        Note that COBYLA only supports inequality constraints.
    tol : float, optional
        Tolerance for termination. For detailed control, use solver-specific
        options.
    options : dict, optional
        A dictionary of solver options. All methods accept the following
        generic options:

            maxiter : int
                Maximum number of iterations to perform.
            disp : bool
                Set to True to print convergence messages.

        For method-specific options, see :func:`show_options()`.
    callback : callable, optional
        Called after each iteration.
        For 'trust-constr' it is a callable with the signature:

            ``callback(xk, OptimizeResult state) -> bool``

        where ``xk`` is the current parameter vector and ``state``
        is an `OptimizeResult` object, with the same fields
        as the ones from the return. If callback returns True
        the algorithm execution is terminated.
        For all the other methods, the signature is:

            ``callback(xk)``

        where ``xk`` is the current parameter vector.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes.

    See also
    --------
    minimize_scalar : Interface to minimization algorithms for scalar
        univariate functions
    show_options : Additional options accepted by the solvers

    Notes
    -----
    This section describes the available solvers that can be selected by the
    'method' parameter. The default method is *BFGS*.

    **Unconstrained minimization**

    Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
    Simplex algorithm [1]_, [2]_. This algorithm is robust in many
    applications. However, if numerical computation of derivative can be
    trusted, other algorithms using the first and/or second derivatives
    information might be preferred for their better performance in
    general.

    Method :ref:`Powell <optimize.minimize-powell>` is a modification
    of Powell's method [3]_, [4]_ which is a conjugate direction
    method. It performs sequential one-dimensional minimizations along
    each vector of the directions set (`direc` field in `options` and
    `info`), which is updated at each iteration of the main
    minimization loop. The function need not be differentiable, and no
    derivatives are taken.

    Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
    gradient algorithm by Polak and Ribiere, a variant of the
    Fletcher-Reeves method described in [5]_ pp. 120-122. Only the
    first derivatives are used.

    Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
    method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
    pp. 136. It uses the first derivatives only. BFGS has proven good
    performance even for non-smooth optimizations. This method also
    returns an approximation of the Hessian inverse, stored as
    `hess_inv` in the OptimizeResult object.

    Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
    Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
    Newton method). It uses a CG method to compute the search
    direction. See also *TNC* method for a box-constrained
    minimization with a similar algorithm. Suitable for large-scale
    problems.

    Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
    trust-region algorithm [5]_ for unconstrained minimization. This
    algorithm requires the gradient and Hessian; furthermore the
    Hessian is required to be positive definite.

    Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
    Newton conjugate gradient trust-region algorithm [5]_ for
    unconstrained minimization. This algorithm requires the gradient
    and either the Hessian or a function that computes the product of
    the Hessian with a given vector. Suitable for large-scale problems.

    Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
    the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
    minimization.
    This algorithm requires the gradient
    and either the Hessian or a function that computes the product of
    the Hessian with a given vector. Suitable for large-scale problems.
    On indefinite problems it usually requires fewer iterations than the
    `trust-ncg` method and is recommended for medium and large-scale problems.

    Method :ref:`trust-exact <optimize.minimize-trustexact>`
    is a trust-region method for unconstrained minimization in which
    quadratic subproblems are solved almost exactly [13]_. This
    algorithm requires the gradient and the Hessian (which is
    *not* required to be positive definite). In many situations it
    converges in fewer iterations than the other Newton-type methods
    and is the most recommended for small and medium-size problems.

    **Bound-Constrained minimization**

    Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
    algorithm [6]_, [7]_ for bound constrained minimization.

    Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
    algorithm [5]_, [8]_ to minimize a function with variables subject
    to bounds. This algorithm uses gradient information; it is also
    called Newton Conjugate-Gradient. It differs from the *Newton-CG*
    method described above as it wraps a C implementation and allows
    each variable to be given upper and lower bounds.

    **Constrained Minimization**

    Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
    Constrained Optimization BY Linear Approximation (COBYLA) method
    [9]_, [10]_, [11]_. The algorithm is based on linear
    approximations to the objective function and each constraint. The
    method wraps a FORTRAN implementation of the algorithm. The
    constraints functions 'fun' may return either a single number
    or an array or list of numbers.

    Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
    Least SQuares Programming to minimize a function of several
    variables with any combination of bounds, equality and inequality
    constraints. The method wraps the SLSQP Optimization subroutine
    originally implemented by Dieter Kraft [12]_. Note that the
    wrapper handles infinite values in bounds by converting them into
    large floating values.

    Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
    trust-region algorithm for constrained optimization. It switches
    between two implementations depending on the problem definition.
    It is the most versatile constrained minimization algorithm
    implemented in SciPy and the most appropriate for large-scale problems.
    For equality constrained problems it is an implementation of the
    Byrd-Omojokun Trust-Region SQP method described in [17]_ and in [5]_,
    p. 549. When inequality constraints are imposed as well, it switches
    to the trust-region interior point method described in [16]_.
    This interior point algorithm, in turn, solves inequality constraints
    by introducing slack variables and solving a sequence of
    equality-constrained barrier problems for progressively smaller values
    of the barrier parameter. The previously described equality constrained
    SQP method is used to solve the subproblems with increasing levels
    of accuracy as the iterate gets closer to a solution.

    **Finite-Difference Options**

    For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
    the gradient and the Hessian may be approximated using
    three finite-difference schemes: {'2-point', '3-point', 'cs'}.
    The scheme 'cs' is, potentially, the most accurate but it
    requires the function to correctly handle complex inputs and to
    be differentiable in the complex plane. The scheme '3-point' is more
    accurate than '2-point' but requires twice as many operations.
    **Custom minimizers**

    It may be useful to pass a custom minimization method, for example
    when using a frontend to this method such as `scipy.optimize.basinhopping`
    or a different library. You can simply pass a callable as the ``method``
    parameter.

    The callable is called as ``method(fun, x0, args, **kwargs, **options)``
    where ``kwargs`` corresponds to any other parameters passed to `minimize`
    (such as `callback`, `hess`, etc.), except the `options` dict, which has
    its contents also passed as `method` parameters pair by pair. Also, if
    `jac` has been passed as a bool type, `jac` and `fun` are mangled so that
    `fun` returns just the function values and `jac` is converted to a
    function returning the Jacobian. The method shall return an
    ``OptimizeResult`` object.

    The provided `method` callable must be able to accept (and possibly
    ignore) arbitrary parameters; the set of parameters accepted by
    `minimize` may expand in future versions and then these parameters will
    be passed to the method. You can find an example in the scipy.optimize
    tutorial.

    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
        Minimization. The Computer Journal 7: 308-13.
    .. [2] Wright M H. 1996. Direct search methods: Once scorned, now
        respectable, in Numerical Analysis 1995: Proceedings of the 1995
        Dundee Biennial Conference in Numerical Analysis (Eds. D F
        Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
        191-208.
    .. [3] Powell, M J D. 1964. An efficient method for finding the minimum
        of a function of several variables without calculating derivatives.
        The Computer Journal 7: 155-162.
    .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
        Numerical Recipes (any edition), Cambridge University Press.
    .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
        Springer New York.
    .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
        Algorithm for Bound Constrained Optimization. SIAM Journal on
        Scientific and Statistical Computing 16 (5): 1190-1208.
    .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
        778: L-BFGS-B, FORTRAN routines for large scale bound constrained
        optimization. ACM Transactions on Mathematical Software 23 (4):
        550-560.
    .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
        1984. SIAM Journal of Numerical Analysis 21: 770-778.
    .. [9] Powell, M J D. A direct search optimization method that models
        the objective and constraint functions by linear interpolation.
        1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
        and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
    .. [10] Powell M J D. Direct search algorithms for optimization
        calculations. 1998. Acta Numerica 7: 287-336.
    .. [11] Powell M J D. A view of algorithms for optimization without
        derivatives. 2007. Cambridge University Technical Report DAMTP
        2007/NA03.
    .. [12] Kraft, D. A software package for sequential quadratic
        programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
        Center -- Institute for Flight Mechanics, Koln, Germany.
    .. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
        Trust region methods. 2000. Siam. pp. 169-200.
    .. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
        implementation of the GLTR method for iterative solution of
        the trust region problem", https://arxiv.org/abs/1611.04718
    .. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
        Trust-Region Subproblem using the Lanczos Method",
        SIAM J. Optim., 9(2), 504--525, (1999).
    .. [16] Byrd, Richard H., Mary E.
        Hribar, and Jorge Nocedal. 1999. An interior point algorithm for
        large-scale nonlinear programming. SIAM Journal on Optimization
        9.4: 877-900.
    .. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
        implementation of an algorithm for large-scale equality constrained
        optimization. SIAM Journal on Optimization 8.3: 682-706.

    Examples
    --------
    Let us consider the problem of minimizing the Rosenbrock function. This
    function (and its respective derivatives) is implemented in `rosen`
    (resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.

    >>> from scipy.optimize import minimize, rosen, rosen_der

    A simple application of the *Nelder-Mead* method is:

    >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
    >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
    >>> res.x
    array([ 1.,  1.,  1.,  1.,  1.])

    Now using the *BFGS* algorithm, using the first derivative and a few
    options:

    >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
    ...                options={'gtol': 1e-6, 'disp': True})
    Optimization terminated successfully.
             Current function value: 0.000000
             Iterations: 26
             Function evaluations: 31
             Gradient evaluations: 31
    >>> res.x
    array([ 1.,  1.,  1.,  1.,  1.])
    >>> print(res.message)
    Optimization terminated successfully.
    >>> res.hess_inv
    array([[ 0.00749589,  0.01255155,  0.02396251,  0.04750988,  0.09495377],  # may vary
           [ 0.01255155,  0.02510441,  0.04794055,  0.09502834,  0.18996269],
           [ 0.02396251,  0.04794055,  0.09631614,  0.19092151,  0.38165151],
           [ 0.04750988,  0.09502834,  0.19092151,  0.38341252,  0.7664427 ],
           [ 0.09495377,  0.18996269,  0.38165151,  0.7664427 ,  1.53713523]])

    Next, consider a minimization problem with several constraints (namely
    Example 16.4 from [5]_). The objective function is:

    >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2

    There are three constraints defined as:

    >>> cons = ({'type': 'ineq', 'fun': lambda x:  x[0] - 2 * x[1] + 2},
    ...         {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
    ...         {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})

    And variables must be positive, hence the following bounds:

    >>> bnds = ((0, None), (0, None))

    The optimization problem is solved using the SLSQP method as:

    >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
    ...                constraints=cons)

    It should converge to the theoretical solution (1.4, 1.7).

    """
    x0 = np.asarray(x0)
    if x0.dtype.kind in np.typecodes["AllInteger"]:
        x0 = np.asarray(x0, dtype=float)

    if not isinstance(args, tuple):
        args = (args,)

    if method is None:
        # Select automatically
        if constraints:
            method = 'SLSQP'
        elif bounds is not None:
            method = 'L-BFGS-B'
        else:
            method = 'BFGS'

    if callable(method):
        meth = "_custom"
    else:
        meth = method.lower()

    if options is None:
        options = {}
    # check if optional parameters are supported by the selected method
    # - jac
    if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac):
        warn('Method %s does not use gradient information (jac).' % method,
             RuntimeWarning)
    # - hess
    if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
                    'trust-krylov', 'trust-exact', '_custom') \
       and hess is not None:
        warn('Method %s does not use Hessian information (hess).' % method,
             RuntimeWarning)
    # - hessp
    if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
                    'trust-krylov', '_custom') \
       and hessp is not None:
        warn('Method %s does not use Hessian-vector product '
             'information (hessp).'
             % method, RuntimeWarning)
    # - constraints or bounds
    if (meth in ('nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'dogleg',
                 'trust-ncg') and (bounds is not None or np.any(constraints))):
        warn('Method %s cannot handle constraints nor bounds.' % method,
             RuntimeWarning)
    if meth in ('l-bfgs-b', 'tnc') and np.any(constraints):
        warn('Method %s cannot handle constraints.' % method,
             RuntimeWarning)
    if meth == 'cobyla' and bounds is not None:
        warn('Method %s cannot handle bounds.' % method,
             RuntimeWarning)
    # - callback
    if (meth in ('cobyla',) and callback is not None):
        warn('Method %s does not support callback.' % method, RuntimeWarning)
    # - return_all
    if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and
            options.get('return_all', False)):
        warn('Method %s does not support the return_all option.' % method,
             RuntimeWarning)

    # check gradient vector
    if meth == 'trust-constr':
        if type(jac) is bool:
            if jac:
                fun = MemoizeJac(fun)
                jac = fun.derivative
            else:
                jac = '2-point'
        elif not callable(jac) and jac not in ('2-point', '3-point', 'cs'):
            raise ValueError("Unsupported jac definition.")
    else:
        if jac in ('2-point', '3-point', 'cs'):
            if jac in ('3-point', 'cs'):
                warn("Only 'trust-constr' method accept %s "
                     "options for 'jac'. Using '2-point' instead." % jac)
            jac = None
        elif not callable(jac):
            if bool(jac):
                fun = MemoizeJac(fun)
                jac = fun.derivative
            else:
                jac = None

    # set default tolerances
    if tol is not None:
        options = dict(options)
        if meth == 'nelder-mead':
            options.setdefault('xatol', tol)
            options.setdefault('fatol', tol)
        if meth in ('newton-cg', 'powell', 'tnc'):
            options.setdefault('xtol', tol)
        if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
            options.setdefault('ftol', tol)
        if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
                    'trust-ncg', 'trust-exact', 'trust-krylov'):
            options.setdefault('gtol', tol)
        if meth in ('cobyla', '_custom'):
            options.setdefault('tol', tol)
        if meth == 'trust-constr':
            options.setdefault('xtol', tol)
            options.setdefault('gtol', tol)
            options.setdefault('barrier_tol', tol)

    if bounds is not None:
        if meth == 'trust-constr':
            if not isinstance(bounds, Bounds):
                lb, ub = old_bound_to_new(bounds)
                bounds = Bounds(lb, ub)
        elif meth in ('l-bfgs-b', 'tnc', 'slsqp'):
            if isinstance(bounds, Bounds):
                bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0])

    if meth == '_custom':
        return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
                      bounds=bounds, constraints=constraints,
                      callback=callback, **options)
    elif meth == 'nelder-mead':
        return _minimize_neldermead(fun, x0, args, callback, **options)
    elif meth == 'powell':
        return _minimize_powell(fun, x0, args, callback, **options)
    elif meth == 'cg':
        return _minimize_cg(fun, x0, args, jac, callback, **options)
    elif meth == 'bfgs':
        return _minimize_bfgs(fun, x0, args, jac, callback, **options)
    elif meth == 'newton-cg':
        return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
                                  **options)
    elif meth == 'l-bfgs-b':
        return _minimize_lbfgsb(fun, x0, args, jac, bounds,
                                callback=callback, **options)
    elif meth == 'tnc':
        return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
                             **options)
    elif meth == 'cobyla':
        return _minimize_cobyla(fun, x0, args, constraints, **options)
    elif meth == 'slsqp':
        return _minimize_slsqp(fun, x0, args, jac, bounds,
                               constraints, callback=callback, **options)
    elif meth == 'trust-constr':
        return _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
                                            bounds, constraints,
                                            callback=callback, **options)
    elif meth == 'dogleg':
        return _minimize_dogleg(fun, x0, args, jac, hess,
                                callback=callback,
**options) elif meth == 'trust-ncg': return _minimize_trust_ncg(fun, x0, args, jac, hess, hessp, callback=callback, **options) elif meth == 'trust-krylov': return _minimize_trust_krylov(fun, x0, args, jac, hess, hessp, callback=callback, **options) elif meth == 'trust-exact': return _minimize_trustregion_exact(fun, x0, args, jac, hess, callback=callback, **options) else: raise ValueError('Unknown solver %s' % method) def minimize_scalar(fun, bracket=None, bounds=None, args=(), method='brent', tol=None, options=None): """Minimization of scalar function of one variable. Parameters ---------- fun : callable Objective function. Scalar function, must return a scalar. bracket : sequence, optional For methods 'brent' and 'golden', `bracket` defines the bracketing interval and can either have three items ``(a, b, c)`` so that ``a < b < c`` and ``fun(b) < fun(a), fun(c)`` or two items ``a`` and ``c`` which are assumed to be a starting interval for a downhill bracket search (see `bracket`); it doesn't always mean that the obtained solution will satisfy ``a <= x <= c``. bounds : sequence, optional For method 'bounded', `bounds` is mandatory and must have two items corresponding to the optimization bounds. args : tuple, optional Extra arguments passed to the objective function. method : str or callable, optional Type of solver. Should be one of: - 'Brent' :ref:`(see here) <optimize.minimize_scalar-brent>` - 'Bounded' :ref:`(see here) <optimize.minimize_scalar-bounded>` - 'Golden' :ref:`(see here) <optimize.minimize_scalar-golden>` - custom - a callable object (added in version 0.14.0), see below tol : float, optional Tolerance for termination. For detailed control, use solver-specific options. options : dict, optional A dictionary of solver options. maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. See :func:`show_options()` for solver-specific options. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- minimize : Interface to minimization algorithms for scalar multivariate functions show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *Brent*. Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's algorithm to find a local minimum. The algorithm uses inverse parabolic interpolation when possible to speed up convergence of the golden section method. Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the golden section search technique. It uses analog of the bisection method to decrease the bracketed interval. It is usually preferable to use the *Brent* method. Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can perform bounded minimization. It uses the Brent method to find a local minimum in the interval x1 < xopt < x2. **Custom minimizers** It may be useful to pass a custom minimization method, for example when using some library frontend to minimize_scalar. You can simply pass a callable as the ``method`` parameter. 
The callable is called as ``method(fun, args, **kwargs, **options)`` where ``kwargs`` corresponds to any other parameters passed to `minimize` (such as `bracket`, `tol`, etc.), except the `options` dict, which has its contents also passed as `method` parameters pair by pair. The method shall return an ``OptimizeResult`` object. The provided `method` callable must be able to accept (and possibly ignore) arbitrary parameters; the set of parameters accepted by `minimize` may expand in future versions and then these parameters will be passed to the method. You can find an example in the scipy.optimize tutorial. .. versionadded:: 0.11.0 Examples -------- Consider the problem of minimizing the following function. >>> def f(x): ... return (x - 2) * x * (x + 2)**2 Using the *Brent* method, we find the local minimum as: >>> from scipy.optimize import minimize_scalar >>> res = minimize_scalar(f) >>> res.x 1.28077640403 Using the *Bounded* method, we find a local minimum with specified bounds as: >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded') >>> res.x -2.0000002026 """ if not isinstance(args, tuple): args = (args,) if callable(method): meth = "_custom" else: meth = method.lower() if options is None: options = {} if tol is not None: options = dict(options) if meth == 'bounded' and 'xatol' not in options: warn("Method 'bounded' does not support relative tolerance in x; " "defaulting to absolute tolerance.", RuntimeWarning) options['xatol'] = tol elif meth == '_custom': options.setdefault('tol', tol) else: options.setdefault('xtol', tol) if meth == '_custom': return method(fun, args=args, bracket=bracket, bounds=bounds, **options) elif meth == 'brent': return _minimize_scalar_brent(fun, bracket, args, **options) elif meth == 'bounded': if bounds is None: raise ValueError('The `bounds` parameter is mandatory for ' 'method `bounded`.') # replace boolean "disp" option, if specified, by an integer value, as # expected by _minimize_scalar_bounded() disp = options.get('disp') if isinstance(disp, bool): options['disp'] = 2 * int(disp) return _minimize_scalar_bounded(fun, bounds, args, **options) elif meth == 'golden': return _minimize_scalar_golden(fun, bracket, args, **options) else: raise ValueError('Unknown solver %s' % method)
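

# Editor's sketch (not part of the original module): the "Custom minimizers"
# note in the docstring above says a callable can be passed as ``method``.
# A minimal example assuming only the public scipy.optimize API; the name
# ``custmin`` and its fixed-step search are hypothetical, for illustration.
if __name__ == "__main__":
    from scipy.optimize import OptimizeResult, minimize_scalar

    def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
                maxiter=100, callback=None, **options):
        # naive fixed-step descent starting from the bracket midpoint
        bestx = (bracket[1] + bracket[0]) / 2.0
        besty = fun(bestx, *args)
        funcalls = 1
        niter = 0
        improved = True
        while improved and niter < maxiter:
            improved = False
            niter += 1
            for testx in [bestx - stepsize, bestx + stepsize]:
                testy = fun(testx, *args)
                funcalls += 1
                if testy < besty:
                    besty, bestx, improved = testy, testx, True
            if callback is not None:
                callback(bestx)
        # custom methods must return an OptimizeResult
        return OptimizeResult(fun=besty, x=bestx, nit=niter,
                              nfev=funcalls, success=(niter > 1))

    res = minimize_scalar(lambda x: (x - 2.0)**2, bracket=(0, 3),
                          method=custmin, options=dict(stepsize=0.05))
    print(res.x)  # expected to land near 2.0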
35,095
43.538071
89
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_linprog_ip.py
""" An interior-point method for linear programming. """ # Author: Matt Haberland from __future__ import print_function, division, absolute_import import numpy as np import scipy as sp import scipy.sparse as sps from warnings import warn from scipy.linalg import LinAlgError from .optimize import OptimizeResult, OptimizeWarning, _check_unknown_options from scipy.optimize._remove_redundancy import _remove_redundancy from scipy.optimize._remove_redundancy import _remove_redundancy_sparse from scipy.optimize._remove_redundancy import _remove_redundancy_dense def _clean_inputs( c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): """ Given user inputs for a linear programming problem, return the objective vector, upper bound constraints, equality constraints, and simple bounds in a preferred format. Parameters ---------- c : array_like Coefficients of the linear objective function to be minimized. A_ub : array_like, optional 2-D array which, when matrix-multiplied by ``x``, gives the values of the upper-bound inequality constraints at ``x``. b_ub : array_like, optional 1-D array of values representing the upper-bound of each inequality constraint (row) in ``A_ub``. A_eq : array_like, optional 2-D array which, when matrix-multiplied by ``x``, gives the values of the equality constraints at ``x``. b_eq : array_like, optional 1-D array of values representing the RHS of each equality constraint (row) in ``A_eq``. bounds : sequence, optional ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None for one of ``min`` or ``max`` when there is no bound in that direction. By default bounds are ``(0, None)`` (non-negative) If a sequence containing a single tuple is provided, then ``min`` and ``max`` will be applied to all variables in the problem. Returns ------- c : 1-D array Coefficients of the linear objective function to be minimized. A_ub : 2-D array 2-D array which, when matrix-multiplied by ``x``, gives the values of the upper-bound inequality constraints at ``x``. b_ub : 1-D array 1-D array of values representing the upper-bound of each inequality constraint (row) in ``A_ub``. A_eq : 2-D array 2-D array which, when matrix-multiplied by ``x``, gives the values of the equality constraints at ``x``. b_eq : 1-D array 1-D array of values representing the RHS of each equality constraint (row) in ``A_eq``. bounds : sequence of tuples ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None for each of ``min`` or ``max`` when there is no bound in that direction. 
By default bounds are ``(0, None)`` (non-negative) """ try: if c is None: raise TypeError try: c = np.asarray(c, dtype=float).copy().squeeze() except BaseException: # typically a ValueError and shouldn't be, IMO raise TypeError if c.size == 1: c = c.reshape((-1)) n_x = len(c) if n_x == 0 or len(c.shape) != 1: raise ValueError( "Invalid input for linprog: c should be a 1D array; it must " "not have more than one non-singleton dimension") if not(np.isfinite(c).all()): raise ValueError( "Invalid input for linprog: c must not contain values " "inf, nan, or None") except TypeError: raise TypeError( "Invalid input for linprog: c must be a 1D array of numerical " "coefficients") try: try: if sps.issparse(A_eq) or sps.issparse(A_ub): A_ub = sps.coo_matrix( (0, n_x), dtype=float) if A_ub is None else sps.coo_matrix( A_ub, dtype=float).copy() else: A_ub = np.zeros( (0, n_x), dtype=float) if A_ub is None else np.asarray( A_ub, dtype=float).copy() except BaseException: raise TypeError n_ub = A_ub.shape[0] if len(A_ub.shape) != 2 or A_ub.shape[1] != len(c): raise ValueError( "Invalid input for linprog: A_ub must have exactly two " "dimensions, and the number of columns in A_ub must be " "equal to the size of c ") if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all() or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()): raise ValueError( "Invalid input for linprog: A_ub must not contain values " "inf, nan, or None") except TypeError: raise TypeError( "Invalid input for linprog: A_ub must be a numerical 2D array " "with each row representing an upper bound inequality constraint") try: try: b_ub = np.array( [], dtype=float) if b_ub is None else np.asarray( b_ub, dtype=float).copy().squeeze() except BaseException: raise TypeError if b_ub.size == 1: b_ub = b_ub.reshape((-1)) if len(b_ub.shape) != 1: raise ValueError( "Invalid input for linprog: b_ub should be a 1D array; it " "must not have more than one non-singleton dimension") if len(b_ub) != n_ub: raise ValueError( "Invalid input for linprog: The number of rows in A_ub must " "be equal to the number of values in b_ub") if not(np.isfinite(b_ub).all()): raise ValueError( "Invalid input for linprog: b_ub must not contain values " "inf, nan, or None") except TypeError: raise TypeError( "Invalid input for linprog: b_ub must be a 1D array of " "numerical values, each representing the upper bound of an " "inequality constraint (row) in A_ub") try: try: if sps.issparse(A_eq) or sps.issparse(A_ub): A_eq = sps.coo_matrix( (0, n_x), dtype=float) if A_eq is None else sps.coo_matrix( A_eq, dtype=float).copy() else: A_eq = np.zeros( (0, n_x), dtype=float) if A_eq is None else np.asarray( A_eq, dtype=float).copy() except BaseException: raise TypeError n_eq = A_eq.shape[0] if len(A_eq.shape) != 2 or A_eq.shape[1] != len(c): raise ValueError( "Invalid input for linprog: A_eq must have exactly two " "dimensions, and the number of columns in A_eq must be " "equal to the size of c ") if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all() or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()): raise ValueError( "Invalid input for linprog: A_eq must not contain values " "inf, nan, or None") except TypeError: raise TypeError( "Invalid input for linprog: A_eq must be a 2D array with each " "row representing an equality constraint") try: try: b_eq = np.array( [], dtype=float) if b_eq is None else np.asarray( b_eq, dtype=float).copy().squeeze() except BaseException: raise TypeError if b_eq.size == 1: b_eq = b_eq.reshape((-1)) if len(b_eq.shape) != 1: raise 
ValueError( "Invalid input for linprog: b_eq should be a 1D array; it " "must not have more than one non-singleton dimension") if len(b_eq) != n_eq: raise ValueError( "Invalid input for linprog: the number of rows in A_eq " "must be equal to the number of values in b_eq") if not(np.isfinite(b_eq).all()): raise ValueError( "Invalid input for linprog: b_eq must not contain values " "inf, nan, or None") except TypeError: raise TypeError( "Invalid input for linprog: b_eq must be a 1D array of " "numerical values, each representing the right hand side of an " "equality constraints (row) in A_eq") # "If a sequence containing a single tuple is provided, then min and max # will be applied to all variables in the problem." # linprog doesn't treat this right: it didn't accept a list with one tuple # in it try: if isinstance(bounds, str): raise TypeError if bounds is None or len(bounds) == 0: bounds = [(0, None)] * n_x elif len(bounds) == 1: b = bounds[0] if len(b) != 2: raise ValueError( "Invalid input for linprog: exactly one lower bound and " "one upper bound must be specified for each element of x") bounds = [b] * n_x elif len(bounds) == n_x: try: len(bounds[0]) except BaseException: bounds = [(bounds[0], bounds[1])] * n_x for i, b in enumerate(bounds): if len(b) != 2: raise ValueError( "Invalid input for linprog, bound " + str(i) + " " + str(b) + ": exactly one lower bound and one upper bound must " "be specified for each element of x") elif (len(bounds) == 2 and np.isreal(bounds[0]) and np.isreal(bounds[1])): bounds = [(bounds[0], bounds[1])] * n_x else: raise ValueError( "Invalid input for linprog: exactly one lower bound and one " "upper bound must be specified for each element of x") clean_bounds = [] # also creates a copy so user's object isn't changed for i, b in enumerate(bounds): if b[0] is not None and b[1] is not None and b[0] > b[1]: raise ValueError( "Invalid input for linprog, bound " + str(i) + " " + str(b) + ": a lower bound must be less than or equal to the " "corresponding upper bound") if b[0] == np.inf: raise ValueError( "Invalid input for linprog, bound " + str(i) + " " + str(b) + ": infinity is not a valid lower bound") if b[1] == -np.inf: raise ValueError( "Invalid input for linprog, bound " + str(i) + " " + str(b) + ": negative infinity is not a valid upper bound") lb = float(b[0]) if b[0] is not None and b[0] != -np.inf else None ub = float(b[1]) if b[1] is not None and b[1] != np.inf else None clean_bounds.append((lb, ub)) bounds = clean_bounds except ValueError as e: if "could not convert string to float" in e.args[0]: raise TypeError else: raise e except TypeError as e: print(e) raise TypeError( "Invalid input for linprog: bounds must be a sequence of " "(min,max) pairs, each defining bounds on an element of x ") return c, A_ub, b_ub, A_eq, b_eq, bounds def _presolve(c, A_ub, b_ub, A_eq, b_eq, bounds, rr): """ Given inputs for a linear programming problem in preferred format, presolve the problem: identify trivial infeasibilities, redundancies, and unboundedness, tighten bounds where possible, and eliminate fixed variables. Parameters ---------- c : 1-D array Coefficients of the linear objective function to be minimized. A_ub : 2-D array 2-D array which, when matrix-multiplied by ``x``, gives the values of the upper-bound inequality constraints at ``x``. b_ub : 1-D array 1-D array of values representing the upper-bound of each inequality constraint (row) in ``A_ub``. 
A_eq : 2-D array 2-D array which, when matrix-multiplied by ``x``, gives the values of the equality constraints at ``x``. b_eq : 1-D array 1-D array of values representing the RHS of each equality constraint (row) in ``A_eq``. bounds : sequence of tuples ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None for each of ``min`` or ``max`` when there is no bound in that direction. Returns ------- c : 1-D array Coefficients of the linear objective function to be minimized. c0 : 1-D array Constant term in objective function due to fixed (and eliminated) variables. A_ub : 2-D array 2-D array which, when matrix-multiplied by ``x``, gives the values of the upper-bound inequality constraints at ``x``. Unnecessary rows/columns have been removed. b_ub : 1-D array 1-D array of values representing the upper-bound of each inequality constraint (row) in ``A_ub``. Unnecessary elements have been removed. A_eq : 2-D array 2-D array which, when matrix-multiplied by ``x``, gives the values of the equality constraints at ``x``. Unnecessary rows/columns have been removed. b_eq : 1-D array 1-D array of values representing the RHS of each equality constraint (row) in ``A_eq``. Unnecessary elements have been removed. bounds : sequence of tuples ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None for each of ``min`` or ``max`` when there is no bound in that direction. Bounds have been tightened where possible. x : 1-D array Solution vector (when the solution is trivial and can be determined in presolve) undo: list of tuples (index, value) pairs that record the original index and fixed value for each variable removed from the problem complete: bool Whether the solution is complete (solved or determined to be infeasible or unbounded in presolve) status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded message : str A string descriptor of the exit status of the optimization. References ---------- .. [2] Andersen, Erling D. "Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. .. [5] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear programming." Mathematical Programming 71.2 (1995): 221-245. """ # ideas from Reference [5] by Andersen and Andersen # however, unlike the reference, this is performed before converting # problem to standard form # There are a few advantages: # * artificial variables have not been added, so matrices are smaller # * bounds have not been converted to constraints yet. (It is better to # do that after presolve because presolve may adjust the simple bounds.) # There are many improvements that can be made, namely: # * implement remaining checks from [5] # * loop presolve until no additional changes are made # * implement additional efficiency improvements in redundancy removal [2] tol = 1e-9 # tolerance for equality. should this be exposed to user? 
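    # Editor's illustrative note (hedged, comments only): with, say,
    #     A_eq = [[0, 0]], b_eq = [1]
    # the zero-row check below reports trivial infeasibility, since
    # 0*x1 + 0*x2 == 1 has no solution; with b_eq = [0] the same row is
    # harmlessly dropped from the problem instead.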
undo = [] # record of variables eliminated from problem # constant term in cost function may be added if variables are eliminated c0 = 0 complete = False # complete is True if detected infeasible/unbounded x = np.zeros(c.shape) # this is solution vector if completed in presolve status = 0 # all OK unless determined otherwise message = "" # Standard form for bounds (from _clean_inputs) is list of tuples # but numpy array is more convenient here # In retrospect, numpy array should have been the standard bounds = np.array(bounds) lb = bounds[:, 0] ub = bounds[:, 1] lb[np.equal(lb, None)] = -np.inf ub[np.equal(ub, None)] = np.inf bounds = bounds.astype(float) lb = lb.astype(float) ub = ub.astype(float) m_eq, n = A_eq.shape m_ub, n = A_ub.shape if (sps.issparse(A_eq)): A_eq = A_eq.tolil() A_ub = A_ub.tolil() def where(A): return A.nonzero() vstack = sps.vstack else: where = np.where vstack = np.vstack # zero row in equality constraints zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten() if np.any(zero_row): if np.any( np.logical_and( zero_row, np.abs(b_eq) > tol)): # test_zero_row_1 # infeasible if RHS is not zero status = 2 message = ("The problem is (trivially) infeasible due to a row " "of zeros in the equality constraint matrix with a " "nonzero corresponding constraint value.") complete = True return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status, message) else: # test_zero_row_2 # if RHS is zero, we can eliminate this equation entirely A_eq = A_eq[np.logical_not(zero_row), :] b_eq = b_eq[np.logical_not(zero_row)] # zero row in inequality constraints zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten() if np.any(zero_row): if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1 # infeasible if RHS is less than zero (because LHS is zero) status = 2 message = ("The problem is (trivially) infeasible due to a row " "of zeros in the equality constraint matrix with a " "nonzero corresponding constraint value.") complete = True return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status, message) else: # test_zero_row_2 # if LHS is >= 0, we can eliminate this constraint entirely A_ub = A_ub[np.logical_not(zero_row), :] b_ub = b_ub[np.logical_not(zero_row)] # zero column in (both) constraints # this indicates that a variable isn't constrained and can be removed A = vstack((A_eq, A_ub)) if A.shape[0] > 0: zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten() # variable will be at upper or lower bound, depending on objective x[np.logical_and(zero_col, c < 0)] = ub[ np.logical_and(zero_col, c < 0)] x[np.logical_and(zero_col, c > 0)] = lb[ np.logical_and(zero_col, c > 0)] if np.any(np.isinf(x)): # if an unconstrained variable has no bound status = 3 message = ("If feasible, the problem is (trivially) unbounded " "due to a zero column in the constraint matrices. 
If " "you wish to check whether the problem is infeasible, " "turn presolve off.") complete = True return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status, message) # variables will equal upper/lower bounds will be removed later lb[np.logical_and(zero_col, c < 0)] = ub[ np.logical_and(zero_col, c < 0)] ub[np.logical_and(zero_col, c > 0)] = lb[ np.logical_and(zero_col, c > 0)] # row singleton in equality constraints # this fixes a variable and removes the constraint singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten() rows = where(singleton_row)[0] cols = where(A_eq[rows, :])[1] if len(rows) > 0: for row, col in zip(rows, cols): val = b_eq[row] / A_eq[row, col] if not lb[col] - tol <= val <= ub[col] + tol: # infeasible if fixed value is not within bounds status = 2 message = ("The problem is (trivially) infeasible because a " "singleton row in the equality constraints is " "inconsistent with the bounds.") complete = True return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status, message) else: # sets upper and lower bounds at that fixed value - variable # will be removed later lb[col] = val ub[col] = val A_eq = A_eq[np.logical_not(singleton_row), :] b_eq = b_eq[np.logical_not(singleton_row)] # row singleton in inequality constraints # this indicates a simple bound and the constraint can be removed # simple bounds may be adjusted here # After all of the simple bound information is combined here, get_Abc will # turn the simple bounds into constraints singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten() cols = where(A_ub[singleton_row, :])[1] rows = where(singleton_row)[0] if len(rows) > 0: for row, col in zip(rows, cols): val = b_ub[row] / A_ub[row, col] if A_ub[row, col] > 0: # upper bound if val < lb[col] - tol: # infeasible complete = True elif val < ub[col]: # new upper bound ub[col] = val else: # lower bound if val > ub[col] + tol: # infeasible complete = True elif val > lb[col]: # new lower bound lb[col] = val if complete: status = 2 message = ("The problem is (trivially) infeasible because a " "singleton row in the upper bound constraints is " "inconsistent with the bounds.") return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status, message) A_ub = A_ub[np.logical_not(singleton_row), :] b_ub = b_ub[np.logical_not(singleton_row)] # identical bounds indicate that variable can be removed i_f = np.abs(lb - ub) < tol # indices of "fixed" variables i_nf = np.logical_not(i_f) # indices of "not fixed" variables # test_bounds_equal_but_infeasible if np.all(i_f): # if bounds define solution, check for consistency residual = b_eq - A_eq.dot(lb) slack = b_ub - A_ub.dot(lb) if ((A_ub.size > 0 and np.any(slack < 0)) or (A_eq.size > 0 and not np.allclose(residual, 0))): status = 2 message = ("The problem is (trivially) infeasible because the " "bounds fix all variables to values inconsistent with " "the constraints") complete = True return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status, message) ub_mod = ub lb_mod = lb if np.any(i_f): c0 += c[i_f].dot(lb[i_f]) b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f]) b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f]) c = c[i_nf] x = x[i_nf] A_eq = A_eq[:, i_nf] A_ub = A_ub[:, i_nf] # record of variables to be added back in undo = [np.where(i_f)[0], lb[i_f]] # don't remove these entries from bounds; they'll be used later. 
# but we _also_ need a version of the bounds with these removed lb_mod = lb[i_nf] ub_mod = ub[i_nf] # no constraints indicates that problem is trivial if A_eq.size == 0 and A_ub.size == 0: b_eq = np.array([]) b_ub = np.array([]) # test_empty_constraint_1 if c.size == 0: status = 0 message = ("The solution was determined in presolve as there are " "no non-trivial constraints.") elif (np.any(np.logical_and(c < 0, ub == np.inf)) or np.any(np.logical_and(c > 0, lb == -np.inf))): # test_no_constraints() status = 3 message = ("If feasible, the problem is (trivially) unbounded " "because there are no constraints and at least one " "element of c is negative. If you wish to check " "whether the problem is infeasible, turn presolve " "off.") else: # test_empty_constraint_2 status = 0 message = ("The solution was determined in presolve as there are " "no non-trivial constraints.") complete = True x[c < 0] = ub_mod[c < 0] x[c > 0] = lb_mod[c > 0] # if this is not the last step of presolve, should convert bounds back # to array and return here # *sigh* - convert bounds back to their standard form (list of tuples) # again, in retrospect, numpy array would be standard form lb[np.equal(lb, -np.inf)] = None ub[np.equal(ub, np.inf)] = None bounds = np.hstack((lb[:, np.newaxis], ub[:, np.newaxis])) bounds = bounds.tolist() for i, row in enumerate(bounds): for j, col in enumerate(row): if str( col) == "nan": # comparing col to float("nan") and # np.nan doesn't work. should use np.isnan bounds[i][j] = None # remove redundant (linearly dependent) rows from equality constraints n_rows_A = A_eq.shape[0] redundancy_warning = ("A_eq does not appear to be of full row rank. To " "improve performance, check the problem formulation " "for redundant equality constraints.") if (sps.issparse(A_eq)): if rr and A_eq.size > 0: # TODO: Fast sparse rank check? A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq) if A_eq.shape[0] < n_rows_A: warn(redundancy_warning, OptimizeWarning) if status != 0: complete = True return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status, message) # This is a wild guess for which redundancy removal algorithm will be # faster. More testing would be good. small_nullspace = 5 if rr and A_eq.size > 0: try: # TODO: instead use results of first SVD in _remove_redundancy rank = np.linalg.matrix_rank(A_eq) except: # oh well, we'll have to go with _remove_redundancy_dense rank = 0 if rr and A_eq.size > 0 and rank < A_eq.shape[0]: warn(redundancy_warning, OptimizeWarning) dim_row_nullspace = A_eq.shape[0]-rank if dim_row_nullspace <= small_nullspace: A_eq, b_eq, status, message = _remove_redundancy(A_eq, b_eq) if dim_row_nullspace > small_nullspace or status == 4: A_eq, b_eq, status, message = _remove_redundancy_dense(A_eq, b_eq) if A_eq.shape[0] < rank: message = ("Due to numerical issues, redundant equality " "constraints could not be removed automatically. 
" "Try providing your constraint matrices as sparse " "matrices to activate sparse presolve, try turning " "off redundancy removal, or try turning off presolve " "altogether.") status = 4 if status != 0: complete = True return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status, message) def _get_Abc( c, c0=0, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, undo=[]): """ Given a linear programming problem of the form: minimize: c^T * x subject to: A_ub * x <= b_ub A_eq * x == b_eq bounds[i][0] < x_i < bounds[i][1] return the problem in standard form: minimize: c'^T * x' subject to: A * x' == b 0 < x' < oo by adding slack variables and making variable substitutions as necessary. Parameters ---------- c : 1-D array Coefficients of the linear objective function to be minimized. Components corresponding with fixed variables have been eliminated. c0 : float Constant term in objective function due to fixed (and eliminated) variables. A_ub : 2-D array 2-D array which, when matrix-multiplied by ``x``, gives the values of the upper-bound inequality constraints at ``x``. Unnecessary rows/columns have been removed. b_ub : 1-D array 1-D array of values representing the upper-bound of each inequality constraint (row) in ``A_ub``. Unnecessary elements have been removed. A_eq : 2-D array 2-D array which, when matrix-multiplied by ``x``, gives the values of the equality constraints at ``x``. Unnecessary rows/columns have been removed. b_eq : 1-D array 1-D array of values representing the RHS of each equality constraint (row) in ``A_eq``. Unnecessary elements have been removed. bounds : sequence of tuples ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None for each of ``min`` or ``max`` when there is no bound in that direction. Bounds have been tightened where possible. undo: list of tuples (`index`, `value`) pairs that record the original index and fixed value for each variable removed from the problem Returns ------- A : 2-D array 2-D array which, when matrix-multiplied by x, gives the values of the equality constraints at x (for standard form problem). b : 1-D array 1-D array of values representing the RHS of each equality constraint (row) in A (for standard form problem). c : 1-D array Coefficients of the linear objective function to be minimized (for standard form problem). c0 : float Constant term in objective function due to fixed (and eliminated) variables. References ---------- .. [6] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear programming." Athena Scientific 1 (1997): 997. """ if sps.issparse(A_eq): sparse = True A_eq = sps.lil_matrix(A_eq) A_ub = sps.lil_matrix(A_ub) def hstack(blocks): return sps.hstack(blocks, format="lil") def vstack(blocks): return sps.vstack(blocks, format="lil") zeros = sps.lil_matrix eye = sps.eye else: sparse = False hstack = np.hstack vstack = np.vstack zeros = np.zeros eye = np.eye fixed_x = set() if len(undo) > 0: # these are indices of variables removed from the problem # however, their bounds are still part of the bounds list fixed_x = set(undo[0]) # they are needed elsewhere, but not here bounds = [bounds[i] for i in range(len(bounds)) if i not in fixed_x] # in retrospect, the standard form of bounds should have been an n x 2 # array. maybe change it someday. 
# modify problem such that all variables have only non-negativity bounds bounds = np.array(bounds) lbs = bounds[:, 0] ubs = bounds[:, 1] m_ub, n_ub = A_ub.shape lb_none = np.equal(lbs, None) ub_none = np.equal(ubs, None) lb_some = np.logical_not(lb_none) ub_some = np.logical_not(ub_none) # if preprocessing is on, lb == ub can't happen # if preprocessing is off, then it would be best to convert that # to an equality constraint, but it's tricky to make the other # required modifications from inside here. # unbounded below: substitute xi = -xi' (unbounded above) l_nolb_someub = np.logical_and(lb_none, ub_some) i_nolb = np.where(l_nolb_someub)[0] lbs[l_nolb_someub], ubs[l_nolb_someub] = ( -ubs[l_nolb_someub], lbs[l_nolb_someub]) lb_none = np.equal(lbs, None) ub_none = np.equal(ubs, None) lb_some = np.logical_not(lb_none) ub_some = np.logical_not(ub_none) c[i_nolb] *= -1 if len(i_nolb) > 0: if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... weird A_ub[:, i_nolb] *= -1 if A_eq.shape[0] > 0: A_eq[:, i_nolb] *= -1 # upper bound: add inequality constraint i_newub = np.where(ub_some)[0] ub_newub = ubs[ub_some] n_bounds = np.count_nonzero(ub_some) A_ub = vstack((A_ub, zeros((n_bounds, A_ub.shape[1])))) b_ub = np.concatenate((b_ub, np.zeros(n_bounds))) A_ub[range(m_ub, A_ub.shape[0]), i_newub] = 1 b_ub[m_ub:] = ub_newub A1 = vstack((A_ub, A_eq)) b = np.concatenate((b_ub, b_eq)) c = np.concatenate((c, np.zeros((A_ub.shape[0],)))) # unbounded: substitute xi = xi+ + xi- l_free = np.logical_and(lb_none, ub_none) i_free = np.where(l_free)[0] n_free = len(i_free) A1 = hstack((A1, zeros((A1.shape[0], n_free)))) c = np.concatenate((c, np.zeros(n_free))) A1[:, range(n_ub, A1.shape[1])] = -A1[:, i_free] c[np.arange(n_ub, A1.shape[1])] = -c[i_free] # add slack variables A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))]) A = hstack([A1, A2]) # lower bound: substitute xi = xi' + lb # now there is a constant term in objective i_shift = np.where(lb_some)[0] lb_shift = lbs[lb_some].astype(float) c0 += np.sum(lb_shift * c[i_shift]) if sparse: b = b.reshape(-1, 1) A = A.tocsc() b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1) b = b.ravel() else: b -= (A[:, i_shift] * lb_shift).sum(axis=1) return A, b, c, c0 def _postprocess( x, c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, complete=False, undo=[], status=0, message="", tol=1e-8): """ Given solution x to presolved, standard form linear program x, add fixed variables back into the problem and undo the variable substitutions to get solution to original linear program. Also, calculate the objective function value, slack in original upper bound constraints, and residuals in original equality constraints. Parameters ---------- x : 1-D array Solution vector to the standard-form problem. c : 1-D array Original coefficients of the linear objective function to be minimized. A_ub : 2-D array Original upper bound constraint matrix. b_ub : 1-D array Original upper bound constraint vector. A_eq : 2-D array Original equality constraint matrix. b_eq : 1-D array Original equality constraint vector. 
    bounds : sequence of tuples
        Bounds, as modified in presolve
    complete : bool
        Whether the solution was determined in presolve (``True`` if so)
    undo : list of tuples
        (`index`, `value`) pairs that record the original index and fixed
        value for each variable removed from the problem
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Serious numerical difficulties encountered

    message : str
        A string descriptor of the exit status of the optimization.
    tol : float
        Termination tolerance; see [1]_ Section 4.5.

    Returns
    -------
    x : 1-D array
        Solution vector to original linear programming problem
    fun : float
        optimal objective value for original problem
    slack : 1-D array
        The (non-negative) slack in the upper bound constraints, that is,
        ``b_ub - A_ub * x``
    con : 1-D array
        The (nominally zero) residuals of the equality constraints, that is,
        ``b - A_eq * x``
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Serious numerical difficulties encountered

    message : str
        A string descriptor of the exit status of the optimization.

    """
    # note that all the inputs are the ORIGINAL, unmodified versions
    # no rows, columns have been removed
    # the only exception is bounds; it has been modified
    # we need these modified values to undo the variable substitutions
    # in retrospect, perhaps this could have been simplified if the "undo"
    # variable also contained information for undoing variable substitutions

    n_x = len(c)

    # we don't have to undo variable substitutions for fixed variables that
    # were removed from the problem
    no_adjust = set()

    # if there were variables removed from the problem, add them back into the
    # solution vector
    if len(undo) > 0:
        no_adjust = set(undo[0])
        x = x.tolist()
        for i, val in zip(undo[0], undo[1]):
            x.insert(i, val)
        x = np.array(x)

    # now undo variable substitutions
    # if "complete", problem was solved in presolve; don't do anything here
    if not complete and bounds is not None:  # bounds are never none, probably
        n_unbounded = 0
        for i, b in enumerate(bounds):
            if i in no_adjust:
                continue
            lb, ub = b
            if lb is None and ub is None:
                n_unbounded += 1
                x[i] = x[i] - x[n_x + n_unbounded - 1]
            else:
                if lb is None:
                    x[i] = ub - x[i]
                else:
                    x[i] += lb

    n_x = len(c)
    x = x[:n_x]  # all the rest of the variables were artificial
    fun = x.dot(c)
    slack = b_ub - A_ub.dot(x)  # report slack for ORIGINAL UB constraints
    # report residuals of ORIGINAL EQ constraints
    con = b_eq - A_eq.dot(x)

    # Patch for bug #8664. Detecting this sort of issue earlier
    # (via abnormalities in the indicators) would be better.
    bounds = np.array(bounds)  # again, this should have been the standard form
    lb = bounds[:, 0]
    ub = bounds[:, 1]
    lb[np.equal(lb, None)] = -np.inf
    ub[np.equal(ub, None)] = np.inf

    tol = np.sqrt(tol)  # Somewhat arbitrary, but status 5 is very unusual
    if status == 0 and ((slack < -tol).any() or (np.abs(con) > tol).any() or
                        (x < lb - tol).any() or (x > ub + tol).any()):
        status = 4
        message = ("The solution does not satisfy the constraints, yet "
                   "no errors were raised and there is no certificate of "
                   "infeasibility or unboundedness. This is known to occur "
                   "if the `presolve` option is False and the problem is "
                   "infeasible. If you encounter this under different "
                   "circumstances, please submit a bug report.
Otherwise, " "please enable presolve.") elif status == 0 and (np.isnan(x).any() or np.isnan(fun) or np.isnan(slack).any() or np.isnan(con).any()): status = 4 message = ("Numerical difficulties were encountered but no errors " "were raised. This is known to occur if the 'presolve' " "option is False, 'sparse' is True, and A_eq includes " "redundant rows. If you encounter this under different " "circumstances, please submit a bug report. Otherwise, " "remove linearly dependent equations from your equality " "constraints or enable presolve.") return x, fun, slack, con, status, message def _get_solver(sparse=False, lstsq=False, sym_pos=True, cholesky=True): """ Given solver options, return a handle to the appropriate linear system solver. Parameters ---------- sparse : bool True if the system to be solved is sparse. This is typically set True when the original ``A_ub`` and ``A_eq`` arrays are sparse. lstsq : bool True if the system is ill-conditioned and/or (nearly) singular and thus a more robust least-squares solver is desired. This is sometimes needed as the solution is approached. sym_pos : bool True if the system matrix is symmetric positive definite Sometimes this needs to be set false as the solution is approached, even when the system should be symmetric positive definite, due to numerical difficulties. cholesky : bool True if the system is to be solved by Cholesky, rather than LU, decomposition. This is typically faster unless the problem is very small or prone to numerical difficulties. Returns ------- solve : function Handle to the appropriate solver function """ if sparse: if lstsq or not(sym_pos): def solve(M, r, sym_pos=False): return sps.linalg.lsqr(M, r)[0] else: # this is not currently used; it is replaced by splu solve # TODO: expose use of this as an option def solve(M, r): return sps.linalg.spsolve(M, r, permc_spec="MMD_AT_PLUS_A") else: if lstsq: # sometimes necessary as solution is approached def solve(M, r): return sp.linalg.lstsq(M, r)[0] elif cholesky: solve = sp.linalg.cho_solve else: # this seems to cache the matrix factorization, so solving # with multiple right hand sides is much faster def solve(M, r, sym_pos=sym_pos): return sp.linalg.solve(M, r, sym_pos=sym_pos) return solve def _get_delta( A, b, c, x, y, z, tau, kappa, gamma, eta, sparse=False, lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False, permc_spec='MMD_AT_PLUS_A'): """ Given standard form problem defined by ``A``, ``b``, and ``c``; current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``; algorithmic parameters ``gamma and ``eta; and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc`` (predictor-corrector), and ``ip`` (initial point improvement), get the search direction for increments to the variable estimates. Parameters ---------- As defined in [1], except: sparse : bool True if the system to be solved is sparse. This is typically set True when the original ``A_ub`` and ``A_eq`` arrays are sparse. lstsq : bool True if the system is ill-conditioned and/or (nearly) singular and thus a more robust least-squares solver is desired. This is sometimes needed as the solution is approached. sym_pos : bool True if the system matrix is symmetric positive definite Sometimes this needs to be set false as the solution is approached, even when the system should be symmetric positive definite, due to numerical difficulties. cholesky : bool True if the system is to be solved by Cholesky, rather than LU, decomposition. 
This is typically faster unless the problem is very small or prone to numerical difficulties. pc : bool True if the predictor-corrector method of Mehrota is to be used. This is almost always (if not always) beneficial. Even though it requires the solution of an additional linear system, the factorization is typically (implicitly) reused so solution is efficient, and the number of algorithm iterations is typically reduced. ip : bool True if the improved initial point suggestion due to [1] section 4.3 is desired. It's unclear whether this is beneficial. permc_spec : str (default = 'MMD_AT_PLUS_A') (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = True``.) A matrix is factorized in each iteration of the algorithm. This option specifies how to permute the columns of the matrix for sparsity preservation. Acceptable values are: - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - ``COLAMD``: approximate minimum degree column ordering. This option can impact the convergence of the interior point algorithm; test different values to determine which performs best for your problem. For more information, refer to ``scipy.sparse.linalg.splu``. Returns ------- Search directions as defined in [1] References ---------- .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ if A.shape[0] == 0: # If there are no constraints, some solvers fail (understandably) # rather than returning empty solution. This gets the job done. sparse, lstsq, sym_pos, cholesky = False, False, True, False solve = _get_solver(sparse, lstsq, sym_pos, cholesky) n_x = len(x) # [1] Equation 8.8 r_P = b * tau - A.dot(x) r_D = c * tau - A.T.dot(y) - z r_G = c.dot(x) - b.transpose().dot(y) + kappa mu = (x.dot(z) + tau * kappa) / (n_x + 1) # Assemble M from [1] Equation 8.31 Dinv = x / z splu = False if sparse and not lstsq: # sparse requires Dinv to be diag matrix M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T)) try: # TODO: should use linalg.factorized instead, but I don't have # umfpack and therefore cannot test its performance solve = sps.linalg.splu(M, permc_spec=permc_spec).solve splu = True except: lstsq = True solve = _get_solver(sparse, lstsq, sym_pos, cholesky) else: # dense does not; use broadcasting M = A.dot(Dinv.reshape(-1, 1) * A.T) # For some small problems, calling sp.linalg.solve w/ sym_pos = True # may be faster. I am pretty certain it caches the factorization for # multiple uses and checks the incoming matrix to see if it's the same as # the one it already factorized. (I can't explain the speed otherwise.) if cholesky: try: L = sp.linalg.cho_factor(M) except: cholesky = False solve = _get_solver(sparse, lstsq, sym_pos, cholesky) # pc: "predictor-corrector" [1] Section 4.1 # In development this option could be turned off # but it always seems to improve performance substantially n_corrections = 1 if pc else 0 i = 0 alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0 while i <= n_corrections: # Reference [1] Eq. 8.6 rhatp = eta(gamma) * r_P rhatd = eta(gamma) * r_D rhatg = np.array(eta(gamma) * r_G).reshape((1,)) # Reference [1] Eq. 8.7 rhatxs = gamma * mu - x * z rhattk = np.array(gamma * mu - tau * kappa).reshape((1,)) if i == 1: if ip: # if the correction is to get "initial point" # Reference [1] Eq. 
8.23 rhatxs = ((1 - alpha) * gamma * mu - x * z - alpha**2 * d_x * d_z) rhattk = np.array( (1 - alpha) * gamma * mu - tau * kappa - alpha**2 * d_tau * d_kappa).reshape( (1, )) else: # if the correction is for "predictor-corrector" # Reference [1] Eq. 8.13 rhatxs -= d_x * d_z rhattk -= d_tau * d_kappa # sometimes numerical difficulties arise as the solution is approached # this loop tries to solve the equations using a sequence of functions # for solve. For dense systems, the order is: # 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve, # 2. scipy.linalg.solve w/ sym_pos = True, # 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails # 4. scipy.linalg.lstsq # For sparse systems, the order is: # 1. scipy.sparse.linalg.splu # 2. scipy.sparse.linalg.lsqr # TODO: if umfpack is installed, use factorized instead of splu. # Can't do that now because factorized doesn't pass permc_spec # to splu if umfpack isn't installed. Also, umfpack not tested. solved = False while(not solved): try: solve_this = L if cholesky else M # [1] Equation 8.28 p, q = _sym_solve(Dinv, solve_this, A, c, b, solve, splu) # [1] Equation 8.29 u, v = _sym_solve(Dinv, solve_this, A, rhatd - (1 / x) * rhatxs, rhatp, solve, splu) if np.any(np.isnan(p)) or np.any(np.isnan(q)): raise LinAlgError solved = True except (LinAlgError, ValueError) as e: # Usually this doesn't happen. If it does, it happens when # there are redundant constraints or when approaching the # solution. If so, change solver. cholesky = False if not lstsq: if sym_pos: warn( "Solving system with option 'sym_pos':True " "failed. It is normal for this to happen " "occasionally, especially as the solution is " "approached. However, if you see this frequently, " "consider setting option 'sym_pos' to False.", OptimizeWarning) sym_pos = False else: warn( "Solving system with option 'sym_pos':False " "failed. This may happen occasionally, " "especially as the solution is " "approached. However, if you see this frequently, " "your problem may be numerically challenging. " "If you cannot improve the formulation, consider " "setting 'lstsq' to True. Consider also setting " "`presolve` to True, if it is not already.", OptimizeWarning) lstsq = True else: raise e solve = _get_solver(sparse, lstsq, sym_pos) # [1] Results after 8.29 d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) / (1 / tau * kappa + (-c.dot(p) + b.dot(q)))) d_x = u + p * d_tau d_y = v + q * d_tau # [1] Relations between after 8.25 and 8.26 d_z = (1 / x) * (rhatxs - z * d_x) d_kappa = 1 / tau * (rhattk - kappa * d_tau) # [1] 8.12 and "Let alpha be the maximal possible step..." before 8.23 alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1) if ip: # initial point - see [1] 4.4 gamma = 10 else: # predictor-corrector, [1] definition after 8.12 beta1 = 0.1 # [1] pg. 220 (Table 8.1) gamma = (1 - alpha)**2 * min(beta1, (1 - alpha)) i += 1 return d_x, d_y, d_z, d_tau, d_kappa def _sym_solve(Dinv, M, A, r1, r2, solve, splu=False): """ An implementation of [1] equation 8.31 and 8.32 References ---------- .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. 
""" # [1] 8.31 r = r2 + A.dot(Dinv * r1) if splu: v = solve(r) else: v = solve(M, r) # [1] 8.32 u = Dinv * (A.T.dot(v) - r1) return u, v def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0): """ An implementation of [1] equation 8.21 References ---------- .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ # [1] 4.3 Equation 8.21, ignoring 8.20 requirement # same step is taken in primal and dual spaces # alpha0 is basically beta3 from [1] Table 8.1, but instead of beta3 # the value 1 is used in Mehrota corrector and initial point correction i_x = d_x < 0 i_z = d_z < 0 alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1 alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1 alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1 alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1 alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa]) return alpha def _get_message(status): """ Given problem status code, return a more detailed message. Parameters ---------- status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered. Returns ------- message : str A string descriptor of the exit status of the optimization. """ messages = ( ["Optimization terminated successfully.", "The iteration limit was reached before the algorithm converged.", "The algorithm terminated successfully and determined that the " "problem is infeasible.", "The algorithm terminated successfully and determined that the " "problem is unbounded.", "Numerical difficulties were encountered before the problem " "converged. Please check your problem formulation for errors, " "independence of linear equality constraints, and reasonable " "scaling and matrix condition numbers. If you continue to " "encounter this error, please submit a bug report." ]) return messages[status] def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha): """ An implementation of [1] Equation 8.9 References ---------- .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ x = x + alpha * d_x tau = tau + alpha * d_tau z = z + alpha * d_z kappa = kappa + alpha * d_kappa y = y + alpha * d_y return x, y, z, tau, kappa def _get_blind_start(shape): """ Return the starting point from [1] 4.4 References ---------- .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ m, n = shape x0 = np.ones(n) y0 = np.zeros(m) z0 = np.ones(n) tau0 = 1 kappa0 = 1 return x0, y0, z0, tau0, kappa0 def _indicators(A, b, c, c0, x, y, z, tau, kappa): """ Implementation of several equations from [1] used as indicators of the status of optimization. References ---------- .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. 
""" # residuals for termination are relative to initial values x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape) # See [1], Section 4 - The Homogeneous Algorithm, Equation 8.8 def r_p(x, tau): return b * tau - A.dot(x) def r_d(y, z, tau): return c * tau - A.T.dot(y) - z def r_g(x, y, kappa): return kappa + c.dot(x) - b.dot(y) # np.dot unpacks if they are arrays of size one def mu(x, tau, z, kappa): return (x.dot(z) + np.dot(tau, kappa)) / (len(x) + 1) obj = c.dot(x / tau) + c0 def norm(a): return np.linalg.norm(a) # See [1], Section 4.5 - The Stopping Criteria r_p0 = r_p(x0, tau0) r_d0 = r_d(y0, z0, tau0) r_g0 = r_g(x0, y0, kappa0) mu_0 = mu(x0, tau0, z0, kappa0) rho_A = norm(c.T.dot(x) - b.T.dot(y)) / (tau + norm(b.T.dot(y))) rho_p = norm(r_p(x, tau)) / max(1, norm(r_p0)) rho_d = norm(r_d(y, z, tau)) / max(1, norm(r_d0)) rho_g = norm(r_g(x, y, kappa)) / max(1, norm(r_g0)) rho_mu = mu(x, tau, z, kappa) / mu_0 return rho_p, rho_d, rho_A, rho_g, rho_mu, obj def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False): """ Print indicators of optimization status to the console. Parameters ---------- rho_p : float The (normalized) primal feasibility, see [1] 4.5 rho_d : float The (normalized) dual feasibility, see [1] 4.5 rho_g : float The (normalized) duality gap, see [1] 4.5 alpha : float The step size, see [1] 4.3 rho_mu : float The (normalized) path parameter, see [1] 4.5 obj : float The objective function value of the current iterate header : bool True if a header is to be printed References ---------- .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ if header: print("Primal Feasibility ", "Dual Feasibility ", "Duality Gap ", "Step ", "Path Parameter ", "Objective ") # no clue why this works fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}' print(fmt.format( rho_p, rho_d, rho_g, alpha, rho_mu, obj)) def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec): r""" Solve a linear programming problem in standard form: minimize: c'^T * x' subject to: A * x' == b 0 < x' < oo using the interior point method of [1]. Parameters ---------- A : 2-D array 2-D array which, when matrix-multiplied by ``x``, gives the values of the equality constraints at ``x`` (for standard form problem). b : 1-D array 1-D array of values representing the RHS of each equality constraint (row) in ``A`` (for standard form problem). c : 1-D array Coefficients of the linear objective function to be minimized (for standard form problem). c0 : float Constant term in objective function due to fixed (and eliminated) variables. (Purely for display.) alpha0 : float The maximal step size for Mehrota's predictor-corrector search direction; see :math:`\beta_3`of [1] Table 8.1 beta : float The desired reduction of the path parameter :math:`\mu` (see [3]_) maxiter : int The maximum number of iterations of the algorithm. disp : bool Set to ``True`` if indicators of optimization status are to be printed to the console each iteration. tol : float Termination tolerance; see [1]_ Section 4.5. sparse : bool Set to ``True`` if the problem is to be treated as sparse. However, the inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as (dense) arrays rather than sparse matrices. lstsq : bool Set to ``True`` if the problem is expected to be very poorly conditioned. 
This should always be left as ``False`` unless severe numerical difficulties are frequently encountered, and a better option would be to improve the formulation of the problem. sym_pos : bool Leave ``True`` if the problem is expected to yield a well conditioned symmetric positive definite normal equation matrix (almost always). cholesky : bool Set to ``True`` if the normal equations are to be solved by explicit Cholesky decomposition followed by explicit forward/backward substitution. This is typically faster for moderate, dense problems that are numerically well-behaved. pc : bool Leave ``True`` if the predictor-corrector method of Mehrota is to be used. This is almost always (if not always) beneficial. ip : bool Set to ``True`` if the improved initial point suggestion due to [1]_ Section 4.3 is desired. It's unclear whether this is beneficial. permc_spec : str (default = 'MMD_AT_PLUS_A') (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = True``.) A matrix is factorized in each iteration of the algorithm. This option specifies how to permute the columns of the matrix for sparsity preservation. Acceptable values are: - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - ``COLAMD``: approximate minimum degree column ordering. This option can impact the convergence of the interior point algorithm; test different values to determine which performs best for your problem. For more information, refer to ``scipy.sparse.linalg.splu``. Returns ------- x_hat : float Solution vector (for standard form problem). status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered. message : str A string descriptor of the exit status of the optimization. iteration : int The number of iterations taken to solve the problem References ---------- .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. .. [3] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear Programming based on Newton's Method." Unpublished Course Notes, March 2004. Available 2/25/2017 at: https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf """ iteration = 0 # default initial point x, y, z, tau, kappa = _get_blind_start(A.shape) # first iteration is special improvement of initial point ip = ip if pc else False # [1] 4.5 rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( A, b, c, c0, x, y, z, tau, kappa) go = rho_p > tol or rho_d > tol or rho_A > tol # we might get lucky : ) if disp: _display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True) status = 0 message = "Optimization terminated successfully." 
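    # Editor's summary note (hedged): each pass of the main loop below
    # computes a search direction via _get_delta ([1] Eq. 8.6/8.7, with the
    # Mehrota corrector reusing the same factorization when pc is True),
    # advances the iterate by a step of length alpha ([1] Eq. 8.9), and then
    # re-evaluates the stopping and infeasibility criteria of [1] Section 4.5.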
if sparse: A = sps.csc_matrix(A) A.T = A.transpose() # A.T is defined for sparse matrices but is slow # Redefine it to avoid calculating again # This is fine as long as A doesn't change while go: iteration += 1 if ip: # initial point # [1] Section 4.4 gamma = 1 def eta(g): return 1 else: # gamma = 0 in predictor step according to [1] 4.1 # if predictor/corrector is off, use mean of complementarity [3] # 5.1 / [4] Below Figure 10-4 gamma = 0 if pc else beta * np.mean(z * x) # [1] Section 4.1 def eta(g=gamma): return 1 - g try: # Solve [1] 8.6 and 8.7/8.13/8.23 d_x, d_y, d_z, d_tau, d_kappa = _get_delta( A, b, c, x, y, z, tau, kappa, gamma, eta, sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec) if ip: # initial point # [1] 4.4 # Formula after 8.23 takes a full step regardless if this will # take it negative alpha = 1.0 x, y, z, tau, kappa = _do_step( x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha) x[x < 1] = 1 z[z < 1] = 1 tau = max(1, tau) kappa = max(1, kappa) ip = False # done with initial point else: # [1] Section 4.3 alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0) # [1] Equation 8.9 x, y, z, tau, kappa = _do_step( x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha) except (LinAlgError, FloatingPointError, ValueError, ZeroDivisionError): # this can happen when sparse solver is used and presolve # is turned off. Also observed ValueError in AppVeyor Python 3.6 # Win32 build (PR #8676). I've never seen it otherwise. status = 4 message = _get_message(status) break # [1] 4.5 rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( A, b, c, c0, x, y, z, tau, kappa) go = rho_p > tol or rho_d > tol or rho_A > tol if disp: _display_iter(rho_p, rho_d, rho_g, alpha, float(rho_mu), obj) # [1] 4.5 inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol * max(1, kappa)) inf2 = rho_mu < tol and tau < tol * min(1, kappa) if inf1 or inf2: # [1] Lemma 8.4 / Theorem 8.3 if b.transpose().dot(y) > tol: status = 2 else: # elif c.T.dot(x) < tol: ? Probably not necessary. status = 3 message = _get_message(status) break elif iteration >= maxiter: status = 1 message = _get_message(status) break if disp: print(message) x_hat = x / tau # [1] Statement after Theorem 8.2 return x_hat, status, message, iteration def _linprog_ip( c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, callback=None, alpha0=.99995, beta=0.1, maxiter=1000, disp=False, tol=1e-8, sparse=False, lstsq=False, sym_pos=True, cholesky=None, pc=True, ip=False, presolve=True, permc_spec='MMD_AT_PLUS_A', rr=True, _sparse_presolve=False, **unknown_options): r""" Minimize a linear objective function subject to linear equality constraints, linear inequality constraints, and simple bounds using the interior point method of [1]_. Linear programming is intended to solve problems of the following form:: Minimize: c^T * x Subject to: A_ub * x <= b_ub A_eq * x == b_eq bounds[i][0] < x_i < bounds[i][1] Parameters ---------- c : array_like Coefficients of the linear objective function to be minimized. A_ub : array_like, optional 2-D array which, when matrix-multiplied by ``x``, gives the values of the upper-bound inequality constraints at ``x``. b_ub : array_like, optional 1-D array of values representing the upper-bound of each inequality constraint (row) in ``A_ub``. A_eq : array_like, optional 2-D array which, when matrix-multiplied by ``x``, gives the values of the equality constraints at ``x``. 
b_eq : array_like, optional
        1-D array of values representing the right hand side of each
        equality constraint (row) in ``A_eq``.
    bounds : sequence, optional
        ``(min, max)`` pairs for each element in ``x``, defining the bounds
        on that parameter. Use ``None`` for one of ``min`` or ``max`` when
        there is no bound in that direction. By default bounds are
        ``(0, None)`` (non-negative). If a sequence containing a single
        tuple is provided, then ``min`` and ``max`` will be applied to all
        variables in the problem.

    Options
    -------
    maxiter : int (default = 1000)
        The maximum number of iterations of the algorithm.
    disp : bool (default = False)
        Set to ``True`` if indicators of optimization status are to be
        printed to the console each iteration.
    tol : float (default = 1e-8)
        Termination tolerance to be used for all termination criteria;
        see [1]_ Section 4.5.
    alpha0 : float (default = 0.99995)
        The maximal step size for Mehrota's predictor-corrector search
        direction; see :math:`\beta_{3}` of [1]_ Table 8.1.
    beta : float (default = 0.1)
        The desired reduction of the path parameter :math:`\mu` (see [3]_)
        when Mehrota's predictor-corrector is not in use (uncommon).
    sparse : bool (default = False)
        Set to ``True`` if the problem is to be treated as sparse after
        presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix, this
        option will automatically be set ``True``, and the problem will be
        treated as sparse even during presolve. If your constraint matrices
        contain mostly zeros and the problem is not very small (less than
        about 100 constraints or variables), consider setting ``True`` or
        providing ``A_eq`` and ``A_ub`` as sparse matrices.
    lstsq : bool (default = False)
        Set to ``True`` if the problem is expected to be very poorly
        conditioned. This should always be left ``False`` unless severe
        numerical difficulties are encountered. Leave this at the default
        unless you receive a warning message suggesting otherwise.
    sym_pos : bool (default = True)
        Leave ``True`` if the problem is expected to yield a well
        conditioned symmetric positive definite normal equation matrix
        (almost always). Leave this at the default unless you receive a
        warning message suggesting otherwise.
    cholesky : bool (default = True)
        Set to ``True`` if the normal equations are to be solved by explicit
        Cholesky decomposition followed by explicit forward/backward
        substitution. This is typically faster for moderate, dense problems
        that are numerically well-behaved.
    pc : bool (default = True)
        Leave ``True`` if the predictor-corrector method of Mehrota is to be
        used. This is almost always (if not always) beneficial.
    ip : bool (default = False)
        Set to ``True`` if the improved initial point suggestion due to [1]_
        Section 4.3 is desired. Whether this is beneficial or not depends on
        the problem.
    presolve : bool (default = True)
        Leave ``True`` if the presolve routine should be run. The presolve
        routine is almost always useful because it can detect trivial
        infeasibilities and unboundedness, eliminate fixed variables, and
        remove redundancies. One circumstance in which it might be turned
        off (set ``False``) is when it detects that the problem is trivially
        unbounded; it is possible that the problem is truly infeasible but
        this has not been detected.
    rr : bool (default = True)
        Default ``True`` attempts to eliminate any redundant rows in
        ``A_eq``. Set ``False`` if ``A_eq`` is known to be of full row rank,
        or if you are looking for a potential speedup (at the expense of
        reliability).
    permc_spec : str (default = 'MMD_AT_PLUS_A')
        (Has effect only with ``sparse = True``, ``lstsq = False``,
        ``sym_pos = True``.) A matrix is factorized in each iteration of the
        algorithm. This option specifies how to permute the columns of the
        matrix for sparsity preservation. Acceptable values are:

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of
          A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering.

        This option can impact the convergence of the interior point
        algorithm; test different values to determine which performs best
        for your problem. For more information, refer to
        ``scipy.sparse.linalg.splu``.

    Returns
    -------
    A ``scipy.optimize.OptimizeResult`` consisting of the following fields:

        x : ndarray
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            The optimal value of the objective function
        con : float
            The residuals of the equality constraints (nominally zero).
        slack : ndarray
            The values of the slack variables. Each slack variable
            corresponds to an inequality constraint. If the slack is zero,
            then the corresponding constraint is active.
        success : bool
            Returns True if the algorithm succeeded in finding an optimal
            solution.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.

    Notes
    -----
    This method implements the algorithm outlined in [1]_ with ideas from
    [5]_ and a structure inspired by the simpler methods of [3]_ and [4]_.

    First, a presolve procedure based on [5]_ attempts to identify trivial
    infeasibilities, trivial unboundedness, and potential problem
    simplifications. Specifically, it checks for:

    - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial
      constraints;
    - columns of zeros in ``A_eq`` `and` ``A_ub``, representing
      unconstrained variables;
    - column singletons in ``A_eq``, representing fixed variables; and
    - column singletons in ``A_ub``, representing simple bounds.

    If presolve reveals that the problem is unbounded (e.g. an unconstrained
    and unbounded variable has negative cost) or infeasible (e.g. a row of
    zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
    terminates with the appropriate status code. Note that presolve
    terminates as soon as any sign of unboundedness is detected;
    consequently, a problem may be reported as unbounded when in reality the
    problem is infeasible (but infeasibility has not been detected yet).
    Therefore, if the output message states that unboundedness is detected
    in presolve and it is necessary to know whether the problem is actually
    infeasible, set option ``presolve=False``.

    If neither infeasibility nor unboundedness is detected in a single pass
    of the presolve check, bounds are tightened where possible and fixed
    variables are removed from the problem. Then, linearly dependent rows of
    the ``A_eq`` matrix are removed (unless they represent an infeasibility)
    to avoid numerical difficulties in the primary solve routine. Note that
    rows that are nearly linearly dependent (within a prescribed tolerance)
    may also be removed, which can change the optimal solution in rare
    cases.
If this is a concern, eliminate redundancy from your problem formulation and run with option ``rr=False`` or ``presolve=False``. Several potential improvements can be made here: additional presolve checks outlined in [5]_ should be implemented, the presolve routine should be run multiple times (until no further simplifications can be made), and more of the efficiency improvements from [2]_ should be implemented in the redundancy removal routines. After presolve, the problem is transformed to standard form by converting the (tightened) simple bounds to upper bound constraints, introducing non-negative slack variables for inequality constraints, and expressing unbounded variables as the difference between two non-negative variables. The primal-dual path following method begins with initial 'guesses' of the primal and dual variables of the standard form problem and iteratively attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the problem with a gradually reduced logarithmic barrier term added to the objective. This particular implementation uses a homogeneous self-dual formulation, which provides certificates of infeasibility or unboundedness where applicable. The default initial point for the primal and dual variables is that defined in [1]_ Section 4.4 Equation 8.22. Optionally (by setting initial point option ``ip=True``), an alternate (potentially improved) starting point can be calculated according to the additional recommendations of [1]_ Section 4.4. A search direction is calculated using the predictor-corrector method (single correction) proposed by Mehrota and detailed in [1]_ Section 4.1. (A potential improvement would be to implement the method of multiple corrections described in [1]_ Section 4.2.) In practice, this is accomplished by solving the normal equations, [1]_ Section 5.1 Equations 8.31 and 8.32, derived from the Newton equations [1]_ Section 5 Equations 8.25 (compare to [1]_ Section 4 Equations 8.6-8.8). The advantage of solving the normal equations rather than 8.25 directly is that the matrices involved are symmetric positive definite, so Cholesky decomposition can be used rather than the more expensive LU factorization. With the default ``cholesky=True``, this is accomplished using ``scipy.linalg.cho_factor`` followed by forward/backward substitutions via ``scipy.linalg.cho_solve``. With ``cholesky=False`` and ``sym_pos=True``, Cholesky decomposition is performed instead by ``scipy.linalg.solve``. Based on speed tests, this also appears to retain the Cholesky decomposition of the matrix for later use, which is beneficial as the same system is solved four times with different right hand sides in each iteration of the algorithm. In problems with redundancy (e.g. if presolve is turned off with option ``presolve=False``) or if the matrices become ill-conditioned (e.g. as the solution is approached and some decision variables approach zero), Cholesky decomposition can fail. Should this occur, successively more robust solvers (``scipy.linalg.solve`` with ``sym_pos=False`` then ``scipy.linalg.lstsq``) are tried, at the cost of computational efficiency. These solvers can be used from the outset by setting the options ``sym_pos=False`` and ``lstsq=True``, respectively. Note that with the option ``sparse=True``, the normal equations are solved using ``scipy.sparse.linalg.spsolve``. 
Unfortunately, this uses the more expensive LU decomposition from the outset, but for large, sparse problems, the use of sparse linear algebra techniques improves the solve speed despite the use of LU rather than Cholesky decomposition. A simple improvement would be to use the sparse Cholesky decomposition of ``CHOLMOD`` via ``scikit-sparse`` when available. Other potential improvements for combatting issues associated with dense columns in otherwise sparse problems are outlined in [1]_ Section 5.3 and [7]_ Section 4.1-4.2; the latter also discusses the alleviation of accuracy issues associated with the substitution approach to free variables. After calculating the search direction, the maximum possible step size that does not activate the non-negativity constraints is calculated, and the smaller of this step size and unity is applied (as in [1]_ Section 4.1.) [1]_ Section 4.3 suggests improvements for choosing the step size. The new point is tested according to the termination conditions of [1]_ Section 4.5. The same tolerance, which can be set using the ``tol`` option, is used for all checks. (A potential improvement would be to expose the different tolerances to be set independently.) If optimality, unboundedness, or infeasibility is detected, the solve procedure terminates; otherwise it repeats. If optimality is achieved, a postsolve procedure undoes transformations associated with presolve and converting to standard form. It then calculates the residuals (equality constraint violations, which should be very small) and slacks (difference between the left and right hand sides of the upper bound constraints) of the original problem, which are returned with the solution in an ``OptimizeResult`` object. References ---------- .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. .. [2] Andersen, Erling D. "Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. .. [3] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear Programming based on Newton's Method." Unpublished Course Notes, March 2004. Available 2/25/2017 at https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf .. [4] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods." Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at http://www.4er.org/CourseNotes/Book%20B/B-III.pdf .. [5] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear programming." Mathematical Programming 71.2 (1995): 221-245. .. [6] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear programming." Athena Scientific 1 (1997): 997. .. [7] Andersen, Erling D., et al. Implementation of interior point methods for large scale linear programming. HEC/Universite de Geneve, 1996. 
""" _check_unknown_options(unknown_options) if callback is not None: raise NotImplementedError("method 'interior-point' does not support " "callback functions.") # This is an undocumented option for unit testing sparse presolve if _sparse_presolve and A_eq is not None: A_eq = sp.sparse.coo_matrix(A_eq) if _sparse_presolve and A_ub is not None: A_ub = sp.sparse.coo_matrix(A_ub) # These should be warnings, not errors if not sparse and (sp.sparse.issparse(A_eq) or sp.sparse.issparse(A_ub)): sparse = True warn("Sparse constraint matrix detected; setting 'sparse':True.", OptimizeWarning) if sparse and lstsq: warn("Invalid option combination 'sparse':True " "and 'lstsq':True; Sparse least squares is not recommended.", OptimizeWarning) if sparse and not sym_pos: warn("Invalid option combination 'sparse':True " "and 'sym_pos':False; the effect is the same as sparse least " "squares, which is not recommended.", OptimizeWarning) if sparse and cholesky: # Cholesky decomposition is not available for sparse problems warn("Invalid option combination 'sparse':True " "and 'cholesky':True; sparse Colesky decomposition is not " "available.", OptimizeWarning) if lstsq and cholesky: warn("Invalid option combination 'lstsq':True " "and 'cholesky':True; option 'cholesky' has no effect when " "'lstsq' is set True.", OptimizeWarning) valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD') if permc_spec.upper() not in valid_permc_spec: warn("Invalid permc_spec option: '" + str(permc_spec) + "'. " "Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', " "and 'COLAMD'. Reverting to default.", OptimizeWarning) permc_spec = 'MMD_AT_PLUS_A' # This can be an error if not sym_pos and cholesky: raise ValueError( "Invalid option combination 'sym_pos':False " "and 'cholesky':True: Cholesky decomposition is only possible " "for symmetric positive definite matrices.") cholesky = cholesky is None and sym_pos and not sparse and not lstsq iteration = 0 complete = False # will become True if solved in presolve undo = [] # Convert lists to numpy arrays, etc... c, A_ub, b_ub, A_eq, b_eq, bounds = _clean_inputs( c, A_ub, b_ub, A_eq, b_eq, bounds) # Keep the original arrays to calculate slack/residuals for original # problem. c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o = c.copy( ), A_ub.copy(), b_ub.copy(), A_eq.copy(), b_eq.copy() # Solve trivial problem, eliminate variables, tighten bounds, etc... c0 = 0 # we might get a constant term in the objective if presolve is True: (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status, message) = _presolve(c, A_ub, b_ub, A_eq, b_eq, bounds, rr) # If not solved in presolve, solve it if not complete: # Convert problem to standard form A, b, c, c0 = _get_Abc(c, c0, A_ub, b_ub, A_eq, b_eq, bounds, undo) # Solve the problem x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec) # Eliminate artificial variables, re-introduce presolved variables, etc... # need modified bounds here to translate variables appropriately x, fun, slack, con, status, message = _postprocess( x, c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds, complete, undo, status, message, tol) sol = { 'x': x, 'fun': fun, 'slack': slack, 'con': con, 'status': status, 'message': message, 'nit': iteration, "success": status == 0} return OptimizeResult(sol)
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_differentiable_functions.py
from __future__ import division, print_function, absolute_import import numpy as np import scipy.sparse as sps from ._numdiff import approx_derivative, group_columns from ._hessian_update_strategy import HessianUpdateStrategy from scipy.sparse.linalg import LinearOperator from copy import deepcopy FD_METHODS = ('2-point', '3-point', 'cs') class ScalarFunction(object): """Scalar function and its derivatives. This class defines a scalar function F: R^n->R and methods for computing or approximating its first and second derivatives. Notes ----- This class implements a memoization logic. There are methods `fun`, `grad`, hess` and corresponding attributes `f`, `g` and `H`. The following things should be considered: 1. Use only public methods `fun`, `grad` and `hess`. 2. After one of the methods is called, the corresponding attribute will be set. However, a subsequent call with a different argument of *any* of the methods may overwrite the attribute. """ def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step, finite_diff_bounds): if not callable(grad) and grad not in FD_METHODS: raise ValueError("`grad` must be either callable or one of {}." .format(FD_METHODS)) if not (callable(hess) or hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)): raise ValueError("`hess` must be either callable," "HessianUpdateStrategy or one of {}." .format(FD_METHODS)) if grad in FD_METHODS and hess in FD_METHODS: raise ValueError("Whenever the gradient is estimated via " "finite-differences, we require the Hessian " "to be estimated using one of the " "quasi-Newton strategies.") self.x = np.atleast_1d(x0).astype(float) self.n = self.x.size self.nfev = 0 self.ngev = 0 self.nhev = 0 self.f_updated = False self.g_updated = False self.H_updated = False finite_diff_options = {} if grad in FD_METHODS: finite_diff_options["method"] = grad finite_diff_options["rel_step"] = finite_diff_rel_step finite_diff_options["bounds"] = finite_diff_bounds if hess in FD_METHODS: finite_diff_options["method"] = hess finite_diff_options["rel_step"] = finite_diff_rel_step finite_diff_options["as_linear_operator"] = True # Function evaluation def fun_wrapped(x): self.nfev += 1 return fun(x, *args) def update_fun(): self.f = fun_wrapped(self.x) self._update_fun_impl = update_fun self._update_fun() # Gradient evaluation if callable(grad): def grad_wrapped(x): self.ngev += 1 return np.atleast_1d(grad(x, *args)) def update_grad(): self.g = grad_wrapped(self.x) elif grad in FD_METHODS: def update_grad(): self._update_fun() self.g = approx_derivative(fun_wrapped, self.x, f0=self.f, **finite_diff_options) self._update_grad_impl = update_grad self._update_grad() # Hessian Evaluation if callable(hess): self.H = hess(x0, *args) self.H_updated = True self.nhev += 1 if sps.issparse(self.H): def hess_wrapped(x): self.nhev += 1 return sps.csr_matrix(hess(x, *args)) self.H = sps.csr_matrix(self.H) elif isinstance(self.H, LinearOperator): def hess_wrapped(x): self.nhev += 1 return hess(x, *args) else: def hess_wrapped(x): self.nhev += 1 return np.atleast_2d(np.asarray(hess(x, *args))) self.H = np.atleast_2d(np.asarray(self.H)) def update_hess(): self.H = hess_wrapped(self.x) elif hess in FD_METHODS: def update_hess(): self._update_grad() self.H = approx_derivative(grad_wrapped, self.x, f0=self.g, **finite_diff_options) return self.H update_hess() self.H_updated = True elif isinstance(hess, HessianUpdateStrategy): self.H = hess self.H.initialize(self.n, 'hess') self.H_updated = True self.x_prev = None self.g_prev = None def 
update_hess(): self._update_grad() self.H.update(self.x - self.x_prev, self.g - self.g_prev) self._update_hess_impl = update_hess if isinstance(hess, HessianUpdateStrategy): def update_x(x): self._update_grad() self.x_prev = self.x self.g_prev = self.g self.x = x self.f_updated = False self.g_updated = False self.H_updated = False self._update_hess() else: def update_x(x): self.x = x self.f_updated = False self.g_updated = False self.H_updated = False self._update_x_impl = update_x def _update_fun(self): if not self.f_updated: self._update_fun_impl() self.f_updated = True def _update_grad(self): if not self.g_updated: self._update_grad_impl() self.g_updated = True def _update_hess(self): if not self.H_updated: self._update_hess_impl() self.H_updated = True def fun(self, x): if not np.array_equal(x, self.x): self._update_x_impl(x) self._update_fun() return self.f def grad(self, x): if not np.array_equal(x, self.x): self._update_x_impl(x) self._update_grad() return self.g def hess(self, x): if not np.array_equal(x, self.x): self._update_x_impl(x) self._update_hess() return self.H class VectorFunction(object): """Vector function and its derivatives. This class defines a vector function F: R^n->R^m and methods for computing or approximating its first and second derivatives. Notes ----- This class implements a memoization logic. There are methods `fun`, `jac`, hess` and corresponding attributes `f`, `J` and `H`. The following things should be considered: 1. Use only public methods `fun`, `jac` and `hess`. 2. After one of the methods is called, the corresponding attribute will be set. However, a subsequent call with a different argument of *any* of the methods may overwrite the attribute. """ def __init__(self, fun, x0, jac, hess, finite_diff_rel_step, finite_diff_jac_sparsity, finite_diff_bounds, sparse_jacobian): if not callable(jac) and jac not in FD_METHODS: raise ValueError("`jac` must be either callable or one of {}." .format(FD_METHODS)) if not (callable(hess) or hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)): raise ValueError("`hess` must be either callable," "HessianUpdateStrategy or one of {}." 
.format(FD_METHODS)) if jac in FD_METHODS and hess in FD_METHODS: raise ValueError("Whenever the Jacobian is estimated via " "finite-differences, we require the Hessian to " "be estimated using one of the quasi-Newton " "strategies.") self.x = np.atleast_1d(x0).astype(float) self.n = self.x.size self.nfev = 0 self.njev = 0 self.nhev = 0 self.f_updated = False self.J_updated = False self.H_updated = False finite_diff_options = {} if jac in FD_METHODS: finite_diff_options["method"] = jac finite_diff_options["rel_step"] = finite_diff_rel_step if finite_diff_jac_sparsity is not None: sparsity_groups = group_columns(finite_diff_jac_sparsity) finite_diff_options["sparsity"] = (finite_diff_jac_sparsity, sparsity_groups) finite_diff_options["bounds"] = finite_diff_bounds self.x_diff = np.copy(self.x) if hess in FD_METHODS: finite_diff_options["method"] = hess finite_diff_options["rel_step"] = finite_diff_rel_step finite_diff_options["as_linear_operator"] = True self.x_diff = np.copy(self.x) if jac in FD_METHODS and hess in FD_METHODS: raise ValueError("Whenever the Jacobian is estimated via " "finite-differences, we require the Hessian to " "be estimated using one of the quasi-Newton " "strategies.") # Function evaluation def fun_wrapped(x): self.nfev += 1 return np.atleast_1d(fun(x)) def update_fun(): self.f = fun_wrapped(self.x) self._update_fun_impl = update_fun update_fun() self.v = np.zeros_like(self.f) self.m = self.v.size # Jacobian Evaluation if callable(jac): self.J = jac(self.x) self.J_updated = True self.njev += 1 if (sparse_jacobian or sparse_jacobian is None and sps.issparse(self.J)): def jac_wrapped(x): self.njev += 1 return sps.csr_matrix(jac(x)) self.J = sps.csr_matrix(self.J) self.sparse_jacobian = True elif sps.issparse(self.J): def jac_wrapped(x): self.njev += 1 return jac(x).toarray() self.J = self.J.toarray() self.sparse_jacobian = False else: def jac_wrapped(x): self.njev += 1 return np.atleast_2d(jac(x)) self.J = np.atleast_2d(self.J) self.sparse_jacobian = False def update_jac(): self.J = jac_wrapped(self.x) elif jac in FD_METHODS: self.J = approx_derivative(fun_wrapped, self.x, f0=self.f, **finite_diff_options) self.J_updated = True if (sparse_jacobian or sparse_jacobian is None and sps.issparse(self.J)): def update_jac(): self._update_fun() self.J = sps.csr_matrix( approx_derivative(fun_wrapped, self.x, f0=self.f, **finite_diff_options)) self.J = sps.csr_matrix(self.J) self.sparse_jacobian = True elif sps.issparse(self.J): def update_jac(): self._update_fun() self.J = approx_derivative(fun_wrapped, self.x, f0=self.f, **finite_diff_options).toarray() self.J = self.J.toarray() self.sparse_jacobian = False else: def update_jac(): self._update_fun() self.J = np.atleast_2d( approx_derivative(fun_wrapped, self.x, f0=self.f, **finite_diff_options)) self.J = np.atleast_2d(self.J) self.sparse_jacobian = False self._update_jac_impl = update_jac # Define Hessian if callable(hess): self.H = hess(self.x, self.v) self.H_updated = True self.nhev += 1 if sps.issparse(self.H): def hess_wrapped(x, v): self.nhev += 1 return sps.csr_matrix(hess(x, v)) self.H = sps.csr_matrix(self.H) elif isinstance(self.H, LinearOperator): def hess_wrapped(x, v): self.nhev += 1 return hess(x, v) else: def hess_wrapped(x, v): self.nhev += 1 return np.atleast_2d(np.asarray(hess(x, v))) self.H = np.atleast_2d(np.asarray(self.H)) def update_hess(): self.H = hess_wrapped(self.x, self.v) elif hess in FD_METHODS: def jac_dot_v(x, v): return jac_wrapped(x).T.dot(v) def update_hess(): self._update_jac() self.H = 
approx_derivative(jac_dot_v, self.x, f0=self.J.T.dot(self.v), args=(self.v,), **finite_diff_options) update_hess() self.H_updated = True elif isinstance(hess, HessianUpdateStrategy): self.H = hess self.H.initialize(self.n, 'hess') self.H_updated = True self.x_prev = None self.J_prev = None def update_hess(): self._update_jac() # When v is updated before x was updated, then x_prev and # J_prev are None and we need this check. if self.x_prev is not None and self.J_prev is not None: delta_x = self.x - self.x_prev delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v) self.H.update(delta_x, delta_g) self._update_hess_impl = update_hess if isinstance(hess, HessianUpdateStrategy): def update_x(x): self._update_jac() self.x_prev = self.x self.J_prev = self.J self.x = x self.f_updated = False self.J_updated = False self.H_updated = False self._update_hess() else: def update_x(x): self.x = x self.f_updated = False self.J_updated = False self.H_updated = False self._update_x_impl = update_x def _update_v(self, v): if not np.array_equal(v, self.v): self.v = v self.H_updated = False def _update_x(self, x): if not np.array_equal(x, self.x): self._update_x_impl(x) def _update_fun(self): if not self.f_updated: self._update_fun_impl() self.f_updated = True def _update_jac(self): if not self.J_updated: self._update_jac_impl() self.J_updated = True def _update_hess(self): if not self.H_updated: self._update_hess_impl() self.H_updated = True def fun(self, x): self._update_x(x) self._update_fun() return self.f def jac(self, x): self._update_x(x) self._update_jac() return self.J def hess(self, x, v): # v should be updated before x. self._update_v(v) self._update_x(x) self._update_hess() return self.H class LinearVectorFunction(object): """Linear vector function and its derivatives. Defines a linear function F = A x, where x is n-dimensional vector and A is m-by-n matrix. The Jacobian is constant and equals to A. The Hessian is identically zero and it is returned as a csr matrix. """ def __init__(self, A, x0, sparse_jacobian): if sparse_jacobian or sparse_jacobian is None and sps.issparse(A): self.J = sps.csr_matrix(A) self.sparse_jacobian = True elif sps.issparse(A): self.J = A.toarray() self.sparse_jacobian = False else: self.J = np.atleast_2d(A) self.sparse_jacobian = False self.m, self.n = self.J.shape self.x = np.atleast_1d(x0).astype(float) self.f = self.J.dot(self.x) self.f_updated = True self.v = np.zeros(self.m, dtype=float) self.H = sps.csr_matrix((self.n, self.n)) def _update_x(self, x): if not np.array_equal(x, self.x): self.x = x self.f_updated = False def fun(self, x): self._update_x(x) if not self.f_updated: self.f = self.J.dot(x) self.f_updated = True return self.f def jac(self, x): self._update_x(x) return self.J def hess(self, x, v): self._update_x(x) self.v = v return self.H class IdentityVectorFunction(LinearVectorFunction): """Identity vector function and its derivatives. The Jacobian is the identity matrix, returned as a dense array when `sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is identically zero and it is returned as a csr matrix. """ def __init__(self, x0, sparse_jacobian): n = len(x0) if sparse_jacobian or sparse_jacobian is None: A = sps.eye(n, format='csr') sparse_jacobian = True else: A = np.eye(n) sparse_jacobian = False super(IdentityVectorFunction, self).__init__(A, x0, sparse_jacobian)
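
# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original source):
# ``ScalarFunction`` memoizes function/gradient/Hessian values at the
# current point, so repeated queries at the same ``x`` reuse cached results.
# This sketch relies on the private class defined above (importable as
# ``scipy.optimize._differentiable_functions.ScalarFunction``); the
# Rosenbrock data and option values are chosen purely for demonstration.
import numpy as np
from scipy.optimize import rosen, rosen_der

x0 = np.array([1.3, 0.7])
sf = ScalarFunction(rosen, x0, (), rosen_der, '2-point',
                    None, (-np.inf, np.inf))
f = sf.fun(x0)    # cached: the function was already evaluated at x0
g = sf.grad(x0)   # cached: no additional gradient evaluation at the same x
print(sf.nfev, sf.ngev)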
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/linesearch.py
""" Functions --------- .. autosummary:: :toctree: generated/ line_search_armijo line_search_wolfe1 line_search_wolfe2 scalar_search_wolfe1 scalar_search_wolfe2 """ from __future__ import division, print_function, absolute_import from warnings import warn from scipy.optimize import minpack2 import numpy as np from scipy._lib.six import xrange __all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2', 'scalar_search_wolfe1', 'scalar_search_wolfe2', 'line_search_armijo'] class LineSearchWarning(RuntimeWarning): pass #------------------------------------------------------------------------------ # Minpack's Wolfe line and scalar searches #------------------------------------------------------------------------------ def line_search_wolfe1(f, fprime, xk, pk, gfk=None, old_fval=None, old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8, xtol=1e-14): """ As `scalar_search_wolfe1` but do a line search to direction `pk` Parameters ---------- f : callable Function `f(x)` fprime : callable Gradient of `f` xk : array_like Current point pk : array_like Search direction gfk : array_like, optional Gradient of `f` at point `xk` old_fval : float, optional Value of `f` at point `xk` old_old_fval : float, optional Value of `f` at point preceding `xk` The rest of the parameters are the same as for `scalar_search_wolfe1`. Returns ------- stp, f_count, g_count, fval, old_fval As in `line_search_wolfe1` gval : array Gradient of `f` at the final point """ if gfk is None: gfk = fprime(xk) if isinstance(fprime, tuple): eps = fprime[1] fprime = fprime[0] newargs = (f, eps) + args gradient = False else: newargs = args gradient = True gval = [gfk] gc = [0] fc = [0] def phi(s): fc[0] += 1 return f(xk + s*pk, *args) def derphi(s): gval[0] = fprime(xk + s*pk, *newargs) if gradient: gc[0] += 1 else: fc[0] += len(xk) + 1 return np.dot(gval[0], pk) derphi0 = np.dot(gfk, pk) stp, fval, old_fval = scalar_search_wolfe1( phi, derphi, old_fval, old_old_fval, derphi0, c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol) return stp, fc[0], gc[0], fval, old_fval, gval[0] def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None, c1=1e-4, c2=0.9, amax=50, amin=1e-8, xtol=1e-14): """ Scalar function search for alpha that satisfies strong Wolfe conditions alpha > 0 is assumed to be a descent direction. Parameters ---------- phi : callable phi(alpha) Function at point `alpha` derphi : callable dphi(alpha) Derivative `d phi(alpha)/ds`. Returns a scalar. phi0 : float, optional Value of `f` at 0 old_phi0 : float, optional Value of `f` at the previous point derphi0 : float, optional Value `derphi` at 0 c1, c2 : float, optional Wolfe parameters amax, amin : float, optional Maximum and minimum step size xtol : float, optional Relative tolerance for an acceptable step. Returns ------- alpha : float Step size, or None if no suitable step was found phi : float Value of `phi` at the new point `alpha` phi0 : float Value of `phi` at `alpha=0` Notes ----- Uses routine DCSRCH from MINPACK. """ if phi0 is None: phi0 = phi(0.) if derphi0 is None: derphi0 = derphi(0.) 
if old_phi0 is not None and derphi0 != 0: alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) if alpha1 < 0: alpha1 = 1.0 else: alpha1 = 1.0 phi1 = phi0 derphi1 = derphi0 isave = np.zeros((2,), np.intc) dsave = np.zeros((13,), float) task = b'START' maxiter = 100 for i in xrange(maxiter): stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1, c1, c2, xtol, task, amin, amax, isave, dsave) if task[:2] == b'FG': alpha1 = stp phi1 = phi(stp) derphi1 = derphi(stp) else: break else: # maxiter reached, the line search did not converge stp = None if task[:5] == b'ERROR' or task[:4] == b'WARN': stp = None # failed return stp, phi1, phi0 line_search = line_search_wolfe1 #------------------------------------------------------------------------------ # Pure-Python Wolfe line and scalar searches #------------------------------------------------------------------------------ def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None, old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None, extra_condition=None, maxiter=10): """Find alpha that satisfies strong Wolfe conditions. Parameters ---------- f : callable f(x,*args) Objective function. myfprime : callable f'(x,*args) Objective function gradient. xk : ndarray Starting point. pk : ndarray Search direction. gfk : ndarray, optional Gradient value for x=xk (xk being the current parameter estimate). Will be recomputed if omitted. old_fval : float, optional Function value for x=xk. Will be recomputed if omitted. old_old_fval : float, optional Function value for the point preceding x=xk args : tuple, optional Additional arguments passed to objective function. c1 : float, optional Parameter for Armijo condition rule. c2 : float, optional Parameter for curvature condition rule. amax : float, optional Maximum step size extra_condition : callable, optional A callable of the form ``extra_condition(alpha, x, f, g)`` returning a boolean. Arguments are the proposed step ``alpha`` and the corresponding ``x``, ``f`` and ``g`` values. The line search accepts the value of ``alpha`` only if this callable returns ``True``. If the callable returns ``False`` for the step length, the algorithm will continue with new iterates. The callable is only called for iterates satisfying the strong Wolfe conditions. maxiter : int, optional Maximum number of iterations to perform Returns ------- alpha : float or None Alpha for which ``x_new = x0 + alpha * pk``, or None if the line search algorithm did not converge. fc : int Number of function evaluations made. gc : int Number of gradient evaluations made. new_fval : float or None New function value ``f(x_new)=f(x0+alpha*pk)``, or None if the line search algorithm did not converge. old_fval : float Old function value ``f(x0)``. new_slope : float or None The local slope along the search direction at the new value ``<myfprime(x_new), pk>``, or None if the line search algorithm did not converge. Notes ----- Uses the line search algorithm to enforce strong Wolfe conditions. See Wright and Nocedal, 'Numerical Optimization', 1999, pg. 59-60. For the zoom phase it uses an algorithm by [...]. 
""" fc = [0] gc = [0] gval = [None] gval_alpha = [None] def phi(alpha): fc[0] += 1 return f(xk + alpha * pk, *args) if isinstance(myfprime, tuple): def derphi(alpha): fc[0] += len(xk) + 1 eps = myfprime[1] fprime = myfprime[0] newargs = (f, eps) + args gval[0] = fprime(xk + alpha * pk, *newargs) # store for later use gval_alpha[0] = alpha return np.dot(gval[0], pk) else: fprime = myfprime def derphi(alpha): gc[0] += 1 gval[0] = fprime(xk + alpha * pk, *args) # store for later use gval_alpha[0] = alpha return np.dot(gval[0], pk) if gfk is None: gfk = fprime(xk, *args) derphi0 = np.dot(gfk, pk) if extra_condition is not None: # Add the current gradient as argument, to avoid needless # re-evaluation def extra_condition2(alpha, phi): if gval_alpha[0] != alpha: derphi(alpha) x = xk + alpha * pk return extra_condition(alpha, x, phi, gval[0]) else: extra_condition2 = None alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2( phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax, extra_condition2, maxiter=maxiter) if derphi_star is None: warn('The line search algorithm did not converge', LineSearchWarning) else: # derphi_star is a number (derphi) -- so use the most recently # calculated gradient used in computing it derphi = gfk*pk # this is the gradient at the next step no need to compute it # again in the outer loop. derphi_star = gval[0] return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star def scalar_search_wolfe2(phi, derphi=None, phi0=None, old_phi0=None, derphi0=None, c1=1e-4, c2=0.9, amax=None, extra_condition=None, maxiter=10): """Find alpha that satisfies strong Wolfe conditions. alpha > 0 is assumed to be a descent direction. Parameters ---------- phi : callable f(x) Objective scalar function. derphi : callable f'(x), optional Objective function derivative (can be None) phi0 : float, optional Value of phi at s=0 old_phi0 : float, optional Value of phi at previous point derphi0 : float, optional Value of derphi at s=0 c1 : float, optional Parameter for Armijo condition rule. c2 : float, optional Parameter for curvature condition rule. amax : float, optional Maximum step size extra_condition : callable, optional A callable of the form ``extra_condition(alpha, phi_value)`` returning a boolean. The line search accepts the value of ``alpha`` only if this callable returns ``True``. If the callable returns ``False`` for the step length, the algorithm will continue with new iterates. The callable is only called for iterates satisfying the strong Wolfe conditions. maxiter : int, optional Maximum number of iterations to perform Returns ------- alpha_star : float or None Best alpha, or None if the line search algorithm did not converge. phi_star : float phi at alpha_star phi0 : float phi at 0 derphi_star : float or None derphi at alpha_star, or None if the line search algorithm did not converge. Notes ----- Uses the line search algorithm to enforce strong Wolfe conditions. See Wright and Nocedal, 'Numerical Optimization', 1999, pg. 59-60. For the zoom phase it uses an algorithm by [...]. """ if phi0 is None: phi0 = phi(0.) if derphi0 is None and derphi is not None: derphi0 = derphi(0.) 
alpha0 = 0 if old_phi0 is not None and derphi0 != 0: alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) else: alpha1 = 1.0 if alpha1 < 0: alpha1 = 1.0 phi_a1 = phi(alpha1) #derphi_a1 = derphi(alpha1) evaluated below phi_a0 = phi0 derphi_a0 = derphi0 if extra_condition is None: extra_condition = lambda alpha, phi: True for i in xrange(maxiter): if alpha1 == 0 or (amax is not None and alpha0 == amax): # alpha1 == 0: This shouldn't happen. Perhaps the increment has # slipped below machine precision? alpha_star = None phi_star = phi0 phi0 = old_phi0 derphi_star = None if alpha1 == 0: msg = 'Rounding errors prevent the line search from converging' else: msg = "The line search algorithm could not find a solution " + \ "less than or equal to amax: %s" % amax warn(msg, LineSearchWarning) break if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \ ((phi_a1 >= phi_a0) and (i > 1)): alpha_star, phi_star, derphi_star = \ _zoom(alpha0, alpha1, phi_a0, phi_a1, derphi_a0, phi, derphi, phi0, derphi0, c1, c2, extra_condition) break derphi_a1 = derphi(alpha1) if (abs(derphi_a1) <= -c2*derphi0): if extra_condition(alpha1, phi_a1): alpha_star = alpha1 phi_star = phi_a1 derphi_star = derphi_a1 break if (derphi_a1 >= 0): alpha_star, phi_star, derphi_star = \ _zoom(alpha1, alpha0, phi_a1, phi_a0, derphi_a1, phi, derphi, phi0, derphi0, c1, c2, extra_condition) break alpha2 = 2 * alpha1 # increase by factor of two on each iteration if amax is not None: alpha2 = min(alpha2, amax) alpha0 = alpha1 alpha1 = alpha2 phi_a0 = phi_a1 phi_a1 = phi(alpha1) derphi_a0 = derphi_a1 else: # stopping test maxiter reached alpha_star = alpha1 phi_star = phi_a1 derphi_star = None warn('The line search algorithm did not converge', LineSearchWarning) return alpha_star, phi_star, phi0, derphi_star def _cubicmin(a, fa, fpa, b, fb, c, fc): """ Finds the minimizer for a cubic polynomial that goes through the points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa. If no minimizer can be found return None """ # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D with np.errstate(divide='raise', over='raise', invalid='raise'): try: C = fpa db = b - a dc = c - a denom = (db * dc) ** 2 * (db - dc) d1 = np.empty((2, 2)) d1[0, 0] = dc ** 2 d1[0, 1] = -db ** 2 d1[1, 0] = -dc ** 3 d1[1, 1] = db ** 3 [A, B] = np.dot(d1, np.asarray([fb - fa - C * db, fc - fa - C * dc]).flatten()) A /= denom B /= denom radical = B * B - 3 * A * C xmin = a + (-B + np.sqrt(radical)) / (3 * A) except ArithmeticError: return None if not np.isfinite(xmin): return None return xmin def _quadmin(a, fa, fpa, b, fb): """ Finds the minimizer for a quadratic polynomial that goes through the points (a,fa), (b,fb) with derivative at a of fpa, """ # f(x) = B*(x-a)^2 + C*(x-a) + D with np.errstate(divide='raise', over='raise', invalid='raise'): try: D = fa C = fpa db = b - a * 1.0 B = (fb - D - C * db) / (db * db) xmin = a - C / (2.0 * B) except ArithmeticError: return None if not np.isfinite(xmin): return None return xmin def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, phi, derphi, phi0, derphi0, c1, c2, extra_condition): """ Part of the optimization algorithm in `scalar_search_wolfe2`. """ maxiter = 10 i = 0 delta1 = 0.2 # cubic interpolant check delta2 = 0.1 # quadratic interpolant check phi_rec = phi0 a_rec = 0 while True: # interpolate to find a trial step length between a_lo and # a_hi Need to choose interpolation here. 
Use cubic # interpolation and then if the result is within delta * # dalpha or outside of the interval bounded by a_lo or a_hi # then use quadratic interpolation, if the result is still too # close, then use bisection dalpha = a_hi - a_lo if dalpha < 0: a, b = a_hi, a_lo else: a, b = a_lo, a_hi # minimizer of cubic interpolant # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) # # if the result is too close to the end points (or out of the # interval) then use quadratic interpolation with phi_lo, # derphi_lo and phi_hi if the result is still too close to the # end points (or out of the interval) then use bisection if (i > 0): cchk = delta1 * dalpha a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec, phi_rec) if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk): qchk = delta2 * dalpha a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk): a_j = a_lo + 0.5*dalpha # Check new value of a_j phi_aj = phi(a_j) if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo): phi_rec = phi_hi a_rec = a_hi a_hi = a_j phi_hi = phi_aj else: derphi_aj = derphi(a_j) if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj): a_star = a_j val_star = phi_aj valprime_star = derphi_aj break if derphi_aj*(a_hi - a_lo) >= 0: phi_rec = phi_hi a_rec = a_hi a_hi = a_lo phi_hi = phi_lo else: phi_rec = phi_lo a_rec = a_lo a_lo = a_j phi_lo = phi_aj derphi_lo = derphi_aj i += 1 if (i > maxiter): # Failed to find a conforming step size a_star = None val_star = None valprime_star = None break return a_star, val_star, valprime_star #------------------------------------------------------------------------------ # Armijo line and scalar searches #------------------------------------------------------------------------------ def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): """Minimize over alpha, the function ``f(xk+alpha pk)``. Parameters ---------- f : callable Function to be minimized. xk : array_like Current point. pk : array_like Search direction. gfk : array_like Gradient of `f` at point `xk`. old_fval : float Value of `f` at point `xk`. args : tuple, optional Optional arguments. c1 : float, optional Value to control stopping criterion. alpha0 : scalar, optional Value of `alpha` at start of the optimization. Returns ------- alpha f_count f_val_at_alpha Notes ----- Uses the interpolation algorithm (Armijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 """ xk = np.atleast_1d(xk) fc = [0] def phi(alpha1): fc[0] += 1 return f(xk + alpha1*pk, *args) if old_fval is None: phi0 = phi(0.) else: phi0 = old_fval # compute f(xk) -- done in past loop derphi0 = np.dot(gfk, pk) alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, alpha0=alpha0) return alpha, fc[0], phi1 def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): """ Compatibility wrapper for `line_search_armijo` """ r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1, alpha0=alpha0) return r[0], r[1], 0, r[2] def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0): """Minimize over alpha, the function ``phi(alpha)``. Uses the interpolation algorithm (Armijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 alpha > 0 is assumed to be a descent direction. 
Returns ------- alpha phi1 """ phi_a0 = phi(alpha0) if phi_a0 <= phi0 + c1*alpha0*derphi0: return alpha0, phi_a0 # Otherwise compute the minimizer of a quadratic interpolant: alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) phi_a1 = phi(alpha1) if (phi_a1 <= phi0 + c1*alpha1*derphi0): return alpha1, phi_a1 # Otherwise loop with cubic interpolation until we find an alpha which # satisfies the first Wolfe condition (since we are backtracking, we will # assume that the value of alpha is not too small and satisfies the second # condition. while alpha1 > amin: # we are assuming alpha>0 is a descent direction factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) a = a / factor b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) b = b / factor alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) phi_a2 = phi(alpha2) if (phi_a2 <= phi0 + c1*alpha2*derphi0): return alpha2, phi_a2 if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: alpha2 = alpha1 / 2.0 alpha0 = alpha1 alpha1 = alpha2 phi_a0 = phi_a1 phi_a1 = phi_a2 # Failed to find a suitable step length return None, phi_a1 #------------------------------------------------------------------------------ # Non-monotone line search for DF-SANE #------------------------------------------------------------------------------ def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta, gamma=1e-4, tau_min=0.1, tau_max=0.5): """ Nonmonotone backtracking line search as described in [1]_ Parameters ---------- f : callable Function returning a tuple ``(f, F)`` where ``f`` is the value of a merit function and ``F`` the residual. x_k : ndarray Initial position d : ndarray Search direction prev_fs : float List of previous merit function values. Should have ``len(prev_fs) <= M`` where ``M`` is the nonmonotonicity window parameter. eta : float Allowed merit function increase, see [1]_ gamma, tau_min, tau_max : float, optional Search parameters, see [1]_ Returns ------- alpha : float Step length xp : ndarray Next position fp : float Merit function value at next position Fp : ndarray Residual at next position References ---------- [1] "Spectral residual method without gradient information for solving large-scale nonlinear systems of equations." W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). """ f_k = prev_fs[-1] f_bar = max(prev_fs) alpha_p = 1 alpha_m = 1 alpha = 1 while True: xp = x_k + alpha_p * d fp, Fp = f(xp) if fp <= f_bar + eta - gamma * alpha_p**2 * f_k: alpha = alpha_p break alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) xp = x_k - alpha_m * d fp, Fp = f(xp) if fp <= f_bar + eta - gamma * alpha_m**2 * f_k: alpha = -alpha_m break alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) return alpha, xp, fp, Fp def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta, gamma=1e-4, tau_min=0.1, tau_max=0.5, nu=0.85): """ Nonmonotone line search from [1] Parameters ---------- f : callable Function returning a tuple ``(f, F)`` where ``f`` is the value of a merit function and ``F`` the residual. x_k : ndarray Initial position d : ndarray Search direction f_k : float Initial merit function value C, Q : float Control parameters. 
On the first iteration, give values Q=1.0, C=f_k eta : float Allowed merit function increase, see [1]_ nu, gamma, tau_min, tau_max : float, optional Search parameters, see [1]_ Returns ------- alpha : float Step length xp : ndarray Next position fp : float Merit function value at next position Fp : ndarray Residual at next position C : float New value for the control parameter C Q : float New value for the control parameter Q References ---------- .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line search and its application to the spectral residual method'', IMA J. Numer. Anal. 29, 814 (2009). """ alpha_p = 1 alpha_m = 1 alpha = 1 while True: xp = x_k + alpha_p * d fp, Fp = f(xp) if fp <= C + eta - gamma * alpha_p**2 * f_k: alpha = alpha_p break alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) xp = x_k - alpha_m * d fp, Fp = f(xp) if fp <= C + eta - gamma * alpha_m**2 * f_k: alpha = -alpha_m break alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) # Update C and Q Q_next = nu * Q + 1 C = (nu * Q * (C + eta) + fp) / Q_next Q = Q_next return alpha, xp, fp, Fp, C, Q
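
# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original source):
# a strong-Wolfe line search along the steepest-descent direction of a
# simple quadratic. The test function, starting point, and module path are
# assumptions made for this sketch.
import numpy as np
from scipy.optimize.linesearch import line_search_wolfe2

def f(x):
    return float(np.dot(x, x))

def fprime(x):
    return 2.0 * x

xk = np.array([1.0, 1.0])
pk = -fprime(xk)  # descent direction
alpha, fc, gc, new_fval, old_fval, new_slope = line_search_wolfe2(
    f, fprime, xk, pk)
print(alpha, new_fval)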
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_basinhopping.py
""" basinhopping: The basinhopping global optimization algorithm """ from __future__ import division, print_function, absolute_import import numpy as np import math from numpy import cos, sin import scipy.optimize import collections from scipy._lib._util import check_random_state __all__ = ['basinhopping'] class Storage(object): """ Class used to store the lowest energy structure """ def __init__(self, minres): self._add(minres) def _add(self, minres): self.minres = minres self.minres.x = np.copy(minres.x) def update(self, minres): if minres.fun < self.minres.fun: self._add(minres) return True else: return False def get_lowest(self): return self.minres class BasinHoppingRunner(object): """This class implements the core of the basinhopping algorithm. x0 : ndarray The starting coordinates. minimizer : callable The local minimizer, with signature ``result = minimizer(x)``. The return value is an `optimize.OptimizeResult` object. step_taking : callable This function displaces the coordinates randomly. Signature should be ``x_new = step_taking(x)``. Note that `x` may be modified in-place. accept_tests : list of callables Each test is passed the kwargs `f_new`, `x_new`, `f_old` and `x_old`. These tests will be used to judge whether or not to accept the step. The acceptable return values are True, False, or ``"force accept"``. If any of the tests return False then the step is rejected. If ``"force accept"``, then this will override any other tests in order to accept the step. This can be used, for example, to forcefully escape from a local minimum that ``basinhopping`` is trapped in. disp : bool, optional Display status messages. """ def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False): self.x = np.copy(x0) self.minimizer = minimizer self.step_taking = step_taking self.accept_tests = accept_tests self.disp = disp self.nstep = 0 # initialize return object self.res = scipy.optimize.OptimizeResult() self.res.minimization_failures = 0 # do initial minimization minres = minimizer(self.x) if not minres.success: self.res.minimization_failures += 1 if self.disp: print("warning: basinhopping: local minimization failure") self.x = np.copy(minres.x) self.energy = minres.fun if self.disp: print("basinhopping step %d: f %g" % (self.nstep, self.energy)) # initialize storage class self.storage = Storage(minres) if hasattr(minres, "nfev"): self.res.nfev = minres.nfev if hasattr(minres, "njev"): self.res.njev = minres.njev if hasattr(minres, "nhev"): self.res.nhev = minres.nhev def _monte_carlo_step(self): """Do one Monte Carlo iteration Randomly displace the coordinates, minimize, and decide whether or not to accept the new coordinates. """ # Take a random step. Make a copy of x because the step_taking # algorithm might change x in place x_after_step = np.copy(self.x) x_after_step = self.step_taking(x_after_step) # do a local minimization minres = self.minimizer(x_after_step) x_after_quench = minres.x energy_after_quench = minres.fun if not minres.success: self.res.minimization_failures += 1 if self.disp: print("warning: basinhopping: local minimization failure") if hasattr(minres, "nfev"): self.res.nfev += minres.nfev if hasattr(minres, "njev"): self.res.njev += minres.njev if hasattr(minres, "nhev"): self.res.nhev += minres.nhev # accept the move based on self.accept_tests. If any test is False, # then reject the step. If any test returns the special string # 'force accept', then accept the step regardless. 
This can be used # to forcefully escape from a local minimum if normal basin hopping # steps are not sufficient. accept = True for test in self.accept_tests: testres = test(f_new=energy_after_quench, x_new=x_after_quench, f_old=self.energy, x_old=self.x) if testres == 'force accept': accept = True break elif testres is None: raise ValueError("accept_tests must return True, False, or " "'force accept'") elif not testres: accept = False # Report the result of the acceptance test to the take step class. # This is for adaptive step taking if hasattr(self.step_taking, "report"): self.step_taking.report(accept, f_new=energy_after_quench, x_new=x_after_quench, f_old=self.energy, x_old=self.x) return accept, minres def one_cycle(self): """Do one cycle of the basinhopping algorithm """ self.nstep += 1 new_global_min = False accept, minres = self._monte_carlo_step() if accept: self.energy = minres.fun self.x = np.copy(minres.x) new_global_min = self.storage.update(minres) # print some information if self.disp: self.print_report(minres.fun, accept) if new_global_min: print("found new global minimum on step %d with function" " value %g" % (self.nstep, self.energy)) # save some variables as BasinHoppingRunner attributes self.xtrial = minres.x self.energy_trial = minres.fun self.accept = accept return new_global_min def print_report(self, energy_trial, accept): """print a status update""" minres = self.storage.get_lowest() print("basinhopping step %d: f %g trial_f %g accepted %d " " lowest_f %g" % (self.nstep, self.energy, energy_trial, accept, minres.fun)) class AdaptiveStepsize(object): """ Class to implement adaptive stepsize. This class wraps the step taking class and modifies the stepsize to ensure the true acceptance rate is as close as possible to the target. Parameters ---------- takestep : callable The step taking routine. Must contain modifiable attribute takestep.stepsize accept_rate : float, optional The target step acceptance rate interval : int, optional Interval for how often to update the stepsize factor : float, optional The step size is multiplied or divided by this factor upon each update. verbose : bool, optional Print information about each update """ def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9, verbose=True): self.takestep = takestep self.target_accept_rate = accept_rate self.interval = interval self.factor = factor self.verbose = verbose self.nstep = 0 self.nstep_tot = 0 self.naccept = 0 def __call__(self, x): return self.take_step(x) def _adjust_step_size(self): old_stepsize = self.takestep.stepsize accept_rate = float(self.naccept) / self.nstep if accept_rate > self.target_accept_rate: # We're accepting too many steps. This generally means we're # trapped in a basin. Take bigger steps self.takestep.stepsize /= self.factor else: # We're not accepting enough steps. Take smaller steps self.takestep.stepsize *= self.factor if self.verbose: print("adaptive stepsize: acceptance rate %f target %f new " "stepsize %g old stepsize %g" % (accept_rate, self.target_accept_rate, self.takestep.stepsize, old_stepsize)) def take_step(self, x): self.nstep += 1 self.nstep_tot += 1 if self.nstep % self.interval == 0: self._adjust_step_size() return self.takestep(x) def report(self, accept, **kwargs): "called by basinhopping to report the result of the step" if accept: self.naccept += 1 class RandomDisplacement(object): """ Add a random displacement of maximum size `stepsize` to each coordinate Calling this updates `x` in-place. 
Parameters ---------- stepsize : float, optional Maximum stepsize in any dimension random_state : None or `np.random.RandomState` instance, optional The random number generator that generates the displacements """ def __init__(self, stepsize=0.5, random_state=None): self.stepsize = stepsize self.random_state = check_random_state(random_state) def __call__(self, x): x += self.random_state.uniform(-self.stepsize, self.stepsize, np.shape(x)) return x class MinimizerWrapper(object): """ wrap a minimizer function as a minimizer class """ def __init__(self, minimizer, func=None, **kwargs): self.minimizer = minimizer self.func = func self.kwargs = kwargs def __call__(self, x0): if self.func is None: return self.minimizer(x0, **self.kwargs) else: return self.minimizer(self.func, x0, **self.kwargs) class Metropolis(object): """ Metropolis acceptance criterion Parameters ---------- T : float The "temperature" parameter for the accept or reject criterion. random_state : None or `np.random.RandomState` object Random number generator used for acceptance test """ def __init__(self, T, random_state=None): # Avoid ZeroDivisionError since "MBH can be regarded as a special case # of the BH framework with the Metropolis criterion, where temperature # T = 0." (Reject all steps that increase energy.) self.beta = 1.0 / T if T != 0 else float('inf') self.random_state = check_random_state(random_state) def accept_reject(self, energy_new, energy_old): """ If new energy is lower than old, it will always be accepted. If new is higher than old, there is a chance it will be accepted, less likely for larger differences. """ w = math.exp(min(0, -float(energy_new - energy_old) * self.beta)) rand = self.random_state.rand() return w >= rand def __call__(self, **kwargs): """ f_new and f_old are mandatory in kwargs """ return bool(self.accept_reject(kwargs["f_new"], kwargs["f_old"])) def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5, minimizer_kwargs=None, take_step=None, accept_test=None, callback=None, interval=50, disp=False, niter_success=None, seed=None): """ Find the global minimum of a function using the basin-hopping algorithm Basin-hopping is a two-phase method that combines a global stepping algorithm with local minimization at each step. Designed to mimic the natural process of energy minimization of clusters of atoms, it works well for similar problems with "funnel-like, but rugged" energy landscapes [5]_. As the step-taking, step acceptance, and minimization methods are all customizable, this function can also be used to implement other two-phase methods. Parameters ---------- func : callable ``f(x, *args)`` Function to be optimized. ``args`` can be passed as an optional item in the dict ``minimizer_kwargs`` x0 : array_like Initial guess. niter : integer, optional The number of basin-hopping iterations T : float, optional The "temperature" parameter for the accept or reject criterion. Higher "temperatures" mean that larger jumps in function value will be accepted. For best results ``T`` should be comparable to the separation (in function value) between local minima. stepsize : float, optional Maximum step size for use in the random displacement. minimizer_kwargs : dict, optional Extra keyword arguments to be passed to the local minimizer ``scipy.optimize.minimize()`` Some important options could be: method : str The minimization method (e.g. ``"L-BFGS-B"``) args : tuple Extra arguments passed to the objective function (``func``) and its derivatives (Jacobian, Hessian). 
take_step : callable ``take_step(x)``, optional Replace the default step-taking routine with this routine. The default step-taking routine is a random displacement of the coordinates, but other step-taking algorithms may be better for some systems. ``take_step`` can optionally have the attribute ``take_step.stepsize``. If this attribute exists, then ``basinhopping`` will adjust ``take_step.stepsize`` in order to try to optimize the global minimum search. accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old)``, optional Define a test which will be used to judge whether or not to accept the step. This will be used in addition to the Metropolis test based on "temperature" ``T``. The acceptable return values are True, False, or ``"force accept"``. If any of the tests return False then the step is rejected. If the latter, then this will override any other tests in order to accept the step. This can be used, for example, to forcefully escape from a local minimum that ``basinhopping`` is trapped in. callback : callable, ``callback(x, f, accept)``, optional A callback function which will be called for all minima found. ``x`` and ``f`` are the coordinates and function value of the trial minimum, and ``accept`` is whether or not that minimum was accepted. This can be used, for example, to save the lowest N minima found. Also, ``callback`` can be used to specify a user defined stop criterion by optionally returning True to stop the ``basinhopping`` routine. interval : integer, optional interval for how often to update the ``stepsize`` disp : bool, optional Set to True to print status messages niter_success : integer, optional Stop the run if the global minimum candidate remains the same for this number of iterations. seed : int or `np.random.RandomState`, optional If `seed` is not specified the `np.RandomState` singleton is used. If `seed` is an int, a new `np.random.RandomState` instance is used, seeded with seed. If `seed` is already a `np.random.RandomState instance`, then that `np.random.RandomState` instance is used. Specify `seed` for repeatable minimizations. The random numbers generated with this seed only affect the default Metropolis `accept_test` and the default `take_step`. If you supply your own `take_step` and `accept_test`, and these functions use random number generation, then those functions are responsible for the state of their random number generator. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``fun`` the value of the function at the solution, and ``message`` which describes the cause of the termination. The ``OptimizeResult`` object returned by the selected minimizer at the lowest minimum is also contained within this object and can be accessed through the ``lowest_optimization_result`` attribute. See `OptimizeResult` for a description of other attributes. See Also -------- minimize : The local minimization function called once for each basinhopping step. ``minimizer_kwargs`` is passed to this routine. Notes ----- Basin-hopping is a stochastic algorithm which attempts to find the global minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_ [4]_. The algorithm in its current form was described by David Wales and Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/. 
The algorithm is iterative with each cycle composed of the following
    features

    1) random perturbation of the coordinates

    2) local minimization

    3) accept or reject the new coordinates based on the minimized function
       value

    The acceptance test used here is the Metropolis criterion of standard Monte
    Carlo algorithms, although there are many other possibilities [3]_.

    This global minimization method has been shown to be extremely efficient
    for a wide variety of problems in physics and chemistry.  It is
    particularly useful when the function has many minima separated by large
    barriers. See the Cambridge Cluster Database
    http://www-wales.ch.cam.ac.uk/CCD.html for databases of molecular systems
    that have been optimized primarily using basin-hopping.  This database
    includes minimization problems exceeding 300 degrees of freedom.

    See the free software program GMIN (http://www-wales.ch.cam.ac.uk/GMIN)
    for a Fortran implementation of basin-hopping.  This implementation has
    many different variations of the procedure described above, including more
    advanced step taking algorithms and alternate acceptance criteria.

    For stochastic global optimization there is no way to determine if the
    true global minimum has actually been found. Instead, as a consistency
    check, the algorithm can be run from a number of different random starting
    points to ensure the lowest minimum found in each example has converged to
    the global minimum.  For this reason ``basinhopping`` will by default
    simply run for the number of iterations ``niter`` and return the lowest
    minimum found.  It is left to the user to ensure that this is in fact the
    global minimum.

    Choosing ``stepsize``:  This is a crucial parameter in ``basinhopping``
    and depends on the problem being solved.  The step is chosen uniformly in
    the region from x0-stepsize to x0+stepsize, in each dimension.  Ideally it
    should be comparable to the typical separation (in argument values) between
    local minima of the function being optimized.  ``basinhopping`` will, by
    default, adjust ``stepsize`` to find an optimal value, but this may take
    many iterations.  You will get quicker results if you set a sensible
    initial value for ``stepsize``.

    Choosing ``T``: The parameter ``T`` is the "temperature" used in the
    Metropolis criterion.  Basinhopping steps are always accepted if
    ``func(xnew) < func(xold)``.  Otherwise, they are accepted with
    probability::

        exp( -(func(xnew) - func(xold)) / T )

    So, for best results, ``T`` should be comparable to the typical
    difference (in function values) between local minima.  (The height of
    "walls" between local minima is irrelevant.)

    If ``T`` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all
    steps that increase energy are rejected.

    .. versionadded:: 0.12.0

    References
    ----------
    .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press,
        Cambridge, UK.
    .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and
        the Lowest Energy Structures of Lennard-Jones Clusters Containing up to
        110 Atoms.  Journal of Physical Chemistry A, 1997, 101, 5111.
    .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the
        multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA,
        1987, 84, 6611.
    .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters,
        crystals, and biomolecules, Science, 1999, 285, 1368.
    ..
[5] Olson, B., Hashmi, I., Molloy, K., and Shehu1, A., Basin Hopping as a General and Versatile Optimization Framework for the Characterization of Biological Macromolecules, Advances in Artificial Intelligence, Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832` Examples -------- The following example is a one-dimensional minimization problem, with many local minima superimposed on a parabola. >>> from scipy.optimize import basinhopping >>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x >>> x0=[1.] Basinhopping, internally, uses a local minimization algorithm. We will use the parameter ``minimizer_kwargs`` to tell basinhopping which algorithm to use and how to set up that minimizer. This parameter will be passed to ``scipy.optimize.minimize()``. >>> minimizer_kwargs = {"method": "BFGS"} >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs, ... niter=200) >>> print("global minimum: x = %.4f, f(x0) = %.4f" % (ret.x, ret.fun)) global minimum: x = -0.1951, f(x0) = -1.0009 Next consider a two-dimensional minimization problem. Also, this time we will use gradient information to significantly speed up the search. >>> def func2d(x): ... f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + ... 0.2) * x[0] ... df = np.zeros(2) ... df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 ... df[1] = 2. * x[1] + 0.2 ... return f, df We'll also use a different local minimization algorithm. Also we must tell the minimizer that our function returns both energy and gradient (jacobian) >>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True} >>> x0 = [1.0, 1.0] >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, ... niter=200) >>> print("global minimum: x = [%.4f, %.4f], f(x0) = %.4f" % (ret.x[0], ... ret.x[1], ... ret.fun)) global minimum: x = [-0.1951, -0.1000], f(x0) = -1.0109 Here is an example using a custom step-taking routine. Imagine you want the first coordinate to take larger steps than the rest of the coordinates. This can be implemented like so: >>> class MyTakeStep(object): ... def __init__(self, stepsize=0.5): ... self.stepsize = stepsize ... def __call__(self, x): ... s = self.stepsize ... x[0] += np.random.uniform(-2.*s, 2.*s) ... x[1:] += np.random.uniform(-s, s, x[1:].shape) ... return x Since ``MyTakeStep.stepsize`` exists basinhopping will adjust the magnitude of ``stepsize`` to optimize the search. We'll use the same 2-D function as before >>> mytakestep = MyTakeStep() >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, ... niter=200, take_step=mytakestep) >>> print("global minimum: x = [%.4f, %.4f], f(x0) = %.4f" % (ret.x[0], ... ret.x[1], ... ret.fun)) global minimum: x = [-0.1951, -0.1000], f(x0) = -1.0109 Now let's do an example using a custom callback function which prints the value of every minimum found >>> def print_fun(x, f, accepted): ... print("at minimum %.4f accepted %d" % (f, int(accepted))) We'll run it for only 10 basinhopping steps this time. >>> np.random.seed(1) >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, ... niter=10, callback=print_fun) at minimum 0.4159 accepted 1 at minimum -0.9073 accepted 1 at minimum -0.1021 accepted 1 at minimum -0.1021 accepted 1 at minimum 0.9102 accepted 1 at minimum 0.9102 accepted 1 at minimum 2.2945 accepted 0 at minimum -0.1021 accepted 1 at minimum -1.0109 accepted 1 at minimum -1.0109 accepted 1 The minimum at -1.0109 is actually the global minimum, found already on the 8th iteration. 
Now let's implement bounds on the problem using a custom ``accept_test``:

    >>> class MyBounds(object):
    ...     def __init__(self, xmax=[1.1,1.1], xmin=[-1.1,-1.1]):
    ...         self.xmax = np.array(xmax)
    ...         self.xmin = np.array(xmin)
    ...     def __call__(self, **kwargs):
    ...         x = kwargs["x_new"]
    ...         tmax = bool(np.all(x <= self.xmax))
    ...         tmin = bool(np.all(x >= self.xmin))
    ...         return tmax and tmin

    >>> mybounds = MyBounds()
    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
    ...                    niter=10, accept_test=mybounds)

    """
    x0 = np.array(x0)

    # set up the np.random.RandomState generator
    rng = check_random_state(seed)

    # set up minimizer
    if minimizer_kwargs is None:
        minimizer_kwargs = dict()
    wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func,
                                         **minimizer_kwargs)

    # set up step-taking algorithm
    if take_step is not None:
        if not isinstance(take_step, collections.Callable):
            raise TypeError("take_step must be callable")
        # if take_step.stepsize exists then use AdaptiveStepsize to control
        # take_step.stepsize
        if hasattr(take_step, "stepsize"):
            take_step_wrapped = AdaptiveStepsize(take_step, interval=interval,
                                                 verbose=disp)
        else:
            take_step_wrapped = take_step
    else:
        # use default
        displace = RandomDisplacement(stepsize=stepsize, random_state=rng)
        take_step_wrapped = AdaptiveStepsize(displace, interval=interval,
                                             verbose=disp)

    # set up accept tests
    if accept_test is not None:
        if not isinstance(accept_test, collections.Callable):
            raise TypeError("accept_test must be callable")
        accept_tests = [accept_test]
    else:
        accept_tests = []
    # use default
    metropolis = Metropolis(T, random_state=rng)
    accept_tests.append(metropolis)

    if niter_success is None:
        niter_success = niter + 2

    bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped,
                            accept_tests, disp=disp)

    # start main iteration loop
    count, i = 0, 0
    message = ["requested number of basinhopping iterations completed"
               " successfully"]
    for i in range(niter):
        new_global_min = bh.one_cycle()

        if isinstance(callback, collections.Callable):
            # should we pass a copy of x?
            val = callback(bh.xtrial, bh.energy_trial, bh.accept)
            if val is not None:
                if val:
                    message = ["callback function requested stop early by "
                               "returning True"]
                    break

        count += 1
        if new_global_min:
            count = 0
        elif count > niter_success:
            message = ["success condition satisfied"]
            break

    # prepare return object
    res = bh.res
    res.lowest_optimization_result = bh.storage.get_lowest()
    res.x = np.copy(res.lowest_optimization_result.x)
    res.fun = res.lowest_optimization_result.fun
    res.message = message
    res.nit = i + 1
    return res


def _test_func2d_nograd(x):
    f = (cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
         + 1.010876184442655)
    return f


def _test_func2d(x):
    f = (cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0] +
         cos(14.5 * x[1] - 0.3) + (x[1] + 0.2) * x[1] + x[0] * x[1] +
         1.963879482144252)
    df = np.zeros(2)
    df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 + x[1]
    df[1] = -14.5 * sin(14.5 * x[1] - 0.3) + 2. * x[1] + 0.2 + x[0]
    return f, df


if __name__ == "__main__":
    print("\n\nminimize a 2d function without gradient")
    # minimum expected at ~[-0.195, -0.1]
    kwargs = {"method": "L-BFGS-B"}
    x0 = np.array([1.0, 1.])
    scipy.optimize.minimize(_test_func2d_nograd, x0, **kwargs)
    ret = basinhopping(_test_func2d_nograd, x0, minimizer_kwargs=kwargs,
                       niter=200, disp=False)
    print("minimum expected at func([-0.195, -0.1]) = 0.0")
    print(ret)

    print("\n\ntry a harder 2d problem")
    kwargs = {"method": "L-BFGS-B", "jac": True}
    x0 = np.array([1.0, 1.0])
    ret = basinhopping(_test_func2d, x0, minimizer_kwargs=kwargs, niter=200,
                       disp=False)
    print("minimum expected at ~, func([-0.19415263, -0.19415263]) = 0")
    print(ret)
29,477
38.943089
104
py
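For context, a minimal usage sketch of the module above combining two options that its docstring documents but never demonstrates together: `seed` (reproducible default step taking and Metropolis test) and `niter_success` (early stop when the best minimum stops improving). The toy objective `wavy` and all parameter values are illustrative assumptions, not part of the module.

import numpy as np
from scipy.optimize import basinhopping

def wavy(x):
    # Parabola with superimposed ripples: many local minima, one global one.
    return np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]

# seed makes the default RandomDisplacement and Metropolis test repeatable;
# niter_success stops early once the best minimum is unchanged for 20 cycles.
ret = basinhopping(wavy, [1.0], minimizer_kwargs={"method": "BFGS"},
                   niter=100, niter_success=20, seed=42)
print(ret.x, ret.fun, ret.nit)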
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_krylov.py
from ._trustregion import (_minimize_trust_region)
from ._trlib import (get_trlib_quadratic_subproblem)

__all__ = ['_minimize_trust_krylov']

def _minimize_trust_krylov(fun, x0, args=(), jac=None, hess=None, hessp=None,
                           inexact=True, **trust_region_options):
    """
    Minimization of a scalar function of one or more variables using
    a nearly exact trust-region algorithm that only requires matrix
    vector products with the hessian matrix.

    Options
    -------
    inexact : bool, optional
        Accuracy to solve subproblems. If True requires fewer nonlinear
        iterations, but more vector products.

    .. versionadded:: 1.0.0
    """

    if jac is None:
        raise ValueError('Jacobian is required for trust region '
                         'exact minimization.')
    if hess is None and hessp is None:
        raise ValueError('Either the Hessian or the Hessian-vector product '
                         'is required for Krylov trust-region minimization')

    # tol_rel specifies the termination tolerance relative to the initial
    # gradient norm in the krylov subspace iteration.
    # - tol_rel_i specifies the tolerance for interior convergence.
    # - tol_rel_b specifies the tolerance for boundary convergence.
    #   in nonlinear programming applications it is not necessary to solve
    #   the boundary case as exact as the interior case.
    # - setting tol_rel_i=-2 leads to a forcing sequence in the krylov
    #   subspace iteration leading to quadratic convergence if eventually
    #   the trust region stays inactive.
    # - setting tol_rel_b=-3 leads to a forcing sequence in the krylov
    #   subspace iteration leading to superlinear convergence as long
    #   as the iterates hit the trust region boundary.
    # For details consult the documentation of trlib_krylov_min
    # in _trlib/trlib_krylov.h
    #
    # Optimality of this choice of parameters among a range of possibilities
    # has been tested on the unconstrained subset of the CUTEst library.

    if inexact:
        return _minimize_trust_region(fun, x0, args=args, jac=jac,
                                      hess=hess, hessp=hessp,
                                      subproblem=get_trlib_quadratic_subproblem(
                                          tol_rel_i=-2.0, tol_rel_b=-3.0,
                                          disp=trust_region_options.get('disp', False)),
                                      **trust_region_options)
    else:
        return _minimize_trust_region(fun, x0, args=args, jac=jac,
                                      hess=hess, hessp=hessp,
                                      subproblem=get_trlib_quadratic_subproblem(
                                          tol_rel_i=1e-8, tol_rel_b=1e-6,
                                          disp=trust_region_options.get('disp', False)),
                                      **trust_region_options)
3,030
44.924242
86
py
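The `_minimize_trust_krylov` routine above is not called directly; it is reached through `scipy.optimize.minimize` with `method='trust-krylov'`. A short sketch using scipy's built-in Rosenbrock helpers, assuming a scipy build where the `_trlib` extension is available:

from scipy.optimize import minimize, rosen, rosen_der, rosen_hess_prod

# Only a Hessian-vector product (hessp) is needed, never the full Hessian.
res = minimize(rosen, [1.3, 0.7, 0.8], method='trust-krylov',
               jac=rosen_der, hessp=rosen_hess_prod)
print(res.x)  # expected near [1, 1, 1]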
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/slsqp.py
""" This module implements the Sequential Least SQuares Programming optimization algorithm (SLSQP), originally developed by Dieter Kraft. See http://www.netlib.org/toms/733 Functions --------- .. autosummary:: :toctree: generated/ approx_jacobian fmin_slsqp """ from __future__ import division, print_function, absolute_import __all__ = ['approx_jacobian', 'fmin_slsqp'] import numpy as np from scipy.optimize._slsqp import slsqp from numpy import (zeros, array, linalg, append, asfarray, concatenate, finfo, sqrt, vstack, exp, inf, isfinite, atleast_1d) from .optimize import wrap_function, OptimizeResult, _check_unknown_options __docformat__ = "restructuredtext en" _epsilon = sqrt(finfo(float).eps) def approx_jacobian(x, func, epsilon, *args): """ Approximate the Jacobian matrix of a callable function. Parameters ---------- x : array_like The state vector at which to compute the Jacobian matrix. func : callable f(x,*args) The vector-valued function. epsilon : float The perturbation used to determine the partial derivatives. args : sequence Additional arguments passed to func. Returns ------- An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length of the outputs of `func`, and ``lenx`` is the number of elements in `x`. Notes ----- The approximation is done using forward differences. """ x0 = asfarray(x) f0 = atleast_1d(func(*((x0,)+args))) jac = zeros([len(x0), len(f0)]) dx = zeros(len(x0)) for i in range(len(x0)): dx[i] = epsilon jac[i] = (func(*((x0+dx,)+args)) - f0)/epsilon dx[i] = 0.0 return jac.transpose() def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None, bounds=(), fprime=None, fprime_eqcons=None, fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6, iprint=1, disp=None, full_output=0, epsilon=_epsilon, callback=None): """ Minimize a function using Sequential Least SQuares Programming Python interface function for the SLSQP Optimization subroutine originally implemented by Dieter Kraft. Parameters ---------- func : callable f(x,*args) Objective function. Must return a scalar. x0 : 1-D ndarray of float Initial guess for the independent variable(s). eqcons : list, optional A list of functions of length n such that eqcons[j](x,*args) == 0.0 in a successfully optimized problem. f_eqcons : callable f(x,*args), optional Returns a 1-D array in which each element must equal 0.0 in a successfully optimized problem. If f_eqcons is specified, eqcons is ignored. ieqcons : list, optional A list of functions of length n such that ieqcons[j](x,*args) >= 0.0 in a successfully optimized problem. f_ieqcons : callable f(x,*args), optional Returns a 1-D ndarray in which each element must be greater or equal to 0.0 in a successfully optimized problem. If f_ieqcons is specified, ieqcons is ignored. bounds : list, optional A list of tuples specifying the lower and upper bound for each independent variable [(xl0, xu0),(xl1, xu1),...] Infinite values will be interpreted as large floating values. fprime : callable `f(x,*args)`, optional A function that evaluates the partial derivatives of func. fprime_eqcons : callable `f(x,*args)`, optional A function of the form `f(x, *args)` that returns the m by n array of equality constraint normals. If not provided, the normals will be approximated. The array returned by fprime_eqcons should be sized as ( len(eqcons), len(x0) ). fprime_ieqcons : callable `f(x,*args)`, optional A function of the form `f(x, *args)` that returns the m by n array of inequality constraint normals. If not provided, the normals will be approximated. 
The array returned by fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ). args : sequence, optional Additional arguments passed to func and fprime. iter : int, optional The maximum number of iterations. acc : float, optional Requested accuracy. iprint : int, optional The verbosity of fmin_slsqp : * iprint <= 0 : Silent operation * iprint == 1 : Print summary upon completion (default) * iprint >= 2 : Print status of each iterate and summary disp : int, optional Over-rides the iprint interface (preferred). full_output : bool, optional If False, return only the minimizer of func (default). Otherwise, output final objective function and summary information. epsilon : float, optional The step size for finite-difference derivative estimates. callback : callable, optional Called after each iteration, as ``callback(x)``, where ``x`` is the current parameter vector. Returns ------- out : ndarray of float The final minimizer of func. fx : ndarray of float, if full_output is true The final value of the objective function. its : int, if full_output is true The number of iterations. imode : int, if full_output is true The exit mode from the optimizer (see below). smode : string, if full_output is true Message describing the exit mode from the optimizer. See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'SLSQP' `method` in particular. Notes ----- Exit modes are defined as follows :: -1 : Gradient evaluation required (g & a) 0 : Optimization terminated successfully. 1 : Function evaluation required (f & c) 2 : More equality constraints than independent variables 3 : More than 3*n iterations in LSQ subproblem 4 : Inequality constraints incompatible 5 : Singular matrix E in LSQ subproblem 6 : Singular matrix C in LSQ subproblem 7 : Rank-deficient equality constraint subproblem HFTI 8 : Positive directional derivative for linesearch 9 : Iteration limit exceeded Examples -------- Examples are given :ref:`in the tutorial <tutorial-sqlsp>`. """ if disp is not None: iprint = disp opts = {'maxiter': iter, 'ftol': acc, 'iprint': iprint, 'disp': iprint != 0, 'eps': epsilon, 'callback': callback} # Build the constraints as a tuple of dictionaries cons = () # 1. constraints of the 1st kind (eqcons, ieqcons); no Jacobian; take # the same extra arguments as the objective function. cons += tuple({'type': 'eq', 'fun': c, 'args': args} for c in eqcons) cons += tuple({'type': 'ineq', 'fun': c, 'args': args} for c in ieqcons) # 2. constraints of the 2nd kind (f_eqcons, f_ieqcons) and their Jacobian # (fprime_eqcons, fprime_ieqcons); also take the same extra arguments # as the objective function. if f_eqcons: cons += ({'type': 'eq', 'fun': f_eqcons, 'jac': fprime_eqcons, 'args': args}, ) if f_ieqcons: cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons, 'args': args}, ) res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds, constraints=cons, **opts) if full_output: return res['x'], res['fun'], res['nit'], res['status'], res['message'] else: return res['x'] def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None, constraints=(), maxiter=100, ftol=1.0E-6, iprint=1, disp=False, eps=_epsilon, callback=None, **unknown_options): """ Minimize a scalar function of one or more variables using Sequential Least SQuares Programming (SLSQP). Options ------- ftol : float Precision goal for the value of f in the stopping criterion. eps : float Step size used for numerical approximation of the Jacobian. 
disp : bool Set to True to print convergence messages. If False, `verbosity` is ignored and set to 0. maxiter : int Maximum number of iterations. """ _check_unknown_options(unknown_options) fprime = jac iter = maxiter acc = ftol epsilon = eps if not disp: iprint = 0 # Constraints are triaged per type into a dictionary of tuples if isinstance(constraints, dict): constraints = (constraints, ) cons = {'eq': (), 'ineq': ()} for ic, con in enumerate(constraints): # check type try: ctype = con['type'].lower() except KeyError: raise KeyError('Constraint %d has no type defined.' % ic) except TypeError: raise TypeError('Constraints must be defined using a ' 'dictionary.') except AttributeError: raise TypeError("Constraint's type must be a string.") else: if ctype not in ['eq', 'ineq']: raise ValueError("Unknown constraint type '%s'." % con['type']) # check function if 'fun' not in con: raise ValueError('Constraint %d has no function defined.' % ic) # check Jacobian cjac = con.get('jac') if cjac is None: # approximate Jacobian function. The factory function is needed # to keep a reference to `fun`, see gh-4240. def cjac_factory(fun): def cjac(x, *args): return approx_jacobian(x, fun, epsilon, *args) return cjac cjac = cjac_factory(con['fun']) # update constraints' dictionary cons[ctype] += ({'fun': con['fun'], 'jac': cjac, 'args': con.get('args', ())}, ) exit_modes = {-1: "Gradient evaluation required (g & a)", 0: "Optimization terminated successfully.", 1: "Function evaluation required (f & c)", 2: "More equality constraints than independent variables", 3: "More than 3*n iterations in LSQ subproblem", 4: "Inequality constraints incompatible", 5: "Singular matrix E in LSQ subproblem", 6: "Singular matrix C in LSQ subproblem", 7: "Rank-deficient equality constraint subproblem HFTI", 8: "Positive directional derivative for linesearch", 9: "Iteration limit exceeded"} # Wrap func feval, func = wrap_function(func, args) # Wrap fprime, if provided, or approx_jacobian if not if fprime: geval, fprime = wrap_function(fprime, args) else: geval, fprime = wrap_function(approx_jacobian, (func, epsilon)) # Transform x0 into an array. x = asfarray(x0).flatten() # Set the parameters that SLSQP will need # meq, mieq: number of equality and inequality constraints meq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) for c in cons['eq']])) mieq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) for c in cons['ineq']])) # m = The total number of constraints m = meq + mieq # la = The number of constraints, or 1 if there are no constraints la = array([1, m]).max() # n = The number of independent variables n = len(x) # Define the workspaces for SLSQP n1 = n + 1 mineq = m - meq + n1 + n1 len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \ + 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1 len_jw = mineq w = zeros(len_w) jw = zeros(len_jw) # Decompose bounds into xl and xu if bounds is None or len(bounds) == 0: xl = np.empty(n, dtype=float) xu = np.empty(n, dtype=float) xl.fill(np.nan) xu.fill(np.nan) else: bnds = array(bounds, float) if bnds.shape[0] != n: raise IndexError('SLSQP Error: the length of bounds is not ' 'compatible with that of x0.') with np.errstate(invalid='ignore'): bnderr = bnds[:, 0] > bnds[:, 1] if bnderr.any(): raise ValueError('SLSQP Error: lb > ub in bounds %s.' 
% ', '.join(str(b) for b in bnderr)) xl, xu = bnds[:, 0], bnds[:, 1] # Mark infinite bounds with nans; the Fortran code understands this infbnd = ~isfinite(bnds) xl[infbnd[:, 0]] = np.nan xu[infbnd[:, 1]] = np.nan # Clip initial guess to bounds (SLSQP may fail with bounds-infeasible # initial point) have_bound = np.isfinite(xl) x[have_bound] = np.clip(x[have_bound], xl[have_bound], np.inf) have_bound = np.isfinite(xu) x[have_bound] = np.clip(x[have_bound], -np.inf, xu[have_bound]) # Initialize the iteration counter and the mode value mode = array(0, int) acc = array(acc, float) majiter = array(iter, int) majiter_prev = 0 # Print the header if iprint >= 2 if iprint >= 2: print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM")) while 1: if mode == 0 or mode == 1: # objective and constraint evaluation required # Compute objective function fx = func(x) try: fx = float(np.asarray(fx)) except (TypeError, ValueError): raise ValueError("Objective function must return a scalar") # Compute the constraints if cons['eq']: c_eq = concatenate([atleast_1d(con['fun'](x, *con['args'])) for con in cons['eq']]) else: c_eq = zeros(0) if cons['ineq']: c_ieq = concatenate([atleast_1d(con['fun'](x, *con['args'])) for con in cons['ineq']]) else: c_ieq = zeros(0) # Now combine c_eq and c_ieq into a single matrix c = concatenate((c_eq, c_ieq)) if mode == 0 or mode == -1: # gradient evaluation required # Compute the derivatives of the objective function # For some reason SLSQP wants g dimensioned to n+1 g = append(fprime(x), 0.0) # Compute the normals of the constraints if cons['eq']: a_eq = vstack([con['jac'](x, *con['args']) for con in cons['eq']]) else: # no equality constraint a_eq = zeros((meq, n)) if cons['ineq']: a_ieq = vstack([con['jac'](x, *con['args']) for con in cons['ineq']]) else: # no inequality constraint a_ieq = zeros((mieq, n)) # Now combine a_eq and a_ieq into a single a matrix if m == 0: # no constraints a = zeros((la, n)) else: a = vstack((a_eq, a_ieq)) a = concatenate((a, zeros([la, 1])), 1) # Call SLSQP slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw) # call callback if major iteration has incremented if callback is not None and majiter > majiter_prev: callback(np.copy(x)) # Print the status of the current iterate if iprint > 2 and the # major iteration has incremented if iprint >= 2 and majiter > majiter_prev: print("%5i %5i % 16.6E % 16.6E" % (majiter, feval[0], fx, linalg.norm(g))) # If exit mode is not -1 or 1, slsqp has completed if abs(mode) != 1: break majiter_prev = int(majiter) # Optimization loop complete. 
Print status if requested
    if iprint >= 1:
        print(exit_modes[int(mode)] + "    (Exit mode " + str(mode) + ')')
        print("            Current function value:", fx)
        print("            Iterations:", majiter)
        print("            Function evaluations:", feval[0])
        print("            Gradient evaluations:", geval[0])

    return OptimizeResult(x=x, fun=fx, jac=g[:-1], nit=int(majiter),
                          nfev=feval[0], njev=geval[0], status=int(mode),
                          message=exit_modes[int(mode)], success=(mode == 0))


if __name__ == '__main__':

    # objective function
    def fun(x, r=[4, 2, 4, 2, 1]):
        """ Objective function """
        return exp(x[0]) * (r[0] * x[0]**2 + r[1] * x[1]**2 +
                            r[2] * x[0] * x[1] + r[3] * x[1] +
                            r[4])

    # bounds
    bnds = array([[-inf]*2, [inf]*2]).T
    bnds[:, 0] = [0.1, 0.2]

    # constraints
    def feqcon(x, b=1):
        """ Equality constraint """
        return array([x[0]**2 + x[1] - b])

    def jeqcon(x, b=1):
        """ Jacobian of equality constraint """
        return array([[2*x[0], 1]])

    def fieqcon(x, c=10):
        """ Inequality constraint """
        return array([x[0] * x[1] + c])

    def jieqcon(x, c=10):
        """ Jacobian of inequality constraint """
        # Gradient of x[0]*x[1] + c with respect to (x[0], x[1]) is (x[1], x[0]).
        return array([[x[1], x[0]]])

    # constraints dictionaries
    cons = ({'type': 'eq', 'fun': feqcon, 'jac': jeqcon, 'args': (1, )},
            {'type': 'ineq', 'fun': fieqcon, 'jac': jieqcon, 'args': (10,)})

    # Bounds constraint problem
    print(' Bounds constraints '.center(72, '-'))
    print(' * fmin_slsqp')
    x, f = fmin_slsqp(fun, array([-1, 1]), bounds=bnds, disp=1,
                      full_output=True)[:2]
    print(' * _minimize_slsqp')
    res = _minimize_slsqp(fun, array([-1, 1]), bounds=bnds,
                          **{'disp': True})

    # Equality and inequality constraints problem
    print(' Equality and inequality constraints '.center(72, '-'))
    print(' * fmin_slsqp')
    x, f = fmin_slsqp(fun, array([-1, 1]),
                      f_eqcons=feqcon, fprime_eqcons=jeqcon,
                      f_ieqcons=fieqcon, fprime_ieqcons=jieqcon,
                      disp=1, full_output=True)[:2]
    print(' * _minimize_slsqp')
    res = _minimize_slsqp(fun, array([-1, 1]), constraints=cons,
                          **{'disp': True})
18,568
35.409804
82
py
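The `_minimize_slsqp` wrapper above is normally reached through `scipy.optimize.minimize` with `method='SLSQP'`. A minimal sketch with linear inequality constraints and bounds; the problem data are chosen for illustration (the minimum sits near (1.4, 1.7)):

from scipy.optimize import minimize

fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
        {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
        {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
bnds = ((0, None), (0, None))

# No Jacobians are supplied, so they are approximated by forward
# differences, as in approx_jacobian above.
res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds, constraints=cons)
print(res.x)  # roughly [1.4, 1.7]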
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_spectral.py
""" Spectral Algorithm for Nonlinear Equations """ from __future__ import division, absolute_import, print_function import collections import numpy as np from scipy.optimize import OptimizeResult from scipy.optimize.optimize import _check_unknown_options from .linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng class _NoConvergence(Exception): pass def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000, fnorm=None, callback=None, disp=False, M=10, eta_strategy=None, sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options): r""" Solve nonlinear equation with the DF-SANE method Options ------- ftol : float, optional Relative norm tolerance. fatol : float, optional Absolute norm tolerance. Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``. fnorm : callable, optional Norm to use in the convergence check. If None, 2-norm is used. maxfev : int, optional Maximum number of function evaluations. disp : bool, optional Whether to print convergence process to stdout. eta_strategy : callable, optional Choice of the ``eta_k`` parameter, which gives slack for growth of ``||F||**2``. Called as ``eta_k = eta_strategy(k, x, F)`` with `k` the iteration number, `x` the current iterate and `F` the current residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``. Default: ``||F||**2 / (1 + k)**2``. sigma_eps : float, optional The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``. Default: 1e-10 sigma_0 : float, optional Initial spectral coefficient. Default: 1.0 M : int, optional Number of iterates to include in the nonmonotonic line search. Default: 10 line_search : {'cruz', 'cheng'} Type of line search to employ. 'cruz' is the original one defined in [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)]. Default: 'cruz' References ---------- .. [1] "Spectral residual method without gradient information for solving large-scale nonlinear systems of equations." W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014). .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009). """ _check_unknown_options(unknown_options) if line_search not in ('cheng', 'cruz'): raise ValueError("Invalid value %r for 'line_search'" % (line_search,)) nexp = 2 if eta_strategy is None: # Different choice from [1], as their eta is not invariant # vs. scaling of F. def eta_strategy(k, x, F): # Obtain squared 2-norm of the initial residual from the outer scope return f_0 / (1 + k)**2 if fnorm is None: def fnorm(F): # Obtain squared 2-norm of the current residual from the outer scope return f_k**(1.0/nexp) def fmerit(F): return np.linalg.norm(F)**nexp nfev = [0] f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit, nfev, maxfev, args) k = 0 f_0 = f_k sigma_k = sigma_0 F_0_norm = fnorm(F_k) # For the 'cruz' line search prev_fs = collections.deque([f_k], M) # For the 'cheng' line search Q = 1.0 C = f_0 converged = False message = "too many function evaluations required" while True: F_k_norm = fnorm(F_k) if disp: print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k)) if callback is not None: callback(x_k, F_k) if F_k_norm < ftol * F_0_norm + fatol: # Converged! 
message = "successful convergence" converged = True break # Control spectral parameter, from [2] if abs(sigma_k) > 1/sigma_eps: sigma_k = 1/sigma_eps * np.sign(sigma_k) elif abs(sigma_k) < sigma_eps: sigma_k = sigma_eps # Line search direction d = -sigma_k * F_k # Nonmonotone line search eta = eta_strategy(k, x_k, F_k) try: if line_search == 'cruz': alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta=eta) elif line_search == 'cheng': alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta=eta) except _NoConvergence: break # Update spectral parameter s_k = xp - x_k y_k = Fp - F_k sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k) # Take step x_k = xp F_k = Fp f_k = fp # Store function value if line_search == 'cruz': prev_fs.append(fp) k += 1 x = _wrap_result(x_k, is_complex, shape=x_shape) F = _wrap_result(F_k, is_complex) result = OptimizeResult(x=x, success=converged, message=message, fun=F, nfev=nfev[0], nit=k) return result def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()): """ Wrap a function and an initial value so that (i) complex values are wrapped to reals, and (ii) value for a merit function fmerit(x, f) is computed at the same time, (iii) iteration count is maintained and an exception is raised if it is exceeded. Parameters ---------- func : callable Function to wrap x0 : ndarray Initial value fmerit : callable Merit function fmerit(f) for computing merit value from residual. nfev_list : list List to store number of evaluations in. Should be [0] in the beginning. maxfev : int Maximum number of evaluations before _NoConvergence is raised. args : tuple Extra arguments to func Returns ------- wrap_func : callable Wrapped function, to be called as ``F, fp = wrap_func(x0)`` x0_wrap : ndarray of float Wrapped initial value; raveled to 1D and complex values mapped to reals. x0_shape : tuple Shape of the initial value array f : float Merit function at F F : ndarray of float Residual at x0_wrap is_complex : bool Whether complex values were mapped to reals """ x0 = np.asarray(x0) x0_shape = x0.shape F = np.asarray(func(x0, *args)).ravel() is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F) x0 = x0.ravel() nfev_list[0] = 1 if is_complex: def wrap_func(x): if nfev_list[0] >= maxfev: raise _NoConvergence() nfev_list[0] += 1 z = _real2complex(x).reshape(x0_shape) v = np.asarray(func(z, *args)).ravel() F = _complex2real(v) f = fmerit(F) return f, F x0 = _complex2real(x0) F = _complex2real(F) else: def wrap_func(x): if nfev_list[0] >= maxfev: raise _NoConvergence() nfev_list[0] += 1 x = x.reshape(x0_shape) F = np.asarray(func(x, *args)).ravel() f = fmerit(F) return f, F return wrap_func, x0, x0_shape, fmerit(F), F, is_complex def _wrap_result(result, is_complex, shape=None): """ Convert from real to complex and reshape result arrays. """ if is_complex: z = _real2complex(result) else: z = result if shape is not None: z = z.reshape(shape) return z def _real2complex(x): return np.ascontiguousarray(x, dtype=float).view(np.complex128) def _complex2real(z): return np.ascontiguousarray(z, dtype=complex).view(np.float64)
7,986
29.719231
103
py
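`_root_df_sane` above is exposed through `scipy.optimize.root` as `method='df-sane'`. A hedged sketch on a small nonlinear system; the system itself is an illustrative assumption, and the options shown map onto the solver parameters documented above:

import numpy as np
from scipy.optimize import root

def fun(x):
    # A 2x2 nonlinear system with a root near the origin.
    return np.array([x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
                     0.5 * (x[1] - x[0])**3 + x[1]])

# 'M' and 'line_search' correspond to the nonmonotone-search options above.
sol = root(fun, [0.0, 0.0], method='df-sane',
           options={'M': 10, 'line_search': 'cruz'})
print(sol.x, sol.success)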
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_remove_redundancy.py
""" Routines for removing redundant (linearly dependent) equations from linear programming equality constraints. """ # Author: Matt Haberland from __future__ import division, print_function, absolute_import import numpy as np from scipy.linalg import svd import scipy def _row_count(A): """ Counts the number of nonzeros in each row of input array A. Nonzeros are defined as any element with absolute value greater than tol = 1e-13. This value should probably be an input to the function. Parameters ---------- A : 2-D array An array representing a matrix Returns ------- rowcount : 1-D array Number of nonzeros in each row of A """ tol = 1e-13 return np.array((abs(A) > tol).sum(axis=1)).flatten() def _get_densest(A, eligibleRows): """ Returns the index of the densest row of A. Ignores rows that are not eligible for consideration. Parameters ---------- A : 2-D array An array representing a matrix eligibleRows : 1-D logical array Values indicate whether the corresponding row of A is eligible to be considered Returns ------- i_densest : int Index of the densest row in A eligible for consideration """ rowCounts = _row_count(A) return np.argmax(rowCounts * eligibleRows) def _remove_zero_rows(A, b): """ Eliminates trivial equations from system of equations defined by Ax = b and identifies trivial infeasibilities Parameters ---------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations Returns ------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the removal operation 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization. """ status = 0 message = "" i_zero = _row_count(A) == 0 A = A[np.logical_not(i_zero), :] if not(np.allclose(b[i_zero], 0)): status = 2 message = "There is a zero row in A_eq with a nonzero corresponding " \ "entry in b_eq. The problem is infeasible." b = b[np.logical_not(i_zero)] return A, b, status, message def bg_update_dense(plu, perm_r, v, j): LU, p = plu u = scipy.linalg.solve_triangular(LU, v[perm_r], lower=True, unit_diagonal=True) LU[:j+1, j] = u[:j+1] l = u[j+1:] piv = LU[j, j] LU[j+1:, j] += (l/piv) return LU, p def _remove_redundancy_dense(A, rhs): """ Eliminates redundant equations from system of equations defined by Ax = b and identifies infeasibilities. Parameters ---------- A : 2-D sparse matrix An matrix representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations Returns ---------- A : 2-D sparse matrix A matrix representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the system 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization. References ---------- .. [2] Andersen, Erling D. "Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. """ tolapiv = 1e-8 tolprimal = 1e-8 status = 0 message = "" inconsistent = ("There is a linear combination of rows of A_eq that " "results in zero, suggesting a redundant constraint. 
" "However the same linear combination of b_eq is " "nonzero, suggesting that the constraints conflict " "and the problem is infeasible.") A, rhs, status, message = _remove_zero_rows(A, rhs) if status != 0: return A, rhs, status, message m, n = A.shape v = list(range(m)) # Artificial column indices. b = list(v) # Basis column indices. # This is better as a list than a set because column order of basis matrix # needs to be consistent. k = set(range(m, m+n)) # Structural column indices. d = [] # Indices of dependent rows lu = None perm_r = None A_orig = A A = np.hstack((np.eye(m), A)) e = np.zeros(m) # Implements basic algorithm from [2] # Uses some of the suggested improvements (removing zero rows and # Bartels-Golub update idea). # Removing column singletons would be easy, but it is not as important # because the procedure is performed only on the equality constraint # matrix from the original problem - not on the canonical form matrix, # which would have many more column singletons due to slack variables # from the inequality constraints. # The thoughts on "crashing" the initial basis sound useful, but the # description of the procedure seems to assume a lot of familiarity with # the subject; it is not very explicit. I already went through enough # trouble getting the basic algorithm working, so I was not interested in # trying to decipher this, too. (Overall, the paper is fraught with # mistakes and ambiguities - which is strange, because the rest of # Andersen's papers are quite good.) B = A[:, b] for i in v: e[i] = 1 if i > 0: e[i-1] = 0 try: # fails for i==0 and any time it gets ill-conditioned j = b[i-1] lu = bg_update_dense(lu, perm_r, A[:, j], i-1) except: lu = scipy.linalg.lu_factor(B) LU, p = lu perm_r = list(range(m)) for i1, i2 in enumerate(p): perm_r[i1], perm_r[i2] = perm_r[i2], perm_r[i1] pi = scipy.linalg.lu_solve(lu, e, trans=1) # not efficient, but this is not the time sink... js = np.array(list(k-set(b))) batch = 50 dependent = True # This is a tiny bit faster than looping over columns indivually, # like for j in js: if abs(A[:,j].transpose().dot(pi)) > tolapiv: for j_index in range(0, len(js), batch): j_indices = js[np.arange(j_index, min(j_index+batch, len(js)))] c = abs(A[:, j_indices].transpose().dot(pi)) if (c > tolapiv).any(): j = js[j_index + np.argmax(c)] # very independent column B[:, i] = A[:, j] b[i] = j dependent = False break if dependent: bibar = pi.T.dot(rhs.reshape(-1, 1)) bnorm = np.linalg.norm(rhs) if abs(bibar)/(1+bnorm) > tolprimal: # inconsistent status = 2 message = inconsistent return A_orig, rhs, status, message else: # dependent d.append(i) keep = set(range(m)) keep = list(keep - set(d)) return A_orig[keep, :], rhs[keep], status, message def _remove_redundancy_sparse(A, rhs): """ Eliminates redundant equations from system of equations defined by Ax = b and identifies infeasibilities. Parameters ---------- A : 2-D sparse matrix An matrix representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations Returns ------- A : 2-D sparse matrix A matrix representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the system 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization. References ---------- .. [2] Andersen, Erling D. 
"Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. """ tolapiv = 1e-8 tolprimal = 1e-8 status = 0 message = "" inconsistent = ("There is a linear combination of rows of A_eq that " "results in zero, suggesting a redundant constraint. " "However the same linear combination of b_eq is " "nonzero, suggesting that the constraints conflict " "and the problem is infeasible.") A, rhs, status, message = _remove_zero_rows(A, rhs) if status != 0: return A, rhs, status, message m, n = A.shape v = list(range(m)) # Artificial column indices. b = list(v) # Basis column indices. # This is better as a list than a set because column order of basis matrix # needs to be consistent. k = set(range(m, m+n)) # Structural column indices. d = [] # Indices of dependent rows A_orig = A A = scipy.sparse.hstack((scipy.sparse.eye(m), A)).tocsc() e = np.zeros(m) # Implements basic algorithm from [2] # Uses only one of the suggested improvements (removing zero rows). # Removing column singletons would be easy, but it is not as important # because the procedure is performed only on the equality constraint # matrix from the original problem - not on the canonical form matrix, # which would have many more column singletons due to slack variables # from the inequality constraints. # The thoughts on "crashing" the initial basis sound useful, but the # description of the procedure seems to assume a lot of familiarity with # the subject; it is not very explicit. I already went through enough # trouble getting the basic algorithm working, so I was not interested in # trying to decipher this, too. (Overall, the paper is fraught with # mistakes and ambiguities - which is strange, because the rest of # Andersen's papers are quite good.) # I tried and tried and tried to improve performance using the # Bartels-Golub update. It works, but it's only practical if the LU # factorization can be specialized as described, and that is not possible # until the Scipy SuperLU interface permits control over column # permutation - see issue #7700. for i in v: B = A[:, b] e[i] = 1 if i > 0: e[i-1] = 0 pi = scipy.sparse.linalg.spsolve(B.transpose(), e).reshape(-1, 1) js = list(k-set(b)) # not efficient, but this is not the time sink... # Due to overhead, it tends to be faster (for problems tested) to # compute the full matrix-vector product rather than individual # vector-vector products (with the chance of terminating as soon # as any are nonzero). For very large matrices, it might be worth # it to compute, say, 100 or 1000 at a time and stop when a nonzero # is found. c = (np.abs(A[:, js].transpose().dot(pi)) > tolapiv).nonzero()[0] if len(c) > 0: # independent j = js[c[0]] # in a previous commit, the previous line was changed to choose # index j corresponding with the maximum dot product. # While this avoided issues with almost # singular matrices, it slowed the routine in most NETLIB tests. # I think this is because these columns were denser than the # first column with nonzero dot product (c[0]). # It would be nice to have a heuristic that balances sparsity with # high dot product, but I don't think it's worth the time to # develop one right now. Bartels-Golub update is a much higher # priority. 
b[i] = j  # replace artificial column
        else:
            bibar = pi.T.dot(rhs.reshape(-1, 1))
            bnorm = np.linalg.norm(rhs)
            if abs(bibar)/(1 + bnorm) > tolprimal:
                status = 2
                message = inconsistent
                return A_orig, rhs, status, message
            else:  # dependent
                d.append(i)

    keep = set(range(m))
    keep = list(keep - set(d))
    return A_orig[keep, :], rhs[keep], status, message


def _remove_redundancy(A, b):
    """
    Eliminates redundant equations from system of equations defined by Ax = b
    and identifies infeasibilities.

    Parameters
    ----------
    A : 2-D array
        An array representing the left-hand side of a system of equations
    b : 1-D array
        An array representing the right-hand side of a system of equations

    Returns
    -------
    A : 2-D array
        An array representing the left-hand side of a system of equations
    b : 1-D array
        An array representing the right-hand side of a system of equations
    status: int
        An integer indicating the status of the system
        0: No infeasibility identified
        2: Trivially infeasible
    message : str
        A string descriptor of the exit status of the optimization.

    References
    ----------
    .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
        large-scale linear programming." Optimization Methods and Software
        6.3 (1995): 219-227.

    """

    A, b, status, message = _remove_zero_rows(A, b)

    if status != 0:
        return A, b, status, message

    U, s, Vh = svd(A)
    eps = np.finfo(float).eps
    tol = s.max() * max(A.shape) * eps

    m, n = A.shape
    s_min = s[-1] if m <= n else 0

    # this algorithm is faster than that of [2] when the nullspace is small
    # but it could probably be improved by randomized algorithms and with
    # a sparse implementation.
    # it relies on repeated singular value decomposition to find linearly
    # dependent rows (as identified by columns of U that correspond with zero
    # singular values). Unfortunately, only one row can be removed per
    # decomposition (I tried otherwise; doing so can cause problems.)
    # It would be nice if we could do truncated SVD like sp.sparse.linalg.svds
    # but that function is unreliable at finding singular values near zero.
    # Finding max eigenvalue L of A A^T, then largest eigenvalue (and
    # associated eigenvector) of -A A^T + L I (I is identity) via power
    # iteration would also work in theory, but is only efficient if the
    # smallest nonzero eigenvalue of A A^T is close to the largest nonzero
    # eigenvalue.

    while abs(s_min) < tol:
        v = U[:, -1]  # TODO: return these so user can eliminate from problem?
        # rows need to be represented in significant amount
        eligibleRows = np.abs(v) > tol * 10e6
        if not np.any(eligibleRows) or np.any(np.abs(v.dot(A)) > tol):
            status = 4
            message = ("Due to numerical issues, redundant equality "
                       "constraints could not be removed automatically. "
                       "Try providing your constraint matrices as sparse "
                       "matrices to activate sparse presolve, try turning "
                       "off redundancy removal, or try turning off presolve "
                       "altogether.")
            break
        if np.any(np.abs(v.dot(b)) > tol):
            status = 2
            message = ("There is a linear combination of rows of A_eq that "
                       "results in zero, suggesting a redundant constraint. "
                       "However the same linear combination of b_eq is "
                       "nonzero, suggesting that the constraints conflict "
                       "and the problem is infeasible.")
            break

        i_remove = _get_densest(A, eligibleRows)
        A = np.delete(A, i_remove, axis=0)
        b = np.delete(b, i_remove)
        U, s, Vh = svd(A)
        m, n = A.shape
        s_min = s[-1] if m <= n else 0

    return A, b, status, message
16,413
35.314159
79
py
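The SVD-based `_remove_redundancy` above detects one dependent row per decomposition via the left singular vector of the smallest singular value. Below is a minimal self-contained sketch of that idea; the 3x3 system and the simple max-|v| row-selection rule are illustrative assumptions, not the scipy-internal behaviour (which prefers the densest eligible row).

import numpy as np

# Small equality system whose third row equals row 1 + row 2 (redundant).
A = np.array([[1., 0., 1.],
              [0., 1., 1.],
              [1., 1., 2.]])
b = np.array([1., 2., 3.])  # consistent, since b[2] == b[0] + b[1]

U, s, Vh = np.linalg.svd(A)
tol = s.max() * max(A.shape) * np.finfo(float).eps
if s[-1] < tol:          # rank deficient: some row is linearly dependent
    v = U[:, -1]         # combination of rows that sums to (nearly) zero
    if abs(v.dot(b)) > tol:
        print("infeasible: the same combination of b is nonzero")
    else:
        i = int(np.argmax(np.abs(v)))  # drop one participating row (illustrative choice)
        A = np.delete(A, i, axis=0)
        b = np.delete(b, i)
print(A.shape)  # (2, 3): one redundant equation removed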
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/__init__.py
""" ===================================================== Optimization and root finding (:mod:`scipy.optimize`) ===================================================== .. currentmodule:: scipy.optimize Optimization ============ Local Optimization ------------------ .. autosummary:: :toctree: generated/ minimize - Interface for minimizers of multivariate functions minimize_scalar - Interface for minimizers of univariate functions OptimizeResult - The optimization result returned by some optimizers OptimizeWarning - The optimization encountered problems The `minimize` function supports the following methods: .. toctree:: optimize.minimize-neldermead optimize.minimize-powell optimize.minimize-cg optimize.minimize-bfgs optimize.minimize-newtoncg optimize.minimize-lbfgsb optimize.minimize-tnc optimize.minimize-cobyla optimize.minimize-slsqp optimize.minimize-trustconstr optimize.minimize-dogleg optimize.minimize-trustncg optimize.minimize-trustkrylov optimize.minimize-trustexact Constraints are passed to `minimize` function as a single object or as a list of objects from the following classes: .. autosummary:: :toctree: generated/ NonlinearConstraint - Class defining general nonlinear constraints. LinearConstraint - Class defining general linear constraints. Simple bound constraints are handled separately and there is a special class for them: .. autosummary:: :toctree: generated/ Bounds - Bound constraints. Quasi-Newton strategies implementing `HessianUpdateStrategy` interface can be used to approximate the Hessian in `minimize` function (available only for the 'trust-constr' method). Available quasi-Newton methods implementing this interface are: .. autosummary:: :toctree: generated/ BFGS - Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. SR1 - Symmetric-rank-1 Hessian update strategy. The `minimize_scalar` function supports the following methods: .. toctree:: optimize.minimize_scalar-brent optimize.minimize_scalar-bounded optimize.minimize_scalar-golden The specific optimization method interfaces below in this subsection are not recommended for use in new scripts; all of these methods are accessible via a newer, more consistent interface provided by the functions above. General-purpose multivariate methods: .. autosummary:: :toctree: generated/ fmin - Nelder-Mead Simplex algorithm fmin_powell - Powell's (modified) level set method fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm fmin_bfgs - Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno) fmin_ncg - Line-search Newton Conjugate Gradient Constrained multivariate methods: .. autosummary:: :toctree: generated/ fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer fmin_tnc - Truncated Newton code fmin_cobyla - Constrained optimization by linear approximation fmin_slsqp - Minimization using sequential least-squares programming differential_evolution - stochastic minimization using differential evolution Univariate (scalar) minimization methods: .. autosummary:: :toctree: generated/ fminbound - Bounded minimization of a scalar function brent - 1-D function minimization using Brent method golden - 1-D function minimization using Golden Section method Equation (Local) Minimizers --------------------------- .. autosummary:: :toctree: generated/ leastsq - Minimize the sum of squares of M equations in N unknowns least_squares - Feature-rich least-squares minimization. 
nnls - Linear least-squares problem with non-negativity constraint lsq_linear - Linear least-squares problem with bound constraints Global Optimization ------------------- .. autosummary:: :toctree: generated/ basinhopping - Basinhopping stochastic optimizer brute - Brute force searching optimizer differential_evolution - stochastic minimization using differential evolution Rosenbrock function ------------------- .. autosummary:: :toctree: generated/ rosen - The Rosenbrock function. rosen_der - The derivative of the Rosenbrock function. rosen_hess - The Hessian matrix of the Rosenbrock function. rosen_hess_prod - Product of the Rosenbrock Hessian with a vector. Fitting ======= .. autosummary:: :toctree: generated/ curve_fit -- Fit curve to a set of points Root finding ============ Scalar functions ---------------- .. autosummary:: :toctree: generated/ brentq - quadratic interpolation Brent method brenth - Brent method, modified by Harris with hyperbolic extrapolation ridder - Ridder's method bisect - Bisection method newton - Secant method or Newton's method Fixed point finding: .. autosummary:: :toctree: generated/ fixed_point - Single-variable fixed-point solver Multidimensional ---------------- General nonlinear solvers: .. autosummary:: :toctree: generated/ root - Unified interface for nonlinear solvers of multivariate functions fsolve - Non-linear multi-variable equation solver broyden1 - Broyden's first method broyden2 - Broyden's second method The `root` function supports the following methods: .. toctree:: optimize.root-hybr optimize.root-lm optimize.root-broyden1 optimize.root-broyden2 optimize.root-anderson optimize.root-linearmixing optimize.root-diagbroyden optimize.root-excitingmixing optimize.root-krylov optimize.root-dfsane Large-scale nonlinear solvers: .. autosummary:: :toctree: generated/ newton_krylov anderson Simple iterations: .. autosummary:: :toctree: generated/ excitingmixing linearmixing diagbroyden :mod:`Additional information on the nonlinear solvers <scipy.optimize.nonlin>` Linear Programming ================== General linear programming solver: .. autosummary:: :toctree: generated/ linprog -- Unified interface for minimizers of linear programming problems The `linprog` function supports the following methods: .. toctree:: optimize.linprog-simplex optimize.linprog-interior-point The simplex method supports callback functions, such as: .. autosummary:: :toctree: generated/ linprog_verbose_callback -- Sample callback function for linprog (simplex) Assignment problems: .. autosummary:: :toctree: generated/ linear_sum_assignment -- Solves the linear-sum assignment problem Utilities ========= .. 
autosummary:: :toctree: generated/ approx_fprime - Approximate the gradient of a scalar function bracket - Bracket a minimum, given two starting points check_grad - Check the supplied derivative using finite differences line_search - Return a step that satisfies the strong Wolfe conditions show_options - Show specific options for the optimization solvers LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian HessianUpdateStrategy - Interface for implementing Hessian update strategies """ from __future__ import division, print_function, absolute_import from .optimize import * from ._minimize import * from ._root import * from .minpack import * from .zeros import * from .lbfgsb import fmin_l_bfgs_b, LbfgsInvHessProduct from .tnc import fmin_tnc from .cobyla import fmin_cobyla from .nonlin import * from .slsqp import fmin_slsqp from .nnls import nnls from ._basinhopping import basinhopping from ._linprog import linprog, linprog_verbose_callback from ._hungarian import linear_sum_assignment from ._differentialevolution import differential_evolution from ._lsq import least_squares, lsq_linear from ._constraints import (NonlinearConstraint, LinearConstraint, Bounds) from ._hessian_update_strategy import HessianUpdateStrategy, BFGS, SR1 __all__ = [s for s in dir() if not s.startswith('_')] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
7,953
25.871622
80
py
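As a quick, runnable illustration of the unified `minimize` interface documented above (the starting point and method choice here are arbitrary):

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
# BFGS with an analytic gradient; any other listed method would also work.
res = minimize(rosen, x0, method='BFGS', jac=rosen_der)
print(res.success, res.x)  # True, with res.x close to all ones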
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/minpack.py
from __future__ import division, print_function, absolute_import import threading import warnings from . import _minpack import numpy as np from numpy import (atleast_1d, dot, take, triu, shape, eye, transpose, zeros, product, greater, array, all, where, isscalar, asarray, inf, abs, finfo, inexact, issubdtype, dtype) from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError from scipy._lib._util import _asarray_validated, _lazywhere from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning from ._lsq import least_squares from ._lsq.common import make_strictly_feasible from ._lsq.least_squares import prepare_bounds _MINPACK_LOCK = threading.RLock() error = _minpack.error __all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit'] def _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape=None): res = atleast_1d(thefunc(*((x0[:numinputs],) + args))) if (output_shape is not None) and (shape(res) != output_shape): if (output_shape[0] != 1): if len(output_shape) > 1: if output_shape[1] == 1: return shape(res) msg = "%s: there is a mismatch between the input and output " \ "shape of the '%s' argument" % (checker, argname) func_name = getattr(thefunc, '__name__', None) if func_name: msg += " '%s'." % func_name else: msg += "." msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res)) raise TypeError(msg) if issubdtype(res.dtype, inexact): dt = res.dtype else: dt = dtype(float) return shape(res), dt def fsolve(func, x0, args=(), fprime=None, full_output=0, col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None, epsfcn=None, factor=100, diag=None): """ Find the roots of a function. Return the roots of the (non-linear) equations defined by ``func(x) = 0`` given a starting estimate. Parameters ---------- func : callable ``f(x, *args)`` A function that takes at least one (possibly vector) argument, and returns a value of the same length. x0 : ndarray The starting estimate for the roots of ``func(x) = 0``. args : tuple, optional Any extra arguments to `func`. fprime : callable ``f(x, *args)``, optional A function to compute the Jacobian of `func` with derivatives across the rows. By default, the Jacobian will be estimated. full_output : bool, optional If True, return optional outputs. col_deriv : bool, optional Specify whether the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). xtol : float, optional The calculation will terminate if the relative error between two consecutive iterates is at most `xtol`. maxfev : int, optional The maximum number of calls to the function. If zero, then ``100*(N+1)`` is the maximum where N is the number of elements in `x0`. band : tuple, optional If set to a two-sequence containing the number of sub- and super-diagonals within the band of the Jacobi matrix, the Jacobi matrix is considered banded (only for ``fprime=None``). epsfcn : float, optional A suitable step length for the forward-difference approximation of the Jacobian (for ``fprime=None``). If `epsfcn` is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float, optional A parameter determining the initial step bound (``factor * || diag * x||``). Should be in the interval ``(0.1, 100)``. diag : sequence, optional N positive entries that serve as a scale factors for the variables. Returns ------- x : ndarray The solution (or the result of the last iteration for an unsuccessful call). 
infodict : dict A dictionary of optional outputs with the keys: ``nfev`` number of function calls ``njev`` number of Jacobian calls ``fvec`` function evaluated at the output ``fjac`` the orthogonal matrix, q, produced by the QR factorization of the final approximate Jacobian matrix, stored column wise ``r`` upper triangular matrix produced by QR factorization of the same matrix ``qtf`` the vector ``(transpose(q) * fvec)`` ier : int An integer flag. Set to 1 if a solution was found, otherwise refer to `mesg` for more information. mesg : str If no solution is found, `mesg` details the cause of failure. See Also -------- root : Interface to root finding algorithms for multivariate functions. See the 'hybr' `method` in particular. Notes ----- ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms. """ options = {'col_deriv': col_deriv, 'xtol': xtol, 'maxfev': maxfev, 'band': band, 'eps': epsfcn, 'factor': factor, 'diag': diag} res = _root_hybr(func, x0, args, jac=fprime, **options) if full_output: x = res['x'] info = dict((k, res.get(k)) for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res) info['fvec'] = res['fun'] return x, info, res['status'], res['message'] else: status = res['status'] msg = res['message'] if status == 0: raise TypeError(msg) elif status == 1: pass elif status in [2, 3, 4, 5]: warnings.warn(msg, RuntimeWarning) else: raise TypeError(msg) return res['x'] def _root_hybr(func, x0, args=(), jac=None, col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None, factor=100, diag=None, **unknown_options): """ Find the roots of a multivariate function using MINPACK's hybrd and hybrj routines (modified Powell method). Options ------- col_deriv : bool Specify whether the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). xtol : float The calculation will terminate if the relative error between two consecutive iterates is at most `xtol`. maxfev : int The maximum number of calls to the function. If zero, then ``100*(N+1)`` is the maximum where N is the number of elements in `x0`. band : tuple If set to a two-sequence containing the number of sub- and super-diagonals within the band of the Jacobi matrix, the Jacobi matrix is considered banded (only for ``fprime=None``). eps : float A suitable step length for the forward-difference approximation of the Jacobian (for ``fprime=None``). If `eps` is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float A parameter determining the initial step bound (``factor * || diag * x||``). Should be in the interval ``(0.1, 100)``. diag : sequence N positive entries that serve as a scale factors for the variables. 
""" _check_unknown_options(unknown_options) epsfcn = eps x0 = asarray(x0).flatten() n = len(x0) if not isinstance(args, tuple): args = (args,) shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,)) if epsfcn is None: epsfcn = finfo(dtype).eps Dfun = jac if Dfun is None: if band is None: ml, mu = -10, -10 else: ml, mu = band[:2] if maxfev == 0: maxfev = 200 * (n + 1) with _MINPACK_LOCK: retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev, ml, mu, epsfcn, factor, diag) else: _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n)) if (maxfev == 0): maxfev = 100 * (n + 1) with _MINPACK_LOCK: retval = _minpack._hybrj(func, Dfun, x0, args, 1, col_deriv, xtol, maxfev, factor, diag) x, status = retval[0], retval[-1] errors = {0: "Improper input parameters were entered.", 1: "The solution converged.", 2: "The number of calls to function has " "reached maxfev = %d." % maxfev, 3: "xtol=%f is too small, no further improvement " "in the approximate\n solution " "is possible." % xtol, 4: "The iteration is not making good progress, as measured " "by the \n improvement from the last five " "Jacobian evaluations.", 5: "The iteration is not making good progress, " "as measured by the \n improvement from the last " "ten iterations.", 'unknown': "An error occurred."} info = retval[1] info['fun'] = info.pop('fvec') sol = OptimizeResult(x=x, success=(status == 1), status=status) sol.update(info) try: sol['message'] = errors[status] except KeyError: sol['message'] = errors['unknown'] return sol def leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): """ Minimize the sum of squares of a set of equations. :: x = arg min(sum(func(y)**2,axis=0)) y Parameters ---------- func : callable should take at least one (possibly length N vector) argument and returns M floating point numbers. It must not return NaNs or fitting might fail. x0 : ndarray The starting estimate for the minimization. args : tuple, optional Any extra arguments to func are placed in this tuple. Dfun : callable, optional A function or method to compute the Jacobian of func with derivatives across the rows. If this is None, the Jacobian will be estimated. full_output : bool, optional non-zero to return all optional outputs. col_deriv : bool, optional non-zero to specify that the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). ftol : float, optional Relative error desired in the sum of squares. xtol : float, optional Relative error desired in the approximate solution. gtol : float, optional Orthogonality desired between the function vector and the columns of the Jacobian. maxfev : int, optional The maximum number of calls to the function. If `Dfun` is provided then the default `maxfev` is 100*(N+1) where N is the number of elements in x0, otherwise the default `maxfev` is 200*(N+1). epsfcn : float, optional A variable used in determining a suitable step length for the forward- difference approximation of the Jacobian (for Dfun=None). Normally the actual step length will be sqrt(epsfcn)*x If epsfcn is less than the machine precision, it is assumed that the relative errors are of the order of the machine precision. factor : float, optional A parameter determining the initial step bound (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. diag : sequence, optional N positive entries that serve as a scale factors for the variables. 
Returns ------- x : ndarray The solution (or the result of the last iteration for an unsuccessful call). cov_x : ndarray Uses the fjac and ipvt optional outputs to construct an estimate of the jacobian around the solution. None if a singular matrix encountered (indicates very flat curvature in some direction). This matrix must be multiplied by the residual variance to get the covariance of the parameter estimates -- see curve_fit. infodict : dict a dictionary of optional outputs with the keys: ``nfev`` The number of function calls ``fvec`` The function evaluated at the output ``fjac`` A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated. ``ipvt`` An integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix. ``qtf`` The vector (transpose(q) * fvec). mesg : str A string message giving information about the cause of failure. ier : int An integer flag. If it is equal to 1, 2, 3 or 4, the solution was found. Otherwise, the solution was not found. In either case, the optional output variable 'mesg' gives more information. Notes ----- "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. cov_x is a Jacobian approximation to the Hessian of the least squares objective function. This approximation assumes that the objective function is based on the difference between some observed target data (ydata) and a (non-linear) function of the parameters `f(xdata, params)` :: func(params) = ydata - f(xdata, params) so that the objective function is :: min sum((ydata - f(xdata, params))**2, axis=0) params The solution, `x`, is always a 1D array, regardless of the shape of `x0`, or whether `x0` is a scalar. """ x0 = asarray(x0).flatten() n = len(x0) if not isinstance(args, tuple): args = (args,) shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) m = shape[0] if n > m: raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m)) if epsfcn is None: epsfcn = finfo(dtype).eps if Dfun is None: if maxfev == 0: maxfev = 200*(n + 1) with _MINPACK_LOCK: retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol, maxfev, epsfcn, factor, diag) else: if col_deriv: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) else: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) if maxfev == 0: maxfev = 100 * (n + 1) with _MINPACK_LOCK: retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv, ftol, xtol, gtol, maxfev, factor, diag) errors = {0: ["Improper input parameters.", TypeError], 1: ["Both actual and predicted relative reductions " "in the sum of squares\n are at most %f" % ftol, None], 2: ["The relative error between two consecutive " "iterates is at most %f" % xtol, None], 3: ["Both actual and predicted relative reductions in " "the sum of squares\n are at most %f and the " "relative error between two consecutive " "iterates is at \n most %f" % (ftol, xtol), None], 4: ["The cosine of the angle between func(x) and any " "column of the\n Jacobian is at most %f in " "absolute value" % gtol, None], 5: ["Number of calls to function has reached " "maxfev = %d."
% maxfev, ValueError], 6: ["ftol=%f is too small, no further reduction " "in the sum of squares\n is possible.""" % ftol, ValueError], 7: ["xtol=%f is too small, no further improvement in " "the approximate\n solution is possible." % xtol, ValueError], 8: ["gtol=%f is too small, func(x) is orthogonal to the " "columns of\n the Jacobian to machine " "precision." % gtol, ValueError], 'unknown': ["Unknown error.", TypeError]} info = retval[-1] # The FORTRAN return value if info not in [1, 2, 3, 4] and not full_output: if info in [5, 6, 7, 8]: warnings.warn(errors[info][0], RuntimeWarning) else: try: raise errors[info][1](errors[info][0]) except KeyError: raise errors['unknown'][1](errors['unknown'][0]) mesg = errors[info][0] if full_output: cov_x = None if info in [1, 2, 3, 4]: from numpy.dual import inv perm = take(eye(n), retval[1]['ipvt'] - 1, 0) r = triu(transpose(retval[1]['fjac'])[:n, :]) R = dot(r, perm) try: cov_x = inv(dot(transpose(R), R)) except (LinAlgError, ValueError): pass return (retval[0], cov_x) + retval[1:-1] + (mesg, info) else: return (retval[0], info) def _wrap_func(func, xdata, ydata, transform): if transform is None: def func_wrapped(params): return func(xdata, *params) - ydata elif transform.ndim == 1: def func_wrapped(params): return transform * (func(xdata, *params) - ydata) else: # Chisq = (y - yd)^T C^{-1} (y-yd) # transform = L such that C = L L^T # C^{-1} = L^{-T} L^{-1} # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd) # Define (y-yd)' = L^{-1} (y-yd) # by solving # L (y-yd)' = (y-yd) # and minimize (y-yd)'^T (y-yd)' def func_wrapped(params): return solve_triangular(transform, func(xdata, *params) - ydata, lower=True) return func_wrapped def _wrap_jac(jac, xdata, transform): if transform is None: def jac_wrapped(params): return jac(xdata, *params) elif transform.ndim == 1: def jac_wrapped(params): return transform[:, np.newaxis] * np.asarray(jac(xdata, *params)) else: def jac_wrapped(params): return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True) return jac_wrapped def _initialize_feasible(lb, ub): p0 = np.ones_like(lb) lb_finite = np.isfinite(lb) ub_finite = np.isfinite(ub) mask = lb_finite & ub_finite p0[mask] = 0.5 * (lb[mask] + ub[mask]) mask = lb_finite & ~ub_finite p0[mask] = lb[mask] + 1 mask = ~lb_finite & ub_finite p0[mask] = ub[mask] - 1 return p0 def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, check_finite=True, bounds=(-np.inf, np.inf), method=None, jac=None, **kwargs): """ Use non-linear least squares to fit a function, f, to data. Assumes ``ydata = f(xdata, *params) + eps`` Parameters ---------- f : callable The model function, f(x, ...). It must take the independent variable as the first argument and the parameters to fit as separate remaining arguments. xdata : An M-length sequence or an (k,M)-shaped array for functions with k predictors The independent variable where the data is measured. ydata : M-length sequence The dependent data --- nominally f(xdata, ...) p0 : None, scalar, or N-length sequence, optional Initial guess for the parameters. If None, then the initial values will all be 1 (if the number of parameters for the function can be determined using introspection, otherwise a ValueError is raised). sigma : None or M-length sequence or MxM array, optional Determines the uncertainty in `ydata`. 
If we define residuals as ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma` depends on its number of dimensions: - A 1-d `sigma` should contain values of standard deviations of errors in `ydata`. In this case, the optimized function is ``chisq = sum((r / sigma) ** 2)``. - A 2-d `sigma` should contain the covariance matrix of errors in `ydata`. In this case, the optimized function is ``chisq = r.T @ inv(sigma) @ r``. .. versionadded:: 0.19 None (default) is equivalent to a 1-d `sigma` filled with ones. absolute_sigma : bool, optional If True, `sigma` is used in an absolute sense and the estimated parameter covariance `pcov` reflects these absolute values. If False, only the relative magnitudes of the `sigma` values matter. The returned parameter covariance matrix `pcov` is based on scaling `sigma` by a constant factor. This constant is set by demanding that the reduced `chisq` for the optimal parameters `popt` when using the *scaled* `sigma` equals unity. In other words, `sigma` is scaled to match the sample variance of the residuals after the fit. Mathematically, ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)`` check_finite : bool, optional If True, check that the input arrays do not contain nans or infs, and raise a ValueError if they do. Setting this parameter to False may silently produce nonsensical results if the input arrays do contain nans. Default is True. bounds : 2-tuple of array_like, optional Lower and upper bounds on parameters. Defaults to no bounds. Each element of the tuple must be either an array with the length equal to the number of parameters, or a scalar (in which case the bound is taken to be the same for all parameters.) Use ``np.inf`` with an appropriate sign to disable bounds on all or some parameters. .. versionadded:: 0.17 method : {'lm', 'trf', 'dogbox'}, optional Method to use for optimization. See `least_squares` for more details. Default is 'lm' for unconstrained problems and 'trf' if `bounds` are provided. The method 'lm' won't work when the number of observations is less than the number of variables, use 'trf' or 'dogbox' in this case. .. versionadded:: 0.17 jac : callable, string or None, optional Function with signature ``jac(x, ...)`` which computes the Jacobian matrix of the model function with respect to parameters as a dense array_like structure. It will be scaled according to provided `sigma`. If None (default), the Jacobian will be estimated numerically. String keywords for 'trf' and 'dogbox' methods can be used to select a finite difference scheme, see `least_squares`. .. versionadded:: 0.18 kwargs Keyword arguments passed to `leastsq` for ``method='lm'`` or `least_squares` otherwise. Returns ------- popt : array Optimal values for the parameters so that the sum of the squared residuals of ``f(xdata, *popt) - ydata`` is minimized pcov : 2d array The estimated covariance of popt. The diagonals provide the variance of the parameter estimate. To compute one standard deviation errors on the parameters use ``perr = np.sqrt(np.diag(pcov))``. How the `sigma` parameter affects the estimated covariance depends on `absolute_sigma` argument, as described above. If the Jacobian matrix at the solution doesn't have a full rank, then 'lm' method returns a matrix filled with ``np.inf``, on the other hand 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute the covariance matrix. Raises ------ ValueError if either `ydata` or `xdata` contain NaNs, or if incompatible options are used.
RuntimeError if the least-squares minimization fails. OptimizeWarning if covariance of the parameters can not be estimated. See Also -------- least_squares : Minimize the sum of squares of nonlinear functions. scipy.stats.linregress : Calculate a linear least squares regression for two sets of measurements. Notes ----- With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm through `leastsq`. Note that this algorithm can only deal with unconstrained problems. Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to the docstring of `least_squares` for more information. Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.optimize import curve_fit >>> def func(x, a, b, c): ... return a * np.exp(-b * x) + c Define the data to be fit with some noise: >>> xdata = np.linspace(0, 4, 50) >>> y = func(xdata, 2.5, 1.3, 0.5) >>> np.random.seed(1729) >>> y_noise = 0.2 * np.random.normal(size=xdata.size) >>> ydata = y + y_noise >>> plt.plot(xdata, ydata, 'b-', label='data') Fit for the parameters a, b, c of the function `func`: >>> popt, pcov = curve_fit(func, xdata, ydata) >>> popt array([ 2.55423706, 1.35190947, 0.47450618]) >>> plt.plot(xdata, func(xdata, *popt), 'r-', ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) Constrain the optimization to the region of ``0 <= a <= 3``, ``0 <= b <= 1`` and ``0 <= c <= 0.5``: >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5])) >>> popt array([ 2.43708906, 1. , 0.35015434]) >>> plt.plot(xdata, func(xdata, *popt), 'g--', ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) >>> plt.xlabel('x') >>> plt.ylabel('y') >>> plt.legend() >>> plt.show() """ if p0 is None: # determine number of parameters by inspecting the function from scipy._lib._util import getargspec_no_self as _getargspec args, varargs, varkw, defaults = _getargspec(f) if len(args) < 2: raise ValueError("Unable to determine number of fit parameters.") n = len(args) - 1 else: p0 = np.atleast_1d(p0) n = p0.size lb, ub = prepare_bounds(bounds, n) if p0 is None: p0 = _initialize_feasible(lb, ub) bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) if method is None: if bounded_problem: method = 'trf' else: method = 'lm' if method == 'lm' and bounded_problem: raise ValueError("Method 'lm' only works for unconstrained problems. " "Use 'trf' or 'dogbox' instead.") # NaNs can not be handled if check_finite: ydata = np.asarray_chkfinite(ydata) else: ydata = np.asarray(ydata) if isinstance(xdata, (list, tuple, np.ndarray)): # `xdata` is passed straight to the user-defined `f`, so allow # non-array_like `xdata`. 
if check_finite: xdata = np.asarray_chkfinite(xdata) else: xdata = np.asarray(xdata) # Determine type of sigma if sigma is not None: sigma = np.asarray(sigma) # if 1-d, sigma are errors, define transform = 1/sigma if sigma.shape == (ydata.size, ): transform = 1.0 / sigma # if 2-d, sigma is the covariance matrix, # define transform = L such that L L^T = C elif sigma.shape == (ydata.size, ydata.size): try: # scipy.linalg.cholesky requires lower=True to return L L^T = A transform = cholesky(sigma, lower=True) except LinAlgError: raise ValueError("`sigma` must be positive definite.") else: raise ValueError("`sigma` has incorrect shape.") else: transform = None func = _wrap_func(f, xdata, ydata, transform) if callable(jac): jac = _wrap_jac(jac, xdata, transform) elif jac is None and method != 'lm': jac = '2-point' if method == 'lm': # Remove full_output from kwargs, otherwise we're passing it in twice. return_full = kwargs.pop('full_output', False) res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) popt, pcov, infodict, errmsg, ier = res cost = np.sum(infodict['fvec'] ** 2) if ier not in [1, 2, 3, 4]: raise RuntimeError("Optimal parameters not found: " + errmsg) else: # Rename maxfev (leastsq) to max_nfev (least_squares), if specified. if 'max_nfev' not in kwargs: kwargs['max_nfev'] = kwargs.pop('maxfev', None) res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, **kwargs) if not res.success: raise RuntimeError("Optimal parameters not found: " + res.message) cost = 2 * res.cost # res.cost is half sum of squares! popt = res.x # Do Moore-Penrose inverse discarding zero singular values. _, s, VT = svd(res.jac, full_matrices=False) threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] s = s[s > threshold] VT = VT[:s.size] pcov = np.dot(VT.T / s**2, VT) return_full = False warn_cov = False if pcov is None: # indeterminate covariance pcov = zeros((len(popt), len(popt)), dtype=float) pcov.fill(inf) warn_cov = True elif not absolute_sigma: if ydata.size > p0.size: s_sq = cost / (ydata.size - p0.size) pcov = pcov * s_sq else: pcov.fill(inf) warn_cov = True if warn_cov: warnings.warn('Covariance of the parameters could not be estimated', category=OptimizeWarning) if return_full: return popt, pcov, infodict, errmsg, ier else: return popt, pcov def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0): """Perform a simple check on the gradient for correctness. 
""" x = atleast_1d(x0) n = len(x) x = x.reshape((n,)) fvec = atleast_1d(fcn(x, *args)) m = len(fvec) fvec = fvec.reshape((m,)) ldfjac = m fjac = atleast_1d(Dfcn(x, *args)) fjac = fjac.reshape((m, n)) if col_deriv == 0: fjac = transpose(fjac) xp = zeros((n,), float) err = zeros((m,), float) fvecp = None with _MINPACK_LOCK: _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err) fvecp = atleast_1d(fcn(xp, *args)) fvecp = fvecp.reshape((m,)) with _MINPACK_LOCK: _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err) good = (product(greater(err, 0.5), axis=0)) return (good, err) def _del2(p0, p1, d): return p0 - np.square(p1 - p0) / d def _relerr(actual, desired): return (actual - desired) / desired def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel): p0 = x0 for i in range(maxiter): p1 = func(p0, *args) if use_accel: p2 = func(p1, *args) d = p2 - 2.0 * p1 + p0 p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2) else: p = p1 relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p) if np.all(np.abs(relerr) < xtol): return p p0 = p msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) raise RuntimeError(msg) def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'): """ Find a fixed point of the function. Given a function of one or more variables and a starting point, find a fixed-point of the function: i.e. where ``func(x0) == x0``. Parameters ---------- func : function Function to evaluate. x0 : array_like Fixed point of function. args : tuple, optional Extra arguments to `func`. xtol : float, optional Convergence tolerance, defaults to 1e-08. maxiter : int, optional Maximum number of iterations, defaults to 500. method : {"del2", "iteration"}, optional Method of finding the fixed-point, defaults to "del2" which uses Steffensen's Method with Aitken's ``Del^2`` convergence acceleration [1]_. The "iteration" method simply iterates the function until convergence is detected, without attempting to accelerate the convergence. References ---------- .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80 Examples -------- >>> from scipy import optimize >>> def func(x, c1, c2): ... return np.sqrt(c1/(x+c2)) >>> c1 = np.array([10,12.]) >>> c2 = np.array([3, 5.]) >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2)) array([ 1.4920333 , 1.37228132]) """ use_accel = {'del2': True, 'iteration': False}[method] x0 = _asarray_validated(x0, as_inexact=True) return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
33,844
36.39779
91
py
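The `fsolve` wrapper above carries no usage example in its docstring; here is a minimal sketch (the two-equation system is invented for illustration):

import numpy as np
from scipy.optimize import fsolve

def equations(x):
    # Two nonlinear equations in two unknowns, F(x) = 0.
    return [x[0] * np.cos(x[1]) - 4,
            x[1] * x[0] - x[1] - 5]

root = fsolve(equations, [1.0, 1.0])
print(root, np.allclose(equations(root), 0.0))  # root near [6.50, 0.91], True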
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_constraints.py
"""Constraints definition for minimize.""" from __future__ import division, print_function, absolute_import import numpy as np from ._hessian_update_strategy import BFGS from ._differentiable_functions import ( VectorFunction, LinearVectorFunction, IdentityVectorFunction) class NonlinearConstraint(object): """Nonlinear constraint on the variables. The constraint has the general inequality form:: lb <= fun(x) <= ub Here the vector of independent variables x is passed as ndarray of shape (n,) and ``fun`` returns a vector with m components. It is possible to use equal bounds to represent an equality constraint or infinite bounds to represent a one-sided constraint. Parameters ---------- fun : callable The function defining the constraint. The signature is ``fun(x) -> array_like, shape (m,)``. lb, ub : array_like Lower and upper bounds on the constraint. Each array must have the shape (m,) or be a scalar, in the latter case a bound will be the same for all components of the constraint. Use ``np.inf`` with an appropriate sign to specify a one-sided constraint. Set components of `lb` and `ub` equal to represent an equality constraint. Note that you can mix constraints of different types: interval, one-sided or equality, by setting different components of `lb` and `ub` as necessary. jac : {callable, '2-point', '3-point', 'cs'}, optional Method of computing the Jacobian matrix (an m-by-n matrix, where element (i, j) is the partial derivative of f[i] with respect to x[j]). The keywords {'2-point', '3-point', 'cs'} select a finite difference scheme for the numerical estimation. A callable must have the following signature: ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``. Default is '2-point'. hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional Method for computing the Hessian matrix. The keywords {'2-point', '3-point', 'cs'} select a finite difference scheme for numerical estimation. Alternatively, objects implementing `HessianUpdateStrategy` interface can be used to approximate the Hessian. Currently available implementations are: - `BFGS` (default option) - `SR1` A callable must return the Hessian matrix of ``dot(fun, v)`` and must have the following signature: ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``. Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers. keep_feasible : array_like of bool, optional Whether to keep the constraint components feasible throughout iterations. A single value set this property for all components. Default is False. Has no effect for equality constraints. finite_diff_rel_step: None or array_like, optional Relative step size for the finite difference approximation. Default is None, which will select a reasonable value automatically depending on a finite difference scheme. finite_diff_jac_sparsity: {None, array_like, sparse matrix}, optional Defines the sparsity structure of the Jacobian matrix for finite difference estimation, its shape must be (m, n). If the Jacobian has only few non-zero elements in *each* row, providing the sparsity structure will greatly speed up the computations. A zero entry means that a corresponding element in the Jacobian is identically zero. If provided, forces the use of 'lsmr' trust-region solver. If None (default) then dense differencing will be used. Notes ----- Finite difference schemes {'2-point', '3-point', 'cs'} may be used for approximating either the Jacobian or the Hessian. We, however, do not allow its use for approximating both simultaneously. 
Hence whenever the Jacobian is estimated via finite-differences, we require the Hessian to be estimated using one of the quasi-Newton strategies. The scheme 'cs' is potentially the most accurate, but requires the function to correctly handles complex inputs and be analytically continuable to the complex plane. The scheme '3-point' is more accurate than '2-point' but requires twice as many operations. """ def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(), keep_feasible=False, finite_diff_rel_step=None, finite_diff_jac_sparsity=None): self.fun = fun self.lb = lb self.ub = ub self.finite_diff_rel_step = finite_diff_rel_step self.finite_diff_jac_sparsity = finite_diff_jac_sparsity self.jac = jac self.hess = hess self.keep_feasible = keep_feasible class LinearConstraint(object): """Linear constraint on the variables. The constraint has the general inequality form:: lb <= A.dot(x) <= ub Here the vector of independent variables x is passed as ndarray of shape (n,) and the matrix A has shape (m, n). It is possible to use equal bounds to represent an equality constraint or infinite bounds to represent a one-sided constraint. Parameters ---------- A : {array_like, sparse matrix}, shape (m, n) Matrix defining the constraint. lb, ub : array_like Lower and upper bounds on the constraint. Each array must have the shape (m,) or be a scalar, in the latter case a bound will be the same for all components of the constraint. Use ``np.inf`` with an appropriate sign to specify a one-sided constraint. Set components of `lb` and `ub` equal to represent an equality constraint. Note that you can mix constraints of different types: interval, one-sided or equality, by setting different components of `lb` and `ub` as necessary. keep_feasible : array_like of bool, optional Whether to keep the constraint components feasible throughout iterations. A single value set this property for all components. Default is False. Has no effect for equality constraints. """ def __init__(self, A, lb, ub, keep_feasible=False): self.A = A self.lb = lb self.ub = ub self.keep_feasible = keep_feasible class Bounds(object): """Bounds constraint on the variables. The constraint has the general inequality form:: lb <= x <= ub It is possible to use equal bounds to represent an equality constraint or infinite bounds to represent a one-sided constraint. Parameters ---------- lb, ub : array_like, optional Lower and upper bounds on independent variables. Each array must have the same size as x or be a scalar, in which case a bound will be the same for all the variables. Set components of `lb` and `ub` equal to fix a variable. Use ``np.inf`` with an appropriate sign to disable bounds on all or some variables. Note that you can mix constraints of different types: interval, one-sided or equality, by setting different components of `lb` and `ub` as necessary. keep_feasible : array_like of bool, optional Whether to keep the constraint components feasible throughout iterations. A single value set this property for all components. Default is False. Has no effect for equality constraints. """ def __init__(self, lb, ub, keep_feasible=False): self.lb = lb self.ub = ub self.keep_feasible = keep_feasible class PreparedConstraint(object): """Constraint prepared from a user defined constraint. On creation it will check whether a constraint definition is valid and the initial point is feasible. If created successfully, it will contain the attributes listed below. 
Parameters ---------- constraint : {NonlinearConstraint, LinearConstraint`, Bounds} Constraint to check and prepare. x0 : array_like Initial vector of independent variables. sparse_jacobian : bool or None, optional If bool, then the Jacobian of the constraint will be converted to the corresponded format if necessary. If None (default), such conversion is not made. finite_diff_bounds : 2-tuple, optional Lower and upper bounds on the independent variables for the finite difference approximation, if applicable. Defaults to no bounds. Attributes ---------- fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction} Function defining the constraint wrapped by one of the convenience classes. bounds : 2-tuple Contains lower and upper bounds for the constraints --- lb and ub. These are converted to ndarray and have a size equal to the number of the constraints. keep_feasible : ndarray Array indicating which components must be kept feasible with a size equal to the number of the constraints. """ def __init__(self, constraint, x0, sparse_jacobian=None, finite_diff_bounds=(-np.inf, np.inf)): if isinstance(constraint, NonlinearConstraint): fun = VectorFunction(constraint.fun, x0, constraint.jac, constraint.hess, constraint.finite_diff_rel_step, constraint.finite_diff_jac_sparsity, finite_diff_bounds, sparse_jacobian) elif isinstance(constraint, LinearConstraint): fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian) elif isinstance(constraint, Bounds): fun = IdentityVectorFunction(x0, sparse_jacobian) else: raise ValueError("`constraint` of an unknown type is passed.") m = fun.m lb = np.asarray(constraint.lb, dtype=float) ub = np.asarray(constraint.ub, dtype=float) if lb.ndim == 0: lb = np.resize(lb, m) if ub.ndim == 0: ub = np.resize(ub, m) keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool) if keep_feasible.ndim == 0: keep_feasible = np.resize(keep_feasible, m) if keep_feasible.shape != (m,): raise ValueError("`keep_feasible` has a wrong shape.") mask = keep_feasible & (lb != ub) f0 = fun.f if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]): raise ValueError("`x0` is infeasible with respect to some " "inequality constraint with `keep_feasible` " "set to True.") self.fun = fun self.bounds = (lb, ub) self.keep_feasible = keep_feasible def new_bounds_to_old(lb, ub, n): """Convert the new bounds representation to the old one. The new representation is a tuple (lb, ub) and the old one is a list containing n tuples, i-th containing lower and upper bound on a i-th variable. """ lb = np.asarray(lb) ub = np.asarray(ub) if lb.ndim == 0: lb = np.resize(lb, n) if ub.ndim == 0: ub = np.resize(ub, n) lb = [x if x > -np.inf else None for x in lb] ub = [x if x < np.inf else None for x in ub] return list(zip(lb, ub)) def old_bound_to_new(bounds): """Convert the old bounds representation to the new one. The new representation is a tuple (lb, ub) and the old one is a list containing n tuples, i-th containing lower and upper bound on a i-th variable. """ lb, ub = zip(*bounds) lb = np.array([x if x is not None else -np.inf for x in lb]) ub = np.array([x if x is not None else np.inf for x in ub]) return lb, ub def strict_bounds(lb, ub, keep_feasible, n_vars): """Remove bounds which are not asked to be kept feasible.""" strict_lb = np.resize(lb, n_vars).astype(float) strict_ub = np.resize(ub, n_vars).astype(float) keep_feasible = np.resize(keep_feasible, n_vars) strict_lb[~keep_feasible] = -np.inf strict_ub[~keep_feasible] = np.inf return strict_lb, strict_ub
12,264
41.884615
88
py
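A short sketch showing the three constraint classes defined above fed to `minimize` with the 'trust-constr' method; the objective, bounds and constraint data are chosen arbitrarily for illustration:

import numpy as np
from scipy.optimize import (minimize, rosen, Bounds,
                            LinearConstraint, NonlinearConstraint)

bounds = Bounds([0, -0.5], [1.0, 2.0])            # elementwise lb <= x <= ub
linear = LinearConstraint([[1, 2], [2, 1]],       # lb <= A.dot(x) <= ub
                          [-np.inf, 1], [1, 1])
nonlinear = NonlinearConstraint(lambda x: x[0]**2 + x[1], -np.inf, 1)

res = minimize(rosen, [0.5, 0], method='trust-constr',
               bounds=bounds, constraints=[linear, nonlinear])
print(res.x)  # roughly [0.41, 0.17] for this setup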
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/nnls.py
from __future__ import division, print_function, absolute_import from . import _nnls from numpy import asarray_chkfinite, zeros, double __all__ = ['nnls'] def nnls(A, b, maxiter=None): """ Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper for a FORTRAN non-negative least squares solver. Parameters ---------- A : ndarray Matrix ``A`` as shown above. b : ndarray Right-hand side vector. maxiter: int, optional Maximum number of iterations, optional. Default is ``3 * A.shape[1]``. Returns ------- x : ndarray Solution vector. rnorm : float The residual, ``|| Ax-b ||_2``. Notes ----- The FORTRAN code was published in the book below. The algorithm is an active set method. It solves the KKT (Karush-Kuhn-Tucker) conditions for the non-negative least squares problem. References ---------- Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM """ A, b = map(asarray_chkfinite, (A, b)) if len(A.shape) != 2: raise ValueError("expected matrix") if len(b.shape) != 1: raise ValueError("expected vector") m, n = A.shape if m != b.shape[0]: raise ValueError("incompatible dimensions") maxiter = -1 if maxiter is None else int(maxiter) w = zeros((n,), dtype=double) zz = zeros((m,), dtype=double) index = zeros((n,), dtype=int) x, rnorm, mode = _nnls.nnls(A, m, n, b, w, zz, index, maxiter) if mode != 1: raise RuntimeError("too many iterations") return x, rnorm
1,616
23.5
71
py
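A minimal usage sketch for the `nnls` wrapper above (the data are invented so that the non-negativity constraint actually binds):

import numpy as np
from scipy.optimize import nnls

A = np.array([[1., 0.],
              [1., 0.],
              [0., 1.]])
b = np.array([2., 1., -1.])

x, rnorm = nnls(A, b)
print(x)      # [1.5, 0.0]; unconstrained least squares would give x[1] = -1
print(rnorm)  # about 1.22, the residual norm ||Ax - b||_2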
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/cobyla.py
""" Interface to Constrained Optimization By Linear Approximation Functions --------- .. autosummary:: :toctree: generated/ fmin_cobyla """ from __future__ import division, print_function, absolute_import import numpy as np from scipy._lib.six import callable from scipy.optimize import _cobyla from .optimize import OptimizeResult, _check_unknown_options try: from itertools import izip except ImportError: izip = zip __all__ = ['fmin_cobyla'] def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4): """ Minimize a function using the Constrained Optimization BY Linear Approximation (COBYLA) method. This method wraps a FORTRAN implementation of the algorithm. Parameters ---------- func : callable Function to minimize. In the form func(x, \\*args). x0 : ndarray Initial guess. cons : sequence Constraint functions; must all be ``>=0`` (a single function if only 1 constraint). Each function takes the parameters `x` as its first argument, and it can return either a single number or an array or list of numbers. args : tuple, optional Extra arguments to pass to function. consargs : tuple, optional Extra arguments to pass to constraint functions (default of None means use same extra arguments as those passed to func). Use ``()`` for no extra arguments. rhobeg : float, optional Reasonable initial changes to the variables. rhoend : float, optional Final accuracy in the optimization (not precisely guaranteed). This is a lower bound on the size of the trust region. disp : {0, 1, 2, 3}, optional Controls the frequency of output; 0 implies no output. maxfun : int, optional Maximum number of function evaluations. catol : float, optional Absolute tolerance for constraint violations. Returns ------- x : ndarray The argument that minimises `f`. See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'COBYLA' `method` in particular. Notes ----- This algorithm is based on linear approximations to the objective function and each constraint. We briefly describe the algorithm. Suppose the function is being minimized over k variables. At the jth iteration the algorithm has k+1 points v_1, ..., v_(k+1), an approximate solution x_j, and a radius RHO_j. (i.e. linear plus a constant) approximations to the objective function and constraint functions such that their function values agree with the linear approximation on the k+1 points v_1,.., v_(k+1). This gives a linear program to solve (where the linear approximations of the constraint functions are constrained to be non-negative). However the linear approximations are likely only good approximations near the current simplex, so the linear program is given the further requirement that the solution, which will become x_(j+1), must be within RHO_j from x_j. RHO_j only decreases, never increases. The initial RHO_j is rhobeg and the final RHO_j is rhoend. In this way COBYLA's iterations behave like a trust region algorithm. Additionally, the linear program may be inconsistent, or the approximation may give poor improvement. For details about how these issues are resolved, as well as how the points v_i are updated, refer to the source code or the references below. References ---------- Powell M.J.D. (1994), "A direct search optimization method that models the objective and constraint functions by linear interpolation.", in Advances in Optimization and Numerical Analysis, eds. S. Gomez and J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67 Powell M.J.D. 
(1998), "Direct search algorithms for optimization calculations", Acta Numerica 7, 287-336 Powell M.J.D. (2007), "A view of algorithms for optimization without derivatives", Cambridge University Technical Report DAMTP 2007/NA03 Examples -------- Minimize the objective function f(x,y) = x*y subject to the constraints x**2 + y**2 < 1 and y > 0:: >>> def objective(x): ... return x[0]*x[1] ... >>> def constr1(x): ... return 1 - (x[0]**2 + x[1]**2) ... >>> def constr2(x): ... return x[1] ... >>> from scipy.optimize import fmin_cobyla >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7) array([-0.70710685, 0.70710671]) The exact solution is (-sqrt(2)/2, sqrt(2)/2). """ err = "cons must be a sequence of callable functions or a single"\ " callable function." try: len(cons) except TypeError: if callable(cons): cons = [cons] else: raise TypeError(err) else: for thisfunc in cons: if not callable(thisfunc): raise TypeError(err) if consargs is None: consargs = args # build constraints con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons) # options opts = {'rhobeg': rhobeg, 'tol': rhoend, 'disp': disp, 'maxiter': maxfun, 'catol': catol} sol = _minimize_cobyla(func, x0, args, constraints=con, **opts) if disp and not sol['success']: print("COBYLA failed to find a solution: %s" % (sol.message,)) return sol['x'] def _minimize_cobyla(fun, x0, args=(), constraints=(), rhobeg=1.0, tol=1e-4, maxiter=1000, disp=False, catol=2e-4, **unknown_options): """ Minimize a scalar function of one or more variables using the Constrained Optimization BY Linear Approximation (COBYLA) algorithm. Options ------- rhobeg : float Reasonable initial changes to the variables. tol : float Final accuracy in the optimization (not precisely guaranteed). This is a lower bound on the size of the trust region. disp : bool Set to True to print convergence messages. If False, `verbosity` is ignored as set to 0. maxiter : int Maximum number of function evaluations. catol : float Tolerance (absolute) for constraint violations """ _check_unknown_options(unknown_options) maxfun = maxiter rhoend = tol iprint = int(bool(disp)) # check constraints if isinstance(constraints, dict): constraints = (constraints, ) for ic, con in enumerate(constraints): # check type try: ctype = con['type'].lower() except KeyError: raise KeyError('Constraint %d has no type defined.' % ic) except TypeError: raise TypeError('Constraints must be defined using a ' 'dictionary.') except AttributeError: raise TypeError("Constraint's type must be a string.") else: if ctype != 'ineq': raise ValueError("Constraints of type '%s' not handled by " "COBYLA." % con['type']) # check function if 'fun' not in con: raise KeyError('Constraint %d has no function defined.' 
% ic) # check extra arguments if 'args' not in con: con['args'] = () # m is the total number of constraint values # it takes into account that some constraints may be vector-valued cons_lengths = [] for c in constraints: f = c['fun'](x0, *c['args']) try: cons_length = len(f) except TypeError: cons_length = 1 cons_lengths.append(cons_length) m = sum(cons_lengths) def calcfc(x, con): f = fun(x, *args) i = 0 for size, c in izip(cons_lengths, constraints): con[i: i + size] = c['fun'](x, *c['args']) i += size return f info = np.zeros(4, np.float64) xopt, info = _cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg, rhoend=rhoend, iprint=iprint, maxfun=maxfun, dinfo=info) if info[3] > catol: # Check constraint violation info[0] = 4 return OptimizeResult(x=xopt, status=int(info[0]), success=info[0] == 1, message={1: 'Optimization terminated successfully.', 2: 'Maximum number of function evaluations ' 'has been exceeded.', 3: 'Rounding errors are becoming damaging ' 'in COBYLA subroutine.', 4: 'Did not converge to a solution ' 'satisfying the constraints. See ' '`maxcv` for magnitude of violation.' }.get(info[0], 'Unknown exit status.'), nfev=int(info[1]), fun=info[2], maxcv=info[3])
9,402
33.443223
79
py
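The problem from the `fmin_cobyla` docstring can equivalently go through the `minimize` front end, which dispatches to `_minimize_cobyla` above with constraints given as 'ineq' dictionaries:

from scipy.optimize import minimize

cons = ({'type': 'ineq', 'fun': lambda x: 1 - x[0]**2 - x[1]**2},  # unit disk
        {'type': 'ineq', 'fun': lambda x: x[1]})                   # y >= 0

res = minimize(lambda x: x[0] * x[1], [0.0, 0.1],
               method='COBYLA', constraints=cons, tol=1e-7)
print(res.x)  # approximately [-0.7071, 0.7071], matching fmin_cobyla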
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/nonlin.py
r""" Nonlinear solvers ----------------- .. currentmodule:: scipy.optimize This is a collection of general-purpose nonlinear multidimensional solvers. These solvers find *x* for which *F(x) = 0*. Both *x* and *F* can be multidimensional. Routines ~~~~~~~~ Large-scale nonlinear solvers: .. autosummary:: newton_krylov anderson General nonlinear solvers: .. autosummary:: broyden1 broyden2 Simple iterations: .. autosummary:: excitingmixing linearmixing diagbroyden Examples ~~~~~~~~ **Small problem** >>> def F(x): ... return np.cos(x) + x[::-1] - [1, 2, 3, 4] >>> import scipy.optimize >>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14) >>> x array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251]) >>> np.cos(x) + x[::-1] array([ 1., 2., 3., 4.]) **Large problem** Suppose that we needed to solve the following integrodifferential equation on the square :math:`[0,1]\times[0,1]`: .. math:: \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2 with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of the square. The solution can be found using the `newton_krylov` solver: .. plot:: import numpy as np from scipy.optimize import newton_krylov from numpy import cosh, zeros_like, mgrid, zeros # parameters nx, ny = 75, 75 hx, hy = 1./(nx-1), 1./(ny-1) P_left, P_right = 0, 0 P_top, P_bottom = 1, 0 def residual(P): d2x = zeros_like(P) d2y = zeros_like(P) d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy return d2x + d2y - 10*cosh(P).mean()**2 # solve guess = zeros((nx, ny), float) sol = newton_krylov(residual, guess, method='lgmres', verbose=1) print('Residual: %g' % abs(residual(sol)).max()) # visualize import matplotlib.pyplot as plt x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)] plt.pcolor(x, y, sol) plt.colorbar() plt.show() """ # Copyright (C) 2009, Pauli Virtanen <pav@iki.fi> # Distributed under the same license as Scipy. 
from __future__ import division, print_function, absolute_import import sys import numpy as np from scipy._lib.six import callable, exec_, xrange from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError from numpy import asarray, dot, vdot import scipy.sparse.linalg import scipy.sparse from scipy.linalg import get_blas_funcs import inspect from scipy._lib._util import getargspec_no_self as _getargspec from .linesearch import scalar_search_wolfe1, scalar_search_armijo __all__ = [ 'broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'newton_krylov'] #------------------------------------------------------------------------------ # Utility functions #------------------------------------------------------------------------------ class NoConvergence(Exception): pass def maxnorm(x): return np.absolute(x).max() def _as_inexact(x): """Return `x` as an array, of either floats or complex floats""" x = asarray(x) if not np.issubdtype(x.dtype, np.inexact): return asarray(x, dtype=np.float_) return x def _array_like(x, x0): """Return ndarray `x` as the same array subclass and shape as `x0`""" x = np.reshape(x, np.shape(x0)) wrap = getattr(x0, '__array_wrap__', x.__array_wrap__) return wrap(x) def _safe_norm(v): if not np.isfinite(v).all(): return np.array(np.inf) return norm(v) #------------------------------------------------------------------------------ # Generic nonlinear solver machinery #------------------------------------------------------------------------------ _doc_parts = dict( params_basic=""" F : function(x) -> f Function whose root to find; should take and return an array-like object. xin : array_like Initial guess for the solution """.strip(), params_extra=""" iter : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. verbose : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. f_tol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. f_rtol : float, optional Relative tolerance for the residual. If omitted, not used. x_tol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. x_rtol : float, optional Relative minimum step size. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. callback : function, optional Optional callback function. It is called on every iteration as ``callback(x, f)`` where `x` is the current solution and `f` the corresponding residual. Returns ------- sol : ndarray An array (of similar array type as `x0`) containing the final solution. Raises ------ NoConvergence When a solution was not found. """.strip() ) def _set_doc(obj): if obj.__doc__: obj.__doc__ = obj.__doc__ % _doc_parts def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, full_output=False, raise_exception=True): """ Find a root of a function, in a way suitable for large-scale problems.
Parameters ---------- %(params_basic)s jacobian : Jacobian A Jacobian approximation: `Jacobian` object or something that `asjacobian` can transform to one. Alternatively, a string specifying which of the builtin Jacobian approximations to use: krylov, broyden1, broyden2, anderson, diagbroyden, linearmixing, excitingmixing %(params_extra)s full_output : bool If true, returns a dictionary `info` containing convergence information. raise_exception : bool If True, a `NoConvergence` exception is raised if no solution is found. See Also -------- asjacobian, Jacobian Notes ----- This algorithm implements the inexact Newton method, with backtracking or full line searches. Several Jacobian approximations are available, including Krylov and Quasi-Newton methods. References ---------- .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear Equations\". Society for Industrial and Applied Mathematics. (1995) http://www.siam.org/books/kelley/fr16/index.php """ condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol, x_tol=x_tol, x_rtol=x_rtol, iter=iter, norm=tol_norm) x0 = _as_inexact(x0) func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten() x = x0.flatten() dx = np.inf Fx = func(x) Fx_norm = norm(Fx) jacobian = asjacobian(jacobian) jacobian.setup(x.copy(), Fx, func) if maxiter is None: if iter is not None: maxiter = iter + 1 else: maxiter = 100*(x.size+1) if line_search is True: line_search = 'armijo' elif line_search is False: line_search = None if line_search not in (None, 'armijo', 'wolfe'): raise ValueError("Invalid line search") # Solver tolerance selection gamma = 0.9 eta_max = 0.9999 eta_threshold = 0.1 eta = 1e-3 for n in xrange(maxiter): status = condition.check(Fx, x, dx) if status: break # The tolerance, as computed for scipy.sparse.linalg.* routines tol = min(eta, eta*Fx_norm) dx = -jacobian.solve(Fx, tol=tol) if norm(dx) == 0: raise ValueError("Jacobian inversion yielded zero vector. " "This indicates a bug in the Jacobian " "approximation.") # Line search, or Newton step if line_search: s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx, line_search) else: s = 1.0 x = x + dx Fx = func(x) Fx_norm_new = norm(Fx) jacobian.update(x.copy(), Fx) if callback: callback(x, Fx) # Adjust forcing parameters for inexact methods eta_A = gamma * Fx_norm_new**2 / Fx_norm**2 if gamma * eta**2 < eta_threshold: eta = min(eta_max, eta_A) else: eta = min(eta_max, max(eta_A, gamma*eta**2)) Fx_norm = Fx_norm_new # Print status if verbose: sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % ( n, norm(Fx), s, eta)) sys.stdout.flush() else: if raise_exception: raise NoConvergence(_array_like(x, x0)) else: status = 2 if full_output: info = {'nit': condition.iteration, 'fun': Fx, 'status': status, 'success': status == 1, 'message': {1: 'A solution was found at the specified ' 'tolerance.', 2: 'The maximum number of iterations allowed ' 'has been reached.'
}[status] } return _array_like(x, x0), info else: return _array_like(x, x0) _set_doc(nonlin_solve) def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8, smin=1e-2): tmp_s = [0] tmp_Fx = [Fx] tmp_phi = [norm(Fx)**2] s_norm = norm(x) / norm(dx) def phi(s, store=True): if s == tmp_s[0]: return tmp_phi[0] xt = x + s*dx v = func(xt) p = _safe_norm(v)**2 if store: tmp_s[0] = s tmp_phi[0] = p tmp_Fx[0] = v return p def derphi(s): ds = (abs(s) + s_norm + 1) * rdiff return (phi(s+ds, store=False) - phi(s)) / ds if search_type == 'wolfe': s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0], xtol=1e-2, amin=smin) elif search_type == 'armijo': s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], amin=smin) if s is None: # XXX: No suitable step length found. Take the full Newton step, # and hope for the best. s = 1.0 x = x + s*dx if s == tmp_s[0]: Fx = tmp_Fx[0] else: Fx = func(x) Fx_norm = norm(Fx) return s, x, Fx, Fx_norm class TerminationCondition(object): """ Termination condition for an iteration. It is terminated if - |F| < f_rtol*|F_0|, AND - |F| < f_tol AND - |dx| < x_rtol*|x|, AND - |dx| < x_tol """ def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, iter=None, norm=maxnorm): if f_tol is None: f_tol = np.finfo(np.float_).eps ** (1./3) if f_rtol is None: f_rtol = np.inf if x_tol is None: x_tol = np.inf if x_rtol is None: x_rtol = np.inf self.x_tol = x_tol self.x_rtol = x_rtol self.f_tol = f_tol self.f_rtol = f_rtol if norm is None: self.norm = maxnorm else: self.norm = norm self.iter = iter self.f0_norm = None self.iteration = 0 def check(self, f, x, dx): self.iteration += 1 f_norm = self.norm(f) x_norm = self.norm(x) dx_norm = self.norm(dx) if self.f0_norm is None: self.f0_norm = f_norm if f_norm == 0: return 1 if self.iter is not None: # backwards compatibility with Scipy 0.6.0 return 2 * (self.iteration > self.iter) # NB: condition must succeed for rtol=inf even if norm == 0 return int((f_norm <= self.f_tol and f_norm/self.f_rtol <= self.f0_norm) and (dx_norm <= self.x_tol and dx_norm/self.x_rtol <= x_norm)) #------------------------------------------------------------------------------ # Generic Jacobian approximation #------------------------------------------------------------------------------ class Jacobian(object): """ Common interface for Jacobians or Jacobian approximations. The optional methods come in useful when implementing trust region etc. algorithms that often require evaluating transposes of the Jacobian. Methods ------- solve Returns J^-1 * v update Updates Jacobian to point `x` (where the function has residual `Fx`) matvec : optional Returns J * v rmatvec : optional Returns J^H * v rsolve : optional Returns J^-H * v matmat : optional Returns J * V, where V is a dense matrix with dimensions (N,K). todense : optional Form the dense Jacobian matrix. Necessary for dense trust region algorithms, and useful for testing. Attributes ---------- shape Matrix dimensions (M, N) dtype Data type of the matrix.
func : callable, optional Function the Jacobian corresponds to """ def __init__(self, **kw): names = ["solve", "update", "matvec", "rmatvec", "rsolve", "matmat", "todense", "shape", "dtype"] for name, value in kw.items(): if name not in names: raise ValueError("Unknown keyword argument %s" % name) if value is not None: setattr(self, name, kw[name]) if hasattr(self, 'todense'): self.__array__ = lambda: self.todense() def aspreconditioner(self): return InverseJacobian(self) def solve(self, v, tol=0): raise NotImplementedError def update(self, x, F): pass def setup(self, x, F, func): self.func = func self.shape = (F.size, x.size) self.dtype = F.dtype if self.__class__.setup is Jacobian.setup: # Call on the first point unless overridden self.update(x, F) class InverseJacobian(object): def __init__(self, jacobian): self.jacobian = jacobian self.matvec = jacobian.solve self.update = jacobian.update if hasattr(jacobian, 'setup'): self.setup = jacobian.setup if hasattr(jacobian, 'rsolve'): self.rmatvec = jacobian.rsolve @property def shape(self): return self.jacobian.shape @property def dtype(self): return self.jacobian.dtype def asjacobian(J): """ Convert given object to one suitable for use as a Jacobian. """ spsolve = scipy.sparse.linalg.spsolve if isinstance(J, Jacobian): return J elif inspect.isclass(J) and issubclass(J, Jacobian): return J() elif isinstance(J, np.ndarray): if J.ndim > 2: raise ValueError('array must have rank <= 2') J = np.atleast_2d(np.asarray(J)) if J.shape[0] != J.shape[1]: raise ValueError('array must be square') return Jacobian(matvec=lambda v: dot(J, v), rmatvec=lambda v: dot(J.conj().T, v), solve=lambda v: solve(J, v), rsolve=lambda v: solve(J.conj().T, v), dtype=J.dtype, shape=J.shape) elif scipy.sparse.isspmatrix(J): if J.shape[0] != J.shape[1]: raise ValueError('matrix must be square') return Jacobian(matvec=lambda v: J*v, rmatvec=lambda v: J.conj().T * v, solve=lambda v: spsolve(J, v), rsolve=lambda v: spsolve(J.conj().T, v), dtype=J.dtype, shape=J.shape) elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'): return Jacobian(matvec=getattr(J, 'matvec'), rmatvec=getattr(J, 'rmatvec'), solve=J.solve, rsolve=getattr(J, 'rsolve'), update=getattr(J, 'update'), setup=getattr(J, 'setup'), dtype=J.dtype, shape=J.shape) elif callable(J): # Assume it's a function J(x) that returns the Jacobian class Jac(Jacobian): def update(self, x, F): self.x = x def solve(self, v, tol=0): m = J(self.x) if isinstance(m, np.ndarray): return solve(m, v) elif scipy.sparse.isspmatrix(m): return spsolve(m, v) else: raise ValueError("Unknown matrix type") def matvec(self, v): m = J(self.x) if isinstance(m, np.ndarray): return dot(m, v) elif scipy.sparse.isspmatrix(m): return m*v else: raise ValueError("Unknown matrix type") def rsolve(self, v, tol=0): m = J(self.x) if isinstance(m, np.ndarray): return solve(m.conj().T, v) elif scipy.sparse.isspmatrix(m): return spsolve(m.conj().T, v) else: raise ValueError("Unknown matrix type") def rmatvec(self, v): m = J(self.x) if isinstance(m, np.ndarray): return dot(m.conj().T, v) elif scipy.sparse.isspmatrix(m): return m.conj().T * v else: raise ValueError("Unknown matrix type") return Jac() elif isinstance(J, str): return dict(broyden1=BroydenFirst, broyden2=BroydenSecond, anderson=Anderson, diagbroyden=DiagBroyden, linearmixing=LinearMixing, excitingmixing=ExcitingMixing, krylov=KrylovJacobian)[J]() else: raise TypeError('Cannot convert object to a Jacobian') 
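# --- Hedged usage sketch (an illustrative comment, not part of the original
# scipy source): `asjacobian` accepts dense arrays, sparse matrices, callables
# and registered strings, and wraps them behind the `Jacobian` interface::
#
#     >>> A = np.array([[2.0, 0.0], [0.0, 4.0]])
#     >>> jac = asjacobian(A)
#     >>> jac.matvec(np.array([2.0, 4.0]))   # computes A @ v
#     array([  4.,  16.])
#     >>> jac.solve(np.array([2.0, 4.0]))    # computes A^-1 @ v
#     array([ 1.,  1.])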
#------------------------------------------------------------------------------ # Broyden #------------------------------------------------------------------------------ class GenericBroyden(Jacobian): def setup(self, x0, f0, func): Jacobian.setup(self, x0, f0, func) self.last_f = f0 self.last_x = x0 if hasattr(self, 'alpha') and self.alpha is None: # Autoscale the initial Jacobian parameter # unless we have already guessed the solution. normf0 = norm(f0) if normf0: self.alpha = 0.5*max(norm(x0), 1) / normf0 else: self.alpha = 1.0 def _update(self, x, f, dx, df, dx_norm, df_norm): raise NotImplementedError def update(self, x, f): df = f - self.last_f dx = x - self.last_x self._update(x, f, dx, df, norm(dx), norm(df)) self.last_f = f self.last_x = x class LowRankMatrix(object): r""" A matrix represented as .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger However, if the rank of the matrix reaches the dimension of the vectors, full matrix representation will be used from then on. """ def __init__(self, alpha, n, dtype): self.alpha = alpha self.cs = [] self.ds = [] self.n = n self.dtype = dtype self.collapsed = None @staticmethod def _matvec(v, alpha, cs, ds): axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'], cs[:1] + [v]) w = alpha * v for c, d in zip(cs, ds): a = dotc(d, v) w = axpy(c, w, w.size, a) return w @staticmethod def _solve(v, alpha, cs, ds): """Evaluate w = M^-1 v""" if len(cs) == 0: return v/alpha # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1 axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v]) c0 = cs[0] A = alpha * np.identity(len(cs), dtype=c0.dtype) for i, d in enumerate(ds): for j, c in enumerate(cs): A[i,j] += dotc(d, c) q = np.zeros(len(cs), dtype=c0.dtype) for j, d in enumerate(ds): q[j] = dotc(d, v) q /= alpha q = solve(A, q) w = v/alpha for c, qc in zip(cs, q): w = axpy(c, w, w.size, -qc) return w def matvec(self, v): """Evaluate w = M v""" if self.collapsed is not None: return np.dot(self.collapsed, v) return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds) def rmatvec(self, v): """Evaluate w = M^H v""" if self.collapsed is not None: return np.dot(self.collapsed.T.conj(), v) return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs) def solve(self, v, tol=0): """Evaluate w = M^-1 v""" if self.collapsed is not None: return solve(self.collapsed, v) return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) def rsolve(self, v, tol=0): """Evaluate w = M^-H v""" if self.collapsed is not None: return solve(self.collapsed.T.conj(), v) return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) def append(self, c, d): if self.collapsed is not None: self.collapsed += c[:,None] * d[None,:].conj() return self.cs.append(c) self.ds.append(d) if len(self.cs) > c.size: self.collapse() def __array__(self): if self.collapsed is not None: return self.collapsed Gm = self.alpha*np.identity(self.n, dtype=self.dtype) for c, d in zip(self.cs, self.ds): Gm += c[:,None]*d[None,:].conj() return Gm def collapse(self): """Collapse the low-rank matrix to a full-rank one.""" self.collapsed = np.array(self) self.cs = None self.ds = None self.alpha = None def restart_reduce(self, rank): """ Reduce the rank of the matrix by dropping all vectors. """ if self.collapsed is not None: return assert rank > 0 if len(self.cs) > rank: del self.cs[:] del self.ds[:] def simple_reduce(self, rank): """ Reduce the rank of the matrix by dropping oldest vectors.
""" if self.collapsed is not None: return assert rank > 0 while len(self.cs) > rank: del self.cs[0] del self.ds[0] def svd_reduce(self, max_rank, to_retain=None): """ Reduce the rank of the matrix by retaining some SVD components. This corresponds to the \"Broyden Rank Reduction Inverse\" algorithm described in [1]_. Note that the SVD decomposition can be done by solving only a problem whose size is the effective rank of this matrix, which is viable even for large problems. Parameters ---------- max_rank : int Maximum rank of this matrix after reduction. to_retain : int, optional Number of SVD components to retain when reduction is done (ie. rank > max_rank). Default is ``max_rank - 2``. References ---------- .. [1] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ if self.collapsed is not None: return p = max_rank if to_retain is not None: q = to_retain else: q = p - 2 if self.cs: p = min(p, len(self.cs[0])) q = max(0, min(q, p-1)) m = len(self.cs) if m < p: # nothing to do return C = np.array(self.cs).T D = np.array(self.ds).T D, R = qr(D, mode='economic') C = dot(C, R.T.conj()) U, S, WH = svd(C, full_matrices=False, compute_uv=True) C = dot(C, inv(WH)) D = dot(D, WH.T.conj()) for k in xrange(q): self.cs[k] = C[:,k].copy() self.ds[k] = D[:,k].copy() del self.cs[q:] del self.ds[q:] _doc_parts['broyden_params'] = """ alpha : float, optional Initial guess for the Jacobian is ``(-1/alpha)``. reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart``: drop all matrix columns. Has no extra parameters. - ``simple``: drop oldest matrix column. Has no extra parameters. - ``svd``: keep only the most significant SVD components. Takes an extra parameter, ``to_retain``, which determines the number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (ie., no rank reduction). """.strip() class BroydenFirst(GenericBroyden): r""" Find a root of a function, using Broyden's first Jacobian approximation. This method is also known as \"Broyden's good method\". Parameters ---------- %(params_basic)s %(broyden_params)s %(params_extra)s Notes ----- This algorithm implements the inverse Jacobian Quasi-Newton update .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df) which corresponds to Broyden's first Jacobian update .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx References ---------- .. [1] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). 
http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ def __init__(self, alpha=None, reduction_method='restart', max_rank=None): GenericBroyden.__init__(self) self.alpha = alpha self.Gm = None if max_rank is None: max_rank = np.inf self.max_rank = max_rank if isinstance(reduction_method, str): reduce_params = () else: reduce_params = reduction_method[1:] reduction_method = reduction_method[0] reduce_params = (max_rank - 1,) + reduce_params if reduction_method == 'svd': self._reduce = lambda: self.Gm.svd_reduce(*reduce_params) elif reduction_method == 'simple': self._reduce = lambda: self.Gm.simple_reduce(*reduce_params) elif reduction_method == 'restart': self._reduce = lambda: self.Gm.restart_reduce(*reduce_params) else: raise ValueError("Unknown rank reduction method '%s'" % reduction_method) def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype) def todense(self): return inv(self.Gm) def solve(self, f, tol=0): r = self.Gm.matvec(f) if not np.isfinite(r).all(): # singular; reset the Jacobian approximation self.setup(self.last_x, self.last_f, self.func) return self.Gm.matvec(f) def matvec(self, f): return self.Gm.solve(f) def rsolve(self, f, tol=0): return self.Gm.rmatvec(f) def rmatvec(self, f): return self.Gm.rsolve(f) def _update(self, x, f, dx, df, dx_norm, df_norm): self._reduce() # reduce first to preserve secant condition v = self.Gm.rmatvec(dx) c = dx - self.Gm.matvec(df) d = v / vdot(df, v) self.Gm.append(c, d) class BroydenSecond(BroydenFirst): """ Find a root of a function, using Broyden\'s second Jacobian approximation. This method is also known as \"Broyden's bad method\". Parameters ---------- %(params_basic)s %(broyden_params)s %(params_extra)s Notes ----- This algorithm implements the inverse Jacobian Quasi-Newton update .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df) corresponding to Broyden's second method. References ---------- .. [1] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ def _update(self, x, f, dx, df, dx_norm, df_norm): self._reduce() # reduce first to preserve secant condition v = df c = dx - self.Gm.matvec(df) d = v / df_norm**2 self.Gm.append(c, d) #------------------------------------------------------------------------------ # Broyden-like (restricted memory) #------------------------------------------------------------------------------ class Anderson(GenericBroyden): """ Find a root of a function, using (extended) Anderson mixing. The Jacobian is formed by finding the 'best' solution in the space spanned by the last `M` vectors. As a result, only an MxM matrix inversion and MxN multiplications are required. [Ey]_ Parameters ---------- %(params_basic)s alpha : float, optional Initial guess for the Jacobian is (-1/alpha). M : int, optional Number of previous vectors to retain. Defaults to 5. w0 : float, optional Regularization parameter for numerical stability. Compared to unity, good values are of the order of 0.01. %(params_extra)s References ---------- .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
""" # Note: # # Anderson method maintains a rank M approximation of the inverse Jacobian, # # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v # A = W + dF^H dF # W = w0^2 diag(dF^H dF) # # so that for w0 = 0 the secant condition applies for last M iterates, ie., # # J^-1 df_j = dx_j # # for all j = 0 ... M-1. # # Moreover, (from Sherman-Morrison-Woodbury formula) # # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v # C = (dX + alpha dF) A^-1 # b = -1/alpha # # and after simplification # # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v # def __init__(self, alpha=None, w0=0.01, M=5): GenericBroyden.__init__(self) self.alpha = alpha self.M = M self.dx = [] self.df = [] self.gamma = None self.w0 = w0 def solve(self, f, tol=0): dx = -self.alpha*f n = len(self.dx) if n == 0: return dx df_f = np.empty(n, dtype=f.dtype) for k in xrange(n): df_f[k] = vdot(self.df[k], f) try: gamma = solve(self.a, df_f) except LinAlgError: # singular; reset the Jacobian approximation del self.dx[:] del self.df[:] return dx for m in xrange(n): dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m]) return dx def matvec(self, f): dx = -f/self.alpha n = len(self.dx) if n == 0: return dx df_f = np.empty(n, dtype=f.dtype) for k in xrange(n): df_f[k] = vdot(self.df[k], f) b = np.empty((n, n), dtype=f.dtype) for i in xrange(n): for j in xrange(n): b[i,j] = vdot(self.df[i], self.dx[j]) if i == j and self.w0 != 0: b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha gamma = solve(b, df_f) for m in xrange(n): dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha) return dx def _update(self, x, f, dx, df, dx_norm, df_norm): if self.M == 0: return self.dx.append(dx) self.df.append(df) while len(self.dx) > self.M: self.dx.pop(0) self.df.pop(0) n = len(self.dx) a = np.zeros((n, n), dtype=f.dtype) for i in xrange(n): for j in xrange(i, n): if i == j: wd = self.w0**2 else: wd = 0 a[i,j] = (1+wd)*vdot(self.df[i], self.df[j]) a += np.triu(a, 1).T.conj() self.a = a #------------------------------------------------------------------------------ # Simple iterations #------------------------------------------------------------------------------ class DiagBroyden(GenericBroyden): """ Find a root of a function, using diagonal Broyden Jacobian approximation. The Jacobian approximation is derived from previous iterations, by retaining only the diagonal of Broyden matrices. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional Initial guess for the Jacobian is (-1/alpha). %(params_extra)s """ def __init__(self, alpha=None): GenericBroyden.__init__(self) self.alpha = alpha def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha def solve(self, f, tol=0): return -f / self.d def matvec(self, f): return -f * self.d def rsolve(self, f, tol=0): return -f / self.d.conj() def rmatvec(self, f): return -f * self.d.conj() def todense(self): return np.diag(-self.d) def _update(self, x, f, dx, df, dx_norm, df_norm): self.d -= (df + self.d*dx)*dx/dx_norm**2 class LinearMixing(GenericBroyden): """ Find a root of a function, using a scalar Jacobian approximation. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional The Jacobian approximation is (-1/alpha). 
%(params_extra)s """ def __init__(self, alpha=None): GenericBroyden.__init__(self) self.alpha = alpha def solve(self, f, tol=0): return -f*self.alpha def matvec(self, f): return -f/self.alpha def rsolve(self, f, tol=0): return -f*np.conj(self.alpha) def rmatvec(self, f): return -f/np.conj(self.alpha) def todense(self): return np.diag(-np.ones(self.shape[0])/self.alpha) def _update(self, x, f, dx, df, dx_norm, df_norm): pass class ExcitingMixing(GenericBroyden): """ Find a root of a function, using a tuned diagonal Jacobian approximation. The Jacobian matrix is diagonal and is tuned on each iteration. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional Initial Jacobian approximation is (-1/alpha). alphamax : float, optional The entries of the diagonal Jacobian are kept in the range ``[alpha, alphamax]``. %(params_extra)s """ def __init__(self, alpha=None, alphamax=1.0): GenericBroyden.__init__(self) self.alpha = alpha self.alphamax = alphamax self.beta = None def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype) def solve(self, f, tol=0): return -f*self.beta def matvec(self, f): return -f/self.beta def rsolve(self, f, tol=0): return -f*self.beta.conj() def rmatvec(self, f): return -f/self.beta.conj() def todense(self): return np.diag(-1/self.beta) def _update(self, x, f, dx, df, dx_norm, df_norm): incr = f*self.last_f > 0 self.beta[incr] += self.alpha self.beta[~incr] = self.alpha np.clip(self.beta, 0, self.alphamax, out=self.beta) #------------------------------------------------------------------------------ # Iterative/Krylov approximated Jacobians #------------------------------------------------------------------------------ class KrylovJacobian(Jacobian): r""" Find a root of a function, using Krylov approximation for inverse Jacobian. This method is suitable for solving large-scale problems. Parameters ---------- %(params_basic)s rdiff : float, optional Relative step size to use in numerical differentiation. method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function Krylov method to use to approximate the Jacobian. Can be a string, or a function implementing the same interface as the iterative solvers in `scipy.sparse.linalg`. The default is `scipy.sparse.linalg.lgmres`. inner_M : LinearOperator or InverseJacobian Preconditioner for the inner Krylov iteration. Note that you can also use inverse Jacobians as (adaptive) preconditioners. For example, >>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian >>> from scipy.optimize.nonlin import InverseJacobian >>> jac = BroydenFirst() >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac)) If the preconditioner has a method named 'update', it will be called as ``update(x, f)`` after each nonlinear step, with ``x`` giving the current point, and ``f`` the current function value. inner_tol, inner_maxiter, ... Parameters to pass on to the \"inner\" Krylov solver. See `scipy.sparse.linalg.gmres` for details. outer_k : int, optional Size of the subspace kept across LGMRES nonlinear iterations. See `scipy.sparse.linalg.lgmres` for details. %(params_extra)s See Also -------- scipy.sparse.linalg.gmres scipy.sparse.linalg.lgmres Notes ----- This function implements a Newton-Krylov solver. The basic idea is to compute the inverse of the Jacobian with an iterative Krylov method.
These methods require only evaluating the Jacobian-vector products, which are conveniently approximated by a finite difference: .. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega Due to the use of iterative matrix inverses, these methods can deal with large nonlinear problems. Scipy's `scipy.sparse.linalg` module offers a selection of Krylov solvers to choose from. The default here is `lgmres`, which is a variant of restarted GMRES iteration that reuses some of the information obtained in the previous Newton steps to invert Jacobians in subsequent steps. For a review on Newton-Krylov methods, see for example [1]_, and for the LGMRES sparse inverse method, see [2]_. References ---------- .. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004). :doi:`10.1016/j.jcp.2003.08.010` .. [2] A.H. Baker and E.R. Jessup and T. Manteuffel, SIAM J. Matrix Anal. Appl. 26, 962 (2005). :doi:`10.1137/S0895479803422014` """ def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20, inner_M=None, outer_k=10, **kw): self.preconditioner = inner_M self.rdiff = rdiff self.method = dict( bicgstab=scipy.sparse.linalg.bicgstab, gmres=scipy.sparse.linalg.gmres, lgmres=scipy.sparse.linalg.lgmres, cgs=scipy.sparse.linalg.cgs, minres=scipy.sparse.linalg.minres, ).get(method, method) self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner) if self.method is scipy.sparse.linalg.gmres: # Replace GMRES's outer iteration with Newton steps self.method_kw['restrt'] = inner_maxiter self.method_kw['maxiter'] = 1 self.method_kw.setdefault('atol', 0) elif self.method is scipy.sparse.linalg.gcrotmk: self.method_kw.setdefault('atol', 0) elif self.method is scipy.sparse.linalg.lgmres: self.method_kw['outer_k'] = outer_k # Replace LGMRES's outer iteration with Newton steps self.method_kw['maxiter'] = 1 # Carry LGMRES's `outer_v` vectors across nonlinear iterations self.method_kw.setdefault('outer_v', []) self.method_kw.setdefault('prepend_outer_v', True) # But don't carry the corresponding Jacobian*v products, in case # the Jacobian changes a lot in the nonlinear step # # XXX: some trust-region inspired ideas might be more efficient... # See eg. Brown & Saad. But needs to be implemented separately # since it's not an inexact Newton method. 
self.method_kw.setdefault('store_outer_Av', False) self.method_kw.setdefault('atol', 0) for key, value in kw.items(): if not key.startswith('inner_'): raise ValueError("Unknown parameter %s" % key) self.method_kw[key[6:]] = value def _update_diff_step(self): mx = abs(self.x0).max() mf = abs(self.f0).max() self.omega = self.rdiff * max(1, mx) / max(1, mf) def matvec(self, v): nv = norm(v) if nv == 0: return 0*v sc = self.omega / nv r = (self.func(self.x0 + sc*v) - self.f0) / sc if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)): raise ValueError('Function returned non-finite results') return r def solve(self, rhs, tol=0): if 'tol' in self.method_kw: sol, info = self.method(self.op, rhs, **self.method_kw) else: sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw) return sol def update(self, x, f): self.x0 = x self.f0 = f self._update_diff_step() # Update also the preconditioner, if possible if self.preconditioner is not None: if hasattr(self.preconditioner, 'update'): self.preconditioner.update(x, f) def setup(self, x, f, func): Jacobian.setup(self, x, f, func) self.x0 = x self.f0 = f self.op = scipy.sparse.linalg.aslinearoperator(self) if self.rdiff is None: self.rdiff = np.finfo(x.dtype).eps ** (1./2) self._update_diff_step() # Setup also the preconditioner, if possible if self.preconditioner is not None: if hasattr(self.preconditioner, 'setup'): self.preconditioner.setup(x, f, func) #------------------------------------------------------------------------------ # Wrapper functions #------------------------------------------------------------------------------ def _nonlin_wrapper(name, jac): """ Construct a solver wrapper with given name and jacobian approx. It inspects the keyword arguments of ``jac.__init__``, and allows one to use the same arguments in the wrapper function, in addition to the keyword arguments of `nonlin_solve` """ args, varargs, varkw, defaults = _getargspec(jac.__init__) kwargs = list(zip(args[-len(defaults):], defaults)) kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs]) if kw_str: kw_str = ", " + kw_str kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs]) if kwkw_str: kwkw_str = kwkw_str + ", " # Construct the wrapper function so that its keyword arguments # are visible in pydoc.help etc. wrapper = """ def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw): jac = %(jac)s(%(kwkw)s **kw) return nonlin_solve(F, xin, jac, iter, verbose, maxiter, f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search, callback) """ wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__, kwkw=kwkw_str) ns = {} ns.update(globals()) exec_(wrapper, ns) func = ns[name] func.__doc__ = jac.__doc__ _set_doc(func) return func broyden1 = _nonlin_wrapper('broyden1', BroydenFirst) broyden2 = _nonlin_wrapper('broyden2', BroydenSecond) anderson = _nonlin_wrapper('anderson', Anderson) linearmixing = _nonlin_wrapper('linearmixing', LinearMixing) diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden) excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing) newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
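# --- Hedged usage sketch (an illustrative addition, not part of the original
# scipy source): reruns the small root-finding problem from the module
# docstring through the `broyden1` wrapper generated just above.
if __name__ == "__main__":
    def F(x):
        return np.cos(x) + x[::-1] - [1, 2, 3, 4]

    x = broyden1(F, [1, 1, 1, 1], f_tol=1e-14)
    print(x)                    # the approximate root
    print(np.cos(x) + x[::-1])  # should be close to [1, 2, 3, 4]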
46,969
29.361991
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_numdiff.py
"""Routines for numerical differentiation.""" from __future__ import division import numpy as np from numpy.linalg import norm from scipy.sparse.linalg import LinearOperator from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find from ._group_columns import group_dense, group_sparse EPS = np.finfo(np.float64).eps def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): """Adjust final difference scheme to the presence of bounds. Parameters ---------- x0 : ndarray, shape (n,) Point at which we wish to estimate derivative. h : ndarray, shape (n,) Desired finite difference steps. num_steps : int Number of `h` steps in one direction required to implement finite difference scheme. For example, 2 means that we need to evaluate f(x0 + 2 * h) or f(x0 - 2 * h) scheme : {'1-sided', '2-sided'} Whether steps in one or both directions are required. In other words '1-sided' applies to forward and backward schemes, '2-sided' applies to center schemes. lb : ndarray, shape (n,) Lower bounds on independent variables. ub : ndarray, shape (n,) Upper bounds on independent variables. Returns ------- h_adjusted : ndarray, shape (n,) Adjusted step sizes. Step size decreases only if a sign flip or switching to one-sided scheme doesn't allow to take a full step. use_one_sided : ndarray of bool, shape (n,) Whether to switch to one-sided scheme. Informative only for ``scheme='2-sided'``. """ if scheme == '1-sided': use_one_sided = np.ones_like(h, dtype=bool) elif scheme == '2-sided': h = np.abs(h) use_one_sided = np.zeros_like(h, dtype=bool) else: raise ValueError("`scheme` must be '1-sided' or '2-sided'.") if np.all((lb == -np.inf) & (ub == np.inf)): return h, use_one_sided h_total = h * num_steps h_adjusted = h.copy() lower_dist = x0 - lb upper_dist = ub - x0 if scheme == '1-sided': x = x0 + h_total violated = (x < lb) | (x > ub) fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) h_adjusted[violated & fitting] *= -1 forward = (upper_dist >= lower_dist) & ~fitting h_adjusted[forward] = upper_dist[forward] / num_steps backward = (upper_dist < lower_dist) & ~fitting h_adjusted[backward] = -lower_dist[backward] / num_steps elif scheme == '2-sided': central = (lower_dist >= h_total) & (upper_dist >= h_total) forward = (upper_dist >= lower_dist) & ~central h_adjusted[forward] = np.minimum( h[forward], 0.5 * upper_dist[forward] / num_steps) use_one_sided[forward] = True backward = (upper_dist < lower_dist) & ~central h_adjusted[backward] = -np.minimum( h[backward], 0.5 * lower_dist[backward] / num_steps) use_one_sided[backward] = True min_dist = np.minimum(upper_dist, lower_dist) / num_steps adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) h_adjusted[adjusted_central] = min_dist[adjusted_central] use_one_sided[adjusted_central] = False return h_adjusted, use_one_sided relative_step = {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5} def _compute_absolute_step(rel_step, x0, method): if rel_step is None: rel_step = relative_step[method] sign_x0 = (x0 >= 0).astype(float) * 2 - 1 return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0)) def _prepare_bounds(bounds, x0): lb, ub = [np.asarray(b, dtype=float) for b in bounds] if lb.ndim == 0: lb = np.resize(lb, x0.shape) if ub.ndim == 0: ub = np.resize(ub, x0.shape) return lb, ub def group_columns(A, order=0): """Group columns of a 2-d matrix for sparse finite differencing [1]_. Two columns are in the same group if in each row at least one of them has zero. A greedy sequential algorithm is used to construct groups. 
Parameters ---------- A : array_like or sparse matrix, shape (m, n) Matrix of which to group columns. order : int, iterable of int with shape (n,) or None Permutation array which defines the order of columns enumeration. If int or None, a random permutation is used with `order` used as a random seed. Default is 0, that is, use a random permutation but guarantee repeatability. Returns ------- groups : ndarray of int, shape (n,) Contains values from 0 to n_groups-1, where n_groups is the number of found groups. Each value ``groups[i]`` is an index of the group to which the i-th column is assigned. The procedure is helpful only if n_groups is significantly less than n. References ---------- .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. """ if issparse(A): A = csc_matrix(A) else: A = np.atleast_2d(A) A = (A != 0).astype(np.int32) if A.ndim != 2: raise ValueError("`A` must be 2-dimensional.") m, n = A.shape if order is None or np.isscalar(order): rng = np.random.RandomState(order) order = rng.permutation(n) else: order = np.asarray(order) if order.shape != (n,): raise ValueError("`order` has incorrect shape.") A = A[:, order] if issparse(A): groups = group_sparse(m, n, A.indices, A.indptr) else: groups = group_dense(m, n, A) groups[order] = groups.copy() return groups def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None, bounds=(-np.inf, np.inf), sparsity=None, as_linear_operator=False, args=(), kwargs={}): """Compute finite difference approximation of the derivatives of a vector-valued function. If a function maps from R^n to R^m, its derivatives form an m-by-n matrix called the Jacobian, where an element (i, j) is a partial derivative of f[i] with respect to x[j]. Parameters ---------- fun : callable Function of which to estimate the derivatives. The argument x passed to this function is an ndarray of shape (n,) (never a scalar even if n=1). It must return a 1-d array_like of shape (m,) or a scalar. x0 : array_like of shape (n,) or float Point at which to estimate the derivatives. Float will be converted to a 1-d array. method : {'3-point', '2-point', 'cs'}, optional Finite difference method to use: - '2-point' - use the first order accuracy forward or backward difference. - '3-point' - use central difference in interior points and the second order accuracy forward or backward difference near the boundary. - 'cs' - use a complex-step finite difference scheme. This assumes that the user function is real-valued and can be analytically continued to the complex plane. Otherwise, produces bogus results. rel_step : None or array_like, optional Relative step size to use. The absolute step size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically, see Notes. f0 : None or array_like, optional If not None it is assumed to be equal to ``fun(x0)``, in this case the ``fun(x0)`` is not called. Default is None. bounds : tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each bound must match the size of `x0` or be a scalar, in the latter case the bound will be the same for all variables. Use it to limit the range of function evaluation. Bounds checking is not implemented when `as_linear_operator` is True.
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional Defines a sparsity structure of the Jacobian matrix. If the Jacobian matrix is known to have only few non-zero elements in each row, then it's possible to estimate several of its columns by a single function evaluation [2]_. To perform such economic computations two ingredients are required: * structure : array_like or sparse matrix of shape (m, n). A zero element means that the corresponding element of the Jacobian is identically zero. * groups : array_like of shape (n,). A column grouping for a given sparsity structure, use `group_columns` to obtain it. A single array or a sparse matrix is interpreted as a sparsity structure, and groups are computed inside the function. A tuple is interpreted as (structure, groups). If None (default), a standard dense differencing will be used. Note that sparse differencing makes sense only for large Jacobian matrices where each row contains few non-zero elements. as_linear_operator : bool, optional When True the function returns a `scipy.sparse.linalg.LinearOperator`. Otherwise it returns a dense array or a sparse matrix depending on `sparsity`. The linear operator provides an efficient way of computing ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow direct access to individual elements of the matrix. By default `as_linear_operator` is False. args, kwargs : tuple and dict, optional Additional arguments passed to `fun`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)``. Returns ------- J : {ndarray, sparse matrix, LinearOperator} Finite difference approximation of the Jacobian matrix. If `as_linear_operator` is True returns a LinearOperator with shape (m, n). Otherwise it returns a dense array or sparse matrix depending on how `sparsity` is defined. If `sparsity` is None then an ndarray with shape (m, n) is returned. If `sparsity` is not None returns a csr_matrix with shape (m, n). For sparse matrices and linear operators it is always returned as a 2-dimensional structure, for ndarrays, if m=1 it is returned as a 1-dimensional gradient array with shape (n,). See Also -------- check_derivative : Check correctness of a function computing derivatives. Notes ----- If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where EPS is machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for '3-point' method. Such a relative step approximately minimizes a sum of truncation and round-off errors, see [1]_. A finite difference scheme for '3-point' method is selected automatically. The well-known central difference scheme is used for points sufficiently far from the boundary, and 3-point forward or backward scheme is used for points near the boundary. Both schemes have the second-order accuracy in terms of Taylor expansion. Refer to [3]_ for the formulas of 3-point forward and backward difference schemes. For dense differencing, when m=1 the Jacobian is returned with shape (n,); on the other hand, when n=1 the Jacobian is returned with shape (m, 1). Our motivation is the following: a) It handles a case of gradient computation (m=1) in a conventional way. b) It clearly separates these two different cases. c) In all cases np.atleast_2d can be called to get 2-d Jacobian with correct dimensions. References ---------- .. [1] W. H. Press et al. "Numerical Recipes. The Art of Scientific Computing. 3rd edition", sec. 5.7. .. [2] A. Curtis, M. J. D. Powell, and J.
Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. .. [3] B. Fornberg, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. Examples -------- >>> import numpy as np >>> from scipy.optimize import approx_derivative >>> >>> def f(x, c1, c2): ... return np.array([x[0] * np.sin(c1 * x[1]), ... x[0] * np.cos(c2 * x[1])]) ... >>> x0 = np.array([1.0, 0.5 * np.pi]) >>> approx_derivative(f, x0, args=(1, 2)) array([[ 1., 0.], [-1., 0.]]) Bounds can be used to limit the region of function evaluation. In the example below we compute left and right derivative at point 1.0. >>> def g(x): ... return x**2 if x >= 1 else x ... >>> x0 = 1.0 >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) array([ 1.]) >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) array([ 2.]) """ if method not in ['2-point', '3-point', 'cs']: raise ValueError("Unknown method '%s'. " % method) x0 = np.atleast_1d(x0) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") lb, ub = _prepare_bounds(bounds, x0) if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if as_linear_operator and not (np.all(np.isinf(lb)) and np.all(np.isinf(ub))): raise ValueError("Bounds not supported when " "`as_linear_operator` is True.") def fun_wrapped(x): f = np.atleast_1d(fun(x, *args, **kwargs)) if f.ndim > 1: raise RuntimeError("`fun` return value has " "more than 1 dimension.") return f if f0 is None: f0 = fun_wrapped(x0) else: f0 = np.atleast_1d(f0) if f0.ndim > 1: raise ValueError("`f0` passed has more than 1 dimension.") if np.any((x0 < lb) | (x0 > ub)): raise ValueError("`x0` violates bound constraints.") if as_linear_operator: if rel_step is None: rel_step = relative_step[method] return _linear_operator_difference(fun_wrapped, x0, f0, rel_step, method) else: h = _compute_absolute_step(rel_step, x0, method) if method == '2-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '1-sided', lb, ub) elif method == '3-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', lb, ub) elif method == 'cs': use_one_sided = False if sparsity is None: return _dense_difference(fun_wrapped, x0, f0, h, use_one_sided, method) else: if not issparse(sparsity) and len(sparsity) == 2: structure, groups = sparsity else: structure = sparsity groups = group_columns(sparsity) if issparse(structure): structure = csc_matrix(structure) else: structure = np.atleast_2d(structure) groups = np.atleast_1d(groups) return _sparse_difference(fun_wrapped, x0, f0, h, use_one_sided, structure, groups, method) def _linear_operator_difference(fun, x0, f0, h, method): m = f0.size n = x0.size if method == '2-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p df = fun(x) - f0 return df / dx elif method == '3-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = 2*h / norm(p) x1 = x0 - (dx/2)*p x2 = x0 + (dx/2)*p f1 = fun(x1) f2 = fun(x2) df = f2 - f1 return df / dx elif method == 'cs': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p*1.j f1 = fun(x) df = f1.imag return df / dx else: raise RuntimeError("Never be here.") return LinearOperator((m, n), matvec) def _dense_difference(fun, x0, f0, h, use_one_sided, method): m = f0.size n = x0.size J_transposed = np.empty((n, m)) h_vecs = np.diag(h) for i 
in range(h.size): if method == '2-point': x = x0 + h_vecs[i] dx = x[i] - x0[i] # Recompute dx as exactly representable number. df = fun(x) - f0 elif method == '3-point' and use_one_sided[i]: x1 = x0 + h_vecs[i] x2 = x0 + 2 * h_vecs[i] dx = x2[i] - x0[i] f1 = fun(x1) f2 = fun(x2) df = -3.0 * f0 + 4 * f1 - f2 elif method == '3-point' and not use_one_sided[i]: x1 = x0 - h_vecs[i] x2 = x0 + h_vecs[i] dx = x2[i] - x1[i] f1 = fun(x1) f2 = fun(x2) df = f2 - f1 elif method == 'cs': f1 = fun(x0 + h_vecs[i]*1.j) df = f1.imag dx = h_vecs[i, i] else: raise RuntimeError("Never be here.") J_transposed[i] = df / dx if m == 1: J_transposed = np.ravel(J_transposed) return J_transposed.T def _sparse_difference(fun, x0, f0, h, use_one_sided, structure, groups, method): m = f0.size n = x0.size row_indices = [] col_indices = [] fractions = [] n_groups = np.max(groups) + 1 for group in range(n_groups): # Perturb variables which are in the same group simultaneously. e = np.equal(group, groups) h_vec = h * e if method == '2-point': x = x0 + h_vec dx = x - x0 df = fun(x) - f0 # The result is written to columns which correspond to perturbed # variables. cols, = np.nonzero(e) # Find all non-zero elements in selected columns of Jacobian. i, j, _ = find(structure[:, cols]) # Restore column indices in the full array. j = cols[j] elif method == '3-point': # Here we do conceptually the same but separate one-sided # and two-sided schemes. x1 = x0.copy() x2 = x0.copy() mask_1 = use_one_sided & e x1[mask_1] += h_vec[mask_1] x2[mask_1] += 2 * h_vec[mask_1] mask_2 = ~use_one_sided & e x1[mask_2] -= h_vec[mask_2] x2[mask_2] += h_vec[mask_2] dx = np.zeros(n) dx[mask_1] = x2[mask_1] - x0[mask_1] dx[mask_2] = x2[mask_2] - x1[mask_2] f1 = fun(x1) f2 = fun(x2) cols, = np.nonzero(e) i, j, _ = find(structure[:, cols]) j = cols[j] mask = use_one_sided[j] df = np.empty(m) rows = i[mask] df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows] rows = i[~mask] df[rows] = f2[rows] - f1[rows] elif method == 'cs': f1 = fun(x0 + h_vec*1.j) df = f1.imag dx = h_vec cols, = np.nonzero(e) i, j, _ = find(structure[:, cols]) j = cols[j] else: raise ValueError("Never be here.") # All that's left is to compute the fraction. We store i, j and # fractions as separate arrays and later construct coo_matrix. row_indices.append(i) col_indices.append(j) fractions.append(df[i] / dx[j]) row_indices = np.hstack(row_indices) col_indices = np.hstack(col_indices) fractions = np.hstack(fractions) J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n)) return csr_matrix(J) def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(), kwargs={}): """Check correctness of a function computing derivatives (Jacobian or gradient) by comparison with a finite difference approximation. Parameters ---------- fun : callable Function of which to estimate the derivatives. The argument x passed to this function is ndarray of shape (n,) (never a scalar even if n=1). It must return 1-d array_like of shape (m,) or a scalar. jac : callable Function which computes Jacobian matrix of `fun`. It must work with argument x the same way as `fun`. The return value must be array_like or sparse matrix with an appropriate shape. x0 : array_like of shape (n,) or float Point at which to estimate the derivatives. Float will be converted to 1-d array. bounds : 2-tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each bound must match the size of `x0` or be a scalar, in the latter case the bound will be the same for all variables. 
Use it to limit the range of function evaluation. args, kwargs : tuple and dict, optional Additional arguments passed to `fun` and `jac`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)`` and the same for `jac`. Returns ------- accuracy : float The maximum among all relative errors for elements with absolute values higher than 1 and absolute errors for elements with absolute values less or equal than 1. If `accuracy` is on the order of 1e-6 or lower, then it is likely that your `jac` implementation is correct. See Also -------- approx_derivative : Compute finite difference approximation of derivative. Examples -------- >>> import numpy as np >>> from scipy.optimize import check_derivative >>> >>> >>> def f(x, c1, c2): ... return np.array([x[0] * np.sin(c1 * x[1]), ... x[0] * np.cos(c2 * x[1])]) ... >>> def jac(x, c1, c2): ... return np.array([ ... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])], ... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])] ... ]) ... >>> >>> x0 = np.array([1.0, 0.5 * np.pi]) >>> check_derivative(f, jac, x0, args=(1, 2)) 2.4492935982947064e-16 """ J_to_test = jac(x0, *args, **kwargs) if issparse(J_to_test): J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test, args=args, kwargs=kwargs) J_to_test = csr_matrix(J_to_test) abs_err = J_to_test - J_diff i, j, abs_err_data = find(abs_err) J_diff_data = np.asarray(J_diff[i, j]).ravel() return np.max(np.abs(abs_err_data) / np.maximum(1, np.abs(J_diff_data))) else: J_diff = approx_derivative(fun, x0, bounds=bounds, args=args, kwargs=kwargs) abs_err = np.abs(J_to_test - J_diff) return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
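# --- Hedged usage sketch (an illustrative addition, not part of the original
# scipy source): reproduces the Jacobian example from the `approx_derivative`
# docstring above.
if __name__ == "__main__":
    def f(x, c1, c2):
        return np.array([x[0] * np.sin(c1 * x[1]),
                         x[0] * np.cos(c2 * x[1])])

    x0 = np.array([1.0, 0.5 * np.pi])
    # Expected result: [[1., 0.], [-1., 0.]]
    print(approx_derivative(f, x0, args=(1, 2)))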
23,753
36.115625
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_hessian_update_strategy.py
"""Hessian update strategies for quasi-Newton optimization methods.""" from __future__ import division, print_function, absolute_import import numpy as np from numpy.linalg import norm from scipy.linalg import get_blas_funcs from warnings import warn __all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1'] class HessianUpdateStrategy(object): """Interface for implementing Hessian update strategies. Many optimization methods make use of Hessian (or inverse Hessian) approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS. Some of these approximations, however, do not actually need to store the entire matrix or can compute the internal matrix product with a given vector in a very efficiently manner. This class serves as an abstract interface between the optimization algorithm and the quasi-Newton update strategies, giving freedom of implementation to store and update the internal matrix as efficiently as possible. Different choices of initialization and update procedure will result in different quasi-Newton strategies. Four methods should be implemented in derived classes: ``initialize``, ``update``, ``dot`` and ``get_matrix``. Notes ----- Any instance of a class that implements this interface, can be accepted by the method ``minimize`` and used by the compatible solvers to approximate the Hessian (or inverse Hessian) used by the optimization algorithms. """ def initialize(self, n, approx_type): """Initialize internal matrix. Allocate internal memory for storing and updating the Hessian or its inverse. Parameters ---------- n : int Problem dimension. approx_type : {'hess', 'inv_hess'} Selects either the Hessian or the inverse Hessian. When set to 'hess' the Hessian will be stored and updated. When set to 'inv_hess' its inverse will be used instead. """ raise NotImplementedError("The method ``initialize(n, approx_type)``" " is not implemented.") def update(self, delta_x, delta_grad): """Update internal matrix. Update Hessian matrix or its inverse (depending on how 'approx_type' is defined) using information about the last evaluated points. Parameters ---------- delta_x : ndarray The difference between two points the gradient function have been evaluated at: ``delta_x = x2 - x1``. delta_grad : ndarray The difference between the gradients: ``delta_grad = grad(x2) - grad(x1)``. """ raise NotImplementedError("The method ``update(delta_x, delta_grad)``" " is not implemented.") def dot(self, p): """Compute the product of the internal matrix with the given vector. Parameters ---------- p : array_like 1-d array representing a vector. Returns ------- Hp : array 1-d represents the result of multiplying the approximation matrix by vector p. """ raise NotImplementedError("The method ``dot(p)``" " is not implemented.") def get_matrix(self): """Return current internal matrix. Returns ------- H : ndarray, shape (n, n) Dense matrix containing either the Hessian or its inverse (depending on how 'approx_type' is defined). """ raise NotImplementedError("The method ``get_matrix(p)``" " is not implemented.") class FullHessianUpdateStrategy(HessianUpdateStrategy): """Hessian update strategy with full dimensional internal representation. 
""" _syr = get_blas_funcs('syr', dtype='d') # Symmetric rank 1 update _syr2 = get_blas_funcs('syr2', dtype='d') # Symmetric rank 2 update # Symmetric matrix-vector product _symv = get_blas_funcs('symv', dtype='d') def __init__(self, init_scale='auto'): self.init_scale = init_scale # Until initialize is called we can't really use the class, # so it makes sense to set everything to None. self.first_iteration = None self.approx_type = None self.B = None self.H = None def initialize(self, n, approx_type): """Initialize internal matrix. Allocate internal memory for storing and updating the Hessian or its inverse. Parameters ---------- n : int Problem dimension. approx_type : {'hess', 'inv_hess'} Selects either the Hessian or the inverse Hessian. When set to 'hess' the Hessian will be stored and updated. When set to 'inv_hess' its inverse will be used instead. """ self.first_iteration = True self.n = n self.approx_type = approx_type if approx_type not in ('hess', 'inv_hess'): raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.") # Create matrix if self.approx_type == 'hess': self.B = np.eye(n, dtype=float) else: self.H = np.eye(n, dtype=float) def _auto_scale(self, delta_x, delta_grad): # Heuristic to scale matrix at first iteration. # Described in Nocedal and Wright "Numerical Optimization" # p.143 formula (6.20). s_norm2 = np.dot(delta_x, delta_x) y_norm2 = np.dot(delta_grad, delta_grad) ys = np.abs(np.dot(delta_grad, delta_x)) if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0: return 1 if self.approx_type == 'hess': return y_norm2 / ys else: return ys / y_norm2 def _update_implementation(self, delta_x, delta_grad): raise NotImplementedError("The method ``_update_implementation``" " is not implemented.") def update(self, delta_x, delta_grad): """Update internal matrix. Update Hessian matrix or its inverse (depending on how 'approx_type' is defined) using information about the last evaluated points. Parameters ---------- delta_x : ndarray The difference between two points the gradient function have been evaluated at: ``delta_x = x2 - x1``. delta_grad : ndarray The difference between the gradients: ``delta_grad = grad(x2) - grad(x1)``. """ if np.all(delta_x == 0.0): return if np.all(delta_grad == 0.0): warn('delta_grad == 0.0. Check if the approximated ' 'function is linear. If the function is linear ' 'better results can be obtained by defining the ' 'Hessian as zero instead of using quasi-Newton ' 'approximations.', UserWarning) return if self.first_iteration: # Get user specific scale if self.init_scale == "auto": scale = self._auto_scale(delta_x, delta_grad) else: scale = float(self.init_scale) # Scale initial matrix with ``scale * np.eye(n)`` if self.approx_type == 'hess': self.B *= scale else: self.H *= scale self.first_iteration = False self._update_implementation(delta_x, delta_grad) def dot(self, p): """Compute the product of the internal matrix with the given vector. Parameters ---------- p : array_like 1-d array representing a vector. Returns ------- Hp : array 1-d represents the result of multiplying the approximation matrix by vector p. """ if self.approx_type == 'hess': return self._symv(1, self.B, p) else: return self._symv(1, self.H, p) def get_matrix(self): """Return the current internal matrix. Returns ------- M : ndarray, shape (n, n) Dense matrix containing either the Hessian or its inverse (depending on how `approx_type` was defined). 
""" if self.approx_type == 'hess': M = np.copy(self.B) else: M = np.copy(self.H) li = np.tril_indices_from(M, k=-1) M[li] = M.T[li] return M class BFGS(FullHessianUpdateStrategy): """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. Parameters ---------- exception_strategy : {'skip_update', 'damp_update'}, optional Define how to proceed when the curvature condition is violated. Set it to 'skip_update' to just skip the update. Or, alternatively, set it to 'damp_update' to interpolate between the actual BFGS result and the unmodified matrix. Both exceptions strategies are explained in [1]_, p.536-537. min_curvature : float This number, scaled by a normalization factor, defines the minimum curvature ``dot(delta_grad, delta_x)`` allowed to go unaffected by the exception strategy. By default is equal to 1e-8 when ``exception_strategy = 'skip_update'`` and equal to 0.2 when ``exception_strategy = 'damp_update'``. init_scale : {float, 'auto'} Matrix scale at first iteration. At the first iteration the Hessian matrix or its inverse will be initialized with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension. Set it to 'auto' in order to use an automatic heuristic for choosing the initial scale. The heuristic is described in [1]_, p.143. By default uses 'auto'. Notes ----- The update is based on the description in [1]_, p.140. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" Second Edition (2006). """ def __init__(self, exception_strategy='skip_update', min_curvature=None, init_scale='auto'): if exception_strategy == 'skip_update': if min_curvature is not None: self.min_curvature = min_curvature else: self.min_curvature = 1e-8 elif exception_strategy == 'damp_update': if min_curvature is not None: self.min_curvature = min_curvature else: self.min_curvature = 0.2 else: raise ValueError("`exception_strategy` must be 'skip_update' " "or 'damp_update'.") super(BFGS, self).__init__(init_scale) self.exception_strategy = exception_strategy def _update_inverse_hessian(self, ys, Hy, yHy, s): """Update the inverse Hessian matrix. BFGS update using the formula: ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T) - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)`` where ``s = delta_x`` and ``y = delta_grad``. This formula is equivalent to (6.17) in [1]_ written in a more efficient way for implementation. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" Second Edition (2006). """ self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H) self.H = self._syr((ys+yHy)/ys**2, s, a=self.H) def _update_hessian(self, ys, Bs, sBs, y): """Update the Hessian matrix. BFGS update using the formula: ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y`` where ``s`` is short for ``delta_x`` and ``y`` is short for ``delta_grad``. Formula (6.19) in [1]_. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" Second Edition (2006). """ self.B = self._syr(1.0 / ys, y, a=self.B) self.B = self._syr(-1.0 / sBs, Bs, a=self.B) def _update_implementation(self, delta_x, delta_grad): # Auxiliary variables w and z if self.approx_type == 'hess': w = delta_x z = delta_grad else: w = delta_grad z = delta_x # Do some common operations wz = np.dot(w, z) Mw = self.dot(w) wMw = Mw.dot(w) # Guarantee that wMw > 0 by reinitializing matrix. # While this is always true in exact arithmetics, # indefinite matrix may appear due to roundoff errors. 
if wMw <= 0.0: scale = self._auto_scale(delta_x, delta_grad) # Reinitialize matrix if self.approx_type == 'hess': self.B = scale * np.eye(self.n, dtype=float) else: self.H = scale * np.eye(self.n, dtype=float) # Do common operations for new matrix Mw = self.dot(w) wMw = Mw.dot(w) # Check if curvature condition is violated if wz <= self.min_curvature * wMw: # If the option 'skip_update' is set # we just skip the update when the condion # is violated. if self.exception_strategy == 'skip_update': return # If the option 'damp_update' is set we # interpolate between the actual BFGS # result and the unmodified matrix. elif self.exception_strategy == 'damp_update': update_factor = (1-self.min_curvature) / (1 - wz/wMw) z = update_factor*z + (1-update_factor)*Mw wz = np.dot(w, z) # Update matrix if self.approx_type == 'hess': self._update_hessian(wz, Mw, wMw, z) else: self._update_inverse_hessian(wz, Mw, wMw, z) class SR1(FullHessianUpdateStrategy): """Symmetric-rank-1 Hessian update strategy. Parameters ---------- min_denominator : float This number, scaled by a normalization factor, defines the minimum denominator magnitude allowed in the update. When the condition is violated we skip the update. By default uses ``1e-8``. init_scale : {float, 'auto'}, optional Matrix scale at first iteration. At the first iteration the Hessian matrix or its inverse will be initialized with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension. Set it to 'auto' in order to use an automatic heuristic for choosing the initial scale. The heuristic is described in [1]_, p.143. By default uses 'auto'. Notes ----- The update is based on the description in [1]_, p.144-146. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" Second Edition (2006). """ def __init__(self, min_denominator=1e-8, init_scale='auto'): self.min_denominator = min_denominator super(SR1, self).__init__(init_scale) def _update_implementation(self, delta_x, delta_grad): # Auxiliary variables w and z if self.approx_type == 'hess': w = delta_x z = delta_grad else: w = delta_grad z = delta_x # Do some common operations Mw = self.dot(w) z_minus_Mw = z - Mw denominator = np.dot(w, z_minus_Mw) # If the denominator is too small # we just skip the update. if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw): return # Update matrix if self.approx_type == 'hess': self.B = self._syr(1/denominator, z_minus_Mw, a=self.B) else: self.H = self._syr(1/denominator, z_minus_Mw, a=self.H)
15,924
35.948956
80
py
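Since the classes above are normally driven by an optimizer, a standalone usage sketch may help. It assumes `BFGS` is re-exported from `scipy.optimize` (it appears in this module's `__all__`; otherwise import from the private module above), and the quadratic test function is hypothetical.

import numpy as np
from scipy.optimize import BFGS

def grad(x):
    # Gradient of the hypothetical quadratic f(x) = x[0]**2 + 2*x[1]**2,
    # whose exact Hessian is diag([2, 4]).
    return np.array([2.0 * x[0], 4.0 * x[1]])

B = BFGS()
B.initialize(n=2, approx_type='hess')
x_prev = np.array([1.0, 1.0])
for x_next in (np.array([1.5, 0.5]), np.array([0.7, 1.2])):
    # Feed (delta_x, delta_grad) pairs, as an optimizer would.
    B.update(x_next - x_prev, grad(x_next) - grad(x_prev))
    x_prev = x_next

print(B.get_matrix())               # approximation drifting toward diag([2, 4])
print(B.dot(np.array([1.0, 0.0])))  # product with the current approximation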
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/zeros.py
from __future__ import division, print_function, absolute_import

import warnings

from . import _zeros
from numpy import finfo, sign, sqrt

_iter = 100
_xtol = 2e-12
_rtol = 4*finfo(float).eps

__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth']

CONVERGED = 'converged'
SIGNERR = 'sign error'
CONVERR = 'convergence error'
flag_map = {0: CONVERGED, -1: SIGNERR, -2: CONVERR}


class RootResults(object):
    """Represents the root finding result.

    Attributes
    ----------
    root : float
        Estimated root location.
    iterations : int
        Number of iterations needed to find the root.
    function_calls : int
        Number of times the function was called.
    converged : bool
        True if the routine converged.
    flag : str
        Description of the cause of termination.

    """
    def __init__(self, root, iterations, function_calls, flag):
        self.root = root
        self.iterations = iterations
        self.function_calls = function_calls
        self.converged = flag == 0
        try:
            self.flag = flag_map[flag]
        except KeyError:
            self.flag = 'unknown error %d' % (flag,)

    def __repr__(self):
        attrs = ['converged', 'flag', 'function_calls',
                 'iterations', 'root']
        m = max(map(len, attrs)) + 1
        return '\n'.join([a.rjust(m) + ': ' + repr(getattr(self, a))
                          for a in attrs])


def results_c(full_output, r):
    if full_output:
        x, funcalls, iterations, flag = r
        results = RootResults(root=x,
                              iterations=iterations,
                              function_calls=funcalls,
                              flag=flag)
        return x, results
    else:
        return r


# Newton-Raphson method
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
           fprime2=None):
    """
    Find a zero using the Newton-Raphson or secant method.

    Find a zero of the function `func` given a nearby starting point `x0`.
    The Newton-Raphson method is used if the derivative `fprime` of `func`
    is provided, otherwise the secant method is used.  If the second order
    derivative `fprime2` of `func` is provided, then Halley's method is used.

    Parameters
    ----------
    func : function
        The function whose zero is wanted. It must be a function of a
        single variable of the form f(x,a,b,c...), where a,b,c... are extra
        arguments that can be passed in the `args` parameter.
    x0 : float
        An initial estimate of the zero that should be somewhere near the
        actual zero.
    fprime : function, optional
        The derivative of the function when available and convenient. If it
        is None (default), then the secant method is used.
    args : tuple, optional
        Extra arguments to be used in the function call.
    tol : float, optional
        The allowable error of the zero value.
    maxiter : int, optional
        Maximum number of iterations.
    fprime2 : function, optional
        The second order derivative of the function when available and
        convenient. If it is None (default), then the normal Newton-Raphson
        or the secant method is used. If it is not None, then Halley's method
        is used.

    Returns
    -------
    zero : float
        Estimated location where function is zero.

    See Also
    --------
    brentq, brenth, ridder, bisect
    fsolve : find zeroes in n dimensions.

    Notes
    -----
    The convergence rate of the Newton-Raphson method is quadratic,
    the Halley method is cubic, and the secant method is
    sub-quadratic. This means that if the function is well behaved
    the actual error in the estimated zero is approximately the square
    (cube for Halley) of the requested tolerance up to roundoff
    error. However, the stopping criterion used here is the step size
    and there is no guarantee that a zero has been found. Consequently
    the result should be verified. Safer algorithms are brentq,
    brenth, ridder, and bisect, but they all require that the root
    first be bracketed in an interval where the function changes
    sign. The brentq algorithm is recommended for general use in one
    dimensional problems when such an interval has been found.

    Examples
    --------
    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1

    >>> from scipy import optimize

    ``fprime`` not provided, use secant method

    >>> root = optimize.newton(f, 1.5)
    >>> root
    1.0000000000000016
    >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x)
    >>> root
    1.0000000000000016

    Only ``fprime`` provided, use the Newton-Raphson method

    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)
    >>> root
    1.0

    Both ``fprime2`` and ``fprime`` provided, use Halley's method

    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,
    ...                        fprime2=lambda x: 6 * x)
    >>> root
    1.0

    """
    if tol <= 0:
        raise ValueError("tol too small (%g <= 0)" % tol)
    if maxiter < 1:
        raise ValueError("maxiter must be greater than 0")
    # Multiply by 1.0 to convert to floating point.  We don't use float(x0)
    # so it still works if x0 is complex.
    p0 = 1.0 * x0
    if fprime is not None:
        # Newton-Raphson method
        for iter in range(maxiter):
            fder = fprime(p0, *args)
            if fder == 0:
                msg = "derivative was zero."
                warnings.warn(msg, RuntimeWarning)
                return p0
            fval = func(p0, *args)
            newton_step = fval / fder
            if fprime2 is None:
                # Newton step
                p = p0 - newton_step
            else:
                fder2 = fprime2(p0, *args)
                # Halley's method
                p = p0 - newton_step / (1.0 - 0.5 * newton_step * fder2 / fder)
            if abs(p - p0) < tol:
                return p
            p0 = p
    else:
        # Secant method
        if x0 >= 0:
            p1 = x0*(1 + 1e-4) + 1e-4
        else:
            p1 = x0*(1 + 1e-4) - 1e-4
        q0 = func(p0, *args)
        q1 = func(p1, *args)
        for iter in range(maxiter):
            if q1 == q0:
                if p1 != p0:
                    msg = "Tolerance of %s reached" % (p1 - p0)
                    warnings.warn(msg, RuntimeWarning)
                return (p1 + p0)/2.0
            else:
                p = p1 - q1*(p1 - p0)/(q1 - q0)
            if abs(p - p1) < tol:
                return p
            p0 = p1
            q0 = q1
            p1 = p
            q1 = func(p1, *args)
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
    raise RuntimeError(msg)


def bisect(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find root of a function within an interval.

    Basic bisection routine to find a zero of the function `f` between the
    arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
    Slow but sure.

    Parameters
    ----------
    f : function
        Python function returning a number.  `f` must be continuous, and
        f(a) and f(b) must have opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``.
    maxiter : number, optional
        if convergence is not achieved in `maxiter` iterations, an error is
        raised.  Must be >= 0.
    args : tuple, optional
        containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned.  If `full_output` is
        True, the return value is ``(x, r)``, where x is the root, and r is
        a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence.  In particular,
        ``r.converged`` is True if the routine converged.

    Examples
    --------

    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.bisect(f, 0, 2)
    >>> root
    1.0

    >>> root = optimize.bisect(f, -2, 0)
    >>> root
    -1.0

    See Also
    --------
    brentq, brenth, bisect, newton
    fixed_point : scalar fixed-point finder
    fsolve : n-dimensional root-finding

    """
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def ridder(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in an interval.

    Parameters
    ----------
    f : function
        Python function returning a number.  f must be continuous, and f(a)
        and f(b) must have opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``.
    maxiter : number, optional
        if convergence is not achieved in maxiter iterations, an error is
        raised.  Must be >= 0.
    args : tuple, optional
        containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned.  If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a RootResults object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence.
        In particular, ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, bisect, newton : one-dimensional root-finding
    fixed_point : scalar fixed-point finder

    Notes
    -----
    Uses [Ridders1979]_ method to find a zero of the function `f` between
    the arguments `a` and `b`. Ridders' method is faster than bisection, but
    not generally as fast as the Brent routines. [Ridders1979]_ provides the
    classic description and source of the algorithm. A description can also
    be found in any recent edition of Numerical Recipes.

    The routine used here diverges slightly from standard presentations in
    order to be a bit more careful of tolerance.

    Examples
    --------

    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.ridder(f, 0, 2)
    >>> root
    1.0

    >>> root = optimize.ridder(f, -2, 0)
    >>> root
    -1.0

    References
    ----------
    .. [Ridders1979]
       Ridders, C. F. J. "A New Algorithm for Computing a
       Single Root of a Real Continuous Function."
       IEEE Trans. Circuits Systems 26, 979-980, 1979.

    """
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def brentq(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in a bracketing interval using Brent's method.

    Uses the classic Brent's method to find a zero of the function `f` on
    the sign changing interval [a, b].  Generally considered the best of the
    rootfinding routines here.  It is a safe version of the secant method
    that uses inverse quadratic extrapolation.  Brent's method combines root
    bracketing, interval bisection, and inverse quadratic interpolation.  It
    is sometimes known as the van Wijngaarden-Dekker-Brent method.  Brent
    (1973) claims convergence is guaranteed for functions computable within
    [a,b].

    [Brent1973]_ provides the classic description of the algorithm.  Another
    description can be found in a recent edition of Numerical Recipes,
    including [PressEtal1992]_.  Another description is at
    http://mathworld.wolfram.com/BrentsMethod.html.  It should be easy to
    understand the algorithm just by reading our code.  Our code diverges a
    bit from standard presentations: we choose a different formula for the
    extrapolation step.

    Parameters
    ----------
    f : function
        Python function returning a number.  The function :math:`f`
        must be continuous, and :math:`f(a)` and :math:`f(b)` must
        have opposite signs.
    a : number
        One end of the bracketing interval :math:`[a, b]`.
    b : number
        The other end of the bracketing interval :math:`[a, b]`.
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
        and ``rtol/2``. [Brent1973]_
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
        and ``rtol/2``. [Brent1973]_
    maxiter : number, optional
        if convergence is not achieved in maxiter iterations, an error is
        raised.  Must be >= 0.
    args : tuple, optional
        containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned.  If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a RootResults object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence.  In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    multivariate local optimizers
      `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
    nonlinear least squares minimizer
      `leastsq`
    constrained multivariate optimizers
      `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
    global optimizers
      `basinhopping`, `brute`, `differential_evolution`
    local scalar minimizers
      `fminbound`, `brent`, `golden`, `bracket`
    n-dimensional root-finding
      `fsolve`
    one-dimensional root-finding
      `brenth`, `ridder`, `bisect`, `newton`
    scalar fixed-point finder
      `fixed_point`

    Notes
    -----
    `f` must be continuous.  f(a) and f(b) must have opposite signs.

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.brentq(f, -2, 0)
    >>> root
    -1.0

    >>> root = optimize.brentq(f, 0, 2)
    >>> root
    1.0

    References
    ----------
    .. [Brent1973]
       Brent, R. P.,
       *Algorithms for Minimization Without Derivatives*.
       Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.

    .. [PressEtal1992]
       Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
       *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
       Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
       Section 9.3:  "Van Wijngaarden-Dekker-Brent Method."

    """
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def brenth(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """Find root of f in [a,b].

    A variation on the classic Brent routine to find a zero of the function f
    between the arguments a and b that uses hyperbolic extrapolation instead
    of inverse quadratic extrapolation. There was a paper back in the 1980's
    ...  f(a) and f(b) cannot have the same signs. Generally on a par with
    the brent routine, but not as heavily tested.  It is a safe version of
    the secant method that uses hyperbolic extrapolation. The version here
    is by Chuck Harris.

    Parameters
    ----------
    f : function
        Python function returning a number.  f must be continuous, and f(a)
        and f(b) must have opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative. As with `brentq`, for nice
        functions the method will often satisfy the above condition
        with ``xtol/2`` and ``rtol/2``.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``. As with `brentq`, for nice functions
        the method will often satisfy the above condition with
        ``xtol/2`` and ``rtol/2``.
    maxiter : number, optional
        if convergence is not achieved in maxiter iterations, an error is
        raised.  Must be >= 0.
    args : tuple, optional
        containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned.  If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a RootResults object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence.  In particular,
        ``r.converged`` is True if the routine converged.

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.brenth(f, -2, 0)
    >>> root
    -1.0

    >>> root = optimize.brenth(f, 0, 2)
    >>> root
    1.0

    See Also
    --------
    fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers
    leastsq : nonlinear least squares minimizer
    fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
    basinhopping, differential_evolution, brute : global optimizers
    fminbound, brent, golden, bracket : local scalar minimizers
    fsolve : n-dimensional root-finding
    brentq, brenth, ridder, bisect, newton : one-dimensional root-finding
    fixed_point : scalar fixed-point finder

    """
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)
21,346
33.823817
80
py
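The docstrings above only mention the `full_output` path through `results_c` in passing; a minimal sketch of inspecting the returned `RootResults` (no assumptions beyond the public `scipy.optimize` API shown in the file):

from scipy.optimize import brentq

def f(x):
    return x**2 - 1

root, r = brentq(f, 0, 2, full_output=True)
print(root)          # 1.0
print(r.converged)   # True
print(r.flag)        # 'converged' (see flag_map in the module above)
print(r.iterations, r.function_calls)  # iteration and function call counts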
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py
import numpy as np
import scipy.sparse as sps


class CanonicalConstraint(object):
    """Canonical constraint to use with trust-constr algorithm.

    It represents the set of constraints of the form::

        f_eq(x) = 0
        f_ineq(x) <= 0

    Where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see
    below.

    The class is supposed to be instantiated by factory methods, which
    should prepare the parameters listed below.

    Parameters
    ----------
    n_eq, n_ineq : int
        Number of equality and inequality constraints respectively.
    fun : callable
        Function defining the constraints. The signature is
        ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq`
        components and ``c_ineq`` is ndarray with `n_ineq` components.
    jac : callable
        Function to evaluate the Jacobian of the constraint. The signature
        is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are
        either ndarray or csr_matrix of shapes (n_eq, n) and (n_ineq, n)
        respectively.
    hess : callable
        Function to evaluate the Hessian of the constraints multiplied
        by Lagrange multipliers, that is
        ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is
        ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied
        shape (n, n) and provides a matrix-vector product operation
        ``H.dot(p)``.
    keep_feasible : ndarray, shape (n_ineq,)
        Mask indicating which inequality constraints should be kept feasible.
    """
    def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible):
        self.n_eq = n_eq
        self.n_ineq = n_ineq
        self.fun = fun
        self.jac = jac
        self.hess = hess
        self.keep_feasible = keep_feasible

    @classmethod
    def from_PreparedConstraint(cls, constraint):
        """Create an instance from `PreparedConstraint` object."""
        lb, ub = constraint.bounds
        cfun = constraint.fun
        keep_feasible = constraint.keep_feasible

        if np.all(lb == -np.inf) and np.all(ub == np.inf):
            return cls.empty(cfun.n)
        elif np.all(lb == ub):
            return cls._equal_to_canonical(cfun, lb)
        elif np.all(lb == -np.inf):
            return cls._less_to_canonical(cfun, ub, keep_feasible)
        elif np.all(ub == np.inf):
            return cls._greater_to_canonical(cfun, lb, keep_feasible)
        else:
            return cls._interval_to_canonical(cfun, lb, ub, keep_feasible)

    @classmethod
    def empty(cls, n):
        """Create an "empty" instance.

        This "empty" instance is required to allow working with unconstrained
        problems as if they have some constraints.
        """
        empty_fun = np.empty(0)
        empty_jac = np.empty((0, n))
        empty_hess = sps.csr_matrix((n, n))

        def fun(x):
            return empty_fun, empty_fun

        def jac(x):
            return empty_jac, empty_jac

        def hess(x, v_eq, v_ineq):
            return empty_hess

        return cls(0, 0, fun, jac, hess, np.empty(0))

    @classmethod
    def concatenate(cls, canonical_constraints, sparse_jacobian):
        """Concatenate multiple `CanonicalConstraint` into one.

        `sparse_jacobian` (bool) determines the Jacobian format of the
        concatenated constraint. Note that items in `canonical_constraints`
        must have their Jacobians in the same format.
        """
        def fun(x):
            eq_all = []
            ineq_all = []
            for c in canonical_constraints:
                eq, ineq = c.fun(x)
                eq_all.append(eq)
                ineq_all.append(ineq)

            return np.hstack(eq_all), np.hstack(ineq_all)

        if sparse_jacobian:
            vstack = sps.vstack
        else:
            vstack = np.vstack

        def jac(x):
            eq_all = []
            ineq_all = []
            for c in canonical_constraints:
                eq, ineq = c.jac(x)
                eq_all.append(eq)
                ineq_all.append(ineq)

            return vstack(eq_all), vstack(ineq_all)

        def hess(x, v_eq, v_ineq):
            hess_all = []
            index_eq = 0
            index_ineq = 0
            for c in canonical_constraints:
                vc_eq = v_eq[index_eq:index_eq + c.n_eq]
                vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq]
                hess_all.append(c.hess(x, vc_eq, vc_ineq))
                index_eq += c.n_eq
                index_ineq += c.n_ineq

            def matvec(p):
                result = np.zeros_like(p)
                for h in hess_all:
                    result += h.dot(p)
                return result

            n = x.shape[0]
            return sps.linalg.LinearOperator((n, n), matvec, dtype=float)

        n_eq = sum(c.n_eq for c in canonical_constraints)
        n_ineq = sum(c.n_ineq for c in canonical_constraints)
        keep_feasible = np.array(np.hstack((
            c.keep_feasible for c in canonical_constraints)), dtype=bool)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _equal_to_canonical(cls, cfun, value):
        empty_fun = np.empty(0)
        n = cfun.n

        n_eq = value.shape[0]
        n_ineq = 0
        keep_feasible = np.empty(0, dtype=bool)

        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        def fun(x):
            return cfun.fun(x) - value, empty_fun

        def jac(x):
            return cfun.jac(x), empty_jac

        def hess(x, v_eq, v_ineq):
            return cfun.hess(x, v_eq)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _less_to_canonical(cls, cfun, ub, keep_feasible):
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_ub = ub < np.inf
        n_eq = 0
        n_ineq = np.sum(finite_ub)

        if np.all(finite_ub):
            def fun(x):
                return empty_fun, cfun.fun(x) - ub

            def jac(x):
                return empty_jac, cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, v_ineq)
        else:
            finite_ub = np.nonzero(finite_ub)[0]
            keep_feasible = keep_feasible[finite_ub]
            ub = ub[finite_ub]

            def fun(x):
                return empty_fun, cfun.fun(x)[finite_ub] - ub

            def jac(x):
                return empty_jac, cfun.jac(x)[finite_ub]

            def hess(x, v_eq, v_ineq):
                v = np.zeros(cfun.m)
                v[finite_ub] = v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _greater_to_canonical(cls, cfun, lb, keep_feasible):
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_lb = lb > -np.inf
        n_eq = 0
        n_ineq = np.sum(finite_lb)

        if np.all(finite_lb):
            def fun(x):
                return empty_fun, lb - cfun.fun(x)

            def jac(x):
                return empty_jac, -cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, -v_ineq)
        else:
            finite_lb = np.nonzero(finite_lb)[0]
            keep_feasible = keep_feasible[finite_lb]
            lb = lb[finite_lb]

            def fun(x):
                return empty_fun, lb - cfun.fun(x)[finite_lb]

            def jac(x):
                return empty_jac, -cfun.jac(x)[finite_lb]

            def hess(x, v_eq, v_ineq):
                v = np.zeros(cfun.m)
                v[finite_lb] = -v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible):
        lb_inf = lb == -np.inf
        ub_inf = ub == np.inf
        equal = lb == ub
        less = lb_inf & ~ub_inf
        greater = ub_inf & ~lb_inf
        interval = ~equal & ~lb_inf & ~ub_inf

        equal = np.nonzero(equal)[0]
        less = np.nonzero(less)[0]
        greater = np.nonzero(greater)[0]
        interval = np.nonzero(interval)[0]
        n_less = less.shape[0]
        n_greater = greater.shape[0]
        n_interval = interval.shape[0]
        n_ineq = n_less + n_greater + 2 * n_interval
        n_eq = equal.shape[0]

        keep_feasible = np.hstack((keep_feasible[less],
                                   keep_feasible[greater],
                                   keep_feasible[interval],
                                   keep_feasible[interval]))

        def fun(x):
            f = cfun.fun(x)
            eq = f[equal] - lb[equal]
            le = f[less] - ub[less]
            ge = lb[greater] - f[greater]
            il = f[interval] - ub[interval]
            ig = lb[interval] - f[interval]
            return eq, np.hstack((le, ge, il, ig))

        def jac(x):
            J = cfun.jac(x)
            eq = J[equal]
            le = J[less]
            ge = -J[greater]
            il = J[interval]
            ig = -il
            if sps.issparse(J):
                ineq = sps.vstack((le, ge, il, ig))
            else:
                ineq = np.vstack((le, ge, il, ig))
            return eq, ineq

        def hess(x, v_eq, v_ineq):
            n_start = 0
            v_l = v_ineq[n_start:n_start + n_less]
            n_start += n_less
            v_g = v_ineq[n_start:n_start + n_greater]
            n_start += n_greater
            v_il = v_ineq[n_start:n_start + n_interval]
            n_start += n_interval
            v_ig = v_ineq[n_start:n_start + n_interval]

            v = np.zeros_like(lb)
            v[equal] = v_eq
            v[less] = v_l
            v[greater] = -v_g
            v[interval] = v_il - v_ig

            return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)


def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian):
    """Convert initial values of the constraints to the canonical format.

    The purpose is to avoid one additional call to the constraints at the
    initial point. It takes saved values in `PreparedConstraint`, modifies
    and concatenates them to the canonical constraint format.
    """
    c_eq = []
    c_ineq = []
    J_eq = []
    J_ineq = []

    for c in prepared_constraints:
        f = c.fun.f
        J = c.fun.J
        lb, ub = c.bounds
        if np.all(lb == ub):
            c_eq.append(f - lb)
            J_eq.append(J)
        elif np.all(lb == -np.inf):
            finite_ub = ub < np.inf
            c_ineq.append(f[finite_ub] - ub[finite_ub])
            J_ineq.append(J[finite_ub])
        elif np.all(ub == np.inf):
            finite_lb = lb > -np.inf
            c_ineq.append(lb[finite_lb] - f[finite_lb])
            J_ineq.append(-J[finite_lb])
        else:
            lb_inf = lb == -np.inf
            ub_inf = ub == np.inf
            equal = lb == ub
            less = lb_inf & ~ub_inf
            greater = ub_inf & ~lb_inf
            interval = ~equal & ~lb_inf & ~ub_inf

            c_eq.append(f[equal] - lb[equal])
            c_ineq.append(f[less] - ub[less])
            c_ineq.append(lb[greater] - f[greater])
            c_ineq.append(f[interval] - ub[interval])
            c_ineq.append(lb[interval] - f[interval])

            J_eq.append(J[equal])
            J_ineq.append(J[less])
            J_ineq.append(-J[greater])
            J_ineq.append(J[interval])
            J_ineq.append(-J[interval])

    c_eq = np.hstack(c_eq) if c_eq else np.empty(0)
    c_ineq = np.hstack(c_ineq) if c_ineq else np.empty(0)

    if sparse_jacobian:
        vstack = sps.vstack
        empty = sps.csr_matrix((0, n))
    else:
        vstack = np.vstack
        empty = np.empty((0, n))

    J_eq = vstack(J_eq) if J_eq else empty
    J_ineq = vstack(J_ineq) if J_ineq else empty

    return c_eq, c_ineq, J_eq, J_ineq
12,519
30.938776
79
py
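The bound-classification masks used by `_interval_to_canonical` and `initial_constraints_as_canonical` above can be checked in isolation; a minimal sketch with hypothetical bounds, using only NumPy:

import numpy as np

# One row per constraint: equality, upper-bound-only, lower-bound-only,
# two-sided interval, and fully unbounded.
lb = np.array([0.0, -np.inf, 1.0, -1.0, -np.inf])
ub = np.array([0.0, 2.0, np.inf, 1.0, np.inf])

lb_inf = lb == -np.inf
ub_inf = ub == np.inf
equal = lb == ub
less = lb_inf & ~ub_inf
greater = ub_inf & ~lb_inf
interval = ~equal & ~lb_inf & ~ub_inf

print(np.nonzero(equal)[0])     # [0] -> becomes f_eq(x) = 0
print(np.nonzero(less)[0])      # [1] -> becomes f(x) - ub <= 0
print(np.nonzero(greater)[0])   # [2] -> becomes lb - f(x) <= 0
print(np.nonzero(interval)[0])  # [3] -> contributes two inequalities
# Index 4 matches no mask: an unbounded row adds no canonical constraint.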
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/setup.py
from __future__ import division, print_function, absolute_import


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('_trustregion_constr', parent_package, top_path)
    config.add_data_dir('tests')
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
424
29.357143
75
py
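A hedged sketch of inspecting what the trivial setup file above builds, run from the directory containing it; the specific keys in the resulting dict are an assumption about `numpy.distutils`:

from setup import configuration

cfg = configuration(top_path='').todict()
# cfg holds the keyword arguments that numpy.distutils' setup() receives;
# it is expected (assumption) to include the package name '_trustregion_constr'.
print(sorted(cfg.keys()))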