# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py
from __future__ import division, print_function, absolute_import
import time
import numpy as np
from scipy.sparse.linalg import LinearOperator
from .._differentiable_functions import VectorFunction
from .._constraints import (
NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds)
from ..optimize import OptimizeResult
from .._differentiable_functions import ScalarFunction
from .equality_constrained_sqp import equality_constrained_sqp
from .canonical_constraint import (CanonicalConstraint,
initial_constraints_as_canonical)
from .tr_interior_point import tr_interior_point
from .report import BasicReport, SQPReport, IPReport
TERMINATION_MESSAGES = {
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`xtol` termination condition is satisfied.",
3: "`callback` function requested termination"
}
class HessianLinearOperator(object):
"""Build LinearOperator from hessp"""
def __init__(self, hessp, n):
self.hessp = hessp
self.n = n
def __call__(self, x, *args):
def matvec(p):
return self.hessp(x, p, *args)
return LinearOperator((self.n, self.n), matvec=matvec)
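# Illustrative sketch (added for exposition, not part of the original
# file): HessianLinearOperator turns a Hessian-vector-product callable
# ``hessp`` into a LinearOperator. The diagonal Hessian below is an
# assumption made only for this demonstration.
def _demo_hessian_linear_operator():
    H = np.diag([1.0, 2.0, 3.0])
    hessp = lambda x, p: H.dot(p)  # Hessian is constant in x here
    op = HessianLinearOperator(hessp, 3)(np.zeros(3))
    return op.dot(np.ones(3))  # -> array([1., 2., 3.])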
class LagrangianHessian(object):
"""The Hessian of the Lagrangian as LinearOperator.
The Lagrangian is computed as the objective function plus the
constraints, each multiplied by its Lagrange multiplier.
"""
def __init__(self, n, objective_hess, constraints_hess):
self.n = n
self.objective_hess = objective_hess
self.constraints_hess = constraints_hess
def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)):
H_objective = self.objective_hess(x)
H_constraints = self.constraints_hess(x, v_eq, v_ineq)
def matvec(p):
return H_objective.dot(p) + H_constraints.dot(p)
return LinearOperator((self.n, self.n), matvec)
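# Illustrative sketch (added for exposition, not part of the original
# file): assemble the Lagrangian Hessian operator from toy objective and
# constraint Hessians; both callables below are assumptions.
def _demo_lagrangian_hessian():
    objective_hess = lambda x: np.eye(2)
    constraints_hess = lambda x, v_eq, v_ineq: 2.0 * np.eye(2)
    op = LagrangianHessian(2, objective_hess, constraints_hess)(np.zeros(2))
    return op.dot(np.ones(2))  # (I + 2*I) @ [1, 1] -> array([3., 3.])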
def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
start_time, tr_radius, constr_penalty, cg_info):
state.niter += 1
state.nfev = objective.nfev
state.njev = objective.ngev
state.nhev = objective.nhev
state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
if not last_iteration_failed:
state.x = x
state.fun = objective.f
state.grad = objective.g
state.v = [c.fun.v for c in prepared_constraints]
state.constr = [c.fun.f for c in prepared_constraints]
state.jac = [c.fun.J for c in prepared_constraints]
# Compute Lagrangian Gradient
state.lagrangian_grad = np.copy(state.grad)
for c in prepared_constraints:
state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
# Compute maximum constraint violation
state.constr_violation = 0
for i in range(len(prepared_constraints)):
lb, ub = prepared_constraints[i].bounds
c = state.constr[i]
state.constr_violation = np.max([state.constr_violation,
np.max(lb - c),
np.max(c - ub)])
state.execution_time = time.time() - start_time
state.tr_radius = tr_radius
state.constr_penalty = constr_penalty
state.cg_niter += cg_info["niter"]
state.cg_stop_cond = cg_info["stop_cond"]
return state
def update_state_ip(state, x, last_iteration_failed, objective,
prepared_constraints, start_time,
tr_radius, constr_penalty, cg_info,
barrier_parameter, barrier_tolerance):
state = update_state_sqp(state, x, last_iteration_failed, objective,
prepared_constraints, start_time, tr_radius,
constr_penalty, cg_info)
state.barrier_parameter = barrier_parameter
state.barrier_tolerance = barrier_tolerance
return state
def _minimize_trustregion_constr(fun, x0, args, grad,
hess, hessp, bounds, constraints,
xtol=1e-8, gtol=1e-8,
barrier_tol=1e-8,
sparse_jacobian=None,
callback=None, maxiter=1000,
verbose=0, finite_diff_rel_step=None,
initial_constr_penalty=1.0, initial_tr_radius=1.0,
initial_barrier_parameter=0.1,
initial_barrier_tolerance=0.1,
factorization_method=None,
disp=False):
"""Minimize a scalar function subject to constraints.
Parameters
----------
gtol : float, optional
Tolerance for termination by the norm of the Lagrangian gradient.
The algorithm will terminate when both the infinity norm (i.e. max
abs value) of the Lagrangian gradient and the constraint violation
are smaller than ``gtol``. Default is 1e-8.
xtol : float, optional
Tolerance for termination by the change of the independent variable.
The algorithm will terminate when ``tr_radius < xtol``, where
``tr_radius`` is the radius of the trust region used in the algorithm.
Default is 1e-8.
barrier_tol : float, optional
Threshold on the barrier parameter for the algorithm termination.
When inequality constraints are present the algorithm will terminate
only when the barrier parameter is less than `barrier_tol`.
Default is 1e-8.
sparse_jacobian : {bool, None}, optional
Determines how to represent Jacobians of the constraints. If bool,
then Jacobians of all the constraints will be converted to the
corresponding format. If None (default), then Jacobians won't be
converted, but the algorithm can proceed only if they all have the
same format.
initial_tr_radius: float, optional
Initial trust radius. The trust radius gives the maximum distance
between solution points in consecutive iterations. It reflects the
trust the algorithm puts in the local approximation of the optimization
problem. For an accurate local approximation the trust-region should be
large and for an approximation valid only close to the current point it
should be a small one. The trust radius is automatically updated throughout
the optimization process, with ``initial_tr_radius`` being its initial value.
Default is 1 (recommended in [1]_, p. 19).
initial_constr_penalty : float, optional
Initial constraints penalty parameter. The penalty parameter is used for
balancing the requirements of decreasing the objective function
and satisfying the constraints. It is used for defining the merit function:
``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
the constraints. The merit function is used for accepting or rejecting
trial points and ``constr_penalty`` weights the two conflicting goals
of reducing objective function and constraints. The penalty is automatically
updated throughout the optimization process, with
``initial_constr_penalty`` being its initial value. Default is 1
(recommended in [1]_, p. 19).
initial_barrier_parameter, initial_barrier_tolerance: float, optional
Initial barrier parameter and initial tolerance for the barrier subproblem.
Both are used only when inequality constraints are present. For dealing with
optimization problems ``min_x f(x)`` subject to inequality constraints
``c(x) <= 0`` the algorithm introduces slack variables, solving the problem
``min_(x,s) f(x) - barrier_parameter*sum(ln(s))`` subject to the equality
constraints ``c(x) + s = 0`` instead of the original problem. This subproblem
is solved for decreasing values of ``barrier_parameter`` and with decreasing
tolerances for the termination, starting with ``initial_barrier_parameter``
for the barrier parameter and ``initial_barrier_tolerance`` for the barrier
subproblem tolerance. Default is 0.1 for both values (recommended in [1]_,
p. 19).
factorization_method : string or None, optional
Method to factorize the Jacobian of the constraints. Use None (default)
for the auto selection or one of:
- 'NormalEquation' (requires scikit-sparse)
- 'AugmentedSystem'
- 'QRFactorization'
- 'SVDFactorization'
The methods 'NormalEquation' and 'AugmentedSystem' can be used only
with sparse constraints. The projections required by the algorithm
will be computed using, respectively, the normal equation and the
augmented system approaches explained in [1]_. 'NormalEquation'
computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
performs the LU factorization of an augmented system. They usually
provide similar results. 'AugmentedSystem' is used by default for
sparse matrices.
The methods 'QRFactorization' and 'SVDFactorization' can be used
only with dense constraints. They compute the required projections
using, respectively, QR and SVD factorizations. The 'SVDFactorization'
method can cope with Jacobian matrices with deficient row rank and will
be used whenever other factorization methods fail (which may imply the
conversion of sparse matrices to a dense format when required).
By default 'QRFactorization' is used for dense matrices.
finite_diff_rel_step : None or array_like, optional
Relative step size for the finite difference approximation.
maxiter : int, optional
Maximum number of algorithm iterations. Default is 1000.
verbose : {0, 1, 2, 3}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
* 3 : display progress during iterations (more complete report).
disp : bool, optional
If True, then `verbose` will be set to 1 if it was 0. Default is False.
Returns
-------
`OptimizeResult` with the fields documented below. Note the following:
1. All values corresponding to the constraints are ordered as they
were passed to the solver. And values corresponding to `bounds`
constraints are put *after* other constraints.
2. All numbers of function, Jacobian or Hessian evaluations correspond
to numbers of actual Python function calls. It means, for example,
that if a Jacobian is estimated by finite differences then the
number of Jacobian evaluations will be zero and the number of
function evaluations will be incremented by all calls during the
finite difference estimation.
x : ndarray, shape (n,)
Solution found.
optimality : float
Infinity norm of the Lagrangian gradient at the solution.
constr_violation : float
Maximum constraint violation at the solution.
fun : float
Objective function at the solution.
grad : ndarray, shape (n,)
Gradient of the objective function at the solution.
lagrangian_grad : ndarray, shape (n,)
Gradient of the Lagrangian function at the solution.
niter : int
Total number of iterations.
nfev : integer
Number of the objective function evaluations.
ngev : integer
Number of the objective function gradient evaluations.
nhev : integer
Number of the objective function Hessian evaluations.
cg_niter : int
Total number of the conjugate gradient method iterations.
method : {'equality_constrained_sqp', 'tr_interior_point'}
Optimization method used.
constr : list of ndarray
List of constraint values at the solution.
jac : list of {ndarray, sparse matrix}
List of the Jacobian matrices of the constraints at the solution.
v : list of ndarray
List of the Lagrange multipliers for the constraints at the solution.
For an inequality constraint a positive multiplier means that the upper
bound is active, a negative multiplier means that the lower bound is
active and if a multiplier is zero it means the constraint is not
active.
constr_nfev : list of int
Number of constraint evaluations for each of the constraints.
constr_njev : list of int
Number of Jacobian matrix evaluations for each of the constraints.
constr_nhev : list of int
Number of Hessian evaluations for each of the constraints.
tr_radius : float
Radius of the trust region at the last iteration.
constr_penalty : float
Penalty parameter at the last iteration, see `initial_constr_penalty`.
barrier_tolerance : float
Tolerance for the barrier subproblem at the last iteration.
Only for problems with inequality constraints.
barrier_parameter : float
Barrier parameter at the last iteration. Only for problems
with inequality constraints.
execution_time : float
Total execution time.
message : str
Termination message.
status : {0, 1, 2, 3}
Termination status:
* 0 : The maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `xtol` termination condition is satisfied.
* 3 : `callback` function requested termination.
cg_stop_cond : int
Reason for CG subproblem termination at the last iteration:
* 0 : CG subproblem not evaluated.
* 1 : Iteration limit was reached.
* 2 : Reached the trust-region boundary.
* 3 : Negative curvature detected.
* 4 : Tolerance was satisfied.
"""
x0 = np.atleast_1d(x0).astype(float)
n_vars = np.size(x0)
if callable(hessp) and hess is None:
hess = HessianLinearOperator(hessp, n_vars)
if disp and verbose == 0:
verbose = 1
if bounds is not None:
finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
bounds.keep_feasible, n_vars)
else:
finite_diff_bounds = (-np.inf, np.inf)
# Define the objective function
objective = ScalarFunction(fun, x0, args, grad, hess,
finite_diff_rel_step, finite_diff_bounds)
# Put constraints in list format when needed
if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
constraints = [constraints]
# Prepare constraints.
prepared_constraints = [
PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
for c in constraints]
# Check that all constraints are either sparse or dense.
n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
if 0 < n_sparse < len(prepared_constraints):
raise ValueError("All constraints must have the same kind of the "
"Jacobian --- either all sparse or all dense. "
"You can set the sparsity globally by setting "
"`sparse_jacobian` to either True of False.")
if prepared_constraints:
sparse_jacobian = n_sparse > 0
if bounds is not None:
prepared_constraints.append(PreparedConstraint(bounds, x0,
sparse_jacobian))
# Concatenate initial constraints to the canonical form.
c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
n_vars, prepared_constraints, sparse_jacobian)
# Prepare all canonical constraints and concatenate it into one.
canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
for c in prepared_constraints]
if len(canonical_all) == 0:
canonical = CanonicalConstraint.empty(n_vars)
elif len(canonical_all) == 1:
canonical = canonical_all[0]
else:
canonical = CanonicalConstraint.concatenate(canonical_all,
sparse_jacobian)
# Generate the Hessian of the Lagrangian.
lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)
# Choose appropriate method
if canonical.n_ineq == 0:
method = 'equality_constrained_sqp'
else:
method = 'tr_interior_point'
# Construct OptimizeResult
state = OptimizeResult(
niter=0, nfev=0, njev=0, nhev=0,
cg_niter=0, cg_stop_cond=0,
fun=objective.f, grad=objective.g,
lagrangian_grad=np.copy(objective.g),
constr=[c.fun.f for c in prepared_constraints],
jac=[c.fun.J for c in prepared_constraints],
constr_nfev=[0 for c in prepared_constraints],
constr_njev=[0 for c in prepared_constraints],
constr_nhev=[0 for c in prepared_constraints],
v=[c.fun.v for c in prepared_constraints],
method=method)
# Start counting
start_time = time.time()
# Define stop criteria
if method == 'equality_constrained_sqp':
def stop_criteria(state, x, last_iteration_failed,
optimality, constr_violation,
tr_radius, constr_penalty, cg_info):
state = update_state_sqp(state, x, last_iteration_failed,
objective, prepared_constraints,
start_time, tr_radius, constr_penalty,
cg_info)
if verbose == 2:
BasicReport.print_iteration(state.niter,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation)
elif verbose > 2:
SQPReport.print_iteration(state.niter,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation,
state.constr_penalty,
state.cg_stop_cond)
state.status = None
if callback is not None and callback(np.copy(state.x), state):
state.status = 3
elif state.optimality < gtol and state.constr_violation < gtol:
state.status = 1
elif state.tr_radius < xtol:
state.status = 2
elif state.niter > maxiter:
state.status = 0
return state.status in (0, 1, 2, 3)
elif method == 'tr_interior_point':
def stop_criteria(state, x, last_iteration_failed, tr_radius,
constr_penalty, cg_info, barrier_parameter,
barrier_tolerance):
state = update_state_ip(state, x, last_iteration_failed,
objective, prepared_constraints,
start_time, tr_radius, constr_penalty,
cg_info, barrier_parameter, barrier_tolerance)
if verbose == 2:
BasicReport.print_iteration(state.niter,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation)
elif verbose > 2:
IPReport.print_iteration(state.niter,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation,
state.constr_penalty,
state.barrier_parameter,
state.cg_stop_cond)
state.status = None
if callback is not None and callback(np.copy(state.x), state):
state.status = 3
elif state.optimality < gtol and state.constr_violation < gtol:
state.status = 1
elif (state.tr_radius < xtol
and state.barrier_parameter < barrier_tol):
state.status = 2
elif state.niter > maxiter:
state.status = 0
return state.status in (0, 1, 2, 3)
if verbose == 2:
BasicReport.print_header()
elif verbose > 2:
if method == 'equality_constrained_sqp':
SQPReport.print_header()
elif method == 'tr_interior_point':
IPReport.print_header()
# Call inferior function to do the optimization
if method == 'equality_constrained_sqp':
def fun_and_constr(x):
f = objective.fun(x)
c_eq, _ = canonical.fun(x)
return f, c_eq
def grad_and_jac(x):
g = objective.grad(x)
J_eq, _ = canonical.jac(x)
return g, J_eq
_, result = equality_constrained_sqp(
fun_and_constr, grad_and_jac, lagrangian_hess,
x0, objective.f, objective.g,
c_eq0, J_eq0,
stop_criteria, state,
initial_constr_penalty, initial_tr_radius,
factorization_method)
elif method == 'tr_interior_point':
_, result = tr_interior_point(
objective.fun, objective.grad, lagrangian_hess,
n_vars, canonical.n_ineq, canonical.n_eq,
canonical.fun, canonical.jac,
x0, objective.f, objective.g,
c_ineq0, J_ineq0, c_eq0, J_eq0,
stop_criteria,
canonical.keep_feasible,
xtol, state, initial_barrier_parameter,
initial_barrier_tolerance,
initial_constr_penalty, initial_tr_radius,
factorization_method)
result.message = TERMINATION_MESSAGES[result.status]
if verbose == 2:
BasicReport.print_footer()
elif verbose > 2:
if method == 'equality_constrained_sqp':
SQPReport.print_footer()
elif method == 'tr_interior_point':
IPReport.print_footer()
if verbose >= 1:
print(result.message)
print("Number of iterations: {}, function evaluations: {}, "
"CG iterations: {}, optimality: {:.2e}, "
"constraint violation: {:.2e}, execution time: {:4.2} s."
.format(result.niter, result.nfev, result.cg_niter,
result.optimality, result.constr_violation,
result.execution_time))
return result
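# Usage sketch (added for exposition, not part of the original file):
# this private solver is reached through the public
# ``scipy.optimize.minimize(..., method='trust-constr')`` interface.
def _demo_trust_constr():
    from scipy.optimize import minimize, NonlinearConstraint
    # Keep the iterate inside the unit disk while approaching (2, 0.5).
    cons = NonlinearConstraint(lambda x: x[0]**2 + x[1]**2, -np.inf, 1.0)
    res = minimize(lambda x: (x[0] - 2)**2 + (x[1] - 0.5)**2,
                   x0=[0.0, 0.0], method='trust-constr', constraints=[cons])
    return res.x  # roughly the projection of (2, 0.5) onto the unit disk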
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/report.py
"""Progress report printers."""
class ReportBase(object):
COLUMN_NAMES = NotImplemented
COLUMN_WIDTHS = NotImplemented
ITERATION_FORMATS = NotImplemented
@classmethod
def print_header(cls):
fmt = ("|"
+ "|".join(["{{:^{}}}".format(x) for x in cls.COLUMN_WIDTHS])
+ "|")
separators = ['-' * x for x in cls.COLUMN_WIDTHS]
print(fmt.format(*cls.COLUMN_NAMES))
print(fmt.format(*separators))
@classmethod
def print_iteration(cls, *args):
iteration_format = ["{{:{}}}".format(x) for x in cls.ITERATION_FORMATS]
fmt = "|" + "|".join(iteration_format) + "|"
print(fmt.format(*args))
@classmethod
def print_footer(cls):
print()
class BasicReport(ReportBase):
COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
"opt", "c viol"]
COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10]
ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e",
"^10.2e", "^10.2e", "^10.2e"]
class SQPReport(ReportBase):
COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
"opt", "c viol", "penalty", "CG stop"]
COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 7]
ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
"^10.2e", "^10.2e", "^7"]
class IPReport(ReportBase):
COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
"opt", "c viol", "penalty", "barrier param", "CG stop"]
COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7]
ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
"^10.2e", "^10.2e", "^13.2e", "^7"]
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/__init__.py
"""This module contains the equality constrained SQP solver."""
from .minimize_trustregion_constr import _minimize_trustregion_constr
__all__ = ['_minimize_trustregion_constr']
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/projections.py
"""Basic linear factorizations needed by the solver."""
from __future__ import division, print_function, absolute_import
from scipy.sparse import (bmat, csc_matrix, eye, issparse)
from scipy.sparse.linalg import LinearOperator
import scipy.linalg
import scipy.sparse.linalg
try:
from sksparse.cholmod import cholesky_AAt
sksparse_available = True
except ImportError:
import warnings
sksparse_available = False
import numpy as np
from warnings import warn
__all__ = [
'orthogonality',
'projections',
]
def orthogonality(A, g):
"""Measure orthogonality between a vector and the null space of a matrix.
Compute a measure of orthogonality between the null space
of the (possibly sparse) matrix ``A`` and a given vector ``g``.
The formula is a simplified (and cheaper) version of formula (3.13)
from [1]_.
``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``.
References
----------
.. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
"On the solution of equality constrained quadratic
programming problems arising in optimization."
SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
"""
# Compute vector norms
norm_g = np.linalg.norm(g)
# Compute Frobenius norm of the matrix A
if issparse(A):
norm_A = scipy.sparse.linalg.norm(A, ord='fro')
else:
norm_A = np.linalg.norm(A, ord='fro')
# Check if norms are zero
if norm_g == 0 or norm_A == 0:
return 0
norm_A_g = np.linalg.norm(A.dot(g))
# Orthogonality measure
orth = norm_A_g / (norm_A*norm_g)
return orth
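# Illustrative sketch (added for exposition, not part of the original
# file): a vector lying in the null space of A yields an orthogonality
# measure of zero, while a row of A yields one.
def _demo_orthogonality():
    A = np.array([[1.0, 0.0]])                     # null space: span{e2}
    print(orthogonality(A, np.array([0.0, 1.0])))  # 0.0
    print(orthogonality(A, np.array([1.0, 0.0])))  # 1.0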
def normal_equation_projections(A, m, n, orth_tol, max_refin, tol):
"""Return linear operators for matrix A using ``NormalEquation`` approach.
"""
# Cholesky factorization
factor = cholesky_AAt(A)
# z = x - A.T inv(A A.T) A x
def null_space(x):
v = factor(A.dot(x))
z = x - A.T.dot(v)
# Iterative refinement to improve roundoff
# errors described in [2]_, algorithm 5.1.
k = 0
while orthogonality(A, z) > orth_tol:
if k >= max_refin:
break
# z_next = z - A.T inv(A A.T) A z
v = factor(A.dot(z))
z = z - A.T.dot(v)
k += 1
return z
# z = inv(A A.T) A x
def least_squares(x):
return factor(A.dot(x))
# z = A.T inv(A A.T) x
def row_space(x):
return A.T.dot(factor(x))
return null_space, least_squares, row_space
def augmented_system_projections(A, m, n, orth_tol, max_refin, tol):
"""Return linear operators for matrix A - ``AugmentedSystem``."""
# Form augmented system
K = csc_matrix(bmat([[eye(n), A.T], [A, None]]))
# LU factorization
# TODO: Use a symmetric indefinite factorization
# to solve the system twice as fast (because
# of the symmetry).
try:
solve = scipy.sparse.linalg.factorized(K)
except RuntimeError:
warn("Singular Jacobian matrix. Using dense SVD decomposition to "
"perform the factorizations.")
return svd_factorization_projections(A.toarray(),
m, n, orth_tol,
max_refin, tol)
# z = x - A.T inv(A A.T) A x
# is computed solving the extended system:
# [I A.T] * [ z ] = [x]
# [A O ] [aux] [0]
def null_space(x):
# v = [x]
# [0]
v = np.hstack([x, np.zeros(m)])
# lu_sol = [ z ]
# [aux]
lu_sol = solve(v)
z = lu_sol[:n]
# Iterative refinement to improve roundoff
# errors described in [2]_, algorithm 5.2.
k = 0
while orthogonality(A, z) > orth_tol:
if k >= max_refin:
break
# new_v = [x] - [I A.T] * [ z ]
# [0] [A O ] [aux]
new_v = v - K.dot(lu_sol)
# [I A.T] * [delta z ] = new_v
# [A O ] [delta aux]
lu_update = solve(new_v)
# [ z ] += [delta z ]
# [aux] [delta aux]
lu_sol += lu_update
z = lu_sol[:n]
k += 1
# return z = x - A.T inv(A A.T) A x
return z
# z = inv(A A.T) A x
# is computed solving the extended system:
# [I A.T] * [aux] = [x]
# [A O ] [ z ] [0]
def least_squares(x):
# v = [x]
# [0]
v = np.hstack([x, np.zeros(m)])
# lu_sol = [aux]
# [ z ]
lu_sol = solve(v)
# return z = inv(A A.T) A x
return lu_sol[n:m+n]
# z = A.T inv(A A.T) x
# is computed solving the extended system:
# [I A.T] * [ z ] = [0]
# [A O ] [aux] [x]
def row_space(x):
# v = [0]
# [x]
v = np.hstack([np.zeros(n), x])
# lu_sol = [ z ]
# [aux]
lu_sol = solve(v)
# return z = A.T inv(A A.T) x
return lu_sol[:n]
return null_space, least_squares, row_space
def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):
"""Return linear operators for matrix A using ``QRFactorization`` approach.
"""
# QRFactorization
Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')
if np.linalg.norm(R[-1, :], np.inf) < tol:
warn('Singular Jacobian matrix. Using SVD decomposition to ' +
'perform the factorizations.')
return svd_factorization_projections(A, m, n,
orth_tol,
max_refin,
tol)
# z = x - A.T inv(A A.T) A x
def null_space(x):
# v = P inv(R) Q.T x
aux1 = Q.T.dot(x)
aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
v = np.zeros(m)
v[P] = aux2
z = x - A.T.dot(v)
# Iterative refinement to improve roundoff
# errors described in [2]_, algorithm 5.1.
k = 0
while orthogonality(A, z) > orth_tol:
if k >= max_refin:
break
# v = P inv(R) Q.T x
aux1 = Q.T.dot(z)
aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
v[P] = aux2
# z_next = z - A.T v
z = z - A.T.dot(v)
k += 1
return z
# z = inv(A A.T) A x
def least_squares(x):
# z = P inv(R) Q.T x
aux1 = Q.T.dot(x)
aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
z = np.zeros(m)
z[P] = aux2
return z
# z = A.T inv(A A.T) x
def row_space(x):
# z = Q inv(R.T) P.T x
aux1 = x[P]
aux2 = scipy.linalg.solve_triangular(R, aux1,
lower=False,
trans='T')
z = Q.dot(aux2)
return z
return null_space, least_squares, row_space
def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):
"""Return linear operators for matrix A using ``SVDFactorization`` approach.
"""
# SVD Factorization
U, s, Vt = scipy.linalg.svd(A, full_matrices=False)
# Remove dimensions related with very small singular values
U = U[:, s > tol]
Vt = Vt[s > tol, :]
s = s[s > tol]
# z = x - A.T inv(A A.T) A x
def null_space(x):
# v = U 1/s V.T x = inv(A A.T) A x
aux1 = Vt.dot(x)
aux2 = 1/s*aux1
v = U.dot(aux2)
z = x - A.T.dot(v)
# Iterative refinement to improve roundoff
# errors described in [2]_, algorithm 5.1.
k = 0
while orthogonality(A, z) > orth_tol:
if k >= max_refin:
break
# v = U 1/s V.T x = inv(A A.T) A x
aux1 = Vt.dot(z)
aux2 = 1/s*aux1
v = U.dot(aux2)
# z_next = z - A.T v
z = z - A.T.dot(v)
k += 1
return z
# z = inv(A A.T) A x
def least_squares(x):
# z = U 1/s V.T x = inv(A A.T) A x
aux1 = Vt.dot(x)
aux2 = 1/s*aux1
z = U.dot(aux2)
return z
# z = A.T inv(A A.T) x
def row_space(x):
# z = V 1/s U.T x
aux1 = U.T.dot(x)
aux2 = 1/s*aux1
z = Vt.T.dot(aux2)
return z
return null_space, least_squares, row_space
def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15):
"""Return three linear operators related with a given matrix A.
Parameters
----------
A : sparse matrix (or ndarray), shape (m, n)
Matrix ``A`` used in the projection.
method : string, optional
Method used to compute the given linear
operators. Should be one of:
- 'NormalEquation': The operators
will be computed using the
so-called normal equation approach
explained in [1]_. In order to do
so the Cholesky factorization of
``(A A.T)`` is computed. Exclusive
for sparse matrices.
- 'AugmentedSystem': The operators
will be computed using the
so-called augmented system approach
explained in [1]_. Exclusive
for sparse matrices.
- 'QRFactorization': Compute projections
using QR factorization. Exclusive for
dense matrices.
- 'SVDFactorization': Compute projections
using SVD factorization. Exclusive for
dense matrices.
orth_tol : float, optional
Tolerance for iterative refinements.
max_refin : int, optional
Maximum number of iterative refinements.
tol : float, optional
Tolerance for singular values.
Returns
-------
Z : LinearOperator, shape (n, n)
Null-space operator. For a given vector ``x``,
the null space operator is equivalent to apply
a projection matrix ``P = I - A.T inv(A A.T) A``
to the vector. It can be shown that this is
equivalent to projecting ``x`` onto the null space
of A.
LS : LinearOperator, shape (m, n)
Least-squares operator. For a given vector ``x``,
the least-squares operator is equivalent to applying the
pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A``
to the vector. It can be shown that this vector
``pinv(A.T) x`` is the least-squares solution of
``A.T y = x``.
Y : LinearOperator, shape (n, m)
Row-space operator. For a given vector ``x``,
the row-space operator is equivalent to applying the
projection matrix ``Q = A.T inv(A A.T)``
to the vector. It can be shown that the resulting
vector ``y = Q x`` is the minimum norm solution
of ``A y = x``.
Notes
-----
Uses iterative refinements described in [1]_
during the computation of ``Z`` in order to
cope with the possibility of large roundoff errors.
References
----------
.. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
"On the solution of equality constrained quadratic
programming problems arising in optimization."
SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
"""
m, n = np.shape(A)
# The factorization of an empty matrix
# only works for the sparse representation.
if m*n == 0:
A = csc_matrix(A)
# Check Argument
if issparse(A):
if method is None:
method = "AugmentedSystem"
if method not in ("NormalEquation", "AugmentedSystem"):
raise ValueError("Method not allowed for sparse matrix.")
if method == "NormalEquation" and not sksparse_available:
warnings.warn(("Only accepts 'NormalEquation' option when"
" scikit-sparse is available. Using "
"'AugmentedSystem' option instead."),
ImportWarning)
method = 'AugmentedSystem'
else:
if method is None:
method = "QRFactorization"
if method not in ("QRFactorization", "SVDFactorization"):
raise ValueError("Method not allowed for dense array.")
if method == 'NormalEquation':
null_space, least_squares, row_space \
= normal_equation_projections(A, m, n, orth_tol, max_refin, tol)
elif method == 'AugmentedSystem':
null_space, least_squares, row_space \
= augmented_system_projections(A, m, n, orth_tol, max_refin, tol)
elif method == "QRFactorization":
null_space, least_squares, row_space \
= qr_factorization_projections(A, m, n, orth_tol, max_refin, tol)
elif method == "SVDFactorization":
null_space, least_squares, row_space \
= svd_factorization_projections(A, m, n, orth_tol, max_refin, tol)
Z = LinearOperator((n, n), null_space)
LS = LinearOperator((m, n), least_squares)
Y = LinearOperator((n, m), row_space)
return Z, LS, Y
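# Illustrative sketch (added for exposition, not part of the original
# file): the returned operators satisfy ``A Z x ~ 0`` and ``A Y b ~ b``
# on a small dense matrix (which selects 'QRFactorization').
def _demo_projections():
    A = np.array([[1.0, 1.0, 0.0],
                  [0.0, 1.0, 1.0]])
    Z, LS, Y = projections(A)
    x = np.array([1.0, 2.0, 3.0])
    print(A.dot(Z.dot(x)))                     # ~ [0, 0]
    print(A.dot(Y.dot(np.array([1.0, 1.0]))))  # ~ [1, 1]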
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py
"""Equality-constrained quadratic programming solvers."""
from __future__ import division, print_function, absolute_import
from scipy.sparse import (linalg, bmat, csc_matrix)
from math import copysign
import numpy as np
from numpy.linalg import norm
__all__ = [
'eqp_kktfact',
'sphere_intersections',
'box_intersections',
'box_sphere_intersections',
'inside_box_boundaries',
'modified_dogleg',
'projected_cg'
]
# For comparison with the projected CG
def eqp_kktfact(H, c, A, b):
"""Solve equality-constrained quadratic programming (EQP) problem.
Solve ``min 1/2 x.T H x + x.T c`` subject to ``A x + b = 0``
using direct factorization of the KKT system.
Parameters
----------
H : sparse matrix, shape (n, n)
Hessian matrix of the EQP problem.
c : array_like, shape (n,)
Gradient of the quadratic objective function.
A : sparse matrix
Jacobian matrix of the EQP problem.
b : array_like, shape (m,)
Right-hand side of the constraint equation.
Returns
-------
x : array_like, shape (n,)
Solution of the KKT problem.
lagrange_multipliers : ndarray, shape (m,)
Lagrange multipliers of the KKT problem.
"""
n, = np.shape(c) # Number of parameters
m, = np.shape(b) # Number of constraints
# Karush-Kuhn-Tucker matrix of coefficients.
# Defined as in Nocedal/Wright "Numerical
# Optimization" p.452 in Eq. (16.4).
kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]]))
# Vector of coefficients.
kkt_vec = np.hstack([-c, -b])
# TODO: Use a symmetric indefinite factorization
# to solve the system twice as fast (because
# of the symmetry).
lu = linalg.splu(kkt_matrix)
kkt_sol = lu.solve(kkt_vec)
x = kkt_sol[:n]
lagrange_multipliers = -kkt_sol[n:n+m]
return x, lagrange_multipliers
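# Illustrative sketch (added for exposition, not part of the original
# file): the closest point to the origin on the line x0 + x1 = 1 solves
# ``min 1/2 ||x||^2`` subject to ``A x + b = 0`` with A = [1, 1], b = -1.
def _demo_eqp_kktfact():
    H = csc_matrix(np.eye(2))
    A = csc_matrix(np.array([[1.0, 1.0]]))
    x, multipliers = eqp_kktfact(H, np.zeros(2), A, np.array([-1.0]))
    return x  # -> array([0.5, 0.5])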
def sphere_intersections(z, d, trust_radius,
entire_line=False):
"""Find the intersection between segment (or line) and spherical constraints.
Find the intersection between the segment (or line) defined by the
parametric equation ``x(t) = z + t*d`` and the ball
``||x|| <= trust_radius``.
Parameters
----------
z : array_like, shape (n,)
Initial point.
d : array_like, shape (n,)
Direction.
trust_radius : float
Ball radius.
entire_line : bool, optional
When ``True`` the function returns the intersection between the line
``x(t) = z + t*d`` (``t`` can assume any value) and the ball
``||x|| <= trust_radius``. When ``False`` returns the intersection
between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the ball.
Returns
-------
ta, tb : float
The line/segment ``x(t) = z + t*d`` is inside the ball
for ``ta <= t <= tb``.
intersect : bool
When ``True`` there is an intersection between the line/segment
and the sphere. On the other hand, when ``False``, there is no
intersection.
"""
# Special case when d=0
if norm(d) == 0:
return 0, 0, False
# Check for inf trust_radius
if np.isinf(trust_radius):
if entire_line:
ta = -np.inf
tb = np.inf
else:
ta = 0
tb = 1
intersect = True
return ta, tb, intersect
a = np.dot(d, d)
b = 2 * np.dot(z, d)
c = np.dot(z, z) - trust_radius**2
discriminant = b*b - 4*a*c
if discriminant < 0:
intersect = False
return 0, 0, intersect
sqrt_discriminant = np.sqrt(discriminant)
# The following calculation is mathematically
# equivalent to:
# ta = (-b - sqrt_discriminant) / (2*a)
# tb = (-b + sqrt_discriminant) / (2*a)
# but produces smaller roundoff errors.
# See Golub & Van Loan, "Matrix Computations",
# p. 97, for a justification.
aux = b + copysign(sqrt_discriminant, b)
ta = -aux / (2*a)
tb = -2*c / aux
ta, tb = sorted([ta, tb])
if entire_line:
intersect = True
else:
# Check whether the intersection happens
# within the segment length (0 <= t <= 1).
if tb < 0 or ta > 1:
intersect = False
ta = 0
tb = 0
else:
intersect = True
# Restrict intersection interval
# between 0 and 1.
ta = max(0, ta)
tb = min(1, tb)
return ta, tb, intersect
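# Illustrative sketch (added for exposition, not part of the original
# file): the segment starting at the origin along d = (2, 0) stays
# inside the unit ball for 0 <= t <= 0.5.
def _demo_sphere_intersections():
    ta, tb, intersect = sphere_intersections(
        np.zeros(2), np.array([2.0, 0.0]), 1.0)
    return ta, tb, intersect  # -> (0, 0.5, True)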
def box_intersections(z, d, lb, ub,
entire_line=False):
"""Find the intersection between segment (or line) and box constraints.
Find the intersection between the segment (or line) defined by the
parametric equation ``x(t) = z + t*d`` and the rectangular box
``lb <= x <= ub``.
Parameters
----------
z : array_like, shape (n,)
Initial point.
d : array_like, shape (n,)
Direction.
lb : array_like, shape (n,)
Lower bounds to each one of the components of ``x``. Used
to delimit the rectangular box.
ub : array_like, shape (n, )
Upper bounds to each one of the components of ``x``. Used
to delimit the rectangular box.
entire_line : bool, optional
When ``True`` the function returns the intersection between the line
``x(t) = z + t*d`` (``t`` can assume any value) and the rectangular
box. When ``False`` returns the intersection between the segment
``x(t) = z + t*d``, ``0 <= t <= 1``, and the rectangular box.
Returns
-------
ta, tb : float
The line/segment ``x(t) = z + t*d`` is inside the box
for ``ta <= t <= tb``.
intersect : bool
When ``True`` there is an intersection between the line (or segment)
and the rectangular box. On the other hand, when ``False``, there is no
intersection.
"""
# Make sure it is a numpy array
z = np.asarray(z)
d = np.asarray(d)
lb = np.asarray(lb)
ub = np.asarray(ub)
# Special case when d=0
if norm(d) == 0:
return 0, 0, False
# Get values for which d==0
zero_d = (d == 0)
# If the boundaries are not satisfied for some coordinate
# for which "d" is zero, there is no box-line intersection.
if (z[zero_d] < lb[zero_d]).any() or (z[zero_d] > ub[zero_d]).any():
intersect = False
return 0, 0, intersect
# Remove values for which d is zero
not_zero_d = np.logical_not(zero_d)
z = z[not_zero_d]
d = d[not_zero_d]
lb = lb[not_zero_d]
ub = ub[not_zero_d]
# Find a series of intervals (t_lb[i], t_ub[i]).
t_lb = (lb-z) / d
t_ub = (ub-z) / d
# Get the intersection of all those intervals.
ta = max(np.minimum(t_lb, t_ub))
tb = min(np.maximum(t_lb, t_ub))
# Check if intersection is feasible
if ta <= tb:
intersect = True
else:
intersect = False
# Check whether the intersection happens within the segment length.
if not entire_line:
if tb < 0 or ta > 1:
intersect = False
ta = 0
tb = 0
else:
# Restrict intersection interval between 0 and 1.
ta = max(0, ta)
tb = min(1, tb)
return ta, tb, intersect
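# Illustrative sketch (added for exposition, not part of the original
# file): the segment from (-1, 0) along d = (2, 0) enters the box
# [0, 1] x [-1, 1] at t = 0.5 and stays inside until t = 1.
def _demo_box_intersections():
    ta, tb, intersect = box_intersections(
        np.array([-1.0, 0.0]), np.array([2.0, 0.0]),
        np.array([0.0, -1.0]), np.array([1.0, 1.0]))
    return ta, tb, intersect  # -> (0.5, 1.0, True)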
def box_sphere_intersections(z, d, lb, ub, trust_radius,
entire_line=False,
extra_info=False):
"""Find the intersection between segment (or line) and box/sphere constraints.
Find the intersection between the segment (or line) defined by the
parametric equation ``x(t) = z + t*d``, the rectangular box
``lb <= x <= ub`` and the ball ``||x|| <= trust_radius``.
Parameters
----------
z : array_like, shape (n,)
Initial point.
d : array_like, shape (n,)
Direction.
lb : array_like, shape (n,)
Lower bounds to each one of the components of ``x``. Used
to delimit the rectangular box.
ub : array_like, shape (n, )
Upper bounds to each one of the components of ``x``. Used
to delimit the rectangular box.
trust_radius : float
Ball radius.
entire_line : bool, optional
When ``True`` the function returns the intersection between the line
``x(t) = z + t*d`` (``t`` can assume any value) and the constraints.
When ``False`` returns the intersection between the segment
``x(t) = z + t*d``, ``0 <= t <= 1`` and the constraints.
extra_info : bool, optional
When ``True`` returns ``intersect_sphere`` and ``intersect_box``.
Returns
-------
ta, tb : float
The line/segment ``x(t) = z + t*d`` is inside the rectangular box and
inside the ball for ``ta <= t <= tb``.
intersect : bool
When ``True`` there is an intersection between the line (or segment)
and both constraints. On the other hand, when ``False``, there is no
intersection.
sphere_info : dict, optional
Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]``
for which the line intersects the ball, and a boolean value indicating
whether the sphere is intersected by the line.
box_info : dict, optional
Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]``
for which the line intersects the box, and a boolean value indicating
whether the box is intersected by the line.
"""
ta_b, tb_b, intersect_b = box_intersections(z, d, lb, ub,
entire_line)
ta_s, tb_s, intersect_s = sphere_intersections(z, d,
trust_radius,
entire_line)
ta = np.maximum(ta_b, ta_s)
tb = np.minimum(tb_b, tb_s)
if intersect_b and intersect_s and ta <= tb:
intersect = True
else:
intersect = False
if extra_info:
sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s}
box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b}
return ta, tb, intersect, sphere_info, box_info
else:
return ta, tb, intersect
def inside_box_boundaries(x, lb, ub):
"""Check if lb <= x <= ub."""
return (lb <= x).all() and (x <= ub).all()
def reinforce_box_boundaries(x, lb, ub):
"""Return clipped value of x"""
return np.minimum(np.maximum(x, lb), ub)
def modified_dogleg(A, Y, b, trust_radius, lb, ub):
"""Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region.
Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2``
subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification
of the classical dogleg approach.
Parameters
----------
A : LinearOperator (or sparse matrix or ndarray), shape (m, n)
Matrix ``A`` in the minimization problem. It should have
dimension ``(m, n)`` such that ``m < n``.
Y : LinearOperator (or sparse matrix or ndarray), shape (n, m)
LinearOperator that applies the projection matrix
``Q = A.T inv(A A.T)`` to a vector. The obtained vector
``y = Q x`` is the minimum norm solution of ``A y = x``.
b : array_like, shape (m,)
Vector ``b`` in the minimization problem.
trust_radius: float
Trust radius to be considered. Delimits a sphere boundary
to the problem.
lb : array_like, shape (n,)
Lower bounds to each one of the components of ``x``.
It is expected that ``lb <= 0``, otherwise the algorithm
may fail. If ``lb[i] = -Inf`` the lower
bound for the i-th component is just ignored.
ub : array_like, shape (n, )
Upper bounds to each one of the components of ``x``.
It is expected that ``ub >= 0``, otherwise the algorithm
may fail. If ``ub[i] = Inf`` the upper bound for the i-th
component is just ignored.
Returns
-------
x : array_like, shape (n,)
Solution to the problem.
Notes
-----
Based on implementations described in p.p. 885-886 from [1]_.
References
----------
.. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
"An interior point algorithm for large-scale nonlinear
programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
"""
# Compute minimum norm minimizer of 1/2*|| A x + b ||^2.
newton_point = -Y.dot(b)
# Check for interior point
if inside_box_boundaries(newton_point, lb, ub) \
and norm(newton_point) <= trust_radius:
x = newton_point
return x
# Compute gradient vector ``g = A.T b``
g = A.T.dot(b)
# Compute Cauchy point:
# ``cauchy_point = -(g.T g / ((A g).T (A g))) g``.
A_g = A.dot(g)
cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g
# Origin
origin_point = np.zeros_like(cauchy_point)
# Check the segment between cauchy_point and newton_point
# for a possible solution.
z = cauchy_point
p = newton_point - cauchy_point
_, alpha, intersect = box_sphere_intersections(z, p, lb, ub,
trust_radius)
if intersect:
x1 = z + alpha*p
else:
# Check the segment between the origin and cauchy_point
# for a possible solution.
z = origin_point
p = cauchy_point
_, alpha, _ = box_sphere_intersections(z, p, lb, ub,
trust_radius)
x1 = z + alpha*p
# Check the segment between origin and newton_point
# for a possible solution.
z = origin_point
p = newton_point
_, alpha, _ = box_sphere_intersections(z, p, lb, ub,
trust_radius)
x2 = z + alpha*p
# Return the best solution among x1 and x2.
if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):
return x1
else:
return x2
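# Illustrative sketch (added for exposition, not part of the original
# file): when the Newton point is feasible, modified_dogleg returns it
# directly. ``Y`` is built with the sibling ``projections`` module
# (assumed importable from this package).
def _demo_modified_dogleg():
    from .projections import projections
    A = np.array([[1.0, 1.0]])
    _, _, Y = projections(A)
    lb, ub = np.full(2, -np.inf), np.full(2, np.inf)
    # Newton point [0.5, 0.5] lies inside the radius-2 ball -> returned.
    return modified_dogleg(A, Y, np.array([-1.0]), 2.0, lb, ub)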
def projected_cg(H, c, Z, Y, b, trust_radius=np.inf,
lb=None, ub=None, tol=None,
max_iter=None, max_infeasible_iter=None,
return_all=False):
"""Solve EQP problem with projected CG method.
Solve equality-constrained quadratic programming problem
``min 1/2 x.T H x + x.T c`` subject to ``A x + b = 0`` and,
possibly, to trust region constraints ``||x|| < trust_radius``
and box constraints ``lb <= x <= ub``.
Parameters
----------
H : LinearOperator (or sparse matrix or ndarray), shape (n, n)
Operator for computing ``H v``.
c : array_like, shape (n,)
Gradient of the quadratic objective function.
Z : LinearOperator (or sparse matrix or ndarray), shape (n, n)
Operator for projecting ``x`` into the null space of A.
Y : LinearOperator, sparse matrix, ndarray, shape (n, m)
Operator that, for a given vector ``b``, computes the smallest
norm solution of ``A x + b = 0``.
b : array_like, shape (m,)
Right-hand side of the constraint equation.
trust_radius : float, optional
Trust radius to be considered. By default uses ``trust_radius=inf``,
which means no trust radius at all.
lb : array_like, shape (n,), optional
Lower bounds to each one of the components of ``x``.
If ``lb[i] = -Inf`` the lower bound for the i-th
component is just ignored (default).
ub : array_like, shape (n, ), optional
Upper bounds to each one of the components of ``x``.
If ``ub[i] = Inf`` the upper bound for the i-th
component is just ignored (default).
tol : float, optional
Tolerance used to interrupt the algorithm.
max_iter : int, optional
Maximum algorithm iterations, where ``max_iter <= n-m``.
By default uses ``max_iter = n-m``.
max_infeasible_iter : int, optional
Maximum infeasible (regarding box constraints) iterations the
algorithm is allowed to take.
By default uses ``max_infeasible_iter = n-m``.
return_all : bool, optional
When ``True``, return the list of all vectors computed through the iterations.
Returns
-------
x : array_like, shape (n,)
Solution of the EQP problem.
info : Dict
Dictionary containing the following:
- niter : Number of iterations.
- stop_cond : Reason for algorithm termination:
1. Iteration limit was reached;
2. Reached the trust-region boundary;
3. Negative curvature detected;
4. Tolerance was satisfied.
- allvecs : List containing all intermediary vectors (optional).
- hits_boundary : True if the proposed step is on the boundary
of the trust region.
Notes
-----
Implementation of Algorithm 6.2 on [1]_.
In the absence of spherical and box constraints, for sufficient
iterations, the method returns a truly optimal result.
In the presence of those constraints the value returned is only
an inexpensive approximation of the optimal value.
References
----------
.. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
"On the solution of equality constrained quadratic
programming problems arising in optimization."
SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
"""
CLOSE_TO_ZERO = 1e-25
n, = np.shape(c) # Number of parameters
m, = np.shape(b) # Number of constraints
# Initial Values
x = Y.dot(-b)
r = Z.dot(H.dot(x) + c)
g = Z.dot(r)
p = -g
# Store ``x`` value
if return_all:
allvecs = [x]
# Values for the first iteration
H_p = H.dot(p)
rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)
# If x > trust-region the problem does not have a solution.
tr_distance = trust_radius - norm(x)
if tr_distance < 0:
raise ValueError("Trust region problem does not have a solution.")
# If x == trust_radius, then x is the solution
# to the optimization problem, since x is the
# minimum norm solution to Ax=b.
elif tr_distance < CLOSE_TO_ZERO:
info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True}
if return_all:
allvecs.append(x)
info['allvecs'] = allvecs
return x, info
# Set default tolerance
if tol is None:
tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO)
# Set default lower and upper bounds
if lb is None:
lb = np.full(n, -np.inf)
if ub is None:
ub = np.full(n, np.inf)
# Set maximum iterations
if max_iter is None:
max_iter = n-m
max_iter = min(max_iter, n-m)
# Set maximum infeasible iterations
if max_infeasible_iter is None:
max_infeasible_iter = n-m
hits_boundary = False
stop_cond = 1
counter = 0
last_feasible_x = np.zeros_like(x)
k = 0
for i in range(max_iter):
# Stop criteria - Tolerance : r.T g < tol
if rt_g < tol:
stop_cond = 4
break
k += 1
# Compute curvature
pt_H_p = H_p.dot(p)
# Stop criteria - Negative curvature
if pt_H_p <= 0:
if np.isinf(trust_radius):
raise ValueError("Negative curvature not "
"allowed for unrestrited "
"problems.")
else:
# Find intersection with constraints
_, alpha, intersect = box_sphere_intersections(
x, p, lb, ub, trust_radius, entire_line=True)
# Update solution
if intersect:
x = x + alpha*p
# Reinforce variables are inside box constraints.
# This is only necessary because of roundoff errors.
x = reinforce_box_boundaries(x, lb, ub)
# Record termination information
stop_cond = 3
hits_boundary = True
break
# Get next step
alpha = rt_g / pt_H_p
x_next = x + alpha*p
# Stop criteria - Hits boundary
if np.linalg.norm(x_next) >= trust_radius:
# Find intersection with box constraints
_, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
trust_radius)
# Update solution
if intersect:
x = x + theta*alpha*p
# Reinforce variables are inside box constraints.
# This is only necessary because of roundoff errors.
x = reinforce_box_boundaries(x, lb, ub)
# Record termination information
stop_cond = 2
hits_boundary = True
break
# Check if ``x`` is inside the box and start counter if it is not.
if inside_box_boundaries(x_next, lb, ub):
counter = 0
else:
counter += 1
# Whenever outside box constraints keep looking for intersections.
if counter > 0:
_, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
trust_radius)
if intersect:
last_feasible_x = x + theta*alpha*p
# Reinforce variables are inside box constraints.
# This is only necessary because of roundoff errors.
last_feasible_x = reinforce_box_boundaries(last_feasible_x,
lb, ub)
counter = 0
# Stop after too many infeasible (regarding box constraints) iterations.
if counter > max_infeasible_iter:
break
# Store ``x_next`` value
if return_all:
allvecs.append(x_next)
# Update residual
r_next = r + alpha*H_p
# Project residual g+ = Z r+
g_next = Z.dot(r_next)
# Compute conjugate direction step d
rt_g_next = norm(g_next)**2 # g.T g = r.T g (ref [1]_ p.1389)
beta = rt_g_next / rt_g
p = - g_next + beta*p
# Prepare for next iteration
x = x_next
g = g_next
r = g_next
rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)
H_p = H.dot(p)
if not inside_box_boundaries(x, lb, ub):
x = last_feasible_x
hits_boundary = True
info = {'niter': k, 'stop_cond': stop_cond,
'hits_boundary': hits_boundary}
if return_all:
info['allvecs'] = allvecs
return x, info
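# Illustrative sketch (added for exposition, not part of the original
# file): solve the same toy EQP as ``eqp_kktfact`` with projected CG;
# the projection operators come from the sibling ``projections`` module
# (assumed importable from this package).
def _demo_projected_cg():
    from .projections import projections
    H = np.eye(2)
    A = np.array([[1.0, 1.0]])
    Z, _, Y = projections(A)
    x, info = projected_cg(H, np.zeros(2), Z, Y, np.array([-1.0]))
    return x, info['stop_cond']  # -> (array([0.5, 0.5]), 4)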
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py
"""Byrd-Omojokun Trust-Region SQP method."""
from __future__ import division, print_function, absolute_import
from scipy.sparse import eye as speye
from .projections import projections
from .qp_subproblem import modified_dogleg, projected_cg, box_intersections
import numpy as np
from numpy.linalg import norm
__all__ = ['equality_constrained_sqp']
def default_scaling(x):
n, = np.shape(x)
return speye(n)
def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess,
x0, fun0, grad0, constr0,
jac0, stop_criteria,
state,
initial_penalty,
initial_trust_radius,
factorization_method,
trust_lb=None,
trust_ub=None,
scaling=default_scaling):
"""Solve nonlinear equality-constrained problem using trust-region SQP.
Solve optimization problem:
minimize fun(x)
subject to: constr(x) = 0
using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several
implementation details are based on [2]_ and [3]_, p. 549.
References
----------
.. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the
implementation of an algorithm for large-scale equality
constrained optimization." SIAM Journal on
Optimization 8.3 (1998): 682-706.
.. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
"An interior point algorithm for large-scale nonlinear
programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
.. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
Second Edition (2006).
"""
PENALTY_FACTOR = 0.3 # Rho from formula (3.51), reference [2]_, p.891.
LARGE_REDUCTION_RATIO = 0.9
INTERMEDIARY_REDUCTION_RATIO = 0.3
SUFFICIENT_REDUCTION_RATIO = 1e-8 # Eta from reference [2]_, p.892.
TRUST_ENLARGEMENT_FACTOR_L = 7.0
TRUST_ENLARGEMENT_FACTOR_S = 2.0
MAX_TRUST_REDUCTION = 0.5
MIN_TRUST_REDUCTION = 0.1
SOC_THRESHOLD = 0.1
TR_FACTOR = 0.8 # Zeta from formula (3.21), reference [2]_, p.885.
BOX_FACTOR = 0.5
n, = np.shape(x0) # Number of parameters
# Set default lower and upper bounds.
if trust_lb is None:
trust_lb = np.full(n, -np.inf)
if trust_ub is None:
trust_ub = np.full(n, np.inf)
# Initial values
x = np.copy(x0)
trust_radius = initial_trust_radius
penalty = initial_penalty
# Compute Values
f = fun0
c = grad0
b = constr0
A = jac0
S = scaling(x)
# Get projections
Z, LS, Y = projections(A, factorization_method)
# Compute least-square lagrange multipliers
v = -LS.dot(c)
# Compute Hessian
H = lagr_hess(x, v)
# Update state parameters
optimality = norm(c + A.T.dot(v), np.inf)
constr_violation = norm(b, np.inf) if len(b) > 0 else 0
cg_info = {'niter': 0, 'stop_cond': 0,
'hits_boundary': False}
last_iteration_failed = False
while not stop_criteria(state, x, last_iteration_failed,
optimality, constr_violation,
trust_radius, penalty, cg_info):
# Normal Step - `dn`
# minimize 1/2*||A dn + b||^2
# subject to:
# ||dn|| <= TR_FACTOR * trust_radius
# BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub.
dn = modified_dogleg(A, Y, b,
TR_FACTOR*trust_radius,
BOX_FACTOR*trust_lb,
BOX_FACTOR*trust_ub)
# Tangential Step - `dt`
# Solve the QP problem:
# minimize 1/2 dt.T H dt + dt.T (H dn + c)
# subject to:
# A dt = 0
# ||dt|| <= sqrt(trust_radius**2 - ||dn||**2)
# lb - dn <= dt <= ub - dn
c_t = H.dot(dn) + c
b_t = np.zeros_like(b)
trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2)
lb_t = trust_lb - dn
ub_t = trust_ub - dn
dt, cg_info = projected_cg(H, c_t, Z, Y, b_t,
trust_radius_t,
lb_t, ub_t)
# Compute update (normal + tangential steps).
d = dn + dt
# Compute second order model: 1/2 d.T H d + c.T d.
quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d)
# Compute linearized constraint: l = A d + b.
linearized_constr = A.dot(d)+b
# Compute new penalty parameter according to formula (3.52),
# reference [2]_, p.891.
vpred = norm(b) - norm(linearized_constr)
# Guarantee `vpred` always positive,
# regardless of roundoff errors.
vpred = max(1e-16, vpred)
previous_penalty = penalty
if quadratic_model > 0:
new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred)
penalty = max(penalty, new_penalty)
# Compute predicted reduction according to formula (3.52),
# reference [2]_, p.891.
predicted_reduction = -quadratic_model + penalty*vpred
# Compute merit function at current point
merit_function = f + penalty*norm(b)
# Evaluate function and constraints at trial point
x_next = x + S.dot(d)
f_next, b_next = fun_and_constr(x_next)
# Compute merit function at trial point
merit_function_next = f_next + penalty*norm(b_next)
# Compute actual reduction according to formula (3.54),
# reference [2]_, p.892.
actual_reduction = merit_function - merit_function_next
# Compute reduction ratio
reduction_ratio = actual_reduction / predicted_reduction
# Second order correction (SOC), reference [2]_, p.892.
if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \
norm(dn) <= SOC_THRESHOLD * norm(dt):
# Compute second order correction
y = -Y.dot(b_next)
# Make sure increment is inside box constraints
_, t, intersect = box_intersections(d, y, trust_lb, trust_ub)
# Compute tentative point
x_soc = x + S.dot(d + t*y)
f_soc, b_soc = fun_and_constr(x_soc)
# Recompute actual reduction
merit_function_soc = f_soc + penalty*norm(b_soc)
actual_reduction_soc = merit_function - merit_function_soc
# Recompute reduction ratio
reduction_ratio_soc = actual_reduction_soc / predicted_reduction
if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO:
x_next = x_soc
f_next = f_soc
b_next = b_soc
reduction_ratio = reduction_ratio_soc
# Readjust trust region step, formula (3.55), reference [2]_, p.892.
if reduction_ratio >= LARGE_REDUCTION_RATIO:
trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d),
trust_radius)
elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO:
trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d),
trust_radius)
# Reduce trust region step, according to reference [3]_, p.696.
elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO:
trust_reduction \
= (1-SUFFICIENT_REDUCTION_RATIO)/(1-reduction_ratio)
new_trust_radius = trust_reduction * norm(d)
if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius:
trust_radius *= MAX_TRUST_REDUCTION
elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius:
trust_radius = new_trust_radius
else:
trust_radius *= MIN_TRUST_REDUCTION
# Update iteration
if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO:
x = x_next
f, b = f_next, b_next
c, A = grad_and_jac(x)
S = scaling(x)
# Get projections
Z, LS, Y = projections(A, factorization_method)
# Compute least-square lagrange multipliers
v = -LS.dot(c)
# Compute Hessian
H = lagr_hess(x, v)
# Set Flag
last_iteration_failed = False
# Optimality values
optimality = norm(c + A.T.dot(v), np.inf)
constr_violation = norm(b, np.inf) if len(b) > 0 else 0
else:
penalty = previous_penalty
last_iteration_failed = True
return x, state
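# Editor's sketch (hypothetical helper, not part of SciPy): the penalty
# update above (formula (3.52) of reference [2]_) enlarges the penalty just
# enough to keep the predicted reduction of the merit function positive.
# A standalone numeric illustration, with an illustrative penalty factor:
def _penalty_update_sketch(quadratic_model, vpred, penalty,
                           penalty_factor=0.3):
    vpred = max(1e-16, vpred)  # guard against roundoff, as above
    if quadratic_model > 0:
        penalty = max(penalty,
                      quadratic_model / ((1 - penalty_factor)*vpred))
    predicted_reduction = -quadratic_model + penalty*vpred
    # e.g. quadratic_model = 1.0, vpred = 0.5, penalty = 1.0 gives
    # penalty ~= 2.857 and predicted_reduction ~= 0.429 > 0.
    return penalty, predicted_reduction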
| 8,676 | 38.621005 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py
|
"""Trust-region interior point method.
References
----------
.. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
"An interior point algorithm for large-scale nonlinear
programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
.. [2] Byrd, Richard H., Guanghui Liu, and Jorge Nocedal.
"On the local behavior of an interior point method for
nonlinear programming." Numerical analysis 1997 (1997): 37-56.
.. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
Second Edition (2006).
"""
from __future__ import division, print_function, absolute_import
import scipy.sparse as sps
import numpy as np
from .equality_constrained_sqp import equality_constrained_sqp
from scipy.sparse.linalg import LinearOperator
__all__ = ['tr_interior_point']
class BarrierSubproblem:
"""
Barrier optimization problem:
minimize fun(x) - barrier_parameter*sum(log(s))
subject to: constr_eq(x) = 0
constr_ineq(x) + s = 0
"""
def __init__(self, x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq,
constr, jac, barrier_parameter, tolerance,
enforce_feasibility, global_stop_criteria,
xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0,
jac_eq0):
# Store parameters
self.n_vars = n_vars
self.x0 = x0
self.s0 = s0
self.fun = fun
self.grad = grad
self.lagr_hess = lagr_hess
self.constr = constr
self.jac = jac
self.barrier_parameter = barrier_parameter
self.tolerance = tolerance
self.n_eq = n_eq
self.n_ineq = n_ineq
self.enforce_feasibility = enforce_feasibility
self.global_stop_criteria = global_stop_criteria
self.xtol = xtol
self.fun0 = self._compute_function(fun0, constr_ineq0, s0)
self.grad0 = self._compute_gradient(grad0)
self.constr0 = self._compute_constr(constr_ineq0, constr_eq0, s0)
self.jac0 = self._compute_jacobian(jac_eq0, jac_ineq0, s0)
self.terminate = False
def update(self, barrier_parameter, tolerance):
self.barrier_parameter = barrier_parameter
self.tolerance = tolerance
def get_slack(self, z):
return z[self.n_vars:self.n_vars+self.n_ineq]
def get_variables(self, z):
return z[:self.n_vars]
def function_and_constraints(self, z):
"""Returns barrier function and constraints at given point.
For z = [x, s], returns barrier function:
function(z) = fun(x) - barrier_parameter*sum(log(s))
and barrier constraints:
constraints(z) = [ constr_eq(x) ]
[ constr_ineq(x) + s ]
"""
# Get variables and slack variables
x = self.get_variables(z)
s = self.get_slack(z)
# Compute function and constraints
f = self.fun(x)
c_eq, c_ineq = self.constr(x)
# Return objective function and constraints
return (self._compute_function(f, c_ineq, s),
self._compute_constr(c_ineq, c_eq, s))
def _compute_function(self, f, c_ineq, s):
# Use technique from Nocedal and Wright book, ref [3]_, p.576,
# to guarantee constraints from `enforce_feasibility`
# stay feasible along iterations.
s[self.enforce_feasibility] = -c_ineq[self.enforce_feasibility]
log_s = [np.log(s_i) if s_i > 0 else -np.inf for s_i in s]
# Compute barrier objective function
return f - self.barrier_parameter*np.sum(log_s)
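    # Editor's worked example: with barrier_parameter = 0.1, f = 2.0 and
    # s = [1.0, 0.5] the barrier objective is
    # 2.0 - 0.1*(log(1.0) + log(0.5)) = 2.0 + 0.1*log(2) ~= 2.069; a
    # nonpositive slack makes its log term -inf, so the barrier value
    # becomes +inf and such a trial point is rejected.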
def _compute_constr(self, c_ineq, c_eq, s):
# Compute barrier constraint
return np.hstack((c_eq,
c_ineq + s))
def scaling(self, z):
"""Returns scaling vector.
Given by:
scaling = [ones(n_vars), s]
"""
s = self.get_slack(z)
diag_elements = np.hstack((np.ones(self.n_vars), s))
# Diagonal Matrix
def matvec(vec):
return diag_elements*vec
return LinearOperator((self.n_vars+self.n_ineq,
self.n_vars+self.n_ineq),
matvec)
def gradient_and_jacobian(self, z):
"""Returns scaled gradient.
Return scalled gradient:
gradient = [ grad(x) ]
[ -barrier_parameter*ones(n_ineq) ]
and scaled Jacobian Matrix:
jacobian = [ jac_eq(x) 0 ]
[ jac_ineq(x) S ]
Both of them scaled by the previously defined scaling factor.
"""
# Get variables and slack variables
x = self.get_variables(z)
s = self.get_slack(z)
# Compute first derivatives
g = self.grad(x)
J_eq, J_ineq = self.jac(x)
# Return gradient and jacobian
return (self._compute_gradient(g),
self._compute_jacobian(J_eq, J_ineq, s))
def _compute_gradient(self, g):
return np.hstack((g, -self.barrier_parameter*np.ones(self.n_ineq)))
def _compute_jacobian(self, J_eq, J_ineq, s):
if self.n_ineq == 0:
return J_eq
else:
if sps.issparse(J_eq) or sps.issparse(J_ineq):
# It is expected that J_eq and J_ineq
# are already `csr_matrix` because of
# the way ``BoxConstraint``, ``NonlinearConstraint``
# and ``LinearConstraint`` are defined.
J_eq = sps.csr_matrix(J_eq)
J_ineq = sps.csr_matrix(J_ineq)
return self._assemble_sparse_jacobian(J_eq, J_ineq, s)
else:
S = np.diag(s)
zeros = np.zeros((self.n_eq, self.n_ineq))
# Convert to matrix
if sps.issparse(J_ineq):
J_ineq = J_ineq.toarray()
if sps.issparse(J_eq):
J_eq = J_eq.toarray()
# Concatenate matrices
return np.asarray(np.bmat([[J_eq, zeros],
[J_ineq, S]]))
def _assemble_sparse_jacobian(self, J_eq, J_ineq, s):
"""Assemble sparse jacobian given its components.
Given ``J_eq``, ``J_ineq`` and ``s`` returns:
jacobian = [ J_eq, 0 ]
[ J_ineq, diag(s) ]
It is equivalent to:
sps.bmat([[ J_eq, None ],
[ J_ineq, diag(s) ]], "csr")
but significantly more efficient for this
given structure.
"""
n_vars, n_ineq, n_eq = self.n_vars, self.n_ineq, self.n_eq
J_aux = sps.vstack([J_eq, J_ineq], "csr")
indptr, indices, data = J_aux.indptr, J_aux.indices, J_aux.data
new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int),
np.arange(n_ineq+1, dtype=int)))
size = indices.size+n_ineq
new_indices = np.empty(size)
new_data = np.empty(size)
mask = np.full(size, False, bool)
mask[new_indptr[-n_ineq:]-1] = True
new_indices[mask] = n_vars+np.arange(n_ineq)
new_indices[~mask] = indices
new_data[mask] = s
new_data[~mask] = data
J = sps.csr_matrix((new_data, new_indices, new_indptr),
(n_eq + n_ineq, n_vars + n_ineq))
return J
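    # Editor's worked example: for J_eq = [[1, 0]], J_ineq = [[0, 2], [3, 0]]
    # and s = [4, 5] the assembled Jacobian is
    #     [[1, 0, 0, 0],
    #      [0, 2, 4, 0],
    #      [3, 0, 0, 5]]
    # exactly what sps.bmat([[J_eq, None], [J_ineq, sps.diags(s)]], "csr")
    # produces, but assembled directly in CSR form.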
def lagrangian_hessian_x(self, z, v):
"""Returns Lagrangian Hessian (in relation to `x`) -> Hx"""
x = self.get_variables(z)
        # Get Lagrange multipliers related to nonlinear equality constraints
v_eq = v[:self.n_eq]
        # Get Lagrange multipliers related to nonlinear ineq. constraints
v_ineq = v[self.n_eq:self.n_eq+self.n_ineq]
lagr_hess = self.lagr_hess
return lagr_hess(x, v_eq, v_ineq)
def lagrangian_hessian_s(self, z, v):
"""Returns scaled Lagrangian Hessian (in relation to`s`) -> S Hs S"""
s = self.get_slack(z)
# Using the primal formulation:
# S Hs S = diag(s)*diag(barrier_parameter/s**2)*diag(s).
# Reference [1]_ p. 882, formula (3.1)
primal = self.barrier_parameter
# Using the primal-dual formulation
# S Hs S = diag(s)*diag(v/s)*diag(s)
# Reference [1]_ p. 883, formula (3.11)
primal_dual = v[-self.n_ineq:]*s
        # Uses the primal-dual formulation for
        # positive values of v_ineq, and the primal
        # formulation for the remaining ones.
return np.where(v[-self.n_ineq:] > 0, primal_dual, primal)
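    # Editor's worked example: with s = [0.5, 2.0], v_ineq = [4.0, -1.0]
    # and barrier_parameter = 0.1 the returned diagonal is [2.0, 0.1]:
    # the first entry uses the primal-dual value v*s, while the second
    # falls back to the primal value (the barrier parameter) because its
    # multiplier is negative.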
def lagrangian_hessian(self, z, v):
"""Returns scaled Lagrangian Hessian"""
# Compute Hessian in relation to x and s
Hx = self.lagrangian_hessian_x(z, v)
if self.n_ineq > 0:
S_Hs_S = self.lagrangian_hessian_s(z, v)
        # The scaled Lagrangian Hessian is:
# [ Hx 0 ]
# [ 0 S Hs S ]
def matvec(vec):
vec_x = self.get_variables(vec)
vec_s = self.get_slack(vec)
if self.n_ineq > 0:
return np.hstack((Hx.dot(vec_x), S_Hs_S*vec_s))
else:
return Hx.dot(vec_x)
return LinearOperator((self.n_vars+self.n_ineq,
self.n_vars+self.n_ineq),
matvec)
def stop_criteria(self, state, z, last_iteration_failed,
optimality, constr_violation,
trust_radius, penalty, cg_info):
"""Stop criteria to the barrier problem.
The criteria here proposed is similar to formula (2.3)
from [1]_, p.879.
"""
x = self.get_variables(z)
if self.global_stop_criteria(state, x,
last_iteration_failed,
trust_radius, penalty,
cg_info,
self.barrier_parameter,
self.tolerance):
self.terminate = True
return True
else:
g_cond = (optimality < self.tolerance and
constr_violation < self.tolerance)
x_cond = trust_radius < self.xtol
return g_cond or x_cond
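# Editor's sketch (hypothetical helper, not part of SciPy): the reference
# construction that ``BarrierSubproblem._assemble_sparse_jacobian``
# reproduces, built with the generic ``sps.bmat`` on a tiny instance.
def _barrier_jacobian_reference_sketch():  # pragma: no cover
    J_eq = sps.csr_matrix([[1.0, 0.0]])
    J_ineq = sps.csr_matrix([[0.0, 2.0], [3.0, 0.0]])
    s = np.array([4.0, 5.0])
    # [[J_eq, 0], [J_ineq, diag(s)]], shape (n_eq + n_ineq, n_vars + n_ineq)
    return sps.bmat([[J_eq, None], [J_ineq, sps.diags(s)]], "csr")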
def tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq,
constr, jac, x0, fun0, grad0,
constr_ineq0, jac_ineq0, constr_eq0,
jac_eq0, stop_criteria,
enforce_feasibility, xtol, state,
initial_barrier_parameter,
initial_tolerance,
initial_penalty,
initial_trust_radius,
factorization_method):
"""Trust-region interior points method.
Solve problem:
minimize fun(x)
subject to: constr_ineq(x) <= 0
constr_eq(x) = 0
using trust-region interior point method described in [1]_.
"""
# BOUNDARY_PARAMETER controls the decrease on the slack
# variables. Represents ``tau`` from [1]_ p.885, formula (3.18).
BOUNDARY_PARAMETER = 0.995
# BARRIER_DECAY_RATIO controls the decay of the barrier parameter
    # and of the subproblem tolerance. Represents ``theta`` from [1]_ p.879.
BARRIER_DECAY_RATIO = 0.2
# TRUST_ENLARGEMENT controls the enlargement on trust radius
# after each iteration
TRUST_ENLARGEMENT = 5
# Default enforce_feasibility
if enforce_feasibility is None:
enforce_feasibility = np.zeros(n_ineq, bool)
# Initial Values
barrier_parameter = initial_barrier_parameter
tolerance = initial_tolerance
trust_radius = initial_trust_radius
# Define initial value for the slack variables
s0 = np.maximum(-1.5*constr_ineq0, np.ones(n_ineq))
# Define barrier subproblem
subprob = BarrierSubproblem(
x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac,
barrier_parameter, tolerance, enforce_feasibility,
stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0,
constr_eq0, jac_eq0)
# Define initial parameter for the first iteration.
z = np.hstack((x0, s0))
fun0_subprob, constr0_subprob = subprob.fun0, subprob.constr0
grad0_subprob, jac0_subprob = subprob.grad0, subprob.jac0
# Define trust region bounds
trust_lb = np.hstack((np.full(subprob.n_vars, -np.inf),
np.full(subprob.n_ineq, -BOUNDARY_PARAMETER)))
trust_ub = np.full(subprob.n_vars+subprob.n_ineq, np.inf)
# Solves a sequence of barrier problems
while True:
# Solve SQP subproblem
z, state = equality_constrained_sqp(
subprob.function_and_constraints,
subprob.gradient_and_jacobian,
subprob.lagrangian_hessian,
z, fun0_subprob, grad0_subprob,
constr0_subprob, jac0_subprob, subprob.stop_criteria,
state, initial_penalty, trust_radius,
factorization_method, trust_lb, trust_ub, subprob.scaling)
if subprob.terminate:
break
# Update parameters
trust_radius = max(initial_trust_radius,
TRUST_ENLARGEMENT*state.tr_radius)
        # TODO: Use more advanced strategies from [2]_
        # to update these parameters.
barrier_parameter *= BARRIER_DECAY_RATIO
tolerance *= BARRIER_DECAY_RATIO
# Update Barrier Problem
subprob.update(barrier_parameter, tolerance)
# Compute initial values for next iteration
fun0_subprob, constr0_subprob = subprob.function_and_constraints(z)
grad0_subprob, jac0_subprob = subprob.gradient_and_jacobian(z)
# Get x and s
x = subprob.get_variables(z)
return x, state
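# Editor's sketch (hypothetical helper, not part of SciPy): the barrier
# parameter and the subproblem tolerance both shrink geometrically by
# BARRIER_DECAY_RATIO, so reaching a target value mu_target takes about
# log(mu_target/mu0)/log(theta) outer iterations.
def _barrier_decay_sketch(mu0=0.1, tol0=1.0, theta=0.2, mu_target=1e-8):
    mu, tol, outer = mu0, tol0, 0
    while mu > mu_target:
        mu *= theta
        tol *= theta
        outer += 1
    return outer  # 11 outer iterations for the default values above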
| 13,890 | 38.916667 | 78 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.sparse import csc_matrix
from scipy.optimize._trustregion_constr.projections \
import projections, orthogonality
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_raises, assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns,
dec)
import pytest
import sys
import platform
try:
from sksparse.cholmod import cholesky_AAt
sksparse_available = True
available_sparse_methods = ("NormalEquation", "AugmentedSystem")
except ImportError:
import warnings
sksparse_available = False
available_sparse_methods = ("AugmentedSystem",)
available_dense_methods = ('QRFactorization', 'SVDFactorization')
class TestProjections(TestCase):
def test_nullspace_and_least_squares_sparse(self):
A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
At_dense = A_dense.T
A = csc_matrix(A_dense)
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
[1, 10, 3, 0, 1, 6, 7, 8],
[1.12, 10, 0, 0, 100000, 6, 0.7, 8])
for method in available_sparse_methods:
Z, LS, _ = projections(A, method)
for z in test_points:
# Test if x is in the null_space
x = Z.matvec(z)
assert_array_almost_equal(A.dot(x), 0)
# Test orthogonality
assert_array_almost_equal(orthogonality(A, x), 0)
# Test if x is the least square solution
x = LS.matvec(z)
x2 = scipy.linalg.lstsq(At_dense, z)[0]
assert_array_almost_equal(x, x2)
def test_iterative_refinements_sparse(self):
A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
A = csc_matrix(A_dense)
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
[1, 10, 3, 0, 1, 6, 7, 8],
[1.12, 10, 0, 0, 100000, 6, 0.7, 8],
[1, 0, 0, 0, 0, 1, 2, 3+1e-10])
for method in available_sparse_methods:
Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100)
for z in test_points:
# Test if x is in the null_space
x = Z.matvec(z)
atol = 1e-13 * abs(x).max()
assert_allclose(A.dot(x), 0, atol=atol)
# Test orthogonality
assert_allclose(orthogonality(A, x), 0, atol=1e-13)
def test_rowspace_sparse(self):
A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
A = csc_matrix(A_dense)
test_points = ([1, 2, 3],
[1, 10, 3],
[1.12, 10, 0])
for method in available_sparse_methods:
_, _, Y = projections(A, method)
for z in test_points:
# Test if x is solution of A x = z
x = Y.matvec(z)
assert_array_almost_equal(A.dot(x), z)
                # Test if x is in the row space of A
A_ext = np.vstack((A_dense, x))
assert_equal(np.linalg.matrix_rank(A_dense),
np.linalg.matrix_rank(A_ext))
def test_nullspace_and_least_squares_dense(self):
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
At = A.T
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
[1, 10, 3, 0, 1, 6, 7, 8],
[1.12, 10, 0, 0, 100000, 6, 0.7, 8])
for method in available_dense_methods:
Z, LS, _ = projections(A, method)
for z in test_points:
# Test if x is in the null_space
x = Z.matvec(z)
assert_array_almost_equal(A.dot(x), 0)
# Test orthogonality
assert_array_almost_equal(orthogonality(A, x), 0)
# Test if x is the least square solution
x = LS.matvec(z)
x2 = scipy.linalg.lstsq(At, z)[0]
assert_array_almost_equal(x, x2)
def test_compare_dense_and_sparse(self):
D = np.diag(range(1, 101))
A = np.hstack([D, D, D, D])
A_sparse = csc_matrix(A)
np.random.seed(0)
Z, LS, Y = projections(A)
Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
for k in range(20):
z = np.random.normal(size=(400,))
assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
x = np.random.normal(size=(100,))
assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))
def test_compare_dense_and_sparse2(self):
D1 = np.diag([-1.7, 1, 0.5])
D2 = np.diag([1, -0.6, -0.3])
D3 = np.diag([-0.3, -1.5, 2])
A = np.hstack([D1, D2, D3])
A_sparse = csc_matrix(A)
np.random.seed(0)
Z, LS, Y = projections(A)
Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
for k in range(1):
z = np.random.normal(size=(9,))
assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
x = np.random.normal(size=(3,))
assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))
def test_iterative_refinements_dense(self):
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
[1, 10, 3, 0, 1, 6, 7, 8],
[1, 0, 0, 0, 0, 1, 2, 3+1e-10])
for method in available_dense_methods:
Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10)
for z in test_points:
# Test if x is in the null_space
x = Z.matvec(z)
assert_array_almost_equal(A.dot(x), 0, decimal=14)
# Test orthogonality
assert_array_almost_equal(orthogonality(A, x), 0, decimal=16)
def test_rowspace_dense(self):
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
test_points = ([1, 2, 3],
[1, 10, 3],
[1.12, 10, 0])
for method in available_dense_methods:
_, _, Y = projections(A, method)
for z in test_points:
# Test if x is solution of A x = z
x = Y.matvec(z)
assert_array_almost_equal(A.dot(x), z)
                # Test if x is in the row space of A
A_ext = np.vstack((A, x))
assert_equal(np.linalg.matrix_rank(A),
np.linalg.matrix_rank(A_ext))
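    # Editor's illustrative check (hypothetical test, not part of the
    # original suite): Z and Y split any vector into its null-space and
    # row-space components, so w == Z w + Y (A w).
    def test_nullspace_rowspace_decomposition(self):
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        for method in available_dense_methods:
            Z, _, Y = projections(A, method)
            w = np.arange(1.0, 9.0)
            assert_array_almost_equal(Z.matvec(w) + Y.matvec(A.dot(w)), w)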
class TestOrthogonality(TestCase):
def test_dense_matrix(self):
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
test_vectors = ([-1.98931144, -1.56363389,
-0.84115584, 2.2864762,
5.599141, 0.09286976,
1.37040802, -0.28145812],
[697.92794044, -4091.65114008,
-3327.42316335, 836.86906951,
99434.98929065, -1285.37653682,
-4109.21503806, 2935.29289083])
test_expected_orth = (0, 0)
for i in range(len(test_vectors)):
x = test_vectors[i]
orth = test_expected_orth[i]
assert_array_almost_equal(orthogonality(A, x), orth)
def test_sparse_matrix(self):
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
A = csc_matrix(A)
test_vectors = ([-1.98931144, -1.56363389,
-0.84115584, 2.2864762,
5.599141, 0.09286976,
1.37040802, -0.28145812],
[697.92794044, -4091.65114008,
-3327.42316335, 836.86906951,
99434.98929065, -1285.37653682,
-4109.21503806, 2935.29289083])
test_expected_orth = (0, 0)
for i in range(len(test_vectors)):
x = test_vectors[i]
orth = test_expected_orth[i]
assert_array_almost_equal(orthogonality(A, x), orth)
| 9,171 | 39.946429 | 77 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py
|
import numpy as np
from scipy.sparse import csc_matrix
from scipy.optimize._trustregion_constr.qp_subproblem \
import (eqp_kktfact,
projected_cg,
box_intersections,
sphere_intersections,
box_sphere_intersections,
modified_dogleg)
from scipy.optimize._trustregion_constr.projections \
import projections
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns,
dec)
import pytest
class TestEQPDirectFactorization(TestCase):
# From Example 16.2 Nocedal/Wright "Numerical
# Optimization" p.452.
def test_nocedal_example(self):
H = csc_matrix([[6, 2, 1],
[2, 5, 2],
[1, 2, 4]])
A = csc_matrix([[1, 0, 1],
[0, 1, 1]])
c = np.array([-8, -3, -3])
b = -np.array([3, 0])
x, lagrange_multipliers = eqp_kktfact(H, c, A, b)
assert_array_almost_equal(x, [2, -1, 1])
assert_array_almost_equal(lagrange_multipliers, [3, -2])
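    # Editor's illustrative check (hypothetical test, not part of the
    # original suite): with the sign conventions of ``eqp_kktfact`` the
    # returned solution and multipliers satisfy H x + c = A.T v and
    # A x + b = 0, which can be verified directly.
    def test_nocedal_example_kkt_residuals(self):
        H = csc_matrix([[6, 2, 1],
                        [2, 5, 2],
                        [1, 2, 4]])
        A = csc_matrix([[1, 0, 1],
                        [0, 1, 1]])
        c = np.array([-8, -3, -3])
        b = -np.array([3, 0])
        x, v = eqp_kktfact(H, c, A, b)
        assert_array_almost_equal(H.dot(x) + c, A.T.dot(v))
        assert_array_almost_equal(A.dot(x) + b, 0)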
class TestSphericalBoundariesIntersections(TestCase):
def test_2d_sphere_constraints(self):
        # Interior initial point
ta, tb, intersect = sphere_intersections([0, 0],
[1, 0], 0.5)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# No intersection between line and circle
ta, tb, intersect = sphere_intersections([2, 0],
[0, 1], 1)
assert_equal(intersect, False)
        # Outside initial point pointing away from the circle
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 1)
assert_equal(intersect, False)
        # Outside initial point pointing toward the circle
ta, tb, intersect = sphere_intersections([2, 0],
[-1, 0], 1.5)
assert_array_almost_equal([ta, tb], [0.5, 1])
assert_equal(intersect, True)
        # Initial point on the boundary
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 2)
assert_array_almost_equal([ta, tb], [0, 0])
assert_equal(intersect, True)
def test_2d_sphere_constraints_line_intersections(self):
        # Interior initial point
ta, tb, intersect = sphere_intersections([0, 0],
[1, 0], 0.5,
entire_line=True)
assert_array_almost_equal([ta, tb], [-0.5, 0.5])
assert_equal(intersect, True)
# No intersection between line and circle
ta, tb, intersect = sphere_intersections([2, 0],
[0, 1], 1,
entire_line=True)
assert_equal(intersect, False)
        # Outside initial point pointing away from the circle
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 1,
entire_line=True)
assert_array_almost_equal([ta, tb], [-3, -1])
assert_equal(intersect, True)
        # Outside initial point pointing toward the circle
ta, tb, intersect = sphere_intersections([2, 0],
[-1, 0], 1.5,
entire_line=True)
assert_array_almost_equal([ta, tb], [0.5, 3.5])
assert_equal(intersect, True)
        # Initial point on the boundary
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 2,
entire_line=True)
assert_array_almost_equal([ta, tb], [-4, 0])
assert_equal(intersect, True)
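# Editor's note: the intersection parameters above solve the scalar
# quadratic ||z + t*d||**2 = R**2, i.e.
# (d.d)*t**2 + 2*(z.d)*t + (z.z - R**2) = 0. For z = [2, 0], d = [-1, 0]
# and R = 1.5 this is t**2 - 4*t + 1.75 = 0 with roots t = 0.5 and
# t = 3.5, matching the entire-line case; without entire_line the
# interval is additionally clipped to [0, 1].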
class TestBoxBoundariesIntersections(TestCase):
def test_2d_box_constraints(self):
# Box constraint in the direction of vector d
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[1, 1], [3, 3])
assert_array_almost_equal([ta, tb], [0.5, 1])
assert_equal(intersect, True)
# Negative direction
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[1, -3], [3, -1])
assert_equal(intersect, False)
# Some constraints are absent (set to +/- inf)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-np.inf, 1],
[np.inf, np.inf])
assert_array_almost_equal([ta, tb], [0.5, 1])
assert_equal(intersect, True)
# Intersect on the face of the box
ta, tb, intersect = box_intersections([1, 0], [0, 1],
[1, 1], [3, 3])
assert_array_almost_equal([ta, tb], [1, 1])
assert_equal(intersect, True)
        # Interior initial point
ta, tb, intersect = box_intersections([0, 0], [4, 4],
[-2, -3], [3, 2])
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# No intersection between line and box constraints
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, -3], [-1, -1])
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, 3], [-1, 1])
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, -np.inf],
[-1, np.inf])
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([0, 0], [1, 100],
[1, 1], [3, 3])
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
[1, 1], [3, 3])
assert_equal(intersect, False)
        # Initial point on the boundary
ta, tb, intersect = box_intersections([2, 2], [0, 1],
[-2, -2], [2, 2])
assert_array_almost_equal([ta, tb], [0, 0])
assert_equal(intersect, True)
def test_2d_box_constraints_entire_line(self):
# Box constraint in the direction of vector d
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[1, 1], [3, 3],
entire_line=True)
assert_array_almost_equal([ta, tb], [0.5, 1.5])
assert_equal(intersect, True)
# Negative direction
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[1, -3], [3, -1],
entire_line=True)
assert_array_almost_equal([ta, tb], [-1.5, -0.5])
assert_equal(intersect, True)
# Some constraints are absent (set to +/- inf)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-np.inf, 1],
[np.inf, np.inf],
entire_line=True)
assert_array_almost_equal([ta, tb], [0.5, np.inf])
assert_equal(intersect, True)
# Intersect on the face of the box
ta, tb, intersect = box_intersections([1, 0], [0, 1],
[1, 1], [3, 3],
entire_line=True)
assert_array_almost_equal([ta, tb], [1, 3])
assert_equal(intersect, True)
        # Interior initial point
ta, tb, intersect = box_intersections([0, 0], [4, 4],
[-2, -3], [3, 2],
entire_line=True)
assert_array_almost_equal([ta, tb], [-0.5, 0.5])
assert_equal(intersect, True)
# No intersection between line and box constraints
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, -3], [-1, -1],
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, 3], [-1, 1],
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, -np.inf],
[-1, np.inf],
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([0, 0], [1, 100],
[1, 1], [3, 3],
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
[1, 1], [3, 3],
entire_line=True)
assert_equal(intersect, False)
        # Initial point on the boundary
ta, tb, intersect = box_intersections([2, 2], [0, 1],
[-2, -2], [2, 2],
entire_line=True)
assert_array_almost_equal([ta, tb], [-4, 0])
assert_equal(intersect, True)
def test_3d_box_constraints(self):
# Simple case
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
[1, 1, 1], [3, 3, 3])
assert_array_almost_equal([ta, tb], [1, 1])
assert_equal(intersect, True)
# Negative direction
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
[1, 1, 1], [3, 3, 3])
assert_equal(intersect, False)
# Interior Point
ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
[1, 1, 1], [3, 3, 3])
assert_array_almost_equal([ta, tb], [0, 1])
assert_equal(intersect, True)
def test_3d_box_constraints_entire_line(self):
# Simple case
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
[1, 1, 1], [3, 3, 3],
entire_line=True)
assert_array_almost_equal([ta, tb], [1, 3])
assert_equal(intersect, True)
# Negative direction
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
[1, 1, 1], [3, 3, 3],
entire_line=True)
assert_array_almost_equal([ta, tb], [-3, -1])
assert_equal(intersect, True)
# Interior Point
ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
[1, 1, 1], [3, 3, 3],
entire_line=True)
assert_array_almost_equal([ta, tb], [-1, 1])
assert_equal(intersect, True)
class TestBoxSphereBoundariesIntersections(TestCase):
def test_2d_box_constraints(self):
# Both constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
[-1, -2], [1, 2], 2,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
        # None of the constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
[-1, -3], [1, 3], 10,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 1])
assert_equal(intersect, True)
# Box Constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 10,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# Spherical Constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 0.25])
assert_equal(intersect, True)
# Infeasible problems
ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=False)
assert_equal(intersect, False)
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[2, 4], [2, 4], 2,
entire_line=False)
assert_equal(intersect, False)
def test_2d_box_constraints_entire_line(self):
# Both constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
[-1, -2], [1, 2], 2,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
        # None of the constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
[-1, -3], [1, 3], 10,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 2])
assert_equal(intersect, True)
# Box Constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 10,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# Spherical Constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 0.25])
assert_equal(intersect, True)
# Infeasible problems
ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[2, 4], [2, 4], 2,
entire_line=True)
assert_equal(intersect, False)
class TestModifiedDogleg(TestCase):
def test_cauchypoint_equalsto_newtonpoint(self):
A = np.array([[1, 8]])
b = np.array([-16])
_, _, Y = projections(A)
newton_point = np.array([0.24615385, 1.96923077])
# Newton point inside boundaries
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf])
assert_array_almost_equal(x, newton_point)
# Spherical constraint active
x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf])
assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point))
# Box Constraints active
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf])
assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1)
def test_3d_example(self):
A = np.array([[1, 8, 1],
[4, 2, 2]])
b = np.array([-16, 2])
Z, LS, Y = projections(A)
newton_point = np.array([-1.37090909, 2.23272727, -0.49090909])
cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585])
origin = np.zeros_like(newton_point)
# newton_point inside boundaries
x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
assert_array_almost_equal(x, newton_point)
# line between cauchy_point and newton_point contains best point
        # (spherical constraint is active).
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
z = cauchy_point
d = newton_point-cauchy_point
t = ((x-z)/(d))
assert_array_almost_equal(t, 0.40807330*np.ones(3))
assert_array_almost_equal(np.linalg.norm(x), 2)
# line between cauchy_point and newton_point contains best point
        # (box constraint is active).
x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
z = cauchy_point
d = newton_point-cauchy_point
t = ((x-z)/(d))
assert_array_almost_equal(t, 0.7498195*np.ones(3))
assert_array_almost_equal(x[0], -1)
# line between origin and cauchy_point contains best point
        # (spherical constraint is active).
x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
z = origin
d = cauchy_point
t = ((x-z)/(d))
assert_array_almost_equal(t, 0.573936265*np.ones(3))
assert_array_almost_equal(np.linalg.norm(x), 1)
# line between origin and newton_point contains best point
        # (box constraint is active).
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
[np.inf, 1, np.inf])
z = origin
d = newton_point
t = ((x-z)/(d))
assert_array_almost_equal(t, 0.4478827364*np.ones(3))
assert_array_almost_equal(x[1], 1)
class TestProjectCG(TestCase):
# From Example 16.2 Nocedal/Wright "Numerical
# Optimization" p.452.
def test_nocedal_example(self):
H = csc_matrix([[6, 2, 1],
[2, 5, 2],
[1, 2, 4]])
A = csc_matrix([[1, 0, 1],
[0, 1, 1]])
c = np.array([-8, -3, -3])
b = -np.array([3, 0])
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b)
assert_equal(info["stop_cond"], 4)
assert_equal(info["hits_boundary"], False)
assert_array_almost_equal(x, [2, -1, 1])
def test_compare_with_direct_fact(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b, tol=0)
x_kkt, _ = eqp_kktfact(H, c, A, b)
assert_equal(info["stop_cond"], 1)
assert_equal(info["hits_boundary"], False)
assert_array_almost_equal(x, x_kkt)
def test_trust_region_infeasible(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
trust_radius = 1
Z, _, Y = projections(A)
with pytest.raises(ValueError):
projected_cg(H, c, Z, Y, b, trust_radius=trust_radius)
def test_trust_region_barely_feasible(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
trust_radius = 2.32379000772445021283
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
trust_radius=trust_radius)
assert_equal(info["stop_cond"], 2)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(np.linalg.norm(x), trust_radius)
assert_array_almost_equal(x, -Y.dot(b))
def test_hits_boundary(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
trust_radius = 3
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
trust_radius=trust_radius)
assert_equal(info["stop_cond"], 2)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(np.linalg.norm(x), trust_radius)
def test_negative_curvature_unconstrained(self):
H = csc_matrix([[1, 2, 1, 3],
[2, 0, 2, 4],
[1, 2, 0, 2],
[3, 4, 2, 0]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 0, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
with pytest.raises(ValueError):
projected_cg(H, c, Z, Y, b, tol=0)
def test_negative_curvature(self):
H = csc_matrix([[1, 2, 1, 3],
[2, 0, 2, 4],
[1, 2, 0, 2],
[3, 4, 2, 0]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 0, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
trust_radius = 1000
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
trust_radius=trust_radius)
assert_equal(info["stop_cond"], 3)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(np.linalg.norm(x), trust_radius)
    # The box constraints are inactive at the solution but
    # are active during the iterations.
def test_inactive_box_constraints(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
lb=[0.5, -np.inf,
-np.inf, -np.inf],
return_all=True)
x_kkt, _ = eqp_kktfact(H, c, A, b)
assert_equal(info["stop_cond"], 1)
assert_equal(info["hits_boundary"], False)
assert_array_almost_equal(x, x_kkt)
    # The box constraints are active and the termination is
    # by maximum iterations (infeasible iteration).
def test_active_box_constraints_maximum_iterations_reached(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
lb=[0.8, -np.inf,
-np.inf, -np.inf],
return_all=True)
assert_equal(info["stop_cond"], 1)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(A.dot(x), -b)
assert_array_almost_equal(x[0], 0.8)
    # The box constraints are active and the termination is
    # because it hits the boundary (without an infeasible iteration).
def test_active_box_constraints_hits_boundaries(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
trust_radius = 3
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
ub=[np.inf, np.inf, 1.6, np.inf],
trust_radius=trust_radius,
return_all=True)
assert_equal(info["stop_cond"], 2)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(x[2], 1.6)
    # The box constraints are active and the termination is
    # because it hits the boundary (infeasible iteration).
def test_active_box_constraints_hits_boundaries_infeasible_iter(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
trust_radius = 4
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
ub=[np.inf, 0.1, np.inf, np.inf],
trust_radius=trust_radius,
return_all=True)
assert_equal(info["stop_cond"], 2)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(x[1], 0.1)
    # The box constraints are active and the termination is
    # due to negative curvature (no infeasible iteration).
def test_active_box_constraints_negative_curvature(self):
H = csc_matrix([[1, 2, 1, 3],
[2, 0, 2, 4],
[1, 2, 0, 2],
[3, 4, 2, 0]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 0, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
trust_radius = 1000
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
ub=[np.inf, np.inf, 100, np.inf],
trust_radius=trust_radius)
assert_equal(info["stop_cond"], 3)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(x[2], 100)
| 27,920 | 41.955385 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_equal, assert_equal
from scipy.optimize._constraints import (NonlinearConstraint, Bounds,
PreparedConstraint)
from scipy.optimize._trustregion_constr.canonical_constraint \
import CanonicalConstraint, initial_constraints_as_canonical
def create_quadratic_function(n, m, rng):
a = rng.rand(m)
A = rng.rand(m, n)
H = rng.rand(m, n, n)
HT = np.transpose(H, (1, 2, 0))
def fun(x):
return a + A.dot(x) + 0.5 * H.dot(x).dot(x)
def jac(x):
return A + H.dot(x)
def hess(x, v):
return HT.dot(v)
return fun, jac, hess
def test_bounds_cases():
# Test 1: no constraints.
user_constraint = Bounds(-np.inf, np.inf)
x0 = np.array([-1, 2])
prepared_constraint = PreparedConstraint(user_constraint, x0, False)
c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
assert_equal(c.n_eq, 0)
assert_equal(c.n_ineq, 0)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, [])
J_eq, J_ineq = c.jac(x0)
assert_array_equal(J_eq, np.empty((0, 2)))
assert_array_equal(J_ineq, np.empty((0, 2)))
assert_array_equal(c.keep_feasible, [])
# Test 2: infinite lower bound.
user_constraint = Bounds(-np.inf, [0, np.inf, 1], [False, True, True])
x0 = np.array([-1, -2, -3], dtype=float)
prepared_constraint = PreparedConstraint(user_constraint, x0, False)
c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
assert_equal(c.n_eq, 0)
assert_equal(c.n_ineq, 2)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, [-1, -4])
J_eq, J_ineq = c.jac(x0)
assert_array_equal(J_eq, np.empty((0, 3)))
assert_array_equal(J_ineq, np.array([[1, 0, 0], [0, 0, 1]]))
assert_array_equal(c.keep_feasible, [False, True])
# Test 3: infinite upper bound.
user_constraint = Bounds([0, 1, -np.inf], np.inf, [True, False, True])
x0 = np.array([1, 2, 3], dtype=float)
prepared_constraint = PreparedConstraint(user_constraint, x0, False)
c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
assert_equal(c.n_eq, 0)
assert_equal(c.n_ineq, 2)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, [-1, -1])
J_eq, J_ineq = c.jac(x0)
assert_array_equal(J_eq, np.empty((0, 3)))
assert_array_equal(J_ineq, np.array([[-1, 0, 0], [0, -1, 0]]))
assert_array_equal(c.keep_feasible, [True, False])
# Test 4: interval constraint.
user_constraint = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3],
[False, True, True, True])
x0 = np.array([0, 10, 8, 5])
prepared_constraint = PreparedConstraint(user_constraint, x0, False)
c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
assert_equal(c.n_eq, 1)
assert_equal(c.n_ineq, 4)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [2])
assert_array_equal(c_ineq, [-1, -2, -1, -6])
J_eq, J_ineq = c.jac(x0)
assert_array_equal(J_eq, [[0, 0, 0, 1]])
assert_array_equal(J_ineq, [[1, 0, 0, 0],
[0, 0, 1, 0],
[-1, 0, 0, 0],
[0, 0, -1, 0]])
assert_array_equal(c.keep_feasible, [False, True, False, True])
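# Editor's note on Test 4 above: finite two-sided bounds on components 0
# and 2 each yield two inequalities (f - ub <= 0 and lb - f <= 0), the
# equal lower and upper bounds on component 3 collapse into the single
# equality f - 3 = 0, and the fully unbounded component 1 contributes
# nothing, hence n_eq == 1 and n_ineq == 4.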
def test_nonlinear_constraint():
n = 3
m = 5
rng = np.random.RandomState(0)
x0 = rng.rand(n)
fun, jac, hess = create_quadratic_function(n, m, rng)
f = fun(x0)
J = jac(x0)
lb = [-10, 3, -np.inf, -np.inf, -5]
ub = [10, 3, np.inf, 3, np.inf]
user_constraint = NonlinearConstraint(
fun, lb, ub, jac, hess, [True, False, False, True, False])
for sparse_jacobian in [False, True]:
prepared_constraint = PreparedConstraint(user_constraint, x0,
sparse_jacobian)
c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
assert_array_equal(c.n_eq, 1)
assert_array_equal(c.n_ineq, 4)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [f[1] - lb[1]])
assert_array_equal(c_ineq, [f[3] - ub[3], lb[4] - f[4],
f[0] - ub[0], lb[0] - f[0]])
J_eq, J_ineq = c.jac(x0)
if sparse_jacobian:
J_eq = J_eq.toarray()
J_ineq = J_ineq.toarray()
assert_array_equal(J_eq, J[1, None])
assert_array_equal(J_ineq, np.vstack((J[3], -J[4], J[0], -J[0])))
v_eq = rng.rand(c.n_eq)
v_ineq = rng.rand(c.n_ineq)
v = np.zeros(m)
v[1] = v_eq[0]
v[3] = v_ineq[0]
v[4] = -v_ineq[1]
v[0] = v_ineq[2] - v_ineq[3]
assert_array_equal(c.hess(x0, v_eq, v_ineq), hess(x0, v))
assert_array_equal(c.keep_feasible, [True, False, True, True])
def test_concatenation():
rng = np.random.RandomState(0)
n = 4
    x0 = rng.rand(n)
f1 = x0
J1 = np.eye(n)
lb1 = [-1, -np.inf, -2, 3]
ub1 = [1, np.inf, np.inf, 3]
bounds = Bounds(lb1, ub1, [False, False, True, False])
fun, jac, hess = create_quadratic_function(n, 5, rng)
f2 = fun(x0)
J2 = jac(x0)
lb2 = [-10, 3, -np.inf, -np.inf, -5]
ub2 = [10, 3, np.inf, 5, np.inf]
nonlinear = NonlinearConstraint(
fun, lb2, ub2, jac, hess, [True, False, False, True, False])
for sparse_jacobian in [False, True]:
bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian)
nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian)
c1 = CanonicalConstraint.from_PreparedConstraint(bounds_prepared)
c2 = CanonicalConstraint.from_PreparedConstraint(nonlinear_prepared)
c = CanonicalConstraint.concatenate([c1, c2], sparse_jacobian)
assert_equal(c.n_eq, 2)
assert_equal(c.n_ineq, 7)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]])
assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0],
lb1[0] - f1[0], f2[3] - ub2[3],
lb2[4] - f2[4], f2[0] - ub2[0],
lb2[0] - f2[0]])
J_eq, J_ineq = c.jac(x0)
if sparse_jacobian:
J_eq = J_eq.toarray()
J_ineq = J_ineq.toarray()
assert_array_equal(J_eq, np.vstack((J1[3], J2[1])))
assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3],
-J2[4], J2[0], -J2[0])))
v_eq = rng.rand(c.n_eq)
v_ineq = rng.rand(c.n_ineq)
v = np.zeros(5)
v[1] = v_eq[1]
v[3] = v_ineq[3]
v[4] = -v_ineq[4]
v[0] = v_ineq[5] - v_ineq[6]
H = c.hess(x0, v_eq, v_ineq).dot(np.eye(n))
assert_array_equal(H, hess(x0, v))
assert_array_equal(c.keep_feasible,
[True, False, False, True, False, True, True])
def test_empty():
x = np.array([1, 2, 3])
c = CanonicalConstraint.empty(3)
assert_equal(c.n_eq, 0)
assert_equal(c.n_ineq, 0)
c_eq, c_ineq = c.fun(x)
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, [])
J_eq, J_ineq = c.jac(x)
assert_array_equal(J_eq, np.empty((0, 3)))
assert_array_equal(J_ineq, np.empty((0, 3)))
H = c.hess(x, None, None).toarray()
assert_array_equal(H, np.zeros((3, 3)))
def test_initial_constraints_as_canonical():
rng = np.random.RandomState(0)
n = 4
    x0 = rng.rand(n)
lb1 = [-1, -np.inf, -2, 3]
ub1 = [1, np.inf, np.inf, 3]
bounds = Bounds(lb1, ub1, [False, False, True, False])
fun, jac, hess = create_quadratic_function(n, 5, rng)
lb2 = [-10, 3, -np.inf, -np.inf, -5]
ub2 = [10, 3, np.inf, 5, np.inf]
nonlinear = NonlinearConstraint(
fun, lb2, ub2, jac, hess, [True, False, False, True, False])
for sparse_jacobian in [False, True]:
bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian)
nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian)
f1 = bounds_prepared.fun.f
J1 = bounds_prepared.fun.J
f2 = nonlinear_prepared.fun.f
J2 = nonlinear_prepared.fun.J
c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical(
n, [bounds_prepared, nonlinear_prepared], sparse_jacobian)
assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]])
assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0],
lb1[0] - f1[0], f2[3] - ub2[3],
lb2[4] - f2[4], f2[0] - ub2[0],
lb2[0] - f2[0]])
if sparse_jacobian:
J1 = J1.toarray()
J2 = J2.toarray()
J_eq = J_eq.toarray()
J_ineq = J_ineq.toarray()
assert_array_equal(J_eq, np.vstack((J1[3], J2[1])))
assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3],
-J2[4], J2[0], -J2[0])))
def test_initial_constraints_as_canonical_empty():
n = 3
for sparse_jacobian in [False, True]:
c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical(
n, [], sparse_jacobian)
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, [])
if sparse_jacobian:
J_eq = J_eq.toarray()
J_ineq = J_ineq.toarray()
assert_array_equal(J_eq, np.empty((0, n)))
assert_array_equal(J_ineq, np.empty((0, n)))
| 9,794 | 32.20339 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test__root.py
|
"""
Unit tests for optimization routines from _root.py.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_
from pytest import raises as assert_raises
import numpy as np
from scipy.optimize import root
class TestRoot(object):
def test_tol_parameter(self):
        # Check that the root() tol= argument does something
def func(z):
x, y = z
return np.array([x**3 - 1, y**3 - 1])
def dfunc(z):
x, y = z
return np.array([[3*x**2, 0], [0, 3*y**2]])
        for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
                       'linearmixing', 'diagbroyden', 'excitingmixing',
                       'krylov']:
if method in ('linearmixing', 'excitingmixing'):
# doesn't converge
continue
if method in ('hybr', 'lm'):
jac = dfunc
else:
jac = None
sol1 = root(func, [1.1,1.1], jac=jac, tol=1e-4, method=method)
sol2 = root(func, [1.1,1.1], jac=jac, tol=0.5, method=method)
msg = "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x))
assert_(sol1.success, msg)
assert_(sol2.success, msg)
assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(),
msg)
def test_minimize_scalar_coerce_args_param(self):
# github issue #3503
def func(z, f=1):
x, y = z
return np.array([x**3 - 1, y**3 - f])
root(func, [1.1, 1.1], args=1.5)
def test_f_size(self):
# gh8320
# check that decreasing the size of the returned array raises an error
# and doesn't segfault
class fun(object):
def __init__(self):
self.count = 0
def __call__(self, x):
self.count += 1
if not (self.count % 5):
ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0
else:
ret = ([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0,
0.5 * (x[1] - x[0]) ** 3 + x[1]])
return ret
F = fun()
with assert_raises(ValueError):
sol = root(F, [0.1, 0.0], method='lm')
| 2,257 | 30.361111 | 78 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test__numdiff.py
|
from __future__ import division
import math
from itertools import product
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_
from pytest import raises as assert_raises
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
from scipy.optimize._numdiff import (
_adjust_scheme_to_bounds, approx_derivative, check_derivative,
group_columns)
def test_group_columns():
structure = [
[1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0]
]
for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]:
A = transform(structure)
order = np.arange(6)
groups_true = np.array([0, 1, 2, 0, 1, 2])
groups = group_columns(A, order)
assert_equal(groups, groups_true)
order = [1, 2, 4, 3, 5, 0]
groups_true = np.array([2, 0, 1, 2, 0, 1])
groups = group_columns(A, order)
assert_equal(groups, groups_true)
# Test repeatability.
groups_1 = group_columns(A)
groups_2 = group_columns(A)
assert_equal(groups_1, groups_2)
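# Editor's note: two columns can share a single perturbed function
# evaluation when no row of the sparsity structure has nonzeros in both
# columns. In the banded structure above this pairs columns {0, 3},
# {1, 4} and {2, 5}, which is why exactly three groups appear for either
# column order.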
class TestAdjustSchemeToBounds(object):
def test_no_bounds(self):
x0 = np.zeros(3)
h = np.ones(3) * 1e-2
inf_lower = np.empty_like(x0)
inf_upper = np.empty_like(x0)
inf_lower.fill(-np.inf)
inf_upper.fill(np.inf)
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '1-sided', inf_lower, inf_upper)
assert_allclose(h_adjusted, h)
assert_(np.all(one_sided))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 2, '1-sided', inf_lower, inf_upper)
assert_allclose(h_adjusted, h)
assert_(np.all(one_sided))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', inf_lower, inf_upper)
assert_allclose(h_adjusted, h)
assert_(np.all(~one_sided))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 2, '2-sided', inf_lower, inf_upper)
assert_allclose(h_adjusted, h)
assert_(np.all(~one_sided))
def test_with_bound(self):
x0 = np.array([0.0, 0.85, -0.85])
lb = -np.ones(3)
ub = np.ones(3)
h = np.array([1, 1, -1]) * 1e-1
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
assert_allclose(h_adjusted, h)
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
assert_allclose(h_adjusted, np.abs(h))
assert_(np.all(~one_sided))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 2, '2-sided', lb, ub)
assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
assert_equal(one_sided, np.array([False, True, True]))
def test_tight_bounds(self):
lb = np.array([-0.03, -0.03])
ub = np.array([0.05, 0.05])
x0 = np.array([0.0, 0.03])
h = np.array([-0.1, -0.1])
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
assert_allclose(h_adjusted, np.array([0.05, -0.06]))
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
assert_allclose(h_adjusted, np.array([0.025, -0.03]))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
assert_allclose(h_adjusted, np.array([0.03, -0.03]))
assert_equal(one_sided, np.array([False, True]))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 2, '2-sided', lb, ub)
assert_allclose(h_adjusted, np.array([0.015, -0.015]))
assert_equal(one_sided, np.array([False, True]))
class TestApproxDerivativesDense(object):
def fun_scalar_scalar(self, x):
return np.sinh(x)
def jac_scalar_scalar(self, x):
return np.cosh(x)
def fun_scalar_vector(self, x):
return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])
def jac_scalar_vector(self, x):
return np.array(
[2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)
def fun_vector_scalar(self, x):
return np.sin(x[0] * x[1]) * np.log(x[0])
def wrong_dimensions_fun(self, x):
return np.array([x**2, np.tan(x), np.exp(x)])
def jac_vector_scalar(self, x):
return np.array([
x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
np.sin(x[0] * x[1]) / x[0],
x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
])
def fun_vector_vector(self, x):
return np.array([
x[0] * np.sin(x[1]),
x[1] * np.cos(x[0]),
x[0] ** 3 * x[1] ** -0.5
])
def jac_vector_vector(self, x):
return np.array([
[np.sin(x[1]), x[0] * np.cos(x[1])],
[-x[1] * np.sin(x[0]), np.cos(x[0])],
[3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
])
def fun_parametrized(self, x, c0, c1=1.0):
return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])])
def jac_parametrized(self, x, c0, c1=0.1):
return np.array([
[c0 * np.exp(c0 * x[0]), 0],
[0, c1 * np.exp(c1 * x[1])]
])
def fun_with_nan(self, x):
return x if np.abs(x) <= 1e-8 else np.nan
def jac_with_nan(self, x):
return 1.0 if np.abs(x) <= 1e-8 else np.nan
def fun_zero_jacobian(self, x):
return np.array([x[0] * x[1], np.cos(x[0] * x[1])])
def jac_zero_jacobian(self, x):
return np.array([
[x[1], x[0]],
[-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])]
])
def fun_non_numpy(self, x):
return math.exp(x)
def jac_non_numpy(self, x):
return math.exp(x)
def test_scalar_scalar(self):
x0 = 1.0
jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
method='2-point')
jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0)
jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
method='cs')
jac_true = self.jac_scalar_scalar(x0)
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
def test_scalar_vector(self):
x0 = 0.5
jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
method='2-point')
jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0)
jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
method='cs')
jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
def test_vector_scalar(self):
x0 = np.array([100.0, -0.5])
jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
method='2-point')
jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0)
jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
method='cs')
jac_true = self.jac_vector_scalar(x0)
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
assert_allclose(jac_diff_3, jac_true, rtol=1e-7)
assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
def test_vector_vector(self):
x0 = np.array([-100.0, 0.2])
jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
method='2-point')
jac_diff_3 = approx_derivative(self.fun_vector_vector, x0)
jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
method='cs')
jac_true = self.jac_vector_vector(x0)
assert_allclose(jac_diff_2, jac_true, rtol=1e-5)
assert_allclose(jac_diff_3, jac_true, rtol=1e-6)
assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
def test_wrong_dimensions(self):
x0 = 1.0
assert_raises(RuntimeError, approx_derivative,
self.wrong_dimensions_fun, x0)
f0 = self.wrong_dimensions_fun(np.atleast_1d(x0))
assert_raises(ValueError, approx_derivative,
self.wrong_dimensions_fun, x0, f0=f0)
def test_custom_rel_step(self):
x0 = np.array([-0.1, 0.1])
jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
method='2-point', rel_step=1e-4)
jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
rel_step=1e-4)
jac_true = self.jac_vector_vector(x0)
assert_allclose(jac_diff_2, jac_true, rtol=1e-2)
assert_allclose(jac_diff_3, jac_true, rtol=1e-4)
def test_options(self):
x0 = np.array([1.0, 1.0])
c0 = -1.0
c1 = 1.0
lb = 0.0
ub = 2.0
f0 = self.fun_parametrized(x0, c0, c1=c1)
rel_step = np.array([-1e-6, 1e-7])
jac_true = self.jac_parametrized(x0, c0, c1)
jac_diff_2 = approx_derivative(
self.fun_parametrized, x0, method='2-point', rel_step=rel_step,
f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
jac_diff_3 = approx_derivative(
self.fun_parametrized, x0, rel_step=rel_step,
f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
def test_with_bounds_2_point(self):
lb = -np.ones(2)
ub = np.ones(2)
x0 = np.array([-2.0, 0.2])
assert_raises(ValueError, approx_derivative,
self.fun_vector_vector, x0, bounds=(lb, ub))
x0 = np.array([-1.0, 1.0])
jac_diff = approx_derivative(self.fun_vector_vector, x0,
method='2-point', bounds=(lb, ub))
jac_true = self.jac_vector_vector(x0)
assert_allclose(jac_diff, jac_true, rtol=1e-6)
def test_with_bounds_3_point(self):
lb = np.array([1.0, 1.0])
ub = np.array([2.0, 2.0])
x0 = np.array([1.0, 2.0])
jac_true = self.jac_vector_vector(x0)
jac_diff = approx_derivative(self.fun_vector_vector, x0)
assert_allclose(jac_diff, jac_true, rtol=1e-9)
jac_diff = approx_derivative(self.fun_vector_vector, x0,
bounds=(lb, np.inf))
assert_allclose(jac_diff, jac_true, rtol=1e-9)
jac_diff = approx_derivative(self.fun_vector_vector, x0,
bounds=(-np.inf, ub))
assert_allclose(jac_diff, jac_true, rtol=1e-9)
jac_diff = approx_derivative(self.fun_vector_vector, x0,
bounds=(lb, ub))
assert_allclose(jac_diff, jac_true, rtol=1e-9)
def test_tight_bounds(self):
x0 = np.array([10.0, 10.0])
lb = x0 - 3e-9
ub = x0 + 2e-9
jac_true = self.jac_vector_vector(x0)
jac_diff = approx_derivative(
self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub))
assert_allclose(jac_diff, jac_true, rtol=1e-6)
jac_diff = approx_derivative(
self.fun_vector_vector, x0, method='2-point',
rel_step=1e-6, bounds=(lb, ub))
assert_allclose(jac_diff, jac_true, rtol=1e-6)
jac_diff = approx_derivative(
self.fun_vector_vector, x0, bounds=(lb, ub))
assert_allclose(jac_diff, jac_true, rtol=1e-6)
jac_diff = approx_derivative(
self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub))
assert_allclose(jac_true, jac_diff, rtol=1e-6)
def test_bound_switches(self):
lb = -1e-8
ub = 1e-8
x0 = 0.0
jac_true = self.jac_with_nan(x0)
jac_diff_2 = approx_derivative(
self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
bounds=(lb, ub))
jac_diff_3 = approx_derivative(
self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
x0 = 1e-8
jac_true = self.jac_with_nan(x0)
jac_diff_2 = approx_derivative(
self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
bounds=(lb, ub))
jac_diff_3 = approx_derivative(
self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
def test_non_numpy(self):
x0 = 1.0
jac_true = self.jac_non_numpy(x0)
jac_diff_2 = approx_derivative(self.jac_non_numpy, x0,
method='2-point')
jac_diff_3 = approx_derivative(self.jac_non_numpy, x0)
assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
assert_allclose(jac_diff_3, jac_true, rtol=1e-8)
# math.exp cannot handle complex arguments, hence this raises
assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0,
**dict(method='cs'))
def test_check_derivative(self):
x0 = np.array([-10.0, 10])
accuracy = check_derivative(self.fun_vector_vector,
self.jac_vector_vector, x0)
assert_(accuracy < 1e-9)
accuracy = check_derivative(self.fun_vector_vector,
self.jac_vector_vector, x0)
assert_(accuracy < 1e-6)
x0 = np.array([0.0, 0.0])
accuracy = check_derivative(self.fun_zero_jacobian,
self.jac_zero_jacobian, x0)
assert_(accuracy == 0)
accuracy = check_derivative(self.fun_zero_jacobian,
self.jac_zero_jacobian, x0)
assert_(accuracy == 0)
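def _demo_approx_derivative():
    # Illustrative sketch, not part of the original test suite: it assumes the
    # private helper scipy.optimize._numdiff.approx_derivative exercised by the
    # tests above.  '2-point' differences cost one extra function evaluation
    # per variable, '3-point' cost two but are more accurate, and 'cs'
    # (complex-step) is near machine precision when fun accepts complex input.
    import numpy as np
    from scipy.optimize._numdiff import approx_derivative

    def fun(x):
        return np.array([x[0] * np.sin(x[1]), x[1] * np.cos(x[0])])

    x0 = np.array([1.0, 0.5])
    for method in ['2-point', '3-point', 'cs']:
        print(method, approx_derivative(fun, x0, method=method))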
class TestApproxDerivativeSparse(object):
# Example from Numerical Optimization 2nd edition, p. 198.
def setup_method(self):
np.random.seed(0)
self.n = 50
self.lb = -0.1 * (1 + np.arange(self.n))
self.ub = 0.1 * (1 + np.arange(self.n))
self.x0 = np.empty(self.n)
self.x0[::2] = (1 - 1e-7) * self.lb[::2]
self.x0[1::2] = (1 - 1e-7) * self.ub[1::2]
self.J_true = self.jac(self.x0)
def fun(self, x):
e = x[1:]**3 - x[:-1]**2
return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0))
def jac(self, x):
n = x.size
J = np.zeros((n, n))
J[0, 0] = -4 * x[0]
J[0, 1] = 6 * x[1]**2
for i in range(1, n - 1):
J[i, i - 1] = -6 * x[i-1]
J[i, i] = 9 * x[i]**2 - 4 * x[i]
J[i, i + 1] = 6 * x[i+1]**2
J[-1, -1] = 9 * x[-1]**2
J[-1, -2] = -6 * x[-2]
return J
def structure(self, n):
A = np.zeros((n, n), dtype=int)
A[0, 0] = 1
A[0, 1] = 1
for i in range(1, n - 1):
A[i, i - 1: i + 2] = 1
A[-1, -1] = 1
A[-1, -2] = 1
return A
def test_all(self):
A = self.structure(self.n)
order = np.arange(self.n)
groups_1 = group_columns(A, order)
np.random.shuffle(order)
groups_2 = group_columns(A, order)
for method, groups, l, u in product(
['2-point', '3-point', 'cs'], [groups_1, groups_2],
[-np.inf, self.lb], [np.inf, self.ub]):
J = approx_derivative(self.fun, self.x0, method=method,
bounds=(l, u), sparsity=(A, groups))
assert_(isinstance(J, csr_matrix))
assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
rel_step = 1e-8 * np.ones_like(self.x0)
rel_step[::2] *= -1
J = approx_derivative(self.fun, self.x0, method=method,
rel_step=rel_step, sparsity=(A, groups))
assert_allclose(J.toarray(), self.J_true, rtol=1e-5)
def test_no_precomputed_groups(self):
A = self.structure(self.n)
J = approx_derivative(self.fun, self.x0, sparsity=A)
assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
def test_equivalence(self):
structure = np.ones((self.n, self.n), dtype=int)
groups = np.arange(self.n)
for method in ['2-point', '3-point', 'cs']:
J_dense = approx_derivative(self.fun, self.x0, method=method)
J_sparse = approx_derivative(
self.fun, self.x0, sparsity=(structure, groups), method=method)
assert_equal(J_dense, J_sparse.toarray())
def test_check_derivative(self):
def jac(x):
return csr_matrix(self.jac(x))
accuracy = check_derivative(self.fun, jac, self.x0,
bounds=(self.lb, self.ub))
assert_(accuracy < 1e-9)
accuracy = check_derivative(self.fun, jac, self.x0,
bounds=(self.lb, self.ub))
assert_(accuracy < 1e-9)
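def _demo_column_grouping():
    # Illustrative sketch, not part of the original test suite: for a banded
    # Jacobian, group_columns packs structurally orthogonal columns together,
    # so a tridiagonal n x n Jacobian can be recovered from just 3 grouped
    # difference evaluations instead of n single-column ones.
    import numpy as np
    from scipy.optimize._numdiff import group_columns
    n = 8
    A = np.zeros((n, n), dtype=int)
    for i in range(n):
        A[i, max(i - 1, 0):i + 2] = 1   # tridiagonal sparsity structure
    groups = group_columns(A)
    print(groups)                       # entries drawn from {0, 1, 2}
    print(len(set(groups)))             # -> 3 groups suffice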
class TestApproxDerivativeLinearOperator(object):
def fun_scalar_scalar(self, x):
return np.sinh(x)
def jac_scalar_scalar(self, x):
return np.cosh(x)
def fun_scalar_vector(self, x):
return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])
def jac_scalar_vector(self, x):
return np.array(
[2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)
def fun_vector_scalar(self, x):
return np.sin(x[0] * x[1]) * np.log(x[0])
def jac_vector_scalar(self, x):
return np.array([
x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
np.sin(x[0] * x[1]) / x[0],
x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
])
def fun_vector_vector(self, x):
return np.array([
x[0] * np.sin(x[1]),
x[1] * np.cos(x[0]),
x[0] ** 3 * x[1] ** -0.5
])
def jac_vector_vector(self, x):
return np.array([
[np.sin(x[1]), x[0] * np.cos(x[1])],
[-x[1] * np.sin(x[0]), np.cos(x[0])],
[3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
])
def test_scalar_scalar(self):
x0 = 1.0
jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
method='2-point',
as_linear_operator=True)
jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
as_linear_operator=True)
jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
method='cs',
as_linear_operator=True)
jac_true = self.jac_scalar_scalar(x0)
np.random.seed(1)
for i in range(10):
p = np.random.uniform(-10, 10, size=(1,))
assert_allclose(jac_diff_2.dot(p), jac_true*p,
rtol=1e-5)
assert_allclose(jac_diff_3.dot(p), jac_true*p,
rtol=5e-6)
assert_allclose(jac_diff_4.dot(p), jac_true*p,
rtol=5e-6)
def test_scalar_vector(self):
x0 = 0.5
jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
method='2-point',
as_linear_operator=True)
jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0,
as_linear_operator=True)
jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
method='cs',
as_linear_operator=True)
jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
np.random.seed(1)
for i in range(10):
p = np.random.uniform(-10, 10, size=(1,))
assert_allclose(jac_diff_2.dot(p), jac_true.dot(p),
rtol=1e-5)
assert_allclose(jac_diff_3.dot(p), jac_true.dot(p),
rtol=5e-6)
assert_allclose(jac_diff_4.dot(p), jac_true.dot(p),
rtol=5e-6)
def test_vector_scalar(self):
x0 = np.array([100.0, -0.5])
jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
method='2-point',
as_linear_operator=True)
jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
as_linear_operator=True)
jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
method='cs',
as_linear_operator=True)
jac_true = self.jac_vector_scalar(x0)
np.random.seed(1)
for i in range(10):
p = np.random.uniform(-10, 10, size=x0.shape)
assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)),
rtol=1e-5)
assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)),
rtol=5e-6)
assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)),
rtol=1e-7)
def test_vector_vector(self):
x0 = np.array([-100.0, 0.2])
jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
method='2-point',
as_linear_operator=True)
jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
as_linear_operator=True)
jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
method='cs',
as_linear_operator=True)
jac_true = self.jac_vector_vector(x0)
np.random.seed(1)
for i in range(10):
p = np.random.uniform(-10, 10, size=x0.shape)
assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5)
assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6)
assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7)
def test_exception(self):
x0 = np.array([-100.0, 0.2])
assert_raises(ValueError, approx_derivative,
self.fun_vector_vector, x0,
method='2-point', bounds=(1, np.inf))
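def _demo_as_linear_operator():
    # Illustrative sketch, not part of the original test suite: with
    # as_linear_operator=True, approx_derivative returns a LinearOperator whose
    # matvec lazily approximates the directional derivative J.dot(p), so the
    # full Jacobian is never formed -- useful when only products are needed.
    import numpy as np
    from scipy.optimize._numdiff import approx_derivative

    def fun(x):
        return np.array([x[0] * np.sin(x[1]), x[1] * np.cos(x[0])])

    x0 = np.array([1.0, 0.5])
    J = approx_derivative(fun, x0, method='3-point', as_linear_operator=True)
    print(J.dot(np.array([1.0, -2.0])))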
| 22,992 | 37.385643 | 79 | py |
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_lsq_common.py
from __future__ import division, absolute_import, print_function
from numpy.testing import assert_, assert_allclose, assert_equal
from pytest import raises as assert_raises
import numpy as np
from scipy.sparse.linalg import LinearOperator
from scipy.optimize._lsq.common import (
step_size_to_bound, find_active_constraints, make_strictly_feasible,
CL_scaling_vector, intersect_trust_region, build_quadratic_1d,
minimize_quadratic_1d, evaluate_quadratic, reflective_transformation,
left_multiplied_operator, right_multiplied_operator)
class TestBounds(object):
def test_step_size_to_bounds(self):
lb = np.array([-1.0, 2.5, 10.0])
ub = np.array([1.0, 5.0, 100.0])
x = np.array([0.0, 2.5, 12.0])
s = np.array([0.1, 0.0, 0.0])
step, hits = step_size_to_bound(x, s, lb, ub)
assert_equal(step, 10)
assert_equal(hits, [1, 0, 0])
s = np.array([0.01, 0.05, -1.0])
step, hits = step_size_to_bound(x, s, lb, ub)
assert_equal(step, 2)
assert_equal(hits, [0, 0, -1])
s = np.array([10.0, -0.0001, 100.0])
step, hits = step_size_to_bound(x, s, lb, ub)
assert_equal(step, np.array(-0))
assert_equal(hits, [0, -1, 0])
s = np.array([1.0, 0.5, -2.0])
step, hits = step_size_to_bound(x, s, lb, ub)
assert_equal(step, 1.0)
assert_equal(hits, [1, 0, -1])
s = np.zeros(3)
step, hits = step_size_to_bound(x, s, lb, ub)
assert_equal(step, np.inf)
assert_equal(hits, [0, 0, 0])
def test_find_active_constraints(self):
lb = np.array([0.0, -10.0, 1.0])
ub = np.array([1.0, 0.0, 100.0])
x = np.array([0.5, -5.0, 2.0])
active = find_active_constraints(x, lb, ub)
assert_equal(active, [0, 0, 0])
x = np.array([0.0, 0.0, 10.0])
active = find_active_constraints(x, lb, ub)
assert_equal(active, [-1, 1, 0])
active = find_active_constraints(x, lb, ub, rtol=0)
assert_equal(active, [-1, 1, 0])
x = np.array([1e-9, -1e-8, 100 - 1e-9])
active = find_active_constraints(x, lb, ub)
assert_equal(active, [0, 0, 1])
active = find_active_constraints(x, lb, ub, rtol=1.5e-9)
assert_equal(active, [-1, 0, 1])
lb = np.array([1.0, -np.inf, -np.inf])
ub = np.array([np.inf, 10.0, np.inf])
x = np.ones(3)
active = find_active_constraints(x, lb, ub)
assert_equal(active, [-1, 0, 0])
# Handles out-of-bound cases.
x = np.array([0.0, 11.0, 0.0])
active = find_active_constraints(x, lb, ub)
assert_equal(active, [-1, 1, 0])
active = find_active_constraints(x, lb, ub, rtol=0)
assert_equal(active, [-1, 1, 0])
def test_make_strictly_feasible(self):
lb = np.array([-0.5, -0.8, 2.0])
ub = np.array([0.8, 1.0, 3.0])
x = np.array([-0.5, 0.0, 2 + 1e-10])
x_new = make_strictly_feasible(x, lb, ub, rstep=0)
assert_(x_new[0] > -0.5)
assert_equal(x_new[1:], x[1:])
x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4)
assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)])
x = np.array([-0.5, -1, 3.1])
x_new = make_strictly_feasible(x, lb, ub)
assert_(np.all((x_new >= lb) & (x_new <= ub)))
x_new = make_strictly_feasible(x, lb, ub, rstep=0)
assert_(np.all((x_new >= lb) & (x_new <= ub)))
lb = np.array([-1, 100.0])
ub = np.array([1, 100.0 + 1e-10])
x = np.array([0, 100.0])
x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8)
assert_equal(x_new, [0, 100.0 + 0.5e-10])
def test_scaling_vector(self):
lb = np.array([-np.inf, -5.0, 1.0, -np.inf])
ub = np.array([1.0, np.inf, 10.0, np.inf])
x = np.array([0.5, 2.0, 5.0, 0.0])
g = np.array([1.0, 0.1, -10.0, 0.0])
v, dv = CL_scaling_vector(x, g, lb, ub)
assert_equal(v, [1.0, 7.0, 5.0, 1.0])
assert_equal(dv, [0.0, 1.0, -1.0, 0.0])
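def _demo_step_size_to_bound():
    # Illustrative sketch, not part of the original test suite: for the private
    # helper step_size_to_bound, the returned step is the largest t >= 0 with
    # lb <= x + t*s <= ub, and `hits` marks which bound each variable reaches
    # (-1 lower, +1 upper, 0 none).
    import numpy as np
    from scipy.optimize._lsq.common import step_size_to_bound
    lb, ub = np.zeros(2), np.ones(2)
    x = np.array([0.5, 0.5])
    s = np.array([1.0, -0.25])
    step, hits = step_size_to_bound(x, s, lb, ub)
    print(step, hits)  # 0.5 [1 0]: x[0] reaches its upper bound first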
class TestQuadraticFunction(object):
def setup_method(self):
self.J = np.array([
[0.1, 0.2],
[-1.0, 1.0],
[0.5, 0.2]])
self.g = np.array([0.8, -2.0])
self.diag = np.array([1.0, 2.0])
def test_build_quadratic_1d(self):
s = np.zeros(2)
a, b = build_quadratic_1d(self.J, self.g, s)
assert_equal(a, 0)
assert_equal(b, 0)
a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
assert_equal(a, 0)
assert_equal(b, 0)
s = np.array([1.0, -1.0])
a, b = build_quadratic_1d(self.J, self.g, s)
assert_equal(a, 2.05)
assert_equal(b, 2.8)
a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
assert_equal(a, 3.55)
assert_equal(b, 2.8)
s0 = np.array([0.5, 0.5])
a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0)
assert_equal(a, 3.55)
assert_allclose(b, 2.39)
assert_allclose(c, -0.1525)
def test_minimize_quadratic_1d(self):
a = 5
b = -1
t, y = minimize_quadratic_1d(a, b, 1, 2)
assert_equal(t, 1)
assert_equal(y, a * t**2 + b * t)
t, y = minimize_quadratic_1d(a, b, -2, -1)
assert_equal(t, -1)
assert_equal(y, a * t**2 + b * t)
t, y = minimize_quadratic_1d(a, b, -1, 1)
assert_equal(t, 0.1)
assert_equal(y, a * t**2 + b * t)
c = 10
t, y = minimize_quadratic_1d(a, b, -1, 1, c=c)
assert_equal(t, 0.1)
assert_equal(y, a * t**2 + b * t + c)
def test_evaluate_quadratic(self):
s = np.array([1.0, -1.0])
value = evaluate_quadratic(self.J, self.g, s)
assert_equal(value, 4.85)
value = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
assert_equal(value, 6.35)
s = np.array([[1.0, -1.0],
[1.0, 1.0],
[0.0, 0.0]])
values = evaluate_quadratic(self.J, self.g, s)
assert_allclose(values, [4.85, -0.91, 0.0])
values = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
assert_allclose(values, [6.35, 0.59, 0.0])
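def _demo_quadratic_1d():
    # Illustrative sketch, not part of the original test suite: along a
    # direction s, build_quadratic_1d returns (a, b) for phi(t) = a*t**2 + b*t
    # with a = 0.5*||J s||**2 (plus an optional diagonal term) and b = g.dot(s);
    # minimize_quadratic_1d then clips the minimizer t* = -b/(2a) to [lb, ub].
    import numpy as np
    from scipy.optimize._lsq.common import (build_quadratic_1d,
                                            minimize_quadratic_1d)
    J = np.array([[1.0, 0.0], [0.0, 2.0]])
    g = np.array([1.0, -1.0])
    s = np.array([1.0, 1.0])
    a, b = build_quadratic_1d(J, g, s)   # a = 0.5*(1 + 4) = 2.5, b = 0
    t, y = minimize_quadratic_1d(a, b, -1.0, 1.0)
    print(a, b, t, y)                    # minimizer t = 0 lies inside [-1, 1]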
class TestTrustRegion(object):
def test_intersect(self):
Delta = 1.0
x = np.zeros(3)
s = np.array([1.0, 0.0, 0.0])
t_neg, t_pos = intersect_trust_region(x, s, Delta)
assert_equal(t_neg, -1)
assert_equal(t_pos, 1)
s = np.array([-1.0, 1.0, -1.0])
t_neg, t_pos = intersect_trust_region(x, s, Delta)
assert_allclose(t_neg, -3**-0.5)
assert_allclose(t_pos, 3**-0.5)
x = np.array([0.5, -0.5, 0])
s = np.array([0, 0, 1.0])
t_neg, t_pos = intersect_trust_region(x, s, Delta)
assert_allclose(t_neg, -2**-0.5)
assert_allclose(t_pos, 2**-0.5)
x = np.ones(3)
assert_raises(ValueError, intersect_trust_region, x, s, Delta)
x = np.zeros(3)
s = np.zeros(3)
assert_raises(ValueError, intersect_trust_region, x, s, Delta)
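def _demo_intersect_trust_region():
    # Illustrative sketch, not part of the original test suite: the line
    # x + t*s meets the sphere ||x + t*s|| = Delta where the scalar quadratic
    # (s.s)*t**2 + 2*(x.s)*t + (x.x - Delta**2) = 0 has its two roots.
    import numpy as np
    from scipy.optimize._lsq.common import intersect_trust_region
    x = np.array([0.5, 0.0])
    s = np.array([1.0, 0.0])
    t_neg, t_pos = intersect_trust_region(x, s, 1.0)
    print(t_neg, t_pos)  # -1.5 and 0.5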
def test_reflective_transformation():
lb = np.array([-1, -2], dtype=float)
ub = np.array([5, 3], dtype=float)
y = np.array([0, 0])
x, g = reflective_transformation(y, lb, ub)
assert_equal(x, y)
assert_equal(g, np.ones(2))
y = np.array([-4, 4], dtype=float)
x, g = reflective_transformation(y, lb, np.array([np.inf, np.inf]))
assert_equal(x, [2, 4])
assert_equal(g, [-1, 1])
x, g = reflective_transformation(y, np.array([-np.inf, -np.inf]), ub)
assert_equal(x, [-4, 2])
assert_equal(g, [1, -1])
x, g = reflective_transformation(y, lb, ub)
assert_equal(x, [2, 2])
assert_equal(g, [-1, -1])
lb = np.array([-np.inf, -2])
ub = np.array([5, np.inf])
y = np.array([10, 10], dtype=float)
x, g = reflective_transformation(y, lb, ub)
assert_equal(x, [0, 10])
assert_equal(g, [-1, 1])
def test_linear_operators():
A = np.arange(6).reshape((3, 2))
d_left = np.array([-1, 2, 5])
DA = np.diag(d_left).dot(A)
J_left = left_multiplied_operator(A, d_left)
d_right = np.array([5, 10])
AD = A.dot(np.diag(d_right))
J_right = right_multiplied_operator(A, d_right)
x = np.array([-2, 3])
X = -2 * np.arange(2, 8).reshape((2, 3))
xt = np.array([0, -2, 15])
assert_allclose(DA.dot(x), J_left.matvec(x))
assert_allclose(DA.dot(X), J_left.matmat(X))
assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt))
assert_allclose(AD.dot(x), J_right.matvec(x))
assert_allclose(AD.dot(X), J_right.matmat(X))
assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt))
| 8,749 | 30.702899 | 78 | py |
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_regression.py
"""Regression tests for optimize.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_almost_equal
from pytest import raises as assert_raises
import scipy.optimize
class TestRegression(object):
def test_newton_x0_is_0(self):
# Regression test for gh-1601
tgt = 1
res = scipy.optimize.newton(lambda x: x - 1, 0)
assert_almost_equal(res, tgt)
def test_newton_integers(self):
# Regression test for gh-1741
root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2,
fprime=lambda x: 2*x)
assert_almost_equal(root, 1.0)
def test_lmdif_errmsg(self):
# This shouldn't cause a crash on Python 3
class SomeError(Exception):
pass
counter = [0]
def func(x):
counter[0] += 1
if counter[0] < 3:
return x**2 - np.array([9, 10, 11])
else:
raise SomeError()
assert_raises(SomeError,
scipy.optimize.leastsq,
func, [1, 2, 3])
| 1,151 | 25.790698 | 64 | py |
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_slsqp.py
"""
Unit test for SLSQP optimization.
"""
from __future__ import division, print_function, absolute_import
import pytest
from numpy.testing import (assert_, assert_array_almost_equal,
assert_allclose, assert_equal)
from pytest import raises as assert_raises
import numpy as np
from scipy.optimize import fmin_slsqp, minimize
class MyCallBack(object):
"""pass a custom callback function
This makes sure it's being used.
"""
def __init__(self):
self.been_called = False
self.ncalls = 0
def __call__(self, x):
self.been_called = True
self.ncalls += 1
class TestSLSQP(object):
"""
Test SLSQP algorithm using Example 14.4 from Numerical Methods for
Engineers by Steven Chapra and Raymond Canale.
This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2,
which has a maximum at x=2, y=1.
"""
def setup_method(self):
self.opts = {'disp': False}
def fun(self, d, sign=1.0):
"""
Arguments:
d - A list of two elements, where d[0] represents x and d[1] represents y
in the following equation.
        sign - A multiplier for f. Since we want to maximize f, and the scipy
               optimizers can only minimize functions, we multiply f by
               sign=-1.0 to obtain the desired maximum.
Returns:
2*x*y + 2*x - x**2 - 2*y**2
"""
x = d[0]
y = d[1]
return sign*(2*x*y + 2*x - x**2 - 2*y**2)
def jac(self, d, sign=1.0):
"""
This is the derivative of fun, returning a numpy array
representing df/dx and df/dy.
"""
x = d[0]
y = d[1]
dfdx = sign*(-2*x + 2*y + 2)
dfdy = sign*(2*x - 4*y)
return np.array([dfdx, dfdy], float)
def fun_and_jac(self, d, sign=1.0):
return self.fun(d, sign), self.jac(d, sign)
def f_eqcon(self, x, sign=1.0):
""" Equality constraint """
return np.array([x[0] - x[1]])
def fprime_eqcon(self, x, sign=1.0):
""" Equality constraint, derivative """
return np.array([[1, -1]])
def f_eqcon_scalar(self, x, sign=1.0):
""" Scalar equality constraint """
return self.f_eqcon(x, sign)[0]
def fprime_eqcon_scalar(self, x, sign=1.0):
""" Scalar equality constraint, derivative """
return self.fprime_eqcon(x, sign)[0].tolist()
def f_ieqcon(self, x, sign=1.0):
""" Inequality constraint """
return np.array([x[0] - x[1] - 1.0])
def fprime_ieqcon(self, x, sign=1.0):
""" Inequality constraint, derivative """
return np.array([[1, -1]])
def f_ieqcon2(self, x):
""" Vector inequality constraint """
return np.asarray(x)
def fprime_ieqcon2(self, x):
""" Vector inequality constraint, derivative """
return np.identity(x.shape[0])
# minimize
def test_minimize_unbounded_approximated(self):
# Minimize, method='SLSQP': unbounded, approximated jacobian.
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
method='SLSQP', options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2, 1])
def test_minimize_unbounded_given(self):
# Minimize, method='SLSQP': unbounded, given jacobian.
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
jac=self.jac, method='SLSQP', options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2, 1])
def test_minimize_bounded_approximated(self):
# Minimize, method='SLSQP': bounded, approximated jacobian.
with np.errstate(invalid='ignore'):
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
bounds=((2.5, None), (None, 0.5)),
method='SLSQP', options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2.5, 0.5])
assert_(2.5 <= res.x[0])
assert_(res.x[1] <= 0.5)
def test_minimize_unbounded_combined(self):
# Minimize, method='SLSQP': unbounded, combined function and jacobian.
res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ),
jac=True, method='SLSQP', options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2, 1])
def test_minimize_equality_approximated(self):
# Minimize with method='SLSQP': equality constraint, approx. jacobian.
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
constraints={'type': 'eq',
'fun': self.f_eqcon,
'args': (-1.0, )},
method='SLSQP', options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [1, 1])
def test_minimize_equality_given(self):
# Minimize with method='SLSQP': equality constraint, given jacobian.
res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
method='SLSQP', args=(-1.0,),
constraints={'type': 'eq', 'fun':self.f_eqcon,
'args': (-1.0, )},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [1, 1])
def test_minimize_equality_given2(self):
# Minimize with method='SLSQP': equality constraint, given jacobian
# for fun and const.
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
jac=self.jac, args=(-1.0,),
constraints={'type': 'eq',
'fun': self.f_eqcon,
'args': (-1.0, ),
'jac': self.fprime_eqcon},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [1, 1])
def test_minimize_equality_given_cons_scalar(self):
# Minimize with method='SLSQP': scalar equality constraint, given
# jacobian for fun and const.
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
jac=self.jac, args=(-1.0,),
constraints={'type': 'eq',
'fun': self.f_eqcon_scalar,
'args': (-1.0, ),
'jac': self.fprime_eqcon_scalar},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [1, 1])
def test_minimize_inequality_given(self):
# Minimize with method='SLSQP': inequality constraint, given jacobian.
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
jac=self.jac, args=(-1.0, ),
constraints={'type': 'ineq',
'fun': self.f_ieqcon,
'args': (-1.0, )},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2, 1], atol=1e-3)
def test_minimize_inequality_given_vector_constraints(self):
# Minimize with method='SLSQP': vector inequality constraint, given
# jacobian.
res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
method='SLSQP', args=(-1.0,),
constraints={'type': 'ineq',
'fun': self.f_ieqcon2,
'jac': self.fprime_ieqcon2},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [2, 1])
def test_minimize_bound_equality_given2(self):
# Minimize with method='SLSQP': bounds, eq. const., given jac. for
# fun. and const.
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
jac=self.jac, args=(-1.0, ),
bounds=[(-0.8, 1.), (-1, 0.8)],
constraints={'type': 'eq',
'fun': self.f_eqcon,
'args': (-1.0, ),
'jac': self.fprime_eqcon},
options=self.opts)
assert_(res['success'], res['message'])
assert_allclose(res.x, [0.8, 0.8], atol=1e-3)
assert_(-0.8 <= res.x[0] <= 1)
assert_(-1 <= res.x[1] <= 0.8)
# fmin_slsqp
def test_unbounded_approximated(self):
# SLSQP: unbounded, approximated jacobian.
res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
iprint = 0, full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [2, 1])
def test_unbounded_given(self):
# SLSQP: unbounded, given jacobian.
res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
fprime = self.jac, iprint = 0,
full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [2, 1])
def test_equality_approximated(self):
# SLSQP: equality constraint, approximated jacobian.
res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,),
eqcons = [self.f_eqcon],
iprint = 0, full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [1, 1])
def test_equality_given(self):
# SLSQP: equality constraint, given jacobian.
res = fmin_slsqp(self.fun, [-1.0, 1.0],
fprime=self.jac, args=(-1.0,),
eqcons = [self.f_eqcon], iprint = 0,
full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [1, 1])
def test_equality_given2(self):
# SLSQP: equality constraint, given jacobian for fun and const.
res = fmin_slsqp(self.fun, [-1.0, 1.0],
fprime=self.jac, args=(-1.0,),
f_eqcons = self.f_eqcon,
fprime_eqcons = self.fprime_eqcon,
iprint = 0,
full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [1, 1])
def test_inequality_given(self):
# SLSQP: inequality constraint, given jacobian.
res = fmin_slsqp(self.fun, [-1.0, 1.0],
fprime=self.jac, args=(-1.0, ),
ieqcons = [self.f_ieqcon],
iprint = 0, full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [2, 1], decimal=3)
def test_bound_equality_given2(self):
# SLSQP: bounds, eq. const., given jac. for fun. and const.
res = fmin_slsqp(self.fun, [-1.0, 1.0],
fprime=self.jac, args=(-1.0, ),
bounds = [(-0.8, 1.), (-1, 0.8)],
f_eqcons = self.f_eqcon,
fprime_eqcons = self.fprime_eqcon,
iprint = 0, full_output = 1)
x, fx, its, imode, smode = res
assert_(imode == 0, imode)
assert_array_almost_equal(x, [0.8, 0.8], decimal=3)
assert_(-0.8 <= x[0] <= 1)
assert_(-1 <= x[1] <= 0.8)
def test_scalar_constraints(self):
# Regression test for gh-2182
x = fmin_slsqp(lambda z: z**2, [3.],
ieqcons=[lambda z: z[0] - 1],
iprint=0)
assert_array_almost_equal(x, [1.])
x = fmin_slsqp(lambda z: z**2, [3.],
f_ieqcons=lambda z: [z[0] - 1],
iprint=0)
assert_array_almost_equal(x, [1.])
def test_integer_bounds(self):
# This should not raise an exception
fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0)
def test_obj_must_return_scalar(self):
# Regression test for Github Issue #5433
# If objective function does not return a scalar, raises ValueError
with assert_raises(ValueError):
fmin_slsqp(lambda x: [0, 1], [1, 2, 3])
def test_obj_returns_scalar_in_list(self):
# Test for Github Issue #5433 and PR #6691
# Objective function should be able to return length-1 Python list
# containing the scalar
fmin_slsqp(lambda x: [0], [1, 2, 3], iprint=0)
def test_callback(self):
# Minimize, method='SLSQP': unbounded, approximated jacobian. Check for callback
callback = MyCallBack()
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
method='SLSQP', callback=callback, options=self.opts)
assert_(res['success'], res['message'])
assert_(callback.been_called)
assert_equal(callback.ncalls, res['nit'])
def test_inconsistent_linearization(self):
# SLSQP must be able to solve this problem, even if the
# linearized problem at the starting point is infeasible.
# Linearized constraints are
#
# 2*x0[0]*x[0] >= 1
#
# At x0 = [0, 1], the second constraint is clearly infeasible.
# This triggers a call with n2==1 in the LSQ subroutine.
x = [0, 1]
f1 = lambda x: x[0] + x[1] - 2
f2 = lambda x: x[0]**2 - 1
sol = minimize(
lambda x: x[0]**2 + x[1]**2,
x,
constraints=({'type':'eq','fun': f1},
{'type':'ineq','fun': f2}),
bounds=((0,None), (0,None)),
method='SLSQP')
x = sol.x
assert_allclose(f1(x), 0, atol=1e-8)
assert_(f2(x) >= -1e-8)
assert_(sol.success, sol)
@pytest.mark.xfail(reason="This bug is not fixed")
def test_regression_5743(self):
# SLSQP must not indicate success for this problem,
# which is infeasible.
x = [1, 2]
sol = minimize(
lambda x: x[0]**2 + x[1]**2,
x,
constraints=({'type':'eq','fun': lambda x: x[0]+x[1]-1},
{'type':'ineq','fun': lambda x: x[0]-2}),
bounds=((0,None), (0,None)),
method='SLSQP')
assert_(not sol.success, sol)
def test_gh_6676(self):
def func(x):
return (x[0] - 1)**2 + 2*(x[1] - 1)**2 + 0.5*(x[2] - 1)**2
sol = minimize(func, [0, 0, 0], method='SLSQP')
assert_(sol.jac.shape == (3,))
def test_invalid_bounds(self):
# Raise correct error when lower bound is greater than upper bound.
# See Github issue 6875.
bounds_list = [
((1, 2), (2, 1)),
((2, 1), (1, 2)),
((2, 1), (2, 1)),
((np.inf, 0), (np.inf, 0)),
((1, -np.inf), (0, 1)),
]
for bounds in bounds_list:
with assert_raises(ValueError):
minimize(self.fun, [-1.0, 1.0], bounds=bounds, method='SLSQP')
def test_bounds_clipping(self):
#
# SLSQP returns bogus results for initial guess out of bounds, gh-6859
#
def f(x):
return (x[0] - 1)**2
sol = minimize(f, [10], method='slsqp', bounds=[(None, 0)])
assert_(sol.success)
assert_allclose(sol.x, 0, atol=1e-10)
sol = minimize(f, [-10], method='slsqp', bounds=[(2, None)])
assert_(sol.success)
assert_allclose(sol.x, 2, atol=1e-10)
sol = minimize(f, [-10], method='slsqp', bounds=[(None, 0)])
assert_(sol.success)
assert_allclose(sol.x, 0, atol=1e-10)
sol = minimize(f, [10], method='slsqp', bounds=[(2, None)])
assert_(sol.success)
assert_allclose(sol.x, 2, atol=1e-10)
sol = minimize(f, [-0.5], method='slsqp', bounds=[(-1, 0)])
assert_(sol.success)
assert_allclose(sol.x, 0, atol=1e-10)
sol = minimize(f, [10], method='slsqp', bounds=[(-1, 0)])
assert_(sol.success)
assert_allclose(sol.x, 0, atol=1e-10)
def test_infeasible_initial(self):
# Check SLSQP behavior with infeasible initial point
def f(x):
x, = x
return x*x - 2*x + 1
cons_u = [{'type': 'ineq', 'fun': lambda x: 0 - x}]
cons_l = [{'type': 'ineq', 'fun': lambda x: x - 2}]
cons_ul = [{'type': 'ineq', 'fun': lambda x: 0 - x},
{'type': 'ineq', 'fun': lambda x: x + 1}]
sol = minimize(f, [10], method='slsqp', constraints=cons_u)
assert_(sol.success)
assert_allclose(sol.x, 0, atol=1e-10)
sol = minimize(f, [-10], method='slsqp', constraints=cons_l)
assert_(sol.success)
assert_allclose(sol.x, 2, atol=1e-10)
sol = minimize(f, [-10], method='slsqp', constraints=cons_u)
assert_(sol.success)
assert_allclose(sol.x, 0, atol=1e-10)
sol = minimize(f, [10], method='slsqp', constraints=cons_l)
assert_(sol.success)
assert_allclose(sol.x, 2, atol=1e-10)
sol = minimize(f, [-0.5], method='slsqp', constraints=cons_ul)
assert_(sol.success)
assert_allclose(sol.x, 0, atol=1e-10)
sol = minimize(f, [10], method='slsqp', constraints=cons_ul)
assert_(sol.success)
assert_allclose(sol.x, 0, atol=1e-10)
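def _demo_slsqp_maximize():
    # Illustrative sketch, not part of the original test suite: maximizing
    # f(x, y) = 2*x*y + 2*x - x**2 - 2*y**2 with SLSQP by minimizing -f;
    # the optimum (2, 1) is the same one the tests above check for.
    from scipy.optimize import minimize

    def neg_f(d):
        x, y = d
        return -(2*x*y + 2*x - x**2 - 2*y**2)

    res = minimize(neg_f, [-1.0, 1.0], method='SLSQP')
    print(res.x)  # ~ [2., 1.]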
| 17,715 | 37.765864 | 88 | py |
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_trustregion_krylov.py
"""
Unit tests for Krylov space trust-region subproblem solver.
To run it in its simplest form::
    nosetests test_trustregion_krylov.py
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize._trlib import (get_trlib_quadratic_subproblem)
from numpy.testing import (assert_, assert_array_equal,
assert_almost_equal,
assert_equal, assert_array_almost_equal,
assert_array_less)
KrylovQP = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6)
KrylovQP_disp = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6, disp=True)
class TestKrylovQuadraticSubproblem(object):
def test_for_the_easy_case(self):
# `H` is chosen such that `g` is not orthogonal to the
# eigenvector associated with the smallest eigenvalue.
H = np.array([[1.0, 0.0, 4.0],
[0.0, 2.0, 0.0],
[4.0, 0.0, 3.0]])
g = np.array([5.0, 0.0, 4.0])
# Trust Radius
trust_radius = 1.0
# Solve Subproblem
subprob = KrylovQP(x=0,
fun=lambda x: 0,
jac=lambda x: g,
hess=lambda x: None,
hessp=lambda x, y: H.dot(y))
p, hits_boundary = subprob.solve(trust_radius)
assert_array_almost_equal(p, np.array([-1.0, 0.0, 0.0]))
assert_equal(hits_boundary, True)
# check kkt satisfaction
assert_almost_equal(
np.linalg.norm(H.dot(p) + subprob.lam * p + g),
0.0)
# check trust region constraint
assert_almost_equal(np.linalg.norm(p), trust_radius)
trust_radius = 0.5
p, hits_boundary = subprob.solve(trust_radius)
assert_array_almost_equal(p,
np.array([-0.46125446, 0., -0.19298788]))
assert_equal(hits_boundary, True)
# check kkt satisfaction
assert_almost_equal(
np.linalg.norm(H.dot(p) + subprob.lam * p + g),
0.0)
# check trust region constraint
assert_almost_equal(np.linalg.norm(p), trust_radius)
def test_for_the_hard_case(self):
# `H` is chosen such that `g` is orthogonal to the
# eigenvector associated with the smallest eigenvalue.
H = np.array([[1.0, 0.0, 4.0],
[0.0, 2.0, 0.0],
[4.0, 0.0, 3.0]])
g = np.array([0.0, 2.0, 0.0])
# Trust Radius
trust_radius = 1.0
# Solve Subproblem
subprob = KrylovQP(x=0,
fun=lambda x: 0,
jac=lambda x: g,
hess=lambda x: None,
hessp=lambda x, y: H.dot(y))
p, hits_boundary = subprob.solve(trust_radius)
assert_array_almost_equal(p, np.array([0.0, -1.0, 0.0]))
# check kkt satisfaction
assert_almost_equal(
np.linalg.norm(H.dot(p) + subprob.lam * p + g),
0.0)
# check trust region constraint
assert_almost_equal(np.linalg.norm(p), trust_radius)
trust_radius = 0.5
p, hits_boundary = subprob.solve(trust_radius)
assert_array_almost_equal(p, np.array([0.0, -0.5, 0.0]))
# check kkt satisfaction
assert_almost_equal(
np.linalg.norm(H.dot(p) + subprob.lam * p + g),
0.0)
# check trust region constraint
assert_almost_equal(np.linalg.norm(p), trust_radius)
def test_for_interior_convergence(self):
H = np.array([[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988],
[0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588],
[0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867],
[-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166],
[0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]])
g = np.array([0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534])
trust_radius = 1.1
# Solve Subproblem
subprob = KrylovQP(x=0,
fun=lambda x: 0,
jac=lambda x: g,
hess=lambda x: None,
hessp=lambda x, y: H.dot(y))
p, hits_boundary = subprob.solve(trust_radius)
# check kkt satisfaction
assert_almost_equal(
np.linalg.norm(H.dot(p) + subprob.lam * p + g),
0.0)
assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999,
-0.67005053, 0.31586769])
assert_array_almost_equal(hits_boundary, False)
def test_for_very_close_to_zero(self):
H = np.array([[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
[2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
[0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
[-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
[-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]])
g = np.array([0, 0, 0, 0, 1e-6])
trust_radius = 1.1
# Solve Subproblem
subprob = KrylovQP(x=0,
fun=lambda x: 0,
jac=lambda x: g,
hess=lambda x: None,
hessp=lambda x, y: H.dot(y))
p, hits_boundary = subprob.solve(trust_radius)
# check kkt satisfaction
assert_almost_equal(
np.linalg.norm(H.dot(p) + subprob.lam * p + g),
0.0)
# check trust region constraint
assert_almost_equal(np.linalg.norm(p), trust_radius)
assert_array_almost_equal(p, [0.06910534, -0.01432721,
-0.65311947, -0.23815972,
-0.84954934])
assert_array_almost_equal(hits_boundary, True)
def test_disp(self, capsys):
H = -np.eye(5)
g = np.array([0, 0, 0, 0, 1e-6])
trust_radius = 1.1
subprob = KrylovQP_disp(x=0,
fun=lambda x: 0,
jac=lambda x: g,
hess=lambda x: None,
hessp=lambda x, y: H.dot(y))
p, hits_boundary = subprob.solve(trust_radius)
out, err = capsys.readouterr()
assert_(out.startswith(' TR Solving trust region problem'), repr(out))
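def _demo_trust_region_kkt():
    # Illustrative sketch, not part of the original test suite: a solution p of
    # min g.p + 0.5*p.H.p subject to ||p|| <= tr satisfies the KKT system
    # (H + lam*I) p = -g with lam >= 0, where lam = 0 for interior solutions --
    # the same conditions the assertions above verify through subprob.lam.
    import numpy as np
    H = np.diag([1.0, 3.0])
    g = np.array([1.0, 0.0])
    lam = 0.0                                   # interior case: multiplier zero
    p = -np.linalg.solve(H + lam * np.eye(2), g)
    print(p, np.linalg.norm(p))                 # [-1. 0.], norm 1.0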
| 6,727 | 37.666667 | 89 | py |
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_nnls.py
""" Unit tests for nonnegative least squares
Author: Uwe Schmitt
Sep 2008
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_
from pytest import raises as assert_raises
from scipy.optimize import nnls
from numpy import arange, dot
from numpy.linalg import norm
class TestNNLS(object):
def test_nnls(self):
a = arange(25.0).reshape(-1,5)
x = arange(5.0)
y = dot(a,x)
x, res = nnls(a,y)
assert_(res < 1e-7)
assert_(norm(dot(a,x)-y) < 1e-7)
def test_maxiter(self):
# test that maxiter argument does stop iterations
# NB: did not manage to find a test case where the default value
# of maxiter is not sufficient, so use a too-small value
rndm = np.random.RandomState(1234)
a = rndm.uniform(size=(100, 100))
b = rndm.uniform(size=100)
with assert_raises(RuntimeError):
nnls(a, b, maxiter=1)
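def _demo_nnls():
    # Illustrative sketch, not part of the original test suite: nnls solves
    # min ||A x - b||_2 subject to x >= 0 and also returns the residual norm.
    import numpy as np
    from scipy.optimize import nnls
    A = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    b = np.array([2.0, 1.0, -1.0])
    x, rnorm = nnls(A, b)
    print(x, rnorm)  # the constraint pins x[1] at 0 (unconstrained x[1] < 0)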
| 988 | 25.72973 | 72 | py |
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test__spectral.py
from __future__ import division, absolute_import, print_function
import itertools
import numpy as np
from numpy import exp
from numpy.testing import assert_, assert_equal
from scipy.optimize import root
def test_performance():
# Compare performance results to those listed in
# [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
# and
# [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
# and those produced by dfsane.f from M. Raydan's website.
#
# Where the results disagree, the largest limits are taken.
e_a = 1e-5
e_r = 1e-4
table_1 = [
dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
# dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors
dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3
dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6?
dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12
]
# Check also scaling invariance
for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],
['cruz', 'cheng']):
for problem in table_1:
n = problem['n']
func = lambda x, n: yscale*problem['F'](x/xscale, n)
args = (n,)
x0 = problem['x0'](n) * xscale
fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
sigma_0 = xscale/yscale
with np.errstate(over='ignore'):
sol = root(func, x0, args=args,
options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
sigma_0=sigma_0, sigma_eps=sigma_eps,
line_search=line_search),
method='DF-SANE')
err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
fatol, sol.success, sol.nit, sol.nfev])
assert_(sol.success, err_msg)
assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval
assert_(sol.nit <= problem['nit'], err_msg)
assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg)
def test_complex():
def func(z):
return z**2 - 1 + 2j
x0 = 2.0j
ftol = 1e-4
sol = root(func, x0, tol=ftol, method='DF-SANE')
assert_(sol.success)
f0 = np.linalg.norm(func(x0))
fx = np.linalg.norm(func(sol.x))
assert_(fx <= ftol*f0)
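def _demo_df_sane():
    # Illustrative sketch, not part of the original test suite: DF-SANE is a
    # derivative-free spectral residual method, so root(..., method='DF-SANE')
    # needs only residual evaluations and no Jacobian.
    import numpy as np
    from scipy.optimize import root

    def residual(x):
        return np.array([x[0] + x[1] - 3.0, x[0] * x[1] - 2.0])

    sol = root(residual, np.array([0.5, 0.5]), method='DF-SANE')
    print(sol.x)  # a root of the system, e.g. close to [1, 2] or [2, 1]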
def test_linear_definite():
# The DF-SANE paper proves convergence for "strongly isolated"
# solutions.
#
# For linear systems F(x) = A x - b = 0, with A positive or
# negative definite, the solution is strongly isolated.
def check_solvability(A, b, line_search='cruz'):
func = lambda x: A.dot(x) - b
xp = np.linalg.solve(A, b)
eps = np.linalg.norm(func(xp)) * 1e3
sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
method='DF-SANE')
assert_(sol.success)
assert_(np.linalg.norm(func(sol.x)) <= eps)
n = 90
# Test linear pos.def. system
np.random.seed(1234)
A = np.arange(n*n).reshape(n, n)
A = A + n*n * np.diag(1 + np.arange(n))
assert_(np.linalg.eigvals(A).min() > 0)
b = np.arange(n) * 1.0
check_solvability(A, b, 'cruz')
check_solvability(A, b, 'cheng')
# Test linear neg.def. system
check_solvability(-A, b, 'cruz')
check_solvability(-A, b, 'cheng')
def test_shape():
def f(x, arg):
return x - arg
for dt in [float, complex]:
x = np.zeros([2,2])
arg = np.ones([2,2], dtype=dt)
sol = root(f, x, args=(arg,), method='DF-SANE')
assert_(sol.success)
assert_equal(sol.x.shape, x.shape)
# Some of the test functions and initial guesses listed in
# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
def F_1(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0] - 1) - 1
g[1:] = i*(exp(x[1:] - 1) - x[1:])
return g
def x0_1(n):
x0 = np.empty([n])
x0.fill(n/(n-1))
return x0
def F_2(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0]) - 1
g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
return g
def x0_2(n):
x0 = np.empty([n])
x0.fill(1/n**2)
return x0
def F_4(x, n):
assert_equal(n % 3, 0)
g = np.zeros([n])
# Note: the first line is typoed in some of the references;
# correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16
g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
return g
def x0_4(n):
assert_equal(n % 3, 0)
x0 = np.array([-1, 1/2, -1] * (n//3))
return x0
def F_6(x, n):
c = 0.9
mu = (np.arange(1, n+1) - 0.5)/n
return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
def x0_6(n):
return np.ones([n])
def F_7(x, n):
assert_equal(n % 3, 0)
def phi(t):
v = 0.5*t - 2
v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
v[t >= 2] = (0.5*t + 2)[t >= 2]
return v
g = np.zeros([n])
g[::3] = 1e4 * x[1::3]**2 - 1
g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
g[2::3] = phi(x[2::3])
return g
def x0_7(n):
assert_equal(n % 3, 0)
return np.array([1e-3, 18, 1] * (n//3))
def F_9(x, n):
g = np.zeros([n])
i = np.arange(2, n)
g[0] = x[0]**3/3 + x[1]**2/2
g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
return g
def x0_9(n):
return np.ones([n])
def F_10(x, n):
return np.log(1 + x) - x/n
def x0_10(n):
return np.ones([n])
| 6,585 | 30.21327 | 120 | py |
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_nonlin.py
""" Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_
import pytest
from scipy._lib.six import xrange
from scipy.optimize import nonlin, root
from numpy import matrix, diag, dot
from numpy.linalg import inv
import numpy as np
from .test_minpack import pressure_network
SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden,
'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing,
'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2,
'krylov': nonlin.newton_krylov}
MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
x = np.asmatrix(x).T
d = matrix(diag([3,2,1.5,1,0.5]))
c = 0.01
f = -d*x - c*float(x.T*x)*x
return f
F.xin = [1,1,1,1,1]
F.KNOWN_BAD = {}
def F2(x):
return x
F2.xin = [1,2,3,4,5,6]
F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing}
def F2_lucky(x):
return x
F2_lucky.xin = [0,0,0,0,0,0]
F2_lucky.KNOWN_BAD = {}
def F3(x):
A = np.mat('-2 1 0; 1 -2 1; 0 1 -2')
b = np.mat('1 2 3')
return np.dot(A, x) - b
F3.xin = [1,2,3]
F3.KNOWN_BAD = {}
def F4_powell(x):
A = 1e4
return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing,
'diagbroyden': nonlin.diagbroyden}
def F5(x):
return pressure_network(x, 4, np.array([.5, .5, .5, .5]))
F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
def F6(x):
x1, x2 = x
J0 = np.array([[-4.256, 14.7],
[0.8394989, 0.59964207]])
v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
np.sin(x2 * np.exp(x1) - 1)])
return -np.linalg.solve(J0, v)
F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
"""
Check the Broyden methods for a few test problems.
broyden1, broyden2, and newton_krylov must succeed for
all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
"""
def _check_nonlin_func(self, f, func, f_tol=1e-2):
x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
assert_(np.absolute(f(x)).max() < f_tol)
def _check_root(self, f, method, f_tol=1e-2):
res = root(f, f.xin, method=method,
options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
assert_(np.absolute(res.fun).max() < f_tol)
@pytest.mark.xfail
def _check_func_fail(self, *a, **kw):
pass
def test_problem_nonlin(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for func in SOLVERS.values():
if func in f.KNOWN_BAD.values():
if func in MUST_WORK.values():
self._check_func_fail(f, func)
continue
self._check_nonlin_func(f, func)
def test_tol_norm_called(self):
# Check that supplying tol_norm keyword to nonlin_solve works
self._tol_norm_used = False
def local_norm_func(x):
self._tol_norm_used = True
return np.absolute(x).max()
nonlin.newton_krylov(F, F.xin, f_tol=1e-2, maxiter=200, verbose=0,
tol_norm=local_norm_func)
assert_(self._tol_norm_used)
def test_problem_root(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for meth in SOLVERS:
if meth in f.KNOWN_BAD:
if meth in MUST_WORK:
self._check_func_fail(f, meth)
continue
self._check_root(f, meth)
class TestSecant(object):
"""Check that some Jacobian approximations satisfy the secant condition"""
xs = [np.array([1,2,3,4,5], float),
np.array([2,3,4,5,1], float),
np.array([3,4,5,1,2], float),
np.array([4,5,1,2,3], float),
np.array([9,1,9,1,3], float),
np.array([0,1,9,1,3], float),
np.array([5,5,7,1,1], float),
np.array([1,2,7,5,1], float),]
fs = [x**2 - 1 for x in xs]
def _check_secant(self, jac_cls, npoints=1, **kw):
"""
Check that the given Jacobian approximation satisfies secant
conditions for last `npoints` points.
"""
jac = jac_cls(**kw)
jac.setup(self.xs[0], self.fs[0], None)
for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
jac.update(x, f)
for k in xrange(min(npoints, j+1)):
dx = self.xs[j-k+1] - self.xs[j-k]
df = self.fs[j-k+1] - self.fs[j-k]
assert_(np.allclose(dx, jac.solve(df)))
# Check that the `npoints` secant bound is strict
if j >= npoints:
dx = self.xs[j-npoints+1] - self.xs[j-npoints]
df = self.fs[j-npoints+1] - self.fs[j-npoints]
assert_(not np.allclose(dx, jac.solve(df)))
def test_broyden1(self):
self._check_secant(nonlin.BroydenFirst)
def test_broyden2(self):
self._check_secant(nonlin.BroydenSecond)
def test_broyden1_update(self):
# Check that BroydenFirst update works as for a dense matrix
jac = nonlin.BroydenFirst(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
B = np.identity(5) * (-1/0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
jac.update(x, f)
assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
def test_broyden2_update(self):
# Check that BroydenSecond update works as for a dense matrix
jac = nonlin.BroydenSecond(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
H = np.identity(5) * (-0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
jac.update(x, f)
assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
def test_anderson(self):
# Anderson mixing (with w0=0) satisfies secant conditions
# for the last M iterates, see [Ey]_
#
# .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
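def _demo_secant_condition():
    # Illustrative sketch, not part of the original test suite: a quasi-Newton
    # scheme such as Broyden's "good" method picks the new Jacobian estimate B
    # so the secant condition B.dot(dx) == df holds for the most recent step,
    # which is what _check_secant above asserts via jac.solve(df).
    import numpy as np
    f = lambda x: x**2 - 1
    x_old, x_new = np.array([1.0, 2.0]), np.array([1.5, 1.0])
    dx, df = x_new - x_old, f(x_new) - f(x_old)
    B = np.eye(2)
    B = B + np.outer(df - B.dot(dx), dx) / dx.dot(dx)  # Broyden rank-1 update
    print(np.allclose(B.dot(dx), df))                  # True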
class TestLinear(object):
"""Solve a linear equation;
some methods find the exact solution in a finite number of steps"""
def _check(self, jac, N, maxiter, complex=False, **kw):
np.random.seed(123)
A = np.random.randn(N, N)
if complex:
A = A + 1j*np.random.randn(N, N)
b = np.random.randn(N)
if complex:
b = b + 1j*np.random.randn(N)
def func(x):
return dot(A, x) - b
sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
f_tol=1e-6, line_search=None, verbose=0)
assert_(np.allclose(dot(A, sol), b, atol=1e-6))
def test_broyden1(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
def test_broyden2(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
def test_anderson(self):
# Anderson is rather similar to Broyden, if given enough storage space
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
def test_krylov(self):
# Krylov methods solve linear systems exactly in N inner steps
self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
"""Check that solve/dot methods in Jacobian approximations are consistent"""
def _func(self, x):
return x**2 - 1 + np.dot(self.A, x)
def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
np.random.seed(123)
N = 7
def rand(*a):
q = np.random.rand(*a)
if complex:
q = q + 1j*np.random.rand(*a)
return q
def assert_close(a, b, msg):
d = abs(a - b).max()
f = tol + abs(b).max()*tol
if d > f:
raise AssertionError('%s: err %g' % (msg, d))
self.A = rand(N, N)
# initialize
x0 = np.random.rand(N)
jac = jac_cls(**kw)
jac.setup(x0, self._func(x0), self._func)
# check consistency
for k in xrange(2*N):
v = rand(N)
if hasattr(jac, '__array__'):
Jd = np.array(jac)
if hasattr(jac, 'solve'):
Gv = jac.solve(v)
Gv2 = np.linalg.solve(Jd, v)
assert_close(Gv, Gv2, 'solve vs array')
if hasattr(jac, 'rsolve'):
Gv = jac.rsolve(v)
Gv2 = np.linalg.solve(Jd.T.conj(), v)
assert_close(Gv, Gv2, 'rsolve vs array')
if hasattr(jac, 'matvec'):
Jv = jac.matvec(v)
Jv2 = np.dot(Jd, v)
assert_close(Jv, Jv2, 'dot vs array')
if hasattr(jac, 'rmatvec'):
Jv = jac.rmatvec(v)
Jv2 = np.dot(Jd.T.conj(), v)
assert_close(Jv, Jv2, 'rmatvec vs array')
if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
Jv = jac.matvec(v)
Jv2 = jac.solve(jac.matvec(Jv))
assert_close(Jv, Jv2, 'dot vs solve')
if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
Jv = jac.rmatvec(v)
Jv2 = jac.rmatvec(jac.rsolve(Jv))
assert_close(Jv, Jv2, 'rmatvec vs rsolve')
x = rand(N)
jac.update(x, self._func(x))
def test_broyden1(self):
self._check_dot(nonlin.BroydenFirst, complex=False)
self._check_dot(nonlin.BroydenFirst, complex=True)
def test_broyden2(self):
self._check_dot(nonlin.BroydenSecond, complex=False)
self._check_dot(nonlin.BroydenSecond, complex=True)
def test_anderson(self):
self._check_dot(nonlin.Anderson, complex=False)
self._check_dot(nonlin.Anderson, complex=True)
def test_diagbroyden(self):
self._check_dot(nonlin.DiagBroyden, complex=False)
self._check_dot(nonlin.DiagBroyden, complex=True)
def test_linearmixing(self):
self._check_dot(nonlin.LinearMixing, complex=False)
self._check_dot(nonlin.LinearMixing, complex=True)
def test_excitingmixing(self):
self._check_dot(nonlin.ExcitingMixing, complex=False)
self._check_dot(nonlin.ExcitingMixing, complex=True)
def test_krylov(self):
self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3)
self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3)
class TestNonlinOldTests(object):
""" Test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def test_broyden1(self):
x = nonlin.broyden1(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_broyden2(self):
x = nonlin.broyden2(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_anderson(self):
x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
assert_(nonlin.norm(x) < 0.33)
def test_linearmixing(self):
x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
assert_(nonlin.norm(x) < 1e-7)
assert_(nonlin.norm(F(x)) < 1e-7)
def test_exciting(self):
x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
assert_(nonlin.norm(x) < 1e-5)
assert_(nonlin.norm(F(x)) < 1e-5)
def test_diagbroyden(self):
x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
assert_(nonlin.norm(x) < 1e-8)
assert_(nonlin.norm(F(x)) < 1e-8)
def test_root_broyden1(self):
res = root(F, F.xin, method='broyden1',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_broyden2(self):
res = root(F, F.xin, method='broyden2',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_anderson(self):
res = root(F, F.xin, method='anderson',
options={'nit': 12,
'jac_options': {'alpha': 0.03, 'M': 5}})
assert_(nonlin.norm(res.x) < 0.33)
def test_root_linearmixing(self):
res = root(F, F.xin, method='linearmixing',
options={'nit': 60,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-7)
assert_(nonlin.norm(res.fun) < 1e-7)
def test_root_excitingmixing(self):
res = root(F, F.xin, method='excitingmixing',
options={'nit': 20,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-5)
assert_(nonlin.norm(res.fun) < 1e-5)
def test_root_diagbroyden(self):
res = root(F, F.xin, method='diagbroyden',
options={'nit': 11,
'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-8)
assert_(nonlin.norm(res.fun) < 1e-8)
| 15,068 | 32.561247 | 88 | py |
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_minpack.py
"""
Unit tests for optimization routines from minpack.py.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose)
from pytest import raises as assert_raises
import numpy as np
from numpy import array, float64, matrix
from scipy import optimize
from scipy.special import lambertw
from scipy.optimize.minpack import leastsq, curve_fit, fixed_point
from scipy._lib._numpy_compat import _assert_warns, suppress_warnings
from scipy.optimize import OptimizeWarning
class ReturnShape(object):
"""This class exists to create a callable that does not have a '__name__' attribute.
__init__ takes the argument 'shape', which should be a tuple of ints. When an instance
it called with a single argument 'x', it returns numpy.ones(shape).
"""
def __init__(self, shape):
self.shape = shape
def __call__(self, x):
return np.ones(self.shape)
def dummy_func(x, shape):
"""A function that returns an array of ones of the given shape.
`x` is ignored.
"""
return np.ones(shape)
# Function and jacobian for tests of solvers for systems of nonlinear
# equations
def pressure_network(flow_rates, Qtot, k):
"""Evaluate non-linear equation system representing
the pressures and flows in a system of n parallel pipes::
        f_i = P_i - P_0, for i = 1..n-1
f_0 = sum(Q_i) - Qtot
Where Q_i is the flow rate in pipe i and P_i the pressure in that pipe.
Pressure is modeled as a P=kQ**2 where k is a valve coefficient and
Q is the flow rate.
Parameters
----------
    flow_rates : float
        A 1D array of n flow rates [kg/s].
    Qtot : float
        A scalar, the total input flow rate [kg/s].
    k : float
        A 1D array of n valve coefficients [1/kg m].
Returns
-------
F : float
A 1D array, F[i] == f_i.
"""
P = k * flow_rates**2
F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot))
return F
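def _demo_pressure_network():
    # Illustrative sketch, not part of the original test suite: with equal
    # valve coefficients, equal flows Q_i = Qtot/n drive every residual to
    # zero, which is the solution the solver tests below converge to.
    q = np.ones(4)                                  # equal flows, Qtot = 4
    print(pressure_network(q, 4, np.full(4, 0.5)))  # -> [0. 0. 0. 0.]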
def pressure_network_jacobian(flow_rates, Qtot, k):
"""Return the jacobian of the equation system F(flow_rates)
computed by `pressure_network` with respect to
*flow_rates*. See `pressure_network` for the detailed
    description of parameters.
Returns
-------
jac : float
*n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)``
and *f_i* and *Q_i* are described in the doc for `pressure_network`
"""
n = len(flow_rates)
    pdiff = np.diag(flow_rates[1:] * 2 * k[1:])
    jac = np.zeros((n, n))
    # df_i/dQ_{i+1} = 2*k_{i+1}*Q_{i+1}; each pressure difference is taken
    # relative to pipe 0, so df_i/dQ_0 = -2*k_0*Q_0.
    jac[:n-1, 1:] = pdiff
    jac[:n-1, 0] = -2 * flow_rates[0] * k[0]
    jac[n-1, :] = np.ones(n)
return jac
def pressure_network_fun_and_grad(flow_rates, Qtot, k):
return (pressure_network(flow_rates, Qtot, k),
pressure_network_jacobian(flow_rates, Qtot, k))
class TestFSolve(object):
def test_pressure_network_no_gradient(self):
# fsolve without gradient, equal pipes -> equal flows.
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows, info, ier, mesg = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
full_output=True)
assert_array_almost_equal(final_flows, np.ones(4))
assert_(ier == 1, mesg)
def test_pressure_network_with_gradient(self):
# fsolve with gradient, equal pipes -> equal flows
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
fprime=pressure_network_jacobian)
assert_array_almost_equal(final_flows, np.ones(4))
def test_wrong_shape_func_callable(self):
func = ReturnShape(1)
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.fsolve, func, x0)
def test_wrong_shape_func_function(self):
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))
def test_wrong_shape_fprime_callable(self):
func = ReturnShape(1)
deriv_func = ReturnShape((2,2))
assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
def test_wrong_shape_fprime_function(self):
func = lambda x: dummy_func(x, (2,))
deriv_func = lambda x: dummy_func(x, (3,3))
assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
def test_float32(self):
func = lambda x: np.array([x[0] - 100, x[1] - 1000], dtype=np.float32)**2
p = optimize.fsolve(func, np.array([1, 1], np.float32))
assert_allclose(func(p), [0, 0], atol=1e-3)
class TestRootHybr(object):
def test_pressure_network_no_gradient(self):
# root/hybr without gradient, equal pipes -> equal flows
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='hybr', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient(self):
# root/hybr with gradient, equal pipes -> equal flows
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = matrix([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
args=(Qtot, k), method='hybr',
jac=pressure_network_jacobian).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient_combined(self):
# root/hybr with gradient and function combined, equal pipes -> equal
# flows
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network_fun_and_grad,
initial_guess, args=(Qtot, k),
method='hybr', jac=True).x
assert_array_almost_equal(final_flows, np.ones(4))
class TestRootLM(object):
def test_pressure_network_no_gradient(self):
# root/lm without gradient, equal pipes -> equal flows
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='lm', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
class TestLeastSq(object):
def setup_method(self):
x = np.linspace(0, 10, 40)
a,b,c = 3.1, 42, -304.2
self.x = x
self.abc = a,b,c
y_true = a*x**2 + b*x + c
np.random.seed(0)
self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape)
def residuals(self, p, y, x):
a,b,c = p
err = y-(a*x**2 + b*x + c)
return err
def test_basic(self):
p0 = array([0,0,0])
params_fit, ier = leastsq(self.residuals, p0,
args=(self.y_meas, self.x))
assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
# low precision due to random noise in the measurements
assert_array_almost_equal(params_fit, self.abc, decimal=2)
def test_full_output(self):
p0 = matrix([0,0,0])
full_output = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
full_output=True)
params_fit, cov_x, infodict, mesg, ier = full_output
assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg)
def test_input_untouched(self):
p0 = array([0,0,0],dtype=float64)
p0_copy = array(p0, copy=True)
full_output = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
full_output=True)
params_fit, cov_x, infodict, mesg, ier = full_output
assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg)
assert_array_equal(p0, p0_copy)
def test_wrong_shape_func_callable(self):
func = ReturnShape(1)
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.leastsq, func, x0)
def test_wrong_shape_func_function(self):
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),))
def test_wrong_shape_Dfun_callable(self):
func = ReturnShape(1)
deriv_func = ReturnShape((2,2))
assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)
def test_wrong_shape_Dfun_function(self):
func = lambda x: dummy_func(x, (2,))
deriv_func = lambda x: dummy_func(x, (3,3))
assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)
def test_float32(self):
# Regression test for gh-1447
def func(p,x,y):
q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3]
return q - y
x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286,
1.231], dtype=np.float32)
y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258,
0.034,0.0396], dtype=np.float32)
p0 = np.array([1.0,1.0,1.0,1.0])
p1, success = optimize.leastsq(func, p0, args=(x,y))
assert_(success in [1,2,3,4])
assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum())
class TestCurveFit(object):
def setup_method(self):
self.y = array([1.0, 3.2, 9.5, 13.7])
self.x = array([1.0, 2.0, 3.0, 4.0])
def test_one_argument(self):
def func(x,a):
return x**a
popt, pcov = curve_fit(func, self.x, self.y)
assert_(len(popt) == 1)
assert_(pcov.shape == (1,1))
assert_almost_equal(popt[0], 1.9149, decimal=4)
assert_almost_equal(pcov[0,0], 0.0016, decimal=4)
# Test if we get the same with full_output. Regression test for #1415.
res = curve_fit(func, self.x, self.y, full_output=1)
(popt2, pcov2, infodict, errmsg, ier) = res
assert_array_almost_equal(popt, popt2)
def test_two_argument(self):
def func(x, a, b):
return b*x**a
popt, pcov = curve_fit(func, self.x, self.y)
assert_(len(popt) == 2)
assert_(pcov.shape == (2,2))
assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
decimal=4)
def test_func_is_classmethod(self):
class test_self(object):
"""This class tests if curve_fit passes the correct number of
arguments when the model function is a class instance method.
"""
def func(self, x, a, b):
return b * x**a
test_self_inst = test_self()
popt, pcov = curve_fit(test_self_inst.func, self.x, self.y)
assert_(pcov.shape == (2,2))
assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
decimal=4)
def test_regression_2639(self):
# This test fails if epsfcn in leastsq is too large.
x = [574.14200000000005, 574.154, 574.16499999999996,
574.17700000000002, 574.18799999999999, 574.19899999999996,
574.21100000000001, 574.22199999999998, 574.23400000000004,
574.245]
y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0,
1550.0, 949.0, 841.0]
guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0,
0.0035019999999983615, 859.0]
good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03,
1.0068462e-02, 8.57450661e+02]
def f_double_gauss(x, x0, x1, A0, A1, sigma, c):
return (A0*np.exp(-(x-x0)**2/(2.*sigma**2))
+ A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c)
popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000)
assert_allclose(popt, good, rtol=1e-5)
def test_pcov(self):
xdata = np.array([0, 1, 2, 3, 4, 5])
ydata = np.array([1, 1, 5, 7, 8, 12])
sigma = np.array([1, 2, 1, 2, 1, 2])
def f(x, a, b):
return a*x + b
for method in ['lm', 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
method=method)
perr_scaled = np.sqrt(np.diag(pcov))
assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
method=method)
perr_scaled = np.sqrt(np.diag(pcov))
assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
absolute_sigma=True, method=method)
perr = np.sqrt(np.diag(pcov))
assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3)
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
absolute_sigma=True, method=method)
perr = np.sqrt(np.diag(pcov))
assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3)
# infinite variances
def f_flat(x, a, b):
return a*x
pcov_expected = np.array([np.inf]*4).reshape(2, 2)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning,
"Covariance of the parameters could not be estimated")
popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma)
popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0])
assert_(pcov.shape == (2, 2))
assert_array_equal(pcov, pcov_expected)
assert_(pcov1.shape == (2, 2))
assert_array_equal(pcov1, pcov_expected)
def test_array_like(self):
# Test sequence input. Regression test for gh-3037.
def f_linear(x, a, b):
return a*x + b
x = [1, 2, 3, 4]
y = [3, 5, 7, 9]
assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10)
def test_indeterminate_covariance(self):
# Test that a warning is returned when pcov is indeterminate
xdata = np.array([1, 2, 3, 4, 5, 6])
ydata = np.array([1, 2, 3, 4, 5.5, 6])
_assert_warns(OptimizeWarning, curve_fit,
lambda x, a, b: a*x, xdata, ydata)
def test_NaN_handling(self):
# Test for correct handling of NaNs in input data: gh-3422
# create input with NaNs
xdata = np.array([1, np.nan, 3])
ydata = np.array([1, 2, 3])
assert_raises(ValueError, curve_fit,
lambda x, a, b: a*x + b, xdata, ydata)
assert_raises(ValueError, curve_fit,
lambda x, a, b: a*x + b, ydata, xdata)
assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b,
xdata, ydata, **{"check_finite": True})
def test_method_argument(self):
def f(x, a, b):
return a * np.exp(-b*x)
xdata = np.linspace(0, 1, 11)
ydata = f(xdata, 2., 2.)
for method in ['trf', 'dogbox', 'lm', None]:
popt, pcov = curve_fit(f, xdata, ydata, method=method)
assert_allclose(popt, [2., 2.])
assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown')
def test_bounds(self):
def f(x, a, b):
return a * np.exp(-b*x)
xdata = np.linspace(0, 1, 11)
ydata = f(xdata, 2., 2.)
# The unconstrained minimum is at [2., 2.]; with these bounds the
# first parameter is clipped to its upper bound of 1.5.
bounds = ([1., 0], [1.5, 3.])
for method in [None, 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds,
method=method)
assert_allclose(popt[0], 1.5)
# With bounds, the starting estimate is feasible.
popt, pcov = curve_fit(f, xdata, ydata, method='trf',
bounds=([0., 0], [0.6, np.inf]))
assert_allclose(popt[0], 0.6)
# method='lm' doesn't support bounds.
assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds,
method='lm')
def test_bounds_p0(self):
# This test is for issue #5719. The problem was that an initial guess
# was ignored when 'trf' or 'dogbox' methods were invoked.
def f(x, a):
return np.sin(x + a)
xdata = np.linspace(-2*np.pi, 2*np.pi, 40)
ydata = np.sin(xdata)
bounds = (-3 * np.pi, 3 * np.pi)
for method in ['trf', 'dogbox']:
popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi)
popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi,
bounds=bounds, method=method)
# If the initial guess is ignored, then popt_2 would be close to 0.
assert_allclose(popt_1, popt_2)
def test_jac(self):
# Test that Jacobian callable is handled correctly and
# weighted if sigma is provided.
def f(x, a, b):
return a * np.exp(-b*x)
def jac(x, a, b):
e = np.exp(-b*x)
return np.vstack((e, -a * x * e)).T
xdata = np.linspace(0, 1, 11)
ydata = f(xdata, 2., 2.)
# Test numerical options for least_squares backend.
for method in ['trf', 'dogbox']:
for scheme in ['2-point', '3-point', 'cs']:
popt, pcov = curve_fit(f, xdata, ydata, jac=scheme,
method=method)
assert_allclose(popt, [2, 2])
# Test the analytic option.
for method in ['lm', 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac)
assert_allclose(popt, [2, 2])
# Now add an outlier and provide sigma.
ydata[5] = 100
sigma = np.ones(xdata.shape[0])
sigma[5] = 200
for method in ['lm', 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method,
jac=jac)
# The outlier still influences the fit slightly, so a looser
# rtol=1e-3 is needed.
assert_allclose(popt, [2, 2], rtol=1e-3)
def test_maxfev_and_bounds(self):
# gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq)
# but with bounds, the parameter is `max_nfev` (via least_squares)
x = np.arange(0, 10)
y = 2*x
popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100)
popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100)
assert_allclose(popt1, 2, atol=1e-14)
assert_allclose(popt2, 2, atol=1e-14)
def test_curvefit_simplecovariance(self):
def func(x, a, b):
return a * np.exp(-b*x)
def jac(x, a, b):
e = np.exp(-b*x)
return np.vstack((e, -a * x * e)).T
np.random.seed(0)
xdata = np.linspace(0, 4, 50)
y = func(xdata, 2.5, 1.3)
ydata = y + 0.2 * np.random.normal(size=len(xdata))
sigma = np.zeros(len(xdata)) + 0.2
covar = np.diag(sigma**2)
for jac1, jac2 in [(jac, jac), (None, None)]:
for absolute_sigma in [False, True]:
popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
jac=jac1, absolute_sigma=absolute_sigma)
popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar,
jac=jac2, absolute_sigma=absolute_sigma)
assert_allclose(popt1, popt2, atol=1e-14)
assert_allclose(pcov1, pcov2, atol=1e-14)
def test_curvefit_covariance(self):
def funcp(x, a, b):
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
return rotn.dot(a * np.exp(-b*x))
def jacp(x, a, b):
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
e = np.exp(-b*x)
return rotn.dot(np.vstack((e, -a * x * e)).T)
def func(x, a, b):
return a * np.exp(-b*x)
def jac(x, a, b):
e = np.exp(-b*x)
return np.vstack((e, -a * x * e)).T
np.random.seed(0)
xdata = np.arange(1, 4)
y = func(xdata, 2.5, 1.0)
ydata = y + 0.2 * np.random.normal(size=len(xdata))
sigma = np.zeros(len(xdata)) + 0.2
covar = np.diag(sigma**2)
# Get a rotation matrix, and obtain ydatap = R ydata
# Chisq = ydata^T C^{-1} ydata
# = ydata^T R^T R C^{-1} R^T R ydata
# = ydatap^T Cp^{-1} ydatap
# Cp^{-1} = R C^{-1} R^T
# Cp = R C R^T, since R^-1 = R^T
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
ydatap = rotn.dot(ydata)
covarp = rotn.dot(covar).dot(rotn.T)
for jac1, jac2 in [(jac, jacp), (None, None)]:
for absolute_sigma in [False, True]:
popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
jac=jac1, absolute_sigma=absolute_sigma)
popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp,
jac=jac2, absolute_sigma=absolute_sigma)
assert_allclose(popt1, popt2, atol=1e-14)
assert_allclose(pcov1, pcov2, atol=1e-14)
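# test_curvefit_covariance above leans on the identity Cp^{-1} = R C^{-1} R^T
# for an orthogonal R. A small numeric sketch of that step (values arbitrary):
def _demo_rotated_covariance_identity():
    theta = np.pi / 4
    R = np.array([[np.cos(theta), -np.sin(theta), 0],
                  [np.sin(theta), np.cos(theta), 0],
                  [0, 0, 1.0]])
    C = np.diag([0.04, 0.09, 0.16])
    Cp = R.dot(C).dot(R.T)
    # Because R^{-1} = R^T, inverting the rotated covariance equals rotating
    # the inverted covariance.
    assert np.allclose(np.linalg.inv(Cp), R.dot(np.linalg.inv(C)).dot(R.T))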
class TestFixedPoint(object):
def test_scalar_trivial(self):
# f(x) = 2x; fixed point should be x=0
def func(x):
return 2.0*x
x0 = 1.0
x = fixed_point(func, x0)
assert_almost_equal(x, 0.0)
def test_scalar_basic1(self):
# f(x) = x**2; x0=1.05; fixed point should be x=1
def func(x):
return x**2
x0 = 1.05
x = fixed_point(func, x0)
assert_almost_equal(x, 1.0)
def test_scalar_basic2(self):
# f(x) = x**0.5; x0=1.05; fixed point should be x=1
def func(x):
return x**0.5
x0 = 1.05
x = fixed_point(func, x0)
assert_almost_equal(x, 1.0)
def test_array_trivial(self):
def func(x):
return 2.0*x
x0 = [0.3, 0.15]
olderr = np.seterr(all='ignore')
try:
x = fixed_point(func, x0)
finally:
np.seterr(**olderr)
assert_almost_equal(x, [0.0, 0.0])
def test_array_basic1(self):
# f(x) = c * x**2; fixed point should be x=1/c
def func(x, c):
return c * x**2
c = array([0.75, 1.0, 1.25])
x0 = [1.1, 1.15, 0.9]
olderr = np.seterr(all='ignore')
try:
x = fixed_point(func, x0, args=(c,))
finally:
np.seterr(**olderr)
assert_almost_equal(x, 1.0/c)
def test_array_basic2(self):
# f(x) = c * x**0.5; fixed point should be x=c**2
def func(x, c):
return c * x**0.5
c = array([0.75, 1.0, 1.25])
x0 = [0.8, 1.1, 1.1]
x = fixed_point(func, x0, args=(c,))
assert_almost_equal(x, c**2)
def test_lambertw(self):
# python-list/2010-December/594592.html
xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
args=(), xtol=1e-12, maxiter=500)
assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
assert_allclose(xxroot, lambertw(1)/2)
def test_no_acceleration(self):
# github issue 5460
ks = 2
kl = 6
m = 1.3
n0 = 1.001
i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1))
def func(n):
return np.log(kl/ks/n) / np.log((i0*n/(n - 1))) + 1
n = fixed_point(func, n0, method='iteration')
assert_allclose(n, m)
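# fixed_point with method='iteration' (exercised in test_no_acceleration) is
# plain repeated substitution x <- f(x); the default method='del2' adds
# Steffensen (Aitken delta-squared) acceleration. A rough sketch of the plain
# scheme on the textbook contraction f(x) = cos(x):
def _demo_plain_fixed_point_iteration():
    x = 1.0
    for _ in range(500):
        x_new = np.cos(x)
        if abs(x_new - x) < 1e-12:
            break
        x = x_new
    # Both converge to the Dottie number, where cos(x) == x.
    assert np.allclose(x, fixed_point(np.cos, 1.0))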
# ============================================================================
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py
# ============================================================================
from __future__ import division, print_function, absolute_import
import numpy as np
from copy import deepcopy
from numpy.linalg import norm
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_raises, assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns,
dec)
from scipy.optimize import (BFGS,
SR1,
HessianUpdateStrategy,
minimize)
class Rosenbrock:
"""Rosenbrock function.
The following optimization problem:
minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
"""
def __init__(self, n=2, random_state=0):
rng = np.random.RandomState(random_state)
self.x0 = rng.uniform(-1, 1, n)
self.x_opt = np.ones(n)
def fun(self, x):
x = np.asarray(x)
r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
axis=0)
return r
def grad(self, x):
x = np.asarray(x)
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = np.zeros_like(x)
der[1:-1] = (200 * (xm - xm_m1**2) -
400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
der[-1] = 200 * (x[-1] - x[-2]**2)
return der
def hess(self, x):
x = np.atleast_1d(x)
H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
diagonal = np.zeros(len(x), dtype=x.dtype)
diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
H = H + np.diag(diagonal)
return H
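# A small sketch (arbitrary point) checking that the analytic gradient of the
# Rosenbrock class above agrees with a central-difference approximation:
def _demo_rosenbrock_gradient_check():
    prob = Rosenbrock(n=4)
    x = np.array([0.3, -0.2, 0.5, 0.1])
    eps = 1e-6
    g_fd = np.empty(4)
    for j in range(4):
        e = np.zeros(4)
        e[j] = eps
        g_fd[j] = (prob.fun(x + e) - prob.fun(x - e)) / (2 * eps)
    assert np.allclose(prob.grad(x), g_fd, atol=1e-4)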
class TestHessianUpdateStrategy(TestCase):
def test_hessian_initialization(self):
quasi_newton = (BFGS(), SR1())
for qn in quasi_newton:
qn.initialize(5, 'hess')
B = qn.get_matrix()
assert_array_equal(B, np.eye(5))
# For this list of points it is known
# that no exception occurs during the
# Hessian update. Hence no update is
# skipped or damped.
def test_rosenbrock_with_no_exception(self):
# Define auxiliary problem
prob = Rosenbrock(n=5)
# Define iteration points
x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
[0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
[0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
[0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
[0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
[0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
[0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
[0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
[0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
[0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
[0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
[0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
[0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
[0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
[0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
[0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
[0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
[0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
[0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338],
[0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691],
[0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041],
[0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744],
[0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623],
[0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448],
[0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437],
[0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581],
[0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553],
[0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149],
[0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663],
[0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288],
[0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356],
[1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912],
[0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305],
[1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047],
[1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297],
[0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032],
[0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786],
[0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]]
# Get iteration points
grad_list = [prob.grad(x) for x in x_list]
delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
for i in range(len(x_list)-1)]
delta_grad = [grad_list[i+1]-grad_list[i]
for i in range(len(grad_list)-1)]
# Check curvature condition
for i in range(len(delta_x)):
s = delta_x[i]
y = delta_grad[i]
if np.dot(s, y) <= 0:
raise ArithmeticError()
# Define QuasiNewton update
for quasi_newton in (BFGS(init_scale=1, min_curvature=1e-4),
SR1(init_scale=1)):
hess = deepcopy(quasi_newton)
inv_hess = deepcopy(quasi_newton)
hess.initialize(len(x_list[0]), 'hess')
inv_hess.initialize(len(x_list[0]), 'inv_hess')
# Compare the hessian and its inverse
for i in range(len(delta_x)):
s = delta_x[i]
y = delta_grad[i]
hess.update(s, y)
inv_hess.update(s, y)
B = hess.get_matrix()
H = inv_hess.get_matrix()
assert_array_almost_equal(np.linalg.inv(B), H, decimal=10)
B_true = prob.hess(x_list[i+1])
assert_array_less(norm(B - B_true)/norm(B_true), 0.1)
def test_SR1_skip_update(self):
# Define auxiliary problem
prob = Rosenbrock(n=5)
# Define iteration points
x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
[0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
[0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
[0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
[0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
[0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
[0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
[0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
[0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
[0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
[0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
[0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
[0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
[0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
[0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
[0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
[0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
[0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
[0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]]
# Get iteration points
grad_list = [prob.grad(x) for x in x_list]
delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
for i in range(len(x_list)-1)]
delta_grad = [grad_list[i+1]-grad_list[i]
for i in range(len(grad_list)-1)]
hess = SR1(init_scale=1, min_denominator=1e-2)
hess.initialize(len(x_list[0]), 'hess')
# Update the Hessian approximation with all but the last (s, y) pair
for i in range(len(delta_x)-1):
s = delta_x[i]
y = delta_grad[i]
hess.update(s, y)
# Test skip update
B = np.copy(hess.get_matrix())
s = delta_x[17]
y = delta_grad[17]
hess.update(s, y)
B_updated = np.copy(hess.get_matrix())
assert_array_equal(B, B_updated)
def test_BFGS_skip_update(self):
# Define auxiliary problem
prob = Rosenbrock(n=5)
# Define iteration points
x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
[0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
[0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
[0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
[0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
[0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
[0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184]]
# Get iteration points
grad_list = [prob.grad(x) for x in x_list]
delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
for i in range(len(x_list)-1)]
delta_grad = [grad_list[i+1]-grad_list[i]
for i in range(len(grad_list)-1)]
hess = BFGS(init_scale=1, min_curvature=10)
hess.initialize(len(x_list[0]), 'hess')
# Update the Hessian approximation with all but the last (s, y) pair
for i in range(len(delta_x)-1):
s = delta_x[i]
y = delta_grad[i]
hess.update(s, y)
# Test skip update
B = np.copy(hess.get_matrix())
s = delta_x[5]
y = delta_grad[5]
hess.update(s, y)
B_updated = np.copy(hess.get_matrix())
assert_array_equal(B, B_updated)
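# A sketch of what a single quasi-Newton step does, using the textbook BFGS
# formula B' = B - (B s s^T B)/(s^T B s) + (y y^T)/(y^T s) and comparing it
# with the BFGS class (init_scale=1 pins the initial matrix to the identity;
# s and y below are arbitrary vectors satisfying the curvature condition):
def _demo_single_bfgs_update():
    s = np.array([0.1, -0.2, 0.3])
    y = np.array([0.4, 0.1, 0.5])   # s.dot(y) = 0.17 > 0
    qn = BFGS(init_scale=1)
    qn.initialize(3, 'hess')
    qn.update(s, y)
    B = np.eye(3)
    Bs = B.dot(s)
    B_manual = (B - np.outer(Bs, Bs) / s.dot(Bs)
                + np.outer(y, y) / y.dot(s))
    assert_array_almost_equal(qn.get_matrix(), B_manual)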
# ============================================================================
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_trustregion.py
# ============================================================================
"""
Unit tests for trust-region optimization routines.
To run it in its simplest form::
nosetests test_trustregion.py
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess,
rosen_hess_prod)
from numpy.testing import assert_, assert_equal, assert_allclose
class Accumulator:
""" This is for testing callbacks."""
def __init__(self):
self.count = 0
self.accum = None
def __call__(self, x):
self.count += 1
if self.accum is None:
self.accum = np.array(x)
else:
self.accum += x
class TestTrustRegionSolvers(object):
def setup_method(self):
self.x_opt = [1.0, 1.0]
self.easy_guess = [2.0, 2.0]
self.hard_guess = [-1.2, 1.0]
def test_dogleg_accuracy(self):
# test the accuracy and the return_all option
x0 = self.hard_guess
r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8,
method='dogleg', options={'return_all': True},)
assert_allclose(x0, r['allvecs'][0])
assert_allclose(r['x'], r['allvecs'][-1])
assert_allclose(r['x'], self.x_opt)
def test_dogleg_callback(self):
# test the callback mechanism and the maxiter and return_all options
accumulator = Accumulator()
maxiter = 5
r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess,
callback=accumulator, method='dogleg',
options={'return_all': True, 'maxiter': maxiter},)
assert_equal(accumulator.count, maxiter)
assert_equal(len(r['allvecs']), maxiter+1)
assert_allclose(r['x'], r['allvecs'][-1])
assert_allclose(sum(r['allvecs'][1:]), accumulator.accum)
def test_solver_concordance(self):
# Assert that dogleg uses fewer iterations than ncg on the Rosenbrock
# test function, although this does not necessarily mean
# that dogleg is faster or better than ncg even for this function
# and especially not for other test functions.
f = rosen
g = rosen_der
h = rosen_hess
for x0 in (self.easy_guess, self.hard_guess):
r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
method='dogleg', options={'return_all': True})
r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
method='trust-ncg',
options={'return_all': True})
r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8,
method='trust-krylov',
options={'return_all': True})
r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
method='newton-cg', options={'return_all': True})
r_iterative = minimize(f, x0, jac=g, hess=h, tol=1e-8,
method='trust-exact',
options={'return_all': True})
assert_allclose(self.x_opt, r_dogleg['x'])
assert_allclose(self.x_opt, r_trust_ncg['x'])
assert_allclose(self.x_opt, r_trust_krylov['x'])
assert_allclose(self.x_opt, r_ncg['x'])
assert_allclose(self.x_opt, r_iterative['x'])
assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs']))
def test_trust_ncg_hessp(self):
for x0 in (self.easy_guess, self.hard_guess, self.x_opt):
r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod,
tol=1e-8, method='trust-ncg')
assert_allclose(self.x_opt, r['x'])
def test_trust_ncg_start_in_optimum(self):
r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
tol=1e-8, method='trust-ncg')
assert_allclose(self.x_opt, r['x'])
def test_trust_krylov_start_in_optimum(self):
r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
tol=1e-8, method='trust-krylov')
assert_allclose(self.x_opt, r['x'])
def test_trust_exact_start_in_optimum(self):
r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
tol=1e-8, method='trust-exact')
assert_allclose(self.x_opt, r['x'])
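# The hessp path above is interchangeable with the full Hessian by
# construction: rosen_hess_prod(x, p) equals rosen_hess(x) @ p. A quick
# sketch at an arbitrary point:
def _demo_hessp_matches_hess():
    x = np.array([-1.2, 1.0])
    p = np.array([0.5, -0.3])
    assert_allclose(rosen_hess_prod(x, p), rosen_hess(x).dot(p))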
# ============================================================================
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test__linprog_ip_clean_inputs.py
# ============================================================================
"""
Unit tests for the input validation and cleaning of the linear
programming interior-point method.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_allclose
from pytest import raises as assert_raises
from scipy.optimize._linprog_ip import _clean_inputs
from copy import deepcopy
def test_aliasing():
c = 1
A_ub = [[1]]
b_ub = [1]
A_eq = [[1]]
b_eq = [1]
bounds = (-np.inf, np.inf)
c_copy = deepcopy(c)
A_ub_copy = deepcopy(A_ub)
b_ub_copy = deepcopy(b_ub)
A_eq_copy = deepcopy(A_eq)
b_eq_copy = deepcopy(b_eq)
bounds_copy = deepcopy(bounds)
_clean_inputs(c, A_ub, b_ub, A_eq, b_eq, bounds)
assert_(c == c_copy, "c modified by _clean_inputs")
assert_(A_ub == A_ub_copy, "A_ub modified by _clean_inputs")
assert_(b_ub == b_ub_copy, "b_ub modified by _clean_inputs")
assert_(A_eq == A_eq_copy, "A_eq modified by _clean_inputs")
assert_(b_eq == b_eq_copy, "b_eq modified by _clean_inputs")
assert_(bounds == bounds_copy, "bounds modified by _clean_inputs")
def test_aliasing2():
c = np.array([1, 1])
A_ub = np.array([[1, 1], [2, 2]])
b_ub = np.array([[1], [1]])
A_eq = np.array([[1, 1]])
b_eq = np.array([1])
bounds = [(-np.inf, np.inf), (None, 1)]
c_copy = c.copy()
A_ub_copy = A_ub.copy()
b_ub_copy = b_ub.copy()
A_eq_copy = A_eq.copy()
b_eq_copy = b_eq.copy()
bounds_copy = deepcopy(bounds)
_clean_inputs(c, A_ub, b_ub, A_eq, b_eq, bounds)
assert_allclose(c, c_copy, err_msg="c modified by _clean_inputs")
assert_allclose(A_ub, A_ub_copy, err_msg="A_ub modified by _clean_inputs")
assert_allclose(b_ub, b_ub_copy, err_msg="b_ub modified by _clean_inputs")
assert_allclose(A_eq, A_eq_copy, err_msg="A_eq modified by _clean_inputs")
assert_allclose(b_eq, b_eq_copy, err_msg="b_eq modified by _clean_inputs")
assert_(bounds == bounds_copy, "bounds modified by _clean_inputs")
def test_missing_inputs():
c = [1, 2]
A_ub = np.array([[1, 1], [2, 2]])
b_ub = np.array([1, 1])
A_eq = np.array([[1, 1], [2, 2]])
b_eq = np.array([1, 1])
assert_raises(TypeError, _clean_inputs)
assert_raises(TypeError, _clean_inputs, c=None)
assert_raises(ValueError, _clean_inputs, c=c, A_ub=A_ub)
assert_raises(ValueError, _clean_inputs, c=c, A_ub=A_ub, b_ub=None)
assert_raises(ValueError, _clean_inputs, c=c, b_ub=b_ub)
assert_raises(ValueError, _clean_inputs, c=c, A_ub=None, b_ub=b_ub)
assert_raises(ValueError, _clean_inputs, c=c, A_eq=A_eq)
assert_raises(ValueError, _clean_inputs, c=c, A_eq=A_eq, b_eq=None)
assert_raises(ValueError, _clean_inputs, c=c, b_eq=b_eq)
assert_raises(ValueError, _clean_inputs, c=c, A_eq=None, b_eq=b_eq)
def test_too_many_dimensions():
cb = [1, 2, 3, 4]
A = np.random.rand(4, 4)
bad2D = [[1, 2], [3, 4]]
bad3D = np.random.rand(4, 4, 4)
assert_raises(ValueError, _clean_inputs, c=bad2D, A_ub=A, b_ub=cb)
assert_raises(ValueError, _clean_inputs, c=cb, A_ub=bad3D, b_ub=cb)
assert_raises(ValueError, _clean_inputs, c=cb, A_ub=A, b_ub=bad2D)
assert_raises(ValueError, _clean_inputs, c=cb, A_eq=bad3D, b_eq=cb)
assert_raises(ValueError, _clean_inputs, c=cb, A_eq=A, b_eq=bad2D)
def test_too_few_dimensions():
bad = np.random.rand(4, 4).ravel()
cb = np.random.rand(4)
assert_raises(ValueError, _clean_inputs, c=cb, A_ub=bad, b_ub=cb)
assert_raises(ValueError, _clean_inputs, c=cb, A_eq=bad, b_eq=cb)
def test_inconsistent_dimensions():
m = 2
n = 4
c = [1, 2, 3, 4]
Agood = np.random.rand(m, n)
Abad = np.random.rand(m, n + 1)
bgood = np.random.rand(m)
bbad = np.random.rand(m + 1)
boundsbad = [(0, 1)] * (n + 1)
assert_raises(ValueError, _clean_inputs, c=c, A_ub=Abad, b_ub=bgood)
assert_raises(ValueError, _clean_inputs, c=c, A_ub=Agood, b_ub=bbad)
assert_raises(ValueError, _clean_inputs, c=c, A_eq=Abad, b_eq=bgood)
assert_raises(ValueError, _clean_inputs, c=c, A_eq=Agood, b_eq=bbad)
assert_raises(ValueError, _clean_inputs, c=c, bounds=boundsbad)
def test_type_errors():
bad = "hello"
c = [1, 2]
A_ub = np.array([[1, 1], [2, 2]])
b_ub = np.array([1, 1])
A_eq = np.array([[1, 1], [2, 2]])
b_eq = np.array([1, 1])
bounds = [(0, 1)]
assert_raises(
TypeError,
_clean_inputs,
c=bad,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds=bounds)
assert_raises(
TypeError,
_clean_inputs,
c=c,
A_ub=bad,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds=bounds)
assert_raises(
TypeError,
_clean_inputs,
c=c,
A_ub=A_ub,
b_ub=bad,
A_eq=A_eq,
b_eq=b_eq,
bounds=bounds)
assert_raises(
TypeError,
_clean_inputs,
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=bad,
b_eq=b_eq,
bounds=bounds)
assert_raises(
TypeError,
_clean_inputs,
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds=bad)
assert_raises(
TypeError,
_clean_inputs,
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds="hi")
assert_raises(
TypeError,
_clean_inputs,
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds=["hi"])
assert_raises(
TypeError,
_clean_inputs,
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds=[
("hi")])
assert_raises(TypeError, _clean_inputs, c=c, A_ub=A_ub,
b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=[(1, "")])
assert_raises(TypeError, _clean_inputs, c=c, A_ub=A_ub,
b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=[(1, 2), (1, "")])
def test_non_finite_errors():
c = [1, 2]
A_ub = np.array([[1, 1], [2, 2]])
b_ub = np.array([1, 1])
A_eq = np.array([[1, 1], [2, 2]])
b_eq = np.array([1, 1])
bounds = [(0, 1)]
assert_raises(
ValueError, _clean_inputs, c=[0, None], A_ub=A_ub, b_ub=b_ub,
A_eq=A_eq, b_eq=b_eq, bounds=bounds)
assert_raises(
ValueError, _clean_inputs, c=[np.inf, 0], A_ub=A_ub, b_ub=b_ub,
A_eq=A_eq, b_eq=b_eq, bounds=bounds)
assert_raises(
ValueError, _clean_inputs, c=[0, -np.inf], A_ub=A_ub, b_ub=b_ub,
A_eq=A_eq, b_eq=b_eq, bounds=bounds)
assert_raises(
ValueError, _clean_inputs, c=[np.nan, 0], A_ub=A_ub, b_ub=b_ub,
A_eq=A_eq, b_eq=b_eq, bounds=bounds)
assert_raises(ValueError, _clean_inputs, c=c, A_ub=[[1, 2], [None, 1]],
b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds)
assert_raises(
ValueError,
_clean_inputs,
c=c,
A_ub=A_ub,
b_ub=[
np.inf,
1],
A_eq=A_eq,
b_eq=b_eq,
bounds=bounds)
assert_raises(ValueError, _clean_inputs, c=c, A_ub=A_ub, b_ub=b_ub, A_eq=[
[1, 2], [1, -np.inf]], b_eq=b_eq, bounds=bounds)
assert_raises(
ValueError,
_clean_inputs,
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=[
1,
np.nan],
bounds=bounds)
def test__clean_inputs1():
c = [1, 2]
A_ub = [[1, 1], [2, 2]]
b_ub = [1, 1]
A_eq = [[1, 1], [2, 2]]
b_eq = [1, 1]
bounds = None
outputs = _clean_inputs(
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds=bounds)
assert_allclose(outputs[0], np.array(c))
assert_allclose(outputs[1], np.array(A_ub))
assert_allclose(outputs[2], np.array(b_ub))
assert_allclose(outputs[3], np.array(A_eq))
assert_allclose(outputs[4], np.array(b_eq))
assert_(outputs[5] == [(0, None)] * 2, "")
assert_(outputs[0].shape == (2,), "")
assert_(outputs[1].shape == (2, 2), "")
assert_(outputs[2].shape == (2,), "")
assert_(outputs[3].shape == (2, 2), "")
assert_(outputs[4].shape == (2,), "")
def test__clean_inputs2():
c = 1
A_ub = [[1]]
b_ub = 1
A_eq = [[1]]
b_eq = 1
bounds = (0, 1)
outputs = _clean_inputs(
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds=bounds)
assert_allclose(outputs[0], np.array(c))
assert_allclose(outputs[1], np.array(A_ub))
assert_allclose(outputs[2], np.array(b_ub))
assert_allclose(outputs[3], np.array(A_eq))
assert_allclose(outputs[4], np.array(b_eq))
assert_(outputs[5] == [(0, 1)], "")
assert_(outputs[0].shape == (1,), "")
assert_(outputs[1].shape == (1, 1), "")
assert_(outputs[2].shape == (1,), "")
assert_(outputs[3].shape == (1, 1), "")
assert_(outputs[4].shape == (1,), "")
def test__clean_inputs3():
c = [[1, 2]]
A_ub = np.random.rand(2, 2)
b_ub = [[1], [2]]
A_eq = np.random.rand(2, 2)
b_eq = [[1], [2]]
bounds = [(0, 1)]
outputs = _clean_inputs(
c=c,
A_ub=A_ub,
b_ub=b_ub,
A_eq=A_eq,
b_eq=b_eq,
bounds=bounds)
assert_allclose(outputs[0], np.array([1, 2]))
assert_allclose(outputs[2], np.array([1, 2]))
assert_allclose(outputs[4], np.array([1, 2]))
assert_(outputs[5] == [(0, 1)] * 2, "")
assert_(outputs[0].shape == (2,), "")
assert_(outputs[2].shape == (2,), "")
assert_(outputs[4].shape == (2,), "")
def test_bad_bounds():
c = [1, 2]
assert_raises(ValueError, _clean_inputs, c=c, bounds=(1, -2))
assert_raises(ValueError, _clean_inputs, c=c, bounds=[(1, -2)])
assert_raises(ValueError, _clean_inputs, c=c, bounds=[(1, -2), (1, 2)])
assert_raises(ValueError, _clean_inputs, c=c, bounds=(1, 2, 2))
assert_raises(ValueError, _clean_inputs, c=c, bounds=[(1, 2, 2)])
assert_raises(ValueError, _clean_inputs, c=c, bounds=[(1, 2), (1, 2, 2)])
assert_raises(ValueError, _clean_inputs, c=c,
bounds=[(1, 2), (1, 2), (1, 2)])
def test_good_bounds():
c = [1, 2]
outputs = _clean_inputs(c=c, bounds=None)
assert_(outputs[5] == [(0, None)] * 2, "")
outputs = _clean_inputs(c=c, bounds=(1, 2))
assert_(outputs[5] == [(1, 2)] * 2, "")
outputs = _clean_inputs(c=c, bounds=[(1, 2)])
assert_(outputs[5] == [(1, 2)] * 2, "")
outputs = _clean_inputs(c=c, bounds=[(1, np.inf)])
assert_(outputs[5] == [(1, None)] * 2, "")
outputs = _clean_inputs(c=c, bounds=[(-np.inf, 1)])
assert_(outputs[5] == [(None, 1)] * 2, "")
outputs = _clean_inputs(c=c, bounds=[(-np.inf, np.inf), (-np.inf, np.inf)])
assert_(outputs[5] == [(None, None)] * 2, "")
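# A sketch of the broadcasting behavior covered by test_good_bounds: a single
# (lb, ub) pair is replicated once per variable, with infinities mapped to
# None. Index 5 of the returned tuple is the bounds list, mirroring the
# asserts above.
def _demo_bounds_broadcasting():
    outputs = _clean_inputs(c=[1, 2, 3], bounds=(0, 10))
    assert_(outputs[5] == [(0, 10)] * 3, "")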
# ============================================================================
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_optimize.py
# ============================================================================
"""
Unit tests for optimization routines from optimize.py
Authors:
Ed Schofield, Nov 2005
Andrew Straw, April 2008
To run it in its simplest form::
nosetests test_optimize.py
"""
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_,
assert_almost_equal, assert_warns,
assert_array_less)
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy import optimize
def test_check_grad():
# Verify if check_grad is able to estimate the derivative of the
# logistic function.
def logit(x):
return 1 / (1 + np.exp(-x))
def der_logit(x):
return np.exp(-x) / (1 + np.exp(-x))**2
x0 = np.array([1.5])
r = optimize.check_grad(logit, der_logit, x0)
assert_almost_equal(r, 0)
r = optimize.check_grad(logit, der_logit, x0, epsilon=1e-6)
assert_almost_equal(r, 0)
# Check if the epsilon parameter is being considered.
r = abs(optimize.check_grad(logit, der_logit, x0, epsilon=1e-1) - 0)
assert_(r > 1e-7)
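# check_grad is documented as the 2-norm of the difference between the
# analytic gradient and a forward-difference estimate; a rough equivalent
# sketch built on the public approx_fprime helper:
def _demo_check_grad_by_hand(func, grad, x0):
    epsilon = np.sqrt(np.finfo(float).eps)   # check_grad's default step
    fd = optimize.approx_fprime(x0, func, epsilon)
    return np.sqrt(np.sum((grad(x0) - fd) ** 2))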
class CheckOptimize(object):
""" Base test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def setup_method(self):
self.F = np.array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
self.K = np.array([1., 0.3, 0.5])
self.startparams = np.zeros(3, np.float64)
self.solution = np.array([0., -0.524869316, 0.487525860])
self.maxiter = 1000
self.funccalls = 0
self.gradcalls = 0
self.trace = []
def func(self, x):
self.funccalls += 1
if self.funccalls > 6000:
raise RuntimeError("too many iterations in optimization routine")
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
f = logZ - np.dot(self.K, x)
self.trace.append(x)
return f
def grad(self, x):
self.gradcalls += 1
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.transpose(), p) - self.K
def hess(self, x):
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.T,
np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))
def hessp(self, x, p):
return np.dot(self.hess(x), p)
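# The hessp member above is hess(x) @ p by construction, so the Newton-CG
# variants tested below see identical curvature information. A quick
# instantiation sketch (setup_method is called by hand outside of pytest):
def _demo_checkoptimize_hessp_consistency():
    prob = CheckOptimize()
    prob.setup_method()
    x = prob.startparams + 0.1
    p = np.array([1.0, -2.0, 0.5])
    assert_allclose(prob.hessp(x, p), prob.hess(x).dot(p))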
class CheckOptimizeParameterized(CheckOptimize):
def test_cg(self):
# conjugate gradient optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='CG', jac=self.grad,
options=opts)
params, fopt, func_calls, grad_calls, warnflag = \
res['x'], res['fun'], res['nfev'], res['njev'], res['status']
else:
retval = optimize.fmin_cg(self.func, self.startparams,
self.grad, (), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 9, self.funccalls)
assert_(self.gradcalls == 7, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[2:4],
[[0, -0.5, 0.5],
[0, -5.05700028e-01, 4.95985862e-01]],
atol=1e-14, rtol=1e-7)
def test_cg_cornercase(self):
def f(r):
return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2
# Check several initial guesses. (Too far away from the
# minimum, the function ends up in the flat region of exp.)
for x0 in np.linspace(-0.75, 3, 71):
sol = optimize.minimize(f, [x0], method='CG')
assert_(sol.success)
assert_allclose(sol.x, [0.5], rtol=1e-5)
def test_bfgs(self):
# Broyden-Fletcher-Goldfarb-Shanno optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams,
jac=self.grad, method='BFGS', args=(),
options=opts)
params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (
res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
else:
retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 10, self.funccalls)
assert_(self.gradcalls == 8, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[6:8],
[[0, -5.25060743e-01, 4.87748473e-01],
[0, -5.24885582e-01, 4.87530347e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs_infinite(self):
# Test corner case where -Inf is the minimum. See gh-2019.
func = lambda x: -np.e**-x
fprime = lambda x: -func(x)
x0 = [0]
olderr = np.seterr(over='ignore')
try:
if self.use_wrapper:
opts = {'disp': self.disp}
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
args=(), options=opts)['x']
else:
x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
assert_(not np.isfinite(func(x)))
finally:
np.seterr(**olderr)
def test_powell(self):
# Powell (direction set) optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Powell', options=opts)
params, fopt, direc, numiter, func_calls, warnflag = (
res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, direc, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
#
# However, some leeway must be added: the exact evaluation
# count is sensitive to numerical error, and floating-point
# computations are not bit-for-bit reproducible across
# machines, and when using e.g. MKL, data alignment
# etc. affect the rounding error.
#
assert_(self.funccalls <= 116 + 20, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[34:39],
[[0.72949016, -0.44156936, 0.47100962],
[0.72949016, -0.44156936, 0.48052496],
[1.45898031, -0.88313872, 0.95153458],
[0.72949016, -0.44156936, 0.47576729],
[1.72949016, -0.44156936, 0.47576729]],
atol=1e-14, rtol=1e-7)
def test_neldermead(self):
# Nelder-Mead simplex algorithm
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag, final_simplex = (
res['x'], res['fun'], res['nit'], res['nfev'],
res['status'], res['final_simplex'])
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 167, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[76:78],
[[0.1928968, -0.62780447, 0.35166118],
[0.19572515, -0.63648426, 0.35838135]],
atol=1e-14, rtol=1e-7)
def test_neldermead_initial_simplex(self):
# Nelder-Mead simplex algorithm
simplex = np.zeros((4, 3))
simplex[...] = self.startparams
for j in range(3):
simplex[j+1,j] += 0.1
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': True, 'initial_simplex': simplex}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag = \
res['x'], res['fun'], res['nit'], res['nfev'], \
res['status']
assert_allclose(res['allvecs'][0], simplex[0])
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False,
initial_simplex=simplex)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.17.0. Don't allow them to increase.
assert_(self.funccalls == 100, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.15.0
assert_allclose(self.trace[50:52],
[[0.14687474, -0.5103282, 0.48252111],
[0.14474003, -0.5282084, 0.48743951]],
atol=1e-14, rtol=1e-7)
def test_neldermead_initial_simplex_bad(self):
# Check that it fails with bad simplices
bad_simplices = []
simplex = np.zeros((3, 2))
simplex[...] = self.startparams[:2]
for j in range(2):
simplex[j+1,j] += 0.1
bad_simplices.append(simplex)
simplex = np.zeros((3, 3))
bad_simplices.append(simplex)
for simplex in bad_simplices:
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False, 'initial_simplex': simplex}
assert_raises(ValueError,
optimize.minimize, self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
else:
assert_raises(ValueError, optimize.fmin, self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False,
initial_simplex=simplex)
def test_ncg_negative_maxiter(self):
# Regression test for gh-8241
opts = {'maxiter': -1}
result = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)
assert_(result.status == 1)
def test_ncg(self):
# line-search Newton conjugate gradient optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 22, self.gradcalls) # 0.13.0
#assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hess(self):
# Newton conjugate gradient with Hessian
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess=self.hess,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess=self.hess,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
# assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
# assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hessp(self):
# Newton conjugate gradient with Hessian times a vector p.
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hessp=self.hessp,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p=self.hessp,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
# assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
# assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_neldermead_xatol_fatol():
# gh4484
# test we can call with fatol, xatol specified
func = lambda x: x[0]**2 + x[1]**2
optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,
xatol=1e-3, fatol=1e-3)
assert_warns(DeprecationWarning,
optimize._minimize._minimize_neldermead,
func, [1, 1], xtol=1e-3, ftol=1e-3, maxiter=2)
def test_neldermead_adaptive():
func = lambda x: np.sum(x**2)
p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159, 0.32308456,
0.9692297, 0.4471682, 0.77411992, 0.80441652, 0.35994957, 0.75487856,
0.99973421, 0.65063887, 0.09626474]
res = optimize.minimize(func, p0, method='Nelder-Mead')
assert_equal(res.success, False)
res = optimize.minimize(func, p0, method='Nelder-Mead',
options={'adaptive':True})
assert_equal(res.success, True)
class TestOptimizeWrapperDisp(CheckOptimizeParameterized):
use_wrapper = True
disp = True
class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized):
use_wrapper = True
disp = False
class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized):
use_wrapper = False
disp = True
class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized):
use_wrapper = False
disp = False
class TestOptimizeSimple(CheckOptimize):
def test_bfgs_nan(self):
# Test corner case where nan is fed to optimizer. See gh-2067.
func = lambda x: x
fprime = lambda x: np.ones_like(x)
x0 = [np.nan]
with np.errstate(over='ignore', invalid='ignore'):
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(np.isnan(func(x)))
def test_bfgs_nan_return(self):
# Test corner cases where fun returns NaN. See gh-4793.
# First case: NaN from first call.
func = lambda x: np.nan
with np.errstate(invalid='ignore'):
result = optimize.minimize(func, 0)
assert_(np.isnan(result['fun']))
assert_(result['success'] is False)
# Second case: NaN from second call.
func = lambda x: 0 if x == 0 else np.nan
fprime = lambda x: np.ones_like(x) # Steer away from zero.
with np.errstate(invalid='ignore'):
result = optimize.minimize(func, 0, jac=fprime)
assert_(np.isnan(result['fun']))
assert_(result['success'] is False)
def test_bfgs_numerical_jacobian(self):
# BFGS with numerical jacobian and a vector epsilon parameter.
# define the epsilon parameter using a random vector
epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))
params = optimize.fmin_bfgs(self.func, self.startparams,
epsilon=epsilon, args=(),
maxiter=self.maxiter, disp=False)
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_bfgs_gh_2169(self):
def f(x):
if x < 0:
return 1.79769313e+308
else:
return x + 1./x
xs = optimize.fmin_bfgs(f, [10.], disp=False)
assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)
def test_l_bfgs_b(self):
# limited-memory bound-constrained BFGS algorithm
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
self.grad, args=(),
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls == 5, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[0., -0.52489628, 0.48753042],
[0., -0.52489628, 0.48753042]],
atol=1e-14, rtol=1e-7)
def test_l_bfgs_b_numjac(self):
# L-BFGS-B with numerical jacobian
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
approx_grad=True,
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_funjac(self):
# L-BFGS-B with combined objective function and jacobian
def fun(x):
return self.func(x), self.grad(x)
retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_maxiter(self):
# gh7854
# Ensure that not more than maxiters are ever run.
class Callback(object):
def __init__(self):
self.nit = 0
self.fun = None
self.x = None
def __call__(self, x):
self.x = x
self.fun = optimize.rosen(x)
self.nit += 1
c = Callback()
res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b',
callback=c, options={'maxiter': 5})
assert_equal(res.nit, 5)
assert_almost_equal(res.x, c.x)
assert_almost_equal(res.fun, c.fun)
assert_equal(res.status, 1)
assert_(res.success is False)
assert_equal(res.message.decode(), 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT')
def test_minimize_l_bfgs_b(self):
# Minimize with L-BFGS-B method
opts = {'disp': False, 'maxiter': self.maxiter}
r = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
assert_allclose(self.func(r.x), self.func(self.solution),
atol=1e-6)
# approximate jacobian
ra = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', options=opts)
assert_allclose(self.func(ra.x), self.func(self.solution),
atol=1e-6)
# check that function evaluations in approximate jacobian are counted
assert_(ra.nfev > r.nfev)
def test_minimize_l_bfgs_b_ftol(self):
# Check that the `ftol` parameter in l_bfgs_b works as expected
v0 = None
for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
sol = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
v = self.func(sol.x)
if v0 is None:
v0 = v
else:
assert_(v < v0)
assert_allclose(v, self.func(self.solution), rtol=tol)
def test_minimize_l_bfgs_maxls(self):
# check that the maxls is passed down to the Fortran routine
sol = optimize.minimize(optimize.rosen, np.array([-1.2,1.0]),
method='L-BFGS-B', jac=optimize.rosen_der,
options={'disp': False, 'maxls': 1})
assert_(not sol.success)
def test_minimize_l_bfgs_b_maxfun_interruption(self):
# gh-6162
f = optimize.rosen
g = optimize.rosen_der
values = []
x0 = np.ones(7) * 1000
def objfun(x):
value = f(x)
values.append(value)
return value
# Look for an interesting test case.
# Request a maxfun that stops at a particularly bad function
# evaluation somewhere between 100 and 300 evaluations.
low, medium, high = 30, 100, 300
optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
v, k = max((y, i) for i, y in enumerate(values[medium:]))
maxfun = medium + k
# If the minimization strategy is reasonable,
# the minimize() result should not be worse than the best
# of the first 30 function evaluations.
target = min(values[:low])
xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
assert_array_less(fmin, target)
def test_custom(self):
# This function comes from the documentation example.
def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
maxiter=100, callback=None, **options):
bestx = x0
besty = fun(x0)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
improved = False
niter += 1
for dim in range(np.size(x0)):
for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
testx = np.copy(bestx)
testx[dim] = s
testy = fun(testx, *args)
funcalls += 1
if testy < besty:
besty = testy
bestx = testx
improved = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
nfev=funcalls, success=(niter > 1))
x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
res = optimize.minimize(optimize.rosen, x0, method=custmin,
options=dict(stepsize=0.05))
assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)
def test_minimize_tol_parameter(self):
# Check that the minimize() tol= argument does something
def func(z):
x, y = z
return x**2*y**2 + x**4 + 1
def dfunc(z):
x, y = z
return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])
for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
'newton-cg', 'l-bfgs-b', 'tnc',
'cobyla', 'slsqp']:
if method in ('nelder-mead', 'powell', 'cobyla'):
jac = None
else:
jac = dfunc
sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10,
method=method)
sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0,
method=method)
assert_(func(sol1.x) < func(sol2.x),
"%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)))
@pytest.mark.parametrize('method', ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs',
'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc',
'fmin_slsqp',
'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B',
'TNC', 'SLSQP', 'trust-constr', 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov'])
def test_minimize_callback_copies_array(self, method):
# Check that arrays passed to callbacks are not modified
# inplace by the optimizer afterward
if method in ('fmin_tnc', 'fmin_l_bfgs_b'):
func = lambda x: (optimize.rosen(x), optimize.rosen_der(x))
else:
func = optimize.rosen
jac = optimize.rosen_der
hess = optimize.rosen_hess
x0 = np.zeros(10)
# Set options
kwargs = {}
if method.startswith('fmin'):
routine = getattr(optimize, method)
if method == 'fmin_slsqp':
kwargs['iter'] = 5
elif method == 'fmin_tnc':
kwargs['maxfun'] = 100
else:
kwargs['maxiter'] = 5
else:
def routine(*a, **kw):
kw['method'] = method
return optimize.minimize(*a, **kw)
if method == 'TNC':
kwargs['options'] = dict(maxiter=100)
else:
kwargs['options'] = dict(maxiter=5)
if method in ('fmin_ncg',):
kwargs['fprime'] = jac
elif method in ('Newton-CG',):
kwargs['jac'] = jac
elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
'trust-constr'):
kwargs['jac'] = jac
kwargs['hess'] = hess
# Run with callback
results = []
def callback(x, *args, **kwargs):
results.append((x, np.copy(x)))
sol = routine(func, x0, callback=callback, **kwargs)
# Check returned arrays coincide with their copies and have no memory overlap
assert_(len(results) > 2)
assert_(all(np.all(x == y) for x, y in results))
assert_(not any(np.may_share_memory(x[0], y[0]) for x, y in itertools.combinations(results, 2)))
@pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
'l-bfgs-b', 'tnc', 'cobyla', 'slsqp'])
def test_no_increase(self, method):
# Check that the solver doesn't return a value worse than the
# initial point.
def func(x):
return (x - 1)**2
def bad_grad(x):
# purposefully invalid gradient function, simulates a case
# where line searches start failing
return 2*(x - 1) * (-1) - 2
x0 = np.array([2.0])
f0 = func(x0)
jac = bad_grad
if method in ['nelder-mead', 'powell', 'cobyla']:
jac = None
sol = optimize.minimize(func, x0, jac=jac, method=method,
options=dict(maxiter=20))
assert_equal(func(sol.x), sol.fun)
if method == 'slsqp':
pytest.xfail("SLSQP returns slightly worse")
assert_(func(sol.x) <= f0)
def test_slsqp_respect_bounds(self):
# Regression test for gh-3108
def f(x):
return sum((x - np.array([1., 2., 3., 4.]))**2)
def cons(x):
a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])
x0 = np.array([0.5, 1., 1.5, 2.])
res = optimize.minimize(f, x0, method='slsqp',
constraints={'type': 'ineq', 'fun': cons})
assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)
def test_minimize_automethod(self):
def f(x):
return x**2
def cons(x):
return x - 2
x0 = np.array([10.])
sol_0 = optimize.minimize(f, x0)
sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}])
sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
sol_3 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(5, 10)])
sol_4 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(1, 10)])
for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
assert_(sol.success)
assert_allclose(sol_0.x, 0, atol=1e-7)
assert_allclose(sol_1.x, 2, atol=1e-7)
assert_allclose(sol_2.x, 5, atol=1e-7)
assert_allclose(sol_3.x, 5, atol=1e-7)
assert_allclose(sol_4.x, 2, atol=1e-7)
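    def _example_automethod_selection(self):
        # Added note (selection behavior as documented for minimize(), stated
        # here as an assumption): when no method is given, minimize() picks
        # BFGS for unconstrained problems, L-BFGS-B when only bounds are
        # present, and SLSQP when constraints are supplied, which explains
        # the five cases above.
        def f(x):
            return x**2
        return optimize.minimize(f, [10.], bounds=[(5, 10)])  # L-BFGS-B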
def test_minimize_coerce_args_param(self):
# Regression test for gh-3503
def Y(x, c):
return np.sum((x-c)**2)
def dY_dx(x, c=None):
return 2*(x-c)
c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
xinit = np.random.randn(len(c))
optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")
def test_initial_step_scaling(self):
# Check that optimizer initial step is not huge even if the
# function and gradients are
scales = [1e-50, 1, 1e50]
methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']
def f(x):
if first_step_size[0] is None and x[0] != x0[0]:
first_step_size[0] = abs(x[0] - x0[0])
if abs(x).max() > 1e4:
raise AssertionError("Optimization stepped far away!")
return scale*(x[0] - 1)**2
def g(x):
return np.array([scale*(x[0] - 1)])
for scale, method in itertools.product(scales, methods):
if method in ('CG', 'BFGS'):
options = dict(gtol=scale*1e-8)
else:
options = dict()
if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):
                # XXX: these methods return the initial point if they
                # see a small gradient
continue
x0 = [-1.0]
first_step_size = [None]
res = optimize.minimize(f, x0, jac=g, method=method,
options=options)
err_msg = "{0} {1}: {2}: {3}".format(method, scale, first_step_size,
res)
assert_(res.success, err_msg)
assert_allclose(res.x, [1.0], err_msg=err_msg)
assert_(res.nit <= 3, err_msg)
if scale > 1e-10:
if method in ('CG', 'BFGS'):
assert_allclose(first_step_size[0], 1.01, err_msg=err_msg)
else:
# Newton-CG and L-BFGS-B use different logic for the first step,
# but are both scaling invariant with step sizes ~ 1
assert_(first_step_size[0] > 0.5 and first_step_size[0] < 3,
err_msg)
else:
# step size has upper bound of ||grad||, so line
# search makes many small steps
pass
class TestLBFGSBBounds(object):
def setup_method(self):
self.bounds = ((1, None), (None, None))
self.solution = (1, 0)
def fun(self, x, p=2.0):
return 1.0 / p * (x[0]**p + x[1]**p)
def jac(self, x, p=2.0):
return x**(p - 1)
def fj(self, x, p=2.0):
return self.fun(x, p), self.jac(x, p)
def test_l_bfgs_b_bounds(self):
x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
fprime=self.jac,
bounds=self.bounds)
assert_(d['warnflag'] == 0, d['task'])
assert_allclose(x, self.solution, atol=1e-6)
def test_l_bfgs_b_funjac(self):
# L-BFGS-B with fun and jac combined and extra arguments
x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
bounds=self.bounds)
assert_(d['warnflag'] == 0, d['task'])
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_l_bfgs_b_bounds(self):
# Minimize with method='L-BFGS-B' with bounds
res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
jac=self.jac, bounds=self.bounds)
assert_(res['success'], res['message'])
assert_allclose(res.x, self.solution, atol=1e-6)
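    def _example_bounds_convention(self):
        # Added note: each bounds entry is a (lower, upper) pair, and None
        # means unbounded on that side; self.bounds above constrains only
        # x[0] >= 1 and leaves x[1] free.
        x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1], fprime=self.jac,
                                         bounds=((1, None), (None, None)))
        return x  # close to self.solution == (1, 0)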
class TestOptimizeScalar(object):
def setup_method(self):
self.solution = 1.5
def fun(self, x, a=1.5):
"""Objective function"""
return (x - a)**2 - 0.8
def test_brent(self):
x = optimize.brent(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack=(-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack=(-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
def test_golden(self):
x = optimize.golden(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack=(-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack=(-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, tol=0)
assert_allclose(x, self.solution)
maxiter_test_cases = [0, 1, 5]
for maxiter in maxiter_test_cases:
x0 = optimize.golden(self.fun, maxiter=0, full_output=True)
x = optimize.golden(self.fun, maxiter=maxiter, full_output=True)
nfev0, nfev = x0[2], x[2]
assert_equal(nfev - nfev0, maxiter)
def test_fminbound(self):
x = optimize.fminbound(self.fun, 0, 1)
assert_allclose(x, 1, atol=1e-4)
x = optimize.fminbound(self.fun, 1, 5)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
def test_fminbound_scalar(self):
try:
optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
self.fail("exception not raised")
except ValueError as e:
assert_('must be scalar' in str(e))
x = optimize.fminbound(self.fun, 1, np.array(5))
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_scalar(self):
# combine all tests above for the minimize_scalar wrapper
x = optimize.minimize_scalar(self.fun).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='Brent')
assert_(x.success)
x = optimize.minimize_scalar(self.fun, method='Brent',
options=dict(maxiter=3))
assert_(not x.success)
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='Brent',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='golden',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
method='Bounded').x
assert_allclose(x, 1, atol=1e-4)
x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
np.array([5])),
args=(np.array([1.5]), ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(5, 1), method='bounded', args=(1.5, ))
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))
x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_scalar_custom(self):
# This function comes from the documentation example.
def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
maxiter=100, callback=None, **options):
bestx = (bracket[1] + bracket[0]) / 2.0
besty = fun(bestx)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
improved = False
niter += 1
for testx in [bestx - stepsize, bestx + stepsize]:
testy = fun(testx, *args)
funcalls += 1
if testy < besty:
besty = testy
bestx = testx
improved = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
nfev=funcalls, success=(niter > 1))
res = optimize.minimize_scalar(self.fun, bracket=(0, 4), method=custmin,
options=dict(stepsize=0.05))
assert_allclose(res.x, self.solution, atol=1e-6)
def test_minimize_scalar_coerce_args_param(self):
# Regression test for gh-3503
optimize.minimize_scalar(self.fun, args=1.5)
def test_brent_negative_tolerance():
assert_raises(ValueError, optimize.brent, np.cos, tol=-.01)
class TestNewtonCg(object):
def test_rosenbrock(self):
x0 = np.array([-1.2, 1.0])
sol = optimize.minimize(optimize.rosen, x0,
jac=optimize.rosen_der,
hess=optimize.rosen_hess,
tol=1e-5,
method='Newton-CG')
assert_(sol.success, sol.message)
assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
def test_himmelblau(self):
x0 = np.array(himmelblau_x0)
sol = optimize.minimize(himmelblau,
x0,
jac=himmelblau_grad,
hess=himmelblau_hess,
method='Newton-CG',
tol=1e-6)
assert_(sol.success, sol.message)
assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)
assert_allclose(sol.fun, himmelblau_min, atol=1e-4)
class TestRosen(object):
def test_hess(self):
# Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775
x = np.array([3, 4, 5])
p = np.array([2, 2, 2])
hp = optimize.rosen_hess_prod(x, p)
dothp = np.dot(optimize.rosen_hess(x), p)
assert_equal(hp, dothp)
def himmelblau(p):
"""
R^2 -> R^1 test function for optimization. The function has four local
minima where himmelblau(xopt) == 0.
"""
x, y = p
a = x*x + y - 11
b = x + y*y - 7
return a*a + b*b
def himmelblau_grad(p):
x, y = p
return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14,
2*x**2 + 4*x*y + 4*y**3 - 26*y - 22])
def himmelblau_hess(p):
x, y = p
return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y],
[4*x + 4*y, 4*x + 12*y**2 - 26]])
himmelblau_x0 = [-0.27, -0.9]
himmelblau_xopt = [3, 2]
himmelblau_min = 0.0
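def _example_check_himmelblau_grad():
    # Added sketch (not in the original file): verify himmelblau_grad against
    # a finite-difference approximation. check_grad returns the norm of the
    # difference, which should be tiny at himmelblau_x0 if the analytic
    # gradient is consistent with the function.
    return optimize.check_grad(himmelblau, himmelblau_grad, himmelblau_x0)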
def test_minimize_multiple_constraints():
# Regression test for gh-4240.
def func(x):
return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
def func1(x):
return np.array([x[1]])
def func2(x):
return np.array([x[2]])
cons = ({'type': 'ineq', 'fun': func},
{'type': 'ineq', 'fun': func1},
{'type': 'ineq', 'fun': func2})
f = lambda x: -1 * (x[0] + x[1] + x[2])
res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons)
assert_allclose(res.x, [125, 0, 0], atol=1e-10)
class TestOptimizeResultAttributes(object):
    # Test that all minimizers return an OptimizeResult containing
    # all of the standard OptimizeResult attributes
def setup_method(self):
self.x0 = [5, 5]
self.func = optimize.rosen
self.jac = optimize.rosen_der
self.hess = optimize.rosen_hess
self.hessp = optimize.rosen_hess_prod
self.bounds = [(0., 10.), (0., 10.)]
def test_attributes_present(self):
methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP', 'dogleg',
'trust-ncg']
attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun',
'message']
skip = {'COBYLA': ['nit']}
for method in methods:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"Method .+ does not use (gradient|Hessian.*) information")
res = optimize.minimize(self.func, self.x0, method=method,
jac=self.jac, hess=self.hess,
hessp=self.hessp)
for attribute in attributes:
if method in skip and attribute in skip[method]:
continue
assert_(hasattr(res, attribute))
assert_(attribute in dir(res))
class TestBrute:
# Test the "brute force" method
def setup_method(self):
self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
self.solution = np.array([-1.05665192, 1.80834843])
def f1(self, z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
def f2(self, z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
def f3(self, z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
def func(self, z, *params):
return self.f1(z, *params) + self.f2(z, *params) + self.f3(z, *params)
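    def _example_grid_shape(self):
        # Added note: each slice(-4, 4, 0.25) in self.rranges describes one
        # grid axis; brute evaluates func over the full Cartesian product,
        # here a 32 x 32 grid of candidate points.
        grid = np.mgrid[self.rranges]
        return grid.shape  # (2, 32, 32)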
def test_brute(self):
# test fmin
resbrute = optimize.brute(self.func, self.rranges, args=self.params,
full_output=True, finish=optimize.fmin)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
assert_allclose(resbrute[1], self.func(self.solution, *self.params),
atol=1e-3)
# test minimize
resbrute = optimize.brute(self.func, self.rranges, args=self.params,
full_output=True,
finish=optimize.minimize)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
assert_allclose(resbrute[1], self.func(self.solution, *self.params),
atol=1e-3)
def test_1D(self):
# test that for a 1D problem the test function is passed an array,
# not a scalar.
def f(x):
assert_(len(x.shape) == 1)
assert_(x.shape[0] == 1)
return x ** 2
optimize.brute(f, [(-1, 1)], Ns=3, finish=None)
class TestIterationLimits(object):
    # Tests that optimisation does not give up before trying the requested
    # number of iterations or evaluations, and that it does not succeed
    # by exceeding the limits.
def setup_method(self):
self.funcalls = 0
def slow_func(self, v):
self.funcalls += 1
r,t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0],v[1])
return np.sin(r*20 + t)+r*0.5
def test_neldermead_limit(self):
self.check_limits("Nelder-Mead", 200)
def test_powell_limit(self):
self.check_limits("powell", 1000)
def check_limits(self, method, default_iters):
for start_v in [[0.1,0.1], [1,1], [2,2]]:
for mfev in [50, 500, 5000]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxfev":mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
assert_(res["nfev"] < mfev)
else:
assert_(res["nfev"] >= mfev)
for mit in [50, 500,5000]:
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxiter":mit})
if res["success"]:
assert_(res["nit"] <= mit)
else:
assert_(res["nit"] >= mit)
for mfev,mit in [[50,50], [5000,5000],[5000,np.inf]]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxiter":mit, "maxfev":mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
assert_(res["nfev"] < mfev and res["nit"] <= mit)
else:
assert_(res["nfev"] >= mfev or res["nit"] >= mit)
for mfev,mit in [[np.inf,None], [None,np.inf]]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxiter":mit, "maxfev":mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
if mfev is None:
assert_(res["nfev"] < default_iters*2)
else:
assert_(res["nit"] <= default_iters*2)
else:
assert_(res["nfev"] >= default_iters*2 or
res["nit"] >= default_iters*2)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test__basinhopping.py
"""
Unit tests for the basin hopping global minimization algorithm.
"""
from __future__ import division, print_function, absolute_import
import copy
from numpy.testing import assert_almost_equal, assert_equal, assert_
from pytest import raises as assert_raises
import numpy as np
from numpy import cos, sin
from scipy.optimize import basinhopping, OptimizeResult
from scipy.optimize._basinhopping import (
Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)
def func1d(x):
f = cos(14.5 * x - 0.3) + (x + 0.2) * x
df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
return f, df
def func2d_nograd(x):
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
return f
def func2d(x):
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
df = np.zeros(2)
df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
df[1] = 2. * x[1] + 0.2
return f, df
def func2d_easyderiv(x):
f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0]
df = np.zeros(2)
df[0] = 4.0*x[0] + 2.0*x[1] - 6.0
df[1] = 2.0*x[0] + 4.0*x[1]
return f, df
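def _example_basinhopping_usage():
    # Added usage sketch (not in the original file): a minimal basinhopping
    # call on func2d. Because func2d returns the pair (f, grad), jac=True is
    # passed through minimizer_kwargs to the local L-BFGS-B minimizer.
    minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
    ret = basinhopping(func2d, [1.0, 1.0],
                       minimizer_kwargs=minimizer_kwargs, niter=100)
    return ret  # ret.x should be near the known minimum [-0.195, -0.1]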
class MyTakeStep1(RandomDisplacement):
"""use a copy of displace, but have it set a special parameter to
make sure it's actually being used."""
def __init__(self):
self.been_called = False
super(MyTakeStep1, self).__init__()
def __call__(self, x):
self.been_called = True
return super(MyTakeStep1, self).__call__(x)
def myTakeStep2(x):
"""redo RandomDisplacement in function form without the attribute stepsize
to make sure everything still works ok
"""
s = 0.5
x += np.random.uniform(-s, s, np.shape(x))
return x
class MyAcceptTest(object):
"""pass a custom accept test
    This does nothing but make sure it's being used and ensures that all the
    possible return values are accepted.
"""
def __init__(self):
self.been_called = False
self.ncalls = 0
self.testres = [False, 'force accept', True, np.bool_(True),
np.bool_(False), [], {}, 0, 1]
def __call__(self, **kwargs):
self.been_called = True
self.ncalls += 1
if self.ncalls - 1 < len(self.testres):
return self.testres[self.ncalls - 1]
else:
return True
class MyCallBack(object):
"""pass a custom callback function
    This makes sure it's being used. It also returns True after 10
    steps so that the run stops early.
"""
def __init__(self):
self.been_called = False
self.ncalls = 0
def __call__(self, x, f, accepted):
self.been_called = True
self.ncalls += 1
if self.ncalls == 10:
return True
class TestBasinHopping(object):
def setup_method(self):
""" Tests setup.
Run tests based on the 1-D and 2-D functions described above.
"""
self.x0 = (1.0, [1.0, 1.0])
self.sol = (-0.195, np.array([-0.195, -0.1]))
self.tol = 3 # number of decimal places
self.niter = 100
self.disp = False
# fix random seed
np.random.seed(1234)
self.kwargs = {"method": "L-BFGS-B", "jac": True}
self.kwargs_nograd = {"method": "L-BFGS-B"}
def test_TypeError(self):
# test the TypeErrors are raised on bad input
i = 1
# if take_step is passed, it must be callable
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
take_step=1)
# if accept_test is passed, it must be callable
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
accept_test=1)
def test_1d_grad(self):
# test 1d minimizations with gradient
i = 0
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_2d(self):
# test 2d minimizations with gradient
i = 1
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
assert_(res.nfev > 0)
def test_njev(self):
# test njev is returned correctly
i = 1
minimizer_kwargs = self.kwargs.copy()
# L-BFGS-B doesn't use njev, but BFGS does
minimizer_kwargs["method"] = "BFGS"
res = basinhopping(func2d, self.x0[i],
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
disp=self.disp)
assert_(res.nfev > 0)
assert_equal(res.nfev, res.njev)
def test_jac(self):
# test jacobian returned
minimizer_kwargs = self.kwargs.copy()
# BFGS returns a Jacobian
minimizer_kwargs["method"] = "BFGS"
res = basinhopping(func2d_easyderiv, [0.0, 0.0],
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
disp=self.disp)
assert_(hasattr(res.lowest_optimization_result, "jac"))
# in this case, the jacobian is just [df/dx, df/dy]
_, jacobian = func2d_easyderiv(res.x)
assert_almost_equal(res.lowest_optimization_result.jac, jacobian,
self.tol)
def test_2d_nograd(self):
# test 2d minimizations without gradient
i = 1
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=self.kwargs_nograd,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_all_minimizers(self):
# test 2d minimizations with gradient. Nelder-Mead, Powell and COBYLA
# don't accept jac=True, so aren't included here.
i = 1
methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
minimizer_kwargs = copy.copy(self.kwargs)
for method in methods:
minimizer_kwargs["method"] = method
res = basinhopping(func2d, self.x0[i],
minimizer_kwargs=minimizer_kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_all_nograd_minimizers(self):
# test 2d minimizations without gradient. Newton-CG requires jac=True,
# so not included here.
i = 1
methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
'Nelder-Mead', 'Powell', 'COBYLA']
minimizer_kwargs = copy.copy(self.kwargs_nograd)
for method in methods:
minimizer_kwargs["method"] = method
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=minimizer_kwargs,
niter=self.niter, disp=self.disp)
tol = self.tol
if method == 'COBYLA':
tol = 2
assert_almost_equal(res.x, self.sol[i], decimal=tol)
def test_pass_takestep(self):
# test that passing a custom takestep works
# also test that the stepsize is being adjusted
takestep = MyTakeStep1()
initial_step_size = takestep.stepsize
i = 1
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp,
take_step=takestep)
assert_almost_equal(res.x, self.sol[i], self.tol)
assert_(takestep.been_called)
# make sure that the built in adaptive step size has been used
assert_(initial_step_size != takestep.stepsize)
def test_pass_simple_takestep(self):
        # test that passing a custom takestep without the stepsize
        # attribute works
takestep = myTakeStep2
i = 1
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=self.kwargs_nograd,
niter=self.niter, disp=self.disp,
take_step=takestep)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_pass_accept_test(self):
# test passing a custom accept test
# makes sure it's being used and ensures all the possible return values
# are accepted.
accept_test = MyAcceptTest()
i = 1
# there's no point in running it more than a few steps.
basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=10, disp=self.disp, accept_test=accept_test)
assert_(accept_test.been_called)
def test_pass_callback(self):
# test passing a custom callback function
# This makes sure it's being used. It also returns True after 10 steps
# to ensure that it's stopping early.
callback = MyCallBack()
i = 1
# there's no point in running it more than a few steps.
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=30, disp=self.disp, callback=callback)
assert_(callback.been_called)
assert_("callback" in res.message[0])
assert_equal(res.nit, 10)
def test_minimizer_fail(self):
# test if a minimizer fails
i = 1
self.kwargs["options"] = dict(maxiter=0)
self.niter = 10
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
        # the number of failed minimizations should be the number of
        # iterations + 1: basinhopping performs one initial minimization
        # before the first hop, plus one per iteration
assert_equal(res.nit + 1, res.minimization_failures)
def test_niter_zero(self):
# gh5915, what happens if you call basinhopping with niter=0
i = 0
basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=0, disp=self.disp)
def test_seed_reproducibility(self):
# seed should ensure reproducibility between runs
minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
f_1 = []
def callback(x, f, accepted):
f_1.append(f)
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
niter=10, callback=callback, seed=10)
f_2 = []
def callback2(x, f, accepted):
f_2.append(f)
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
niter=10, callback=callback2, seed=10)
assert_equal(np.array(f_1), np.array(f_2))
def test_monotonic_basin_hopping(self):
# test 1d minimizations with gradient and T=0
i = 0
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp, T=0)
assert_almost_equal(res.x, self.sol[i], self.tol)
class Test_Storage(object):
def setup_method(self):
self.x0 = np.array(1)
self.f0 = 0
minres = OptimizeResult()
minres.x = self.x0
minres.fun = self.f0
self.storage = Storage(minres)
def test_higher_f_rejected(self):
new_minres = OptimizeResult()
new_minres.x = self.x0 + 1
new_minres.fun = self.f0 + 1
ret = self.storage.update(new_minres)
minres = self.storage.get_lowest()
assert_equal(self.x0, minres.x)
assert_equal(self.f0, minres.fun)
assert_(not ret)
def test_lower_f_accepted(self):
new_minres = OptimizeResult()
new_minres.x = self.x0 + 1
new_minres.fun = self.f0 - 1
ret = self.storage.update(new_minres)
minres = self.storage.get_lowest()
assert_(self.x0 != minres.x)
assert_(self.f0 != minres.fun)
assert_(ret)
class Test_RandomDisplacement(object):
def setup_method(self):
self.stepsize = 1.0
self.displace = RandomDisplacement(stepsize=self.stepsize)
self.N = 300000
self.x0 = np.zeros([self.N])
def test_random(self):
        # the mean should be 0
        # the variance should be (2*stepsize)**2 / 12
        # note these checks are statistical; they will fail from time to time
x = self.displace(self.x0)
v = (2. * self.stepsize) ** 2 / 12
assert_almost_equal(np.mean(x), 0., 1)
assert_almost_equal(np.var(x), v, 1)
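    def _example_uniform_variance(self):
        # Added derivation sketch: the displacement is drawn from
        # U(-stepsize, stepsize), and a uniform distribution on [a, b] has
        # variance (b - a)**2 / 12, hence the (2*stepsize)**2 / 12 used above.
        s = self.stepsize
        x = np.random.uniform(-s, s, size=self.N)
        return np.var(x), (2. * s) ** 2 / 12  # the two should roughly agree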
class Test_Metropolis(object):
def setup_method(self):
self.T = 2.
self.met = Metropolis(self.T)
def test_boolean_return(self):
        # the return must be a bool, else an error will be raised in
        # basinhopping
ret = self.met(f_new=0., f_old=1.)
assert isinstance(ret, bool)
def test_lower_f_accepted(self):
assert_(self.met(f_new=0., f_old=1.))
def test_KeyError(self):
# should raise KeyError if kwargs f_old or f_new is not passed
assert_raises(KeyError, self.met, f_old=1.)
assert_raises(KeyError, self.met, f_new=1.)
def test_accept(self):
# test that steps are randomly accepted for f_new > f_old
one_accept = False
one_reject = False
for i in range(1000):
if one_accept and one_reject:
break
ret = self.met(f_new=1., f_old=0.5)
if ret:
one_accept = True
else:
one_reject = True
assert_(one_accept)
assert_(one_reject)
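    def _example_acceptance_probability(self):
        # Added note (the standard Metropolis criterion, stated as an
        # assumption about this implementation): an uphill step is accepted
        # with probability exp(-(f_new - f_old) / T). For T = 2 and the step
        # in test_accept (0.5 -> 1.0) that is exp(-0.25) ~ 0.78, so both
        # acceptances and rejections are expected over 1000 trials.
        return np.exp(-(1.0 - 0.5) / self.T)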
def test_GH7495(self):
# an overflow in exp was producing a RuntimeWarning
# create own object here in case someone changes self.T
met = Metropolis(2)
with np.errstate(over='raise'):
met.accept_reject(0, 2000)
class Test_AdaptiveStepsize(object):
def setup_method(self):
self.stepsize = 1.
self.ts = RandomDisplacement(stepsize=self.stepsize)
self.target_accept_rate = 0.5
self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
accept_rate=self.target_accept_rate)
def test_adaptive_increase(self):
# if few steps are rejected, the stepsize should increase
x = 0.
self.takestep(x)
self.takestep.report(False)
for i in range(self.takestep.interval):
self.takestep(x)
self.takestep.report(True)
assert_(self.ts.stepsize > self.stepsize)
def test_adaptive_decrease(self):
        # if few steps are accepted, the stepsize should decrease
x = 0.
self.takestep(x)
self.takestep.report(True)
for i in range(self.takestep.interval):
self.takestep(x)
self.takestep.report(False)
assert_(self.ts.stepsize < self.stepsize)
def test_all_accepted(self):
# test that everything works OK if all steps were accepted
x = 0.
for i in range(self.takestep.interval + 1):
self.takestep(x)
self.takestep.report(True)
assert_(self.ts.stepsize > self.stepsize)
def test_all_rejected(self):
# test that everything works OK if all steps were rejected
x = 0.
for i in range(self.takestep.interval + 1):
self.takestep(x)
self.takestep.report(False)
assert_(self.ts.stepsize < self.stepsize)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test__remove_redundancy.py
"""
Unit tests for the redundancy removal routine used by linear programming.
"""
# TODO: add tests for:
# https://github.com/scipy/scipy/issues/5400
# https://github.com/scipy/scipy/issues/6690
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (
assert_,
assert_allclose,
assert_equal)
from .test_linprog import magic_square
from scipy.optimize._remove_redundancy import _remove_redundancy
def setup_module():
np.random.seed(2017)
def _assert_success(
res,
desired_fun=None,
desired_x=None,
rtol=1e-7,
atol=1e-7):
# res: linprog result object
# desired_fun: desired objective function value or None
# desired_x: desired solution or None
assert_(res.success)
assert_equal(res.status, 0)
if desired_fun is not None:
assert_allclose(
res.fun,
desired_fun,
err_msg="converged to an unexpected objective value",
rtol=rtol,
atol=atol)
if desired_x is not None:
assert_allclose(
res.x,
desired_x,
err_msg="converged to an unexpected solution",
rtol=rtol,
atol=atol)
def test_no_redundancy():
m, n = 10, 10
A0 = np.random.rand(m, n)
b0 = np.random.rand(m)
A1, b1, status, message = _remove_redundancy(A0, b0)
assert_allclose(A0, A1)
assert_allclose(b0, b1)
assert_equal(status, 0)
def test_infeasible_zero_row():
A = np.eye(3)
A[1, :] = 0
b = np.random.rand(3)
A1, b1, status, message = _remove_redundancy(A, b)
assert_equal(status, 2)
def test_remove_zero_row():
A = np.eye(3)
A[1, :] = 0
b = np.random.rand(3)
b[1] = 0
A1, b1, status, message = _remove_redundancy(A, b)
assert_equal(status, 0)
assert_allclose(A1, A[[0, 2], :])
assert_allclose(b1, b[[0, 2]])
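def _example_remove_redundancy():
    # Added sketch (not in the original file): a consistent system whose
    # third row is the sum of the first two, so exactly one row should be
    # removed and the returned status should be 0 (success).
    A = np.array([[1., 0.], [0., 1.], [1., 1.]])
    b = np.array([1., 2., 3.])
    A1, b1, status, message = _remove_redundancy(A, b)
    return A1, b1, status  # A1 is 2 x 2 with full rank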
def test_infeasible_m_gt_n():
m, n = 20, 10
A0 = np.random.rand(m, n)
b0 = np.random.rand(m)
A1, b1, status, message = _remove_redundancy(A0, b0)
assert_equal(status, 2)
def test_infeasible_m_eq_n():
m, n = 10, 10
A0 = np.random.rand(m, n)
b0 = np.random.rand(m)
A0[-1, :] = 2 * A0[-2, :]
A1, b1, status, message = _remove_redundancy(A0, b0)
assert_equal(status, 2)
def test_infeasible_m_lt_n():
m, n = 9, 10
A0 = np.random.rand(m, n)
b0 = np.random.rand(m)
A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
A1, b1, status, message = _remove_redundancy(A0, b0)
assert_equal(status, 2)
def test_m_gt_n():
m, n = 20, 10
A0 = np.random.rand(m, n)
b0 = np.random.rand(m)
x = np.linalg.solve(A0[:n, :], b0[:n])
b0[n:] = A0[n:, :].dot(x)
A1, b1, status, message = _remove_redundancy(A0, b0)
assert_equal(status, 0)
assert_equal(A1.shape[0], n)
assert_equal(np.linalg.matrix_rank(A1), n)
def test_m_gt_n_rank_deficient():
m, n = 20, 10
A0 = np.zeros((m, n))
A0[:, 0] = 1
b0 = np.ones(m)
A1, b1, status, message = _remove_redundancy(A0, b0)
assert_equal(status, 0)
assert_allclose(A1, A0[0:1, :])
assert_allclose(b1, b0[0])
def test_m_lt_n_rank_deficient():
m, n = 9, 10
A0 = np.random.rand(m, n)
b0 = np.random.rand(m)
A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
b0[-1] = np.arange(m - 1).dot(b0[:-1])
A1, b1, status, message = _remove_redundancy(A0, b0)
assert_equal(status, 0)
assert_equal(A1.shape[0], 8)
assert_equal(np.linalg.matrix_rank(A1), 8)
def test_dense1():
A = np.ones((6, 6))
A[0, :3] = 0
A[1, 3:] = 0
A[3:, ::2] = -1
A[3, :2] = 0
A[4, 2:] = 0
b = np.zeros(A.shape[0])
A2 = A[[0, 1, 3, 4], :]
b2 = np.zeros(4)
A1, b1, status, message = _remove_redundancy(A, b)
assert_allclose(A1, A2)
assert_allclose(b1, b2)
assert_equal(status, 0)
def test_dense2():
A = np.eye(6)
A[-2, -1] = 1
A[-1, :] = 1
b = np.zeros(A.shape[0])
A1, b1, status, message = _remove_redundancy(A, b)
assert_allclose(A1, A[:-1, :])
assert_allclose(b1, b[:-1])
assert_equal(status, 0)
def test_dense3():
A = np.eye(6)
A[-2, -1] = 1
A[-1, :] = 1
b = np.random.rand(A.shape[0])
b[-1] = np.sum(b[:-1])
A1, b1, status, message = _remove_redundancy(A, b)
assert_allclose(A1, A[:-1, :])
assert_allclose(b1, b[:-1])
assert_equal(status, 0)
def test_m_gt_n_sparse():
np.random.seed(2013)
m, n = 20, 5
p = 0.1
A = np.random.rand(m, n)
A[np.random.rand(m, n) > p] = 0
rank = np.linalg.matrix_rank(A)
b = np.zeros(A.shape[0])
A1, b1, status, message = _remove_redundancy(A, b)
assert_equal(status, 0)
assert_equal(A1.shape[0], rank)
assert_equal(np.linalg.matrix_rank(A1), rank)
def test_m_lt_n_sparse():
np.random.seed(2017)
m, n = 20, 50
p = 0.05
A = np.random.rand(m, n)
A[np.random.rand(m, n) > p] = 0
rank = np.linalg.matrix_rank(A)
b = np.zeros(A.shape[0])
A1, b1, status, message = _remove_redundancy(A, b)
assert_equal(status, 0)
assert_equal(A1.shape[0], rank)
assert_equal(np.linalg.matrix_rank(A1), rank)
def test_m_eq_n_sparse():
np.random.seed(2017)
m, n = 100, 100
p = 0.01
A = np.random.rand(m, n)
A[np.random.rand(m, n) > p] = 0
rank = np.linalg.matrix_rank(A)
b = np.zeros(A.shape[0])
A1, b1, status, message = _remove_redundancy(A, b)
assert_equal(status, 0)
assert_equal(A1.shape[0], rank)
assert_equal(np.linalg.matrix_rank(A1), rank)
def test_magic_square():
A, b, c, numbers = magic_square(3)
A1, b1, status, message = _remove_redundancy(A, b)
assert_equal(status, 0)
assert_equal(A1.shape[0], 23)
assert_equal(np.linalg.matrix_rank(A1), 23)
def test_magic_square2():
A, b, c, numbers = magic_square(4)
A1, b1, status, message = _remove_redundancy(A, b)
assert_equal(status, 0)
assert_equal(A1.shape[0], 39)
assert_equal(np.linalg.matrix_rank(A1), 39)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_tnc.py
"""
Unit tests for TNC optimization routine from tnc.py
"""
from numpy.testing import assert_allclose, assert_equal
from scipy import optimize
import numpy as np
from math import pow
class TestTnc(object):
"""TNC non-linear optimization.
These tests are taken from Prof. K. Schittkowski's test examples
for constrained non-linear programming.
http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm
"""
def setup_method(self):
# options for minimize
self.opts = {'disp': False, 'maxiter': 200}
# objective functions and jacobian for each test
def f1(self, x, a=100.0):
return a * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2)
def g1(self, x, a=100.0):
dif = [0, 0]
dif[1] = 2 * a * (x[1] - pow(x[0], 2))
dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0)
return dif
def fg1(self, x, a=100.0):
return self.f1(x, a), self.g1(x, a)
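    def _example_check_g1(self):
        # Added sketch: g1 is the analytic gradient of f1 (a Rosenbrock-type
        # objective); scipy.optimize.check_grad should return a value near
        # zero at any point if the two are consistent.
        return optimize.check_grad(self.f1, self.g1, [-2.0, 1.0])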
def f3(self, x):
return x[1] + pow(x[1] - x[0], 2) * 1.0e-5
def g3(self, x):
dif = [0, 0]
dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5
dif[1] = 1.0 - dif[0]
return dif
def fg3(self, x):
return self.f3(x), self.g3(x)
def f4(self, x):
return pow(x[0] + 1.0, 3) / 3.0 + x[1]
def g4(self, x):
dif = [0, 0]
dif[0] = pow(x[0] + 1.0, 2)
dif[1] = 1.0
return dif
def fg4(self, x):
return self.f4(x), self.g4(x)
def f5(self, x):
return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \
1.5 * x[0] + 2.5 * x[1] + 1.0
def g5(self, x):
dif = [0, 0]
v1 = np.cos(x[0] + x[1])
v2 = 2.0*(x[0] - x[1])
dif[0] = v1 + v2 - 1.5
dif[1] = v1 - v2 + 2.5
return dif
def fg5(self, x):
return self.f5(x), self.g5(x)
def f38(self, x):
return (100.0 * pow(x[1] - pow(x[0], 2), 2) +
pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) +
pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) +
pow(x[3] - 1.0, 2)) +
19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5
def g38(self, x):
dif = [0, 0, 0, 0]
dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) -
2.0 * (1.0 - x[0])) * 1.0e-5
dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) +
19.8 * (x[3] - 1.0)) * 1.0e-5
dif[2] = (- 360.0 * x[2] * (x[3] - pow(x[2], 2)) -
2.0 * (1.0 - x[2])) * 1.0e-5
dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) +
19.8 * (x[1] - 1.0)) * 1.0e-5
return dif
def fg38(self, x):
return self.f38(x), self.g38(x)
def f45(self, x):
return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0
def g45(self, x):
dif = [0] * 5
dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0
dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0
dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0
dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0
dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0
return dif
def fg45(self, x):
return self.f45(x), self.g45(x)
# tests
# minimize with method=TNC
def test_minimize_tnc1(self):
x0, bnds = [-2, 1], ([-np.inf, None], [-1.5, None])
xopt = [1, 1]
iterx = [] # to test callback
res = optimize.minimize(self.f1, x0, method='TNC', jac=self.g1,
bounds=bnds, options=self.opts,
callback=iterx.append)
assert_allclose(res.fun, self.f1(xopt), atol=1e-8)
assert_equal(len(iterx), res.nit)
def test_minimize_tnc1b(self):
x0, bnds = np.matrix([-2, 1]), ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x = optimize.minimize(self.f1, x0, method='TNC',
bounds=bnds, options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4)
def test_minimize_tnc1c(self):
x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x = optimize.minimize(self.fg1, x0, method='TNC',
jac=True, bounds=bnds,
options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc2(self):
x0, bnds = [-2, 1], ([-np.inf, None], [1.5, None])
xopt = [-1.2210262419616387, 1.5]
x = optimize.minimize(self.f1, x0, method='TNC',
jac=self.g1, bounds=bnds,
options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc3(self):
x0, bnds = [10, 1], ([-np.inf, None], [0.0, None])
xopt = [0, 0]
x = optimize.minimize(self.f3, x0, method='TNC',
jac=self.g3, bounds=bnds,
options=self.opts).x
assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8)
def test_minimize_tnc4(self):
x0,bnds = [1.125, 0.125], [(1, None), (0, None)]
xopt = [1, 0]
x = optimize.minimize(self.f4, x0, method='TNC',
jac=self.g4, bounds=bnds,
options=self.opts).x
assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8)
def test_minimize_tnc5(self):
x0, bnds = [0, 0], [(-1.5, 4),(-3, 3)]
xopt = [-0.54719755119659763, -1.5471975511965976]
x = optimize.minimize(self.f5, x0, method='TNC',
jac=self.g5, bounds=bnds,
options=self.opts).x
assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8)
def test_minimize_tnc38(self):
x0, bnds = np.array([-3, -1, -3, -1]), [(-10, 10)]*4
xopt = [1]*4
x = optimize.minimize(self.f38, x0, method='TNC',
jac=self.g38, bounds=bnds,
options=self.opts).x
assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8)
def test_minimize_tnc45(self):
x0, bnds = [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]
xopt = [1, 2, 3, 4, 5]
x = optimize.minimize(self.f45, x0, method='TNC',
jac=self.g45, bounds=bnds,
options=self.opts).x
assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8)
# fmin_tnc
def test_tnc1(self):
fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ),
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc1b(self):
x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True,
bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc1c(self):
x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1,
bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc2(self):
fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None])
xopt = [-1.2210262419616387, 1.5]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc3(self):
fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None])
xopt = [0, 0]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc4(self):
fg, x, bounds = self.fg4, [1.125, 0.125], [(1, None), (0, None)]
xopt = [1, 0]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc5(self):
fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)]
xopt = [-0.54719755119659763, -1.5471975511965976]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc38(self):
fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4
xopt = [1]*4
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc45(self):
fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3),
(0, 4), (0, 5)]
xopt = [1, 2, 3, 4, 5]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_linprog.py
"""
Unit test for Linear Programming
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_allclose, assert_equal
from pytest import raises as assert_raises
from scipy.optimize import linprog, OptimizeWarning
from scipy._lib._numpy_compat import _assert_warns, suppress_warnings
from scipy.sparse.linalg import MatrixRankWarning
import pytest
def magic_square(n):
np.random.seed(0)
M = n * (n**2 + 1) / 2
numbers = np.arange(n**4) // n**2 + 1
numbers = numbers.reshape(n**2, n, n)
zeros = np.zeros((n**2, n, n))
A_list = []
b_list = []
# Rule 1: use every number exactly once
for i in range(n**2):
A_row = zeros.copy()
A_row[i, :, :] = 1
A_list.append(A_row.flatten())
b_list.append(1)
# Rule 2: Only one number per square
for i in range(n):
for j in range(n):
A_row = zeros.copy()
A_row[:, i, j] = 1
A_list.append(A_row.flatten())
b_list.append(1)
# Rule 3: sum of rows is M
for i in range(n):
A_row = zeros.copy()
A_row[:, i, :] = numbers[:, i, :]
A_list.append(A_row.flatten())
b_list.append(M)
# Rule 4: sum of columns is M
for i in range(n):
A_row = zeros.copy()
A_row[:, :, i] = numbers[:, :, i]
A_list.append(A_row.flatten())
b_list.append(M)
# Rule 5: sum of diagonals is M
A_row = zeros.copy()
A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)]
A_list.append(A_row.flatten())
b_list.append(M)
A_row = zeros.copy()
A_row[:, range(n), range(-1, -n - 1, -1)] = \
numbers[:, range(n), range(-1, -n - 1, -1)]
A_list.append(A_row.flatten())
b_list.append(M)
A = np.array(np.vstack(A_list), dtype=float)
b = np.array(b_list, dtype=float)
c = np.random.rand(A.shape[1])
return A, b, c, numbers
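def _example_magic_square_rank():
    # Added sketch (not in the original file): the magic-square system is
    # deliberately rank-deficient, which is what makes it a useful fixture
    # for the redundancy-removal tests -- for n = 3 it has 26 equality rows
    # (9 + 9 + 3 + 3 + 2 from the five rules) but rank only 23.
    A, b, c, numbers = magic_square(3)
    return A.shape[0], np.linalg.matrix_rank(A)  # (26, 23)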
def lpgen_2d(m, n):
""" -> A b c LP test: m*n vars, m+n constraints
row sums == n/m, col sums == 1
https://gist.github.com/denis-bz/8647461
"""
np.random.seed(0)
c = - np.random.exponential(size=(m, n))
Arow = np.zeros((m, m * n))
brow = np.zeros(m)
for j in range(m):
j1 = j + 1
Arow[j, j * n:j1 * n] = 1
brow[j] = n / m
Acol = np.zeros((n, m * n))
bcol = np.zeros(n)
for j in range(n):
j1 = j + 1
Acol[j, j::n] = 1
bcol[j] = 1
A = np.vstack((Arow, Acol))
b = np.hstack((brow, bcol))
return A, b, c.ravel()
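def _example_lpgen_2d_usage():
    # Added sketch (assumes the interior-point method is available in this
    # scipy version): any feasible point is a nonnegative m x n matrix with
    # row sums <= n/m and column sums <= 1, and because every cost is
    # strictly negative the constraints should be tight at the optimum.
    A, b, c = lpgen_2d(20, 20)
    res = linprog(c, A_ub=A, b_ub=b, method='interior-point')
    X = res.x.reshape(20, 20)
    return X.sum(axis=0), X.sum(axis=1)  # both should be close to all ones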
def _assert_infeasible(res):
# res: linprog result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 2, "failed to report infeasible status")
def _assert_unbounded(res):
# res: linprog result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 3, "failed to report unbounded status")
def _assert_success(res, desired_fun=None, desired_x=None,
rtol=1e-8, atol=1e-8):
# res: linprog result object
# desired_fun: desired objective function value or None
# desired_x: desired solution or None
if not res.success:
msg = "linprog status {0}, message: {1}".format(res.status,
res.message)
raise AssertionError(msg)
assert_equal(res.status, 0)
if desired_fun is not None:
assert_allclose(res.fun, desired_fun,
err_msg="converged to an unexpected objective value",
rtol=rtol, atol=atol)
if desired_x is not None:
assert_allclose(res.x, desired_x,
err_msg="converged to an unexpected solution",
rtol=rtol, atol=atol)
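def _example_minimal_linprog():
    # Added sketch: the smallest useful linprog call. Variables are bounded
    # below by 0 by default, so this minimizes -x subject to 0 <= x <= 3.
    res = linprog(c=[-1.0], A_ub=[[1.0]], b_ub=[3.0])
    return res  # res.x ~ [3.] and res.fun ~ -3.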
class LinprogCommonTests(object):
def test_aliasing_b_ub(self):
c = np.array([1.0])
A_ub = np.array([[1.0]])
b_ub_orig = np.array([3.0])
b_ub = b_ub_orig.copy()
bounds = (-4.0, np.inf)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-4, desired_x=[-4])
assert_allclose(b_ub_orig, b_ub)
def test_aliasing_b_eq(self):
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq_orig = np.array([3.0])
b_eq = b_eq_orig.copy()
bounds = (-4.0, np.inf)
res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
assert_allclose(b_eq_orig, b_eq)
def test_bounds_second_form_unbounded_below(self):
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq = np.array([3.0])
bounds = (None, 10.0)
res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
def test_bounds_second_form_unbounded_above(self):
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq = np.array([3.0])
bounds = (1.0, None)
res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
def test_non_ndarray_args(self):
c = [1.0]
A_ub = [[1.0]]
b_ub = [3.0]
A_eq = [[1.0]]
b_eq = [2.0]
bounds = (-1.0, 10.0)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, method=self.method, options=self.options)
_assert_success(res, desired_fun=2, desired_x=[2])
def test_linprog_upper_bound_constraints(self):
# Maximize a linear function subject to only linear upper bound
# constraints.
# http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf
c = np.array([3, 2]) * -1 # maximize
A_ub = [[2, 1],
[1, 1],
[1, 0]]
b_ub = [10, 8, 4]
res = (linprog(c, A_ub=A_ub, b_ub=b_ub,
method=self.method, options=self.options))
_assert_success(res, desired_fun=-18, desired_x=[2, 6])
def test_linprog_mixed_constraints(self):
# Minimize linear function subject to non-negative variables.
# http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf
c = [6, 3]
A_ub = [[0, 3],
[-1, -1],
[-2, 1]]
b_ub = [2, -1, -1]
res = linprog(c, A_ub=A_ub, b_ub=b_ub,
method=self.method, options=self.options)
_assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3])
def test_linprog_cyclic_recovery(self):
        # Test linprog's recovery from cycling using the Klee-Minty problem
# Klee-Minty http://www.math.ubc.ca/~israel/m340/kleemin3.pdf
c = np.array([100, 10, 1]) * -1 # maximize
A_ub = [[1, 0, 0],
[20, 1, 0],
[200, 20, 1]]
b_ub = [1, 100, 10000]
res = linprog(c, A_ub=A_ub, b_ub=b_ub,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7)
def test_linprog_cyclic_bland(self):
# Test the effect of Bland's rule on a cycling problem
c = np.array([-10, 57, 9, 24.])
A_ub = np.array([[0.5, -5.5, -2.5, 9],
[0.5, -1.5, -0.5, 1],
[1, 0, 0, 0]])
b_ub = [0, 0, 1]
# "interior-point" will succeed, "simplex" will fail
res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=dict(maxiter=100),
method=self.method)
if self.method == "simplex":
assert_(not res.success)
res = linprog(c, A_ub=A_ub, b_ub=b_ub,
options=dict(maxiter=100, bland=True,),
method=self.method)
_assert_success(res, desired_x=[1, 0, 1, 0])
def test_linprog_cyclic_bland_bug_8561(self):
# Test that pivot row is chosen correctly when using Bland's rule
c = np.array([7, 0, -4, 1.5, 1.5])
A_ub = np.array([
[4, 5.5, 1.5, 1.0, -3.5],
[1, -2.5, -2, 2.5, 0.5],
[3, -0.5, 4, -12.5, -7],
[-1, 4.5, 2, -3.5, -2],
[5.5, 2, -4.5, -1, 9.5]])
b_ub = np.array([0, 0, 0, 0, 1])
if self.method == "simplex":
res = linprog(c, A_ub=A_ub, b_ub=b_ub,
options=dict(maxiter=100, bland=True),
method=self.method)
else:
res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=dict(maxiter=100),
method=self.method)
_assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3])
def test_linprog_unbounded(self):
# Test linprog response to an unbounded problem
c = np.array([1, 1]) * -1 # maximize
A_ub = [[-1, 1],
[-1, -1]]
b_ub = [-1, -2]
res = linprog(c, A_ub=A_ub, b_ub=b_ub,
method=self.method, options=self.options)
_assert_unbounded(res)
def test_linprog_infeasible(self):
        # Test linprog response to an infeasible problem
c = [-1, -1]
A_ub = [[1, 0],
[0, 1],
[-1, -1]]
b_ub = [2, 2, -5]
res = linprog(c, A_ub=A_ub, b_ub=b_ub,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_nontrivial_problem(self):
# Test linprog for a problem involving all constraint types,
# negative resource limits, and rounding issues.
c = [-1, 8, 4, -6]
A_ub = [[-7, -7, 6, 9],
[1, -1, -3, 0],
[10, -10, -7, 7],
[6, -1, 3, 4]]
b_ub = [-3, 6, -6, 6]
A_eq = [[-10, 1, 1, -8]]
b_eq = [-4]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_success(res, desired_fun=7083 / 1391,
desired_x=[101 / 1391, 1462 / 1391, 0, 752 / 1391])
def test_negative_variable(self):
# Test linprog with a problem with one unbounded variable and
# another with a negative lower bound.
c = np.array([-1, 4]) * -1 # maximize
A_ub = np.array([[-3, 1],
[1, 2]], dtype=np.float64)
A_ub_orig = A_ub.copy()
b_ub = [6, 4]
x0_bounds = (-np.inf, np.inf)
x1_bounds = (-3, np.inf)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=(x0_bounds, x1_bounds),
method=self.method, options=self.options)
assert_equal(A_ub, A_ub_orig) # user input not overwritten
_assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7])
def test_large_problem(self):
        # Test linprog with a rather large problem (400 variables,
        # 40 constraints) generated by https://gist.github.com/denis-bz/8647461
A, b, c = lpgen_2d(20, 20)
res = linprog(c, A_ub=A, b_ub=b,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-64.049494229)
def test_network_flow(self):
# A network flow problem with supply and demand at nodes
# and with costs along directed edges.
# https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf
c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18]
n, p = -1, 1
A_eq = [
[n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0],
[p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0],
[0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0],
[0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p],
[0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]]
b_eq = [0, 19, -16, 33, 0, 0, -36]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7)
def test_network_flow_limited_capacity(self):
# A network flow problem with supply and demand at nodes
# and with costs and capacities along directed edges.
# http://blog.sommer-forst.de/2013/04/10/
cost = [2, 2, 1, 3, 1]
bounds = [
[0, 4],
[0, 2],
[0, 2],
[0, 3],
[0, 5]]
n, p = -1, 1
A_eq = [
[n, n, 0, 0, 0],
[p, 0, n, n, 0],
[0, p, p, 0, n],
[0, 0, 0, p, p]]
b_eq = [-4, 0, 0, 4]
if self.method == "simplex":
# Including the callback here ensures the solution can be
# calculated correctly, even when phase 1 terminated
# with some of the artificial variables as pivots
# (i.e. basis[:m] contains elements corresponding to
# the artificial variables)
res = linprog(c=cost, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=self.options,
callback=lambda x, **kwargs: None)
else:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(OptimizeWarning, "Solving system with option...")
res = linprog(c=cost, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=14)
def test_simplex_algorithm_wikipedia_example(self):
# http://en.wikipedia.org/wiki/Simplex_algorithm#Example
Z = [-2, -3, -4]
A_ub = [
[3, 2, 1],
[2, 5, 3]]
b_ub = [10, 15]
res = linprog(c=Z, A_ub=A_ub, b_ub=b_ub,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-20)
def test_enzo_example(self):
# http://projects.scipy.org/scipy/attachment/ticket/1252/lp2.py
#
# Translated from Octave code at:
# http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm
# and placed under MIT licence by Enzo Michelangeli
# with permission explicitly granted by the original author,
# Prof. Kazunobu Yoshida
c = [4, 8, 3, 0, 0, 0]
A_eq = [
[2, 5, 3, -1, 0, 0],
[3, 2.5, 8, 0, -1, 0],
[8, 10, 4, 0, 0, -1]]
b_eq = [185, 155, 600]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_success(res, desired_fun=317.5,
desired_x=[66.25, 0, 17.5, 0, 183.75, 0],
atol=6e-6, rtol=1e-7)
def test_enzo_example_b(self):
# rescued from https://github.com/scipy/scipy/pull/218
c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8]
A_eq = [[-1, -1, -1, 0, 0, 0],
[0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1]]
b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3]
if self.method == "simplex":
# Including the callback here ensures the solution can be
# calculated correctly.
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options,
callback=lambda x, **kwargs: None)
else:
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-1.77,
desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3])
def test_enzo_example_c_with_degeneracy(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 20
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0, desired_x=np.zeros(m))
def test_enzo_example_c_with_unboundedness(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(m) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_unbounded(res)
def test_enzo_example_c_with_infeasibility(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(m) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [1, 1]
if self.method == "simplex":
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
else:
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, method=self.method,
options={"presolve": False})
_assert_infeasible(res)
def test_unknown_options_or_solver(self):
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,
options={}):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method,
options=options)
_assert_warns(OptimizeWarning, f,
c, A_ub=A_ub, b_ub=b_ub, options=dict(spam='42'))
assert_raises(ValueError, linprog,
c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki')
def test_no_constraints(self):
res = linprog([-1, -2], method=self.method, options=self.options)
if self.method == "simplex":
# Why should x be 0,0? inf,inf is more correct, IMO
assert_equal(res.x, [0, 0])
_assert_unbounded(res)
def test_simple_bounds(self):
res = linprog([1, 2], bounds=(1, 2),
method=self.method, options=self.options)
_assert_success(res, desired_x=[1, 1])
res = linprog([1, 2], bounds=[(1, 2), (1, 2)],
method=self.method, options=self.options)
_assert_success(res, desired_x=[1, 1])
def test_invalid_inputs(self):
def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
for bad_bound in [[(5, 0), (1, 2), (3, 4)],
[(1, 2), (3, 4)],
[(1, 2), (3, 4), (3, 4, 5)],
[(1, 2), (np.inf, np.inf), (3, 4)],
[(1, 2), (-np.inf, -np.inf), (3, 4)],
]:
assert_raises(ValueError, f, [1, 2, 3], bounds=bad_bound)
assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2])
assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1])
assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2])
assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1])
assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1)
if ("_sparse_presolve" in self.options and
self.options["_sparse_presolve"]):
return
        # This test doesn't make sense for sparse presolve:
        # there are no 3-D sparse matrices.
assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1)
def test_basic_artificial_vars(self):
# Test if linprog succeeds when at the end of Phase 1 some artificial
# variables remain basic, and the row in T corresponding to the
# artificial variables is not all zero.
c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004])
A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0],
[0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0],
[1.0, 1.0, 0, 0, 0, 0]])
b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0])
A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]])
b_eq = np.array([0, 0])
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0, desired_x=np.zeros_like(c),
atol=2e-6)
def test_empty_constraint_2(self):
res = linprog([1, -1, 1, -1],
bounds=[(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)],
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2)
def test_zero_row_2(self):
A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
b_eq = [0, 3, 0]
c = [1, 2, 3]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3)
def test_zero_row_4(self):
A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
b_ub = [0, 3, 0]
c = [1, 2, 3]
res = linprog(c=c, A_ub=A_ub, b_ub=b_ub,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0)
def test_zero_column_1(self):
m, n = 3, 4
np.random.seed(0)
c = np.random.rand(n)
c[1] = 1
A_eq = np.random.rand(m, n)
A_eq[:, 1] = 0
b_eq = np.random.rand(m)
A_ub = [[1, 0, 1, 1]]
b_ub = 3
res = linprog(c, A_ub, b_ub, A_eq, b_eq,
bounds=[(-10, 10), (-10, 10),
(-10, None), (None, None)],
method=self.method, options=self.options)
_assert_success(res, desired_fun=-9.7087836730413404)
def test_singleton_row_eq_2(self):
c = [1, 1, 1, 2]
A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
b_eq = [1, 2, 1, 4]
res = linprog(c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_success(res, desired_fun=4)
def test_singleton_row_ub_2(self):
c = [1, 1, 1, 2]
A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
b_ub = [1, 2, -0.5, 4]
res = linprog(c, A_ub=A_ub, b_ub=b_ub,
bounds=[(None, None), (0, None), (0, None), (0, None)],
method=self.method, options=self.options)
_assert_success(res, desired_fun=0.5)
def test_remove_redundancy_infeasibility(self):
m, n = 10, 10
c = np.random.rand(n)
A0 = np.random.rand(m, n)
b0 = np.random.rand(m)
A0[-1, :] = 2 * A0[-2, :]
b0[-1] *= -1
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_eq=A0, b_eq=b0,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bounded_below_only(self):
A = np.eye(3)
b = np.array([1, 2, 3])
c = np.ones(3)
res = linprog(c, A_eq=A, b_eq=b, bounds=(0.5, np.inf),
method=self.method, options=self.options)
_assert_success(res, desired_x=b, desired_fun=np.sum(b))
def test_bounded_above_only(self):
A = np.eye(3)
b = np.array([1, 2, 3])
c = np.ones(3)
res = linprog(c, A_eq=A, b_eq=b, bounds=(-np.inf, 4),
method=self.method, options=self.options)
_assert_success(res, desired_x=b, desired_fun=np.sum(b))
def test_unbounded_below_and_above(self):
A = np.eye(3)
b = np.array([1, 2, 3])
c = np.ones(3)
res = linprog(c, A_eq=A, b_eq=b, bounds=(-np.inf, np.inf),
method=self.method, options=self.options)
_assert_success(res, desired_x=b, desired_fun=np.sum(b))
def test_bug_8663(self):
A = [[0, -7]]
b = [-6]
c = [1, 5]
bounds = [(0, None), (None, None)]
res = linprog(c, A_eq=A, b_eq=b, bounds=bounds,
method=self.method, options=self.options)
_assert_success(res,
desired_x=[0, 6./7],
desired_fun=5*6./7)
class TestLinprogSimplex(LinprogCommonTests):
method = "simplex"
options = {}
def test_callback(self):
# Check that callback is as advertised
callback_complete = [False]
last_xk = []
def cb(xk, **kwargs):
kwargs.pop('tableau')
assert_(isinstance(kwargs.pop('phase'), int))
assert_(isinstance(kwargs.pop('nit'), int))
i, j = kwargs.pop('pivot')
assert_(np.isscalar(i))
assert_(np.isscalar(j))
basis = kwargs.pop('basis')
assert_(isinstance(basis, np.ndarray))
assert_(basis.dtype == np.int_)
complete = kwargs.pop('complete')
assert_(isinstance(complete, bool))
if complete:
last_xk.append(xk)
callback_complete[0] = True
else:
assert_(not callback_complete[0])
# no more kwargs
assert_(not kwargs)
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method)
assert_(callback_complete[0])
assert_allclose(last_xk[0], res.x)
class BaseTestLinprogIP(LinprogCommonTests):
method = "interior-point"
def test_bounds_equal_but_infeasible(self):
c = [-4, 1]
A_ub = [[7, -2], [0, 1], [2, -2]]
b_ub = [14, 0, 3]
bounds = [(2, 2), (0, None)]
res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
method=self.method)
_assert_infeasible(res)
def test_bounds_equal_but_infeasible2(self):
c = [-4, 1]
A_eq = [[7, -2], [0, 1], [2, -2]]
b_eq = [14, 0, 3]
bounds = [(2, 2), (0, None)]
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method)
_assert_infeasible(res)
def test_magic_square_bug_7044(self):
# test linprog with a problem with a rank-deficient A_eq matrix
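        # (The row-sum, column-sum and diagonal-sum constraints of a magic
        # square are linearly dependent, which is why A_eq is rank-deficient.)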
A, b, c, N = magic_square(3)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_eq=A, b_eq=b, bounds=(0, 1),
method=self.method, options=self.options)
_assert_success(res, desired_fun=1.730550597)
def test_bug_6690(self):
# https://github.com/scipy/scipy/issues/6690
A_eq = np.array([[0., 0., 0., 0.93, 0., 0.65, 0., 0., 0.83, 0.]])
b_eq = np.array([0.9626])
A_ub = np.array([[0., 0., 0., 1.18, 0., 0., 0., -0.2, 0.,
-0.22],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0.43, 0., 0., 0., 0., 0., 0.],
[0., -1.22, -0.25, 0., 0., 0., -2.06, 0., 0.,
1.37],
[0., 0., 0., 0., 0., 0., 0., -0.25, 0., 0.]])
b_ub = np.array([0.615, 0., 0.172, -0.869, -0.022])
bounds = np.array(
[[-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73],
[0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15]]).T
c = np.array([-1.64, 0.7, 1.8, -1.06, -1.16,
0.26, 2.13, 1.53, 0.66, 0.28])
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(OptimizeWarning, "Solving system with option...")
sol = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, method=self.method,
options=self.options)
_assert_success(sol, desired_fun=-1.191, rtol=1e-6)
def test_bug_5400(self):
# https://github.com/scipy/scipy/issues/5400
bounds = [
(0, None),
(0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100),
(0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900),
(0, None), (0, None), (0, None), (0, None), (0, None), (0, None)]
f = 1 / 9
g = -1e4
h = -3.1
A_ub = np.array([
[1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0],
[1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0],
[1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1],
[0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0],
[0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0],
[0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0],
[0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0],
[0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]])
b_ub = np.array([0.0, 0, 0, 0, 0, 0, 0, 0, 0])
c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
res = linprog(c, A_ub, b_ub, bounds=bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-106.63507541835018)
def test_empty_constraint_1(self):
# detected in presolve?
res = linprog([-1, 1, -1, 1],
bounds=[(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)],
method=self.method, options=self.options)
_assert_unbounded(res)
assert_equal(res.nit, 0)
def test_singleton_row_eq_1(self):
# detected in presolve?
c = [1, 1, 1, 2]
A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
b_eq = [1, 2, 2, 4]
res = linprog(c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_infeasible(res)
assert_equal(res.nit, 0)
def test_singleton_row_ub_1(self):
# detected in presolve?
c = [1, 1, 1, 2]
A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
b_ub = [1, 2, -2, 4]
res = linprog(c, A_ub=A_ub, b_ub=b_ub,
bounds=[(None, None), (0, None), (0, None), (0, None)],
method=self.method, options=self.options)
_assert_infeasible(res)
assert_equal(res.nit, 0)
def test_zero_column_2(self):
# detected in presolve?
np.random.seed(0)
m, n = 2, 4
c = np.random.rand(n)
c[1] = -1
A_eq = np.random.rand(m, n)
A_eq[:, 1] = 0
b_eq = np.random.rand(m)
A_ub = np.random.rand(m, n)
A_ub[:, 1] = 0
b_ub = np.random.rand(m)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds=(None, None),
method=self.method, options=self.options)
_assert_unbounded(res)
assert_equal(res.nit, 0)
def test_zero_row_1(self):
# detected in presolve?
m, n = 2, 4
c = np.random.rand(n)
A_eq = np.random.rand(m, n)
A_eq[0, :] = 0
b_eq = np.random.rand(m)
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
_assert_infeasible(res)
assert_equal(res.nit, 0)
def test_zero_row_3(self):
# detected in presolve?
m, n = 2, 4
c = np.random.rand(n)
A_ub = np.random.rand(m, n)
A_ub[0, :] = 0
b_ub = -np.random.rand(m)
res = linprog(c=c, A_ub=A_ub, b_ub=b_ub,
method=self.method, options=self.options)
_assert_infeasible(res)
assert_equal(res.nit, 0)
def test_infeasible_ub(self):
# detected in presolve?
c = [1]
A_ub = [[2]]
b_ub = 4
bounds = (5, 6)
res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
assert_equal(res.nit, 0)
def test_type_error(self):
c = [1]
A_eq = [[1]]
b_eq = "hello"
assert_raises(TypeError, linprog,
c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
def test_equal_bounds_no_presolve(self):
        # There was a bug when a lower and an upper bound were equal but
        # presolve was not on to eliminate the variable. The bound
        # was being converted to an equality constraint, but the variable
        # was not eliminated, leading to issues in postprocessing.
c = [1, 2]
A_ub = [[1, 2], [1.1, 2.2]]
b_ub = [4, 8]
bounds = [(1, 2), (2, 2)]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
method=self.method, options=o)
_assert_infeasible(res)
def test_unbounded_below_no_presolve_corrected(self):
c = [1]
bounds = [(None, 1)]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c=c, bounds=bounds,
method=self.method,
options=o)
_assert_unbounded(res)
def test_bug_8664(self):
# Weak test. Ideally should _detect infeasibility_ for all options.
c = [4]
A_ub = [[2], [5]]
b_ub = [4, 4]
A_eq = [[0], [-8], [9]]
b_eq = [3, 2, 10]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
sup.filter(OptimizeWarning, "Solving system with option...")
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, options=o,
method=self.method)
assert_(not res.success, "incorrectly reported success")
class TestLinprogIPSpecific:
method = "interior-point"
# the following tests don't need to be performed separately for
# sparse presolve, sparse after presolve, and dense
def test_unbounded_below_no_presolve_original(self):
# formerly caused segfault in TravisCI w/ "cholesky":True
c = [-1]
bounds = [(None, 1)]
res = linprog(c=c, bounds=bounds,
method=self.method,
options={"presolve": False, "cholesky": True})
_assert_success(res, desired_fun=-1)
def test_cholesky(self):
# Test with a rather large problem (400 variables,
# 40 constraints) generated by https://gist.github.com/denis-bz/8647461
# use cholesky factorization and triangular solves
A, b, c = lpgen_2d(20, 20)
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"cholesky": True}) # only for dense
_assert_success(res, desired_fun=-64.049494229)
def test_alternate_initial_point(self):
# Test with a rather large problem (400 variables,
# 40 constraints) generated by https://gist.github.com/denis-bz/8647461
# use "improved" initial point
A, b, c = lpgen_2d(20, 20)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(OptimizeWarning, "Solving system with option...")
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"ip": True, "disp": True})
# ip code is independent of sparse/dense
_assert_success(res, desired_fun=-64.049494229)
def test_maxiter(self):
# Test with a rather large problem (400 variables,
# 40 constraints) generated by https://gist.github.com/denis-bz/8647461
# test iteration limit
A, b, c = lpgen_2d(20, 20)
maxiter = np.random.randint(6) + 1 # problem takes 7 iterations
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"maxiter": maxiter})
# maxiter is independent of sparse/dense
assert_equal(res.status, 1)
assert_equal(res.nit, maxiter)
def test_disp(self):
# Test with a rather large problem (400 variables,
# 40 constraints) generated by https://gist.github.com/denis-bz/8647461
# test that display option does not break anything.
A, b, c = lpgen_2d(20, 20)
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"disp": True})
# disp is independent of sparse/dense
_assert_success(res, desired_fun=-64.049494229)
def test_callback(self):
def f():
pass
assert_raises(NotImplementedError, linprog, c=1, callback=f,
method=self.method)
class TestLinprogIPSparse(BaseTestLinprogIP):
options = {"sparse": True}
@pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
def test_bug_6690(self):
# Test defined in base class, but can't mark as xfail there
super(TestLinprogIPSparse, self).test_bug_6690()
def test_magic_square_sparse_no_presolve(self):
# test linprog with a problem with a rank-deficient A_eq matrix
A, b, c, N = magic_square(3)
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
sup.filter(OptimizeWarning, "Solving system with option...")
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_eq=A, b_eq=b, bounds=(0, 1),
options=o, method=self.method)
_assert_success(res, desired_fun=1.730550597)
def test_sparse_solve_options(self):
A, b, c, N = magic_square(3)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(OptimizeWarning, "Invalid permc_spec option")
o = {key: self.options[key] for key in self.options}
permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A',
'COLAMD', 'ekki-ekki-ekki')
for permc_spec in permc_specs:
o["permc_spec"] = permc_spec
res = linprog(c, A_eq=A, b_eq=b, bounds=(0, 1),
method=self.method, options=o)
_assert_success(res, desired_fun=1.730550597)
class TestLinprogIPDense(BaseTestLinprogIP):
options = {"sparse": False}
class TestLinprogIPSparsePresolve(BaseTestLinprogIP):
options = {"sparse": True, "_sparse_presolve": True}
@pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
def test_bug_6690(self):
# Test defined in base class, but can't mark as xfail there
super(TestLinprogIPSparsePresolve, self).test_bug_6690()
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_constraints.py
from __future__ import division, print_function, absolute_import
import pytest
import numpy as np
from numpy.testing import TestCase, assert_array_equal
import scipy.sparse as sps
from scipy.optimize._constraints import (
Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint,
new_bounds_to_old, old_bound_to_new, strict_bounds)
class TestStrictBounds(TestCase):
def test_scalarvalue_unique_enforce_feasibility(self):
m = 3
lb = 2
ub = 4
enforce_feasibility = False
strict_lb, strict_ub = strict_bounds(lb, ub,
enforce_feasibility,
m)
assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf])
assert_array_equal(strict_ub, [np.inf, np.inf, np.inf])
enforce_feasibility = True
strict_lb, strict_ub = strict_bounds(lb, ub,
enforce_feasibility,
m)
assert_array_equal(strict_lb, [2, 2, 2])
assert_array_equal(strict_ub, [4, 4, 4])
def test_vectorvalue_unique_enforce_feasibility(self):
m = 3
lb = [1, 2, 3]
ub = [4, 5, 6]
enforce_feasibility = False
strict_lb, strict_ub = strict_bounds(lb, ub,
enforce_feasibility,
m)
assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf])
assert_array_equal(strict_ub, [np.inf, np.inf, np.inf])
enforce_feasibility = True
strict_lb, strict_ub = strict_bounds(lb, ub,
enforce_feasibility,
m)
assert_array_equal(strict_lb, [1, 2, 3])
assert_array_equal(strict_ub, [4, 5, 6])
def test_scalarvalue_vector_enforce_feasibility(self):
m = 3
lb = 2
ub = 4
enforce_feasibility = [False, True, False]
strict_lb, strict_ub = strict_bounds(lb, ub,
enforce_feasibility,
m)
assert_array_equal(strict_lb, [-np.inf, 2, -np.inf])
assert_array_equal(strict_ub, [np.inf, 4, np.inf])
def test_vectorvalue_vector_enforce_feasibility(self):
m = 3
lb = [1, 2, 3]
ub = [4, 6, np.inf]
enforce_feasibility = [True, False, True]
strict_lb, strict_ub = strict_bounds(lb, ub,
enforce_feasibility,
m)
assert_array_equal(strict_lb, [1, -np.inf, 3])
assert_array_equal(strict_ub, [4, np.inf, np.inf])
def test_prepare_constraint_infeasible_x0():
lb = np.array([0, 20, 30])
ub = np.array([0.5, np.inf, 70])
x0 = np.array([1, 2, 3])
enforce_feasibility = np.array([False, True, True], dtype=bool)
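    # x0 violates the second and third bounds, which are marked as enforced,
    # so constructing the PreparedConstraint must raise ValueError.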
bounds = Bounds(lb, ub, enforce_feasibility)
pytest.raises(ValueError, PreparedConstraint, bounds, x0)
x0 = np.array([1, 2, 3, 4])
A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
enforce_feasibility = np.array([True, True, True], dtype=bool)
linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility)
pytest.raises(ValueError, PreparedConstraint, linear, x0)
def fun(x):
return A.dot(x)
def jac(x):
return A
def hess(x, v):
return sps.csr_matrix((4, 4))
nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess,
enforce_feasibility)
pytest.raises(ValueError, PreparedConstraint, nonlinear, x0)
def test_new_bounds_to_old():
lb = np.array([-np.inf, 2, 3])
ub = np.array([3, np.inf, 10])
bounds = [(None, 3), (2, None), (3, 10)]
assert_array_equal(new_bounds_to_old(lb, ub, 3), bounds)
bounds_single_lb = [(-1, 3), (-1, None), (-1, 10)]
assert_array_equal(new_bounds_to_old(-1, ub, 3), bounds_single_lb)
bounds_no_lb = [(None, 3), (None, None), (None, 10)]
assert_array_equal(new_bounds_to_old(-np.inf, ub, 3), bounds_no_lb)
bounds_single_ub = [(None, 20), (2, 20), (3, 20)]
assert_array_equal(new_bounds_to_old(lb, 20, 3), bounds_single_ub)
bounds_no_ub = [(None, None), (2, None), (3, None)]
assert_array_equal(new_bounds_to_old(lb, np.inf, 3), bounds_no_ub)
bounds_single_both = [(1, 2), (1, 2), (1, 2)]
assert_array_equal(new_bounds_to_old(1, 2, 3), bounds_single_both)
bounds_no_both = [(None, None), (None, None), (None, None)]
assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3), bounds_no_both)
def test_old_bounds_to_new():
bounds = ([1, 2], (None, 3), (-1, None))
lb_true = np.array([1, -np.inf, -1])
ub_true = np.array([2, 3, np.inf])
lb, ub = old_bound_to_new(bounds)
assert_array_equal(lb, lb_true)
assert_array_equal(ub, ub_true)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_cobyla.py
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from numpy.testing import assert_allclose, assert_
from scipy.optimize import fmin_cobyla, minimize
class TestCobyla(object):
def setup_method(self):
self.x0 = [4.95, 0.66]
self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3]
self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5,
'maxiter': 100}
def fun(self, x):
return x[0]**2 + abs(x[1])**3
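    # The two inequality constraints below (each interpreted as g(x) >= 0)
    # together enforce the equality x[0]**2 + x[1]**2 == 25, a circle of
    # radius 5 on which the known solution lies.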
def con1(self, x):
return x[0]**2 + x[1]**2 - 25
def con2(self, x):
return -self.con1(x)
def test_simple(self):
# use disp=True as smoke test for gh-8118
x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1,
rhoend=1e-5, maxfun=100, disp=True)
assert_allclose(x, self.solution, atol=1e-4)
def test_minimize_simple(self):
# Minimize with method='COBYLA'
cons = ({'type': 'ineq', 'fun': self.con1},
{'type': 'ineq', 'fun': self.con2})
sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
options=self.opts)
assert_allclose(sol.x, self.solution, atol=1e-4)
assert_(sol.success, sol.message)
assert_(sol.maxcv < 1e-5, sol)
assert_(sol.nfev < 70, sol)
assert_(sol.fun < self.fun(self.solution) + 1e-3, sol)
def test_minimize_constraint_violation(self):
np.random.seed(1234)
pb = np.random.rand(10, 10)
spread = np.random.rand(10)
def p(w):
return pb.dot(w)
def f(w):
return -(w * spread).sum()
def c1(w):
return 500 - abs(p(w)).sum()
def c2(w):
return 5 - abs(p(w).sum())
def c3(w):
return 5 - abs(p(w)).max()
cons = ({'type': 'ineq', 'fun': c1},
{'type': 'ineq', 'fun': c2},
{'type': 'ineq', 'fun': c3})
w0 = np.zeros((10, 1))
sol = minimize(f, w0, method='cobyla', constraints=cons,
options={'catol': 1e-6})
assert_(sol.maxcv > 1e-6)
assert_(not sol.success)
def test_vector_constraints():
# test that fmin_cobyla and minimize can take a combination
# of constraints, some returning a number and others an array
def fun(x):
return (x[0] - 1)**2 + (x[1] - 2.5)**2
def fmin(x):
return fun(x) - 1
def cons1(x):
a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]])
return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] +
a[i, 2] for i in range(len(a))])
def cons2(x):
return x # identity, acts as bounds x > 0
x0 = np.array([2, 0])
cons_list = [fun, cons1, cons2]
xsol = [1.4, 1.7]
fsol = 0.8
# testing fmin_cobyla
sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5)
assert_allclose(sol, xsol, atol=1e-4)
sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5)
assert_allclose(fun(sol), 1, atol=1e-4)
# testing minimize
constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list]
sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
assert_allclose(sol.x, xsol, atol=1e-4)
assert_(sol.success, sol.message)
assert_allclose(sol.fun, fsol, atol=1e-4)
constraints = {'type': 'ineq', 'fun': fmin}
sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
assert_allclose(sol.fun, 1, atol=1e-4)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_linesearch.py
"""
Tests for line search routines
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_, assert_equal, \
assert_array_almost_equal, assert_array_almost_equal_nulp, assert_warns
from scipy._lib._numpy_compat import suppress_warnings
import scipy.optimize.linesearch as ls
from scipy.optimize.linesearch import LineSearchWarning
import numpy as np
def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""):
"""
Check that strong Wolfe conditions apply
"""
phi1 = phi(s)
phi0 = phi(0)
derphi0 = derphi(0)
derphi1 = derphi(s)
msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % (
s, phi0, phi1, derphi0, derphi1, err_msg)
assert_(phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg)
assert_(abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg)
def assert_armijo(s, phi, c1=1e-4, err_msg=""):
"""
Check that Armijo condition applies
"""
phi1 = phi(s)
phi0 = phi(0)
msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (s, phi0, phi1, err_msg)
assert_(phi1 <= (1 - c1*s)*phi0, msg)
def assert_line_wolfe(x, p, s, f, fprime, **kw):
assert_wolfe(s, phi=lambda sp: f(x + p*sp),
derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw)
def assert_line_armijo(x, p, s, f, **kw):
assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw)
def assert_fp_equal(x, y, err_msg="", nulp=50):
"""Assert two arrays are equal, up to some floating-point rounding error"""
try:
assert_array_almost_equal_nulp(x, y, nulp)
except AssertionError as e:
raise AssertionError("%s\n%s" % (e, err_msg))
class TestLineSearch(object):
# -- scalar functions; must have dphi(0.) < 0
def _scalar_func_1(self, s):
self.fcount += 1
p = -s - s**3 + s**4
dp = -1 - 3*s**2 + 4*s**3
return p, dp
def _scalar_func_2(self, s):
self.fcount += 1
p = np.exp(-4*s) + s**2
dp = -4*np.exp(-4*s) + 2*s
return p, dp
def _scalar_func_3(self, s):
self.fcount += 1
p = -np.sin(10*s)
dp = -10*np.cos(10*s)
return p, dp
# -- n-d functions
def _line_func_1(self, x):
self.fcount += 1
f = np.dot(x, x)
df = 2*x
return f, df
def _line_func_2(self, x):
self.fcount += 1
f = np.dot(x, np.dot(self.A, x)) + 1
df = np.dot(self.A + self.A.T, x)
return f, df
# --
def setup_method(self):
self.scalar_funcs = []
self.line_funcs = []
self.N = 20
self.fcount = 0
def bind_index(func, idx):
# Remember Python's closure semantics!
return lambda *a, **kw: func(*a, **kw)[idx]
for name in sorted(dir(self)):
if name.startswith('_scalar_func_'):
value = getattr(self, name)
self.scalar_funcs.append(
(name, bind_index(value, 0), bind_index(value, 1)))
elif name.startswith('_line_func_'):
value = getattr(self, name)
self.line_funcs.append(
(name, bind_index(value, 0), bind_index(value, 1)))
np.random.seed(1234)
self.A = np.random.randn(self.N, self.N)
def scalar_iter(self):
for name, phi, derphi in self.scalar_funcs:
for old_phi0 in np.random.randn(3):
yield name, phi, derphi, old_phi0
def line_iter(self):
for name, f, fprime in self.line_funcs:
k = 0
while k < 9:
x = np.random.randn(self.N)
p = np.random.randn(self.N)
if np.dot(p, fprime(x)) >= 0:
# always pick a descent direction
continue
k += 1
old_fv = float(np.random.randn())
yield name, f, fprime, x, p, old_fv
# -- Generic scalar searches
def test_scalar_search_wolfe1(self):
c = 0
for name, phi, derphi, old_phi0 in self.scalar_iter():
c += 1
s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),
old_phi0, derphi(0))
assert_fp_equal(phi0, phi(0), name)
assert_fp_equal(phi1, phi(s), name)
assert_wolfe(s, phi, derphi, err_msg=name)
assert_(c > 3) # check that the iterator really works...
def test_scalar_search_wolfe2(self):
for name, phi, derphi, old_phi0 in self.scalar_iter():
s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(
phi, derphi, phi(0), old_phi0, derphi(0))
assert_fp_equal(phi0, phi(0), name)
assert_fp_equal(phi1, phi(s), name)
if derphi1 is not None:
assert_fp_equal(derphi1, derphi(s), name)
assert_wolfe(s, phi, derphi, err_msg="%s %g" % (name, old_phi0))
def test_scalar_search_armijo(self):
for name, phi, derphi, old_phi0 in self.scalar_iter():
s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0))
assert_fp_equal(phi1, phi(s), name)
assert_armijo(s, phi, err_msg="%s %g" % (name, old_phi0))
# -- Generic line searches
def test_line_search_wolfe1(self):
c = 0
smax = 100
for name, f, fprime, x, p, old_f in self.line_iter():
f0 = f(x)
g0 = fprime(x)
self.fcount = 0
s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p,
g0, f0, old_f,
amax=smax)
assert_equal(self.fcount, fc+gc)
assert_fp_equal(ofv, f(x))
if s is None:
continue
assert_fp_equal(fv, f(x + s*p))
assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
if s < smax:
c += 1
assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
assert_(c > 3) # check that the iterator really works...
def test_line_search_wolfe2(self):
c = 0
smax = 512
for name, f, fprime, x, p, old_f in self.line_iter():
f0 = f(x)
g0 = fprime(x)
self.fcount = 0
with suppress_warnings() as sup:
sup.filter(LineSearchWarning,
"The line search algorithm could not find a solution")
sup.filter(LineSearchWarning,
"The line search algorithm did not converge")
s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,
g0, f0, old_f,
amax=smax)
assert_equal(self.fcount, fc+gc)
assert_fp_equal(ofv, f(x))
assert_fp_equal(fv, f(x + s*p))
if gv is not None:
assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
if s < smax:
c += 1
assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
assert_(c > 3) # check that the iterator really works...
def test_line_search_wolfe2_bounds(self):
# See gh-7475
# For this f and p, starting at a point on axis 0, the strong Wolfe
# condition 2 is met if and only if the step length s satisfies
# |x + s| <= c2 * |x|
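        # Worked check: phi(s) = f(x + s*p) = (s - 60)**2, so
        # phi'(s) = 2*(s - 60) and phi'(0) = -120. Condition 2 then reads
        # |2*(s - 60)| <= c2 * 120, i.e. 30 <= s <= 90 for c2 = 0.5,
        # so the smallest admissible step is s = 30.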
f = lambda x: np.dot(x, x)
fp = lambda x: 2 * x
p = np.array([1, 0])
# Smallest s satisfying strong Wolfe conditions for these arguments is 30
x = -60 * p
c2 = 0.5
s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)
assert_line_wolfe(x, p, s, f, fp)
s, _, _, _, _, _ = assert_warns(LineSearchWarning,
ls.line_search_wolfe2, f, fp, x, p,
amax=29, c2=c2)
assert_(s is None)
# s=30 will only be tried on the 6th iteration, so this won't converge
assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p,
c2=c2, maxiter=5)
def test_line_search_armijo(self):
c = 0
for name, f, fprime, x, p, old_f in self.line_iter():
f0 = f(x)
g0 = fprime(x)
self.fcount = 0
s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0)
c += 1
assert_equal(self.fcount, fc)
assert_fp_equal(fv, f(x + s*p))
assert_line_armijo(x, p, s, f, err_msg=name)
assert_(c >= 9)
# -- More specific tests
def test_armijo_terminate_1(self):
# Armijo should evaluate the function only once if the trial step
# is already suitable
count = [0]
def phi(s):
count[0] += 1
return -s + 0.01*s**2
s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1)
assert_equal(s, 1)
assert_equal(count[0], 2)
assert_armijo(s, phi)
def test_wolfe_terminate(self):
# wolfe1 and wolfe2 should also evaluate the function only a few
# times if the trial step is already suitable
def phi(s):
count[0] += 1
return -s + 0.05*s**2
def derphi(s):
count[0] += 1
return -1 + 0.05*2*s
for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:
count = [0]
r = func(phi, derphi, phi(0), None, derphi(0))
assert_(r[0] is not None, (r, func))
assert_(count[0] <= 2 + 2, (count, func))
assert_wolfe(r[0], phi, derphi, err_msg=str(func))
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/__init__.py  (empty file)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_allclose
import scipy.linalg
from scipy.optimize import minimize
def test_1():
def f(x):
return x**4, 4*x**3
for gtol in [1e-8, 1e-12, 1e-20]:
for maxcor in range(20, 35):
result = minimize(fun=f, jac=True, method='L-BFGS-B', x0=20,
options={'gtol': gtol, 'maxcor': maxcor})
H1 = result.hess_inv(np.array([1])).reshape(1,1)
H2 = result.hess_inv.todense()
assert_allclose(H1, H2)
def test_2():
H0 = [[3, 0], [1, 2]]
def f(x):
return np.dot(x, np.dot(scipy.linalg.inv(H0), x))
result1 = minimize(fun=f, method='L-BFGS-B', x0=[10, 20])
result2 = minimize(fun=f, method='BFGS', x0=[10, 20])
H1 = result1.hess_inv.todense()
H2 = np.vstack((
result1.hess_inv(np.array([1, 0])),
result1.hess_inv(np.array([0, 1]))))
assert_allclose(
result1.hess_inv(np.array([1, 0]).reshape(2,1)).reshape(-1),
result1.hess_inv(np.array([1, 0])))
assert_allclose(H1, H2)
assert_allclose(H1, result2.hess_inv, rtol=1e-2, atol=0.03)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_least_squares.py
from __future__ import division
from itertools import product
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose,
assert_equal)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy.sparse import issparse, lil_matrix
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import least_squares
from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
from scipy.optimize._lsq.common import EPS, make_strictly_feasible
def fun_trivial(x, a=0):
return (x - a)**2 + 5.0
def jac_trivial(x, a=0.0):
return 2 * (x - a)
def fun_2d_trivial(x):
return np.array([x[0], x[1]])
def jac_2d_trivial(x):
return np.identity(2)
def fun_rosenbrock(x):
return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
def jac_rosenbrock(x):
return np.array([
[-20 * x[0], 10],
[-1, 0]
])
def jac_rosenbrock_bad_dim(x):
return np.array([
[-20 * x[0], 10],
[-1, 0],
[0.0, 0.0]
])
def fun_rosenbrock_cropped(x):
return fun_rosenbrock(x)[0]
def jac_rosenbrock_cropped(x):
return jac_rosenbrock(x)[0]
# When x is a 1-D array, the return value is a 2-D array.
def fun_wrong_dimensions(x):
return np.array([x, x**2, x**3])
def jac_wrong_dimensions(x, a=0.0):
return np.atleast_3d(jac_trivial(x, a=a))
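# fun_bvp below returns the residuals of a finite-difference discretization
# of the boundary value problem lap(u) + u**3 = 0 on an n-by-n interior grid
# with zero Dirichlet boundary conditions (u is x zero-padded by one cell).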
def fun_bvp(x):
n = int(np.sqrt(x.shape[0]))
u = np.zeros((n + 2, n + 2))
x = x.reshape((n, n))
u[1:-1, 1:-1] = x
y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3
return y.ravel()
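# Broyden-like tridiagonal system f_i = (3 - x_i)*x_i + 1 - x_{i-1} - 2*x_{i+1}:
# its Jacobian is nonzero only on three central diagonals, which the 'sparse',
# 'operator' and 'dense' modes below expose in different containers.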
class BroydenTridiagonal(object):
def __init__(self, n=100, mode='sparse'):
np.random.seed(0)
self.n = n
self.x0 = -np.ones(n)
self.lb = np.linspace(-2, -1.5, n)
self.ub = np.linspace(-0.8, 0.0, n)
self.lb += 0.1 * np.random.randn(n)
self.ub += 0.1 * np.random.randn(n)
self.x0 += 0.1 * np.random.randn(n)
self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)
if mode == 'sparse':
self.sparsity = lil_matrix((n, n), dtype=int)
i = np.arange(n)
self.sparsity[i, i] = 1
i = np.arange(1, n)
self.sparsity[i, i - 1] = 1
i = np.arange(n - 1)
self.sparsity[i, i + 1] = 1
self.jac = self._jac
elif mode == 'operator':
self.jac = lambda x: aslinearoperator(self._jac(x))
elif mode == 'dense':
self.sparsity = None
self.jac = lambda x: self._jac(x).toarray()
else:
assert_(False)
def fun(self, x):
f = (3 - x) * x + 1
f[1:] -= x[:-1]
f[:-1] -= 2 * x[1:]
return f
def _jac(self, x):
J = lil_matrix((self.n, self.n))
i = np.arange(self.n)
J[i, i] = 3 - 2 * x
i = np.arange(1, self.n)
J[i, i - 1] = -1
i = np.arange(self.n - 1)
J[i, i + 1] = -2
return J
class ExponentialFittingProblem(object):
"""Provide data and function for exponential fitting in the form
y = a + exp(b * x) + noise."""
def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
n_points=11, random_seed=None):
np.random.seed(random_seed)
self.m = n_points
self.n = 2
self.p0 = np.zeros(2)
self.x = np.linspace(x_range[0], x_range[1], n_points)
self.y = a + np.exp(b * self.x)
self.y += noise * np.random.randn(self.m)
outliers = np.random.randint(0, self.m, n_outliers)
self.y[outliers] += 50 * noise * np.random.rand(n_outliers)
self.p_opt = np.array([a, b])
def fun(self, p):
return p[0] + np.exp(p[1] * self.x) - self.y
def jac(self, p):
J = np.empty((self.m, self.n))
J[:, 0] = 1
J[:, 1] = self.x * np.exp(p[1] * self.x)
return J
def cubic_soft_l1(z):
rho = np.empty((3, z.size))
t = 1 + z
rho[0] = 3 * (t**(1/3) - 1)
rho[1] = t ** (-2/3)
rho[2] = -2/3 * t**(-5/3)
return rho
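# A callable `loss` for least_squares receives z = f**2 and must return an
# array of shape (3, m) whose rows are rho(z), rho'(z) and rho''(z).
# cubic_soft_l1 follows that contract; it generalizes the built-in 'soft_l1'
# loss rho(z) = 2*((1 + z)**0.5 - 1) from a square root to a cube root.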
LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
class BaseMixin(object):
def test_basic(self):
# Test that the basic calling sequence works.
res = least_squares(fun_trivial, 2., method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_allclose(res.fun, fun_trivial(res.x))
def test_args_kwargs(self):
# Test that args and kwargs are passed correctly to the functions.
a = 3.0
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
with suppress_warnings() as sup:
sup.filter(UserWarning,
"jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
res = least_squares(fun_trivial, 2.0, jac, args=(a,),
method=self.method)
res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
method=self.method)
assert_allclose(res.x, a, rtol=1e-4)
assert_allclose(res1.x, a, rtol=1e-4)
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
args=(3, 4,), method=self.method)
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
kwargs={'kaboom': 3}, method=self.method)
def test_jac_options(self):
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
with suppress_warnings() as sup:
sup.filter(UserWarning,
"jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
res = least_squares(fun_trivial, 2.0, jac, method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
method=self.method)
def test_nfev_options(self):
for max_nfev in [None, 20]:
res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
def test_x_scale_options(self):
for x_scale in [1.0, np.array([0.5]), 'jac']:
res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
assert_allclose(res.x, 0)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale='auto', method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=-1.0, method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=None, method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=1.0+2.0j, method=self.method)
def test_diff_step(self):
# res1 and res2 should be equivalent.
# res2 and res3 should be different.
res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
method=self.method)
res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
method=self.method)
res3 = least_squares(fun_trivial, 2.0,
diff_step=None, method=self.method)
assert_allclose(res1.x, 0, atol=1e-4)
assert_allclose(res2.x, 0, atol=1e-4)
assert_allclose(res3.x, 0, atol=1e-4)
assert_equal(res1.x, res2.x)
assert_equal(res1.nfev, res2.nfev)
assert_(res2.nfev != res3.nfev)
def test_incorrect_options_usage(self):
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
method=self.method, options={'no_such_option': 100})
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
method=self.method, options={'max_nfev': 100})
def test_full_result(self):
        # MINPACK doesn't work very well with factor=100 on this problem,
        # hence the loose 'atol'.
res = least_squares(fun_trivial, 2.0, method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_allclose(res.cost, 12.5)
assert_allclose(res.fun, 5)
assert_allclose(res.jac, 0, atol=1e-4)
assert_allclose(res.grad, 0, atol=1e-2)
assert_allclose(res.optimality, 0, atol=1e-2)
assert_equal(res.active_mask, 0)
if self.method == 'lm':
assert_(res.nfev < 30)
assert_(res.njev is None)
else:
assert_(res.nfev < 10)
assert_(res.njev < 10)
assert_(res.status > 0)
assert_(res.success)
def test_full_result_single_fev(self):
# MINPACK checks the number of nfev after the iteration,
        # so it's hard to tell what it is going to compute.
if self.method == 'lm':
return
res = least_squares(fun_trivial, 2.0, method=self.method,
max_nfev=1)
assert_equal(res.x, np.array([2]))
assert_equal(res.cost, 40.5)
assert_equal(res.fun, np.array([9]))
assert_equal(res.jac, np.array([[4]]))
assert_equal(res.grad, np.array([36]))
assert_equal(res.optimality, 36)
assert_equal(res.active_mask, np.array([0]))
assert_equal(res.nfev, 1)
assert_equal(res.njev, 1)
assert_equal(res.status, 0)
assert_equal(res.success, 0)
def test_rosenbrock(self):
x0 = [-2, 1]
x_opt = [1, 1]
for jac, x_scale, tr_solver in product(
['2-point', '3-point', 'cs', jac_rosenbrock],
[1.0, np.array([1.0, 0.2]), 'jac'],
['exact', 'lsmr']):
with suppress_warnings() as sup:
sup.filter(UserWarning,
"jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
tr_solver=tr_solver, method=self.method)
assert_allclose(res.x, x_opt)
def test_rosenbrock_cropped(self):
x0 = [-2, 1]
if self.method == 'lm':
assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
x0, method='lm')
else:
for jac, x_scale, tr_solver in product(
['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
[1.0, np.array([1.0, 0.2]), 'jac'],
['exact', 'lsmr']):
res = least_squares(
fun_rosenbrock_cropped, x0, jac, x_scale=x_scale,
tr_solver=tr_solver, method=self.method)
assert_allclose(res.cost, 0, atol=1e-14)
def test_fun_wrong_dimensions(self):
assert_raises(ValueError, least_squares, fun_wrong_dimensions,
2.0, method=self.method)
def test_jac_wrong_dimensions(self):
assert_raises(ValueError, least_squares, fun_trivial,
2.0, jac_wrong_dimensions, method=self.method)
def test_fun_and_jac_inconsistent_dimensions(self):
x0 = [1, 2]
assert_raises(ValueError, least_squares, fun_rosenbrock, x0,
jac_rosenbrock_bad_dim, method=self.method)
def test_x0_multidimensional(self):
x0 = np.ones(4).reshape(2, 2)
assert_raises(ValueError, least_squares, fun_trivial, x0,
method=self.method)
def test_x0_complex_scalar(self):
x0 = 2.0 + 0.0*1j
assert_raises(ValueError, least_squares, fun_trivial, x0,
method=self.method)
def test_x0_complex_array(self):
x0 = [1.0, 2.0 + 0.0*1j]
assert_raises(ValueError, least_squares, fun_trivial, x0,
method=self.method)
def test_bvp(self):
        # This test was introduced with fix #5556. It turned out that the
        # dogbox solver had a bug in the trust-region radius update which
        # could block its progress and create an infinite loop, and this
        # discrete boundary value problem is the one that triggers it.
n = 10
x0 = np.ones(n**2)
if self.method == 'lm':
max_nfev = 5000 # To account for Jacobian estimation.
else:
max_nfev = 100
res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method,
max_nfev=max_nfev)
assert_(res.nfev < max_nfev)
assert_(res.cost < 0.5)
class BoundsMixin(object):
def test_inconsistent(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
bounds=(10.0, 0.0), method=self.method)
def test_infeasible(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
bounds=(3., 4), method=self.method)
def test_wrong_number(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.,
bounds=(1., 2, 3), method=self.method)
def test_inconsistent_shape(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
bounds=(1.0, [2.0, 3.0]), method=self.method)
        # a 1-D array won't be broadcast
assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
bounds=([0.0], [3.0, 4.0]), method=self.method)
def test_in_bounds(self):
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
res = least_squares(fun_trivial, 2.0, jac=jac,
bounds=(-1.0, 3.0), method=self.method)
assert_allclose(res.x, 0.0, atol=1e-4)
assert_equal(res.active_mask, [0])
assert_(-1 <= res.x <= 3)
res = least_squares(fun_trivial, 2.0, jac=jac,
bounds=(0.5, 3.0), method=self.method)
assert_allclose(res.x, 0.5, atol=1e-4)
assert_equal(res.active_mask, [-1])
assert_(0.5 <= res.x <= 3)
def test_bounds_shape(self):
for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
x0 = [1.0, 1.0]
res = least_squares(fun_2d_trivial, x0, jac=jac)
assert_allclose(res.x, [0.0, 0.0])
res = least_squares(fun_2d_trivial, x0, jac=jac,
bounds=(0.5, [2.0, 2.0]), method=self.method)
assert_allclose(res.x, [0.5, 0.5])
res = least_squares(fun_2d_trivial, x0, jac=jac,
bounds=([0.3, 0.2], 3.0), method=self.method)
assert_allclose(res.x, [0.3, 0.2])
res = least_squares(
fun_2d_trivial, x0, jac=jac, bounds=([-1, 0.5], [1.0, 3.0]),
method=self.method)
assert_allclose(res.x, [0.0, 0.5], atol=1e-5)
def test_rosenbrock_bounds(self):
x0_1 = np.array([-2.0, 1.0])
x0_2 = np.array([2.0, 2.0])
x0_3 = np.array([-2.0, 2.0])
x0_4 = np.array([0.0, 2.0])
x0_5 = np.array([-1.2, 1.0])
problems = [
(x0_1, ([-np.inf, -1.5], np.inf)),
(x0_2, ([-np.inf, 1.5], np.inf)),
(x0_3, ([-np.inf, 1.5], np.inf)),
(x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
(x0_2, ([1.0, 1.5], [3.0, 3.0])),
(x0_5, ([-50.0, 0.0], [0.5, 100]))
]
for x0, bounds in problems:
for jac, x_scale, tr_solver in product(
['2-point', '3-point', 'cs', jac_rosenbrock],
[1.0, [1.0, 0.5], 'jac'],
['exact', 'lsmr']):
res = least_squares(fun_rosenbrock, x0, jac, bounds,
x_scale=x_scale, tr_solver=tr_solver,
method=self.method)
assert_allclose(res.optimality, 0.0, atol=1e-5)
class SparseMixin(object):
def test_exact_tr_solver(self):
p = BroydenTridiagonal()
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
tr_solver='exact', method=self.method)
assert_raises(ValueError, least_squares, p.fun, p.x0,
tr_solver='exact', jac_sparsity=p.sparsity,
method=self.method)
def test_equivalence(self):
sparse = BroydenTridiagonal(mode='sparse')
dense = BroydenTridiagonal(mode='dense')
res_sparse = least_squares(
sparse.fun, sparse.x0, jac=sparse.jac,
method=self.method)
res_dense = least_squares(
dense.fun, dense.x0, jac=sparse.jac,
method=self.method)
assert_equal(res_sparse.nfev, res_dense.nfev)
assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
assert_allclose(res_sparse.cost, 0, atol=1e-20)
assert_allclose(res_dense.cost, 0, atol=1e-20)
def test_tr_options(self):
p = BroydenTridiagonal()
res = least_squares(p.fun, p.x0, p.jac, method=self.method,
tr_options={'btol': 1e-10})
assert_allclose(res.cost, 0, atol=1e-20)
def test_wrong_parameters(self):
p = BroydenTridiagonal()
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
tr_solver='best', method=self.method)
assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
tr_solver='lsmr', tr_options={'tol': 1e-10})
def test_solver_selection(self):
sparse = BroydenTridiagonal(mode='sparse')
dense = BroydenTridiagonal(mode='dense')
res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
method=self.method)
res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
method=self.method)
assert_allclose(res_sparse.cost, 0, atol=1e-20)
assert_allclose(res_dense.cost, 0, atol=1e-20)
assert_(issparse(res_sparse.jac))
assert_(isinstance(res_dense.jac, np.ndarray))
def test_numerical_jac(self):
p = BroydenTridiagonal()
for jac in ['2-point', '3-point', 'cs']:
res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
res_sparse = least_squares(
                p.fun, p.x0, jac, method=self.method,
jac_sparsity=p.sparsity)
assert_equal(res_dense.nfev, res_sparse.nfev)
assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
assert_allclose(res_dense.cost, 0, atol=1e-20)
assert_allclose(res_sparse.cost, 0, atol=1e-20)
def test_with_bounds(self):
p = BroydenTridiagonal()
for jac, jac_sparsity in product(
[p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
res_1 = least_squares(
p.fun, p.x0, jac, bounds=(p.lb, np.inf),
                method=self.method, jac_sparsity=jac_sparsity)
res_2 = least_squares(
p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
method=self.method, jac_sparsity=jac_sparsity)
res_3 = least_squares(
p.fun, p.x0, jac, bounds=(p.lb, p.ub),
method=self.method, jac_sparsity=jac_sparsity)
assert_allclose(res_1.optimality, 0, atol=1e-10)
assert_allclose(res_2.optimality, 0, atol=1e-10)
assert_allclose(res_3.optimality, 0, atol=1e-10)
def test_wrong_jac_sparsity(self):
p = BroydenTridiagonal()
sparsity = p.sparsity[:-1]
assert_raises(ValueError, least_squares, p.fun, p.x0,
jac_sparsity=sparsity, method=self.method)
def test_linear_operator(self):
p = BroydenTridiagonal(mode='operator')
res = least_squares(p.fun, p.x0, p.jac, method=self.method)
assert_allclose(res.cost, 0.0, atol=1e-20)
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method=self.method, tr_solver='exact')
def test_x_scale_jac_scale(self):
p = BroydenTridiagonal()
res = least_squares(p.fun, p.x0, p.jac, method=self.method,
x_scale='jac')
assert_allclose(res.cost, 0.0, atol=1e-20)
p = BroydenTridiagonal(mode='operator')
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method=self.method, x_scale='jac')
class LossFunctionMixin(object):
def test_options(self):
for loss in LOSSES:
res = least_squares(fun_trivial, 2.0, loss=loss,
method=self.method)
assert_allclose(res.x, 0, atol=1e-15)
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
loss='hinge', method=self.method)
def test_fun(self):
        # Test that res.fun contains the actual residuals and is not
        # modified by the loss function.
for loss in LOSSES:
res = least_squares(fun_trivial, 2.0, loss=loss,
method=self.method)
assert_equal(res.fun, fun_trivial(res.x))
def test_grad(self):
        # Test that res.grad is the true gradient of the loss function at the
        # solution. Use max_nfev = 1 to avoid reaching the minimum.
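        # For the cost 0.5 * sum(rho(f**2)) the chain rule gives
        # grad = rho'(f**2) * f * f'; each case below checks this identity
        # for the corresponding built-in rho.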
x = np.array([2.0]) # res.x will be this.
res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
max_nfev=1, method=self.method)
assert_equal(res.grad, 2 * x * (x**2 + 5))
res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
max_nfev=1, method=self.method)
assert_equal(res.grad, 2 * x)
res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
max_nfev=1, method=self.method)
assert_allclose(res.grad,
2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)
res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
max_nfev=1, method=self.method)
assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))
res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
max_nfev=1, method=self.method)
assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))
res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
max_nfev=1, method=self.method)
assert_allclose(res.grad,
2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))
def test_jac(self):
        # Test that res.jac.T.dot(res.jac) gives the Gauss-Newton
        # approximation of the Hessian. This approximation is computed by
        # differentiating the cost function twice and dropping the part that
        # contains the second derivative of f. For a scalar function it is
        # computed as H = (rho' + 2 * rho'' * f**2) * f'**2; if the expression
        # inside the brackets is less than EPS, it is replaced by EPS. Here we
        # check against the square root of H.
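        # Worked example for 'soft_l1' (rho(z) = 2*((1 + z)**0.5 - 1)):
        # rho' = (1 + z)**-0.5 and rho'' = -0.5*(1 + z)**-1.5, so with z = f**2
        # rho' + 2*rho''*f**2 = (1 + f**2)**-1.5, and the scaled Jacobian is
        # its square root times f' = 2*x, i.e. 2*x*(1 + f**2)**-0.75 as
        # asserted below.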
x = 2.0 # res.x will be this.
f = x**2 + 5 # res.fun will be this.
res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
max_nfev=1, method=self.method)
assert_equal(res.jac, 2 * x)
        # For the 'huber' loss the Jacobian correction is identically zero
        # in the outlier region; in such cases it is set equal to EPS**0.5.
res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
max_nfev=1, method=self.method)
assert_equal(res.jac, 2 * x * EPS**0.5)
        # Now let's apply `f_scale` to turn the residual into an inlier.
# The loss function becomes linear.
res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
f_scale=10, max_nfev=1)
assert_equal(res.jac, 2 * x)
# 'soft_l1' always gives a positive scaling.
res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
max_nfev=1, method=self.method)
assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)
        # For 'cauchy' the correction term turns out to be negative, and it
        # is replaced by EPS**0.5.
res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
max_nfev=1, method=self.method)
assert_allclose(res.jac, 2 * x * EPS**0.5)
        # Now use scaling to turn the residual into an inlier.
res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
f_scale=10, max_nfev=1, method=self.method)
fs = f / 10
assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))
# 'arctan' gives an outlier.
res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
max_nfev=1, method=self.method)
assert_allclose(res.jac, 2 * x * EPS**0.5)
        # Turn it into an inlier.
res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
f_scale=20.0, max_nfev=1, method=self.method)
fs = f / 20
assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4))
# cubic_soft_l1 will give an outlier.
res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
max_nfev=1)
assert_allclose(res.jac, 2 * x * EPS**0.5)
        # Turn it into an inlier.
res = least_squares(fun_trivial, x, jac_trivial,
loss=cubic_soft_l1, f_scale=6, max_nfev=1)
fs = f / 6
assert_allclose(res.jac,
2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6))
def test_robustness(self):
for noise in [0.1, 1.0]:
p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)
for jac in ['2-point', '3-point', 'cs', p.jac]:
res_lsq = least_squares(p.fun, p.p0, jac=jac,
method=self.method)
assert_allclose(res_lsq.optimality, 0, atol=1e-2)
for loss in LOSSES:
if loss == 'linear':
continue
res_robust = least_squares(
p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
method=self.method)
assert_allclose(res_robust.optimality, 0, atol=1e-2)
assert_(norm(res_robust.x - p.p_opt) <
norm(res_lsq.x - p.p_opt))
class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
method = 'dogbox'
class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
method = 'trf'
def test_lsmr_regularization(self):
p = BroydenTridiagonal()
for regularize in [True, False]:
res = least_squares(p.fun, p.x0, p.jac, method='trf',
tr_options={'regularize': regularize})
assert_allclose(res.cost, 0, atol=1e-20)
class TestLM(BaseMixin):
method = 'lm'
def test_bounds_not_supported(self):
assert_raises(ValueError, least_squares, fun_trivial,
2.0, bounds=(-3.0, 3.0), method='lm')
def test_m_less_n_not_supported(self):
x0 = [-2, 1]
assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0,
method='lm')
def test_sparse_not_supported(self):
p = BroydenTridiagonal()
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method='lm')
def test_jac_sparsity_not_supported(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
jac_sparsity=[1], method='lm')
def test_LinearOperator_not_supported(self):
p = BroydenTridiagonal(mode="operator")
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method='lm')
def test_loss(self):
res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
assert_allclose(res.x, 0.0, atol=1e-4)
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
method='lm', loss='huber')
def test_basic():
# test that 'method' arg is really optional
res = least_squares(fun_trivial, 2.0)
assert_allclose(res.x, 0, atol=1e-10)
| 28,578 | 37.830163 | 96 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_minimize_constrained.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from scipy.linalg import block_diag
from scipy.sparse import csc_matrix
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_less)
from pytest import raises
from scipy.optimize import (NonlinearConstraint,
LinearConstraint,
Bounds,
minimize,
BFGS,
SR1)
from scipy._lib._numpy_compat import suppress_warnings
class Maratos:
"""Problem 15.4 from Nocedal and Wright
The following optimization problem:
minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
Subject to: x[0]**2 + x[1]**2 - 1 = 0
"""
def __init__(self, degrees=60, constr_jac=None, constr_hess=None):
rads = degrees/180*np.pi
self.x0 = [np.cos(rads), np.sin(rads)]
self.x_opt = np.array([1.0, 0.0])
self.constr_jac = constr_jac
self.constr_hess = constr_hess
self.bounds = None
def fun(self, x):
return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
def grad(self, x):
return np.array([4*x[0]-1, 4*x[1]])
def hess(self, x):
return 4*np.eye(2)
@property
def constr(self):
def fun(x):
return x[0]**2 + x[1]**2
if self.constr_jac is None:
def jac(x):
return [[2*x[0], 2*x[1]]]
else:
jac = self.constr_jac
if self.constr_hess is None:
def hess(x, v):
return 2*v[0]*np.eye(2)
else:
hess = self.constr_hess
return NonlinearConstraint(fun, 1, 1, jac, hess)
class MaratosTestArgs:
"""Problem 15.4 from Nocedal and Wright
The following optimization problem:
minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
Subject to: x[0]**2 + x[1]**2 - 1 = 0
"""
def __init__(self, a, b, degrees=60, constr_jac=None, constr_hess=None):
rads = degrees/180*np.pi
self.x0 = [np.cos(rads), np.sin(rads)]
self.x_opt = np.array([1.0, 0.0])
self.constr_jac = constr_jac
self.constr_hess = constr_hess
self.a = a
self.b = b
self.bounds = None
def _test_args(self, a, b):
if self.a != a or self.b != b:
raise ValueError()
def fun(self, x, a, b):
self._test_args(a, b)
return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
def grad(self, x, a, b):
self._test_args(a, b)
return np.array([4*x[0]-1, 4*x[1]])
def hess(self, x, a, b):
self._test_args(a, b)
return 4*np.eye(2)
@property
def constr(self):
def fun(x):
return x[0]**2 + x[1]**2
if self.constr_jac is None:
def jac(x):
                return [[2*x[0], 2*x[1]]]
else:
jac = self.constr_jac
if self.constr_hess is None:
def hess(x, v):
return 2*v[0]*np.eye(2)
else:
hess = self.constr_hess
return NonlinearConstraint(fun, 1, 1, jac, hess)
class MaratosGradInFunc:
"""Problem 15.4 from Nocedal and Wright
The following optimization problem:
minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
Subject to: x[0]**2 + x[1]**2 - 1 = 0
"""
def __init__(self, degrees=60, constr_jac=None, constr_hess=None):
rads = degrees/180*np.pi
self.x0 = [np.cos(rads), np.sin(rads)]
self.x_opt = np.array([1.0, 0.0])
self.constr_jac = constr_jac
self.constr_hess = constr_hess
self.bounds = None
def fun(self, x):
return (2*(x[0]**2 + x[1]**2 - 1) - x[0],
np.array([4*x[0]-1, 4*x[1]]))
@property
def grad(self):
return True
def hess(self, x):
return 4*np.eye(2)
@property
def constr(self):
def fun(x):
return x[0]**2 + x[1]**2
if self.constr_jac is None:
def jac(x):
                return [[2*x[0], 2*x[1]]]
else:
jac = self.constr_jac
if self.constr_hess is None:
def hess(x, v):
return 2*v[0]*np.eye(2)
else:
hess = self.constr_hess
return NonlinearConstraint(fun, 1, 1, jac, hess)
class HyperbolicIneq:
"""Problem 15.1 from Nocedal and Wright
The following optimization problem:
minimize 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2
Subject to: 1/(x[0] + 1) - x[1] >= 1/4
x[0] >= 0
x[1] >= 0
"""
def __init__(self, constr_jac=None, constr_hess=None):
self.x0 = [0, 0]
self.x_opt = [1.952823, 0.088659]
self.constr_jac = constr_jac
self.constr_hess = constr_hess
self.bounds = Bounds(0, np.inf)
def fun(self, x):
return 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2
def grad(self, x):
return [x[0] - 2, x[1] - 1/2]
def hess(self, x):
return np.eye(2)
@property
def constr(self):
def fun(x):
return 1/(x[0] + 1) - x[1]
if self.constr_jac is None:
def jac(x):
return [[-1/(x[0] + 1)**2, -1]]
else:
jac = self.constr_jac
if self.constr_hess is None:
def hess(x, v):
return 2*v[0]*np.array([[1/(x[0] + 1)**3, 0],
[0, 0]])
else:
hess = self.constr_hess
return NonlinearConstraint(fun, 0.25, np.inf, jac, hess)
class Rosenbrock:
"""Rosenbrock function.
The following optimization problem:
minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
"""
def __init__(self, n=2, random_state=0):
rng = np.random.RandomState(random_state)
self.x0 = rng.uniform(-1, 1, n)
self.x_opt = np.ones(n)
self.bounds = None
def fun(self, x):
x = np.asarray(x)
r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
axis=0)
return r
def grad(self, x):
x = np.asarray(x)
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = np.zeros_like(x)
der[1:-1] = (200 * (xm - xm_m1**2) -
400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
der[-1] = 200 * (x[-1] - x[-2]**2)
return der
def hess(self, x):
x = np.atleast_1d(x)
H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
diagonal = np.zeros(len(x), dtype=x.dtype)
diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
H = H + np.diag(diagonal)
return H
@property
def constr(self):
return ()
class IneqRosenbrock(Rosenbrock):
"""Rosenbrock subject to inequality constraints.
The following optimization problem:
minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
subject to: x[0] + 2 x[1] <= 1
    Taken from the MATLAB ``fmincon`` documentation.
"""
def __init__(self, random_state=0):
Rosenbrock.__init__(self, 2, random_state)
self.x0 = [-1, -0.5]
self.x_opt = [0.5022, 0.2489]
self.bounds = None
@property
def constr(self):
A = [[1, 2]]
b = 1
return LinearConstraint(A, -np.inf, b)
class EqIneqRosenbrock(Rosenbrock):
"""Rosenbrock subject to equality and inequality constraints.
The following optimization problem:
minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
subject to: x[0] + 2 x[1] <= 1
2 x[0] + x[1] = 1
    Taken from the MATLAB ``fmincon`` documentation.
"""
def __init__(self, random_state=0):
Rosenbrock.__init__(self, 2, random_state)
self.x0 = [-1, -0.5]
self.x_opt = [0.41494, 0.17011]
self.bounds = None
@property
def constr(self):
A_ineq = [[1, 2]]
b_ineq = 1
A_eq = [[2, 1]]
b_eq = 1
return (LinearConstraint(A_ineq, -np.inf, b_ineq),
LinearConstraint(A_eq, b_eq, b_eq))
class Elec:
"""Distribution of electrons on a sphere.
    Problem no. 2 from the COPS collection [1]_. Find
the equilibrium state distribution (of minimal
potential) of the electrons positioned on a
conducting sphere.
References
----------
    .. [1] E. D. Dolan, J. J. Moré, and T. S. Munson,
"Benchmarking optimization software with COPS 3.0.",
Argonne National Lab., Argonne, IL (US), 2004.
"""
def __init__(self, n_electrons=200, random_state=0,
constr_jac=None, constr_hess=None):
self.n_electrons = n_electrons
self.rng = np.random.RandomState(random_state)
# Initial Guess
phi = self.rng.uniform(0, 2 * np.pi, self.n_electrons)
theta = self.rng.uniform(-np.pi, np.pi, self.n_electrons)
x = np.cos(theta) * np.cos(phi)
y = np.cos(theta) * np.sin(phi)
z = np.sin(theta)
self.x0 = np.hstack((x, y, z))
self.x_opt = None
self.constr_jac = constr_jac
self.constr_hess = constr_hess
self.bounds = None
    def _get_coordinates(self, x):
x_coord = x[:self.n_electrons]
y_coord = x[self.n_electrons:2 * self.n_electrons]
z_coord = x[2 * self.n_electrons:]
return x_coord, y_coord, z_coord
def _compute_coordinate_deltas(self, x):
        x_coord, y_coord, z_coord = self._get_coordinates(x)
dx = x_coord[:, None] - x_coord
dy = y_coord[:, None] - y_coord
dz = z_coord[:, None] - z_coord
return dx, dy, dz
def fun(self, x):
dx, dy, dz = self._compute_coordinate_deltas(x)
with np.errstate(divide='ignore'):
dm1 = (dx**2 + dy**2 + dz**2) ** -0.5
dm1[np.diag_indices_from(dm1)] = 0
return 0.5 * np.sum(dm1)
def grad(self, x):
dx, dy, dz = self._compute_coordinate_deltas(x)
with np.errstate(divide='ignore'):
dm3 = (dx**2 + dy**2 + dz**2) ** -1.5
dm3[np.diag_indices_from(dm3)] = 0
grad_x = -np.sum(dx * dm3, axis=1)
grad_y = -np.sum(dy * dm3, axis=1)
grad_z = -np.sum(dz * dm3, axis=1)
return np.hstack((grad_x, grad_y, grad_z))
def hess(self, x):
dx, dy, dz = self._compute_coordinate_deltas(x)
d = (dx**2 + dy**2 + dz**2) ** 0.5
with np.errstate(divide='ignore'):
dm3 = d ** -3
dm5 = d ** -5
i = np.arange(self.n_electrons)
dm3[i, i] = 0
dm5[i, i] = 0
Hxx = dm3 - 3 * dx**2 * dm5
Hxx[i, i] = -np.sum(Hxx, axis=1)
Hxy = -3 * dx * dy * dm5
Hxy[i, i] = -np.sum(Hxy, axis=1)
Hxz = -3 * dx * dz * dm5
Hxz[i, i] = -np.sum(Hxz, axis=1)
Hyy = dm3 - 3 * dy**2 * dm5
Hyy[i, i] = -np.sum(Hyy, axis=1)
Hyz = -3 * dy * dz * dm5
Hyz[i, i] = -np.sum(Hyz, axis=1)
Hzz = dm3 - 3 * dz**2 * dm5
Hzz[i, i] = -np.sum(Hzz, axis=1)
H = np.vstack((
np.hstack((Hxx, Hxy, Hxz)),
np.hstack((Hxy, Hyy, Hyz)),
np.hstack((Hxz, Hyz, Hzz))
))
return H
@property
def constr(self):
def fun(x):
            x_coord, y_coord, z_coord = self._get_coordinates(x)
return x_coord**2 + y_coord**2 + z_coord**2 - 1
if self.constr_jac is None:
def jac(x):
                x_coord, y_coord, z_coord = self._get_coordinates(x)
Jx = 2 * np.diag(x_coord)
Jy = 2 * np.diag(y_coord)
Jz = 2 * np.diag(z_coord)
return csc_matrix(np.hstack((Jx, Jy, Jz)))
else:
jac = self.constr_jac
if self.constr_hess is None:
def hess(x, v):
D = 2 * np.diag(v)
return block_diag(D, D, D)
else:
hess = self.constr_hess
return NonlinearConstraint(fun, -np.inf, 0, jac, hess)
class TestTrustRegionConstr(TestCase):
def test_list_of_problems(self):
list_of_problems = [Maratos(),
Maratos(constr_hess='2-point'),
Maratos(constr_hess=SR1()),
Maratos(constr_jac='2-point', constr_hess=SR1()),
MaratosGradInFunc(),
HyperbolicIneq(),
HyperbolicIneq(constr_hess='3-point'),
HyperbolicIneq(constr_hess=BFGS()),
HyperbolicIneq(constr_jac='3-point',
constr_hess=BFGS()),
Rosenbrock(),
IneqRosenbrock(),
EqIneqRosenbrock(),
Elec(n_electrons=2),
Elec(n_electrons=2, constr_hess='2-point'),
Elec(n_electrons=2, constr_hess=SR1()),
Elec(n_electrons=2, constr_jac='3-point',
constr_hess=SR1())]
for prob in list_of_problems:
for grad in (prob.grad, '3-point', False):
for hess in (prob.hess,
'3-point',
SR1(),
BFGS(exception_strategy='damp_update'),
BFGS(exception_strategy='skip_update')):
                    # Skip combinations that are not supported: a
                    # finite-difference Hessian cannot be stacked on a
                    # finite-difference (or absent) gradient.
if grad in ('2-point', '3-point', 'cs', False) and \
hess in ('2-point', '3-point', 'cs'):
continue
if prob.grad is True and grad in ('3-point', False):
continue
with suppress_warnings() as sup:
sup.filter(UserWarning, "delta_grad == 0.0")
result = minimize(prob.fun, prob.x0,
method='trust-constr',
jac=grad, hess=hess,
bounds=prob.bounds,
constraints=prob.constr)
if prob.x_opt is not None:
assert_array_almost_equal(result.x, prob.x_opt, decimal=5)
# gtol
if result.status == 1:
assert_array_less(result.optimality, 1e-8)
# xtol
if result.status == 2:
assert_array_less(result.tr_radius, 1e-8)
if result.method == "tr_interior_point":
assert_array_less(result.barrier_parameter, 1e-8)
# max iter
if result.status in (0, 3):
raise RuntimeError("Invalid termination condition.")
def test_no_constraints(self):
prob = Rosenbrock()
result = minimize(prob.fun, prob.x0,
method='trust-constr',
jac=prob.grad, hess=prob.hess)
result1 = minimize(prob.fun, prob.x0,
method='L-BFGS-B',
jac='2-point')
with pytest.warns(UserWarning):
result2 = minimize(prob.fun, prob.x0,
method='L-BFGS-B',
jac='3-point')
assert_array_almost_equal(result.x, prob.x_opt, decimal=5)
assert_array_almost_equal(result1.x, prob.x_opt, decimal=5)
assert_array_almost_equal(result2.x, prob.x_opt, decimal=5)
def test_hessp(self):
prob = Maratos()
def hessp(x, p):
H = prob.hess(x)
return H.dot(p)
result = minimize(prob.fun, prob.x0,
method='trust-constr',
jac=prob.grad, hessp=hessp,
bounds=prob.bounds,
constraints=prob.constr)
if prob.x_opt is not None:
assert_array_almost_equal(result.x, prob.x_opt, decimal=2)
# gtol
if result.status == 1:
assert_array_less(result.optimality, 1e-8)
# xtol
if result.status == 2:
assert_array_less(result.tr_radius, 1e-8)
if result.method == "tr_interior_point":
assert_array_less(result.barrier_parameter, 1e-8)
# max iter
if result.status in (0, 3):
raise RuntimeError("Invalid termination condition.")
def test_args(self):
prob = MaratosTestArgs("a", 234)
result = minimize(prob.fun, prob.x0, ("a", 234),
method='trust-constr',
jac=prob.grad, hess=prob.hess,
bounds=prob.bounds,
constraints=prob.constr)
if prob.x_opt is not None:
assert_array_almost_equal(result.x, prob.x_opt, decimal=2)
# gtol
if result.status == 1:
assert_array_less(result.optimality, 1e-8)
# xtol
if result.status == 2:
assert_array_less(result.tr_radius, 1e-8)
if result.method == "tr_interior_point":
assert_array_less(result.barrier_parameter, 1e-8)
# max iter
if result.status in (0, 3):
raise RuntimeError("Invalid termination condition.")
def test_raise_exception(self):
prob = Maratos()
raises(ValueError, minimize, prob.fun, prob.x0, method='trust-constr',
jac='2-point', hess='2-point', constraints=prob.constr)
| 17,981 | 30.826549 | 82 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_zeros.py
|
from __future__ import division, print_function, absolute_import
from math import sqrt, exp, sin, cos
from numpy.testing import (assert_warns, assert_,
assert_allclose,
assert_equal)
from numpy import finfo
from scipy.optimize import zeros as cc
from scipy.optimize import zeros
# Import testing parameters
from scipy.optimize._tstutils import functions, fstrings
class TestBasic(object):
def run_check(self, method, name):
a = .5
b = sqrt(3)
xtol = 4*finfo(float).eps
rtol = 4*finfo(float).eps
for function, fname in zip(functions, fstrings):
zero, r = method(function, a, b, xtol=xtol, rtol=rtol,
full_output=True)
assert_(r.converged)
assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
err_msg='method %s, function %s' % (name, fname))
def test_bisect(self):
self.run_check(cc.bisect, 'bisect')
def test_ridder(self):
self.run_check(cc.ridder, 'ridder')
def test_brentq(self):
self.run_check(cc.brentq, 'brentq')
def test_brenth(self):
self.run_check(cc.brenth, 'brenth')
def test_newton(self):
f1 = lambda x: x**2 - 2*x - 1
f1_1 = lambda x: 2*x - 2
f1_2 = lambda x: 2.0 + 0*x
f2 = lambda x: exp(x) - cos(x)
f2_1 = lambda x: exp(x) + sin(x)
f2_2 = lambda x: exp(x) + cos(x)
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
x = zeros.newton(f, 3, tol=1e-6)
assert_allclose(f(x), 0, atol=1e-6)
x = zeros.newton(f, 3, fprime=f_1, tol=1e-6)
assert_allclose(f(x), 0, atol=1e-6)
x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6)
assert_allclose(f(x), 0, atol=1e-6)
def test_deriv_zero_warning(self):
func = lambda x: x**2
dfunc = lambda x: 2*x
assert_warns(RuntimeWarning, cc.newton, func, 0.0, dfunc)
def test_gh_5555():
root = 0.1
def f(x):
return x - root
methods = [cc.bisect, cc.ridder]
xtol = 4*finfo(float).eps
rtol = 4*finfo(float).eps
for method in methods:
res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol)
assert_allclose(root, res, atol=xtol, rtol=rtol,
err_msg='method %s' % method.__name__)
def test_gh_5557():
    # Show that without the changes in gh-5557 brentq and brenth might
# only achieve a tolerance of 2*(xtol + rtol*|res|).
# f linearly interpolates (0, -0.1), (0.5, -0.1), and (1,
# 0.4). The important parts are that |f(0)| < |f(1)| (so that
# brent takes 0 as the initial guess), |f(0)| < atol (so that
# brent accepts 0 as the root), and that the exact root of f lies
# more than atol away from 0 (so that brent doesn't achieve the
# desired tolerance).
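    # Concretely for the f below: the exact root is 0.6, |f(0)| = 0.1 is
    # smaller than atol = 0.51, and |0.6 - 0| = 0.6 exceeds atol, matching
    # the conditions described above.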
def f(x):
if x < 0.5:
return -0.1
else:
return x - 0.6
atol = 0.51
rtol = 4*finfo(float).eps
methods = [cc.brentq, cc.brenth]
for method in methods:
res = method(f, 0, 1, xtol=atol, rtol=rtol)
assert_allclose(0.6, res, atol=atol, rtol=rtol)
class TestRootResults:
def test_repr(self):
r = zeros.RootResults(root=1.0,
iterations=44,
function_calls=46,
flag=0)
expected_repr = (" converged: True\n flag: 'converged'"
"\n function_calls: 46\n iterations: 44\n"
" root: 1.0")
assert_equal(repr(r), expected_repr)
def test_complex_halley():
"""Test Halley's works with complex roots"""
def f(x, *a):
return a[0] * x**2 + a[1] * x + a[2]
def f_1(x, *a):
return 2 * a[0] * x + a[1]
def f_2(x, *a):
return 2 * a[0]
z = complex(1.0, 2.0)
coeffs = (2.0, 3.0, 4.0)
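    # The exact roots of 2*x**2 + 3*x + 4 are (-3 +/- 1j*sqrt(23))/4, i.e.
    # approximately -0.75 +/- 1.19896j, so the iteration started at z should
    # land on the root quoted below.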
y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
# (-0.75000000000000078+1.1989578808281789j)
assert_allclose(f(y, *coeffs), 0, atol=1e-6)
| 4,155 | 30.484848 | 78 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_lsq_linear.py
|
import numpy as np
from numpy.linalg import lstsq
from numpy.testing import assert_allclose, assert_equal, assert_
from pytest import raises as assert_raises
from scipy.sparse import rand
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import lsq_linear
A = np.array([
[0.171, -0.057],
[-0.049, -0.248],
[-0.166, 0.054],
])
b = np.array([0.074, 1.014, -0.383])
class BaseMixin(object):
def setup_method(self):
self.rnd = np.random.RandomState(0)
def test_dense_no_bounds(self):
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
assert_allclose(res.x, lstsq(A, b, rcond=-1)[0])
def test_dense_bounds(self):
# Solutions for comparison are taken from MATLAB.
lb = np.array([-1, -10])
ub = np.array([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, lstsq(A, b, rcond=-1)[0])
lb = np.array([0.0, -np.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, np.inf), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.0, -4.084174437334673]),
atol=1e-6)
lb = np.array([-1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, np.inf), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.448427311733504, 0]),
atol=1e-15)
ub = np.array([np.inf, -5])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([-0.105560998682388, -5]))
ub = np.array([-1, np.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([-1, -4.181102129483254]))
lb = np.array([0, -4])
ub = np.array([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.005236663400791, -4]))
def test_dense_rank_deficient(self):
A = np.array([[-0.307, -0.184]])
b = np.array([0.773])
lb = [-0.1, -0.1]
ub = [0.1, 0.1]
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, [-0.1, -0.1])
A = np.array([
[0.334, 0.668],
[-0.516, -1.032],
[0.192, 0.384],
])
b = np.array([-1.436, 0.135, 0.909])
lb = [0, -1]
ub = [1, -0.5]
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.optimality, 0, atol=1e-11)
def test_full_result(self):
lb = np.array([0, -4])
ub = np.array([1, 0])
res = lsq_linear(A, b, (lb, ub), method=self.method)
assert_allclose(res.x, [0.005236663400791, -4])
r = A.dot(res.x) - b
assert_allclose(res.cost, 0.5 * np.dot(r, r))
assert_allclose(res.fun, r)
assert_allclose(res.optimality, 0.0, atol=1e-12)
assert_equal(res.active_mask, [0, -1])
assert_(res.nit < 15)
assert_(res.status == 1 or res.status == 3)
assert_(isinstance(res.message, str))
assert_(res.success)
class SparseMixin(object):
def test_sparse_and_LinearOperator(self):
m = 5000
n = 1000
A = rand(m, n, random_state=0)
b = self.rnd.randn(m)
res = lsq_linear(A, b)
assert_allclose(res.optimality, 0, atol=1e-6)
A = aslinearoperator(A)
res = lsq_linear(A, b)
assert_allclose(res.optimality, 0, atol=1e-6)
def test_sparse_bounds(self):
m = 5000
n = 1000
A = rand(m, n, random_state=0)
b = self.rnd.randn(m)
lb = self.rnd.randn(n)
ub = lb + 1
res = lsq_linear(A, b, (lb, ub))
assert_allclose(res.optimality, 0.0, atol=1e-8)
res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13)
assert_allclose(res.optimality, 0.0, atol=1e-8)
res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto')
assert_allclose(res.optimality, 0.0, atol=1e-8)
class TestTRF(BaseMixin, SparseMixin):
method = 'trf'
lsq_solvers = ['exact', 'lsmr']
class TestBVLS(BaseMixin):
method = 'bvls'
lsq_solvers = ['exact']
| 5,022 | 32.264901 | 77 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_differentiable_functions.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_)
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize._differentiable_functions import (ScalarFunction,
VectorFunction,
LinearVectorFunction,
IdentityVectorFunction)
class ExScalarFunction:
def __init__(self):
self.nfev = 0
self.ngev = 0
self.nhev = 0
def fun(self, x):
self.nfev += 1
return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
def grad(self, x):
self.ngev += 1
return np.array([4*x[0]-1, 4*x[1]])
def hess(self, x):
self.nhev += 1
return 4*np.eye(2)
class TestScalarFunction(TestCase):
def test_finite_difference_grad(self):
ex = ExScalarFunction()
nfev = 0
ngev = 0
x0 = [1.0, 0.0]
analit = ScalarFunction(ex.fun, x0, (), ex.grad,
ex.hess, None, (-np.inf, np.inf))
nfev += 1
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev, ngev)
approx = ScalarFunction(ex.fun, x0, (), '2-point',
ex.hess, None, (-np.inf, np.inf))
nfev += 3
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(analit.f, approx.f)
assert_array_almost_equal(analit.g, approx.g)
x = [10, 0.3]
f_analit = analit.fun(x)
g_analit = analit.grad(x)
nfev += 1
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
f_approx = approx.fun(x)
g_approx = approx.grad(x)
nfev += 3
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_almost_equal(f_analit, f_approx)
assert_array_almost_equal(g_analit, g_approx)
x = [2.0, 1.0]
g_analit = analit.grad(x)
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
g_approx = approx.grad(x)
nfev += 3
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_almost_equal(g_analit, g_approx)
x = [2.5, 0.3]
f_analit = analit.fun(x)
g_analit = analit.grad(x)
nfev += 1
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
f_approx = approx.fun(x)
g_approx = approx.grad(x)
nfev += 3
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_almost_equal(f_analit, f_approx)
assert_array_almost_equal(g_analit, g_approx)
x = [2, 0.3]
f_analit = analit.fun(x)
g_analit = analit.grad(x)
nfev += 1
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
f_approx = approx.fun(x)
g_approx = approx.grad(x)
nfev += 3
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_almost_equal(f_analit, f_approx)
assert_array_almost_equal(g_analit, g_approx)
def test_finite_difference_hess_linear_operator(self):
ex = ExScalarFunction()
nfev = 0
ngev = 0
nhev = 0
x0 = [1.0, 0.0]
analit = ScalarFunction(ex.fun, x0, (), ex.grad,
ex.hess, None, (-np.inf, np.inf))
nfev += 1
ngev += 1
nhev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev, ngev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev, nhev)
approx = ScalarFunction(ex.fun, x0, (), ex.grad,
'2-point', None, (-np.inf, np.inf))
assert_(isinstance(approx.H, LinearOperator))
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
assert_array_equal(analit.f, approx.f)
assert_array_almost_equal(analit.g, approx.g)
assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v))
nfev += 1
ngev += 4
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
x = [2.0, 1.0]
H_analit = analit.hess(x)
nhev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
H_approx = approx.hess(x)
assert_(isinstance(H_approx, LinearOperator))
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
ngev += 4
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
x = [2.1, 1.2]
H_analit = analit.hess(x)
nhev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
H_approx = approx.hess(x)
assert_(isinstance(H_approx, LinearOperator))
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
ngev += 4
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
x = [2.5, 0.3]
_ = analit.grad(x)
H_analit = analit.hess(x)
ngev += 1
nhev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
_ = approx.grad(x)
H_approx = approx.hess(x)
assert_(isinstance(H_approx, LinearOperator))
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
ngev += 4
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
x = [5.2, 2.3]
_ = analit.grad(x)
H_analit = analit.hess(x)
ngev += 1
nhev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
_ = approx.grad(x)
H_approx = approx.hess(x)
assert_(isinstance(H_approx, LinearOperator))
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
ngev += 4
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
class ExVectorialFunction:
def __init__(self):
self.nfev = 0
self.njev = 0
self.nhev = 0
def fun(self, x):
self.nfev += 1
return np.array([2*(x[0]**2 + x[1]**2 - 1) - x[0],
4*(x[0]**3 + x[1]**2 - 4) - 3*x[0]])
def jac(self, x):
self.njev += 1
return np.array([[4*x[0]-1, 4*x[1]],
[12*x[0]**2-3, 8*x[1]]])
def hess(self, x, v):
self.nhev += 1
return v[0]*4*np.eye(2) + v[1]*np.array([[24*x[0], 0],
[0, 8]])
class TestVectorialFunction(TestCase):
def test_finite_difference_jac(self):
ex = ExVectorialFunction()
nfev = 0
njev = 0
x0 = [1.0, 0.0]
v0 = [0.0, 1.0]
analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
(-np.inf, np.inf), None)
nfev += 1
njev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev, njev)
approx = VectorFunction(ex.fun, x0, '2-point', ex.hess, None, None,
(-np.inf, np.inf), None)
nfev += 3
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_equal(analit.f, approx.f)
assert_array_almost_equal(analit.J, approx.J)
x = [10, 0.3]
f_analit = analit.fun(x)
J_analit = analit.jac(x)
nfev += 1
njev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
f_approx = approx.fun(x)
J_approx = approx.jac(x)
nfev += 3
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_almost_equal(f_analit, f_approx)
assert_array_almost_equal(J_analit, J_approx, decimal=4)
x = [2.0, 1.0]
J_analit = analit.jac(x)
njev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
J_approx = approx.jac(x)
nfev += 3
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_almost_equal(J_analit, J_approx)
x = [2.5, 0.3]
f_analit = analit.fun(x)
J_analit = analit.jac(x)
nfev += 1
njev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
f_approx = approx.fun(x)
J_approx = approx.jac(x)
nfev += 3
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_almost_equal(f_analit, f_approx)
assert_array_almost_equal(J_analit, J_approx)
x = [2, 0.3]
f_analit = analit.fun(x)
J_analit = analit.jac(x)
nfev += 1
njev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
f_approx = approx.fun(x)
J_approx = approx.jac(x)
nfev += 3
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_almost_equal(f_analit, f_approx)
assert_array_almost_equal(J_analit, J_approx)
def test_finite_difference_hess_linear_operator(self):
ex = ExVectorialFunction()
nfev = 0
njev = 0
nhev = 0
x0 = [1.0, 0.0]
v0 = [1.0, 2.0]
analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
(-np.inf, np.inf), None)
nfev += 1
njev += 1
nhev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev, njev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev, nhev)
approx = VectorFunction(ex.fun, x0, ex.jac, '2-point', None, None,
(-np.inf, np.inf), None)
assert_(isinstance(approx.H, LinearOperator))
for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
assert_array_equal(analit.f, approx.f)
assert_array_almost_equal(analit.J, approx.J)
assert_array_almost_equal(analit.H.dot(p), approx.H.dot(p))
nfev += 1
njev += 4
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
x = [2.0, 1.0]
H_analit = analit.hess(x, v0)
nhev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
H_approx = approx.hess(x, v0)
assert_(isinstance(H_approx, LinearOperator))
for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
assert_array_almost_equal(H_analit.dot(p), H_approx.dot(p),
decimal=5)
njev += 4
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
x = [2.1, 1.2]
v = [1.0, 1.0]
H_analit = analit.hess(x, v)
nhev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
H_approx = approx.hess(x, v)
assert_(isinstance(H_approx, LinearOperator))
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
njev += 4
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
x = [2.5, 0.3]
_ = analit.jac(x)
H_analit = analit.hess(x, v0)
njev += 1
nhev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
_ = approx.jac(x)
H_approx = approx.hess(x, v0)
assert_(isinstance(H_approx, LinearOperator))
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4)
njev += 4
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
x = [5.2, 2.3]
v = [2.3, 5.2]
_ = analit.jac(x)
H_analit = analit.hess(x, v)
njev += 1
nhev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
_ = approx.jac(x)
H_approx = approx.hess(x, v)
assert_(isinstance(H_approx, LinearOperator))
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4)
njev += 4
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(ex.njev, njev)
assert_array_equal(analit.njev+approx.njev, njev)
assert_array_equal(ex.nhev, nhev)
assert_array_equal(analit.nhev+approx.nhev, nhev)
def test_LinearVectorFunction():
A_dense = np.array([
[-1, 2, 0],
[0, 4, 2]
])
x0 = np.zeros(3)
A_sparse = csr_matrix(A_dense)
x = np.array([1, -1, 0])
v = np.array([-1, 1])
Ax = np.array([-3, -4])
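    # Sanity check: A_dense.dot(x) = [-1*1 + 2*(-1), 4*(-1) + 2*0] = [-3, -4]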
f1 = LinearVectorFunction(A_dense, x0, None)
assert_(not f1.sparse_jacobian)
f2 = LinearVectorFunction(A_dense, x0, True)
assert_(f2.sparse_jacobian)
f3 = LinearVectorFunction(A_dense, x0, False)
assert_(not f3.sparse_jacobian)
f4 = LinearVectorFunction(A_sparse, x0, None)
assert_(f4.sparse_jacobian)
f5 = LinearVectorFunction(A_sparse, x0, True)
assert_(f5.sparse_jacobian)
f6 = LinearVectorFunction(A_sparse, x0, False)
assert_(not f6.sparse_jacobian)
assert_array_equal(f1.fun(x), Ax)
assert_array_equal(f2.fun(x), Ax)
assert_array_equal(f1.jac(x), A_dense)
assert_array_equal(f2.jac(x).toarray(), A_sparse.toarray())
assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3)))
def test_LinearVectorFunction_memoization():
A = np.array([[-1, 2, 0], [0, 4, 2]])
x0 = np.array([1, 2, -1])
fun = LinearVectorFunction(A, x0, False)
assert_array_equal(x0, fun.x)
assert_array_equal(A.dot(x0), fun.f)
x1 = np.array([-1, 3, 10])
assert_array_equal(A, fun.jac(x1))
assert_array_equal(x1, fun.x)
assert_array_equal(A.dot(x0), fun.f)
assert_array_equal(A.dot(x1), fun.fun(x1))
assert_array_equal(A.dot(x1), fun.f)
def test_IdentityVectorFunction():
x0 = np.zeros(3)
f1 = IdentityVectorFunction(x0, None)
f2 = IdentityVectorFunction(x0, False)
f3 = IdentityVectorFunction(x0, True)
assert_(f1.sparse_jacobian)
assert_(not f2.sparse_jacobian)
assert_(f3.sparse_jacobian)
x = np.array([-1, 2, 1])
v = np.array([-2, 3, 0])
assert_array_equal(f1.fun(x), x)
assert_array_equal(f2.fun(x), x)
assert_array_equal(f1.jac(x).toarray(), np.eye(3))
assert_array_equal(f2.jac(x), np.eye(3))
assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3)))
| 21,514 | 36.030981 | 82 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_hungarian.py
|
# Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck
# License: BSD
from numpy.testing import assert_array_equal
from pytest import raises as assert_raises
import numpy as np
from scipy.optimize import linear_sum_assignment
def test_linear_sum_assignment():
for cost_matrix, expected_cost in [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
[150, 400, 300]
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
[150, 2, 300]),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
[10, 1, 7]),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
[10, 1, 4]),
# n == 2, m == 0 matrix
([[], []],
[]),
]:
cost_matrix = np.array(cost_matrix)
row_ind, col_ind = linear_sum_assignment(cost_matrix)
assert_array_equal(row_ind, np.sort(row_ind))
assert_array_equal(expected_cost, cost_matrix[row_ind, col_ind])
cost_matrix = cost_matrix.T
row_ind, col_ind = linear_sum_assignment(cost_matrix)
assert_array_equal(row_ind, np.sort(row_ind))
assert_array_equal(np.sort(expected_cost),
np.sort(cost_matrix[row_ind, col_ind]))
def test_linear_sum_assignment_input_validation():
assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])
C = [[1, 2, 3], [4, 5, 6]]
assert_array_equal(linear_sum_assignment(C),
linear_sum_assignment(np.asarray(C)))
assert_array_equal(linear_sum_assignment(C),
linear_sum_assignment(np.matrix(C)))
I = np.identity(3)
assert_array_equal(linear_sum_assignment(I.astype(np.bool)),
linear_sum_assignment(I))
assert_raises(ValueError, linear_sum_assignment, I.astype(str))
I[0][0] = np.nan
assert_raises(ValueError, linear_sum_assignment, I)
I = np.identity(3)
I[1][1] = np.inf
assert_raises(ValueError, linear_sum_assignment, I)
| 2,155 | 27.746667 | 72 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test_trustregion_exact.py
|
"""
Unit tests for trust-region iterative subproblem.
To run it in its simplest form::
    nosetests test_trustregion_exact.py
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize._trustregion_exact import (
estimate_smallest_singular_value,
singular_leading_submatrix,
IterativeSubproblem)
from scipy.linalg import (svd, get_lapack_funcs, det,
cho_factor, cho_solve, qr,
eigvalsh, eig, norm)
from numpy.testing import (assert_, assert_array_equal,
assert_equal, assert_array_almost_equal,
assert_array_less)
def random_entry(n, min_eig, max_eig, case):
# Generate random matrix
rand = np.random.uniform(-1, 1, (n, n))
# QR decomposition
    Q, _, _ = qr(rand, pivoting=True)
# Generate random eigenvalues
eigvalues = np.random.uniform(min_eig, max_eig, n)
eigvalues = np.sort(eigvalues)[::-1]
# Generate matrix
Qaux = np.multiply(eigvalues, Q)
A = np.dot(Qaux, Q.T)
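    # Since Q is orthogonal, A = Q * diag(eigvalues) * Q.T is symmetric with
    # exactly the prescribed eigenvalues.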
    # Generate the gradient vector according
    # to the case being tested.
if case == 'hard':
g = np.zeros(n)
g[:-1] = np.random.uniform(-1, 1, n-1)
g = np.dot(Q, g)
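        # With g[-1] = 0, g = Q.dot(g) has no component along Q[:, -1], the
        # eigenvector of the smallest eigenvalue, which is what makes the
        # subproblem "hard".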
elif case == 'jac_equal_zero':
g = np.zeros(n)
else:
g = np.random.uniform(-1, 1, n)
return A, g
class TestEstimateSmallestSingularValue(object):
    def test_for_ill_conditioned_matrix(self):
# Ill-conditioned triangular matrix
C = np.array([[1, 2, 3, 4],
[0, 0.05, 60, 7],
[0, 0, 0.8, 9],
[0, 0, 0, 10]])
# Get svd decomposition
U, s, Vt = svd(C)
        # Get smallest singular value and corresponding right singular vector.
smin_svd = s[-1]
zmin_svd = Vt[-1, :]
# Estimate smallest singular value
smin, zmin = estimate_smallest_singular_value(C)
# Check the estimation
assert_array_almost_equal(smin, smin_svd, decimal=8)
assert_array_almost_equal(abs(zmin), abs(zmin_svd), decimal=8)
class TestSingularLeadingSubmatrix(object):
def test_for_already_singular_leading_submatrix(self):
# Define test matrix A.
# Note that the leading 2x2 submatrix is singular.
A = np.array([[1, 2, 3],
[2, 4, 5],
[3, 5, 6]])
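        # (Indeed, the leading 2x2 block [[1, 2], [2, 4]] has determinant
        # 1*4 - 2*2 = 0.)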
# Get Cholesky from lapack functions
cholesky, = get_lapack_funcs(('potrf',), (A,))
# Compute Cholesky Decomposition
c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
delta, v = singular_leading_submatrix(A, c, k)
A[k-1, k-1] += delta
# Check if the leading submatrix is singular.
assert_array_almost_equal(det(A[:k, :k]), 0)
        # Check that `v` fulfills the specified properties
quadratic_term = np.dot(v, np.dot(A, v))
assert_array_almost_equal(quadratic_term, 0)
    def test_for_symmetric_indefinite_matrix(self):
# Define test matrix A.
# Note that the leading 5x5 submatrix is indefinite.
A = np.asarray([[1, 2, 3, 7, 8],
[2, 5, 5, 9, 0],
[3, 5, 11, 1, 2],
[7, 9, 1, 7, 5],
[8, 0, 2, 5, 8]])
# Get Cholesky from lapack functions
cholesky, = get_lapack_funcs(('potrf',), (A,))
# Compute Cholesky Decomposition
c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
delta, v = singular_leading_submatrix(A, c, k)
A[k-1, k-1] += delta
# Check if the leading submatrix is singular.
assert_array_almost_equal(det(A[:k, :k]), 0)
        # Check that `v` fulfills the specified properties
quadratic_term = np.dot(v, np.dot(A, v))
assert_array_almost_equal(quadratic_term, 0)
def test_for_first_element_equal_to_zero(self):
# Define test matrix A.
# Note that the leading 2x2 submatrix is singular.
A = np.array([[0, 3, 11],
[3, 12, 5],
[11, 5, 6]])
# Get Cholesky from lapack functions
cholesky, = get_lapack_funcs(('potrf',), (A,))
# Compute Cholesky Decomposition
c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
delta, v = singular_leading_submatrix(A, c, k)
A[k-1, k-1] += delta
# Check if the leading submatrix is singular
assert_array_almost_equal(det(A[:k, :k]), 0)
        # Check that `v` fulfills the specified properties
quadratic_term = np.dot(v, np.dot(A, v))
assert_array_almost_equal(quadratic_term, 0)
class TestIterativeSubproblem(object):
def test_for_the_easy_case(self):
# `H` is chosen such that `g` is not orthogonal to the
# eigenvector associated with the smallest eigenvalue `s`.
H = [[10, 2, 3, 4],
[2, 1, 7, 1],
[3, 7, 1, 7],
[4, 1, 7, 2]]
g = [1, 1, 1, 1]
# Trust Radius
trust_radius = 1
# Solve Subproblem
subprob = IterativeSubproblem(x=0,
fun=lambda x: 0,
jac=lambda x: np.array(g),
hess=lambda x: np.array(H),
k_easy=1e-10,
k_hard=1e-10)
p, hits_boundary = subprob.solve(trust_radius)
assert_array_almost_equal(p, [0.00393332, -0.55260862,
0.67065477, -0.49480341])
assert_array_almost_equal(hits_boundary, True)
def test_for_the_hard_case(self):
# `H` is chosen such that `g` is orthogonal to the
# eigenvector associated with the smallest eigenvalue `s`.
H = [[10, 2, 3, 4],
[2, 1, 7, 1],
[3, 7, 1, 7],
[4, 1, 7, 2]]
g = [6.4852641521327437, 1, 1, 1]
s = -8.2151519874416614
# Trust Radius
trust_radius = 1
# Solve Subproblem
subprob = IterativeSubproblem(x=0,
fun=lambda x: 0,
jac=lambda x: np.array(g),
hess=lambda x: np.array(H),
k_easy=1e-10,
k_hard=1e-10)
p, hits_boundary = subprob.solve(trust_radius)
assert_array_almost_equal(-s, subprob.lambda_current)
def test_for_interior_convergence(self):
H = [[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988],
[0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588],
[0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867],
[-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166],
[0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]]
g = [0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534]
# Solve Subproblem
subprob = IterativeSubproblem(x=0,
fun=lambda x: 0,
jac=lambda x: np.array(g),
hess=lambda x: np.array(H))
p, hits_boundary = subprob.solve(1.1)
assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999,
-0.67005053, 0.31586769])
assert_array_almost_equal(hits_boundary, False)
assert_array_almost_equal(subprob.lambda_current, 0)
assert_array_almost_equal(subprob.niter, 1)
def test_for_jac_equal_zero(self):
H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
[2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
[0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
[-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
[-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]
g = [0, 0, 0, 0, 0]
# Solve Subproblem
subprob = IterativeSubproblem(x=0,
fun=lambda x: 0,
jac=lambda x: np.array(g),
hess=lambda x: np.array(H),
k_easy=1e-10,
k_hard=1e-10)
p, hits_boundary = subprob.solve(1.1)
assert_array_almost_equal(p, [0.06910534, -0.01432721,
-0.65311947, -0.23815972,
-0.84954934])
assert_array_almost_equal(hits_boundary, True)
def test_for_jac_very_close_to_zero(self):
H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
[2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
[0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
[-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
[-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]
g = [0, 0, 0, 0, 1e-15]
# Solve Subproblem
subprob = IterativeSubproblem(x=0,
fun=lambda x: 0,
jac=lambda x: np.array(g),
hess=lambda x: np.array(H),
k_easy=1e-10,
k_hard=1e-10)
p, hits_boundary = subprob.solve(1.1)
assert_array_almost_equal(p, [0.06910534, -0.01432721,
-0.65311947, -0.23815972,
-0.84954934])
assert_array_almost_equal(hits_boundary, True)
def test_for_random_entries(self):
# Seed
np.random.seed(1)
# Dimension
n = 5
for case in ('easy', 'hard', 'jac_equal_zero'):
eig_limits = [(-20, -15),
(-10, -5),
(-10, 0),
(-5, 5),
(-10, 10),
(0, 10),
(5, 10),
(15, 20)]
for min_eig, max_eig in eig_limits:
# Generate random symmetric matrix H with
# eigenvalues between min_eig and max_eig.
H, g = random_entry(n, min_eig, max_eig, case)
# Trust radius
trust_radius_list = [0.1, 0.3, 0.6, 0.8, 1, 1.2, 3.3, 5.5, 10]
for trust_radius in trust_radius_list:
# Solve subproblem with very high accuracy
subprob_ac = IterativeSubproblem(0,
lambda x: 0,
lambda x: g,
lambda x: H,
k_easy=1e-10,
k_hard=1e-10)
p_ac, hits_boundary_ac = subprob_ac.solve(trust_radius)
# Compute objective function value
J_ac = 1/2*np.dot(p_ac, np.dot(H, p_ac))+np.dot(g, p_ac)
stop_criteria = [(0.1, 2),
(0.5, 1.1),
(0.9, 1.01)]
for k_opt, k_trf in stop_criteria:
                        # k_easy and k_hard are computed as functions
                        # of k_opt and k_trf according to
                        # Conn, A. R., Gould, N. I., & Toint, P. L. (2000).
                        # "Trust region methods". SIAM, p. 197.
k_easy = min(k_trf-1,
1-np.sqrt(k_opt))
k_hard = 1-k_opt
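                        # With these tolerances the checks below verify that
                        # ||p|| lies within (k_trf - 1)*trust_radius of the
                        # boundary and that J <= k_opt * J_ac.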
# Solve subproblem
subprob = IterativeSubproblem(0,
lambda x: 0,
lambda x: g,
lambda x: H,
k_easy=k_easy,
k_hard=k_hard)
p, hits_boundary = subprob.solve(trust_radius)
# Compute objective function value
J = 1/2*np.dot(p, np.dot(H, p))+np.dot(g, p)
                        # Check that it respects k_trf
if hits_boundary:
assert_array_equal(np.abs(norm(p)-trust_radius) <=
(k_trf-1)*trust_radius, True)
else:
assert_equal(norm(p) <= trust_radius, True)
                        # Check that it respects k_opt
assert_equal(J <= k_opt*J_ac, True)
| 13,189 | 35.843575 | 78 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/tests/test__differential_evolution.py
|
"""
Unit tests for the differential global minimization algorithm.
"""
from scipy.optimize import _differentialevolution
from scipy.optimize._differentialevolution import DifferentialEvolutionSolver
from scipy.optimize import differential_evolution
import numpy as np
from scipy.optimize import rosen
from numpy.testing import (assert_equal, assert_allclose,
assert_almost_equal,
assert_string_equal, assert_)
from pytest import raises as assert_raises
class TestDifferentialEvolutionSolver(object):
def setup_method(self):
self.old_seterr = np.seterr(invalid='raise')
self.limits = np.array([[0., 0.],
[2., 2.]])
self.bounds = [(0., 2.), (0., 2.)]
self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,
[(0, 100)])
# dummy_solver2 will be used to test mutation strategies
self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,
[(0, 1)],
popsize=7,
mutation=0.5)
# create a population that's only 7 members long
# [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T
self.dummy_solver2.population = population
def teardown_method(self):
np.seterr(**self.old_seterr)
def quadratic(self, x):
return x[0]**2
def test__strategy_resolves(self):
# test that the correct mutation function is resolved by
# different requested strategy arguments
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='best1exp')
assert_equal(solver.strategy, 'best1exp')
assert_equal(solver.mutation_func.__name__, '_best1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='best1bin')
assert_equal(solver.strategy, 'best1bin')
assert_equal(solver.mutation_func.__name__, '_best1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='rand1bin')
assert_equal(solver.strategy, 'rand1bin')
assert_equal(solver.mutation_func.__name__, '_rand1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='rand1exp')
assert_equal(solver.strategy, 'rand1exp')
assert_equal(solver.mutation_func.__name__, '_rand1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='rand2exp')
assert_equal(solver.strategy, 'rand2exp')
assert_equal(solver.mutation_func.__name__, '_rand2')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='best2bin')
assert_equal(solver.strategy, 'best2bin')
assert_equal(solver.mutation_func.__name__, '_best2')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='rand2bin')
assert_equal(solver.strategy, 'rand2bin')
assert_equal(solver.mutation_func.__name__, '_rand2')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='rand2exp')
assert_equal(solver.strategy, 'rand2exp')
assert_equal(solver.mutation_func.__name__, '_rand2')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='randtobest1bin')
assert_equal(solver.strategy, 'randtobest1bin')
assert_equal(solver.mutation_func.__name__, '_randtobest1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='randtobest1exp')
assert_equal(solver.strategy, 'randtobest1exp')
assert_equal(solver.mutation_func.__name__, '_randtobest1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='currenttobest1bin')
assert_equal(solver.strategy, 'currenttobest1bin')
assert_equal(solver.mutation_func.__name__, '_currenttobest1')
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='currenttobest1exp')
assert_equal(solver.strategy, 'currenttobest1exp')
assert_equal(solver.mutation_func.__name__, '_currenttobest1')
def test__mutate1(self):
# strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.
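        # Worked values for the population [0.1, ..., 0.7] with mutation=0.5,
        # assuming the sample indices are consumed in order:
        #     best/1: population[0] + 0.5*(population[2] - population[3])
        #             = 0.1 + 0.5*(0.3 - 0.4) = 0.05
        #     rand/1: population[2] + 0.5*(population[3] - population[4])
        #             = 0.3 + 0.5*(0.4 - 0.5) = 0.25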
result = np.array([0.05])
trial = self.dummy_solver2._best1((2, 3, 4, 5, 6))
assert_allclose(trial, result)
result = np.array([0.25])
trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6))
assert_allclose(trial, result)
def test__mutate2(self):
# strategies */2/*, i.e. rand/2/bin, best/2/exp, etc.
# [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
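        # e.g. best/2 (again assuming samples are consumed in order):
        # population[0] + 0.5*(population[2] + population[3] - population[4]
        # - population[5]) = 0.1 + 0.5*(0.3 + 0.4 - 0.5 - 0.6) = -0.1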
result = np.array([-0.1])
trial = self.dummy_solver2._best2((2, 3, 4, 5, 6))
assert_allclose(trial, result)
result = np.array([0.1])
trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6))
assert_allclose(trial, result)
def test__randtobest1(self):
# strategies randtobest/1/*
result = np.array([0.15])
trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6))
assert_allclose(trial, result)
def test__currenttobest1(self):
# strategies currenttobest/1/*
result = np.array([0.1])
trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6))
assert_allclose(trial, result)
def test_can_init_with_dithering(self):
mutation = (0.5, 1)
solver = DifferentialEvolutionSolver(self.quadratic,
self.bounds,
mutation=mutation)
assert_equal(solver.dither, list(mutation))
def test_invalid_mutation_values_arent_accepted(self):
func = rosen
mutation = (0.5, 3)
assert_raises(ValueError,
DifferentialEvolutionSolver,
func,
self.bounds,
mutation=mutation)
mutation = (-1, 1)
assert_raises(ValueError,
DifferentialEvolutionSolver,
func,
self.bounds,
mutation=mutation)
mutation = (0.1, np.nan)
assert_raises(ValueError,
DifferentialEvolutionSolver,
func,
self.bounds,
mutation=mutation)
mutation = 0.5
solver = DifferentialEvolutionSolver(func,
self.bounds,
mutation=mutation)
assert_equal(0.5, solver.scale)
assert_equal(None, solver.dither)
def test__scale_parameters(self):
trial = np.array([0.3])
assert_equal(30, self.dummy_solver._scale_parameters(trial))
# it should also work with the limits reversed
self.dummy_solver.limits = np.array([[100], [0.]])
assert_equal(30, self.dummy_solver._scale_parameters(trial))
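        # Presumably the scaling is the affine map
        # 0.5*(lb + ub) + (trial - 0.5)*abs(ub - lb), i.e.
        # 50 + (0.3 - 0.5)*100 = 30, which is symmetric in lb and ub and
        # hence insensitive to their order.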
def test__unscale_parameters(self):
trial = np.array([30])
assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
# it should also work with the limits reversed
self.dummy_solver.limits = np.array([[100], [0.]])
assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
def test__ensure_constraint(self):
trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001])
self.dummy_solver._ensure_constraint(trial)
assert_equal(trial[2], 0.9)
assert_(np.logical_and(trial >= 0, trial <= 1).all())
def test_differential_evolution(self):
# test that the Jmin of DifferentialEvolutionSolver
# is the same as the function evaluation
solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
result = solver.solve()
assert_almost_equal(result.fun, self.quadratic(result.x))
def test_best_solution_retrieval(self):
# test that the getter property method for the best solution works.
solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
result = solver.solve()
assert_almost_equal(result.x, solver.x)
def test_callback_terminates(self):
# test that if the callback returns true, then the minimization halts
bounds = [(0, 2), (0, 2)]
def callback(param, convergence=0.):
return True
result = differential_evolution(rosen, bounds, callback=callback)
assert_string_equal(result.message,
'callback function requested stop early '
'by returning True')
def test_args_tuple_is_passed(self):
# test that the args tuple is passed to the cost function properly.
bounds = [(-10, 10)]
args = (1., 2., 3.)
def quadratic(x, *args):
if type(args) != tuple:
raise ValueError('args should be a tuple')
return args[0] + args[1] * x + args[2] * x**2.
result = differential_evolution(quadratic,
bounds,
args=args,
polish=True)
assert_almost_equal(result.fun, 2 / 3.)
def test_init_with_invalid_strategy(self):
# test that passing an invalid strategy raises ValueError
func = rosen
bounds = [(-3, 3)]
assert_raises(ValueError,
differential_evolution,
func,
bounds,
strategy='abc')
def test_bounds_checking(self):
# test that the bounds checking works
func = rosen
bounds = [(-3, None)]
assert_raises(ValueError,
differential_evolution,
func,
bounds)
bounds = [(-3)]
assert_raises(ValueError,
differential_evolution,
func,
bounds)
bounds = [(-3, 3), (3, 4, 5)]
assert_raises(ValueError,
differential_evolution,
func,
bounds)
def test_select_samples(self):
# select_samples should return 5 separate random numbers.
limits = np.arange(12., dtype='float64').reshape(2, 6)
bounds = list(zip(limits[0, :], limits[1, :]))
solver = DifferentialEvolutionSolver(None, bounds, popsize=1)
candidate = 0
r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)
assert_equal(
len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)
def test_maxiter_stops_solve(self):
# test that if the maximum number of iterations is exceeded
# the solver stops.
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)
result = solver.solve()
assert_equal(result.success, False)
assert_equal(result.message,
'Maximum number of iterations has been exceeded.')
def test_maxfun_stops_solve(self):
# test that if the maximum number of function evaluations is exceeded
# during initialisation the solver stops
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,
polish=False)
result = solver.solve()
assert_equal(result.nfev, 2)
assert_equal(result.success, False)
assert_equal(result.message,
'Maximum number of function evaluations has '
'been exceeded.')
# test that if the maximum number of function evaluations is exceeded
# during the actual minimisation, then the solver stops.
# Have to turn polishing off, as this will still occur even if maxfun
        # is reached. For popsize=5 and len(bounds)=2, there are only 10
# function evaluations during initialisation.
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
popsize=5,
polish=False,
maxfun=40)
result = solver.solve()
assert_equal(result.nfev, 41)
assert_equal(result.success, False)
assert_equal(result.message,
'Maximum number of function evaluations has '
'been exceeded.')
def test_quadratic(self):
# test the quadratic function from object
solver = DifferentialEvolutionSolver(self.quadratic,
[(-100, 100)],
tol=0.02)
solver.solve()
assert_equal(np.argmin(solver.population_energies), 0)
def test_quadratic_from_diff_ev(self):
# test the quadratic function from differential_evolution function
differential_evolution(self.quadratic,
[(-100, 100)],
tol=0.02)
def test_seed_gives_repeatability(self):
result = differential_evolution(self.quadratic,
[(-100, 100)],
polish=False,
seed=1,
tol=0.5)
result2 = differential_evolution(self.quadratic,
[(-100, 100)],
polish=False,
seed=1,
tol=0.5)
assert_equal(result.x, result2.x)
def test_exp_runs(self):
# test whether exponential mutation loop runs
solver = DifferentialEvolutionSolver(rosen,
self.bounds,
strategy='best1exp',
maxiter=1)
solver.solve()
def test_gh_4511_regression(self):
# This modification of the differential evolution docstring example
# uses a custom popsize that had triggered an off-by-one error.
# Because we do not care about solving the optimization problem in
# this test, we use maxiter=1 to reduce the testing time.
bounds = [(-5, 5), (-5, 5)]
result = differential_evolution(rosen, bounds, popsize=1815, maxiter=1)
def test_calculate_population_energies(self):
# if popsize is 3 then the overall generation has size (6,)
solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3)
solver._calculate_population_energies()
assert_equal(np.argmin(solver.population_energies), 0)
# initial calculation of the energies should require 6 nfev.
assert_equal(solver._nfev, 6)
def test_iteration(self):
# test that DifferentialEvolutionSolver is iterable
# if popsize is 3 then the overall generation has size (6,)
solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3,
maxfun=12)
x, fun = next(solver)
assert_equal(np.size(x, 0), 2)
# 6 nfev are required for initial calculation of energies, 6 nfev are
# required for the evolution of the 6 population members.
assert_equal(solver._nfev, 12)
# the next generation should halt because it exceeds maxfun
assert_raises(StopIteration, next, solver)
# check a proper minimisation can be done by an iterable solver
solver = DifferentialEvolutionSolver(rosen, self.bounds)
for i, soln in enumerate(solver):
x_current, fun_current = soln
# need to have this otherwise the solver would never stop.
if i == 1000:
break
assert_almost_equal(fun_current, 0)
def test_convergence(self):
solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,
polish=False)
solver.solve()
assert_(solver.convergence < 0.2)
def test_maxiter_none_GH5731(self):
        # Pre 0.17 the default for maxiter and maxfun was None.
        # The numerical defaults are now 1000 and np.inf. However, some
        # scripts still supply None for both of those, which used to raise
        # a TypeError in the solve method.
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,
maxfun=None)
solver.solve()
def test_population_initiation(self):
# test the different modes of population initiation
# init must be either 'latinhypercube' or 'random'
        # raising ValueError if something else is passed in
assert_raises(ValueError,
DifferentialEvolutionSolver,
*(rosen, self.bounds),
**{'init': 'rubbish'})
solver = DifferentialEvolutionSolver(rosen, self.bounds)
# check that population initiation:
# 1) resets _nfev to 0
# 2) all population energies are np.inf
solver.init_population_random()
assert_equal(solver._nfev, 0)
assert_(np.all(np.isinf(solver.population_energies)))
solver.init_population_lhs()
assert_equal(solver._nfev, 0)
assert_(np.all(np.isinf(solver.population_energies)))
# we should be able to initialise with our own array
population = np.linspace(-1, 3, 10).reshape(5, 2)
solver = DifferentialEvolutionSolver(rosen, self.bounds,
init=population,
strategy='best2bin',
atol=0.01, seed=1, popsize=5)
assert_equal(solver._nfev, 0)
assert_(np.all(np.isinf(solver.population_energies)))
assert_(solver.num_population_members == 5)
assert_(solver.population_shape == (5, 2))
# check that the population was initialised correctly
unscaled_population = np.clip(solver._unscale_parameters(population),
0, 1)
assert_almost_equal(solver.population[:5], unscaled_population)
# population values need to be clipped to bounds
assert_almost_equal(np.min(solver.population[:5]), 0)
assert_almost_equal(np.max(solver.population[:5]), 1)
# shouldn't be able to initialise with an array if it's the wrong shape
# this would have too many parameters
population = np.linspace(-1, 3, 15).reshape(5, 3)
assert_raises(ValueError,
DifferentialEvolutionSolver,
*(rosen, self.bounds),
**{'init': population})
| 20,204 | 41.269874 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trlib/setup.py
|
from __future__ import division, print_function, absolute_import
def configuration(parent_package='', top_path=None):
from numpy import get_include
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
from os.path import join, dirname
lapack_opt = get_info('lapack_opt')
lib_inc = join(dirname(dirname(dirname(__file__))), '_lib')
if not lapack_opt:
raise NotFoundError('no lapack/blas resources found')
config = Configuration('_trlib', parent_package, top_path)
config.add_extension('_trlib',
sources=['_trlib.c', 'trlib_krylov.c',
'trlib_eigen_inverse.c', 'trlib_leftmost.c',
'trlib_quadratic_zero.c', 'trlib_tri_factor.c'],
include_dirs=[get_include(), lib_inc, 'trlib'],
extra_info=lapack_opt,
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 1,123 | 36.466667 | 82 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_trlib/__init__.py
|
from ._trlib import TRLIBQuadraticSubproblem
__all__ = ['TRLIBQuadraticSubproblem', 'get_trlib_quadratic_subproblem']
def get_trlib_quadratic_subproblem(tol_rel_i=-2.0, tol_rel_b=-3.0, disp=False):
def subproblem_factory(x, fun, jac, hess, hessp):
return TRLIBQuadraticSubproblem(x, fun, jac, hess, hessp,
tol_rel_i=tol_rel_i,
tol_rel_b=tol_rel_b,
disp=disp)
return subproblem_factory
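# Usage sketch (illustrative, based on how SciPy's trust-region drivers
# consume this factory): ``minimize(..., method='trust-krylov')`` builds its
# subproblem solver roughly as
#
#     get_trlib_quadratic_subproblem(tol_rel_i=-2.0, tol_rel_b=-3.0)
#
# and then instantiates the returned factory as
# ``subproblem(x, fun, jac, hess, hessp)`` at each iterate.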
| 524 | 39.384615 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_lsq/dogbox.py
|
"""
Dogleg algorithm with rectangular trust regions for least-squares minimization.
The description of the algorithm can be found in [Voglis]_. The algorithm does
trust-region iterations, but the shape of the trust regions is rectangular,
as opposed to the conventional elliptical shape. The intersection of a trust
region and the initial feasible region is again some rectangle. Thus on each
iteration a bound-constrained quadratic optimization problem is solved.
A quadratic problem is solved by the well-known dogleg approach, where the
function is minimized along a piecewise-linear "dogleg" path [NumOpt]_,
Chapter 4. If the Jacobian is not rank-deficient then the function is
decreasing along this path, and optimization amounts to simply following
along this path as long as a point stays within the bounds. A constrained
Cauchy step (along the anti-gradient) is considered for safety in
rank-deficient cases, in which the convergence might be slow.
If during iterations some variable hits the initial bound and the component
of the anti-gradient points outside the feasible region, then the next
dogleg step won't make any progress. Such variables then satisfy first-order
optimality conditions and are excluded before the next dogleg step is
computed.
The Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for
dense Jacobian matrices) or by the iterative procedure
`scipy.sparse.linalg.lsmr` (for dense and sparse matrices, or a Jacobian
given as a LinearOperator). The second option allows solving very large
problems (up to a couple of million residuals on a regular PC), provided the
Jacobian matrix is sufficiently sparse. Note, however, that dogbox is not
very good at solving problems with a large number of constraints, because
variables are excluded and included on each iteration (the required number
of function evaluations might be high, or the accuracy of a solution will be
poor); thus its large-scale usage is probably limited to unconstrained
problems.
References
----------
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
Approach for Unconstrained and Bound Constrained Nonlinear
Optimization", WSEAS International Conference on Applied
Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import lstsq, norm
from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
from scipy.optimize import OptimizeResult
from scipy._lib.six import string_types
from .common import (
step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
build_quadratic_1d, minimize_quadratic_1d, compute_grad,
compute_jac_scale, check_termination, scale_for_robust_loss_function,
print_header_nonlinear, print_iteration_nonlinear)
def lsmr_operator(Jop, d, active_set):
"""Compute LinearOperator to use in LSMR by dogbox algorithm.
    `active_set` mask is used to exclude active variables from computations
of matrix-vector products.
"""
m, n = Jop.shape
    def matvec(x):
        # Zero the components of active variables so that they do not
        # contribute to the matrix-vector product.
        x_free = x.ravel().copy()
        x_free[active_set] = 0
        return Jop.matvec(x_free * d)
def rmatvec(x):
r = d * Jop.rmatvec(x)
r[active_set] = 0
return r
return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
def find_intersection(x, tr_bounds, lb, ub):
"""Find intersection of trust-region bounds and initial bounds.
Returns
-------
lb_total, ub_total : ndarray with shape of x
Lower and upper bounds of the intersection region.
orig_l, orig_u : ndarray of bool with shape of x
True means that an original bound is taken as a corresponding bound
in the intersection region.
tr_l, tr_u : ndarray of bool with shape of x
True means that a trust-region bound is taken as a corresponding bound
in the intersection region.
"""
lb_centered = lb - x
ub_centered = ub - x
lb_total = np.maximum(lb_centered, -tr_bounds)
ub_total = np.minimum(ub_centered, tr_bounds)
orig_l = np.equal(lb_total, lb_centered)
orig_u = np.equal(ub_total, ub_centered)
tr_l = np.equal(lb_total, -tr_bounds)
tr_u = np.equal(ub_total, tr_bounds)
return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
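# Worked example (illustrative): with x = [0.], tr_bounds = [2.], lb = [-1.]
# and ub = [5.], the intersection is [-1., 2.]: the lower bound comes from
# the original bounds (orig_l is True), the upper one from the trust region
# (tr_u is True).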
def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
"""Find dogleg step in a rectangular region.
Returns
-------
step : ndarray, shape (n,)
Computed dogleg step.
bound_hits : ndarray of int, shape (n,)
Each component shows whether a corresponding variable hits the
initial bound after the step is taken:
* 0 - a variable doesn't hit the bound.
* -1 - lower bound is hit.
* 1 - upper bound is hit.
tr_hit : bool
Whether the step hit the boundary of the trust-region.
"""
lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
x, tr_bounds, lb, ub
)
bound_hits = np.zeros_like(x, dtype=int)
if in_bounds(newton_step, lb_total, ub_total):
return newton_step, bound_hits, False
to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)
    # The classical dogleg algorithm would check if the Cauchy step fits
    # into the bounds, and just return its constrained version if not. But
    # in a rectangular trust region it makes sense to try to improve the
    # constrained Cauchy step too. Thus we don't distinguish these two
    # cases.
cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g
step_diff = newton_step - cauchy_step
step_size, hits = step_size_to_bound(cauchy_step, step_diff,
lb_total, ub_total)
bound_hits[(hits < 0) & orig_l] = -1
bound_hits[(hits > 0) & orig_u] = 1
tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)
return cauchy_step + step_size * step_diff, bound_hits, tr_hit
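# Note (illustrative summary): when the Newton step already satisfies both
# the trust-region and the original bounds it is returned unchanged, with
# bound_hits all zero and tr_hit False; otherwise the step is taken along
# the segment from the constrained Cauchy point towards the Newton point.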
def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose):
f = f0
f_true = f.copy()
nfev = 1
J = J0
njev = 1
if loss_function is not None:
rho = loss_function(f)
cost = 0.5 * np.sum(rho[0])
J, f = scale_for_robust_loss_function(J, f, rho)
else:
cost = 0.5 * np.dot(f, f)
g = compute_grad(J, f)
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if jac_scale:
scale, scale_inv = compute_jac_scale(J)
else:
scale, scale_inv = x_scale, 1 / x_scale
Delta = norm(x0 * scale_inv, ord=np.inf)
if Delta == 0:
Delta = 1.0
on_bound = np.zeros_like(x0, dtype=int)
on_bound[np.equal(x0, lb)] = -1
on_bound[np.equal(x0, ub)] = 1
x = x0
step = np.empty_like(x0)
if max_nfev is None:
max_nfev = x0.size * 100
termination_status = None
iteration = 0
step_norm = None
actual_reduction = None
if verbose == 2:
print_header_nonlinear()
while True:
active_set = on_bound * g < 0
free_set = ~active_set
g_free = g[free_set]
g_full = g.copy()
g[active_set] = 0
g_norm = norm(g, ord=np.inf)
if g_norm < gtol:
termination_status = 1
if verbose == 2:
print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
step_norm, g_norm)
if termination_status is not None or nfev == max_nfev:
break
x_free = x[free_set]
lb_free = lb[free_set]
ub_free = ub[free_set]
scale_free = scale[free_set]
# Compute (Gauss-)Newton and build quadratic model for Cauchy step.
if tr_solver == 'exact':
J_free = J[:, free_set]
newton_step = lstsq(J_free, -f, rcond=-1)[0]
# Coefficients for the quadratic model along the anti-gradient.
a, b = build_quadratic_1d(J_free, g_free, -g_free)
elif tr_solver == 'lsmr':
Jop = aslinearoperator(J)
            # We compute the lsmr step in scaled variables and then
            # transform back to normal variables. If lsmr gave the exact
            # least-squares solution, this would be equivalent to not doing
            # any transformations, but from experience it's better this way.
# We pass active_set to make computations as if we selected
# the free subset of J columns, but without actually doing any
# slicing, which is expensive for sparse matrices and impossible
# for LinearOperator.
lsmr_op = lsmr_operator(Jop, scale, active_set)
newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
newton_step *= scale_free
# Components of g for active variables were zeroed, so this call
# is correct and equivalent to using J_free and g_free.
a, b = build_quadratic_1d(Jop, g, -g)
actual_reduction = -1.0
while actual_reduction <= 0 and nfev < max_nfev:
tr_bounds = Delta * scale_free
step_free, on_bound_free, tr_hit = dogleg_step(
x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)
step.fill(0.0)
step[free_set] = step_free
if tr_solver == 'exact':
predicted_reduction = -evaluate_quadratic(J_free, g_free,
step_free)
elif tr_solver == 'lsmr':
predicted_reduction = -evaluate_quadratic(Jop, g, step)
x_new = x + step
f_new = fun(x_new)
nfev += 1
step_h_norm = norm(step * scale_inv, ord=np.inf)
if not np.all(np.isfinite(f_new)):
Delta = 0.25 * step_h_norm
continue
# Usual trust-region step quality estimation.
if loss_function is not None:
cost_new = loss_function(f_new, cost_only=True)
else:
cost_new = 0.5 * np.dot(f_new, f_new)
actual_reduction = cost - cost_new
Delta, ratio = update_tr_radius(
Delta, actual_reduction, predicted_reduction,
step_h_norm, tr_hit
)
step_norm = norm(step)
termination_status = check_termination(
actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
if termination_status is not None:
break
if actual_reduction > 0:
on_bound[free_set] = on_bound_free
x = x_new
# Set variables exactly at the boundary.
mask = on_bound == -1
x[mask] = lb[mask]
mask = on_bound == 1
x[mask] = ub[mask]
f = f_new
f_true = f.copy()
cost = cost_new
J = jac(x, f)
njev += 1
if loss_function is not None:
rho = loss_function(f)
J, f = scale_for_robust_loss_function(J, f, rho)
g = compute_grad(J, f)
if jac_scale:
scale, scale_inv = compute_jac_scale(J, scale_inv)
else:
step_norm = 0
actual_reduction = 0
iteration += 1
if termination_status is None:
termination_status = 0
return OptimizeResult(
x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
| 11,709 | 34.165165 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_lsq/least_squares.py
|
"""Generic interface for least-square minimization."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
n = x0.size
if diff_step is None:
epsfcn = EPS
else:
epsfcn = diff_step**2
# Compute MINPACK's `diag`, which is inverse of our `x_scale` and
# ``x_scale='jac'`` corresponds to ``diag=None``.
if isinstance(x_scale, string_types) and x_scale == 'jac':
diag = None
else:
diag = 1 / x_scale
full_output = True
col_deriv = False
factor = 100.0
if jac is None:
if max_nfev is None:
# n squared to account for Jacobian evaluations.
max_nfev = 100 * n * (n + 1)
x, info, status = _minpack._lmdif(
fun, x0, (), full_output, ftol, xtol, gtol,
max_nfev, epsfcn, factor, diag)
else:
if max_nfev is None:
max_nfev = 100 * n
x, info, status = _minpack._lmder(
fun, jac, x0, (), full_output, col_deriv,
ftol, xtol, gtol, max_nfev, factor, diag)
f = info['fvec']
if callable(jac):
J = jac(x)
else:
J = np.atleast_2d(approx_derivative(fun, x))
cost = 0.5 * np.dot(f, f)
g = J.T.dot(f)
g_norm = norm(g, ord=np.inf)
nfev = info['nfev']
njev = info.get('njev', None)
status = FROM_MINPACK_TO_COMMON[status]
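    # 'lm' does not handle bounds, so no constraint can ever be active.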
active_mask = np.zeros_like(x0, dtype=int)
return OptimizeResult(
x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev, status=status)
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
def check_tolerance(ftol, xtol, gtol):
message = "{} is too low, setting to machine epsilon {}."
if ftol < EPS:
warn(message.format("`ftol`", EPS))
ftol = EPS
if xtol < EPS:
warn(message.format("`xtol`", EPS))
xtol = EPS
if gtol < EPS:
warn(message.format("`gtol`", EPS))
gtol = EPS
return ftol, xtol, gtol
def check_x_scale(x_scale, x0):
if isinstance(x_scale, string_types) and x_scale == 'jac':
return x_scale
try:
x_scale = np.asarray(x_scale, dtype=float)
valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
except (ValueError, TypeError):
valid = False
if not valid:
raise ValueError("`x_scale` must be 'jac' or array_like with "
"positive numbers.")
if x_scale.ndim == 0:
x_scale = np.resize(x_scale, x0.shape)
if x_scale.shape != x0.shape:
raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
return x_scale
def check_jac_sparsity(jac_sparsity, m, n):
if jac_sparsity is None:
return None
if not issparse(jac_sparsity):
jac_sparsity = np.atleast_2d(jac_sparsity)
if jac_sparsity.shape != (m, n):
raise ValueError("`jac_sparsity` has wrong shape.")
return jac_sparsity, group_columns(jac_sparsity)
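# Illustrative note: `group_columns` lets the finite-difference code perturb
# several structurally independent columns at once; e.g. a tridiagonal
# (n, n) sparsity pattern needs only 3 column groups instead of n separate
# Jacobian column evaluations.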
# Loss functions.
def huber(z, rho, cost_only):
mask = z <= 1
rho[0, mask] = z[mask]
rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
if cost_only:
return
rho[1, mask] = 1
rho[1, ~mask] = z[~mask]**-0.5
rho[2, mask] = 0
rho[2, ~mask] = -0.5 * z[~mask]**-1.5
def soft_l1(z, rho, cost_only):
t = 1 + z
rho[0] = 2 * (t**0.5 - 1)
if cost_only:
return
rho[1] = t**-0.5
rho[2] = -0.5 * t**-1.5
def cauchy(z, rho, cost_only):
rho[0] = np.log1p(z)
if cost_only:
return
t = 1 + z
rho[1] = 1 / t
rho[2] = -1 / t**2
def arctan(z, rho, cost_only):
rho[0] = np.arctan(z)
if cost_only:
return
t = 1 + z**2
rho[1] = 1 / t
rho[2] = -2 * z / t**2
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
if loss == 'linear':
return None
if not callable(loss):
loss = IMPLEMENTED_LOSSES[loss]
rho = np.empty((3, m))
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
loss(z, rho, cost_only=cost_only)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
else:
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
rho = loss(z)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
return loss_function
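# Sketch of a custom callable loss (illustrative): it must map z = f**2 to
# an array of shape (3, m) holding rho(z), rho'(z) and rho''(z), mirroring
# the built-in losses above. For example, a Geman-McClure-style loss:
#
#     >>> def geman_mcclure(z):
#     ...     rho = np.empty((3,) + z.shape)
#     ...     rho[0] = z / (1 + z)            # value
#     ...     rho[1] = 1 / (1 + z)**2         # first derivative
#     ...     rho[2] = -2 / (1 + z)**3        # second derivative
#     ...     return rho
#     >>> # least_squares(fun, x0, loss=geman_mcclure)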
def least_squares(
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem with bounds on the variables.
Given the residuals f(x) (an m-dimensional real function of n real
variables) and the loss function rho(s) (a scalar function), `least_squares`
finds a local minimum of the cost function F(x)::
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
subject to lb <= x <= ub
The purpose of the loss function rho(s) is to reduce the influence of
outliers on the solution.
Parameters
----------
fun : callable
Function which computes the vector of residuals, with the signature
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
respect to its first argument. The argument ``x`` passed to this
function is an ndarray of shape (n,) (never a scalar, even for n=1).
It must return a 1-d array_like of shape (m,) or a scalar. If the
argument ``x`` is complex or the function ``fun`` returns complex
residuals, it must be wrapped in a real function of real arguments,
as shown at the end of the Examples section.
x0 : array_like with shape (n,) or float
Initial guess on independent variables. If float, it will be treated
as a 1-d array with one element.
jac : {'2-point', '3-point', 'cs', callable}, optional
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]). The keywords select a finite difference scheme for numerical
estimation. The scheme '3-point' is more accurate, but requires
        twice as many operations as '2-point' (default). The
scheme 'cs' uses complex steps, and while potentially the most
accurate, it is applicable only when `fun` correctly handles
complex inputs and can be analytically continued to the complex
plane. Method 'lm' always uses the '2-point' scheme. If callable,
it is used as ``jac(x, *args, **kwargs)`` and should return a
good approximation (or the exact value) for the Jacobian as an
array_like (np.atleast_2d is applied), a sparse matrix or a
`scipy.sparse.linalg.LinearOperator`.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must match the size of `x0` or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : {'trf', 'dogbox', 'lm'}, optional
Algorithm to perform minimization.
* 'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
* 'dogbox' : dogleg algorithm with rectangular trust regions,
typical use case is small problems with bounds. Not recommended
for problems with rank-deficient Jacobian.
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
Default is 'trf'. See Notes for more information.
ftol : float, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
and there was an adequate agreement between a local quadratic model and
the true model in the last step.
xtol : float, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8. The exact condition depends on the `method` used:
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
a trust-region radius and ``xs`` is the value of ``x``
scaled according to `x_scale` parameter (see below).
gtol : float, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
        The exact condition depends on the `method` used:
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
``g_scaled`` is the value of the gradient scaled to account for
the presence of the bounds [STIR]_.
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
``g_free`` is the gradient with respect to the variables which
are not in the optimal state on the boundary.
* For 'lm' : the maximum absolute value of the cosine of angles
between columns of the Jacobian and the residual vector is less
than `gtol`, or the residual vector is zero.
x_scale : array_like or 'jac', optional
Characteristic scale of each variable. Setting `x_scale` is equivalent
to reformulating the problem in scaled variables ``xs = x / x_scale``.
        An alternative view is that the size of a trust region along the j-th
        dimension is proportional to ``x_scale[j]``. Improved convergence may
be achieved by setting `x_scale` such that a step of a given size
along any of the scaled variables has a similar effect on the cost
function. If set to 'jac', the scale is iteratively updated using the
inverse norms of the columns of the Jacobian matrix (as described in
[JJMore]_).
loss : str or callable, optional
Determines the loss function. The following keyword values are allowed:
* 'linear' (default) : ``rho(z) = z``. Gives a standard
least-squares problem.
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
approximation of l1 (absolute value) loss. Usually a good
choice for robust least squares.
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
similarly to 'soft_l1'.
        * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens the influence
          of outliers, but may cause difficulties in the optimization
          process.
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
a single residual, has properties similar to 'cauchy'.
If callable, it must take a 1-d ndarray ``z=f**2`` and return an
array_like with shape (3, m) where row 0 contains function values,
row 1 contains first derivatives and row 2 contains second
derivatives. Method 'lm' supports only 'linear' loss.
f_scale : float, optional
Value of soft margin between inlier and outlier residuals, default
is 1.0. The loss function is evaluated as follows
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
and ``rho`` is determined by `loss` parameter. This parameter has
no effect with ``loss='linear'``, but for other `loss` values it is
of crucial importance.
max_nfev : None or int, optional
Maximum number of function evaluations before the termination.
If None (default), the value is chosen automatically:
* For 'trf' and 'dogbox' : 100 * n.
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
otherwise (because 'lm' counts function calls in Jacobian
estimation).
diff_step : None or array_like, optional
Determines the relative step size for the finite difference
approximation of the Jacobian. The actual step is computed as
``x * diff_step``. If None (default), then `diff_step` is taken to be
a conventional "optimal" power of machine epsilon for the finite
difference scheme used [NR]_.
tr_solver : {None, 'exact', 'lsmr'}, optional
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
* 'exact' is suitable for not very large problems with dense
Jacobian matrices. The computational complexity per iteration is
comparable to a singular value decomposition of the Jacobian
matrix.
* 'lsmr' is suitable for problems with sparse and large Jacobian
matrices. It uses the iterative procedure
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
least-squares problem and only requires matrix-vector product
evaluations.
If None (default) the solver is chosen based on the type of Jacobian
returned on the first iteration.
tr_options : dict, optional
Keyword options passed to trust-region solver.
* ``tr_solver='exact'``: `tr_options` are ignored.
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
Additionally ``method='trf'`` supports 'regularize' option
(bool, default is True) which adds a regularization term to the
normal equation, which improves convergence if the Jacobian is
rank-deficient [Byrd]_ (eq. 3.4).
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
difference estimation, its shape must be (m, n). If the Jacobian has
only few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations [Curtis]_. A zero
entry means that a corresponding element in the Jacobian is identically
zero. If provided, forces the use of 'lsmr' trust-region solver.
If None (default) then dense differencing will be used. Has no effect
for 'lm' method.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
`jac`.
Returns
-------
`OptimizeResult` with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
Modified Jacobian matrix at the solution, in the sense that J^T J
is a Gauss-Newton approximation of the Hessian of the cost function.
The type is the same as the one used by the algorithm.
    grad : ndarray, shape (n,)
Gradient of the cost function at the solution.
optimality : float
First-order optimality measure. In unconstrained problems, it is always
the uniform norm of the gradient. In constrained problems, it is the
quantity which was compared with `gtol` during iterations.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
        Might be somewhat arbitrary for the 'trf' method as it generates a
        sequence of strictly feasible iterates and `active_mask` is
        determined within a tolerance threshold.
nfev : int
Number of function evaluations done. Methods 'trf' and 'dogbox' do not
count function calls for numerical Jacobian approximation, as opposed
        to the 'lm' method.
njev : int or None
Number of Jacobian evaluations done. If numerical Jacobian
approximation is used in 'lm' method, it is set to None.
status : int
The reason for algorithm termination:
* -1 : improper input parameters status returned from MINPACK.
* 0 : the maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `ftol` termination condition is satisfied.
* 3 : `xtol` termination condition is satisfied.
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
leastsq : A legacy wrapper for the MINPACK implementation of the
        Levenberg-Marquardt algorithm.
curve_fit : Least-squares minimization applied to a curve fitting problem.
Notes
-----
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
algorithms implemented in MINPACK (lmder, lmdif). It runs the
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
    The implementation is based on the paper [JJMore]_; it is very robust
    and efficient, with a lot of smart tricks. It should be your first choice
for unconstrained problems. Note that it doesn't support bounds. Also
it doesn't work when m < n.
Method 'trf' (Trust Region Reflective) is motivated by the process of
solving a system of equations, which constitute the first-order optimality
condition for a bound-constrained minimization problem as formulated in
[STIR]_. The algorithm iteratively solves trust-region subproblems
augmented by a special diagonal quadratic term and with trust-region shape
determined by the distance from the bounds and the direction of the
    gradient. These enhancements help to avoid making steps directly into bounds
and efficiently explore the whole space of variables. To further improve
convergence, the algorithm considers search directions reflected from the
bounds. To obey theoretical requirements, the algorithm keeps iterates
strictly feasible. With dense Jacobians trust-region subproblems are
solved by an exact method very similar to the one described in [JJMore]_
(and implemented in MINPACK). The difference from the MINPACK
implementation is that a singular value decomposition of a Jacobian
matrix is done once per iteration, instead of a QR decomposition and series
of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
    approach to solving trust-region subproblems is used [STIR]_, [Byrd]_.
The subspace is spanned by a scaled gradient and an approximate
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm works quite robustly in
    unbounded and bounded problems, thus it is chosen as the default
    algorithm.
Method 'dogbox' operates in a trust-region framework, but considers
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
The intersection of a current trust region and initial bounds is again
rectangular, so on each iteration a quadratic minimization problem subject
to bound constraints is solved approximately by Powell's dogleg method
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
the rank of Jacobian is less than the number of variables. The algorithm
often outperforms 'trf' in bounded problems with a small number of
variables.
Robust loss functions are implemented as described in [BA]_. The idea
is to modify a residual vector and a Jacobian matrix on each iteration
such that computed gradient and Gauss-Newton Hessian approximation match
the true gradient and Hessian approximation of the cost function. Then
the algorithm proceeds in a normal way, i.e. robust loss functions are
implemented as a simple wrapper over standard least-squares algorithms.
.. versionadded:: 0.17.0
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", Sec. 5.7.
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
solution of the trust region problem by minimization over
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
1988.
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of
Mathematics and its Applications, 13, pp. 117-120, 1974.
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
Dogleg Approach for Unconstrained and Bound Constrained
Nonlinear Optimization", WSEAS International Conference on
Applied Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
2nd edition", Chapter 4.
.. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
Proceedings of the International Workshop on Vision Algorithms:
Theory and Practice, pp. 298-372, 1999.
Examples
--------
In this example we find a minimum of the Rosenbrock function without bounds
on independent variables.
>>> def fun_rosenbrock(x):
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
Notice that we only provide the vector of the residuals. The algorithm
constructs the cost function as a sum of squares of the residuals, which
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
>>> from scipy.optimize import least_squares
>>> x0_rosenbrock = np.array([2, 2])
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
>>> res_1.x
array([ 1., 1.])
>>> res_1.cost
9.8669242910846867e-30
>>> res_1.optimality
8.8928864934219529e-14
We now constrain the variables, in such a way that the previous solution
    becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, while
    ``x[0]`` is left unconstrained. To this end, we specify the `bounds`
    parameter
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
We also provide the analytic Jacobian:
>>> def jac_rosenbrock(x):
... return np.array([
... [-20 * x[0], 10],
... [-1, 0]])
Putting this all together, we see that the new solution lies on the bound:
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
... bounds=([-np.inf, 1.5], np.inf))
>>> res_2.x
array([ 1.22437075, 1.5 ])
>>> res_2.cost
0.025213093946805685
>>> res_2.optimality
1.5885401433157753e-07
Now we solve a system of equations (i.e., the cost function should be zero
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
variables:
>>> def fun_broyden(x):
... f = (3 - x) * x + 1
... f[1:] -= x[:-1]
... f[:-1] -= 2 * x[1:]
... return f
The corresponding Jacobian matrix is sparse. We tell the algorithm to
estimate it by finite differences and provide the sparsity structure of
Jacobian to significantly speed up this process.
>>> from scipy.sparse import lil_matrix
>>> def sparsity_broyden(n):
... sparsity = lil_matrix((n, n), dtype=int)
... i = np.arange(n)
... sparsity[i, i] = 1
... i = np.arange(1, n)
... sparsity[i, i - 1] = 1
... i = np.arange(n - 1)
... sparsity[i, i + 1] = 1
... return sparsity
...
>>> n = 100000
>>> x0_broyden = -np.ones(n)
...
>>> res_3 = least_squares(fun_broyden, x0_broyden,
... jac_sparsity=sparsity_broyden(n))
>>> res_3.cost
4.5687069299604613e-23
>>> res_3.optimality
1.1650454296851518e-11
    Let's also solve a curve fitting problem using a robust loss function to
take care of outliers in the data. Define the model function as
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
observation and a, b, c are parameters to estimate.
First, define the function which generates the data with noise and
outliers, define the model parameters, and generate data:
>>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
... y = a + b * np.exp(t * c)
...
... rnd = np.random.RandomState(random_state)
... error = noise * rnd.randn(t.size)
... outliers = rnd.randint(0, t.size, n_outliers)
... error[outliers] *= 10
...
... return y + error
...
>>> a = 0.5
>>> b = 2.0
>>> c = -1
>>> t_min = 0
>>> t_max = 10
>>> n_points = 15
...
>>> t_train = np.linspace(t_min, t_max, n_points)
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
    Define the function for computing residuals and the initial estimate of
    the parameters.
>>> def fun(x, t, y):
... return x[0] + x[1] * np.exp(x[2] * t) - y
...
>>> x0 = np.array([1.0, 1.0, 0.0])
Compute a standard least-squares solution:
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
Now compute two solutions with two different robust loss functions. The
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
not significantly exceed 0.1 (the noise level used).
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
... args=(t_train, y_train))
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
... args=(t_train, y_train))
And finally plot all the curves. We see that by selecting an appropriate
`loss` we can get estimates close to optimal even in the presence of
strong outliers. But keep in mind that generally it is recommended to try
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
    options may cause difficulties in the optimization process.
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
>>> y_true = gen_data(t_test, a, b, c)
>>> y_lsq = gen_data(t_test, *res_lsq.x)
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
>>> y_log = gen_data(t_test, *res_log.x)
...
>>> import matplotlib.pyplot as plt
>>> plt.plot(t_train, y_train, 'o')
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
>>> plt.plot(t_test, y_lsq, label='linear loss')
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
>>> plt.plot(t_test, y_log, label='cauchy loss')
>>> plt.xlabel("t")
>>> plt.ylabel("y")
>>> plt.legend()
>>> plt.show()
In the next example, we show how complex-valued residual functions of
complex variables can be optimized with ``least_squares()``. Consider the
following function:
>>> def f(z):
... return z - (0.5 + 0.5j)
We wrap it into a function of real variables that returns real residuals
by simply handling the real and imaginary parts as independent variables:
>>> def f_wrap(x):
... fx = f(x[0] + 1j*x[1])
... return np.array([fx.real, fx.imag])
Thus, instead of the original m-dimensional complex function of n complex
variables we optimize a 2m-dimensional real function of 2n real variables:
>>> from scipy.optimize import least_squares
>>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
>>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
>>> z
(0.49999999999925893+0.49999999999925893j)
"""
if method not in ['trf', 'dogbox', 'lm']:
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
"callable.")
if tr_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
raise ValueError("`loss` must be one of {0} or a callable."
.format(IMPLEMENTED_LOSSES.keys()))
if method == 'lm' and loss != 'linear':
raise ValueError("method='lm' supports only 'linear' loss function.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_nfev is not None and max_nfev <= 0:
raise ValueError("`max_nfev` must be None or positive integer.")
if np.iscomplexobj(x0):
raise ValueError("`x0` must be real.")
x0 = np.atleast_1d(x0).astype(float)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = prepare_bounds(bounds, x0.shape[0])
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
raise ValueError("Method 'lm' doesn't support bounds.")
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if np.any(lb >= ub):
raise ValueError("Each lower bound must be strictly less than each "
"upper bound.")
if not in_bounds(x0, lb, ub):
raise ValueError("`x0` is infeasible.")
x_scale = check_x_scale(x_scale, x0)
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)
def fun_wrapped(x):
return np.atleast_1d(fun(x, *args, **kwargs))
if method == 'trf':
x0 = make_strictly_feasible(x0, lb, ub)
f0 = fun_wrapped(x0)
if f0.ndim != 1:
raise ValueError("`fun` must return at most 1-d array_like.")
if not np.all(np.isfinite(f0)):
raise ValueError("Residuals are not finite in the initial point.")
n = x0.size
m = f0.size
if method == 'lm' and m < n:
raise ValueError("Method 'lm' doesn't work when the number of "
"residuals is less than the number of variables.")
loss_function = construct_loss_function(m, loss, f_scale)
if callable(loss):
rho = loss_function(f0)
if rho.shape != (3, m):
raise ValueError("The return value of `loss` callable has wrong "
"shape.")
initial_cost = 0.5 * np.sum(rho[0])
elif loss_function is not None:
initial_cost = loss_function(f0, cost_only=True)
else:
initial_cost = 0.5 * np.dot(f0, f0)
if callable(jac):
J0 = jac(x0, *args, **kwargs)
if issparse(J0):
J0 = csr_matrix(J0)
def jac_wrapped(x, _=None):
return csr_matrix(jac(x, *args, **kwargs))
elif isinstance(J0, LinearOperator):
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs)
else:
J0 = np.atleast_2d(J0)
def jac_wrapped(x, _=None):
return np.atleast_2d(jac(x, *args, **kwargs))
else: # Estimate Jacobian by finite differences.
if method == 'lm':
if jac_sparsity is not None:
raise ValueError("method='lm' does not support "
"`jac_sparsity`.")
if jac != '2-point':
warn("jac='{0}' works equivalently to '2-point' "
"for method='lm'.".format(jac))
J0 = jac_wrapped = None
else:
if jac_sparsity is not None and tr_solver == 'exact':
raise ValueError("tr_solver='exact' is incompatible "
"with `jac_sparsity`.")
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
def jac_wrapped(x, f):
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
f0=f, bounds=bounds, args=args,
kwargs=kwargs, sparsity=jac_sparsity)
if J.ndim != 2: # J is guaranteed not sparse.
J = np.atleast_2d(J)
return J
J0 = jac_wrapped(x0, f0)
if J0 is not None:
if J0.shape != (m, n):
raise ValueError(
"The return value of `jac` has wrong shape: expected {0}, "
"actual {1}.".format((m, n), J0.shape))
if not isinstance(J0, np.ndarray):
if method == 'lm':
raise ValueError("method='lm' works only with dense "
"Jacobian matrices.")
if tr_solver == 'exact':
raise ValueError(
"tr_solver='exact' works only with dense "
"Jacobian matrices.")
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if isinstance(J0, LinearOperator) and jac_scale:
raise ValueError("x_scale='jac' can't be used when `jac` "
"returns LinearOperator.")
if tr_solver is None:
if isinstance(J0, np.ndarray):
tr_solver = 'exact'
else:
tr_solver = 'lsmr'
if method == 'lm':
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
max_nfev, x_scale, diff_step)
elif method == 'trf':
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
gtol, max_nfev, x_scale, loss_function, tr_solver,
tr_options.copy(), verbose)
elif method == 'dogbox':
if tr_solver == 'lsmr' and 'regularize' in tr_options:
warn("The keyword 'regularize' in `tr_options` is not relevant "
"for 'dogbox' method.")
tr_options = tr_options.copy()
del tr_options['regularize']
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
xtol, gtol, max_nfev, x_scale, loss_function,
tr_solver, tr_options, verbose)
result.message = TERMINATION_MESSAGES[result.status]
result.success = result.status > 0
if verbose >= 1:
print(result.message)
print("Function evaluations {0}, initial cost {1:.4e}, final cost "
"{2:.4e}, first-order optimality {3:.2e}."
.format(result.nfev, initial_cost, result.cost,
result.optimality))
return result
| 37,726 | 39.479614 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_lsq/setup.py
|
from __future__ import division, print_function, absolute_import
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('_lsq', parent_package, top_path)
config.add_extension('givens_elimination',
sources=['givens_elimination.c'])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 482 | 31.2 | 64 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_lsq/lsq_linear.py
|
"""Linear least squares with bound constraints on independent variables."""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator, lsmr
from scipy.optimize import OptimizeResult
from .common import in_bounds, compute_grad
from .trf_linear import trf_linear
from .bvls import bvls
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
TERMINATION_MESSAGES = {
-1: "The algorithm was not able to make progress on the last iteration.",
0: "The maximum number of iterations is exceeded.",
1: "The first-order optimality measure is less than `tol`.",
2: "The relative change of the cost function is less than `tol`.",
3: "The unconstrained solution is optimal."
}
def lsq_linear(A, b, bounds=(-np.inf, np.inf), method='trf', tol=1e-10,
lsq_solver=None, lsmr_tol=None, max_iter=None, verbose=0):
r"""Solve a linear least-squares problem with bounds on the variables.
    Given an m-by-n design matrix A and a target vector b with m elements,
`lsq_linear` solves the following optimization problem::
minimize 0.5 * ||A x - b||**2
subject to lb <= x <= ub
This optimization problem is convex, hence a found minimum (if iterations
have converged) is guaranteed to be global.
Parameters
----------
    A : array_like, sparse matrix or LinearOperator, shape (m, n)
Design matrix. Can be `scipy.sparse.linalg.LinearOperator`.
b : array_like, shape (m,)
Target vector.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must have shape (n,) or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : 'trf' or 'bvls', optional
Method to perform minimization.
* 'trf' : Trust Region Reflective algorithm adapted for a linear
least-squares problem. This is an interior-point-like method
and the required number of iterations is weakly correlated with
the number of variables.
* 'bvls' : Bounded-Variable Least-Squares algorithm. This is
an active set method, which requires the number of iterations
comparable to the number of variables. Can't be used when `A` is
sparse or LinearOperator.
Default is 'trf'.
tol : float, optional
Tolerance parameter. The algorithm terminates if a relative change
of the cost function is less than `tol` on the last iteration.
Additionally the first-order optimality measure is considered:
* ``method='trf'`` terminates if the uniform norm of the gradient,
scaled to account for the presence of the bounds, is less than
`tol`.
* ``method='bvls'`` terminates if Karush-Kuhn-Tucker conditions
are satisfied within `tol` tolerance.
lsq_solver : {None, 'exact', 'lsmr'}, optional
Method of solving unbounded least-squares problems throughout
iterations:
* 'exact' : Use dense QR or SVD decomposition approach. Can't be
used when `A` is sparse or LinearOperator.
* 'lsmr' : Use `scipy.sparse.linalg.lsmr` iterative procedure
which requires only matrix-vector product evaluations. Can't
be used with ``method='bvls'``.
If None (default) the solver is chosen based on type of `A`.
lsmr_tol : None, float or 'auto', optional
        Tolerance parameters 'atol' and 'btol' for `scipy.sparse.linalg.lsmr`.
If None (default), it is set to ``1e-2 * tol``. If 'auto', the
tolerance will be adjusted based on the optimality of the current
iterate, which can speed up the optimization process, but is not always
reliable.
max_iter : None or int, optional
Maximum number of iterations before termination. If None (default), it
is set to 100 for ``method='trf'`` or to the number of variables for
``method='bvls'`` (not counting iterations for 'bvls' initialization).
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 : work silently (default).
* 1 : display a termination report.
* 2 : display progress during iterations.
Returns
-------
OptimizeResult with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
optimality : float
First-order optimality measure. The exact meaning depends on `method`,
refer to the description of `tol` parameter.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
Might be somewhat arbitrary for the `trf` method as it generates a
sequence of strictly feasible iterates and active_mask is determined
within a tolerance threshold.
nit : int
Number of iterations. Zero if the unconstrained solution is optimal.
status : int
Reason for algorithm termination:
* -1 : the algorithm was not able to make progress on the last
iteration.
* 0 : the maximum number of iterations is exceeded.
* 1 : the first-order optimality measure is less than `tol`.
* 2 : the relative change of the cost function is less than `tol`.
* 3 : the unconstrained solution is optimal.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
nnls : Linear least squares with non-negativity constraint.
least_squares : Nonlinear least squares with bounds on the variables.
Notes
-----
The algorithm first computes the unconstrained least-squares solution by
`numpy.linalg.lstsq` or `scipy.sparse.linalg.lsmr` depending on
`lsq_solver`. This solution is returned as optimal if it lies within the
bounds.
Method 'trf' runs the adaptation of the algorithm described in [STIR]_ for
a linear least-squares problem. The iterations are essentially the same as
in the nonlinear least-squares algorithm, but as the quadratic function
model is always accurate, we don't need to track or modify the radius of
a trust region. The line search (backtracking) is used as a safety net
when a selected step does not decrease the cost function. A more
detailed description of the algorithm is given in `scipy.optimize.least_squares`.
Method 'bvls' runs a Python implementation of the algorithm described in
[BVLS]_. The algorithm maintains active and free sets of variables, on
each iteration chooses a new variable to move from the active set to the
free set and then solves the unconstrained least-squares problem on free
variables. This algorithm is guaranteed to give an accurate solution
eventually, but may require up to n iterations for a problem with n
variables. Additionally, an ad-hoc initialization procedure is
implemented that determines which variables to set free or active
initially. It takes some number of iterations before the actual BVLS starts,
but can significantly reduce the number of further iterations.
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [BVLS] P. B. Stark and R. L. Parker, "Bounded-Variable Least-Squares:
an Algorithm and Applications", Computational Statistics, 10,
129-141, 1995.
Examples
--------
In this example a problem with a large sparse matrix and bounds on the
variables is solved.
>>> from scipy.sparse import rand
>>> from scipy.optimize import lsq_linear
...
>>> np.random.seed(0)
...
>>> m = 20000
>>> n = 10000
...
>>> A = rand(m, n, density=1e-4)
>>> b = np.random.randn(m)
...
>>> lb = np.random.randn(n)
>>> ub = lb + 1
...
>>> res = lsq_linear(A, b, bounds=(lb, ub), lsmr_tol='auto', verbose=1)
# may vary
The relative change of the cost function is less than `tol`.
Number of iterations 16, initial cost 1.5039e+04, final cost 1.1112e+04,
first-order optimality 4.66e-08.
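A tiny dense sketch (illustrative values): with two equations in one
unknown, the unconstrained solution ``x = 3`` already satisfies the
loose bounds, so it is returned as optimal with ``status = 3``:
>>> res = lsq_linear(np.array([[1.0], [1.0]]), np.array([2.0, 4.0]),
...                  bounds=(0, 10))
>>> res.x
array([3.])
>>> res.status
3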
"""
if method not in ['trf', 'bvls']:
raise ValueError("`method` must be 'trf' or 'bvls'")
if lsq_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`solver` must be None, 'exact' or 'lsmr'.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if issparse(A):
A = csr_matrix(A)
elif not isinstance(A, LinearOperator):
A = np.atleast_2d(A)
if method == 'bvls':
if lsq_solver == 'lsmr':
raise ValueError("method='bvls' can't be used with "
"lsq_solver='lsmr'")
if not isinstance(A, np.ndarray):
raise ValueError("method='bvls' can't be used with `A` being "
"sparse or LinearOperator.")
if lsq_solver is None:
if isinstance(A, np.ndarray):
lsq_solver = 'exact'
else:
lsq_solver = 'lsmr'
elif lsq_solver == 'exact' and not isinstance(A, np.ndarray):
raise ValueError("`exact` solver can't be used when `A` is "
"sparse or LinearOperator.")
if len(A.shape) != 2: # No ndim for LinearOperator.
raise ValueError("`A` must have at most 2 dimensions.")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_iter is not None and max_iter <= 0:
raise ValueError("`max_iter` must be None or positive integer.")
m, n = A.shape
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("`b` must have at most 1 dimension.")
if b.size != m:
raise ValueError("Inconsistent shapes between `A` and `b`.")
lb, ub = prepare_bounds(bounds, n)
if lb.shape != (n,) or ub.shape != (n,):
raise ValueError("Bounds have wrong shape.")
if np.any(lb >= ub):
raise ValueError("Each lower bound must be strictly less than each "
"upper bound.")
if lsq_solver == 'exact':
x_lsq = np.linalg.lstsq(A, b, rcond=-1)[0]
elif lsq_solver == 'lsmr':
x_lsq = lsmr(A, b, atol=tol, btol=tol)[0]
if in_bounds(x_lsq, lb, ub):
r = A.dot(x_lsq) - b
cost = 0.5 * np.dot(r, r)
termination_status = 3
termination_message = TERMINATION_MESSAGES[termination_status]
g = compute_grad(A, r)
g_norm = norm(g, ord=np.inf)
if verbose > 0:
print(termination_message)
print("Final cost {0:.4e}, first-order optimality {1:.2e}"
.format(cost, g_norm))
return OptimizeResult(
x=x_lsq, fun=r, cost=cost, optimality=g_norm,
active_mask=np.zeros(n), nit=0, status=termination_status,
message=termination_message, success=True)
if method == 'trf':
res = trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
max_iter, verbose)
elif method == 'bvls':
res = bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose)
res.message = TERMINATION_MESSAGES[res.status]
res.success = res.status > 0
if verbose > 0:
print(res.message)
print("Number of iterations {0}, initial cost {1:.4e}, "
"final cost {2:.4e}, first-order optimality {3:.2e}."
.format(res.nit, res.initial_cost, res.cost, res.optimality))
del res.initial_cost
return res
| 12,646 | 38.77044 | 93 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_lsq/trf.py
|
"""Trust Region Reflective algorithm for least-squares optimization.
The algorithm is based on ideas from paper [STIR]_. The main idea is to
account for the presence of the bounds by appropriate scaling of the variables (or
equivalently changing a trust-region shape). Let's introduce a vector v:
| ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
| 1, otherwise
where g is the gradient of a cost function and lb, ub are the bounds. Its
components are distances to the bounds at which the anti-gradient points (if
this distance is finite). Define a scaling matrix D = diag(v**0.5).
First-order optimality conditions can be stated as
D^2 g(x) = 0.
This means that components of the gradient should be zero for strictly interior
variables, and components must point inside the feasible region for variables
on the bound.
Now consider this system of equations as a new optimization problem. If the
point x is strictly interior (not on the bound) then the left-hand side is
differentiable and the Newton step for it satisfies
(D^2 H + diag(g) Jv) p = -D^2 g
where H is the Hessian matrix (or its J^T J approximation in least squares),
Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all
elements of matrix C = diag(g) Jv are non-negative. Introduce the change
of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables
we have a Newton step satisfying
B_h p_h = -g_h,
where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where
J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect
to "hat" variables. To guarantee global convergence we formulate a
trust-region problem based on the Newton step in the new variables:
0.5 * p_h^T B_h p_h + g_h^T p_h -> min, ||p_h|| <= Delta
In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region
problem is
0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta
Here the meaning of the matrix D becomes clearer: it alters the shape
of a trust-region, such that large steps towards the bounds are not allowed.
In the implementation the trust-region problem is solved in "hat" space,
but handling of the bounds is done in the original space (see below and read
the code).
The introduction of the matrix D doesn't allow the bounds to be ignored: the
algorithm must keep its iterates strictly feasible (to satisfy the
aforementioned differentiability), and the parameter theta controls the step
back from the boundary (see the code for details).
The algorithm does another important trick. If the trust-region solution
doesn't fit into the bounds, then a reflected (from a firstly encountered
bound) search direction is considered. For motivation and analysis refer to
[STIR]_ paper (and other papers of the authors). In practice it doesn't need
much justification: the algorithm simply chooses the best step among
three: a constrained trust-region step, a reflected step and a constrained
Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original
space).
Another feature is that a trust-region radius control strategy is modified to
account for appearance of the diagonal C matrix (called diag_h in the code).
Note that all the described peculiarities vanish when we consider
problems without bounds (the algorithm becomes a standard trust-region type
algorithm, very similar to the ones implemented in MINPACK).
The implementation supports two methods of solving the trust-region problem.
The first, called 'exact', applies SVD on Jacobian and then solves the problem
very accurately using the algorithm described in [JJMore]_. It is not
applicable to large problems. The second, called 'lsmr', uses the 2-D subspace
approach (sometimes called "indefinite dogleg"), where the problem is solved
in a subspace spanned by the gradient and the approximate Gauss-Newton step
found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region problem is
reformulated as a 4-th order algebraic equation and solved very accurately by
``numpy.roots``. The subspace approach makes it possible to solve very large
problems (up to a couple of million residuals on a regular PC), provided the Jacobian
matrix is sufficiently sparse.
References
----------
.. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm
from scipy.linalg import svd, qr
from scipy.sparse.linalg import LinearOperator, lsmr
from scipy.optimize import OptimizeResult
from scipy._lib.six import string_types
from .common import (
step_size_to_bound, find_active_constraints, in_bounds,
make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region,
solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d,
evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator,
CL_scaling_vector, compute_grad, compute_jac_scale, check_termination,
update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear,
print_iteration_nonlinear)
def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose):
# For efficiency it makes sense to run the simplified version of the
# algorithm when no bounds are imposed. We decided to write two
# separate functions: it violates the DRY principle, but keeps each of
# the individual functions as readable as possible.
if np.all(lb == -np.inf) and np.all(ub == np.inf):
return trf_no_bounds(
fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose)
else:
return trf_bounds(
fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose)
def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta):
"""Select the best step according to Trust Region Reflective algorithm."""
if in_bounds(x + p, lb, ub):
p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
return p, p_h, -p_value
p_stride, hits = step_size_to_bound(x, p, lb, ub)
# Compute the reflected direction.
r_h = np.copy(p_h)
r_h[hits.astype(bool)] *= -1
r = d * r_h
# Restrict trust-region step, such that it hits the bound.
p *= p_stride
p_h *= p_stride
x_on_bound = x + p
# The reflected direction will first cross either the feasible region
# boundary or the trust region boundary.
_, to_tr = intersect_trust_region(p_h, r_h, Delta)
to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub)
# Find lower and upper bounds on a step size along the reflected
# direction, considering the strict feasibility requirement. There is no
# single correct way to do that; the chosen approach seems to work best
# on test problems.
r_stride = min(to_bound, to_tr)
if r_stride > 0:
r_stride_l = (1 - theta) * p_stride / r_stride
if r_stride == to_bound:
r_stride_u = theta * to_bound
else:
r_stride_u = to_tr
else:
r_stride_l = 0
r_stride_u = -1
# Check if reflection step is available.
if r_stride_l <= r_stride_u:
a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h)
r_stride, r_value = minimize_quadratic_1d(
a, b, r_stride_l, r_stride_u, c=c)
r_h *= r_stride
r_h += p_h
r = r_h * d
else:
r_value = np.inf
# Now correct p_h to make it strictly interior.
p *= theta
p_h *= theta
p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
ag_h = -g_h
ag = d * ag_h
to_tr = Delta / norm(ag_h)
to_bound, _ = step_size_to_bound(x, ag, lb, ub)
if to_bound < to_tr:
ag_stride = theta * to_bound
else:
ag_stride = to_tr
a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h)
ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride)
ag_h *= ag_stride
ag *= ag_stride
if p_value < r_value and p_value < ag_value:
return p, p_h, -p_value
elif r_value < p_value and r_value < ag_value:
return r, r_h, -r_value
else:
return ag, ag_h, -ag_value
def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev,
x_scale, loss_function, tr_solver, tr_options, verbose):
x = x0.copy()
f = f0
f_true = f.copy()
nfev = 1
J = J0
njev = 1
m, n = J.shape
if loss_function is not None:
rho = loss_function(f)
cost = 0.5 * np.sum(rho[0])
J, f = scale_for_robust_loss_function(J, f, rho)
else:
cost = 0.5 * np.dot(f, f)
g = compute_grad(J, f)
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if jac_scale:
scale, scale_inv = compute_jac_scale(J)
else:
scale, scale_inv = x_scale, 1 / x_scale
v, dv = CL_scaling_vector(x, g, lb, ub)
v[dv != 0] *= scale_inv[dv != 0]
Delta = norm(x0 * scale_inv / v**0.5)
if Delta == 0:
Delta = 1.0
g_norm = norm(g * v, ord=np.inf)
f_augmented = np.zeros((m + n))
if tr_solver == 'exact':
J_augmented = np.empty((m + n, n))
elif tr_solver == 'lsmr':
reg_term = 0.0
regularize = tr_options.pop('regularize', True)
if max_nfev is None:
max_nfev = x0.size * 100
alpha = 0.0 # "Levenberg-Marquardt" parameter
termination_status = None
iteration = 0
step_norm = None
actual_reduction = None
if verbose == 2:
print_header_nonlinear()
while True:
v, dv = CL_scaling_vector(x, g, lb, ub)
g_norm = norm(g * v, ord=np.inf)
if g_norm < gtol:
termination_status = 1
if verbose == 2:
print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
step_norm, g_norm)
if termination_status is not None or nfev == max_nfev:
break
# Now compute variables in "hat" space. Here we also account for
# scaling introduced by `x_scale` parameter. This part is a bit tricky,
# you have to write down the formulas and see how the trust-region
# problem is formulated when the two types of scaling are applied.
# The idea is that first we apply `x_scale` and then apply Coleman-Li
# approach in the new variables.
# v is recomputed in the variables after applying `x_scale`; note that
# components which were identically 1 are not affected.
v[dv != 0] *= scale_inv[dv != 0]
# Here we apply two types of scaling.
d = v**0.5 * scale
# C = diag(g * scale) Jv
diag_h = g * dv * scale
# After all of this is done, we continue normally.
# "hat" gradient.
g_h = d * g
f_augmented[:m] = f
if tr_solver == 'exact':
J_augmented[:m] = J * d
J_h = J_augmented[:m] # Memory view.
J_augmented[m:] = np.diag(diag_h**0.5)
U, s, V = svd(J_augmented, full_matrices=False)
V = V.T
uf = U.T.dot(f_augmented)
elif tr_solver == 'lsmr':
J_h = right_multiplied_operator(J, d)
if regularize:
a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h)
to_tr = Delta / norm(g_h)
ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
reg_term = -ag_value / Delta**2
lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5)
gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0]
S = np.vstack((g_h, gn_h)).T
S, _ = qr(S, mode='economic')
JS = J_h.dot(S) # LinearOperator does dot too.
B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S)
g_S = S.T.dot(g_h)
# theta controls the step-back ratio from the bounds.
theta = max(0.995, 1 - g_norm)
actual_reduction = -1
while actual_reduction <= 0 and nfev < max_nfev:
if tr_solver == 'exact':
p_h, alpha, n_iter = solve_lsq_trust_region(
n, m, uf, s, V, Delta, initial_alpha=alpha)
elif tr_solver == 'lsmr':
p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
p_h = S.dot(p_S)
p = d * p_h # Trust-region solution in the original space.
step, step_h, predicted_reduction = select_step(
x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta)
x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
f_new = fun(x_new)
nfev += 1
step_h_norm = norm(step_h)
if not np.all(np.isfinite(f_new)):
Delta = 0.25 * step_h_norm
continue
# Usual trust-region step quality estimation.
if loss_function is not None:
cost_new = loss_function(f_new, cost_only=True)
else:
cost_new = 0.5 * np.dot(f_new, f_new)
actual_reduction = cost - cost_new
# Correction term is specific to the algorithm,
# vanishes in unbounded case.
correction = 0.5 * np.dot(step_h * diag_h, step_h)
Delta_new, ratio = update_tr_radius(
Delta, actual_reduction - correction, predicted_reduction,
step_h_norm, step_h_norm > 0.95 * Delta
)
alpha *= Delta / Delta_new
Delta = Delta_new
step_norm = norm(step)
termination_status = check_termination(
actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
if termination_status is not None:
break
if actual_reduction > 0:
x = x_new
f = f_new
f_true = f.copy()
cost = cost_new
J = jac(x, f)
njev += 1
if loss_function is not None:
rho = loss_function(f)
J, f = scale_for_robust_loss_function(J, f, rho)
g = compute_grad(J, f)
if jac_scale:
scale, scale_inv = compute_jac_scale(J, scale_inv)
else:
step_norm = 0
actual_reduction = 0
iteration += 1
if termination_status is None:
termination_status = 0
active_mask = find_active_constraints(x, lb, ub, rtol=xtol)
return OptimizeResult(
x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev,
status=termination_status)
def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev,
x_scale, loss_function, tr_solver, tr_options, verbose):
x = x0.copy()
f = f0
f_true = f.copy()
nfev = 1
J = J0
njev = 1
m, n = J.shape
if loss_function is not None:
rho = loss_function(f)
cost = 0.5 * np.sum(rho[0])
J, f = scale_for_robust_loss_function(J, f, rho)
else:
cost = 0.5 * np.dot(f, f)
g = compute_grad(J, f)
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if jac_scale:
scale, scale_inv = compute_jac_scale(J)
else:
scale, scale_inv = x_scale, 1 / x_scale
Delta = norm(x0 * scale_inv)
if Delta == 0:
Delta = 1.0
if tr_solver == 'lsmr':
reg_term = 0
damp = tr_options.pop('damp', 0.0)
regularize = tr_options.pop('regularize', True)
if max_nfev is None:
max_nfev = x0.size * 100
alpha = 0.0 # "Levenberg-Marquardt" parameter
termination_status = None
iteration = 0
step_norm = None
actual_reduction = None
if verbose == 2:
print_header_nonlinear()
while True:
g_norm = norm(g, ord=np.inf)
if g_norm < gtol:
termination_status = 1
if verbose == 2:
print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
step_norm, g_norm)
if termination_status is not None or nfev == max_nfev:
break
d = scale
g_h = d * g
if tr_solver == 'exact':
J_h = J * d
U, s, V = svd(J_h, full_matrices=False)
V = V.T
uf = U.T.dot(f)
elif tr_solver == 'lsmr':
J_h = right_multiplied_operator(J, d)
if regularize:
a, b = build_quadratic_1d(J_h, g_h, -g_h)
to_tr = Delta / norm(g_h)
ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
reg_term = -ag_value / Delta**2
damp_full = (damp**2 + reg_term)**0.5
gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0]
S = np.vstack((g_h, gn_h)).T
S, _ = qr(S, mode='economic')
JS = J_h.dot(S)
B_S = np.dot(JS.T, JS)
g_S = S.T.dot(g_h)
actual_reduction = -1
while actual_reduction <= 0 and nfev < max_nfev:
if tr_solver == 'exact':
step_h, alpha, n_iter = solve_lsq_trust_region(
n, m, uf, s, V, Delta, initial_alpha=alpha)
elif tr_solver == 'lsmr':
p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
step_h = S.dot(p_S)
predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h)
step = d * step_h
x_new = x + step
f_new = fun(x_new)
nfev += 1
step_h_norm = norm(step_h)
if not np.all(np.isfinite(f_new)):
Delta = 0.25 * step_h_norm
continue
# Usual trust-region step quality estimation.
if loss_function is not None:
cost_new = loss_function(f_new, cost_only=True)
else:
cost_new = 0.5 * np.dot(f_new, f_new)
actual_reduction = cost - cost_new
Delta_new, ratio = update_tr_radius(
Delta, actual_reduction, predicted_reduction,
step_h_norm, step_h_norm > 0.95 * Delta)
alpha *= Delta / Delta_new
Delta = Delta_new
step_norm = norm(step)
termination_status = check_termination(
actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
if termination_status is not None:
break
if actual_reduction > 0:
x = x_new
f = f_new
f_true = f.copy()
cost = cost_new
J = jac(x, f)
njev += 1
if loss_function is not None:
rho = loss_function(f)
J, f = scale_for_robust_loss_function(J, f, rho)
g = compute_grad(J, f)
if jac_scale:
scale, scale_inv = compute_jac_scale(J, scale_inv)
else:
step_norm = 0
actual_reduction = 0
iteration += 1
if termination_status is None:
termination_status = 0
active_mask = np.zeros_like(x)
return OptimizeResult(
x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev,
status=termination_status)
| 19,791 | 33.783831 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_lsq/bvls.py
|
"""Bounded-Variable Least-Squares algorithm."""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm, lstsq
from scipy.optimize import OptimizeResult
from .common import print_header_linear, print_iteration_linear
def compute_kkt_optimality(g, on_bound):
"""Compute the maximum violation of KKT conditions."""
g_kkt = g * on_bound
free_set = on_bound == 0
g_kkt[free_set] = np.abs(g[free_set])
return np.max(g_kkt)
def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose):
m, n = A.shape
x = x_lsq.copy()
on_bound = np.zeros(n)
mask = x < lb
x[mask] = lb[mask]
on_bound[mask] = -1
mask = x > ub
x[mask] = ub[mask]
on_bound[mask] = 1
free_set = on_bound == 0
active_set = ~free_set
free_set, = np.where(free_set)
r = A.dot(x) - b
cost = 0.5 * np.dot(r, r)
initial_cost = cost
g = A.T.dot(r)
cost_change = None
step_norm = None
iteration = 0
if verbose == 2:
print_header_linear()
# This is the initialization loop. The requirement is that the
# least-squares solution on free variables is feasible before BVLS starts.
# One possible initialization is to set all variables to lower or upper
# bounds, but many iterations may be required from this state later on.
# The implemented ad-hoc procedure, which intuitively should give a better
# initial state, is: find the least-squares solution on the current free
# variables; if it's feasible then stop, otherwise set the violating
# variables to their corresponding bounds and continue on the reduced set of
# free variables.
while free_set.size > 0:
if verbose == 2:
optimality = compute_kkt_optimality(g, on_bound)
print_iteration_linear(iteration, cost, cost_change, step_norm,
optimality)
iteration += 1
x_free_old = x[free_set].copy()
A_free = A[:, free_set]
b_free = b - A.dot(x * active_set)
z = lstsq(A_free, b_free, rcond=-1)[0]
lbv = z < lb[free_set]
ubv = z > ub[free_set]
v = lbv | ubv
if np.any(lbv):
ind = free_set[lbv]
x[ind] = lb[ind]
active_set[ind] = True
on_bound[ind] = -1
if np.any(ubv):
ind = free_set[ubv]
x[ind] = ub[ind]
active_set[ind] = True
on_bound[ind] = 1
ind = free_set[~v]
x[ind] = z[~v]
r = A.dot(x) - b
cost_new = 0.5 * np.dot(r, r)
cost_change = cost - cost_new
cost = cost_new
g = A.T.dot(r)
step_norm = norm(x[free_set] - x_free_old)
if np.any(v):
free_set = free_set[~v]
else:
break
if max_iter is None:
max_iter = n
max_iter += iteration
termination_status = None
# Main BVLS loop.
optimality = compute_kkt_optimality(g, on_bound)
for iteration in range(iteration, max_iter):
if verbose == 2:
print_iteration_linear(iteration, cost, cost_change,
step_norm, optimality)
if optimality < tol:
termination_status = 1
if termination_status is not None:
break
move_to_free = np.argmax(g * on_bound)
on_bound[move_to_free] = 0
free_set = on_bound == 0
active_set = ~free_set
free_set, = np.nonzero(free_set)
x_free = x[free_set]
x_free_old = x_free.copy()
lb_free = lb[free_set]
ub_free = ub[free_set]
A_free = A[:, free_set]
b_free = b - A.dot(x * active_set)
z = lstsq(A_free, b_free, rcond=-1)[0]
lbv, = np.nonzero(z < lb_free)
ubv, = np.nonzero(z > ub_free)
v = np.hstack((lbv, ubv))
if v.size > 0:
alphas = np.hstack((
lb_free[lbv] - x_free[lbv],
ub_free[ubv] - x_free[ubv])) / (z[v] - x_free[v])
i = np.argmin(alphas)
i_free = v[i]
alpha = alphas[i]
x_free *= 1 - alpha
x_free += alpha * z
if i < lbv.size:
on_bound[free_set[i_free]] = -1
else:
on_bound[free_set[i_free]] = 1
else:
x_free = z
x[free_set] = x_free
step_norm = norm(x_free - x_free_old)
r = A.dot(x) - b
cost_new = 0.5 * np.dot(r, r)
cost_change = cost - cost_new
if cost_change < tol * cost:
termination_status = 2
cost = cost_new
g = A.T.dot(r)
optimality = compute_kkt_optimality(g, on_bound)
if termination_status is None:
termination_status = 0
return OptimizeResult(
x=x, fun=r, cost=cost, optimality=optimality, active_mask=on_bound,
nit=iteration + 1, status=termination_status,
initial_cost=initial_cost)
| 4,994 | 26.596685 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_lsq/common.py
|
"""Functions used by least-squares algorithms."""
from __future__ import division, print_function, absolute_import
from math import copysign
import numpy as np
from numpy.linalg import norm
from scipy.linalg import cho_factor, cho_solve, LinAlgError
from scipy.sparse import issparse
from scipy.sparse.linalg import LinearOperator, aslinearoperator
EPS = np.finfo(float).eps
# Functions related to a trust-region problem.
def intersect_trust_region(x, s, Delta):
"""Find the intersection of a line with the boundary of a trust region.
This function solves the quadratic equation with respect to t
||(x + s*t)||**2 = Delta**2.
Returns
-------
t_neg, t_pos : tuple of float
Negative and positive roots.
Raises
------
ValueError
If `s` is zero or `x` is not within the trust region.
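Examples
--------
A minimal sketch: a line through the origin along the first axis
crosses a trust region of radius 2 at t = -2 and t = 2:
>>> intersect_trust_region(np.zeros(2), np.array([1.0, 0.0]), 2.0)
(-2.0, 2.0)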
"""
a = np.dot(s, s)
if a == 0:
raise ValueError("`s` is zero.")
b = np.dot(x, s)
c = np.dot(x, x) - Delta**2
if c > 0:
raise ValueError("`x` is not within the trust region.")
d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.
# Computations below avoid loss of significance, see "Numerical Recipes".
q = -(b + copysign(d, b))
t1 = q / a
t2 = c / q
if t1 < t2:
return t1, t2
else:
return t2, t1
def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
rtol=0.01, max_iter=10):
"""Solve a trust-region problem arising in least-squares minimization.
This function implements a method described by J. J. More [1]_ and used
in MINPACK, but it relies on a single SVD of the Jacobian instead of a series
of Cholesky decompositions. Before running this function, compute:
``U, s, VT = svd(J, full_matrices=False)``.
Parameters
----------
n : int
Number of variables.
m : int
Number of residuals.
uf : ndarray
Computed as U.T.dot(f).
s : ndarray
Singular values of J.
V : ndarray
Transpose of VT.
Delta : float
Radius of a trust region.
initial_alpha : float, optional
Initial guess for alpha, which might be available from a previous
iteration. If None, determined automatically.
rtol : float, optional
Stopping tolerance for the root-finding procedure. Namely, the
solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
max_iter : int, optional
Maximum allowed number of iterations for the root-finding procedure.
Returns
-------
p : ndarray, shape (n,)
Found solution of a trust-region problem.
alpha : float
Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
Sometimes called Levenberg-Marquardt parameter.
n_iter : int
Number of iterations made by root-finding procedure. Zero means
that Gauss-Newton step was selected as the solution.
References
----------
.. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
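Examples
--------
An illustrative sketch (values chosen arbitrarily) with an identity
Jacobian, where the Gauss-Newton step has norm 5 and is clipped to the
trust-region radius:
>>> J = np.eye(2)
>>> f = np.array([3.0, 4.0])
>>> U, s, VT = np.linalg.svd(J, full_matrices=False)
>>> p, alpha, n_iter = solve_lsq_trust_region(
...     2, 2, U.T.dot(f), s, VT.T, Delta=1.0)
>>> float(np.round(np.linalg.norm(p), 2))
1.0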
"""
def phi_and_derivative(alpha, suf, s, Delta):
"""Function of which to find zero.
It is defined as "norm of regularized (by alpha) least-squares
solution minus `Delta`". Refer to [1]_.
"""
denom = s**2 + alpha
p_norm = norm(suf / denom)
phi = p_norm - Delta
phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
return phi, phi_prime
suf = s * uf
# Check if J has full rank and try Gauss-Newton step.
if m >= n:
threshold = EPS * m * s[0]
full_rank = s[-1] > threshold
else:
full_rank = False
if full_rank:
p = -V.dot(uf / s)
if norm(p) <= Delta:
return p, 0.0, 0
alpha_upper = norm(suf) / Delta
if full_rank:
phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
alpha_lower = -phi / phi_prime
else:
alpha_lower = 0.0
if initial_alpha is None or (not full_rank and initial_alpha == 0):
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
else:
alpha = initial_alpha
for it in range(max_iter):
if alpha < alpha_lower or alpha > alpha_upper:
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)
if phi < 0:
alpha_upper = alpha
ratio = phi / phi_prime
alpha_lower = max(alpha_lower, alpha - ratio)
alpha -= (phi + Delta) * ratio / Delta
if np.abs(phi) < rtol * Delta:
break
p = -V.dot(suf / (s**2 + alpha))
# Make the norm of p equal to Delta; p is changed only slightly during
# this. It is done to prevent p from lying outside the trust region
# (which can cause problems later).
p *= Delta / norm(p)
return p, alpha, it + 1
def solve_trust_region_2d(B, g, Delta):
"""Solve a general trust-region problem in 2 dimensions.
The problem is reformulated as a 4-th order algebraic equation,
the solution of which is found by numpy.roots.
Parameters
----------
B : ndarray, shape (2, 2)
Symmetric matrix, defines a quadratic term of the function.
g : ndarray, shape (2,)
Defines a linear term of the function.
Delta : float
Radius of a trust region.
Returns
-------
p : ndarray, shape (2,)
Found solution.
newton_step : bool
Whether the returned solution is the Newton step which lies within
the trust region.
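Examples
--------
A minimal sketch: for ``B`` the identity, the Newton step ``-g`` has
norm 1, fits inside a radius-2 region and is returned directly:
>>> p, newton_step = solve_trust_region_2d(
...     np.eye(2), np.array([-1.0, 0.0]), 2.0)
>>> newton_step
True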
"""
try:
R, lower = cho_factor(B)
p = -cho_solve((R, lower), g)
if np.dot(p, p) <= Delta**2:
return p, True
except LinAlgError:
pass
a = B[0, 0] * Delta**2
b = B[0, 1] * Delta**2
c = B[1, 1] * Delta**2
d = g[0] * Delta
f = g[1] * Delta
coeffs = np.array(
[-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
t = np.roots(coeffs) # Can handle leading zeros.
t = np.real(t[np.isreal(t)])
p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
i = np.argmin(value)
p = p[:, i]
return p, False
def update_tr_radius(Delta, actual_reduction, predicted_reduction,
step_norm, bound_hit):
"""Update the radius of a trust region based on the cost reduction.
Returns
-------
Delta : float
New radius.
ratio : float
Ratio between actual and predicted reductions. Zero if predicted
reduction is zero.
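Examples
--------
An illustrative call: a ratio of 0.5 leaves the radius unchanged, since
only ratios below 0.25 shrink it, and only ratios above 0.75 with a
boundary hit enlarge it:
>>> update_tr_radius(1.0, 0.5, 1.0, 0.8, False)
(1.0, 0.5)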
"""
if predicted_reduction > 0:
ratio = actual_reduction / predicted_reduction
else:
ratio = 0
if ratio < 0.25:
Delta = 0.25 * step_norm
elif ratio > 0.75 and bound_hit:
Delta *= 2.0
return Delta, ratio
# Construction and minimization of quadratic functions.
def build_quadratic_1d(J, g, s, diag=None, s0=None):
"""Parameterize a multivariate quadratic function along a line.
The resulting univariate quadratic function is given as follows:
::
f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
g.T * (s0 + s*t)
Parameters
----------
J : ndarray, sparse matrix or LinearOperator shape (m, n)
Jacobian matrix, affects the quadratic term.
g : ndarray, shape (n,)
Gradient, defines the linear term.
s : ndarray, shape (n,)
Direction vector of a line.
diag : None or ndarray with shape (n,), optional
Additional diagonal part, affects the quadratic term.
If None, assumed to be 0.
s0 : None or ndarray with shape (n,), optional
Initial point. If None, assumed to be 0.
Returns
-------
a : float
Coefficient for t**2.
b : float
Coefficient for t.
c : float
Free term. Returned only if `s0` is provided.
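Examples
--------
A small sketch with the 2x2 identity Jacobian, giving
``f(t) = 0.5 * t**2 + t``:
>>> build_quadratic_1d(np.eye(2), np.array([1.0, 1.0]),
...                    np.array([1.0, 0.0]))
(0.5, 1.0)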
"""
v = J.dot(s)
a = np.dot(v, v)
if diag is not None:
a += np.dot(s * diag, s)
a *= 0.5
b = np.dot(g, s)
if s0 is not None:
u = J.dot(s0)
b += np.dot(u, v)
c = 0.5 * np.dot(u, u) + np.dot(g, s0)
if diag is not None:
b += np.dot(s0 * diag, s)
c += 0.5 * np.dot(s0 * diag, s0)
return a, b, c
else:
return a, b
def minimize_quadratic_1d(a, b, lb, ub, c=0):
"""Minimize a 1-d quadratic function subject to bounds.
The free term `c` is 0 by default. Bounds must be finite.
Returns
-------
t : float
Minimum point.
y : float
Minimum value.
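Examples
--------
For instance, ``t**2 - 2*t`` on ``[0, 3]`` attains its minimum -1 at
the interior extremum t = 1:
>>> minimize_quadratic_1d(1.0, -2.0, 0, 3)
(1.0, -1.0)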
"""
t = [lb, ub]
if a != 0:
extremum = -0.5 * b / a
if lb < extremum < ub:
t.append(extremum)
t = np.asarray(t)
y = a * t**2 + b * t + c
min_index = np.argmin(y)
return t[min_index], y[min_index]
def evaluate_quadratic(J, g, s, diag=None):
"""Compute values of a quadratic function arising in least squares.
The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.
Parameters
----------
J : ndarray, sparse matrix or LinearOperator, shape (m, n)
Jacobian matrix, affects the quadratic term.
g : ndarray, shape (n,)
Gradient, defines the linear term.
s : ndarray, shape (k, n) or (n,)
Array containing steps as rows.
diag : ndarray, shape (n,), optional
Additional diagonal part, affects the quadratic term.
If None, assumed to be 0.
Returns
-------
values : ndarray with shape (k,) or float
Values of the function. If `s` was 2-dimensional then ndarray is
returned, otherwise float is returned.
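Examples
--------
A quick sketch with the 2x2 identity Jacobian, evaluating
``0.5 * ||s||**2 + g.T s`` for a single step:
>>> evaluate_quadratic(np.eye(2), np.array([1.0, 1.0]),
...                    np.array([1.0, 0.0]))
1.5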
"""
if s.ndim == 1:
Js = J.dot(s)
q = np.dot(Js, Js)
if diag is not None:
q += np.dot(s * diag, s)
else:
Js = J.dot(s.T)
q = np.sum(Js**2, axis=0)
if diag is not None:
q += np.sum(diag * s**2, axis=1)
l = np.dot(s, g)
return 0.5 * q + l
# Utility functions to work with bound constraints.
def in_bounds(x, lb, ub):
"""Check if a point lies within bounds."""
return np.all((x >= lb) & (x <= ub))
def step_size_to_bound(x, s, lb, ub):
"""Compute a min_step size required to reach a bound.
The function computes a positive scalar t, such that x + s * t is on
the bound.
Returns
-------
step : float
Computed step. Non-negative value.
hits : ndarray of int with shape of x
Each element indicates whether a corresponding variable reaches the
bound:
* 0 - the bound was not hit.
* -1 - the lower bound was hit.
* 1 - the upper bound was hit.
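Examples
--------
A small sketch (illustrative values): starting from the origin and
moving along ``s = [1, -1]``, the lower bound of the second variable is
hit first:
>>> step_size_to_bound(np.zeros(2), np.array([1.0, -1.0]),
...                    np.array([-1.0, -1.0]), np.array([2.0, 2.0]))
(1.0, array([ 0, -1]))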
"""
non_zero = np.nonzero(s)
s_non_zero = s[non_zero]
steps = np.empty_like(x)
steps.fill(np.inf)
with np.errstate(over='ignore'):
steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,
(ub - x)[non_zero] / s_non_zero)
min_step = np.min(steps)
return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)
def find_active_constraints(x, lb, ub, rtol=1e-10):
"""Determine which constraints are active in a given point.
The threshold is computed using `rtol` and the absolute value of the
closest bound.
Returns
-------
active : ndarray of int with shape of x
Each component shows whether the corresponding constraint is active:
* 0 - a constraint is not active.
* -1 - a lower bound is active.
* 1 - an upper bound is active.
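Examples
--------
A minimal sketch with unit box bounds, where the first variable sits on
its lower bound and the last on its upper bound:
>>> find_active_constraints(np.array([0.0, 0.5, 1.0]),
...                         np.zeros(3), np.ones(3))
array([-1,  0,  1])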
"""
active = np.zeros_like(x, dtype=int)
if rtol == 0:
active[x <= lb] = -1
active[x >= ub] = 1
return active
lower_dist = x - lb
upper_dist = ub - x
lower_threshold = rtol * np.maximum(1, np.abs(lb))
upper_threshold = rtol * np.maximum(1, np.abs(ub))
lower_active = (np.isfinite(lb) &
(lower_dist <= np.minimum(upper_dist, lower_threshold)))
active[lower_active] = -1
upper_active = (np.isfinite(ub) &
(upper_dist <= np.minimum(lower_dist, upper_threshold)))
active[upper_active] = 1
return active
def make_strictly_feasible(x, lb, ub, rstep=1e-10):
"""Shift a point to the interior of a feasible region.
Each element of the returned vector is at least at a relative distance
`rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
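Examples
--------
An illustrative call with unit box bounds and ``rstep=0.1``, pushing
the boundary values into the interior:
>>> make_strictly_feasible(np.array([0.0, 0.5, 1.0]),
...                        np.zeros(3), np.ones(3), rstep=0.1)
array([0.1, 0.5, 0.9])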
"""
x_new = x.copy()
active = find_active_constraints(x, lb, ub, rstep)
lower_mask = np.equal(active, -1)
upper_mask = np.equal(active, 1)
if rstep == 0:
x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])
x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])
else:
x_new[lower_mask] = (lb[lower_mask] +
rstep * np.maximum(1, np.abs(lb[lower_mask])))
x_new[upper_mask] = (ub[upper_mask] -
rstep * np.maximum(1, np.abs(ub[upper_mask])))
tight_bounds = (x_new < lb) | (x_new > ub)
x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])
return x_new
def CL_scaling_vector(x, g, lb, ub):
"""Compute Coleman-Li scaling vector and its derivatives.
Components of a vector v are defined as follows:
::
| ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
| 1, otherwise
According to this definition v[i] >= 0 for all i. It differs from the
definition in paper [1]_ (eq. (2.2)), where the absolute value of v is
used. Both definitions are equivalent down the line.
Derivatives of v with respect to x take value 1, -1 or 0 depending on a
case.
Returns
-------
v : ndarray with shape of x
Scaling vector.
dv : ndarray with shape of x
Derivatives of v[i] with respect to x[i], diagonal elements of v's
Jacobian.
References
----------
.. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
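Examples
--------
A small sketch (illustrative values): the anti-gradient pushes the
first variable towards its finite lower bound, the second towards its
finite upper bound, while the third variable is effectively unbounded:
>>> x = np.array([0.5, 0.5, 0.5])
>>> g = np.array([1.0, -1.0, 1.0])
>>> lb = np.array([0.0, -np.inf, -np.inf])
>>> ub = np.array([np.inf, 1.0, np.inf])
>>> v, dv = CL_scaling_vector(x, g, lb, ub)
>>> v
array([0.5, 0.5, 1. ])
>>> dv
array([ 1., -1.,  0.])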
"""
v = np.ones_like(x)
dv = np.zeros_like(x)
mask = (g < 0) & np.isfinite(ub)
v[mask] = ub[mask] - x[mask]
dv[mask] = -1
mask = (g > 0) & np.isfinite(lb)
v[mask] = x[mask] - lb[mask]
dv[mask] = 1
return v, dv
def reflective_transformation(y, lb, ub):
"""Compute reflective transformation and its gradient."""
if in_bounds(y, lb, ub):
return y, np.ones_like(y)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
x = y.copy()
g_negative = np.zeros_like(y, dtype=bool)
mask = lb_finite & ~ub_finite
x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
g_negative[mask] = y[mask] < lb[mask]
mask = ~lb_finite & ub_finite
x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
g_negative[mask] = y[mask] > ub[mask]
mask = lb_finite & ub_finite
d = ub - lb
t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
g_negative[mask] = t > d[mask]
g = np.ones_like(y)
g[g_negative] = -1
return x, g
# Functions to display algorithm's progress.
def print_header_nonlinear():
print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}"
.format("Iteration", "Total nfev", "Cost", "Cost reduction",
"Step norm", "Optimality"))
def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,
step_norm, optimality):
if cost_reduction is None:
cost_reduction = " " * 15
else:
cost_reduction = "{0:^15.2e}".format(cost_reduction)
if step_norm is None:
step_norm = " " * 15
else:
step_norm = "{0:^15.2e}".format(step_norm)
print("{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}"
.format(iteration, nfev, cost, cost_reduction,
step_norm, optimality))
def print_header_linear():
print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}"
.format("Iteration", "Cost", "Cost reduction", "Step norm",
"Optimality"))
def print_iteration_linear(iteration, cost, cost_reduction, step_norm,
optimality):
if cost_reduction is None:
cost_reduction = " " * 15
else:
cost_reduction = "{0:^15.2e}".format(cost_reduction)
if step_norm is None:
step_norm = " " * 15
else:
step_norm = "{0:^15.2e}".format(step_norm)
print("{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}".format(
iteration, cost, cost_reduction, step_norm, optimality))
# Simple helper functions.
def compute_grad(J, f):
"""Compute gradient of the least-squares cost function."""
if isinstance(J, LinearOperator):
return J.rmatvec(f)
else:
return J.T.dot(f)
def compute_jac_scale(J, scale_inv_old=None):
"""Compute variables scale based on the Jacobian matrix."""
if issparse(J):
scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5
else:
scale_inv = np.sum(J**2, axis=0)**0.5
if scale_inv_old is None:
scale_inv[scale_inv == 0] = 1
else:
scale_inv = np.maximum(scale_inv, scale_inv_old)
return 1 / scale_inv, scale_inv
def left_multiplied_operator(J, d):
"""Return diag(d) J as LinearOperator."""
J = aslinearoperator(J)
def matvec(x):
return d * J.matvec(x)
def matmat(X):
return d[:, np.newaxis] * J.matmat(X)
def rmatvec(x):
return J.rmatvec(x.ravel() * d)
return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
rmatvec=rmatvec)
def right_multiplied_operator(J, d):
"""Return J diag(d) as LinearOperator."""
J = aslinearoperator(J)
def matvec(x):
return J.matvec(np.ravel(x) * d)
def matmat(X):
return J.matmat(X * d[:, np.newaxis])
def rmatvec(x):
return d * J.rmatvec(x)
return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
rmatvec=rmatvec)
def regularized_lsq_operator(J, diag):
"""Return a matrix arising in regularized least squares as LinearOperator.
The matrix is
[ J ]
[ D ]
where D is a diagonal matrix with elements from `diag`.
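Examples
--------
A tiny sketch of the stacked action (illustrative values):
>>> op = regularized_lsq_operator(np.eye(2), np.ones(2))
>>> op.shape
(4, 2)
>>> op.matvec(np.ones(2))
array([1., 1., 1., 1.])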
"""
J = aslinearoperator(J)
m, n = J.shape
def matvec(x):
return np.hstack((J.matvec(x), diag * x))
def rmatvec(x):
x1 = x[:m]
x2 = x[m:]
return J.rmatvec(x1) + diag * x2
return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
def right_multiply(J, d, copy=True):
"""Compute J diag(d).
If `copy` is False, `J` is modified in place (unless being LinearOperator).
"""
if copy and not isinstance(J, LinearOperator):
J = J.copy()
if issparse(J):
J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe.
elif isinstance(J, LinearOperator):
J = right_multiplied_operator(J, d)
else:
J *= d
return J
def left_multiply(J, d, copy=True):
"""Compute diag(d) J.
If `copy` is False, `J` is modified in place (unless being LinearOperator).
"""
if copy and not isinstance(J, LinearOperator):
J = J.copy()
if issparse(J):
J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe.
elif isinstance(J, LinearOperator):
J = left_multiplied_operator(J, d)
else:
J *= d[:, np.newaxis]
return J
def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):
"""Check termination condition for nonlinear least squares."""
ftol_satisfied = dF < ftol * F and ratio > 0.25
xtol_satisfied = dx_norm < xtol * (xtol + x_norm)
if ftol_satisfied and xtol_satisfied:
return 4
elif ftol_satisfied:
return 2
elif xtol_satisfied:
return 3
else:
return None
def scale_for_robust_loss_function(J, f, rho):
"""Scale Jacobian and residuals for a robust loss function.
Arrays are modified in place.
"""
J_scale = rho[1] + 2 * rho[2] * f**2
J_scale[J_scale < EPS] = EPS
J_scale **= 0.5
f *= rho[1] / J_scale
return left_multiply(J, J_scale, copy=False), f
| 20,823 | 27.293478 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_lsq/__init__.py
|
"""This module contains least-squares algorithms."""
from __future__ import division, print_function, absolute_import
from .least_squares import least_squares
from .lsq_linear import lsq_linear
__all__ = ['least_squares', 'lsq_linear']
| 238 | 28.875 | 64 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/optimize/_lsq/trf_linear.py
|
"""The adaptation of Trust Region Reflective algorithm for a linear
least-squares problem."""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm
from scipy.linalg import qr, solve_triangular
from scipy.sparse.linalg import lsmr
from scipy.optimize import OptimizeResult
from .givens_elimination import givens_elimination
from .common import (
EPS, step_size_to_bound, find_active_constraints, in_bounds,
make_strictly_feasible, build_quadratic_1d, evaluate_quadratic,
minimize_quadratic_1d, CL_scaling_vector, reflective_transformation,
print_header_linear, print_iteration_linear, compute_grad,
regularized_lsq_operator, right_multiplied_operator)
def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True):
"""Solve regularized least squares using information from QR-decomposition.
The initial problem is to solve the following system in a least-squares
sense:
::
A x = b
D x = 0
where D is a diagonal matrix. The method is based on the QR decomposition
of the form A P = Q R, where P is a column permutation matrix, Q is an
orthogonal matrix and R is an upper triangular matrix.
Parameters
----------
m, n : int
Initial shape of A.
R : ndarray, shape (n, n)
Upper triangular matrix from QR decomposition of A.
QTb : ndarray, shape (n,)
First n components of Q^T b.
perm : ndarray, shape (n,)
Array defining the column permutation of A, such that the i-th column of
P is the perm[i]-th column of the identity matrix.
diag : ndarray, shape (n,)
Array containing diagonal elements of D.
Returns
-------
x : ndarray, shape (n,)
Found least-squares solution.
"""
if copy_R:
R = R.copy()
v = QTb.copy()
givens_elimination(R, v, diag[perm])
abs_diag_R = np.abs(np.diag(R))
threshold = EPS * max(m, n) * np.max(abs_diag_R)
nns, = np.nonzero(abs_diag_R > threshold)
R = R[np.ix_(nns, nns)]
v = v[nns]
x = np.zeros(n)
x[perm[nns]] = solve_triangular(R, v)
return x
def backtracking(A, g, x, p, theta, p_dot_g, lb, ub):
"""Find an appropriate step size using backtracking line search."""
alpha = 1
while True:
x_new, _ = reflective_transformation(x + alpha * p, lb, ub)
step = x_new - x
cost_change = -evaluate_quadratic(A, g, step)
if cost_change > -0.1 * alpha * p_dot_g:
break
alpha *= 0.5
active = find_active_constraints(x_new, lb, ub)
if np.any(active != 0):
x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub)
x_new = make_strictly_feasible(x_new, lb, ub, rstep=0)
step = x_new - x
cost_change = -evaluate_quadratic(A, g, step)
return x, step, cost_change
def select_step(x, A_h, g_h, c_h, p, p_h, d, lb, ub, theta):
"""Select the best step according to Trust Region Reflective algorithm."""
if in_bounds(x + p, lb, ub):
return p
p_stride, hits = step_size_to_bound(x, p, lb, ub)
r_h = np.copy(p_h)
r_h[hits.astype(bool)] *= -1
r = d * r_h
# Restrict step, such that it hits the bound.
p *= p_stride
p_h *= p_stride
x_on_bound = x + p
# Find the step size along reflected direction.
r_stride_u, _ = step_size_to_bound(x_on_bound, r, lb, ub)
# Stay interior.
r_stride_l = (1 - theta) * r_stride_u
r_stride_u *= theta
if r_stride_u > 0:
a, b, c = build_quadratic_1d(A_h, g_h, r_h, s0=p_h, diag=c_h)
r_stride, r_value = minimize_quadratic_1d(
a, b, r_stride_l, r_stride_u, c=c)
r_h = p_h + r_h * r_stride
r = d * r_h
else:
r_value = np.inf
# Now correct p_h to make it strictly interior.
p_h *= theta
p *= theta
p_value = evaluate_quadratic(A_h, g_h, p_h, diag=c_h)
ag_h = -g_h
ag = d * ag_h
ag_stride_u, _ = step_size_to_bound(x, ag, lb, ub)
ag_stride_u *= theta
a, b = build_quadratic_1d(A_h, g_h, ag_h, diag=c_h)
ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride_u)
ag *= ag_stride
if p_value < r_value and p_value < ag_value:
return p
elif r_value < p_value and r_value < ag_value:
return r
else:
return ag
def trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol, max_iter,
verbose):
m, n = A.shape
x, _ = reflective_transformation(x_lsq, lb, ub)
x = make_strictly_feasible(x, lb, ub, rstep=0.1)
if lsq_solver == 'exact':
QT, R, perm = qr(A, mode='economic', pivoting=True)
QT = QT.T
if m < n:
R = np.vstack((R, np.zeros((n - m, n))))
QTr = np.zeros(n)
k = min(m, n)
elif lsq_solver == 'lsmr':
r_aug = np.zeros(m + n)
auto_lsmr_tol = False
if lsmr_tol is None:
lsmr_tol = 1e-2 * tol
elif lsmr_tol == 'auto':
auto_lsmr_tol = True
r = A.dot(x) - b
g = compute_grad(A, r)
cost = 0.5 * np.dot(r, r)
initial_cost = cost
termination_status = None
step_norm = None
cost_change = None
if max_iter is None:
max_iter = 100
if verbose == 2:
print_header_linear()
for iteration in range(max_iter):
v, dv = CL_scaling_vector(x, g, lb, ub)
g_scaled = g * v
g_norm = norm(g_scaled, ord=np.inf)
if g_norm < tol:
termination_status = 1
if verbose == 2:
print_iteration_linear(iteration, cost, cost_change,
step_norm, g_norm)
if termination_status is not None:
break
diag_h = g * dv
diag_root_h = diag_h ** 0.5
d = v ** 0.5
g_h = d * g
A_h = right_multiplied_operator(A, d)
if lsq_solver == 'exact':
QTr[:k] = QT.dot(r)
p_h = -regularized_lsq_with_qr(m, n, R * d[perm], QTr, perm,
diag_root_h, copy_R=False)
elif lsq_solver == 'lsmr':
lsmr_op = regularized_lsq_operator(A_h, diag_root_h)
r_aug[:m] = r
if auto_lsmr_tol:
eta = 1e-2 * min(0.5, g_norm)
lsmr_tol = max(EPS, min(0.1, eta * g_norm))
p_h = -lsmr(lsmr_op, r_aug, atol=lsmr_tol, btol=lsmr_tol)[0]
p = d * p_h
p_dot_g = np.dot(p, g)
if p_dot_g > 0:
termination_status = -1
theta = 1 - min(0.005, g_norm)
step = select_step(x, A_h, g_h, diag_h, p, p_h, d, lb, ub, theta)
cost_change = -evaluate_quadratic(A, g, step)
# Perhaps almost never executed: the idea is that `p` is a descent
# direction, thus we must find an acceptable cost decrease using simple
# "backtracking", otherwise the algorithm's logic would break.
if cost_change < 0:
x, step, cost_change = backtracking(
A, g, x, p, theta, p_dot_g, lb, ub)
else:
x = make_strictly_feasible(x + step, lb, ub, rstep=0)
step_norm = norm(step)
r = A.dot(x) - b
g = compute_grad(A, r)
if cost_change < tol * cost:
termination_status = 2
cost = 0.5 * np.dot(r, r)
if termination_status is None:
termination_status = 0
active_mask = find_active_constraints(x, lb, ub, rtol=tol)
return OptimizeResult(
x=x, fun=r, cost=cost, optimality=g_norm, active_mask=active_mask,
nit=iteration + 1, status=termination_status,
initial_cost=initial_cost)
| 7,643 | 29.333333 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/ltisys.py
|
"""
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Apr 2011: Jeffrey Armstrong <jeff@approximatrix.com>
# Added dlsim, dstep, dimpulse, cont2discrete
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
# Merged discrete systems and added dlti
import warnings
# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
# use scipy's qr until this is solved
import scipy._lib.six as six
from scipy.linalg import qr as s_qr
from scipy import integrate, interpolate, linalg
from scipy.interpolate import interp1d
from scipy._lib.six import xrange
from .filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk,
freqz_zpk)
from .lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk,
cont2discrete)
import numpy
import numpy as np
from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
'dfreqresp', 'dbode']
class LinearTimeInvariant(object):
def __new__(cls, *system, **kwargs):
"""Create a new object, don't allow direct instances."""
if cls is LinearTimeInvariant:
raise NotImplementedError('The LinearTimeInvariant class is not '
'meant to be used directly, use `lti` '
'or `dlti` instead.')
return super(LinearTimeInvariant, cls).__new__(cls)
def __init__(self):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
super(LinearTimeInvariant, self).__init__()
self.inputs = None
self.outputs = None
self._dt = None
@property
def dt(self):
"""Return the sampling time of the system, `None` for `lti` systems."""
return self._dt
@property
def _dt_dict(self):
if self.dt is None:
return {}
else:
return {'dt': self.dt}
@property
def zeros(self):
"""Zeros of the system."""
return self.to_zpk().zeros
@property
def poles(self):
"""Poles of the system."""
return self.to_zpk().poles
def _as_ss(self):
"""Convert to `StateSpace` system, without copying.
Returns
-------
sys: StateSpace
The `StateSpace` system. If the class is already an instance of
`StateSpace` then this instance is returned.
"""
if isinstance(self, StateSpace):
return self
else:
return self.to_ss()
def _as_zpk(self):
"""Convert to `ZerosPolesGain` system, without copying.
Returns
-------
sys: ZerosPolesGain
The `ZerosPolesGain` system. If the class is already an instance of
`ZerosPolesGain` then this instance is returned.
"""
if isinstance(self, ZerosPolesGain):
return self
else:
return self.to_zpk()
def _as_tf(self):
"""Convert to `TransferFunction` system, without copying.
Returns
-------
sys: TransferFunction
The `TransferFunction` system. If the class is already an instance of
`TransferFunction` then this instance is returned.
"""
if isinstance(self, TransferFunction):
return self
else:
return self.to_tf()
class lti(LinearTimeInvariant):
"""
Continuous-time linear time invariant system base class.
Parameters
----------
*system : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
continuous-time subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
See Also
--------
ZerosPolesGain, StateSpace, TransferFunction, dlti
Notes
-----
`lti` instances do not exist directly. Instead, `lti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
If (numerator, denominator) is passed in for ``*system``, coefficients for
both the numerator and denominator should be specified in descending
exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3,
5]``).
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> signal.lti(1, 2, 3, 4)
StateSpaceContinuous(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: None
)
>>> signal.lti([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
>>> signal.lti([3, 4], [1, 2])
TransferFunctionContinuous(
array([3., 4.]),
array([1., 2.]),
dt: None
)
"""
def __new__(cls, *system):
"""Create an instance of the appropriate subclass."""
if cls is lti:
N = len(system)
if N == 2:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous, *system)
elif N == 3:
return ZerosPolesGainContinuous.__new__(
ZerosPolesGainContinuous, *system)
elif N == 4:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system)
else:
raise ValueError("`system` needs to be an instance of `lti` "
"or have 2, 3 or 4 arguments.")
# __new__ was called from a subclass, let it call its own functions
return super(lti, cls).__new__(cls)
def __init__(self, *system):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
super(lti, self).__init__(*system)
def impulse(self, X0=None, T=None, N=None):
"""
Return the impulse response of a continuous-time system.
See `impulse` for details.
"""
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
"""
Return the step response of a continuous-time system.
See `step` for details.
"""
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
"""
Return the response of a continuous-time system to input `U`.
See `lsim` for details.
"""
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `bode` for details.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sys = signal.TransferFunction([1], [1, 1])
>>> w, mag, phase = sys.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""
Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `freqresp` for details.
"""
return freqresp(self, w=w, n=n)
def to_discrete(self, dt, method='zoh', alpha=None):
"""Return a discretized version of the current system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti`
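        Examples
        --------
        A minimal sketch; `to_discrete` itself is implemented by the
        concrete subclasses, so this call is dispatched to
        `TransferFunctionContinuous.to_discrete`:
        >>> from scipy import signal
        >>> sys = signal.lti([1], [1, 1])
        >>> dsys = sys.to_discrete(dt=0.1)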
"""
raise NotImplementedError('to_discrete is not implemented for this '
'system class.')
class dlti(LinearTimeInvariant):
"""
Discrete-time linear time invariant system base class.
Parameters
----------
*system: arguments
The `dlti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
discrete-time subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to ``True``
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, TransferFunction, lti
Notes
-----
`dlti` instances do not exist directly. Instead, `dlti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
If (numerator, denominator) is passed in for ``*system``, coefficients for
both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3,
5]``).
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy import signal
>>> signal.dlti(1, 2, 3, 4)
StateSpaceDiscrete(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: True
)
>>> signal.dlti(1, 2, 3, 4, dt=0.1)
StateSpaceDiscrete(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: 0.1
)
>>> signal.dlti([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
>>> signal.dlti([3, 4], [1, 2], dt=0.1)
TransferFunctionDiscrete(
array([3., 4.]),
array([1., 2.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Create an instance of the appropriate subclass."""
if cls is dlti:
N = len(system)
if N == 2:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete, *system, **kwargs)
elif N == 3:
return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete,
*system, **kwargs)
elif N == 4:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system,
**kwargs)
else:
raise ValueError("`system` needs to be an instance of `dlti` "
"or have 2, 3 or 4 arguments.")
# __new__ was called from a subclass, let it call its own functions
return super(dlti, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""
        Initialize the `dlti` baseclass.
The heavy lifting is done by the subclasses.
"""
dt = kwargs.pop('dt', True)
super(dlti, self).__init__(*system, **kwargs)
self.dt = dt
@property
def dt(self):
"""Return the sampling time of the system."""
return self._dt
@dt.setter
def dt(self, dt):
self._dt = dt
def impulse(self, x0=None, t=None, n=None):
"""
Return the impulse response of the discrete-time `dlti` system.
See `dimpulse` for details.
"""
return dimpulse(self, x0=x0, t=t, n=n)
def step(self, x0=None, t=None, n=None):
"""
Return the step response of the discrete-time `dlti` system.
See `dstep` for details.
"""
return dstep(self, x0=x0, t=t, n=n)
def output(self, u, t, x0=None):
"""
Return the response of the discrete-time system to input `u`.
See `dlsim` for details.
"""
return dlsim(self, u, t, x0=x0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a discrete-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `dbode` for details.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(z) = 1 / (z^2 + 2z + 3) with sampling time 0.5s
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5)
Equivalent: signal.dbode(sys)
>>> w, mag, phase = sys.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return dbode(self, w=w, n=n)
def freqresp(self, w=None, n=10000, whole=False):
"""
Calculate the frequency response of a discrete-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `dfreqresp` for details.
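        Examples
        --------
        A minimal sketch for a discrete-time first-order system:
        >>> from scipy import signal
        >>> sys = signal.TransferFunction([1], [1, -0.5], dt=0.1)
        >>> w, H = sys.freqresp()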
"""
return dfreqresp(self, w=w, n=n, whole=whole)
class TransferFunction(LinearTimeInvariant):
r"""Linear Time Invariant system class in transfer function form.
Represents the system as the continuous-time transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
discrete-time transfer function
    :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
`TransferFunction` systems inherit additional
    functionality from the `lti` or the `dlti` class, depending on
which system representation is used.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, lti, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be
represented as ``[1, 3, 5]``)
Examples
--------
Construct the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([1., 3., 3.]),
array([1., 2., 1.]),
dt: None
)
Construct the transfer function with a sampling time of 0.1 seconds:
.. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}
>>> signal.TransferFunction(num, den, dt=0.1)
TransferFunctionDiscrete(
array([1., 3., 3.]),
array([1., 2., 1.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of lti."""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_tf()
# Choose whether to inherit from `lti` or from `dlti`
if cls is TransferFunction:
if kwargs.get('dt') is None:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous,
*system,
**kwargs)
else:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete,
*system,
**kwargs)
# No special conversion needed
return super(TransferFunction, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super(TransferFunction, self).__init__(**kwargs)
self._num = None
self._den = None
self.num, self.den = normalize(*system)
def __repr__(self):
"""Return representation of the system's transfer function"""
return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format(
self.__class__.__name__,
repr(self.num),
repr(self.den),
repr(self.dt),
)
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
return self._num
@num.setter
def num(self, num):
self._num = atleast_1d(num)
# Update dimensions
if len(self.num.shape) > 1:
self.outputs, self.inputs = self.num.shape
else:
self.outputs = 1
self.inputs = 1
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
return self._den
@den.setter
def den(self, den):
self._den = atleast_1d(den)
def _copy(self, system):
"""
Copy the parameters of another `TransferFunction` object
Parameters
----------
system : `TransferFunction`
            The `TransferFunction` system that is to be copied
"""
self.num = system.num
self.den = system.den
def to_tf(self):
"""
Return a copy of the current `TransferFunction` system.
Returns
-------
sys : instance of `TransferFunction`
The current system (copy)
"""
return copy.deepcopy(self)
def to_zpk(self):
"""
Convert system representation to `ZerosPolesGain`.
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*tf2zpk(self.num, self.den),
**self._dt_dict)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*tf2ss(self.num, self.den),
**self._dt_dict)
@staticmethod
def _z_to_zinv(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
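        For example, a minimal sketch of the zero-padding this performs
        (``(z**2 + 2) / (z + 5)`` becomes ``(1 + 2 z**-2) / (z**-1 + 5 z**-2)``,
        i.e. ``den`` is padded to ``[0, 1, 5]``):
        >>> num, den = TransferFunction._z_to_zinv([1, 0, 2], [1, 5])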
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((np.zeros(diff), den))
elif diff < 0:
num = np.hstack((np.zeros(-diff), num))
return num, den
@staticmethod
def _zinv_to_z(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((den, np.zeros(diff)))
elif diff < 0:
num = np.hstack((num, np.zeros(-diff)))
return num, den
class TransferFunctionContinuous(TransferFunction, lti):
r"""
Continuous-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Continuous-time `TransferFunction` systems inherit additional
functionality from the `lti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
See Also
--------
ZerosPolesGain, StateSpace, lti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``)
Examples
--------
Construct the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `TransferFunction` system.
Parameters: See `cont2discrete` for details.
Returns
-------
        sys: instance of `dlti` and `TransferFunction`
"""
return TransferFunction(*cont2discrete((self.num, self.den),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class TransferFunctionDiscrete(TransferFunction, dlti):
r"""
Discrete-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Discrete-time `TransferFunction` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as
``[1, 3, 5]``).
Examples
--------
Construct the transfer function with a sampling time of 0.5 seconds:
.. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den, 0.5)
TransferFunctionDiscrete(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: 0.5
)
"""
pass
class ZerosPolesGain(LinearTimeInvariant):
r"""
Linear Time Invariant system class in zeros, poles, gain form.
Represents the system as the continuous- or discrete-time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
    `ZerosPolesGain` systems inherit additional functionality from the `lti`
    or the `dlti` class, depending on which system representation is used.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, StateSpace, lti, dlti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
>>> from scipy import signal
Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_zpk()
# Choose whether to inherit from `lti` or from `dlti`
if cls is ZerosPolesGain:
if kwargs.get('dt') is None:
return ZerosPolesGainContinuous.__new__(
ZerosPolesGainContinuous,
*system,
**kwargs)
else:
return ZerosPolesGainDiscrete.__new__(
ZerosPolesGainDiscrete,
*system,
**kwargs
)
# No special conversion needed
return super(ZerosPolesGain, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the zeros, poles, gain system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
super(ZerosPolesGain, self).__init__(**kwargs)
self._zeros = None
self._poles = None
self._gain = None
self.zeros, self.poles, self.gain = system
def __repr__(self):
"""Return representation of the `ZerosPolesGain` system."""
return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format(
self.__class__.__name__,
repr(self.zeros),
repr(self.poles),
repr(self.gain),
repr(self.dt),
)
@property
def zeros(self):
"""Zeros of the `ZerosPolesGain` system."""
return self._zeros
@zeros.setter
def zeros(self, zeros):
self._zeros = atleast_1d(zeros)
# Update dimensions
if len(self.zeros.shape) > 1:
self.outputs, self.inputs = self.zeros.shape
else:
self.outputs = 1
self.inputs = 1
@property
def poles(self):
"""Poles of the `ZerosPolesGain` system."""
return self._poles
@poles.setter
def poles(self, poles):
self._poles = atleast_1d(poles)
@property
def gain(self):
"""Gain of the `ZerosPolesGain` system."""
return self._gain
@gain.setter
def gain(self, gain):
self._gain = gain
def _copy(self, system):
"""
Copy the parameters of another `ZerosPolesGain` system.
Parameters
----------
system : instance of `ZerosPolesGain`
The zeros, poles gain system that is to be copied
"""
self.poles = system.poles
self.zeros = system.zeros
self.gain = system.gain
def to_tf(self):
"""
Convert system representation to `TransferFunction`.
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain),
**self._dt_dict)
def to_zpk(self):
"""
        Return a copy of the current `ZerosPolesGain` system.
Returns
-------
sys : instance of `ZerosPolesGain`
The current system (copy)
"""
return copy.deepcopy(self)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain),
**self._dt_dict)
class ZerosPolesGainContinuous(ZerosPolesGain, lti):
r"""
Continuous-time Linear Time Invariant system in zeros, poles, gain form.
Represents the system as the continuous time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Continuous-time `ZerosPolesGain` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
See Also
--------
TransferFunction, StateSpace, lti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
>>> from scipy import signal
Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `ZerosPolesGain` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `ZerosPolesGain`
"""
return ZerosPolesGain(
*cont2discrete((self.zeros, self.poles, self.gain),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class ZerosPolesGainDiscrete(ZerosPolesGain, dlti):
r"""
Discrete-time Linear Time Invariant system in zeros, poles, gain form.
Represents the system as the discrete-time transfer function
    :math:`H(z)=k \prod_i (z - z[i]) / \prod_j (z - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Discrete-time `ZerosPolesGain` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
TransferFunction, StateSpace, dlti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
>>> from scipy import signal
    Omitting ``dt`` creates a continuous-time system.
    Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
"""
pass
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
class StateSpace(LinearTimeInvariant):
r"""
Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u` or the discrete-time difference
equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems
    inherit additional functionality from the `lti` or the `dlti` class,
    depending on which system representation is used.
Parameters
----------
*system: arguments
        The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, lti, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
>>> sys.to_discrete(0.1)
StateSpaceDiscrete(
array([[1. , 0.1],
[0. , 1. ]]),
array([[0.005],
[0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[1. , 0.1],
[0. , 1. ]]),
array([[0.005],
[0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
# Override Numpy binary operations and ufuncs
__array_priority__ = 100.0
__array_ufunc__ = None
def __new__(cls, *system, **kwargs):
"""Create new StateSpace object and settle inheritance."""
# Handle object conversion if input is an instance of `lti`
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_ss()
# Choose whether to inherit from `lti` or from `dlti`
if cls is StateSpace:
if kwargs.get('dt') is None:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system, **kwargs)
else:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete,
*system, **kwargs)
# No special conversion needed
return super(StateSpace, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space lti/dlti system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super(StateSpace, self).__init__(**kwargs)
self._A = None
self._B = None
self._C = None
self._D = None
self.A, self.B, self.C, self.D = abcd_normalize(*system)
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
repr(self.dt),
)
def _check_binop_other(self, other):
return isinstance(other, (StateSpace, np.ndarray, float, complex,
np.number) + six.integer_types)
def __mul__(self, other):
"""
Post-multiply another system or a scalar
Handles multiplication of systems in the sense of a frequency domain
multiplication. That means, given two systems E1(s) and E2(s), their
multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s)
is equivalent to first applying E2(s), and then E1(s).
Notes
-----
For SISO systems the order of system application does not matter.
However, for MIMO systems, where the two systems are matrices, the
        order above ensures standard matrix multiplication rules apply.
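        Examples
        --------
        A minimal sketch of a series interconnection of two integrators
        (values chosen purely for illustration):
        >>> from scipy import signal
        >>> G = signal.StateSpace([[0.]], [[1.]], [[1.]], [[0.]])  # H(s) = 1/s
        >>> H = G * G  # H(s) = 1/s**2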
"""
if not self._check_binop_other(other):
return NotImplemented
if isinstance(other, StateSpace):
# Disallow mix of discrete and continuous systems.
if type(other) is not type(self):
return NotImplemented
if self.dt != other.dt:
raise TypeError('Cannot multiply systems with different `dt`.')
n1 = self.A.shape[0]
n2 = other.A.shape[0]
            # Interconnection of systems
            # x1' = A1 x1 + B1 u1
            # y1  = C1 x1 + D1 u1
            # x2' = A2 x2 + B2 u2
            # y2  = C2 x2 + D2 u2
            #
            # Plugging in with u1 = y2 (`other` is applied first) yields
            # [x1']   [A1 B1*C2]   [x1]   [B1*D2]
            # [x2'] = [0  A2   ] * [x2] + [B2   ] u2
            #
            #                      [x1]
            #  y1  = [C1 D1*C2] *  [x2] + D1*D2 u2
a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))),
np.hstack((zeros((n2, n1)), other.A))))
b = np.vstack((np.dot(self.B, other.D), other.B))
c = np.hstack((self.C, np.dot(self.D, other.C)))
d = np.dot(self.D, other.D)
else:
# Assume that other is a scalar / matrix
# For post multiplication the input gets scaled
a = self.A
b = np.dot(self.B, other)
c = self.C
d = np.dot(self.D, other)
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype))
def __rmul__(self, other):
"""Pre-multiply a scalar or matrix (but not StateSpace)"""
if not self._check_binop_other(other) or isinstance(other, StateSpace):
return NotImplemented
# For pre-multiplication only the output gets scaled
a = self.A
b = self.B
c = np.dot(other, self.C)
d = np.dot(other, self.D)
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype))
def __neg__(self):
"""Negate the system (equivalent to pre-multiplying by -1)."""
return StateSpace(self.A, self.B, -self.C, -self.D)
def __add__(self, other):
"""
Adds two systems in the sense of frequency domain addition.
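        A minimal sketch of a parallel interconnection (values chosen purely
        for illustration):
        >>> from scipy import signal
        >>> G = signal.StateSpace([[0.]], [[1.]], [[1.]], [[0.]])  # H(s) = 1/s
        >>> H = G + G  # H(s) = 2/s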
"""
if not self._check_binop_other(other):
return NotImplemented
if isinstance(other, StateSpace):
# Disallow mix of discrete and continuous systems.
if type(other) is not type(self):
raise TypeError('Cannot add {} and {}'.format(type(self),
type(other)))
if self.dt != other.dt:
raise TypeError('Cannot add systems with different `dt`.')
# Interconnection of systems
# x1' = A1 x1 + B1 u
# y1 = C1 x1 + D1 u
# x2' = A2 x2 + B2 u
# y2 = C2 x2 + D2 u
# y = y1 + y2
#
# Plugging in yields
# [x1'] [A1 0 ] [x1] [B1]
# [x2'] = [0 A2] [x2] + [B2] u
# [x1]
# y = [C1 C2] [x2] + [D1 + D2] u
a = linalg.block_diag(self.A, other.A)
b = np.vstack((self.B, other.B))
c = np.hstack((self.C, other.C))
d = self.D + other.D
else:
other = np.atleast_2d(other)
if self.D.shape == other.shape:
# A scalar/matrix is really just a static system (A=0, B=0, C=0)
a = self.A
b = self.B
c = self.C
d = self.D + other
else:
raise ValueError("Cannot add systems with incompatible dimensions")
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype))
def __sub__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return self.__add__(-other)
def __radd__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return self.__add__(other)
def __rsub__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return (-self).__add__(other)
def __truediv__(self, other):
"""
Divide by a scalar
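        A minimal sketch (halves the system's input-to-output gain):
        >>> from scipy import signal
        >>> G = signal.StateSpace([[0.]], [[1.]], [[1.]], [[0.]])
        >>> H = G / 2.0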
"""
# Division by non-StateSpace scalars
if not self._check_binop_other(other) or isinstance(other, StateSpace):
return NotImplemented
if isinstance(other, np.ndarray) and other.ndim > 0:
# It's ambiguous what this means, so disallow it
raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays")
return self.__mul__(1/other)
@property
def A(self):
"""State matrix of the `StateSpace` system."""
return self._A
@A.setter
def A(self, A):
self._A = _atleast_2d_or_none(A)
@property
def B(self):
"""Input matrix of the `StateSpace` system."""
return self._B
@B.setter
def B(self, B):
self._B = _atleast_2d_or_none(B)
self.inputs = self.B.shape[-1]
@property
def C(self):
"""Output matrix of the `StateSpace` system."""
return self._C
@C.setter
def C(self, C):
self._C = _atleast_2d_or_none(C)
self.outputs = self.C.shape[0]
@property
def D(self):
"""Feedthrough matrix of the `StateSpace` system."""
return self._D
@D.setter
def D(self, D):
self._D = _atleast_2d_or_none(D)
def _copy(self, system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
self.A = system.A
self.B = system.B
self.C = system.C
self.D = system.D
def to_tf(self, **kwargs):
"""
Convert system representation to `TransferFunction`.
Parameters
----------
kwargs : dict, optional
            Additional keywords passed to `ss2tf`
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_zpk(self, **kwargs):
"""
Convert system representation to `ZerosPolesGain`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_ss(self):
"""
Return a copy of the current `StateSpace` system.
Returns
-------
sys : instance of `StateSpace`
The current system (copy)
"""
return copy.deepcopy(self)
class StateSpaceContinuous(StateSpace, lti):
r"""
Continuous-time Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u`.
Continuous-time `StateSpace` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system: arguments
        The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
See Also
--------
TransferFunction, ZerosPolesGain, lti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `StateSpace` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `StateSpace`
"""
return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class StateSpaceDiscrete(StateSpace, dlti):
r"""
Discrete-time Linear Time Invariant system in state-space form.
Represents the system as the discrete-time difference equation
:math:`x[k+1] = A x[k] + B u[k]`.
    Discrete-time `StateSpace` systems inherit additional functionality from
    the `dlti`
class.
Parameters
----------
*system: arguments
        The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[ 1. , 0.1],
[ 0. , 1. ]]),
array([[ 0.005],
[ 0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
pass
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
    Simulate output of a continuous-time linear system by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the `lti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
    T : array_like (1D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
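    Examples
    --------
    A minimal sketch, simulating a first-order lag driven by a unit step:
    >>> from scipy import signal
    >>> import numpy as np
    >>> system = ([1.0], [1.0, 1.0])
    >>> t = np.linspace(0, 5, 101)
    >>> u = np.ones_like(t)
    >>> tout, y, x = signal.lsim2(system, U=u, T=t)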
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('lsim2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
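    For example (a minimal sketch), a complex array with zero imaginary
    part is cast to float64 without raising a ComplexWarning:
    >>> x = _cast_to_array_dtype(np.array([1 + 0j]), np.array([0.5]))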
"""
    if numpy.issubdtype(in2.dtype, numpy.floating):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U = 0 or None, a zero input is used.
T : array_like
The time steps at which the input is defined and at which the
output is desired. Must be nonnegative, increasing, and equally spaced.
X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : bool, optional
Whether to use linear (True, the default) or zero-order-hold (False)
interpolation for the input array.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time evolution of the state vector.
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
Simulate a double integrator y'' = u, with a constant input u = 1
>>> from scipy import signal
>>> system = signal.lti([[0., 1.], [0., 0.]], [[0.], [1.]], [[1., 0.]], 0.)
>>> t = np.linspace(0, 5)
>>> u = np.ones_like(t)
>>> tout, y, x = signal.lsim(system, u, t)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('lsim can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
n_states = A.shape[0]
n_inputs = B.shape[1]
n_steps = T.size
if X0 is None:
X0 = zeros(n_states, sys.A.dtype)
xout = zeros((n_steps, n_states), sys.A.dtype)
if T[0] == 0:
xout[0] = X0
elif T[0] > 0:
# step forward to initial time, with zero input
xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
else:
raise ValueError("Initial time must be nonnegative")
no_input = (U is None or
(isinstance(U, (int, float)) and U == 0.) or
not np.any(U))
if n_steps == 1:
yout = squeeze(dot(xout, transpose(C)))
if not no_input:
yout += squeeze(dot(U, transpose(D)))
return T, squeeze(yout), squeeze(xout)
dt = T[1] - T[0]
if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
warnings.warn("Non-uniform timesteps are deprecated. Results may be "
"slow and/or inaccurate.", DeprecationWarning)
return lsim2(system, U, T, X0)
if no_input:
# Zero input: just use matrix exponential
# take transpose because state is a row vector
expAT_dt = linalg.expm(transpose(A) * dt)
        for i in range(1, n_steps):
xout[i] = dot(xout[i-1], expAT_dt)
yout = squeeze(dot(xout, transpose(C)))
return T, squeeze(yout), squeeze(xout)
# Nonzero input
U = atleast_1d(U)
if U.ndim == 1:
U = U[:, np.newaxis]
if U.shape[0] != n_steps:
raise ValueError("U must have the same number of rows "
"as elements in T.")
if U.shape[1] != n_inputs:
raise ValueError("System does not define that many inputs.")
if not interp:
# Zero-order hold
# Algorithm: to integrate from time 0 to time dt, we solve
# xdot = A x + B u, x(0) = x0
# udot = 0, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 ] [ u0 ]
M = np.vstack([np.hstack([A * dt, B * dt]),
np.zeros((n_inputs, n_states + n_inputs))])
# transpose everything because the state and input are row vectors
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd = expMT[n_states:, :n_states]
        for i in range(1, n_steps):
xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
else:
# Linear interpolation between steps
# Algorithm: to integrate from time 0 to time dt, with linear
# interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
# xdot = A x + B u, x(0) = x0
# udot = (u1 - u0) / dt, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 I ] [ u0 ]
# [u1 - u0] [ 0 0 0 ] [u1 - u0]
M = np.vstack([np.hstack([A * dt, B * dt,
np.zeros((n_states, n_inputs))]),
np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
np.identity(n_inputs)]),
np.zeros((n_inputs, n_states + 2 * n_inputs))])
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd1 = expMT[n_states+n_inputs:, :n_states]
Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
        for i in range(1, n_steps):
xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : array_like
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
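    For example (a rough sketch): a single pole at ``-2`` gives a time
    constant ``tc = 0.5`` and a grid on ``[0, 3.5]``:
    >>> t = _default_response_times(np.array([[-2.0]]), 50)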
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
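    Examples
    --------
    A minimal sketch, computing the impulse response of a second order
    system with a repeated root at ``-1``:
    >>> from scipy import signal
    >>> system = ([1.0], [1.0, 2.0, 1.0])
    >>> t, y = signal.impulse(system)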
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('impulse can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if X0 is None:
X = squeeze(sys.B)
else:
X = squeeze(sys.B + X0)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
_, h, _ = lsim(sys, 0., T, X, interp=False)
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.8.0
Examples
--------
    Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('impulse2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
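    Examples
    --------
    A minimal sketch, plotting the step response of a first-order lag:
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> system = ([1.0], [1.0, 1.0])
    >>> t, y = signal.step(system)
    >>> plt.plot(t, y)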
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('step can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0, interp=False)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
kwargs : various types
        Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`. See the documentation for
`scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.8.0
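    Examples
    --------
    A minimal sketch; the call mirrors `scipy.signal.step` but integrates
    the system with `odeint` under the hood:
    >>> from scipy import signal
    >>> system = ([1.0], [1.0, 1.0])
    >>> t, y = signal.step2(system)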
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('step2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
        Array of frequencies (in rad/s). Magnitude and phase data are
        calculated for every value in this array. If not given, a reasonable
        set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sys = signal.TransferFunction([1], [1, 1])
>>> w, mag, phase = signal.bode(sys)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
def freqresp(system, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the `lti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
        Array of frequencies (in rad/s). Magnitude and phase data are
calculated for every value in this array. If not given, a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(s) = 5 / (s-1)^3
>>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5])
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
if isinstance(system, (TransferFunction, ZerosPolesGain)):
sys = system
else:
sys = system._as_zpk()
elif isinstance(system, dlti):
raise AttributeError('freqresp can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_zpk()
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
if isinstance(sys, TransferFunction):
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
elif isinstance(sys, ZerosPolesGain):
w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)
return w, h
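# --- Illustrative sketch (added for exposition; not part of the original
# module). For H(s) = 5/(s - 1)^3 the DC value is 5/(-1)^3 = -5, which the
# computed response should reproduce at w = 0. Helper name assumed.
def _demo_freqresp_dc_value():
    import numpy as np
    w, H = freqresp(ZerosPolesGain([], [1, 1, 1], 5), w=[0.0])
    assert np.allclose(H, -5.0)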
# This class will be used by place_poles to return its results
# see http://code.activestate.com/recipes/52308/
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def _valid_inputs(A, B, poles, method, rtol, maxiter):
"""
    Check that the poles come in complex conjugate pairs.
    Check that the shapes of A, B and poles are compatible.
    Check that the chosen method is compatible with the provided poles.
    Return the update method to use and the ordered poles.
"""
poles = np.asarray(poles)
if poles.ndim > 1:
raise ValueError("Poles must be a 1D array like.")
    # Will raise ValueError if poles do not come in complex conjugate pairs
poles = _order_complex_poles(poles)
if A.ndim > 2:
raise ValueError("A must be a 2D array/matrix.")
if B.ndim > 2:
raise ValueError("B must be a 2D array/matrix")
if A.shape[0] != A.shape[1]:
raise ValueError("A must be square")
if len(poles) > A.shape[0]:
raise ValueError("maximum number of poles is %d but you asked for %d" %
(A.shape[0], len(poles)))
if len(poles) < A.shape[0]:
raise ValueError("number of poles is %d but you should provide %d" %
(len(poles), A.shape[0]))
r = np.linalg.matrix_rank(B)
for p in poles:
if sum(p == poles) > r:
raise ValueError("at least one of the requested pole is repeated "
"more than rank(B) times")
# Choose update method
update_loop = _YT_loop
if method not in ('KNV0','YT'):
raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
if method == "KNV0":
update_loop = _KNV0_loop
if not all(np.isreal(poles)):
raise ValueError("Complex poles are not supported by KNV0")
if maxiter < 1:
raise ValueError("maxiter must be at least equal to 1")
# We do not check rtol <= 0 as the user can use a negative rtol to
# force maxiter iterations
if rtol > 1:
raise ValueError("rtol can not be greater than 1")
return update_loop, poles
def _order_complex_poles(poles):
"""
    Check that we have complex conjugate pairs and reorder P according to YT,
    i.e. real_poles, complex_i, conjugate complex_i, ....
    The lexicographic sort on the complex poles is added to help the user
    compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
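# --- Illustrative sketch (added for exposition; not part of the original
# module). Real poles come first (sorted), then each complex pair with the
# negative-imaginary member first. Helper name assumed.
def _demo_order_complex_poles():
    import numpy as np
    ordered = _order_complex_poles(np.array([-1 + 1j, -2, -1 - 1j]))
    assert np.allclose(ordered, [-2, -1 - 1j, -1 + 1j])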
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
"""
Algorithm "KNV0" Kautsky et Al. Robust pole
assignment in linear state feedback, Int journal of Control
1985, vol 41 p 1129->1155
http://la.epfl.ch/files/content/sites/la/files/
users/105941/public/KautskyNicholsDooren
"""
    # Remove xj from the basis
transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
    # If we QR this matrix in full mode Q=Q0|Q1
    # then Q1 will be a single column orthogonal to
    # Q0, which is what we are looking for!
# After merge of gh-4249 great speed improvements could be achieved
# using QR updates instead of full QR in the line below
# To debug with numpy qr uncomment the line below
# Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
Q, R = s_qr(transfer_matrix_not_j, mode="full")
mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
yj = np.dot(mat_ker_pj, Q[:, -1])
    # If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
    # projection into ker_pole[j] will yield a vector
    # close to 0.  As we are looking for a vector in ker_pole[j]
    # we simply stick with transfer_matrix[:, j] (unless someone
    # provides a better choice?)
if not np.allclose(yj, 0):
xj = yj/np.linalg.norm(yj)
transfer_matrix[:, j] = xj
    # KNV does not support complex poles; using the YT technique the two
    # lines below seem to work 9 out of 10 times but it is not reliable
    # enough:
    # transfer_matrix[:, j]=real(xj)
    # transfer_matrix[:, j+1]=imag(xj)
    # Add this at the beginning of this function if you wish to test
    # complex support:
    # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
    #     return
    # Problems arise when imag(xj) is close to 0; it is unclear how to
    # fix this.
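# --- Illustrative sketch (added for exposition; not part of the original
# module). Full-mode QR of an n x (n-1) matrix yields a last column of Q
# spanning the orthogonal complement of the column space -- the property
# _KNV0 relies on above. Helper name assumed.
def _demo_qr_orthogonal_complement():
    import numpy as np
    rng = np.random.RandomState(0)
    M = rng.rand(4, 3)
    Q, _ = np.linalg.qr(M, mode="complete")
    assert np.allclose(np.dot(Q[:, -1], M), 0)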
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.1 page 19 related to real pairs
"""
# step 1 page 19
u = Q[:, -2, np.newaxis]
v = Q[:, -1, np.newaxis]
# step 2 page 19
m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
np.dot(v, u.T)), ker_pole[j])
# step 3 page 19
um, sm, vm = np.linalg.svd(m)
    # mu1, mu2: the first two columns of U => the first two rows of U.T
mu1, mu2 = um.T[:2, :, np.newaxis]
    # vm is V.T; with numpy we want the first two rows of V.T
nu1, nu2 = vm[:2, :, np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
transfer_matrix[:, i, np.newaxis],
transfer_matrix[:, j, np.newaxis]))
if not np.allclose(sm[0], sm[1]):
ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
else:
ker_pole_ij = np.vstack((
np.hstack((ker_pole[i],
np.zeros(ker_pole[i].shape))),
np.hstack((np.zeros(ker_pole[j].shape),
ker_pole[j]))
))
mu_nu_matrix = np.vstack(
(np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
)
ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_ij, 0):
transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
np.linalg.norm(transfer_matrix_ij))
transfer_matrix[:, i] = transfer_matrix_ij[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = transfer_matrix_ij[
transfer_matrix[:, i].shape[0]:, 0
]
else:
        # As in _KNV0, if transfer_matrix_j_mo_transfer_matrix_j is
        # orthogonal to Vect{ker_pole_mu_nu}, assign
        # transfer_matrix_i/transfer_matrix_j to ker_pole_mu_nu and iterate.
        # As we are looking for a vector in Vect{ker_pole_mu_nu}
        # (see section 6.1 page 19) this might help (that's a guess,
        # not a claim!)
transfer_matrix[:, i] = ker_pole_mu_nu[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = ker_pole_mu_nu[
transfer_matrix[:, i].shape[0]:, 0
]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.2 page 20 related to complex pairs
"""
# step 1 page 20
ur = np.sqrt(2)*Q[:, -2, np.newaxis]
ui = np.sqrt(2)*Q[:, -1, np.newaxis]
u = ur + 1j*ui
# step 2 page 20
ker_pole_ij = ker_pole[i]
m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
np.dot(np.conj(u), u.T)), ker_pole_ij)
# step 3 page 20
e_val, e_vec = np.linalg.eig(m)
    # sort eigenvalues according to their modulus
e_val_idx = np.argsort(np.abs(e_val))
mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
# remember transfer_matrix_i has been split as
# transfer_matrix[i]=real(transfer_matrix_i) and
# transfer_matrix[j]=imag(transfer_matrix_i)
transfer_matrix_j_mo_transfer_matrix_j = (
transfer_matrix[:, i, np.newaxis] +
1j*transfer_matrix[:, j, np.newaxis]
)
if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
np.abs(e_val[e_val_idx[-2]])):
ker_pole_mu = np.dot(ker_pole_ij, mu1)
else:
mu1_mu2_matrix = np.hstack((mu1, mu2))
ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_i_j, 0):
transfer_matrix_i_j = (transfer_matrix_i_j /
np.linalg.norm(transfer_matrix_i_j))
transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
else:
# same idea as in YT_real
transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Algorithm "YT" Tits, Yang. Globally Convergent
Algorithms for Robust Pole Assignment by State Feedback
http://drum.lib.umd.edu/handle/1903/5598
    The poles P have to be sorted according to section 6.2 page 20
"""
# The IEEE edition of the YT paper gives useful information on the
# optimal update order for the real poles in order to minimize the number
# of times we have to loop over all poles, see page 1442
nb_real = poles[np.isreal(poles)].shape[0]
# hnb => Half Nb Real
hnb = nb_real // 2
    # Stick to the indices in the paper and then subtract one to get numpy
    # array indices; it is a bit easier to link the code to the paper this
    # way even if it is not very clean.  The paper is unclear about what
    # should be done when there is only one real pole => using KNV0 on
    # this real pole seems to work
if nb_real > 0:
        # update the biggest real pole with the smallest one
update_order = [[nb_real], [1]]
else:
update_order = [[],[]]
r_comp = np.arange(nb_real+1, len(poles)+1, 2)
# step 1.a
r_p = np.arange(1, hnb+nb_real % 2)
update_order[0].extend(2*r_p)
update_order[1].extend(2*r_p+1)
# step 1.b
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 1.c
r_p = np.arange(1, hnb+1)
update_order[0].extend(2*r_p-1)
update_order[1].extend(2*r_p)
# step 1.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.a
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+j)
# step 2.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.c
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(hnb+1, nb_real+1):
idx_1 = i+j
if idx_1 > nb_real:
idx_1 = i+j-nb_real
update_order[0].append(i)
update_order[1].append(idx_1)
# step 2.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 3.a
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+hnb)
# step 3.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
update_order = np.array(update_order).T-1
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for i, j in update_order:
if i == j:
assert i == 0, "i!=0 for KNV call in YT"
assert np.isreal(poles[i]), "calling KNV on a complex pole"
_KNV0(B, ker_pole, transfer_matrix, i, poles)
else:
transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
axis=1)
# after merge of gh-4249 great speed improvements could be
# achieved using QR updates instead of full QR in the line below
#to debug with numpy qr uncomment the line below
#Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
if np.isreal(poles[i]):
assert np.isreal(poles[j]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_real(ker_pole, Q, transfer_matrix, i, j)
else:
                    assert ~np.isreal(poles[i]), "mixing real and complex " + \
                        "in YT_complex" + str(poles)
_YT_complex(ker_pole, Q, transfer_matrix, i, j)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs(
(det_transfer_matrix -
det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Loop over all poles one by one and apply KNV method 0 algorithm
"""
# This method is useful only because we need to be able to call
# _KNV0 from YT without looping over all poles, otherwise it would
# have been fine to mix _KNV0_loop and _KNV0 in a single function
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for j in range(B.shape[0]):
_KNV0(B, ker_pole, transfer_matrix, j, poles)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
"""
Compute K such that eigenvalues (A - dot(B, K))=poles.
    K is the gain matrix such that the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e. the eigenvalues of
    ``A - B*K``, as close as possible to those asked for in poles.
SISO, MISO and MIMO systems are supported.
Parameters
----------
A, B : ndarray
State-space representation of linear system ``AX + BU``.
poles : array_like
        Desired real poles and/or complex conjugate poles.
Complex poles are only supported with ``method="YT"`` (default).
method: {'YT', 'KNV0'}, optional
Which method to choose to find the gain matrix K. One of:
- 'YT': Yang Tits
- 'KNV0': Kautsky, Nichols, Van Dooren update method 0
See References and Notes for details on the algorithms.
rtol: float, optional
After each iteration the determinant of the eigenvectors of
``A - B*K`` is compared to its previous value, when the relative
error between these two values becomes lower than `rtol` the algorithm
stops. Default is 1e-3.
maxiter: int, optional
Maximum number of iterations to compute the gain matrix.
Default is 30.
Returns
-------
full_state_feedback : Bunch object
full_state_feedback is composed of:
gain_matrix : 1-D ndarray
            The closed loop matrix K such that the eigenvalues of ``A-BK``
            are as close as possible to the requested poles.
        computed_poles : 1-D ndarray
            The poles corresponding to ``A-BK`` sorted as first the real
            poles in increasing order, then the complex conjugates in
            lexicographic order.
requested_poles : 1-D ndarray
The poles the algorithm was asked to place sorted as above,
they may differ from what was achieved.
X : 2-D ndarray
            The transfer matrix such that ``X * diag(poles) = (A - B*K)*X``
            (see Notes)
rtol : float
The relative tolerance achieved on ``det(X)`` (see Notes).
            `rtol` will be NaN if it is possible to solve the system
            ``diag(poles) = (A - B*K)``, or 0 when the optimization
            algorithms can't do anything, i.e. when ``B.shape[1] == 1``.
nb_iter : int
The number of iterations performed before converging.
            `nb_iter` will be NaN if it is possible to solve the system
            ``diag(poles) = (A - B*K)``, or 0 when the optimization
            algorithms can't do anything, i.e. when ``B.shape[1] == 1``.
Notes
-----
The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
rank-2 updates. This yields on average more robust solutions (see [2]_
pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
does not in its original version. Only update method 0 proposed by KNV has
been implemented here, hence the name ``'KNV0'``.
KNV extended to complex poles is used in Matlab's ``place`` function, YT is
distributed under a non-free licence by Slicot under the name ``robpole``.
It is unclear and undocumented how KNV0 has been extended to complex poles
(Tits and Yang claim on page 14 of their paper that their method can not be
used to extend KNV to complex poles), therefore only YT supports them in
this implementation.
As the solution to the problem of pole placement is not unique for MIMO
systems, both methods start with a tentative transfer matrix which is
    altered in various ways to increase its determinant. Both methods have been
proven to converge to a stable solution, however depending on the way the
initial transfer matrix is chosen they will converge to different
solutions and therefore there is absolutely no guarantee that using
``'KNV0'`` will yield results similar to Matlab's or any other
implementation of these algorithms.
Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
is only provided because it is needed by ``'YT'`` in some specific cases.
Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
when ``abs(det(X))`` is used as a robustness indicator.
[2]_ is available as a technical report on the following URL:
http://drum.lib.umd.edu/handle/1903/5598
References
----------
.. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
in linear state feedback", International Journal of Control, Vol. 41
pp. 1129-1155, 1985.
.. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
pole assignment by state feedback, IEEE Transactions on Automatic
Control, Vol. 41, pp. 1432-1452, 1996.
Examples
--------
A simple example demonstrating real pole placement using both KNV and YT
algorithms. This is example number 1 from section 4 of the reference KNV
publication ([1]_):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
... [-0.5814, -4.290, 0, 0.6750 ],
... [ 1.067, 4.273, -6.654, 5.893 ],
... [ 0.0480, 4.273, 1.343, -2.104 ]])
>>> B = np.array([[ 0, 5.679 ],
... [ 1.136, 1.136 ],
... [ 0, 0, ],
... [-3.146, 0 ]])
>>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
Now compute K with KNV method 0, with the default YT method and with the YT
method while forcing 100 iterations of the algorithm and print some results
after each call.
>>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
>>> fsf1.gain_matrix
array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
[ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
>>> fsf2 = signal.place_poles(A, B, P) # uses YT method
>>> fsf2.computed_poles
array([-8.6659, -5.0566, -0.5 , -0.2 ])
>>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
>>> fsf3.X
array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
[-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
[-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
[ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
The absolute value of the determinant of X is a good indicator to check the
robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
it. Below a comparison of the robustness of the results above:
>>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
True
>>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
True
Now a simple example for complex poles:
>>> A = np.array([[ 0, 7/3., 0, 0 ],
... [ 0, 0, 0, 7/9. ],
... [ 0, 0, 0, 0 ],
... [ 0, 0, 0, 0 ]])
>>> B = np.array([[ 0, 0 ],
... [ 0, 0 ],
... [ 1, 0 ],
... [ 0, 1 ]])
>>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
>>> fsf = signal.place_poles(A, B, P, method='YT')
We can plot the desired and computed poles in the complex plane:
>>> t = np.linspace(0, 2*np.pi, 401)
>>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle
>>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
... 'wo', label='Desired')
>>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
... label='Placed')
>>> plt.grid()
>>> plt.axis('image')
>>> plt.axis([-1.1, 1.1, -1.1, 1.1])
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
"""
    # Move all the input checking away; it only adds noise to the code
update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
# The current value of the relative tolerance we achieved
cur_rtol = 0
# The number of iterations needed before converging
nb_iter = 0
    # Step A: QR decomposition of B page 1132 KNV
# to debug with numpy qr uncomment the line below
# u, z = np.linalg.qr(B, mode="complete")
u, z = s_qr(B, mode="full")
rankB = np.linalg.matrix_rank(B)
u0 = u[:, :rankB]
u1 = u[:, rankB:]
z = z[:rankB, :]
# If we can use the identity matrix as X the solution is obvious
if B.shape[0] == rankB:
        # if B is square and full rank there is only one solution
        # such that (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0])
        # i.e. K=inv(B)*(diag(P)-A)
        # if B has as many rows as its rank (but is not square) there are
        # many solutions and we can choose one using least squares
# => use lstsq in both cases.
# In both cases the transfer matrix X will be eye(A.shape[0]) and I
# can hardly think of a better one so there is nothing to optimize
#
# for complex poles we use the following trick
#
# |a -b| has for eigenvalues a+b and a-b
# |b a|
#
# |a+bi 0| has the obvious eigenvalues a+bi and a-bi
# |0 a-bi|
#
        # e.g. solving the first one in R gives the solution
        # for the second one in C
diag_poles = np.zeros(A.shape)
idx = 0
while idx < poles.shape[0]:
p = poles[idx]
diag_poles[idx, idx] = np.real(p)
if ~np.isreal(p):
diag_poles[idx, idx+1] = -np.imag(p)
diag_poles[idx+1, idx+1] = np.real(p)
diag_poles[idx+1, idx] = np.imag(p)
idx += 1 # skip next one
idx += 1
gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0]
transfer_matrix = np.eye(A.shape[0])
cur_rtol = np.nan
nb_iter = np.nan
else:
# step A (p1144 KNV) and beginning of step F: decompose
# dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
# in the same loop
ker_pole = []
# flag to skip the conjugate of a complex pole
skip_conjugate = False
# select orthonormal base ker_pole for each Pole and vectors for
# transfer_matrix
for j in range(B.shape[0]):
if skip_conjugate:
skip_conjugate = False
continue
pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
# after QR Q=Q0|Q1
# only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
            # Q1 is orthogonal to Q0 and will be multiplied by the zeros in
# R when using mode "complete". In default mode Q1 and the zeros
# in R are not computed
# To debug with numpy qr uncomment the line below
# Q, _ = np.linalg.qr(pole_space_j, mode="complete")
Q, _ = s_qr(pole_space_j, mode="full")
ker_pole_j = Q[:, pole_space_j.shape[1]:]
            # We want to select one vector in ker_pole_j to build the transfer
            # matrix.  However, qr sometimes returns vectors with zeros on the
            # same row for each pole, and this yields very long convergence
            # times.  At other times it returns a set of vectors, one with
            # zero imaginary part and one (or several) with non-zero imaginary
            # parts.  After trying many ways to select the best possible one
            # (e.g. ditching vectors with zero imaginary part for complex
            # poles), summing all vectors in ker_pole_j turned out to solve
            # 100% of the problems and is a valid choice for transfer_matrix.
            # This way, for complex poles we are sure to have a non-zero
            # imaginary part, and the problem of rows full of zeros in
            # transfer_matrix is solved too: when a vector from ker_pole_j
            # has a zero, the other one(s) (when ker_pole_j.shape[1] > 1)
            # for sure won't have a zero there.
transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
transfer_matrix_j = (transfer_matrix_j /
np.linalg.norm(transfer_matrix_j))
if ~np.isreal(poles[j]): # complex pole
transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
np.imag(transfer_matrix_j)])
ker_pole.extend([ker_pole_j, ker_pole_j])
# Skip next pole as it is the conjugate
skip_conjugate = True
else: # real pole, nothing to do
ker_pole.append(ker_pole_j)
if j == 0:
transfer_matrix = transfer_matrix_j
else:
transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
if rankB > 1: # otherwise there is nothing we can optimize
stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
poles, B, maxiter, rtol)
if not stop and rtol > 0:
# if rtol<=0 the user has probably done that on purpose,
# don't annoy him
err_msg = (
"Convergence was not reached after maxiter iterations.\n"
"You asked for a relative tolerance of %f we got %f" %
(rtol, cur_rtol)
)
warnings.warn(err_msg)
        # reconstruct transfer_matrix to match complex conjugate pairs,
        # i.e. transfer_matrix_j/transfer_matrix_j+1 are
        # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
transfer_matrix = transfer_matrix.astype(complex)
idx = 0
while idx < poles.shape[0]-1:
if ~np.isreal(poles[idx]):
rel = transfer_matrix[:, idx].copy()
img = transfer_matrix[:, idx+1]
            # rel is an array referencing a column of transfer_matrix;
            # without the copy() it would change after the next line and
            # the line after would not yield the correct value
transfer_matrix[:, idx] = rel-1j*img
transfer_matrix[:, idx+1] = rel+1j*img
idx += 1 # skip next one
idx += 1
try:
m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
transfer_matrix.T)).T
gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
except np.linalg.LinAlgError:
raise ValueError("The poles you've chosen can't be placed. "
"Check the controllability matrix and try "
"another set of poles")
# Beware: Kautsky solves A+BK but the usual form is A-BK
gain_matrix = -gain_matrix
    # K still contains complex values with ~0j imaginary parts; get rid of them
gain_matrix = np.real(gain_matrix)
full_state_feedback = Bunch()
full_state_feedback.gain_matrix = gain_matrix
full_state_feedback.computed_poles = _order_complex_poles(
np.linalg.eig(A - np.dot(B, gain_matrix))[0]
)
full_state_feedback.requested_poles = poles
full_state_feedback.X = transfer_matrix
full_state_feedback.rtol = cur_rtol
full_state_feedback.nb_iter = nb_iter
return full_state_feedback
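# --- Illustrative sketch (added for exposition; not part of the original
# module). For the double integrator, placing poles at -1 and -2 should
# give a closed loop A - B*K with exactly those eigenvalues. Helper name
# assumed.
def _demo_place_poles_double_integrator():
    import numpy as np
    A = np.array([[0.0, 1.0], [0.0, 0.0]])
    B = np.array([[0.0], [1.0]])
    fsf = place_poles(A, B, [-1.0, -2.0])
    assert np.allclose(fsf.computed_poles, [-2.0, -1.0])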
def dlsim(system, u, t=None, x0=None):
"""
Simulate output of a discrete-time linear system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
u : array_like
An input array describing the input at each time `t` (interpolation is
assumed between given times). If there are multiple inputs, then each
column of the rank-2 array represents an input.
t : array_like, optional
The time steps at which the input is defined. If `t` is given, it
must be the same length as `u`, and the final value in `t` determines
the number of steps returned in the output.
x0 : array_like, optional
The initial conditions on the state vector (zero by default).
Returns
-------
tout : ndarray
Time values for the output, as a 1-D array.
yout : ndarray
System response, as a 1-D array.
xout : ndarray, optional
Time-evolution of the state-vector. Only generated if the input is a
`StateSpace` system.
See Also
--------
lsim, dstep, dimpulse, cont2discrete
Examples
--------
A simple integrator transfer function with a discrete time step of 1.0
could be implemented as:
>>> from scipy import signal
>>> tf = ([1.0,], [1.0, -1.0], 1.0)
>>> t_in = [0.0, 1.0, 2.0, 3.0]
>>> u = np.asarray([0.0, 0.0, 1.0, 1.0])
>>> t_out, y = signal.dlsim(tf, u, t=t_in)
>>> y.T
array([[ 0., 0., 0., 1.]])
"""
# Convert system to dlti-StateSpace
if isinstance(system, lti):
raise AttributeError('dlsim can only be used with discrete-time dlti '
'systems.')
elif not isinstance(system, dlti):
system = dlti(*system[:-1], dt=system[-1])
# Condition needed to ensure output remains compatible
is_ss_input = isinstance(system, StateSpace)
system = system._as_ss()
u = np.atleast_1d(u)
if u.ndim == 1:
u = np.atleast_2d(u).T
if t is None:
out_samples = len(u)
stoptime = (out_samples - 1) * system.dt
else:
stoptime = t[-1]
out_samples = int(np.floor(stoptime / system.dt)) + 1
# Pre-build output arrays
xout = np.zeros((out_samples, system.A.shape[0]))
yout = np.zeros((out_samples, system.C.shape[0]))
tout = np.linspace(0.0, stoptime, num=out_samples)
# Check initial condition
if x0 is None:
xout[0, :] = np.zeros((system.A.shape[1],))
else:
xout[0, :] = np.asarray(x0)
# Pre-interpolate inputs into the desired time steps
if t is None:
u_dt = u
else:
if len(u.shape) == 1:
u = u[:, np.newaxis]
u_dt_interp = interp1d(t, u.transpose(), copy=False, bounds_error=True)
u_dt = u_dt_interp(tout).transpose()
# Simulate the system
for i in range(0, out_samples - 1):
xout[i+1, :] = (np.dot(system.A, xout[i, :]) +
np.dot(system.B, u_dt[i, :]))
yout[i, :] = (np.dot(system.C, xout[i, :]) +
np.dot(system.D, u_dt[i, :]))
# Last point
yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) +
np.dot(system.D, u_dt[out_samples-1, :]))
if is_ss_input:
return tout, yout, xout
else:
return tout, yout
def dimpulse(system, x0=None, t=None, n=None):
"""
Impulse response of discrete-time system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
x0 : array_like, optional
Initial state-vector. Defaults to zero.
t : array_like, optional
Time points. Computed if not given.
n : int, optional
The number of time points to compute (if `t` is not given).
Returns
-------
tout : ndarray
Time values for the output, as a 1-D array.
yout : ndarray
Impulse response of system. Each element of the tuple represents
the output of the system based on an impulse in each input.
See Also
--------
impulse, dstep, dlsim, cont2discrete
"""
# Convert system to dlti-StateSpace
if isinstance(system, dlti):
system = system._as_ss()
elif isinstance(system, lti):
raise AttributeError('dimpulse can only be used with discrete-time '
'dlti systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_ss()
# Default to 100 samples if unspecified
if n is None:
n = 100
# If time is not specified, use the number of samples
# and system dt
if t is None:
t = np.linspace(0, n * system.dt, n, endpoint=False)
else:
t = np.asarray(t)
    # For each input, implement an impulse at that input
yout = None
for i in range(0, system.inputs):
u = np.zeros((t.shape[0], system.inputs))
u[0, i] = 1.0
one_output = dlsim(system, u, t=t, x0=x0)
if yout is None:
yout = (one_output[1],)
else:
yout = yout + (one_output[1],)
tout = one_output[0]
return tout, yout
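# --- Illustrative sketch (added for exposition; not part of the original
# module). H(z) = 1/(z - 1) is a delayed accumulator, so its impulse
# response is a zero followed by ones. Helper name assumed.
def _demo_dimpulse_accumulator():
    import numpy as np
    t, y = dimpulse(([1.0], [1.0, -1.0], 1.0), n=4)
    assert np.allclose(y[0].ravel(), [0.0, 1.0, 1.0, 1.0])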
def dstep(system, x0=None, t=None, n=None):
"""
Step response of discrete-time system.
Parameters
----------
    system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
x0 : array_like, optional
Initial state-vector. Defaults to zero.
t : array_like, optional
Time points. Computed if not given.
n : int, optional
The number of time points to compute (if `t` is not given).
Returns
-------
tout : ndarray
Output time points, as a 1-D array.
yout : ndarray
Step response of system. Each element of the tuple represents
the output of the system based on a step response to each input.
See Also
--------
step, dimpulse, dlsim, cont2discrete
"""
# Convert system to dlti-StateSpace
if isinstance(system, dlti):
system = system._as_ss()
elif isinstance(system, lti):
raise AttributeError('dstep can only be used with discrete-time dlti '
'systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_ss()
# Default to 100 samples if unspecified
if n is None:
n = 100
# If time is not specified, use the number of samples
# and system dt
if t is None:
t = np.linspace(0, n * system.dt, n, endpoint=False)
else:
t = np.asarray(t)
# For each input, implement a step change
yout = None
for i in range(0, system.inputs):
u = np.zeros((t.shape[0], system.inputs))
u[:, i] = np.ones((t.shape[0],))
one_output = dlsim(system, u, t=t, x0=x0)
if yout is None:
yout = (one_output[1],)
else:
yout = yout + (one_output[1],)
tout = one_output[0]
return tout, yout
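# --- Illustrative sketch (added for exposition; not part of the original
# module). Driving the same accumulator H(z) = 1/(z - 1) with a unit step
# yields a ramp: 0, 1, 2, 3, ...  Helper name assumed.
def _demo_dstep_accumulator():
    import numpy as np
    t, y = dstep(([1.0], [1.0, -1.0], 1.0), n=4)
    assert np.allclose(y[0].ravel(), [0.0, 1.0, 2.0, 3.0])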
def dfreqresp(system, w=None, n=10000, whole=False):
"""
Calculate the frequency response of a discrete-time system.
Parameters
----------
system : an instance of the `dlti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
            * 1 (instance of `dlti`)
            * 3 (numerator, denominator, dt)
            * 4 (zeros, poles, gain, dt)
            * 5 (A, B, C, D, dt)
w : array_like, optional
Array of frequencies (in radians/sample). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
whole : bool, optional
Normally, if 'w' is not given, frequencies are computed from 0 to the
Nyquist frequency, pi radians/sample (upper-half of unit-circle). If
`whole` is True, compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : 1D ndarray
Frequency array [radians/sample]
H : 1D ndarray
Array of complex magnitude values
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.18.0
Examples
--------
Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(z) = 1 / (z^2 + 2z + 3)
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
>>> w, H = signal.dfreqresp(sys)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if not isinstance(system, dlti):
if isinstance(system, lti):
raise AttributeError('dfreqresp can only be used with '
'discrete-time systems.')
system = dlti(*system[:-1], dt=system[-1])
if isinstance(system, StateSpace):
# No SS->ZPK code exists right now, just SS->TF->ZPK
system = system._as_tf()
if not isinstance(system, (TransferFunction, ZerosPolesGain)):
raise ValueError('Unknown system type')
if system.inputs != 1 or system.outputs != 1:
raise ValueError("dfreqresp requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
if isinstance(system, TransferFunction):
# Convert numerator and denominator from polynomials in the variable
# 'z' to polynomials in the variable 'z^-1', as freqz expects.
num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
w, h = freqz(num, den, worN=worN, whole=whole)
elif isinstance(system, ZerosPolesGain):
w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN,
whole=whole)
return w, h
def dbode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a discrete-time system.
Parameters
----------
    system : an instance of the `dlti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
            * 1 (instance of `dlti`)
            * 3 (num, den, dt)
            * 4 (zeros, poles, gain, dt)
            * 5 (A, B, C, D, dt)
w : array_like, optional
Array of frequencies (in radians/sample). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/time_unit]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(z) = 1 / (z^2 + 2z + 3)
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
Equivalent: sys.bode()
>>> w, mag, phase = signal.dbode(sys)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = dfreqresp(system, w=w, n=n)
if isinstance(system, dlti):
dt = system.dt
else:
dt = system[-1]
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y)))
return w / dt, mag, phase
# ======================================================================
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/setup.py
# ======================================================================
from __future__ import division, print_function, absolute_import
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('signal', parent_package, top_path)
config.add_data_dir('tests')
config.add_subpackage('windows')
config.add_extension('sigtools',
sources=['sigtoolsmodule.c', 'firfilter.c',
'medianfilter.c', 'lfilter.c.src',
'correlate_nd.c.src'],
depends=['sigtools.h'],
include_dirs=['.'],
**numpy_nodepr_api)
config.add_extension('_spectral', sources=['_spectral.c'])
config.add_extension('_max_len_seq_inner', sources=['_max_len_seq_inner.c'])
config.add_extension('_peak_finding_utils',
sources=['_peak_finding_utils.c'])
config.add_extension('_upfirdn_apply', sources=['_upfirdn_apply.c'])
spline_src = ['splinemodule.c', 'S_bspline_util.c', 'D_bspline_util.c',
'C_bspline_util.c', 'Z_bspline_util.c', 'bspline_util.c']
config.add_extension('spline', sources=spline_src, **numpy_nodepr_api)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
# ======================================================================
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/lti_conversion.py
# ======================================================================
"""
ltisys -- a collection of functions to convert linear time invariant systems
from one representation to another.
"""
from __future__ import division, print_function, absolute_import
import numpy
import numpy as np
from numpy import (r_, eye, atleast_2d, poly, dot,
asarray, product, zeros, array, outer)
from scipy import linalg
from .filter_design import tf2zpk, zpk2tf, normalize
__all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk',
'cont2discrete']
def tf2ss(num, den):
r"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree. The
denominator needs to be at least as long as the numerator.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
Examples
--------
Convert the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
to the state-space representation:
.. math::
\dot{\textbf{x}}(t) =
\begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\
\textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)
>>> from scipy.signal import tf2ss
>>> A, B, C, D = tf2ss(num, den)
>>> A
array([[-2., -1.],
[ 1., 0.]])
>>> B
array([[ 1.],
[ 0.]])
>>> C
array([[ 1., 2.]])
>>> D
array([[ 1.]])
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if M > K:
msg = "Improper transfer function. `num` is longer than `den`."
raise ValueError(msg)
if M == 0 or K == 0: # Null system
return (array([], float), array([], float), array([], float),
array([], float))
    # pad numerator to have the same number of columns as the denominator
num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
if num.shape[-1] > 0:
D = atleast_2d(num[:, 0])
else:
# We don't assign it an empty array because this system
# is not 'null'. It just doesn't have a non-zero D
# matrix. Thus, it should have a non-zero shape so that
# it can be operated on by functions like 'ss2tf'
D = array([[0]], float)
if K == 1:
D = D.reshape(num.shape)
return (zeros((1, 1)), zeros((1, D.shape[1])),
zeros((D.shape[0], 1)), D)
frow = -array([den[1:]])
A = r_[frow, eye(K - 2, K - 1)]
B = eye(K - 1, 1)
C = num[:, 1:] - outer(num[:, 0], den[1:])
D = D.reshape((C.shape[0], B.shape[1]))
return A, B, C, D
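# --- Illustrative sketch (added for exposition; not part of the original
# module). Converting to controller canonical state space and back should
# recover the (normalized) transfer function coefficients. Helper name
# assumed.
def _demo_tf2ss_roundtrip():
    import numpy as np
    num, den = [1.0, 3.0, 3.0], [1.0, 2.0, 1.0]
    num2, den2 = ss2tf(*tf2ss(num, den))
    assert np.allclose(num2, [num]) and np.allclose(den2, den)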
def _none_to_empty_2d(arg):
if arg is None:
return zeros((0, 0))
else:
return arg
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
def _shape_or_none(M):
if M is not None:
return M.shape
else:
return (None,) * 2
def _choice_not_none(*args):
for arg in args:
if arg is not None:
return arg
def _restore(M, shape):
if M.shape == (0, 0):
return zeros(shape)
else:
if M.shape != shape:
raise ValueError("The input arrays have incompatible shapes.")
return M
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are two-dimensional.
If enough information on the system is provided, that is, enough
properly-shaped arrays are passed to the function, the missing ones
are built from this information, ensuring the correct number of
rows and columns. Otherwise a ValueError is raised.
Parameters
----------
A, B, C, D : array_like, optional
State-space matrices. All of them are None (missing) by default.
See `ss2tf` for format.
Returns
-------
A, B, C, D : array
Properly shaped state-space matrices.
Raises
------
ValueError
If not enough information on the system was provided.
"""
A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
MA, NA = _shape_or_none(A)
MB, NB = _shape_or_none(B)
MC, NC = _shape_or_none(C)
MD, ND = _shape_or_none(D)
p = _choice_not_none(MA, MB, NC)
q = _choice_not_none(NB, ND)
r = _choice_not_none(MC, MD)
if p is None or q is None or r is None:
raise ValueError("Not enough information on the system.")
A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
A = _restore(A, (p, p))
B = _restore(B, (p, q))
C = _restore(C, (r, p))
D = _restore(D, (r, q))
return A, B, C, D
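# --- Illustrative sketch (added for exposition; not part of the original
# module). Given A, B and C, the missing D is rebuilt as an all-zero matrix
# of the right shape. Helper name assumed.
def _demo_abcd_normalize_fills_missing():
    import numpy as np
    A, B, C, D = abcd_normalize(A=[[0, 1], [0, 0]], B=[[0], [1]], C=[[1, 0]])
    assert D.shape == (1, 1) and np.allclose(D, 0)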
def ss2tf(A, B, C, D, input=0):
r"""State-space to transfer function.
A, B, C, D defines a linear state-space system with `p` inputs,
`q` outputs, and `n` state variables.
Parameters
----------
A : array_like
State (or system) matrix of shape ``(n, n)``
B : array_like
Input matrix of shape ``(n, p)``
C : array_like
Output matrix of shape ``(q, n)``
D : array_like
Feedthrough (or feedforward) matrix of shape ``(q, p)``
input : int, optional
For multiple-input systems, the index of the input to use.
Returns
-------
num : 2-D ndarray
Numerator(s) of the resulting transfer function(s). `num` has one row
for each of the system's outputs. Each row is a sequence representation
of the numerator polynomial.
den : 1-D ndarray
Denominator of the resulting transfer function(s). `den` is a sequence
representation of the denominator polynomial.
Examples
--------
Convert the state-space representation:
.. math::
\dot{\textbf{x}}(t) =
\begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\
\textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)
>>> A = [[-2, -1], [1, 0]]
>>> B = [[1], [0]] # 2-dimensional column vector
>>> C = [[1, 2]] # 2-dimensional row vector
>>> D = 1
to the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> from scipy.signal import ss2tf
>>> ss2tf(A, B, C, D)
(array([[1, 3, 3]]), array([ 1., 2., 1.]))
"""
# transfer function is C (sI - A)**(-1) B + D
# Check consistency and make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make SIMO from possibly MIMO system.
B = B[:, input:input + 1]
D = D[:, input:input + 1]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:, 0] + B[:, 0] + C[0, :] + D
num = numpy.zeros((nout, num_states + 1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k, :])
num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
"""
return tf2ss(*zpk2tf(z, p, k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
A, B, C, D defines a linear state-space system with `p` inputs,
`q` outputs, and `n` state variables.
Parameters
----------
A : array_like
State (or system) matrix of shape ``(n, n)``
B : array_like
Input matrix of shape ``(n, p)``
C : array_like
Output matrix of shape ``(q, n)``
D : array_like
Feedthrough (or feedforward) matrix of shape ``(q, p)``
input : int, optional
For multiple-input systems, the index of the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A, B, C, D, input=input))
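# --- Illustrative sketch (added for exposition; not part of the original
# module). H(s) = 2s/(s + 1) has one zero at 0, one pole at -1 and gain 2;
# the zpk -> state-space -> zpk round trip should preserve all three.
# Helper name assumed.
def _demo_zpk_ss_roundtrip():
    import numpy as np
    z, p, k = ss2zpk(*zpk2ss([0.0], [-1.0], 2.0))
    assert np.allclose(z, [0.0])
    assert np.allclose(p, [-1.0])
    assert np.allclose(k, 2.0)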
def cont2discrete(system, dt, method="zoh", alpha=None):
"""
Transform a continuous to a discrete state-space system.
Parameters
----------
system : a tuple describing the system or an instance of `lti`
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
dt : float
The discretization time step.
method : {"gbt", "bilinear", "euler", "backward_diff", "zoh"}, optional
Which method to use:
* gbt: generalized bilinear transformation
* bilinear: Tustin's approximation ("gbt" with alpha=0.5)
* euler: Euler (or forward differencing) method ("gbt" with alpha=0)
* backward_diff: Backwards differencing ("gbt" with alpha=1.0)
* zoh: zero-order hold (default)
alpha : float within [0, 1], optional
The generalized bilinear transformation weighting parameter, which
should only be specified with method="gbt", and is ignored otherwise
Returns
-------
sysd : tuple containing the discrete system
Based on the input type, the output will be of the form
* (num, den, dt) for transfer function input
* (zeros, poles, gain, dt) for zeros-poles-gain input
* (A, B, C, D, dt) for state-space system input
Notes
-----
By default, the routine uses a Zero-Order Hold (zoh) method to perform
the transformation. Alternatively, a generalized bilinear transformation
may be used, which includes the common Tustin's bilinear approximation,
an Euler's method technique, or a backwards differencing technique.
The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear
approximation is based on [2]_ and [3]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models
.. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf
.. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized
bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754,
2009.
(http://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf)
"""
if len(system) == 1:
return system.to_discrete()
if len(system) == 2:
sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method,
alpha=alpha)
return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
elif len(system) == 3:
sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt,
method=method, alpha=alpha)
return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
elif len(system) == 4:
a, b, c, d = system
else:
raise ValueError("First argument must either be a tuple of 2 (tf), "
"3 (zpk), or 4 (ss) arrays.")
if method == 'gbt':
if alpha is None:
raise ValueError("Alpha parameter must be specified for the "
"generalized bilinear transform (gbt) method")
elif alpha < 0 or alpha > 1:
raise ValueError("Alpha parameter must be within the interval "
"[0,1] for the gbt method")
if method == 'gbt':
# This parameter is used repeatedly - compute once here
ima = np.eye(a.shape[0]) - alpha*dt*a
ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a)
bd = linalg.solve(ima, dt*b)
# Similarly solve for the output equation matrices
cd = linalg.solve(ima.transpose(), c.transpose())
cd = cd.transpose()
dd = d + alpha*np.dot(c, bd)
elif method == 'bilinear' or method == 'tustin':
return cont2discrete(system, dt, method="gbt", alpha=0.5)
elif method == 'euler' or method == 'forward_diff':
return cont2discrete(system, dt, method="gbt", alpha=0.0)
elif method == 'backward_diff':
return cont2discrete(system, dt, method="gbt", alpha=1.0)
elif method == 'zoh':
# Build an exponential matrix
em_upper = np.hstack((a, b))
# Need to stack zeros under the a and b matrices
em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])),
np.zeros((b.shape[1], b.shape[1]))))
em = np.vstack((em_upper, em_lower))
ms = linalg.expm(dt * em)
# Dispose of the lower rows
ms = ms[:a.shape[0], :]
ad = ms[:, 0:a.shape[1]]
bd = ms[:, a.shape[1]:]
cd = c
dd = d
else:
raise ValueError("Unknown transformation method '%s'" % method)
return ad, bd, cd, dd, dt
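# --- Illustrative sketch (added for exposition; not part of the original
# module). For dx/dt = -x + u, a zero-order hold over a step dt gives
# ad = exp(-dt) and bd = 1 - exp(-dt), which the zoh branch above should
# reproduce. Helper name assumed.
def _demo_cont2discrete_zoh_scalar():
    import numpy as np
    dt = 0.1
    A, B = np.array([[-1.0]]), np.array([[1.0]])
    C, D = np.array([[1.0]]), np.array([[0.0]])
    ad, bd, cd, dd, dt_out = cont2discrete((A, B, C, D), dt)
    assert np.allclose(ad, np.exp(-dt))
    assert np.allclose(bd, 1.0 - np.exp(-dt))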
# ======================================================================
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/fir_filter_design.py
# ======================================================================
# -*- coding: utf-8 -*-
"""Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import warnings
import numpy as np
from numpy.fft import irfft, fft, ifft
from scipy.special import sinc
from scipy.linalg import toeplitz, hankel, pinv
from scipy._lib.six import string_types
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase']
def _get_fs(fs, nyq):
"""
Utility for replacing the argument 'nyq' (with default 1) with 'fs'.
"""
if nyq is None and fs is None:
fs = 2
elif nyq is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'nyq' and 'fs'.")
fs = 2*nyq
return fs
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1. These are
# relative frequencies, expressed as a fraction of the Nyquist frequency.
# For example, if the Nyquist frequency is 2 kHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
Examples
--------
Suppose we want to design a lowpass filter, with 65 dB attenuation
in the stop band. The Kaiser window parameter to be used in the
window method is computed by `kaiser_beta(65)`:
>>> from scipy.signal import kaiser_beta
>>> kaiser_beta(65)
6.20426
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter,
expressed as a fraction of the Nyquist frequency.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
Examples
--------
Suppose we want to design a FIR filter using the Kaiser window method
that will have 211 taps and a transition width of 9 Hz for a signal that
is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency,
the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB)
is computed as follows:
>>> from scipy.signal import kaiser_atten
>>> kaiser_atten(211, 0.0375)
64.48099630593983
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Determine the filter window parameters for the Kaiser window method.
The parameters returned by this function are generally used to create
a finite impulse response filter using the window method, with either
`firwin` or `firwin2`.
Parameters
----------
ripple : float
Upper bound for the deviation (in dB) of the magnitude of the
filter's frequency response from that of the desired filter (not
including frequencies in any transition intervals). That is, if w
is the frequency expressed as a fraction of the Nyquist frequency,
A(w) is the actual frequency response of the filter and D(w) is the
desired frequency response, the design requirement is that::
            abs(A(w) - D(w)) < 10**(-ripple/20)
for 0 <= w <= 1 and w not in a transition interval.
width : float
Width of transition region, normalized so that 1 corresponds to pi
radians / sample. That is, the frequency is expressed as a fraction
of the Nyquist frequency.
Returns
-------
numtaps : int
The length of the Kaiser window.
beta : float
The beta parameter for the Kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=True)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
Examples
--------
We will use the Kaiser window method to design a lowpass FIR filter
for a signal that is sampled at 1000 Hz.
We want at least 65 dB rejection in the stop band, and in the pass
band the gain should vary no more than 0.5%.
We want a cutoff frequency of 175 Hz, with a transition between the
pass band and the stop band of 24 Hz. That is, in the band [0, 163],
the gain varies no more than 0.5%, and in the band [187, 500], the
signal is attenuated by at least 65 dB.
>>> from scipy.signal import kaiserord, firwin, freqz
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0
>>> cutoff = 175
>>> width = 24
The Kaiser method accepts just a single parameter to control the pass
band ripple and the stop band rejection, so we use the more restrictive
of the two. In this case, the pass band ripple is 0.005, or 46.02 dB,
so we will use 65 dB as the design parameter.
Use `kaiserord` to determine the length of the filter and the
parameter for the Kaiser window.
>>> numtaps, beta = kaiserord(65, width/(0.5*fs))
>>> numtaps
167
>>> beta
6.20426
Use `firwin` to create the FIR filter.
>>> taps = firwin(numtaps, cutoff, window=('kaiser', beta),
... scale=False, nyq=0.5*fs)
Compute the frequency response of the filter. ``w`` is the array of
frequencies, and ``h`` is the corresponding complex array of frequency
responses.
>>> w, h = freqz(taps, worN=8000)
>>> w *= 0.5*fs/np.pi # Convert w to Hz.
Compute the deviation of the magnitude of the filter's response from
that of the ideal lowpass filter. Values in the transition region are
set to ``nan``, so they won't appear in the plot.
>>> ideal = w < cutoff # The "ideal" frequency response.
>>> deviation = np.abs(np.abs(h) - ideal)
>>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan
Plot the deviation. A close look at the left end of the stop band shows
that the requirement for 65 dB attenuation is violated in the first lobe
by about 0.125 dB. This is not unusual for the Kaiser window method.
>>> plt.plot(w, 20*np.log10(np.abs(deviation)))
>>> plt.xlim(0, 0.5*fs)
>>> plt.ylim(-90, -60)
>>> plt.grid(alpha=0.25)
>>> plt.axhline(-65, color='r', ls='--', alpha=0.3)
>>> plt.xlabel('Frequency (Hz)')
>>> plt.ylabel('Deviation from ideal (dB)')
>>> plt.title('Lowpass Filter Frequency Response')
>>> plt.show()
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attenuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=None, fs=None):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist frequency, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist frequency.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be odd if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None, optional
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values, optional
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool, optional
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool, optional
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `nyq` (the Nyquist frequency) if the first passband ends at
`nyq` (i.e. the filter is a single band highpass filter);
- center of first passband otherwise
nyq : float, optional
*Deprecated. Use `fs` instead.* This is the Nyquist frequency.
Each frequency in `cutoff` must be between 0 and `nyq`. Default
is 1.
fs : float, optional
The sampling frequency of the signal. Each frequency in `cutoff`
must be between 0 and ``fs/2``. Default is 2.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to ``fs/2``, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See Also
--------
firwin2
firls
minimum_phase
remez
Examples
--------
Low-pass from 0 to f:
>>> from scipy import signal
>>> numtaps = 3
>>> f = 0.1
>>> signal.firwin(numtaps, f)
array([ 0.06799017, 0.86401967, 0.06799017])
Use a specific window function:
>>> signal.firwin(numtaps, f, window='nuttall')
array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
High-pass ('stop' from 0 to f):
>>> signal.firwin(numtaps, f, pass_zero=False)
array([-0.00859313, 0.98281375, -0.00859313])
Band-pass:
>>> f1, f2 = 0.1, 0.2
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
array([ 0.06301614, 0.88770441, 0.06301614])
Band-stop:
>>> signal.firwin(numtaps, [f1, f2])
array([-0.00801395, 1.0160279 , -0.00801395])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
>>> f3, f4 = 0.3, 0.4
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
array([-0.01376344, 1.02752689, -0.01376344])
Multi-band (passbands are [f1, f2] and [f3,f4]):
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
array([ 0.04890915, 0.91284326, 0.04890915])
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
nyq = 0.5 * _get_fs(fs, nyq)
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than fs/2.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist frequency.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
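# For example (illustrative): pass_zero=True with cutoff=[0.3] gives
# cutoff=[0.0, 0.3], a single lowpass band, while pass_zero=False with
# cutoff=[0.2, 0.4] leaves a single [0.2, 0.4] passband.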
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
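# Each sinc term above is the impulse response of an ideal (brick-wall)
# lowpass filter at the given edge, so their difference passes exactly
# the band [left, right].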
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
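# The frequency response at scale_frequency is now exactly unity; e.g.
# for a lowpass design (left == 0) the taps sum to 1 after this step.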
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=None,
antisymmetric=False, fs=None):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency is half `fs`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be ``fs/2``.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g., 129, 257, etc.). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float, optional
*Deprecated. Use `fs` instead.* This is the Nyquist frequency.
Each frequency in `freq` must be between 0 and `nyq`. Default is 1.
antisymmetric : bool, optional
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
fs : float, optional
The sampling frequency of the signal. Each frequency in `freq`
must be between 0 and ``fs/2``. Default is 2.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
firls
firwin
minimum_phase
remez
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
the values of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
Magnitude responses of all but type I filters are subject to the following
constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
nyq = 0.5 * _get_fs(fs, nyq)
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with fs/2.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the "
"Nyquist frequency.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero "
"and Nyquist frequencies.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero "
"frequency.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
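# For example, freq = [0.0, 0.5, 0.5, 1.0] becomes
# [0.0, 0.5 - eps, 0.5 + eps, 1.0], turning the repeated point into a
# narrow step that np.interp can represent.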
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
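# The exponential above is a pure delay of (numtaps - 1) / 2 samples,
# which centers the impulse response; the extra factor of 1j for types
# III/IV produces their odd (antisymmetric) impulse response.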
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
def remez(numtaps, bands, desired, weight=None, Hz=None, type='bandpass',
maxiter=25, grid_density=16, fs=None):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges.
All elements must be non-negative and less than half the sampling
frequency as given by `fs`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
*Deprecated. Use `fs` instead.*
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
* 'bandpass' : flat response in bands. This is the default.
* 'differentiator' : frequency proportional response in bands.
* 'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
fs : float, optional
The sampling frequency of the signal. Default is 1.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
firls
firwin
firwin2
minimum_phase
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
For a signal sampled at 100 Hz, we want to construct a filter with a
passband at 20-40 Hz, and stop bands at 0-10 Hz and 45-50 Hz. Note that
this means that the behavior in the frequency ranges between those bands
is unspecified and may overshoot.
>>> from scipy import signal
>>> fs = 100
>>> bpass = signal.remez(72, [0, 10, 20, 40, 45, 50], [0, 1, 0], fs=fs)
>>> freq, response = signal.freqz(bpass)
>>> import matplotlib.pyplot as plt
>>> plt.semilogy(0.5*fs*freq/np.pi, np.abs(response), 'b-')
>>> plt.grid(alpha=0.25)
>>> plt.xlabel('Frequency (Hz)')
>>> plt.ylabel('Gain')
>>> plt.show()
"""
if Hz is None and fs is None:
fs = 1.0
elif Hz is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'Hz' and 'fs'.")
fs = Hz
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, fs,
maxiter, grid_density)
def firls(numtaps, bands, desired, weight=None, nyq=None, fs=None):
"""
FIR filter design using least-squares error minimization.
Calculate the filter coefficients for the linear-phase finite
impulse response (FIR) filter which has the best approximation
to the desired frequency response described by `bands` and
`desired` in the least squares sense (i.e., the integral of the
weighted mean-squared error within the specified bands is
minimized).
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be odd.
bands : array_like
A monotonic nondecreasing sequence containing the band edges in
Hz. All elements must be non-negative and less than or equal to
the Nyquist frequency given by `nyq`.
desired : array_like
A sequence the same size as `bands` containing the desired gain
at the start and end point of each band.
weight : array_like, optional
A relative weighting to give to each band region when solving
the least squares problem. `weight` has to be half the size of
`bands`.
nyq : float, optional
*Deprecated. Use `fs` instead.*
Nyquist frequency. Each frequency in `bands` must be between 0
and `nyq` (inclusive). Default is 1.
fs : float, optional
The sampling frequency of the signal. Each frequency in `bands`
must be between 0 and ``fs/2`` (inclusive). Default is 2.
Returns
-------
coeffs : ndarray
Coefficients of the optimal (in a least squares sense) FIR filter.
See also
--------
firwin
firwin2
minimum_phase
remez
Notes
-----
This implementation follows the algorithm given in [1]_.
As noted there, least squares design has multiple advantages:
1. Optimal in a least-squares sense.
2. Simple, non-iterative method.
3. The general solution can be obtained by solving a linear
system of equations.
4. Allows the use of a frequency dependent weighting function.
This function constructs a Type I linear phase FIR filter, which
contains an odd number of `coeffs` satisfying for :math:`n < numtaps`:
.. math:: coeffs(n) = coeffs(numtaps - 1 - n)
The odd number of coefficients and filter symmetry avoid boundary
conditions that could otherwise occur at the Nyquist and 0 frequencies
(e.g., for Type II, III, or IV variants).
.. versionadded:: 0.18
References
----------
.. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares.
OpenStax CNX. Aug 9, 2005.
http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7
Examples
--------
We want to construct a band-pass filter. Note that the behavior in the
frequency ranges between our stop bands and pass bands is unspecified,
and thus may overshoot depending on the parameters of our filter:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fig, axs = plt.subplots(2)
>>> fs = 10.0 # Hz
>>> desired = (0, 0, 1, 1, 0, 0)
>>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))):
... fir_firls = signal.firls(73, bands, desired, fs=fs)
... fir_remez = signal.remez(73, bands, desired[::2], fs=fs)
... fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs)
... hs = list()
... ax = axs[bi]
... for fir in (fir_firls, fir_remez, fir_firwin2):
... freq, response = signal.freqz(fir)
... hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0])
... for band, gains in zip(zip(bands[::2], bands[1::2]),
... zip(desired[::2], desired[1::2])):
... ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2)
... if bi == 0:
... ax.legend(hs, ('firls', 'remez', 'firwin2'),
... loc='lower center', frameon=False)
... else:
... ax.set_xlabel('Frequency (Hz)')
... ax.grid(True)
... ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude')
...
>>> fig.tight_layout()
>>> plt.show()
""" # noqa
nyq = 0.5 * _get_fs(fs, nyq)
numtaps = int(numtaps)
if numtaps % 2 == 0 or numtaps < 1:
raise ValueError("numtaps must be odd and >= 1")
M = (numtaps-1) // 2
# normalize bands 0->1 and make it 2 columns
nyq = float(nyq)
if nyq <= 0:
raise ValueError('nyq must be positive, got %s <= 0.' % nyq)
bands = np.asarray(bands).flatten() / nyq
if len(bands) % 2 != 0:
raise ValueError("bands must contain frequency pairs.")
bands.shape = (-1, 2)
# check remaining params
desired = np.asarray(desired).flatten()
if bands.size != desired.size:
raise ValueError("desired must have one entry per frequency, got %s "
"gains for %s frequencies."
% (desired.size, bands.size))
desired.shape = (-1, 2)
if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any():
raise ValueError("bands must be monotonically nondecreasing and have "
"width > 0.")
if (bands[:-1, 1] > bands[1:, 0]).any():
raise ValueError("bands must not overlap.")
if (desired < 0).any():
raise ValueError("desired must be non-negative.")
if weight is None:
weight = np.ones(len(desired))
weight = np.asarray(weight).flatten()
if len(weight) != len(desired):
raise ValueError("weight must be the same size as the number of "
"band pairs (%s)." % (len(bands),))
if (weight < 0).any():
raise ValueError("weight must be non-negative.")
# Set up the linear matrix equation to be solved, Qa = b
# We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n)
# where Q1(k,n)=q(k−n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel.
# We omit the factor of 0.5 above, instead adding it during coefficient
# calculation.
# We also omit the 1/π from both Q and b equations, as they cancel
# during solving.
# We have that:
# q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π)
# Using our normalization ω=πf and with a constant weight W over each
# interval f1->f2 we get:
# q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf
# integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
n = np.arange(numtaps)[:, np.newaxis, np.newaxis]
q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight)
# Now we assemble our sum of Toeplitz and Hankel
Q1 = toeplitz(q[:M+1])
Q2 = hankel(q[:M+1], q[M:])
Q = Q1 + Q2
# Now for b(n) we have that:
# b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π)
# Using our normalization ω=πf and with a constant weight W over each
# interval and a linear term for D(ω) we get (over each f1->f2 interval):
# b(n) = W ∫ (mf+c)cos(πnf)df
# = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2
# integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
n = n[:M + 1] # only need this many coefficients here
# Choose m and c such that we are at the start and end weights
m = (np.diff(desired, axis=1) / np.diff(bands, axis=1))
c = desired[:, [0]] - bands[:, [0]] * m
b = bands * (m*bands + c) * np.sinc(bands * n)
# Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0
b[0] -= m * bands * bands / 2.
b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2
b = np.dot(np.diff(b, axis=2)[:, :, 0], weight)
# Now we can solve the equation (use pinv because Q can be rank deficient)
a = np.dot(pinv(Q), b)
# make coefficients symmetric (linear phase)
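# e.g. a = [a0, a1, a2] -> coeffs = [a2, a1, 2*a0, a1, a2] (Type I).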
coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:]))
return coeffs
def _dhtm(mag):
"""Compute the modified 1D discrete Hilbert transform
Parameters
----------
mag : ndarray
The magnitude spectrum. Should be 1D with an even length, and
preferably a fast length for FFT/IFFT.
"""
# Adapted based on code by Niranjan Damera-Venkata,
# Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`)
sig = np.zeros(len(mag))
# Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5
midpt = len(mag) // 2
sig[1:midpt] = 1
sig[midpt+1:] = -1
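# `sig` is the frequency-domain sign pattern of the discrete Hilbert
# transform kernel: +1 for positive frequencies, -1 for negative ones,
# and 0 at DC and Nyquist.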
# eventually if we want to support complex filters, we will need a
# np.abs() on the mag inside the log, and should remove the .real
recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real
return recon
def minimum_phase(h, method='homomorphic', n_fft=None):
"""Convert a linear-phase FIR filter to minimum phase
Parameters
----------
h : array
Linear-phase FIR filter coefficients.
method : {'hilbert', 'homomorphic'}
The method to use:
'homomorphic' (default)
This method [4]_ [5]_ works best with filters with an
odd number of taps, and the resulting minimum phase filter
will have a magnitude response that approximates the square
root of the original filter's magnitude response.
'hilbert'
This method [1]_ is designed to be used with equiripple
filters (e.g., from `remez`) with unity or zero gain
regions.
n_fft : int
The number of points to use for the FFT. Should be at least a
few times larger than the signal length (see Notes).
Returns
-------
h_minimum : array
The minimum-phase version of the filter, with length
``(length(h) + 1) // 2``.
See Also
--------
firwin
firwin2
remez
Notes
-----
Both the Hilbert [1]_ or homomorphic [4]_ [5]_ methods require selection
of an FFT length to estimate the complex cepstrum of the filter.
In the case of the Hilbert method, the deviation from the ideal
spectrum ``epsilon`` is related to the number of stopband zeros
``n_stop`` and FFT length ``n_fft`` as::
epsilon = 2. * n_stop / n_fft
For example, with 100 stopband zeros and a FFT length of 2048,
``epsilon = 0.0976``. If we conservatively assume that the number of
stopband zeros is one less than the filter length, we can take the FFT
length to be the next power of 2 that satisfies ``epsilon=0.01`` as::
n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
This gives reasonable results for both the Hilbert and homomorphic
methods, and gives the value used when ``n_fft=None``.
Alternative implementations exist for creating minimum-phase filters,
including zero inversion [2]_ and spectral factorization [3]_ [4]_.
For more information, see:
http://dspguru.com/dsp/howtos/how-to-design-minimum-phase-fir-filters
Examples
--------
Create an optimal linear-phase filter, then convert it to minimum phase:
>>> from scipy.signal import remez, minimum_phase, freqz, group_delay
>>> import matplotlib.pyplot as plt
>>> freq = [0, 0.2, 0.3, 1.0]
>>> desired = [1, 0]
>>> h_linear = remez(151, freq, desired, Hz=2.)
Convert it to minimum phase:
>>> h_min_hom = minimum_phase(h_linear, method='homomorphic')
>>> h_min_hil = minimum_phase(h_linear, method='hilbert')
Compare the three filters:
>>> fig, axs = plt.subplots(4, figsize=(4, 8))
>>> for h, style, color in zip((h_linear, h_min_hom, h_min_hil),
... ('-', '-', '--'), ('k', 'r', 'c')):
... w, H = freqz(h)
... w, gd = group_delay((h, 1))
... w /= np.pi
... axs[0].plot(h, color=color, linestyle=style)
... axs[1].plot(w, np.abs(H), color=color, linestyle=style)
... axs[2].plot(w, 20 * np.log10(np.abs(H)), color=color, linestyle=style)
... axs[3].plot(w, gd, color=color, linestyle=style)
>>> for ax in axs:
... ax.grid(True, color='0.5')
... ax.fill_between(freq[1:3], *ax.get_ylim(), color='#ffeeaa', zorder=1)
>>> axs[0].set(xlim=[0, len(h_linear) - 1], ylabel='Amplitude', xlabel='Samples')
>>> axs[1].legend(['Linear', 'Min-Hom', 'Min-Hil'], title='Phase')
>>> for ax, ylim in zip(axs[1:], ([0, 1.1], [-150, 10], [-60, 60])):
... ax.set(xlim=[0, 1], ylim=ylim, xlabel='Frequency')
>>> axs[1].set(ylabel='Magnitude')
>>> axs[2].set(ylabel='Magnitude (dB)')
>>> axs[3].set(ylabel='Group delay')
>>> plt.tight_layout()
References
----------
.. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and
complex minimum phase digital FIR filters," Acoustics, Speech,
and Signal Processing, 1999. Proceedings., 1999 IEEE International
Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3.
doi: 10.1109/ICASSP.1999.756179
.. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR
filters by direct factorization," Signal Processing,
vol. 10, no. 4, pp. 369-383, Jun. 1986.
.. [3] T. Saramaki, "Finite Impulse Response Filter Design," in
Handbook for Digital Signal Processing, chapter 4,
New York: Wiley-Interscience, 1993.
.. [4] J. S. Lim, Advanced Topics in Signal Processing.
Englewood Cliffs, N.J.: Prentice Hall, 1988.
.. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck,
"Discrete-Time Signal Processing," 2nd edition.
Upper Saddle River, N.J.: Prentice Hall, 1999.
""" # noqa
h = np.asarray(h)
if np.iscomplexobj(h):
raise ValueError('Complex filters not supported')
if h.ndim != 1 or h.size <= 2:
raise ValueError('h must be 1D and more than 2 samples long')
n_half = len(h) // 2
if not np.allclose(h[-n_half:][::-1], h[:n_half]):
warnings.warn('h does not appear to be symmetric, conversion may '
'fail', RuntimeWarning)
if not isinstance(method, string_types) or method not in \
('homomorphic', 'hilbert',):
raise ValueError('method must be "homomorphic" or "hilbert", got %r'
% (method,))
if n_fft is None:
n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
n_fft = int(n_fft)
if n_fft < len(h):
raise ValueError('n_fft must be at least len(h)==%s' % len(h))
if method == 'hilbert':
w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half)
H = np.real(fft(h, n_fft) * np.exp(1j * w))
dp = max(H) - 1
ds = 0 - min(H)
S = 4. / (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2
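# Shift and rescale the sampled response so it is non-negative before
# taking the square root below.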
H += ds
H *= S
H = np.sqrt(H, out=H)
H += 1e-10 # ensure that the log does not explode
h_minimum = _dhtm(H)
else: # method == 'homomorphic'
# zero-pad; calculate the DFT
h_temp = np.abs(fft(h, n_fft))
# take 0.25*log(|H|**2) = 0.5*log(|H|)
h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up
np.log(h_temp, out=h_temp)
h_temp *= 0.5
# IDFT
h_temp = ifft(h_temp).real
# multiply pointwise by the homomorphic filter
# lmin[n] = 2u[n] - d[n]
win = np.zeros(n_fft)
win[0] = 1
stop = (len(h) + 1) // 2
win[1:stop] = 2
if len(h) % 2:
win[stop] = 1
h_temp *= win
h_temp = ifft(np.exp(fft(h_temp)))
h_minimum = h_temp.real
n_out = n_half + len(h) % 2
return h_minimum[:n_out]
# --- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/_max_len_seq.py ---
# Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
31: [28], 32: [31, 30, 10]}
def max_len_seq(nbits, state=None, length=None, taps=None):
"""
Maximum length sequence (MLS) generator.
Parameters
----------
nbits : int
Number of bits to use. Length of the resulting sequence will
be ``(2**nbits) - 1``. Note that generating long sequences
(e.g., greater than ``nbits == 16``) can take a long time.
state : array_like, optional
If array, must be of length ``nbits``, and will be cast to binary
(bool) representation. If None, a seed of ones will be used,
producing a repeatable representation. If ``state`` is all
zeros, an error is raised as this is invalid. Default: None.
length : int, optional
Number of samples to compute. If None, the entire length
``(2**nbits) - 1`` is computed.
taps : array_like, optional
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
If None, taps will be automatically selected (for up to
``nbits == 32``).
Returns
-------
seq : array
Resulting MLS sequence of 0's and 1's.
state : array
The final state of the shift register.
Notes
-----
The algorithm for MLS generation is generically described in:
https://en.wikipedia.org/wiki/Maximum_length_sequence
The default values for taps are specifically taken from the first
option listed for each value of ``nbits`` in:
http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm
.. versionadded:: 0.15.0
Examples
--------
MLS uses binary convention:
>>> from scipy.signal import max_len_seq
>>> max_len_seq(4)[0]
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
MLS has a white spectrum (except for DC):
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
>>> spec = fft(seq)
>>> N = len(seq)
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Circular autocorrelation of MLS is an impulse:
>>> acorrcirc = ifft(spec * np.conj(spec)).real
>>> plt.figure()
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Linear autocorrelation of MLS is approximately an impulse:
>>> acorr = np.correlate(seq, seq, 'full')
>>> plt.figure()
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
"""
if taps is None:
if nbits not in _mls_taps:
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError('nbits must be between %s and %s if taps is None'
% (known_taps.min(), known_taps.max()))
taps = np.array(_mls_taps[nbits], np.intp)
else:
taps = np.unique(np.array(taps, np.intp))[::-1]
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
raise ValueError('taps must be non-empty with values between '
'zero and nbits (inclusive)')
taps = np.ascontiguousarray(taps) # needed for Cython
n_max = (2**nbits) - 1
if length is None:
length = n_max
else:
length = int(length)
if length < 0:
raise ValueError('length must be greater than or equal to 0')
# We use int8 instead of bool here because numpy arrays of bools
# don't seem to work nicely with Cython
if state is None:
state = np.ones(nbits, dtype=np.int8, order='c')
else:
# makes a copy if need be, ensuring it's 0's and 1's
state = np.array(state, dtype=bool, order='c').astype(np.int8)
if state.ndim != 1 or state.size != nbits:
raise ValueError('state must be a 1-dimensional array of size nbits')
if np.all(state == 0):
raise ValueError('state must not be all zeros')
seq = np.empty(length, dtype=np.int8, order='c')
state = _max_len_seq_inner(taps, state, nbits, length, seq)
return seq, state
# --- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/filter_design.py ---
"""Filter design.
"""
from __future__ import division, print_function, absolute_import
import math
import operator
import warnings
import numpy
import numpy as np
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array,
mintypecode)
from numpy.polynomial.polynomial import polyval as npp_polyval
from scipy import special, optimize, fftpack
from scipy.special import comb, factorial
from scipy._lib._numpy_compat import polyvalfromroots
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk',
'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
EPSILON = 2e-16
def findfreqs(num, den, N, kind='ba'):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system, where the coefficients
are ordered from highest to lowest degree. Or, the roots of the
transfer function numerator and denominator (i.e. zeroes and poles).
N : int
The length of the array to be computed.
kind : str {'ba', 'zp'}, optional
Specifies whether the numerator and denominator are specified by their
polynomial coefficients ('ba'), or their roots ('zp').
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
if kind == 'ba':
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
elif kind == 'zp':
ep = atleast_1d(den) + 0j
tz = atleast_1d(num) + 0j
else:
raise ValueError("input must be one of {'ba', 'zp'}")
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
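# Build a log-spaced grid spanning roughly half a decade beyond the
# slowest and fastest pole/zero corner frequencies estimated above.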
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=200, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
        b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
        a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqs_zpk(z, p, k, worN=200):
"""
Compute frequency response of analog filter.
Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its
frequency response::
           (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1])
H(w) = k * ----------------------------------------
           (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1])
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqz : Compute the frequency response of a digital filter in TF form
freqz_zpk : Compute the frequency response of a digital filter in ZPK form
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.signal import freqs_zpk, iirfilter
>>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1',
... output='zpk')
>>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
k = np.asarray(k)
if k.size > 1:
raise ValueError('k must be a single scalar gain')
if worN is None:
w = findfreqs(z, p, 200, kind='zp')
elif isinstance(worN, int):
N = worN
w = findfreqs(z, p, N, kind='zp')
else:
w = worN
w = atleast_1d(w)
s = 1j * w
num = polyvalfromroots(s, z)
den = polyvalfromroots(s, p)
h = k * num/den
return w, h
def freqz(b, a=1, worN=512, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
            jw                 -jw              -jwM
   jw    B(e  )    b[0] + b[1]e    + ... + b[M]e
H(e  ) = ------ = -----------------------------------
            jw                 -jw              -jwN
         A(e  )    a[0] + a[1]e    + ... + a[N]e
Parameters
----------
b : array_like
Numerator of a linear filter. If `b` has dimension greater than 1,
it is assumed that the coefficients are stored in the first dimension,
and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
array must be compatible for broadcasting.
a : array_like
Denominator of a linear filter. If `b` has dimension greater than 1,
it is assumed that the coefficients are stored in the first dimension,
and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
array must be compatible for broadcasting.
worN : {None, int, array_like}, optional
If None, then compute at 512 equally spaced frequencies.
If a single integer, then compute at that many frequencies. This is
a convenient alternative to::
np.linspace(0, 2*pi if whole else pi, N, endpoint=False)
Using a number that is fast for FFT computations can result in
faster computations (see Notes).
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz_zpk
sosfreqz
Notes
-----
Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable
for `plot` produces unexpected results, as this plots the real part of the
complex transfer function, not the magnitude.
Try ``lambda w, h: plot(w, np.abs(h))``.
A direct computation via (R)FFT is used to compute the frequency response
when the following conditions are met:
1. An integer value is given for `worN`.
2. `worN` is fast to compute via FFT (i.e.,
`next_fast_len(worN) <scipy.fftpack.next_fast_len>` equals `worN`).
3. The denominator coefficients are a single value (``a.shape[0] == 1``).
4. `worN` is at least as long as the numerator coefficients
(``worN >= b.shape[0]``).
5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``.
For long FIR filters, the FFT approach can have lower error and be much
faster than the equivalent direct polynomial calculation.
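For example, the following call meets all of these conditions (an
illustrative sketch; any short FIR `b` with a fast integer `worN` would
do):
>>> from scipy import signal
>>> w, h = signal.freqz([0.5, 0.5], worN=1024)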
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
Broadcasting Examples
Suppose we have two FIR filters whose coefficients are stored in the
rows of an array with shape (2, 25). For this demonstration we'll
use random data:
>>> np.random.seed(42)
>>> b = np.random.rand(2, 25)
To compute the frequency response for these two filters with one call
to `freqz`, we must pass in ``b.T``, because `freqz` expects the first
axis to hold the coefficients. We must then extend the shape with a
trivial dimension of length 1 to allow broadcasting with the array
of frequencies. That is, we pass in ``b.T[..., np.newaxis]``, which has
shape (25, 2, 1):
>>> w, h = signal.freqz(b.T[..., np.newaxis], worN=1024)
>>> w.shape
(1024,)
>>> h.shape
(2, 1024)
Now suppose we have two transfer functions, with the same numerator
coefficients ``b = [0.5, 0.5]``. The coefficients for the two denominators
are stored in the first dimension of the two-dimensional array `a`::
a = [   1      1  ]
    [ -0.25, -0.5 ]
>>> b = np.array([0.5, 0.5])
>>> a = np.array([[1, 1], [-0.25, -0.5]])
Only `a` is more than one-dimensional. To make it compatible for
broadcasting with the frequencies, we extend it with a trivial dimension
in the call to `freqz`:
>>> w, h = signal.freqz(b, a[..., np.newaxis], worN=1024)
>>> w.shape
(1024,)
>>> h.shape
(2, 1024)
"""
b = atleast_1d(b)
a = atleast_1d(a)
if worN is None:
worN = 512
h = None
try:
worN = operator.index(worN)
except TypeError: # not int-like
w = atleast_1d(worN)
else:
if worN < 0:
raise ValueError('worN must be nonnegative, got %s' % (worN,))
lastpoint = 2 * pi if whole else pi
w = np.linspace(0, lastpoint, worN, endpoint=False)
if (a.size == 1 and worN >= b.shape[0] and
fftpack.next_fast_len(worN) == worN and
(b.ndim == 1 or (b.shape[-1] == 1))):
# if worN is fast, 2 * worN will be fast, too, so no need to check
n_fft = worN if whole else worN * 2
if np.isrealobj(b) and np.isrealobj(a):
fft_func = np.fft.rfft
else:
fft_func = fftpack.fft
h = fft_func(b, n=n_fft, axis=0)[:worN]
h /= a
if fft_func is np.fft.rfft and whole:
# exclude DC and maybe Nyquist (no need to use axis_reverse
# here because we can build reversal with the truncation)
stop = -1 if n_fft % 2 == 1 else -2
h_flip = slice(stop, 0, -1)
h = np.concatenate((h, h[h_flip].conj()))
if b.ndim > 1:
# Last axis of h has length 1, so drop it.
h = h[..., 0]
# Rotate the first axis of h to the end.
h = np.rollaxis(h, 0, h.ndim)
del worN
if h is None: # still need to compute using freqs w
zm1 = exp(-1j * w)
h = (npp_polyval(zm1, b, tensor=False) /
npp_polyval(zm1, a, tensor=False))
if plot is not None:
plot(w, h)
return w, h
def freqz_zpk(z, p, k, worN=512, whole=False):
r"""
Compute the frequency response of a digital filter in ZPK form.
Given the Zeros, Poles and Gain of a digital filter, compute its frequency
response:
:math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])`
where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are
the `poles`.
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If single integer (default 512, same as None), then compute at `worN`
frequencies equally spaced around the unit circle. If an array_like,
compute the response at the frequencies given (in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqs_zpk : Compute the frequency response of an analog filter in ZPK form
freqz : Compute the frequency response of a digital filter in TF form
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy import signal
>>> z, p, k = signal.butter(4, 0.2, output='zpk')
>>> w, h = signal.freqz_zpk(z, p, k)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
z, p = map(atleast_1d, (z, p))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
w = numpy.linspace(0, lastpoint, 512, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(1j * w)
h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p)
return w, h
def group_delay(system, w=512, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
         d        jw
D(w) = - -- arg H(e  )
         dw
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None, then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
When such a case arises, a warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
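# Trick from [1]_: for real coefficients, the group delay of b/a equals
# that of c = convolve(b, a[::-1]) minus (a.size - 1); `cr` holds the
# ramp-weighted coefficients used to evaluate the derivative term.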
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in SOS format.
Given `sos`, an array with shape (n, 6) of second order sections of
a digital filter, compute the frequency response of the system function::
       B0(z)   B1(z)         B{n-1}(z)
H(z) = ----- * ----- * ... * ---------
       A0(z)   A1(z)         A{n-1}(z)
for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
denominator of the transfer function of the k-th second order section.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
Using a number that is fast for FFT computations can result in
faster computations (see Notes of `freqz`).
If an array_like, compute the response at the frequencies given (in
radians/sample; must be 1D).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz, sosfilt
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 15th-order bandpass filter in SOS format.
>>> from scipy import signal
>>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='sos')
Compute the frequency response at 1500 points from DC to Nyquist.
>>> w, h = signal.sosfreqz(sos, worN=1500)
Plot the response.
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... [r'$-\\pi$', r'$-\\pi/2$', '0', r'$\\pi/2$', r'$\\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
If the same filter is implemented as a single transfer function,
numerical error corrupts the frequency response:
>>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='ba')
>>> w, h = signal.freqz(b, a, worN=1500)
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
if n_sections == 0:
raise ValueError('Cannot compute frequencies with no sections')
h = 1.
for row in sos:
w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole)
h *= rowh
return w, h
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a = [a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
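Examples
--------
As a quick sketch, rebuild the polynomial coefficients of a system
with a zero at -1, poles at +0.5 and -0.5, and a gain of 2 (expected
values worked out by hand from ``numpy.poly``):
>>> from scipy import signal
>>> b, a = signal.zpk2tf([-1], [0.5, -0.5], 2)  # b = [2., 2.], a = [1., 0., -0.25]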
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
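Examples
--------
A short usage sketch: convert a 4th-order Butterworth lowpass from
'ba' to SOS form, which should yield two second-order sections:
>>> from scipy import signal
>>> b, a = signal.butter(4, 0.2)
>>> sos = signal.tf2sos(b, a)
>>> sos.shape
(2, 6)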
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
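Examples
--------
A round-trip sketch: a 4th-order filter in SOS form collapses to
numerator and denominator polynomials of five coefficients each:
>>> from scipy import signal
>>> sos = signal.butter(4, 0.2, output='sos')
>>> b, a = signal.sos2tf(sos)
>>> len(b), len(a)
(5, 5)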
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
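Examples
--------
A short sketch: each section contributes two zeros and two poles, so
a two-section filter yields four of each:
>>> from scipy import signal
>>> sos = signal.butter(4, 0.2, output='sos')
>>> z, p, k = signal.sos2zpk(sos)
>>> z.shape, p.shape
((4,), (4,))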
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zeros so that all numerators have the same length. Such
alignment is necessary for functions like 'tf2ss', which needs the
alignment when dealing with SIMO transfer functions.
Parameters
----------
nums: array_like
Numerator or list of numerators. Not necessarily with same length.
Returns
-------
nums: array
The numerator. If `nums` input was a list of numerators then a 2d
array with padded zeros for shorter numerators is returned. Otherwise
returns ``np.asarray(nums)``.
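Examples
--------
A minimal sketch of the padding behavior (the shorter numerator is
left-padded with zeros):
>>> aligned = _align_nums([[1, 2, 3], [4, 5]])
>>> aligned.tolist()
[[1.0, 2.0, 3.0], [0.0, 4.0, 5.0]]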
"""
try:
# asarray can raise a ValueError if one of the
# numerators is a scalar and another is array-like,
# e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
b: array_like
Numerator of the transfer function. Can be a 2d array to normalize
multiple transfer functions.
a: array_like
Denominator of the transfer function. At most 1d.
Returns
-------
num: array
The numerator of the normalized transfer function. At least a 1d
array. A 2d-array if the input `num` is a 2d array.
den: 1d-array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
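Examples
--------
A minimal sketch: normalize ``H(s) = (2s + 4) / (4s + 2)`` so that the
leading denominator coefficient becomes 1:
>>> from scipy.signal import normalize
>>> num, den = normalize([2, 4], [4, 2])
>>> num.tolist(), den.tolist()
([0.5, 1.0], [1.0, 0.5])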
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least on nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
See Also
--------
lp2hp, lp2bp, lp2bs, bilinear
lp2lp_zpk
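Examples
--------
A minimal sketch, moving the prototype ``H(s) = 1 / (s + 1)`` to a
cutoff of 10 rad/s, i.e. ``H(s) = 10 / (s + 10)`` (values worked out
by hand from the substitution ``s -> s/wo``):
>>> from scipy.signal import lp2lp
>>> b, a = lp2lp([1.0], [1.0, 1.0], wo=10)
>>> b.tolist(), a.tolist()
([10.0], [1.0, 10.0])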
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
See Also
--------
lp2lp, lp2bp, lp2bs, bilinear
lp2hp_zpk
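Examples
--------
A minimal sketch: the substitution ``s -> wo/s`` turns the prototype
``H(s) = 1 / (s + 1)`` into the highpass ``H(s) = s / (s + 10)``:
>>> from scipy.signal import lp2hp
>>> b, a = lp2hp([1.0], [1.0, 1.0], wo=10)
>>> b.tolist(), a.tolist()
([1.0, 0.0], [1.0, 10.0])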
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
See Also
--------
lp2lp, lp2hp, lp2bs, bilinear
lp2bp_zpk
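Examples
--------
A minimal sketch: with ``s -> (s**2 + wo**2) / (bw * s)``, the
prototype ``H(s) = 1 / (s + 1)`` becomes
``H(s) = 2s / (s**2 + 2s + 100)`` (coefficients worked out by hand):
>>> from scipy.signal import lp2bp
>>> b, a = lp2bp([1.0], [1.0, 1.0], wo=10, bw=2)
>>> b.tolist(), a.tolist()
([2.0, 0.0], [1.0, 2.0, 100.0])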
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
See Also
--------
lp2lp, lp2hp, lp2bp, bilinear
lp2bs_zpk
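Examples
--------
A minimal sketch: with ``s -> (bw * s) / (s**2 + wo**2)``, the
prototype ``H(s) = 1 / (s + 1)`` becomes
``H(s) = (s**2 + 100) / (s**2 + 2s + 100)`` (worked out by hand):
>>> from scipy.signal import lp2bs
>>> b, a = lp2bs([1.0], [1.0, 1.0], wo=10, bw=2)
>>> b.tolist(), a.tolist()
([1.0, 0.0, 100.0], [1.0, 2.0, 100.0])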
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
See Also
--------
lp2lp, lp2hp, lp2bp, lp2bs
bilinear_zpk
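Examples
--------
A minimal sketch: discretize ``H(s) = 1 / (s + 1)`` at ``fs = 1``,
which should give ``H(z) = (z + 1) / (3z - 1)`` up to normalization:
>>> from scipy.signal import bilinear
>>> b, a = bilinear([1.0], [1.0, 1.0], fs=1)
>>> np.allclose(b, [1/3., 1/3.]), np.allclose(a, [1., -1/3.])
(True, True)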
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
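Examples
--------
A short usage sketch, designing a minimum-order elliptic lowpass
digital filter with a passband edge at 0.2 and a stopband edge at 0.3
(in half-cycles / sample), returned in SOS form:
>>> from scipy import signal
>>> sos = signal.iirdesign(0.2, 0.3, gpass=1, gstop=40, output='sos')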
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn <= 0) or numpy.any(Wn >= 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 < Wn < 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = lp2lp_zpk(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = lp2hp_zpk(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = bilinear_zpk(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
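Examples
--------
One zero against three poles gives a relative degree of 2:
>>> _relative_degree([-1], [-1, -2, -3])
2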
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
def bilinear_zpk(z, p, k, fs):
"""
Return a digital IIR filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)``
for ``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog filter transfer function.
p : array_like
Poles of the analog filter transfer function.
k : float
System gain of the analog filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
See Also
--------
lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk
bilinear
Notes
-----
.. versionadded:: 1.1.0
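Examples
--------
A minimal sketch: map the analog system ``H(s) = 1 / (s + 1)`` to the
z-plane at ``fs = 1``; the zero at infinity moves to z = -1 (Nyquist):
>>> from scipy.signal import bilinear_zpk
>>> z, p, k = bilinear_zpk([], [-1.0], 1.0, fs=1)
>>> np.allclose(z, [-1.]), np.allclose(p, [1/3.]), np.allclose(k, 1/3.)
(True, True, True)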
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2.0*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def lp2lp_zpk(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog filter transfer function.
p : array_like
Poles of the analog filter transfer function.
k : float
System gain of the analog filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
See Also
--------
lp2hp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear
lp2lp
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
.. versionadded:: 1.1.0
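Examples
--------
A minimal sketch: shift the prototype ``H(s) = 1 / (s + 1)`` to a
cutoff of 5 rad/s, giving ``H(s) = 5 / (s + 5)``:
>>> from scipy.signal import lp2lp_zpk
>>> z, p, k = lp2lp_zpk([], [-1.0], 1.0, wo=5)
>>> z.tolist(), p.tolist(), k
([], [-5.0], 5.0)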
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def lp2hp_zpk(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog filter transfer function.
p : array_like
Poles of the analog filter transfer function.
k : float
System gain of the analog filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
See Also
--------
lp2lp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear
lp2hp
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
.. versionadded:: 1.1.0
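Examples
--------
A minimal sketch: inverting ``H(s) = 1 / (s + 1)`` about ``wo = 5``
gives the highpass ``H(s) = s / (s + 5)``, with a new zero at the
origin:
>>> from scipy.signal import lp2hp_zpk
>>> z, p, k = lp2hp_zpk([], [-1.0], 1.0, wo=5)
>>> z.tolist(), p.tolist(), k
([0.0], [-5.0], 1.0)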
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def lp2bp_zpk(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog filter transfer function.
p : array_like
Poles of the analog filter transfer function.
k : float
System gain of the analog filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
See Also
--------
lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear
lp2bp
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
.. versionadded:: 1.1.0
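Examples
--------
A minimal sketch: transforming ``H(s) = 1 / (s + 1)`` with ``wo = 10``
and ``bw = 2`` maps the single pole to the pair -1 +/- j*sqrt(99) and
scales the gain by ``bw**degree``:
>>> from scipy.signal import lp2bp_zpk
>>> z, p, k = lp2bp_zpk([], [-1.0], 1.0, wo=10, bw=2)
>>> np.allclose(p, [-1 + np.sqrt(99)*1j, -1 - np.sqrt(99)*1j]), k
(True, 2.0)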
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog filter transfer function.
p : array_like
Poles of the analog filter transfer function.
k : float
System gain of the analog filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
See Also
--------
lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear
lp2bs
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
.. versionadded:: 1.1.0
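Examples
--------
A minimal sketch: transforming ``H(s) = 1 / (s + 1)`` with ``wo = 10``
and ``bw = 2`` places a pair of zeros at +/-10j (the stopband center)
while the gain is unchanged:
>>> from scipy.signal import lp2bs_zpk
>>> z, p, k = lp2bs_zpk([], [-1.0], 1.0, wo=10, bw=2)
>>> np.allclose(z, [10j, -10j]), k
(True, 1.0)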
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth-order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
ellipord, ellipap
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
"""
Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth's cutoff frequency (green):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') # -3 dB magnitude
>>> plt.axvline(10, color='green') # cutoff frequency
>>> plt.title('Magnitude-normalized Bessel filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the delay-normalized filter, showing the maximally-flat group delay
at 0.1 seconds:
>>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
>>> w, h = signal.freqs(b, a)
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.axhline(0.1, color='red') # 0.1 seconds group delay
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
References
----------
.. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel_'+norm)
def maxflat():
    pass  # Placeholder: not implemented in this version.
def yulewalk():
    pass  # Placeholder: not implemented in this version.
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
    gpass : float
        Amount of ripple in the passband in dB.
    gstop : float
        Amount of attenuation in the stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
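# Hedged usage sketch (editor's addition, not in the original source):
# `band_stop_obj` is meant to be minimized over one passband edge, exactly as
# the *ord functions below do with `optimize.fminbound`. The edge values here
# are illustrative only.
def _demo_band_stop_obj():
    passb = np.array([0.5, 2.0])  # fixed passband edges (rad/s)
    stopb = np.array([1.0, 1.5])  # fixed stopband edges (rad/s)
    # Vary the lower passband edge (ind=0) to minimize the required order
    return optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
                              args=(0, passb, stopb, 3, 40, 'butter'),
                              disp=0)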
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
    Design an analog bandpass filter with passband within 3 dB from 20 to
    50 rad/s, while attenuating by at least 40 dB below 14 rad/s and above
    60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
# Find the Butterworth natural frequency WN (or the "3dB" frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
    to 0.2*(fs/2), while attenuating by at least 40 dB above 0.3*(fs/2).
    Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
    >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
    Design a digital bandstop filter which attenuates by at least 60 dB
    from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
    >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
    >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
        The elliptic natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
    above 30 rad/s, while attenuating by at least 60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
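# Quick illustration (editor's addition, not in the original source): for
# N = 2 the Butterworth poles sit on the unit circle at 135 and 225 degrees.
def _demo_buttap():
    z, p, k = buttap(2)
    assert z.size == 0 and k == 1
    assert np.allclose(abs(p), 1.0)                   # poles on unit circle
    assert np.allclose(p.real, -np.sqrt(2) / 2)       # at 135 and 225 degrees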
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
def cheb2ap(N, rs):
"""
    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth-order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
ellip : Filter design function using this prototype
References
----------
.. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
Chapters 5 and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
    This can more efficiently calculate ratios of factorials, since::
        n!/m! == falling_factorial(n, n-m)
    where ``n >= m``, skipping the factors that cancel out. The usual
    factorial is ``n! == ff(n, n)``.
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
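# Illustrative check (editor's addition, not in the original source):
# _falling_factorial(n, n - m) equals n!/m! for n >= m, computed without the
# factors that cancel.
def _demo_falling_factorial():
    assert _falling_factorial(7, 3) == 7 * 6 * 5 == 210
    assert _falling_factorial(7, 3) == (factorial(7, exact=True) //
                                        factorial(4, exact=True))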
def _bessel_poly(n, reverse=False):
"""
Return the coefficients of Bessel polynomial of degree `n`
If `reverse` is true, a reverse Bessel polynomial is output.
Output is a list of coefficients:
[1] = 1
[1, 1] = 1*s + 1
[1, 3, 3] = 1*s^2 + 3*s + 3
[1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15
[1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
etc.
Output is a Python list of arbitrary precision long ints, so n is only
limited by your hardware's memory.
Sequence is http://oeis.org/A001498 , and output can be confirmed to
match http://oeis.org/A001498/b001498.txt :
>>> i = 0
>>> for n in range(51):
... for x in _bessel_poly(n, reverse=True):
... print(i, x)
... i += 1
"""
if abs(int(n)) != n:
raise ValueError("Polynomial order must be a nonnegative integer")
else:
n = int(n) # np.int32 doesn't work, for instance
out = []
for k in range(n + 1):
num = _falling_factorial(2*n - k, n)
den = 2**(n - k) * factorial(k, exact=True)
out.append(num // den)
if reverse:
return out[::-1]
else:
return out
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
        raise RuntimeError('Zeros failed to converge')
return x
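# Hedged sketch (editor's addition, not in the original source):
# Aberth-Ehrlich iteration on p(z) = z**2 - 1 from rough, non-symmetric
# starting guesses converges to the roots +1 and -1.
def _demo_aberth():
    def f(z):
        return z**2 - 1
    def fp(z):
        return 2*z
    roots = _aberth(f, fp, np.array([0.5 + 0.1j, -0.4 - 0.2j]))
    assert np.allclose(sorted(roots, key=lambda r: r.real), [-1, 1])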
def _bessel_zeros(N):
"""
Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
modified Bessel function of the second kind
"""
if N == 0:
return asarray([])
# Generate starting points
x0 = _campos_zeros(N)
# Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
# Bessel polynomial y_N(x)
def f(x):
return special.kve(N+0.5, 1/x)
# First derivative of above
def fp(x):
return (special.kve(N-0.5, 1/x)/(2*x**2) -
special.kve(N+0.5, 1/x)/(x**2) +
special.kve(N+1.5, 1/x)/(2*x**2))
# Starting points converge to true zeros
x = _aberth(f, fp, x0)
# Improve precision using Newton's method on each
for i in range(len(x)):
x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
# Average complex conjugates to make them exactly symmetrical
x = np.mean((x, x[::-1].conj()), 0)
# Zeros should sum to -1
if abs(np.sum(x) + 1) > 1e-15:
raise RuntimeError('Generated zeros are inaccurate')
return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
    The first 10 values are listed in the "Bessel Scale Factors" table of
    C. Bond, "Bessel Filter Polynomials, Poles and Circuit Elements", 2003.
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
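# Sanity check (editor's addition, not in the original source): the
# first-order delay-normalized Bessel prototype is H(s) = 1/(s + 1), whose
# -3 dB point is already at 1 rad/s, so no shift should be needed.
def _demo_norm_factor():
    assert abs(_norm_factor([-1.0], 1.0) - 1.0) < 1e-8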
def besselap(N, norm='phase'):
"""
Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
Parameters
----------
N : int
The order of the filter.
norm : {'phase', 'delay', 'mag'}, optional
Frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This
happens for both low-pass and high-pass filters, so this is the
"phase-matched" case. [6]_
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1 (e.g. 1 second). This is the "natural" type obtained by
            solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency 1. This is called "frequency normalization" by
Bond. [1]_
.. versionadded:: 0.18.0
Returns
-------
z : ndarray
        Zeros of the transfer function. This is always an empty array.
p : ndarray
Poles of the transfer function.
k : scalar
Gain of the transfer function. For phase-normalized, this is always 1.
See Also
--------
bessel : Filter design function using this prototype
Notes
-----
To find the pole locations, approximate starting points are generated [2]_
for the zeros of the ordinary Bessel polynomial [3]_, then the
Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
calculate more accurate zeros, and these locations are then inverted about
the unit circle.
References
----------
.. [1] C.R. Bond, "Bessel Filter Constants",
http://www.crbond.com/papers/bsf.pdf
.. [2] Campos and Calderon, "Approximate closed-form formulas for the
zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
.. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
.. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
April 1973
.. [5] Ehrlich, "A modified Newton method for polynomials", Communications
of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
:DOI:`10.1145/363067.363115`
.. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
Others", RaneNote 147, 1998, http://www.rane.com/note147.html
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
if N == 0:
p = []
k = 1
else:
# Find roots of reverse Bessel polynomial
p = 1/_bessel_zeros(N)
a_last = _falling_factorial(2*N, N) // 2**N
# Shift them to a different normalization if required
if norm in ('delay', 'mag'):
# Normalized for group delay of 1
k = a_last
if norm == 'mag':
# -3 dB magnitude point is at 1 rad/sec
norm_factor = _norm_factor(p, k)
p /= norm_factor
k = norm_factor**-N * a_last
elif norm == 'phase':
# Phase-matched (1/2 max phase shift at 1 rad/sec)
# Asymptotes are same as Butterworth filter
p *= 10**(-math.log10(a_last)/N)
k = 1
else:
raise ValueError('normalization not understood')
return asarray([]), asarray(p, dtype=complex), float(k)
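# Quick check (editor's addition, not in the original source): for N = 1
# every normalization gives H(s) = 1/(s + 1), since the reverse Bessel
# polynomial is just s + 1 and a_last == 1.
def _demo_besselap():
    z, p, k = besselap(1)
    assert z.size == 0
    assert np.allclose(p, [-1]) and k == 1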
def iirnotch(w0, Q):
"""
Design second-order IIR notch digital filter.
A notch filter is a band-stop filter with a narrow bandwidth
(high quality factor). It rejects a narrow frequency band and
leaves the rest of the spectrum little changed.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirpeak
Notes
-----
.. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the 60Hz component from a
signal sampled at 200Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 60.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design notch filter
>>> b, a = signal.iirnotch(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-25, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "notch")
def iirpeak(w0, Q):
"""
Design second-order IIR peak (resonant) digital filter.
A peak filter is a band-pass filter with a narrow bandwidth
(high quality factor). It rejects components outside a narrow
frequency band.
Parameters
----------
w0 : float
Normalized frequency to be retained in a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding
to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
peak filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirnotch
Notes
-----
.. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the frequencies other than the 300Hz
component from a signal sampled at 1000Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 300.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design peak filter
>>> b, a = signal.iirpeak(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-50, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "peak")
def _design_notch_peak_filter(w0, Q, ftype):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
"""
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
# Checks if w0 is within the range
    if w0 >= 1.0 or w0 <= 0.0:
        raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0/Q
# Normalize inputs
bw = bw*np.pi
w0 = w0*np.pi
    # Compute -3 dB attenuation
gb = 1/np.sqrt(2)
if ftype == "notch":
# Compute beta: formula 11.3.4 (p.575) from reference [1]
beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0)
elif ftype == "peak":
# Compute beta: formula 11.3.19 (p.579) from reference [1]
beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0)
else:
raise ValueError("Unknown ftype.")
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0/(1.0+beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0])
else:
b = (1.0-gain)*np.array([1.0, 0.0, -1.0])
a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)])
return b, a
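# Hedged check (editor's addition, not in the original source): a notch
# should null its center frequency while leaving DC essentially untouched.
def _demo_notch():
    b, a = _design_notch_peak_filter(0.5, 30.0, "notch")
    z0 = np.exp(1j * np.pi * 0.5)  # point on the unit circle at w0
    assert abs(np.polyval(b, z0) / np.polyval(a, z0)) < 1e-12   # nulled at w0
    assert abs(np.polyval(b, 1.0) / np.polyval(a, 1.0) - 1.0) < 1e-12  # DC ~1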
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}
# === cba-pipeline-public / containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/wavelets.py ===
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
    daub : ndarray
        The 2p filter coefficients of the low-pass filter.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
    elif p < 35:
        # construct polynomial and factor it
        P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
        yj = np.roots(P)
        # Note: a rescaled polynomial (coefficients divided by 4**k, roots
        # multiplied back by 4) might be better conditioned for large p,
        # but needs more work.
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
    hk : array_like
        Coefficients of the low-pass filter.
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
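# Hedged example (editor's addition, not in the original source): qmf
# reverses the taps and alternates signs, mapping [h0, h1, h2, h3] to
# [h3, -h2, h1, -h0].
def _demo_qmf():
    g = qmf(np.array([1., 2., 3., 4.]))
    assert np.allclose(g, [4., -3., 2., -1.])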
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
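# Hedged usage sketch (editor's addition, not in the original source): build
# the Daubechies-2 scaling function on a dyadic grid; it should integrate to
# roughly 1.
def _demo_cascade():
    x, phi, psi = cascade(daub(2), J=7)
    dx = x[1] - x[0]
    assert abs(np.sum(phi) * dx - 1.0) < 1e-2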
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
    Note that the energy of the returned wavelet is not normalised
    according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
    ``A * (1 - (x/a)**2) * exp(-x**2 / (2 * a**2))``,
    where ``A = 2 / (sqrt(3*a) * pi**0.25)``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, wavelet(length,
width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
# === cba-pipeline-public / containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/signaltools.py ===
# Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import operator
import threading
import sys
import timeit
from . import sigtools, dlti
from ._upfirdn import upfirdn, _output_len
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import fftpack, linalg
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
product, r_, ravel, real_if_close, reshape,
roots, sort, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
import math
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
from .filter_design import cheby1, _validate_sos
from .fir_filter_design import firwin
if sys.version_info >= (3, 5):
    from math import gcd
else:
    from fractions import gcd
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'resample_poly', 'detrend',
'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
return _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _bvalfromboundary(boundary):
try:
return _boundarydict[boundary] << 2
except KeyError:
raise ValueError("Acceptable boundary flags are 'fill', 'circular' "
"(or 'wrap'), and 'symmetric' (or 'symm').")
def _inputs_swap_needed(mode, shape1, shape2):
"""
If in 'valid' mode, returns whether or not the input arrays need to be
swapped depending on whether `shape1` is at least as large as `shape2` in
every dimension.
This is important for some of the correlation and convolution
implementations in this module, where the larger array input needs to come
before the smaller array input when operating in this mode.
Note that if the mode provided is not 'valid', False is immediately
returned.
"""
if mode == 'valid':
ok1, ok2 = True, True
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
ok1 = False
if not d2 >= d1:
ok2 = False
if not (ok1 or ok2):
raise ValueError("For 'valid' mode, one must be at least "
"as large as the other in every dimension")
return not ok1
return False
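# Hedged example (editor's addition, not in the original source): in 'valid'
# mode the larger input must come first, so a swap is flagged only when
# `shape1` is the smaller one.
def _demo_inputs_swap_needed():
    assert _inputs_swap_needed('valid', (5,), (20,)) is True
    assert _inputs_swap_needed('valid', (20,), (5,)) is False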
def correlate(in1, in2, mode='full', method='auto'):
r"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
See Also
--------
choose_conv_method : contains more documentation on `method`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as::
z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])
This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``
then
.. math::
z[k] = (x * y)(k - N + 1)
= \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}
for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`
where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
and :math:`y_m` is 0 when m is outside the range of y.
``method='fft'`` only works for numerical arrays as it relies on
`fftconvolve`. In certain cases (i.e., arrays of objects or when
rounding integers can lose precision), ``method='direct'`` is always used.
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0:
return in1 * in2.conj()
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# this either calls fftconvolve or this function with method=='direct'
if method in ('fft', 'auto'):
return convolve(in1, _reverse_and_conj(in2), mode, method)
elif method == 'direct':
# fastpath to faster numpy.correlate for 1d inputs when possible
if _np_conv_ok(in1, in2, mode):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward if mode == 'full'. Also, it fails
# with 'valid' mode if in2 is larger than in1, so swap those, too.
# Don't swap inputs for 'same' mode, since shape of in1 matters.
swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
_inputs_swap_needed(mode, in1.shape, in2.shape))
if swapped_inputs:
in1, in2 = in2, in1
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
if swapped_inputs:
# Reverse and conjugate to undo the effect of swapping inputs
z = _reverse_and_conj(z)
return z
else:
raise ValueError("Acceptable method flags are 'auto',"
" 'direct', or 'fft'.")
def _centered(arr, newshape):
# Return the center newshape portion of the array.
newshape = asarray(newshape)
currshape = array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
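# Hedged example (editor's addition, not in the original source): _centered
# trims a 'full'-size result back to the centered window of the requested
# shape.
def _demo_centered():
    assert np.array_equal(_centered(np.arange(7), (3,)), [2, 3, 4])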
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
As of v0.19, `convolve` automatically chooses this method or the direct
method based on an estimation of which is faster.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse.
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
    elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
np.issubdtype(in2.dtype, np.complexfloating))
shape = s1 + s2 - 1
# Check that input sizes are compatible with 'valid' mode
if _inputs_swap_needed(mode, s1, s2):
# Convolution is commutative; order doesn't have any effect on output
in1, s1, in2, s2 = in2, s2, in1, s1
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
sp1 = np.fft.rfftn(in1, fshape)
sp2 = np.fft.rfftn(in2, fshape)
ret = (np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy())
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
sp1 = fftpack.fftn(in1, fshape)
sp2 = fftpack.fftn(in2, fshape)
ret = fftpack.ifftn(sp1 * sp2)[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _numeric_arrays(arrays, kinds='buifc'):
"""
See if a list of arrays are all numeric.
Parameters
----------
    arrays : ndarray or list of ndarrays
        Arrays to check for numeric dtypes.
    kinds : str, optional
        The allowed ``dtype.kind`` characters. If the dtype kind of every
        array is in this string the function returns True, otherwise it
        returns False.
"""
    if isinstance(arrays, ndarray):
return arrays.dtype.kind in kinds
for array_ in arrays:
if array_.dtype.kind not in kinds:
return False
return True
def _prod(iterable):
"""
Product of a list of numbers.
Faster than np.prod for short lists like array shapes.
"""
product = 1
for x in iterable:
product *= x
return product
def _fftconv_faster(x, h, mode):
"""
See if using `fftconvolve` or `_correlateND` is faster. The boolean value
returned depends on the sizes and shapes of the input values.
    The big O ratios were found to hold across different machines, which
    makes sense as it's the ratio of the two costs that matters (the
    effective speed of the machine appears in both big O constants).
    Regardless, the constants were tuned on an early 2015 MacBook Pro with
    8 GB RAM and an Intel i5 processor.
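    Examples
    --------
    An illustrative check on tiny inputs, where the heuristic should favor
    the direct method (threshold values come from the tuned constants
    above; this is a sketch, not part of the public API):
    >>> x = np.ones(10)
    >>> h = np.ones(3)
    >>> _fftconv_faster(x, h, 'full')
    False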
"""
if mode == 'full':
out_shape = [n + k - 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 10963.92823819 if x.ndim == 1 else 8899.1104874
elif mode == 'same':
out_shape = x.shape
if x.ndim == 1:
if h.size <= x.size:
big_O_constant = 7183.41306773
else:
big_O_constant = 856.78174111
else:
big_O_constant = 34519.21021589
elif mode == 'valid':
out_shape = [n - k + 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 41954.28006344 if x.ndim == 1 else 66453.24316434
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# see whether the Fourier transform convolution method or the direct
# convolution method is faster (discussed in scikit-image PR #1792)
direct_time = (x.size * h.size * _prod(out_shape))
fft_time = sum(n * math.log(n) for n in (x.shape + h.shape +
tuple(out_shape)))
return big_O_constant * fft_time < direct_time
def _reverse_and_conj(x):
"""
Reverse array `x` in all dimensions and perform the complex conjugate
"""
    reverse = (slice(None, None, -1),) * x.ndim
    return x[reverse].conj()
def _np_conv_ok(volume, kernel, mode):
"""
See if numpy supports convolution of `volume` and `kernel` (i.e. both are
1D ndarrays and of the appropriate shape). Numpy's 'same' mode uses the
size of the larger input, while Scipy's uses the size of the first input.
Invalid mode strings will return False and be caught by the calling func.
"""
if volume.ndim == kernel.ndim == 1:
if mode in ('full', 'valid'):
return True
elif mode == 'same':
return volume.size >= kernel.size
else:
return False
def _timeit_fast(stmt="pass", setup="pass", repeat=3):
"""
Returns the time the statement/function took, in seconds.
Faster, less precise version of IPython's timeit. `stmt` can be a statement
written as a string or a callable.
Will do only 1 loop (like IPython's timeit) with no repetitions
(unlike IPython) for very slow functions. For fast functions, only does
enough loops to take 5 ms, which seems to produce similar results (on
Windows at least), and avoids doing an extraneous cycle that isn't
measured.
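    Examples
    --------
    A minimal sketch of typical use; the measured time is machine
    dependent, so only its sign is checked here:
    >>> t = _timeit_fast(lambda: sum(range(100)))
    >>> t > 0
    True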
"""
timer = timeit.Timer(stmt, setup)
# determine number of calls per rep so total time for 1 rep >= 5 ms
x = 0
for p in range(0, 10):
number = 10**p
x = timer.timeit(number) # seconds
if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one
break
if x > 1: # second
# If it's macroscopic, don't bother with repetitions
best = x
else:
number *= 10
r = timer.repeat(repeat, number)
best = min(r)
sec = best / number
return sec
def choose_conv_method(in1, in2, mode='full', measure=False):
"""
Find the fastest convolution/correlation method.
This primarily exists to be called during the ``method='auto'`` option in
`convolve` and `correlate`, but can also be used when performing many
convolutions of the same input shapes and dtypes, determining
which method to use for all of them, either to avoid the overhead of the
'auto' option or to use accurate real-world measurements.
Parameters
----------
in1 : array_like
The first argument passed into the convolution function.
in2 : array_like
The second argument passed into the convolution function.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
measure : bool, optional
If True, run and time the convolution of `in1` and `in2` with both
methods and return the fastest. If False (default), predict the fastest
method using precomputed values.
Returns
-------
method : str
A string indicating which convolution method is fastest, either
'direct' or 'fft'
times : dict, optional
A dictionary containing the times (in seconds) needed for each method.
This value is only returned if ``measure=True``.
See Also
--------
convolve
correlate
Notes
-----
For large n, ``measure=False`` is accurate and can quickly determine the
fastest method to perform the convolution. However, this is not as
accurate for small n (when any dimension in the input or output is small).
In practice, we found that this function estimates the faster method up to
a multiplicative factor of 5 (i.e., the estimated method is *at most* 5
times slower than the fastest method). The estimation values were tuned on
an early 2015 MacBook Pro with 8GB RAM but we found that the prediction
held *fairly* accurately across different machines.
If ``measure=True``, time the convolutions. Because this function uses
`fftconvolve`, an error will be thrown if it does not support the inputs.
    There are cases when `fftconvolve` supports the inputs but this function
    returns `direct` (e.g., to guard against loss of integer precision when
    the result is represented in floating point).
.. versionadded:: 0.19
Examples
--------
Estimate the fastest method for a given input:
>>> from scipy import signal
>>> a = np.random.randn(1000)
>>> b = np.random.randn(1000000)
>>> method = signal.choose_conv_method(a, b, mode='same')
>>> method
'fft'
This can then be applied to other arrays of the same dtype and shape:
>>> c = np.random.randn(1000)
>>> d = np.random.randn(1000000)
>>> # `method` works with correlate and convolve
>>> corr1 = signal.correlate(a, b, mode='same', method=method)
>>> corr2 = signal.correlate(c, d, mode='same', method=method)
>>> conv1 = signal.convolve(a, b, mode='same', method=method)
>>> conv2 = signal.convolve(c, d, mode='same', method=method)
"""
volume = asarray(in1)
kernel = asarray(in2)
if measure:
times = {}
for method in ['fft', 'direct']:
times[method] = _timeit_fast(lambda: convolve(volume, kernel,
mode=mode, method=method))
chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
return chosen_method, times
    # fftconvolve doesn't support complex256 (complex192 on 32-bit platforms)
fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
if hasattr(np, fftconv_unsup):
if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
return 'direct'
# for integer input,
# catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return 'direct'
if _numeric_arrays([volume, kernel], kinds='b'):
return 'direct'
if _numeric_arrays([volume, kernel]):
if _fftconv_faster(volume, kernel, mode):
return 'fft'
return 'direct'
def convolve(in1, in2, mode='full', method='auto'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the convolution.
``direct``
The convolution is determined directly from sums, the definition of
convolution.
``fft``
The Fourier Transform is used to perform the convolution by calling
`fftconvolve`.
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
choose_conv_method : chooses the fastest appropriate convolution method
fftconvolve
Notes
-----
By default, `convolve` and `correlate` use ``method='auto'``, which calls
`choose_conv_method` to choose the fastest method using pre-computed
values (`choose_conv_method` can also measure real-world timing with a
keyword argument). Because `fftconvolve` relies on floating point numbers,
there are certain constraints that may force `method=direct` (more detail
in `choose_conv_method` docstring).
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
elif volume.ndim != kernel.ndim:
raise ValueError("volume and kernel should have the same "
"dimensionality")
if _inputs_swap_needed(mode, volume.shape, kernel.shape):
# Convolution is commutative; order doesn't have any effect on output
volume, kernel = kernel, volume
if method == 'auto':
method = choose_conv_method(volume, kernel, mode=mode)
if method == 'fft':
out = fftconvolve(volume, kernel, mode=mode)
result_type = np.result_type(volume, kernel)
if result_type.kind in {'u', 'i'}:
out = np.around(out)
return out.astype(result_type)
elif method == 'direct':
# fastpath to faster numpy.convolve for 1d inputs when possible
if _np_conv_ok(volume, kernel, mode):
return np.convolve(volume, kernel, mode)
return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
else:
raise ValueError("Acceptable method flags are 'auto',"
" 'direct', or 'fft'.")
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
    Perform an order filter on the input array `a`. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
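    Examples
    --------
    A small 1-D smoke test; the window is zero-padded at the edges
    (input values illustrative, not from the original docs):
    >>> from scipy import signal
    >>> np.allclose(signal.medfilt([2., 80., 6., 3., 1.]),
    ...             [2., 6., 6., 3., 1.])
    True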
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * volume.ndim
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(volume.ndim):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
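    Examples
    --------
    An illustrative call on random data; only the shape is checked here,
    since the output depends on the estimated local statistics:
    >>> from scipy import signal
    >>> rng = np.random.RandomState(0)
    >>> im = rng.randn(16, 16)
    >>> signal.wiener(im, mysize=5).shape
    (16, 16)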
"""
im = asarray(im)
if mysize is None:
mysize = [3] * im.ndim
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') /
product(mysize, axis=0) - lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> ascent = misc.ascent()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(ascent, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('convolve2d inputs must both be 2D arrays')
if _inputs_swap_needed(mode, in1.shape, in2.shape):
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('correlate2d inputs must both be 2D arrays')
swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
if swapped_inputs:
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)
if swapped_inputs:
out = out[::-1, ::-1]
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
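    Examples
    --------
    A quick smoke test on a 3x3 array (values illustrative); the window
    is zero-padded at the borders, and the centre pixel sees all nine
    input values:
    >>> from scipy import signal
    >>> x = np.arange(9, dtype=np.float64).reshape(3, 3)
    >>> signal.medfilt2d(x, kernel_size=3)[1, 1]
    4.0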
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
sosfiltfilt: A forward-backward filter using second-order sections.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
- a[1]*y[n-1] - ... - a[N]*y[n-N]
where `M` is the degree of the numerator, `N` is the degree of the
denominator, and `n` is the sample number. It is implemented using
the following difference equations (assuming M = N)::
a[0]*y[n] = b[0] * x[n] + d[0][n-1]
d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
...
d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
d[N-1][n] = b[N] * x[n] - a[N] * y[n]
where `d` are the state variables.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -M
b[0] + b[1]z + ... + b[M] z
Y(z) = -------------------------------- X(z)
-1 -N
a[0] + a[1]z + ... + a[N] z
Examples
--------
Generate a noisy signal to be filtered:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 201)
>>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
... 0.1*np.sin(2*np.pi*1.25*t + 1) +
... 0.18*np.cos(2*np.pi*3.85*t))
>>> xn = x + np.random.randn(len(t)) * 0.08
Create an order 3 lowpass butterworth filter:
>>> b, a = signal.butter(3, 0.05)
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
Apply the filter again, to have a result filtered at an order the same as
filtfilt:
>>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
Use filtfilt to apply the filter:
>>> y = signal.filtfilt(b, a, xn)
Plot the original signal and the various filtered versions:
    >>> plt.figure()
>>> plt.plot(t, xn, 'b', alpha=0.75)
>>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
>>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
... 'filtfilt'), loc='best')
>>> plt.grid(True)
>>> plt.show()
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
        if b.ndim != 1 or a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of
# singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[ind] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter given input and output vectors.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
where ``K = max(M, N)``.
See Also
--------
lfilter, lfilter_zi
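    Examples
    --------
    A minimal sketch: choose initial conditions so that filtering a
    constant input with a unity-DC-gain filter produces no startup
    transient (the filter design here is illustrative):
    >>> from scipy import signal
    >>> b, a = signal.butter(2, 0.25)
    >>> zi = signal.lfiltic(b, a, y=[1., 1.], x=[1., 1.])
    >>> y, zf = signal.lfilter(b, a, np.ones(5), zi=zi)
    >>> np.allclose(y, 1.0)
    True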
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal`` using inverse filtering.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See Also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = zeros(N - D + 1, float)
input[0] = 1
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
See Also
--------
scipy.fftpack.hilbert : Return Hilbert transform of a periodic sequence x.
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
    --------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
    The amplitude envelope is given by the magnitude of the analytic signal.
    The instantaneous frequency can be obtained by differentiating the
    instantaneous phase with respect to time. The instantaneous phase
    corresponds to the phase angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = (np.diff(instantaneous_phase) /
... (2.0*np.pi) * fs)
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
Processing, Third Edition, 2009. Chapter 12.
ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fftpack.fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if x.ndim > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = fftpack.ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
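    Examples
    --------
    A basic shape and dtype check on random data (illustrative only):
    >>> from scipy.signal import hilbert2
    >>> x = np.random.randn(8, 8)
    >>> xa = hilbert2(x)
    >>> xa.shape, np.iscomplexobj(xa)
    ((8, 8), True)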
"""
x = atleast_2d(x)
if x.ndim > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fftpack.fft2(x, N, axes=(0, 1))
    # Build the one-sided spectral multiplier for each axis without
    # resorting to eval/exec.
    hs = []
    for N1 in N:
        h = zeros(N1, 'd')
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
        hs.append(h)
    h1, h2 = hs
h = h1[:, newaxis] * h2[newaxis, :]
k = x.ndim
while k > 2:
h = h[:, newaxis]
k -= 1
x = fftpack.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
Examples
--------
>>> from scipy import signal
>>> vals = [1, 4, 1+1.j, 3]
>>> p_sorted, indx = signal.cmplx_sort(vals)
>>> p_sorted
array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])
>>> indx
array([0, 2, 3, 1])
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
    pout = []
    mult = []
    indx = -1
    curp = p[0] + 5 * tol  # sentinel: never within `tol` of p[0]
    sameroots = []
    for k in range(len(p)):
        tr = p[k]
        if abs(tr - curp) < tol:
            # Same root as the previous one: merge them using `comproot`
            # and increase its multiplicity.
            sameroots.append(tr)
            curp = comproot(sameroots)
            pout[indx] = curp
            mult[indx] += 1
        else:
            # A new distinct root.
            pout.append(tr)
            curp = tr
            sameroots = [tr]
            indx += 1
            mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `invresz`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residue, invresz, unique_roots
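    Examples
    --------
    A round trip through `residue` (coefficients illustrative):
    >>> from scipy import signal
    >>> r, p, k = signal.residue([1, 3], [1, 3, 2])
    >>> b, a = signal.invres(r, p, k)
    >>> np.allclose(b, [1, 3]), np.allclose(a, [1, 3, 2])
    (True, True)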
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
        temp = []
        # Construct the polynomial which omits this root (as in `invresz`)
        for l in range(len(pout)):
            if l != k:
                temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `residuez`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, residuez, numpy.poly, unique_roots
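    Examples
    --------
    Expand ``(s + 3) / (s**2 + 3*s + 2)``, which has simple poles at -1
    and -2 (values illustrative):
    >>> from scipy import signal
    >>> r, p, k = signal.residue([1, 3], [1, 3, 2])
    >>> np.allclose(p, [-2, -1]), np.allclose(r, [-1, 2])
    (True, True)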
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n]) /
factorial(sig - m))
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `residue`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invresz, residue, unique_roots
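    Examples
    --------
    Expand ``1 / (1 - 1.5 z**-1 + 0.5 z**-2)``, which has simple poles at
    0.5 and 1 (values illustrative):
    >>> from scipy import signal
    >>> r, p, k = signal.residuez([1.], [1., -1.5, 0.5])
    >>> np.allclose(p, [0.5, 1.0]), np.allclose(r, [-1.0, 2.0])
    (True, True)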
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `invres`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residuez, unique_roots, invres
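    Examples
    --------
    A round trip through `residuez` (coefficients illustrative; the
    reconstructed numerator may carry trailing zeros):
    >>> from scipy import signal
    >>> r, p, k = signal.residuez([1.], [1., -1.5, 0.5])
    >>> b, a = signal.invresz(r, p, k)
    >>> np.allclose(b, [1., 0., 0.]), np.allclose(a, [1., -1.5, 0.5])
    (True, True)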
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
    If `window` is a function, then it is called with a vector of inputs
    indicating the frequency bins (i.e. ``fftpack.fftfreq(x.shape[axis])``).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fftpack.fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftpack.fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = fftpack.ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
W.shape = (Nx,)
sl = [slice(None)] * x.ndim
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
    if N % 2 == 0:
        # Special treatment when the retained number of points is even.
        # So far we have set Y[-N/2] = X[-N/2].
        if N < Nx:  # downsampling
            # Fold the component of X at +N/2 into the one at -N/2.
            sl[axis] = slice(N // 2, N // 2 + 1, None)
            Y[sl] += X[sl]
        elif N < num:  # upsampling
            # Split the component at -N/2 equally between -N/2 and +N/2.
            sl[axis] = slice(num - N // 2, num - N // 2 + 1, None)
            Y[sl] /= 2  # halve the component at -N/2
            temp = Y[sl]
            sl[axis] = slice(N // 2, N // 2 + 1, None)
            Y[sl] = temp  # set the component at +N/2 equal to it
y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0)):
"""
Resample `x` along the given axis using polyphase filtering.
The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. Values beyond the boundary of the signal are assumed to be zero
during the filtering step.
Parameters
----------
x : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : string, tuple, or array_like, optional
Desired window to use to design the low-pass filter, or the FIR filter
coefficients to employ. See below for details.
Returns
-------
resampled_x : array
The resampled array.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample : Resample up or down using the FFT method.
Notes
-----
This polyphase method will likely be faster than the Fourier method
in `scipy.signal.resample` when the number of samples is large and
prime, or when the number of samples is large and `up` and `down`
    share a large greatest common divisor. The length of the FIR
filter used will depend on ``max(up, down) // gcd(up, down)``, and
the number of operations during polyphase filtering will depend on
the filter length and `down` (see `scipy.signal.upfirdn` for details).
The argument `window` specifies the FIR low-pass filter design.
If `window` is an array_like it is assumed to be the FIR filter
coefficients. Note that the FIR filter is applied after the upsampling
step, so it should be designed to operate on a signal at a sampling
frequency higher than the original by a factor of `up//gcd(up, down)`.
This function's output will be centered with respect to this array, so it
is best to pass a symmetric filter with an odd number of samples if, as
is usually the case, a zero-phase filter is desired.
For any other type of `window`, the functions `scipy.signal.get_window`
and `scipy.signal.firwin` are called to generate the appropriate filter
coefficients.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * down / float(up)``.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle for the FFT method, and gets closer to zero
for the polyphase method:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_fft = signal.resample(y, 100)
>>> f_poly = signal.resample_poly(y, 100, 20)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
>>> plt.plot(x, y, 'ko-')
>>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
>>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
>>> plt.show()
"""
x = asarray(x)
if up != int(up):
raise ValueError("up must be an integer")
if down != int(down):
raise ValueError("down must be an integer")
up = int(up)
down = int(down)
if up < 1 or down < 1:
raise ValueError('up and down must be >= 1')
    # Determine our up and down factors.
    # Use a rational approximation to save computation time on really long
    # signals.
g_ = gcd(up, down)
up //= g_
down //= g_
if up == down == 1:
return x.copy()
n_out = x.shape[axis] * up
n_out = n_out // down + bool(n_out % down)
if isinstance(window, (list, np.ndarray)):
window = array(window) # use array to force a copy (we modify it)
if window.ndim > 1:
raise ValueError('window must be 1-D')
half_len = (window.size - 1) // 2
h = window
else:
# Design a linear-phase low-pass FIR filter
max_rate = max(up, down)
f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
half_len = 10 * max_rate # reasonable cutoff for our sinc-like function
h = firwin(2 * half_len + 1, f_c, window=window)
h *= up
# Zero-pad our filter to put the output samples at the center
n_pre_pad = (down - half_len % down)
n_post_pad = 0
n_pre_remove = (half_len + n_pre_pad) // down
# We should rarely need to do this given our filter lengths...
while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],
up, down) < n_out + n_pre_remove:
n_post_pad += 1
h = np.concatenate((np.zeros(n_pre_pad), h, np.zeros(n_post_pad)))
n_pre_remove_end = n_pre_remove + n_out
# filter then remove excess
y = upfirdn(h, x, up, down, axis=axis)
keep = [slice(None), ]*x.ndim
keep[axis] = slice(n_pre_remove, n_pre_remove_end)
return y[keep]
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
:doi:`10.1063/1.3670512`.
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
:doi:`10.1007/s00422-013-0560-8`.
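    Examples
    --------
    A minimal sketch: events that fire exactly once per period are
    perfectly synchronized with that period:

    >>> from scipy.signal import vectorstrength
    >>> strength, phase = vectorstrength([0.0, 1.0, 2.0, 3.0], 1.0)
    >>> np.allclose([strength, phase], [1.0, 0.0])
    True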
'''
events = asarray(events)
period = asarray(period)
    if events.ndim > 1:
        raise ValueError('events cannot have more than 1 dimension')
    if period.ndim > 1:
        raise ValueError('period cannot have more than 1 dimension')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
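
    A sketch of the `bp` (break point) option: separate linear fits are
    removed from the two halves of a piecewise-linear signal (names here
    are illustrative; the exact residual depends on the noise):

    >>> x2 = np.r_[np.linspace(0, 2, 500), np.linspace(2, 0, 500)] + noise
    >>> detrended = signal.detrend(x2, bp=500)
    >>> bool(np.abs(detrended.mean()) < 0.1)
    True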
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, _prod(dshape) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Construct initial conditions for lfilter for step response steady-state.
Compute an initial state `zi` for the `lfilter` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
See Also
--------
lfilter, lfiltic, filtfilt
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
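
    As a check of the Notes above, a minimal sketch (assuming ``a[0] == 1``,
    which holds for the `butter` output used here) that builds the
    state-space matrices explicitly and solves ``zi = A*zi + B``:

    >>> import numpy as np
    >>> from scipy import linalg
    >>> A = linalg.companion(a).T
    >>> B = b[1:] - a[1:] * b[0]
    >>> zi_direct = np.linalg.solve(np.eye(len(a) - 1) - A, B)
    >>> np.allclose(zi_direct, zi)
    True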
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
    if a.size < 1 or a[0] == 0.0:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Construct initial conditions for sosfilt for step response steady-state.
Compute an initial state `zi` for the `sosfilt` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
    .. [1] F. Gustafsson. Determining the initial states in forward-backward
       filtering. IEEE Transactions on Signal Processing, 44(4):988-992, 1996.
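    Examples
    --------
    A minimal sketch (private helper); the public entry point is
    ``filtfilt(..., method='gust')``, which delegates here:

    >>> import numpy as np
    >>> from scipy.signal import butter, filtfilt
    >>> b, a = butter(3, 0.25)
    >>> x = np.random.randn(50)
    >>> y, x0, x1 = _filtfilt_gust(b, a, x)
    >>> np.allclose(y, filtfilt(b, a, x, method='gust'))
    True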
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
    if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
    if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
Apply a digital filter forward and backward to a signal.
This function applies a linear digital filter twice, once forward and
once backwards. The combined filter has zero phase and a filter order
twice that of the original.
The function provides options for handling the edges of the signal.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
Notes
-----
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
    The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
    .. [1] F. Gustafsson, "Determining the initial states in forward-backward
       filtering", IEEE Transactions on Signal Processing, Vol. 44,
       pp. 988-992, 1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# method == "pad"
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=max(len(a), len(b)))
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections.
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = signal.unit_impulse(700)
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
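
    As a sketch of the Notes above, the same output can be obtained by
    cascading `lfilter` over the individual second-order sections (zero
    initial conditions assumed):

    >>> y_cascade = x.astype(float)
    >>> for section in sos:
    ...     y_cascade = signal.lfilter(section[:3], section[3:], y_cascade)
    >>> np.allclose(y_cascade, y_sos)
    True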
"""
x = np.asarray(x)
sos, n_sections = _validate_sos(sos)
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r, got %r.' %
(axis, x.shape, n_sections, x_zi_shape, zi.shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward digital filter using cascaded second-order sections.
See `filtfilt` for more complete information about this method.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is::
3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
(sos[:, 5] == 0).sum()))
The extra subtraction at the end attempts to compensate for poles
and zeros at the origin (e.g. for odd-order filters) to yield
equivalent estimates of `padlen` to those of `filtfilt` for
second-order section filters built with `scipy.signal` functions.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
filtfilt, sosfilt, sosfilt_zi, sosfreqz
Notes
-----
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy.signal import sosfiltfilt, butter
>>> import matplotlib.pyplot as plt
Create an interesting signal to filter.
>>> n = 201
>>> t = np.linspace(0, 1, n)
>>> np.random.seed(123)
>>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*np.random.randn(n)
Create a lowpass Butterworth filter, and use it to filter `x`.
>>> sos = butter(4, 0.125, output='sos')
>>> y = sosfiltfilt(sos, x)
For comparison, apply an 8th order filter using `sosfilt`. The filter
is initialized using the mean of the first four values of `x`.
>>> from scipy.signal import sosfilt, sosfilt_zi
>>> sos8 = butter(8, 0.125, output='sos')
>>> zi = x[:4].mean() * sosfilt_zi(sos8)
>>> y2, zo = sosfilt(sos8, x, zi=zi)
Plot the results. Note that the phase of `y` matches the input, while
`y2` has a significant phase delay.
>>> plt.plot(t, x, alpha=0.5, label='x(t)')
>>> plt.plot(t, y, label='y(t)')
>>> plt.plot(t, y2, label='y2(t)')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.xlabel('t')
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
"""
Downsample the signal after applying an anti-aliasing filter.
    By default, an order 8 Chebyshev type I filter is used. If `ftype` is
    'fir', a Hamming-windowed FIR filter of order ``20 * q`` is used instead.
Parameters
----------
x : array_like
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor. When using IIR downsampling, it is recommended
to call `decimate` multiple times for downsampling factors higher than
13.
n : int, optional
The order of the filter (1 less than the length for 'fir'). Defaults to
8 for 'iir' and 20 times the downsampling factor for 'fir'.
ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
If 'iir' or 'fir', specifies the type of lowpass filter. If an instance
of an `dlti` object, uses that object to filter before downsampling.
axis : int, optional
The axis along which to decimate.
zero_phase : bool, optional
Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
when using an IIR filter, and shifting the outputs back by the filter's
group delay when using an FIR filter. The default value of ``True`` is
recommended, since a phase shift is generally not desired.
.. versionadded:: 0.18.0
Returns
-------
y : ndarray
The down-sampled signal.
See Also
--------
resample : Resample up or down using the FFT method.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The ``zero_phase`` keyword was added in 0.18.0.
The possibility to use instances of ``dlti`` as ``ftype`` was added in
0.18.0.
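    Examples
    --------
    A minimal sketch: decimating a 1000-sample signal by ``q = 4`` keeps
    every fourth sample after lowpass filtering, so ``ceil(1000 / 4) = 250``
    samples remain:

    >>> from scipy import signal
    >>> t = np.linspace(0, 1, 1000, endpoint=False)
    >>> x = np.sin(2 * np.pi * 5 * t)
    >>> signal.decimate(x, 4).shape
    (250,)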
"""
x = asarray(x)
q = operator.index(q)
if n is not None:
n = operator.index(n)
if ftype == 'fir':
if n is None:
half_len = 10 * q # reasonable cutoff for our sinc-like function
n = 2 * half_len
b, a = firwin(n+1, 1. / q, window='hamming'), 1.
elif ftype == 'iir':
if n is None:
n = 8
system = dlti(*cheby1(n, 0.05, 0.8 / q))
b, a = system.num, system.den
elif isinstance(ftype, dlti):
system = ftype._as_tf() # Avoids copying if already in TF form
b, a = system.num, system.den
else:
raise ValueError('invalid ftype')
sl = [slice(None)] * x.ndim
a = np.asarray(a)
if a.size == 1: # FIR case
b = b / a
if zero_phase:
y = resample_poly(x, 1, q, axis=axis, window=b)
else:
# upfirdn is generally faster than lfilter by a factor equal to the
# downsampling factor, since it only calculates the needed outputs
n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
y = upfirdn(b, x, up=1, down=q, axis=axis)
sl[axis] = slice(None, n_out, None)
else: # IIR case
if zero_phase:
y = filtfilt(b, a, x, axis=axis)
else:
y = lfilter(b, a, x, axis=axis)
sl[axis] = slice(None, None, q)
    return y[tuple(sl)]
# ---------------------------------------------------------------------------
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/_peak_finding.py
# ---------------------------------------------------------------------------
"""
Functions for identifying peaks in signals.
"""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from scipy._lib.six import xrange
from scipy.signal.wavelets import cwt, ricker
from scipy.stats import scoreatpercentile
from ._peak_finding_utils import (_argmaxima1d, _select_by_peak_distance,
_peak_prominences, _peak_widths)
__all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'peak_prominences',
'peak_widths', 'find_peaks', 'find_peaks_cwt']
def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
``comparator(data[n], data[n+1:n+order+1])`` is True.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take two arrays as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n,n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema : ndarray
Boolean array of the same shape as `data` that is True at an extrema,
False otherwise.
See also
--------
argrelmax, argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0)
array([False, False, True, False, False], dtype=bool)
"""
    if (int(order) != order) or (order < 1):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
        if not results.any():
            return results
return results
def argrelmin(data, axis=0, order=1, mode='clip'):
"""
Calculate the relative minima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative minima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema : tuple of ndarrays
Indices of the minima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
See Also
--------
argrelextrema, argrelmax, find_peaks
Notes
-----
    This function uses `argrelextrema` with np.less as comparator. Therefore,
    it requires a strict inequality on both sides of a value to consider it a
    minimum. This means flat minima (more than one sample wide) are not
    detected. For one-dimensional `data`, `find_peaks` can be used to detect
    all local minima, including flat ones, by calling it with negated `data`.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelmin
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelmin(x)
(array([1, 5]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelmin(y, axis=1)
(array([0, 2]), array([2, 1]))
"""
return argrelextrema(data, np.less, axis, order, mode)
def argrelmax(data, axis=0, order=1, mode='clip'):
"""
Calculate the relative maxima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative maxima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
See Also
--------
argrelextrema, argrelmin, find_peaks
Notes
-----
    This function uses `argrelextrema` with np.greater as comparator.
    Therefore, it requires a strict inequality on both sides of a value to
    consider it a maximum. This means flat maxima (more than one sample wide)
    are not detected. For one-dimensional `data`, `find_peaks` can be used to
    detect all local maxima, including flat ones.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelmax
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelmax(x)
(array([3, 6]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelmax(y, axis=1)
(array([0]), array([1]))
"""
return argrelextrema(data, np.greater, axis, order, mode)
def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take two arrays as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default is 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
See Also
--------
argrelmin, argrelmax
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelextrema
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelextrema(x, np.greater)
(array([3, 6]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelextrema(y, np.less, axis=1)
(array([0, 2]), array([2, 1]))
"""
results = _boolrelextrema(data, comparator,
axis, order, mode)
return np.where(results)
def peak_prominences(x, peaks, wlen=None):
"""
Calculate the prominence of each peak in a signal.
The prominence of a peak measures how much a peak stands out from the
surrounding baseline of the signal and is defined as the vertical distance
between the peak and its lowest contour line.
Parameters
----------
x : sequence
A signal with peaks.
peaks : sequence
Indices of peaks in `x`.
wlen : int or float, optional
        A window length in samples that optionally limits the evaluated area
        for each peak to a subset of `x`. The peak is always placed in the
        middle of the window; therefore, the given length is rounded up to
        the next odd integer. This parameter can speed up the calculation
        (see Notes).
Returns
-------
prominences : ndarray
The calculated prominences for each peak in `peaks`.
left_bases, right_bases : ndarray
The peaks' bases as indices in `x` to the left and right of each peak.
The higher base of each pair is a peak's lowest contour line.
Raises
------
ValueError
If an index in `peaks` does not point to a local maximum in `x`.
See Also
--------
find_peaks
Find peaks inside a signal based on peak properties.
peak_widths
Calculate the width of peaks.
Notes
-----
Strategy to compute a peak's prominence:
1. Extend a horizontal line from the current peak to the left and right
until the line either reaches the window border (see `wlen`) or
intersects the signal again at the slope of a higher peak. An
intersection with a peak of the same height is ignored.
2. On each side find the minimal signal value within the interval defined
above. These points are the peak's bases.
3. The higher one of the two bases marks the peak's lowest contour line. The
prominence can then be calculated as the vertical difference between the
       peak's height itself and its lowest contour line.
Searching for the peak's bases can be slow for large `x` with periodic
behavior because large chunks or even the full signal need to be evaluated
for the first algorithmic step. This evaluation area can be limited with the
parameter `wlen` which restricts the algorithm to a window around the
current peak and can shorten the calculation time if the window length is
short in relation to `x`.
However this may stop the algorithm from finding the true global contour
line if the peak's true bases are outside this window. Instead a higher
contour line is found within the restricted window leading to a smaller
calculated prominence. In practice this is only relevant for the highest set
of peaks in `x`. This behavior may even be used intentionally to calculate
"local" prominences.
.. warning::
This function may return unexpected results for data containing NaNs. To
avoid this, NaNs should either be removed or replaced.
.. versionadded:: 1.1.0
References
----------
.. [1] Wikipedia Article for Topographic Prominence:
https://en.wikipedia.org/wiki/Topographic_prominence
Examples
--------
>>> from scipy.signal import find_peaks, peak_prominences
>>> import matplotlib.pyplot as plt
Create a test signal with two overlayed harmonics
>>> x = np.linspace(0, 6 * np.pi, 1000)
>>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
Find all peaks and calculate prominences
>>> peaks, _ = find_peaks(x)
>>> prominences = peak_prominences(x, peaks)[0]
>>> prominences
array([1.24159486, 0.47840168, 0.28470524, 3.10716793, 0.284603 ,
0.47822491, 2.48340261, 0.47822491])
Calculate the height of each peak's contour line and plot the results
>>> contour_heights = x[peaks] - prominences
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.vlines(x=peaks, ymin=contour_heights, ymax=x[peaks])
>>> plt.show()
Let's evaluate a second example that demonstrates several edge cases for
one peak at index 5.
>>> x = np.array([0, 1, 0, 3, 1, 3, 0, 4, 0])
>>> peaks = np.array([5])
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
>>> peak_prominences(x, peaks) # -> (prominences, left_bases, right_bases)
(array([3.]), array([2]), array([6]))
Note how the peak at index 3 of the same height is not considered as a
border while searching for the left base. Instead two minima at 0 and 2
are found in which case the one closer to the evaluated peak is always
chosen. On the right side however the base must be placed at 6 because the
higher peak represents the right border to the evaluated area.
>>> peak_prominences(x, peaks, wlen=3.1)
(array([2.]), array([4]), array([6]))
Here we restricted the algorithm to a window from 3 to 7 (the length is 5
samples because `wlen` was rounded up to the next odd integer). Thus the
only two candidates in the evaluated area are the two neighbouring samples
and a smaller prominence is calculated.
"""
# Inner function expects `x` to be C-contiguous
x = np.asarray(x, order='C', dtype=np.float64)
if x.ndim != 1:
raise ValueError('`x` must have exactly one dimension')
peaks = np.asarray(peaks)
if peaks.size == 0:
# Empty arrays default to np.float64 but are valid input
peaks = np.array([], dtype=np.intp)
try:
# Safely convert to C-contiguous array of type np.intp
peaks = peaks.astype(np.intp, order='C', casting='safe',
subok=False, copy=False)
except TypeError:
raise TypeError("Cannot safely cast `peaks` to dtype('intp')")
if peaks.ndim != 1:
raise ValueError('`peaks` must have exactly one dimension')
if wlen is None:
wlen = -1 # Inner function expects int -> None == -1
elif 1 < wlen:
# Round up to next positive integer; rounding up to next odd integer
# happens implicitly inside the inner function
wlen = int(math.ceil(wlen))
else:
# Give feedback if wlen has unexpected value
        raise ValueError('`wlen` must be larger than 1, was ' + str(wlen))
return _peak_prominences(x, peaks, wlen)
def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None):
"""
Calculate the width of each peak in a signal.
This function calculates the width of a peak in samples at a relative
distance to the peak's height and prominence.
Parameters
----------
x : sequence
A signal with peaks.
peaks : sequence
Indices of peaks in `x`.
rel_height : float, optional
Chooses the relative height at which the peak width is measured as a
percentage of its prominence. 1.0 calculates the width of the peak at
its lowest contour line while 0.5 evaluates at half the prominence
height. Must be at least 0. See notes for further explanation.
prominence_data : tuple, optional
A tuple of three arrays matching the output of `peak_prominences` when
called with the same arguments `x` and `peaks`. This data is calculated
internally if not provided.
wlen : int, optional
A window length in samples passed to `peak_prominences` as an optional
argument for internal calculation of `prominence_data`. This argument
is ignored if `prominence_data` is given.
Returns
-------
widths : ndarray
The widths for each peak in samples.
width_heights : ndarray
        The height of the contour lines at which the `widths` were evaluated.
left_ips, right_ips : ndarray
Interpolated positions of left and right intersection points of a
horizontal line at the respective evaluation height.
Raises
------
ValueError
If `prominence_data` is supplied but doesn't satisfy the condition
``0 <= left_base <= peak <= right_base < x.shape[0]`` for each peak,
has the wrong dtype, is not C-contiguous or does not have the same
shape.
See Also
--------
find_peaks
Find peaks inside a signal based on peak properties.
peak_prominences
Calculate the prominence of peaks.
Notes
-----
The basic algorithm to calculate a peak's width is as follows:
* Calculate the evaluation height :math:`h_{eval}` with the formula
:math:`h_{eval} = h_{Peak} - P \\cdot R`, where :math:`h_{Peak}` is the
height of the peak itself, :math:`P` is the peak's prominence and
:math:`R` a positive ratio specified with the argument `rel_height`.
* Draw a horizontal line at the evaluation height to both sides, starting at
the peak's current vertical position until the lines either intersect a
slope, the signal border or cross the vertical position of the peak's
      base (see `peak_prominences` for a definition). For the first case,
intersection with the signal, the true intersection point is estimated
with linear interpolation.
* Calculate the width as the horizontal distance between the chosen
endpoints on both sides. As a consequence of this the maximal possible
width for each peak is the horizontal distance between its bases.
As shown above to calculate a peak's width its prominence and bases must be
known. You can supply these yourself with the argument `prominence_data`.
Otherwise they are internally calculated (see `peak_prominences`).
.. warning::
This function may return unexpected results for data containing NaNs. To
avoid this, NaNs should either be removed or replaced.
.. versionadded:: 1.1.0
Examples
--------
>>> from scipy.signal import chirp, find_peaks, peak_widths
>>> import matplotlib.pyplot as plt
Create a test signal with two overlayed harmonics
>>> x = np.linspace(0, 6 * np.pi, 1000)
>>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
Find all peaks and calculate their widths at the relative height of 0.5
(contour line at half the prominence height) and 1 (at the lowest contour
line at full prominence height).
>>> peaks, _ = find_peaks(x)
>>> results_half = peak_widths(x, peaks, rel_height=0.5)
>>> results_half[0] # widths
array([ 64.25172825, 41.29465463, 35.46943289, 104.71586081,
35.46729324, 41.30429622, 181.93835853, 45.37078546])
>>> results_full = peak_widths(x, peaks, rel_height=1)
>>> results_full[0] # widths
array([181.9396084 , 72.99284945, 61.28657872, 373.84622694,
61.78404617, 72.48822812, 253.09161876, 79.36860878])
    Plot signal, peaks and contour lines at which the widths were calculated
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.hlines(*results_half[1:], color="C2")
>>> plt.hlines(*results_full[1:], color="C3")
>>> plt.show()
"""
# Inner function expects `x` to be C-contiguous
x = np.asarray(x, order='C', dtype=np.float64)
if x.ndim != 1:
raise ValueError('`x` must have exactly one dimension')
peaks = np.asarray(peaks)
if peaks.size == 0:
# Empty arrays default to np.float64 but are valid input
peaks = np.array([], dtype=np.intp)
try:
# Safely convert to C-contiguous array of type np.intp
peaks = peaks.astype(np.intp, order='C', casting='safe',
subok=False, copy=False)
except TypeError:
raise TypeError("Cannot safely cast `peaks` to dtype('intp')")
if peaks.ndim != 1:
raise ValueError('`peaks` must have exactly one dimension')
if rel_height < 0.0:
raise ValueError('`rel_height` must be greater or equal to 0.0')
if prominence_data is None:
# Calculate prominence if not supplied and use wlen if supplied.
prominence_data = peak_prominences(x, peaks, wlen)
return _peak_widths(x, peaks, rel_height, *prominence_data)
def _unpack_condition_args(interval, x, peaks):
"""
Parse condition arguments for `find_peaks`.
Parameters
----------
interval : number or ndarray or sequence
Either a number or ndarray or a 2-element sequence of the former. The
first value is always interpreted as `imin` and the second, if supplied,
as `imax`.
x : ndarray
The signal with `peaks`.
peaks : ndarray
An array with indices used to reduce `imin` and / or `imax` if those are
arrays.
Returns
-------
imin, imax : number or ndarray or None
        Minimal and maximal value in `interval`.
Raises
------
    ValueError :
        If an interval border is given as an array and its size does not
        match the size of `x`.
Notes
-----
.. versionadded:: 1.1.0
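    Examples
    --------
    A minimal sketch of the parsing behavior (private helper):

    >>> x = np.zeros(5)
    >>> peaks = np.array([1, 3])
    >>> _unpack_condition_args(2, x, peaks)
    (2, None)
    >>> _unpack_condition_args((1, 3), x, peaks)
    (1, 3)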
"""
try:
imin, imax = interval
except (TypeError, ValueError):
imin, imax = (interval, None)
# Reduce arrays if arrays
if isinstance(imin, np.ndarray):
if imin.size != x.size:
raise ValueError('array size of lower interval border must match x')
imin = imin[peaks]
if isinstance(imax, np.ndarray):
if imax.size != x.size:
raise ValueError('array size of upper interval border must match x')
imax = imax[peaks]
return imin, imax
def _select_by_property(peak_properties, pmin, pmax):
"""
    Evaluate where the generic property of peaks conforms to an interval.
Parameters
----------
peak_properties : ndarray
An array with properties for each peak.
pmin : None or number or ndarray
Lower interval boundary for `peak_properties`. ``None`` is interpreted as
an open border.
pmax : None or number or ndarray
Upper interval boundary for `peak_properties`. ``None`` is interpreted as
an open border.
Returns
-------
keep : bool
        A boolean mask evaluating to true where `peak_properties` conforms
        to the interval.
See Also
--------
find_peaks
Notes
-----
.. versionadded:: 1.1.0
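    Examples
    --------
    A minimal sketch (private helper): keep peaks whose property lies in
    the closed interval [1.0, 2.0]:

    >>> props = np.array([0.5, 1.5, 2.5])
    >>> _select_by_property(props, 1.0, 2.0)
    array([False,  True, False])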
"""
keep = np.ones(peak_properties.size, dtype=bool)
if pmin is not None:
keep &= (pmin <= peak_properties)
if pmax is not None:
keep &= (peak_properties <= pmax)
return keep
def _select_by_peak_threshold(x, peaks, tmin, tmax):
"""
Evaluate which peaks fulfill the threshold condition.
Parameters
----------
x : ndarray
A one-dimensional array which is indexable by `peaks`.
peaks : ndarray
Indices of peaks in `x`.
tmin, tmax : scalar or ndarray or None
Minimal and / or maximal required thresholds. If supplied as ndarrays
their size must match `peaks`. ``None`` is interpreted as an open
border.
Returns
-------
keep : bool
A boolean mask evaluating to true where `peaks` fulfill the threshold
condition.
left_thresholds, right_thresholds : ndarray
        Array matching `peaks` containing the thresholds of each peak on
both sides.
Notes
-----
.. versionadded:: 1.1.0
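    Examples
    --------
    A minimal sketch (private helper): both peaks rise at least 1.5 above
    each of their direct neighbours:

    >>> x = np.array([0., 2., 0., 3., 1.])
    >>> peaks = np.array([1, 3])
    >>> keep, left, right = _select_by_peak_threshold(x, peaks, 1.5, None)
    >>> keep
    array([ True,  True])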
"""
# Stack thresholds on both sides to make min / max operations easier:
    # tmin is compared with the smaller, and tmax with the greater threshold
    # on each peak's side
stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1],
x[peaks] - x[peaks + 1]])
keep = np.ones(peaks.size, dtype=bool)
if tmin is not None:
min_thresholds = np.min(stacked_thresholds, axis=0)
keep &= (tmin <= min_thresholds)
if tmax is not None:
max_thresholds = np.max(stacked_thresholds, axis=0)
keep &= (max_thresholds <= tmax)
return keep, stacked_thresholds[0], stacked_thresholds[1]
def find_peaks(x, height=None, threshold=None, distance=None,
prominence=None, width=None, wlen=None, rel_height=0.5):
"""
Find peaks inside a signal based on peak properties.
This function takes a one-dimensional array and finds all local maxima by
simple comparison of neighbouring values. Optionally, a subset of these
peaks can be selected by specifying conditions for a peak's properties.
Parameters
----------
x : sequence
A signal with peaks.
height : number or ndarray or sequence, optional
Required height of peaks. Either a number, ``None``, an array matching
`x` or a 2-element sequence of the former. The first element is
always interpreted as the minimal and the second, if supplied, as the
maximal required height.
threshold : number or ndarray or sequence, optional
Required threshold of peaks, the vertical distance to its neighbouring
samples. Either a number, ``None``, an array matching `x` or a
2-element sequence of the former. The first element is always
interpreted as the minimal and the second, if supplied, as the maximal
required threshold.
distance : number, optional
Required minimal horizontal distance (>= 1) in samples between
neighbouring peaks. The removal order is explained in the notes section.
prominence : number or ndarray or sequence, optional
Required prominence of peaks. Either a number, ``None``, an array
matching `x` or a 2-element sequence of the former. The first
element is always interpreted as the minimal and the second, if
supplied, as the maximal required prominence.
width : number or ndarray or sequence, optional
Required width of peaks in samples. Either a number, ``None``, an array
matching `x` or a 2-element sequence of the former. The first
element is always interpreted as the minimal and the second, if
        supplied, as the maximal required width.
wlen : number, optional
Used for calculation of the peaks prominences, thus it is only used if
one of the arguments `prominence` or `width` is given. See argument
`wlen` in `peak_prominences` for a full description of its effects.
rel_height : float, optional
Used for calculation of the peaks width, thus it is only used if `width`
is given. See argument `rel_height` in `peak_widths` for a full
description of its effects.
Returns
-------
peaks : ndarray
Indices of peaks in `x` that satisfy all given conditions.
properties : dict
A dictionary containing properties of the returned peaks which were
calculated as intermediate results during evaluation of the specified
conditions:
* 'peak_heights'
If `height` is given, the height of each peak in `x`.
* 'left_thresholds', 'right_thresholds'
      If `threshold` is given, these keys contain a peak's vertical
distance to its neighbouring samples.
* 'peak_prominences', 'right_bases', 'left_bases'
If `prominence` is given, these keys are accessible. See
`peak_prominences` for a description of their content.
* 'width_heights', 'left_ips', 'right_ips'
If `width` is given, these keys are accessible. See `peak_widths`
for a description of their content.
To calculate and return properties without excluding peaks, provide the
open interval ``(None, None)`` as a value to the appropriate argument
(excluding `distance`).
See Also
--------
find_peaks_cwt
Find peaks using the wavelet transformation.
peak_prominences
Directly calculate the prominence of peaks.
peak_widths
Directly calculate the width of peaks.
Notes
-----
In the context of this function, a peak or local maximum is defined as any
sample whose two direct neighbours have a smaller amplitude. For flat peaks
(more than one sample of equal amplitude wide) the index of the middle
sample is returned (rounded down in case the number of samples is even).
For noisy signals the peak locations can be off because the noise might
change the position of local maxima. In those cases consider smoothing the
signal before searching for peaks or use other peak finding and fitting
methods (like `find_peaks_cwt`).
Some additional comments on specifying conditions:
* Almost all conditions (excluding `distance`) can be given as half-open or
      closed intervals, e.g. ``1`` or ``(1, None)`` defines the half-open
interval :math:`[1, \\infty]` while ``(None, 1)`` defines the interval
:math:`[-\\infty, 1]`. The open interval ``(None, None)`` can be specified
as well, which returns the matching properties without exclusion of peaks.
* The border is always included in the interval used to select valid peaks.
* For several conditions the interval borders can be specified with
arrays matching `x` in shape, which enables dynamic constraints based on
the sample position.
* The order of arguments given in the function definition above mirrors the
actual order in which conditions are evaluated. In most cases this order
is the fastest one because faster operations are applied first to reduce
the number of peaks that need to be evaluated later.
* Satisfying the distance condition is accomplished by iterating over all
peaks in descending order based on their height and removing all lower
peaks that are too close.
* Use `wlen` to reduce the time it takes to evaluate the conditions for
`prominence` or `width` if `x` is large or has many local maxima
(see `peak_prominences`).
.. warning::
This function may return unexpected results for data containing NaNs. To
avoid this, NaNs should either be removed or replaced.
.. versionadded:: 1.1.0
Examples
--------
To demonstrate this function's usage we use a signal `x` supplied with
SciPy (see `scipy.misc.electrocardiogram`). Let's find all peaks (local
maxima) in `x` whose amplitude lies above 0.
>>> import matplotlib.pyplot as plt
>>> from scipy.misc import electrocardiogram
>>> from scipy.signal import find_peaks
>>> x = electrocardiogram()[2000:4000]
>>> peaks, _ = find_peaks(x, height=0)
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.plot(np.zeros_like(x), "--", color="gray")
>>> plt.show()
We can select peaks below 0 with ``height=(None, 0)`` or use arrays matching
`x` in size to reflect a changing condition for different parts of the
signal.
>>> border = np.sin(np.linspace(0, 3 * np.pi, x.size))
>>> peaks, _ = find_peaks(x, height=(-border, border))
>>> plt.plot(x)
>>> plt.plot(-border, "--", color="gray")
>>> plt.plot(border, ":", color="gray")
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
Another useful condition for periodic signals can be given with the
`distance` argument. In this case we can easily select the positions of
QRS complexes within the electrocardiogram (ECG) by demanding a distance of
at least 150 samples.
>>> peaks, _ = find_peaks(x, distance=150)
>>> np.diff(peaks)
array([186, 180, 177, 171, 177, 169, 167, 164, 158, 162, 172])
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
Especially for noisy signals peaks can be easily grouped by their
prominence (see `peak_prominences`). E.g. we can select all peaks except
for the mentioned QRS complexes by limiting the allowed prominence to at
most 0.6.
>>> peaks, properties = find_peaks(x, prominence=(None, 0.6))
>>> properties["prominences"].max()
0.5049999999999999
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
And finally let's examine a different section of the ECG which contains
beat forms of different shape. To select only the atypical heart beats we
combine two conditions: a minimal prominence of 1 and width of at least 20
samples.
>>> x = electrocardiogram()[17000:18000]
>>> peaks, properties = find_peaks(x, prominence=1, width=20)
>>> properties["prominences"], properties["widths"]
(array([1.495, 2.3 ]), array([36.93773946, 39.32723577]))
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.vlines(x=peaks, ymin=x[peaks] - properties["prominences"],
... ymax = x[peaks], color = "C1")
>>> plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"],
... xmax=properties["right_ips"], color = "C1")
>>> plt.show()
"""
# _argmaxima1d expects array of dtype 'float64'
x = np.asarray(x, dtype=np.float64)
if x.ndim != 1:
raise ValueError('`x` must have exactly one dimension')
if distance is not None and distance < 1:
raise ValueError('`distance` must be greater than or equal to 1')
peaks = _argmaxima1d(x)
properties = {}
if height is not None:
# Evaluate height condition
peak_heights = x[peaks]
hmin, hmax = _unpack_condition_args(height, x, peaks)
keep = _select_by_property(peak_heights, hmin, hmax)
peaks = peaks[keep]
properties["peak_heights"] = peak_heights[keep]
if threshold is not None:
# Evaluate threshold condition
tmin, tmax = _unpack_condition_args(threshold, x, peaks)
keep, left_thresholds, right_thresholds = _select_by_peak_threshold(
x, peaks, tmin, tmax)
peaks = peaks[keep]
properties["left_thresholds"] = left_thresholds
properties["right_thresholds"] = right_thresholds
properties = {key: array[keep] for key, array in properties.items()}
if distance is not None:
# Evaluate distance condition
keep = _select_by_peak_distance(peaks, x[peaks], distance)
peaks = peaks[keep]
properties = {key: array[keep] for key, array in properties.items()}
if prominence is not None or width is not None:
# Calculate prominence (required for both conditions)
properties.update(zip(
['prominences', 'left_bases', 'right_bases'],
peak_prominences(x, peaks, wlen=wlen)
))
if prominence is not None:
# Evaluate prominence condition
pmin, pmax = _unpack_condition_args(prominence, x, peaks)
keep = _select_by_property(properties['prominences'], pmin, pmax)
peaks = peaks[keep]
properties = {key: array[keep] for key, array in properties.items()}
if width is not None:
# Calculate widths
properties.update(zip(
['widths', 'width_heights', 'left_ips', 'right_ips'],
peak_widths(x, peaks, rel_height, (properties['prominences'],
properties['left_bases'],
properties['right_bases']))
))
# Evaluate width condition
wmin, wmax = _unpack_condition_args(width, x, peaks)
keep = _select_by_property(properties['widths'], wmin, wmax)
peaks = peaks[keep]
properties = {key: array[keep] for key, array in properties.items()}
return peaks, properties
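# Hedged sketch (not part of the original module): the condition filters above
# all follow the same pattern -- build a boolean mask from an open/closed
# interval, then prune `peaks` and every accumulated property array with it.
# `_demo_interval_selection` is a hypothetical name added for illustration.
def _demo_interval_selection():
    import numpy as np
    values = np.array([0.2, 1.5, 3.0, 0.9])
    vmin, vmax = 1.0, None  # equivalent to the half-open interval (1.0, None)
    keep = np.ones(values.size, dtype=bool)
    if vmin is not None:
        keep &= (vmin <= values)   # interval borders are included, as documented
    if vmax is not None:
        keep &= (values <= vmax)
    return values[keep]            # -> array([1.5, 3. ])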
def _identify_ridge_lines(matr, max_distances, gap_thresh):
"""
Identify ridges in the 2-D matrix.
Expect that the width of the wavelet feature increases with increasing row
number.
Parameters
----------
matr : 2-D ndarray
Matrix in which to identify ridge lines.
max_distances : 1-D sequence
At each row, a ridge line is only connected
if the relative max at row[n] is within
`max_distances`[n] from the relative max at row[n+1].
gap_thresh : int
If a relative maximum is not found within `max_distances`,
there will be a gap. A ridge line is discontinued if
there are more than `gap_thresh` points without connecting
a new relative maximum.
Returns
-------
ridge_lines : tuple
Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the
ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none
found. Each ridge-line will be sorted by row (increasing), but the
order of the ridge lines is not specified.
References
----------
Bioinformatics (2006) 22 (17): 2059-2065.
:doi:`10.1093/bioinformatics/btl355`
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
Examples
--------
>>> data = np.random.rand(5,5)
>>> ridge_lines = _identify_ridge_lines(data, 1, 1)
Notes
-----
This function is intended to be used in conjunction with `cwt`
as part of `find_peaks_cwt`.
"""
if len(max_distances) < matr.shape[0]:
raise ValueError('`max_distances` must have at least as many rows '
'as `matr`')
all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
# Highest row for which there are any relative maxima
has_relmax = np.where(all_max_cols.any(axis=1))[0]
if len(has_relmax) == 0:
return []
start_row = has_relmax[-1]
# Each ridge line is a 3-tuple:
# rows, cols, gap number
ridge_lines = [[[start_row],
[col],
0] for col in np.where(all_max_cols[start_row])[0]]
final_lines = []
rows = np.arange(start_row - 1, -1, -1)
cols = np.arange(0, matr.shape[1])
for row in rows:
this_max_cols = cols[all_max_cols[row]]
# Increment gap number of each line,
# set it to zero later if appropriate
for line in ridge_lines:
line[2] += 1
# XXX These should always be all_max_cols[row]
# But the order might be different. Might be an efficiency gain
# to make sure the order is the same and avoid this iteration
prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
# Look through every relative maximum found at current row
# Attempt to connect them with existing ridge lines.
for ind, col in enumerate(this_max_cols):
# If there is a previous ridge line within
# the max_distance to connect to, do so.
# Otherwise start a new one.
line = None
if len(prev_ridge_cols) > 0:
diffs = np.abs(col - prev_ridge_cols)
closest = np.argmin(diffs)
if diffs[closest] <= max_distances[row]:
line = ridge_lines[closest]
if line is not None:
# Found a point close enough, extend current ridge line
line[1].append(col)
line[0].append(row)
line[2] = 0
else:
new_line = [[row],
[col],
0]
ridge_lines.append(new_line)
# Retire ridge lines whose gap number exceeds gap_thresh: move them
# from the active list to final_lines.
# XXX Modifying a list while iterating over it.
# Safe here because we iterate backwards, but still tacky.
for ind in xrange(len(ridge_lines) - 1, -1, -1):
line = ridge_lines[ind]
if line[2] > gap_thresh:
final_lines.append(line)
del ridge_lines[ind]
out_lines = []
for line in (final_lines + ridge_lines):
sortargs = np.array(np.argsort(line[0]))
rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
rows[sortargs] = line[0]
cols[sortargs] = line[1]
out_lines.append([rows, cols])
return out_lines
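# A small usage sketch (assumes the module-level `cwt` and `ricker` imports
# that `find_peaks_cwt` below relies on): build a CWT matrix for a noiseless
# pulse and trace its ridge lines. `_demo_ridge_lines` is a hypothetical
# helper added for illustration only.
def _demo_ridge_lines():
    import numpy as np
    data = np.zeros(100)
    data[50] = 1.0                       # a single sharp peak
    widths = np.arange(1, 10)
    cwt_dat = cwt(data, ricker, widths)
    lines = _identify_ridge_lines(cwt_dat, widths / 4.0, np.ceil(widths[0]))
    # Each entry is [rows, cols]; the dominant ridge stays near column 50.
    return [(line[0][0], line[1][0]) for line in lines]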
def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
min_snr=1, noise_perc=10):
"""
Filter ridge lines according to prescribed criteria. Intended
to be used for finding relative maxima.
Parameters
----------
cwt : 2-D ndarray
Continuous wavelet transform from which the `ridge_lines` were defined.
ridge_lines : 1-D sequence
Each element should contain 2 sequences, the rows and columns
of the ridge line (respectively).
window_size : int, optional
Size of window to use to calculate noise floor.
Default is ``cwt.shape[1] / 20``.
min_length : int, optional
Minimum length a ridge line needs to be acceptable.
Default is ``cwt.shape[0] / 4``, i.e. one-fourth the number of widths.
min_snr : float, optional
Minimum SNR. Default is 1. The signal is the value of
the cwt matrix at the shortest length scale (``cwt[0, loc]``); the
noise is the `noise_perc`th percentile of datapoints contained within a
window of `window_size` around ``cwt[0, loc]``.
noise_perc : float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
`scipy.stats.scoreatpercentile`.
References
----------
Bioinformatics (2006) 22 (17): 2059-2065. :doi:`10.1093/bioinformatics/btl355`
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
"""
num_points = cwt.shape[1]
if min_length is None:
min_length = np.ceil(cwt.shape[0] / 4)
if window_size is None:
window_size = np.ceil(num_points / 20)
window_size = int(window_size)
hf_window, odd = divmod(window_size, 2)
# Filter based on SNR
row_one = cwt[0, :]
noises = np.zeros_like(row_one)
for ind, val in enumerate(row_one):
window_start = max(ind - hf_window, 0)
window_end = min(ind + hf_window + odd, num_points)
noises[ind] = scoreatpercentile(row_one[window_start:window_end],
per=noise_perc)
def filt_func(line):
if len(line[0]) < min_length:
return False
snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
if snr < min_snr:
return False
return True
return list(filter(filt_func, ridge_lines))
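# Hedged sketch of the rolling noise-floor computation above, isolated from
# the ridge bookkeeping: for each column, the noise estimate is a low
# percentile of `row_one` inside a centred window. `_demo_noise_floor` is a
# hypothetical name; `scoreatpercentile` is the module-level import used above.
def _demo_noise_floor(row_one, window_size=10, noise_perc=10):
    import numpy as np
    hf_window, odd = divmod(int(window_size), 2)
    noises = np.zeros_like(row_one)
    for ind in range(len(row_one)):
        start = max(ind - hf_window, 0)
        end = min(ind + hf_window + odd, len(row_one))
        noises[ind] = scoreatpercentile(row_one[start:end], per=noise_perc)
    return noises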
def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
gap_thresh=None, min_length=None, min_snr=1, noise_perc=10):
"""
Find peaks in a 1-D array with wavelet transformation.
The general approach is to smooth `vector` by convolving it with
`wavelet(width)` for each width in `widths`. Relative maxima which
appear at enough length scales, and with sufficiently high SNR, are
accepted.
Parameters
----------
vector : ndarray
1-D array in which to find the peaks.
widths : sequence
1-D array of widths to use for calculating the CWT matrix. In general,
this range should cover the expected width of peaks of interest.
wavelet : callable, optional
Should take two parameters and return a 1-D array to convolve
with `vector`. The first parameter determines the number of points
of the returned wavelet array, the second parameter is the scale
(`width`) of the wavelet. Should be normalized and symmetric.
Default is the ricker wavelet.
max_distances : ndarray, optional
At each row, a ridge line is only connected if the relative max at
row[n] is within ``max_distances[n]`` from the relative max at
``row[n+1]``. Default value is ``widths/4``.
gap_thresh : float, optional
If a relative maximum is not found within `max_distances`,
there will be a gap. A ridge line is discontinued if there are more
than `gap_thresh` points without connecting a new relative maximum.
Default is the first value of the `widths` array, i.e. ``widths[0]``.
min_length : int, optional
Minimum length a ridge line needs to be acceptable.
Default is ``cwt.shape[0] / 4``, i.e. one-fourth the number of widths.
min_snr : float, optional
Minimum SNR. Default is 1. The signal is the value of
the cwt matrix at the shortest length scale (``cwt[0, loc]``); the
noise is the `noise_perc`th percentile of datapoints contained within a
window of `window_size` around ``cwt[0, loc]``.
noise_perc : float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
`stats.scoreatpercentile`. Default is 10.
Returns
-------
peaks_indices : ndarray
Indices of the locations in the `vector` where peaks were found.
The indices are returned in sorted (increasing) order.
See Also
--------
cwt
Continuous wavelet transform.
find_peaks
Find peaks inside a signal based on peak properties.
Notes
-----
This approach was designed for finding sharp peaks among noisy data;
however, with proper parameter selection it should function well for
different peak shapes.
The algorithm is as follows:
1. Perform a continuous wavelet transform on `vector`, for the supplied
`widths`. This is a convolution of `vector` with `wavelet(width)` for
each width in `widths`. See `cwt`.
2. Identify "ridge lines" in the cwt matrix. These are relative maxima
at each row, connected across adjacent rows. See `_identify_ridge_lines`.
3. Filter the ridge lines using `_filter_ridge_lines`.
.. versionadded:: 0.11.0
References
----------
.. [1] Bioinformatics (2006) 22 (17): 2059-2065.
:doi:`10.1093/bioinformatics/btl355`
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
Examples
--------
>>> from scipy import signal
>>> xs = np.arange(0, np.pi, 0.05)
>>> data = np.sin(xs)
>>> peakind = signal.find_peaks_cwt(data, np.arange(1,10))
>>> peakind, xs[peakind], data[peakind]
([32], array([ 1.6]), array([ 0.9995736]))
"""
widths = np.asarray(widths)
if gap_thresh is None:
gap_thresh = np.ceil(widths[0])
if max_distances is None:
max_distances = widths / 4.0
if wavelet is None:
wavelet = ricker
cwt_dat = cwt(vector, wavelet, widths)
ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length,
min_snr=min_snr, noise_perc=noise_perc)
max_locs = np.asarray([x[1][0] for x in filtered])
max_locs.sort()
return max_locs
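# The three pipeline stages of `find_peaks_cwt` can also be run separately,
# which is useful when tuning `gap_thresh` or `min_snr`. A hedged sketch
# (`_demo_cwt_pipeline` is a hypothetical name added for illustration):
def _demo_cwt_pipeline(vector, widths):
    import numpy as np
    widths = np.asarray(widths)
    cwt_dat = cwt(vector, ricker, widths)                       # 1. transform
    ridge_lines = _identify_ridge_lines(cwt_dat, widths / 4.0,  # 2. trace
                                        np.ceil(widths[0]))
    filtered = _filter_ridge_lines(cwt_dat, ridge_lines)        # 3. filter
    return np.sort([line[1][0] for line in filtered])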
| 46,361 | 36.754072 | 82 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/bsplines.py |
from __future__ import division, print_function, absolute_import
from scipy._lib.six import xrange
from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.special import comb, gamma
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def factorial(n):
return gamma(n + 1)
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
"""
intype = Iin.dtype.char
hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
if intype in ['F', 'D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real, lmbda)
cki = cspline2d(Iin.imag, lmbda)
outr = sepfir2d(ckr, hcol, hcol)
outi = sepfir2d(cki, hcol, hcol)
out = (outr + 1j * outi).astype(intype)
elif intype in ['f', 'd']:
ckr = cspline2d(Iin, lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out
_splinefunc_cache = {}
def _bspline_piecefunctions(order):
"""Returns the function defined over the left-side pieces for a bspline of
a given order.
The 0th piece is the first one less than 0. The last piece is a function
identical to 0 (returned as the constant 0). (There are order//2 + 2 total
pieces).
Also returns the condition functions that when evaluated return boolean
arrays for use with `numpy.piecewise`.
"""
try:
return _splinefunc_cache[order]
except KeyError:
pass
def condfuncgen(num, val1, val2):
if num == 0:
return lambda x: logical_and(less_equal(x, val1),
greater_equal(x, val2))
elif num == 2:
return lambda x: less_equal(x, val2)
else:
return lambda x: logical_and(less(x, val1),
greater_equal(x, val2))
last = order // 2 + 2
if order % 2:
startbound = -1.0
else:
startbound = -0.5
condfuncs = [condfuncgen(0, 0, startbound)]
bound = startbound
for num in xrange(1, last - 1):
condfuncs.append(condfuncgen(1, bound, bound - 1))
bound = bound - 1
condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
# final value of bound is used in piecefuncgen below
# the functions to evaluate are taken from the left-hand-side
# in the general expression derived from the central difference
# operator (because they involve fewer terms).
fval = factorial(order)
def piecefuncgen(num):
Mk = order // 2 - num
if (Mk < 0):
return 0 # final function is 0
coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
for k in xrange(Mk + 1)]
shifts = [-bound - k for k in xrange(Mk + 1)]
def thefunc(x):
res = 0.0
for k in range(Mk + 1):
res += coeffs[k] * (x + shifts[k]) ** order
return res
return thefunc
funclist = [piecefuncgen(k) for k in xrange(last)]
_splinefunc_cache[order] = (funclist, condfuncs)
return funclist, condfuncs
def bspline(x, n):
"""B-spline basis function of order n.
Notes
-----
Uses `numpy.piecewise` and an automatic function generator.
"""
ax = -abs(asarray(x))
# number of pieces on the left-side is (n+1)/2
funclist, condfuncs = _bspline_piecefunctions(n)
condlist = [func(ax) for func in condfuncs]
return piecewise(ax, condlist, funclist)
def gauss_spline(x, n):
"""Gaussian approximation to B-spline basis function of order n.
"""
signsq = (n + 1) / 12.0
return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
def cubic(x):
"""A cubic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 1)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
cond2 = ~cond1 & less(ax, 2)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
return res
def quadratic(x):
"""A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 0.5)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 0.75 - ax1 ** 2
cond2 = ~cond1 & less(ax, 1.5)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = (ax2 - 1.5) ** 2 / 2.0
return res
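# Hedged consistency check (added for illustration; `_demo_bspline_specials`
# is a hypothetical name): `cubic` and `quadratic` are hand-optimised special
# cases of `bspline`, so the generic piecewise evaluation must agree with them.
def _demo_bspline_specials():
    import numpy as np
    x = np.linspace(-3, 3, 101)
    assert np.allclose(bspline(x, 3), cubic(x))
    assert np.allclose(bspline(x, 2), quadratic(x))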
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
rho, omega = _coeff_smooth(lamb)
cs = 1 - 2 * rho * cos(omega) + rho * rho
K = len(signal)
yp = zeros((K,), signal.dtype.char)
k = arange(K)
yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
add.reduce(_hc(k + 1, cs, rho, omega) * signal))
yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
_hc(1, cs, rho, omega) * signal[1] +
add.reduce(_hc(k + 2, cs, rho, omega) * signal))
for n in range(2, K):
yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
rho * rho * yp[n - 2])
y = zeros((K,), signal.dtype.char)
y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
_hs(k + 1, cs, rho, omega)) * signal[::-1])
y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
_hs(k + 2, cs, rho, omega)) * signal[::-1])
for n in range(K - 3, -1, -1):
y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
rho * rho * y[n + 2])
return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
"""
Compute cubic spline coefficients for rank-1 array.
Find the cubic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length-3 FIR window ``[1.0, 4.0, 1.0] / 6.0``.
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient, default is 0.0.
Returns
-------
c : ndarray
Cubic spline coefficients.
"""
if lamb != 0.0:
return _cubic_smooth_coeff(signal, lamb)
else:
return _cubic_coeff(signal)
def qspline1d(signal, lamb=0.0):
"""Compute quadratic spline coefficients for rank-1 array.
Find the quadratic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length-3 FIR window ``[1.0, 6.0, 1.0] / 8.0``.
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient (must be zero for now).
Returns
-------
c : ndarray
Quadratic spline coefficients.
"""
if lamb != 0.0:
raise ValueError("Smoothing quadratic splines not supported yet.")
else:
return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
other words, the old sample points (knot-points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx  j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
"""
newx = (asarray(newx) - x0) / float(dx)
res = zeros_like(newx, dtype=cj.dtype)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = cspline1d_eval(cj, -newx[cond1])
res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx, dtype=cj.dtype)
jlower = floor(newx - 2).astype(int) + 1
for i in range(4):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * cubic(newx - thisj)
res[cond3] = result
return res
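# Hedged round-trip sketch (hypothetical `_demo_cspline_roundtrip`, added for
# illustration): evaluating the cubic-spline coefficients back at the interior
# knot positions reproduces the input signal, since with ``lamb=0`` the spline
# interpolates it; the border samples depend on the mirror handling.
def _demo_cspline_roundtrip():
    import numpy as np
    sig = np.sin(np.linspace(0, 2 * np.pi, 32))
    cj = cspline1d(sig)                            # lamb=0: interpolating spline
    rec = cspline1d_eval(cj, np.arange(len(sig)))  # evaluate at the knots
    assert np.allclose(rec[2:-2], sig[2:-2])       # exact away from the borders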
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a quadratic spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
other words, the old sample points (knot-points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
"""
newx = (asarray(newx) - x0) / dx
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = qspline1d_eval(cj, -newx[cond1])
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 1.5).astype(int) + 1
for i in range(3):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * quadratic(newx - thisj)
res[cond3] = result
return res
| 11,615 | 29.408377 | 79 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/spectral.py |
"""Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA']
def lombscargle(x,
y,
freqs,
precenter=False,
normalize=False):
"""
lombscargle(x, y, freqs)
Computes the Lomb-Scargle periodogram.
The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
extended by Scargle [2]_ to find, and test the significance of weak
periodic signals with uneven temporal sampling.
When *normalize* is False (default) the computed periodogram
is unnormalized: it takes the value ``(A**2) * N/4`` for a harmonic
signal with amplitude A for sufficiently large N.
When *normalize* is True the computed periodogram is normalized by
the residuals of the data around a constant reference model (at zero).
Input arrays should be one-dimensional and will be cast to float64.
Parameters
----------
x : array_like
Sample times.
y : array_like
Measurement values.
freqs : array_like
Angular frequencies for output periodogram.
precenter : bool, optional
Pre-center amplitudes by subtracting the mean.
normalize : bool, optional
Compute normalized periodogram.
Returns
-------
pgram : array_like
Lomb-Scargle periodogram.
Raises
------
ValueError
If the input arrays `x` and `y` do not have the same shape.
Notes
-----
This subroutine calculates the periodogram using a slightly
modified algorithm due to Townsend [3]_ which allows the
periodogram to be calculated using only a single pass through
the input arrays for each frequency.
The algorithm's running time scales roughly as O(len(x) * len(freqs)),
i.e. O(N^2) for a comparable number of samples and frequencies.
References
----------
.. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
.. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
Statistical aspects of spectral analysis of unevenly spaced data",
The Astrophysical Journal, vol 263, pp. 835-853, 1982
.. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
periodogram using graphics processing units.", The Astrophysical
Journal Supplement Series, vol 191, pp. 247-253, 2010
Examples
--------
>>> import scipy.signal
>>> import matplotlib.pyplot as plt
First define some input parameters for the signal:
>>> A = 2.
>>> w = 1.
>>> phi = 0.5 * np.pi
>>> nin = 1000
>>> nout = 100000
>>> frac_points = 0.9 # Fraction of points to select
Randomly select a fraction of an array with timesteps:
>>> r = np.random.rand(nin)
>>> x = np.linspace(0.01, 10*np.pi, nin)
>>> x = x[r >= frac_points]
Plot a sine wave for the selected times:
>>> y = A * np.sin(w*x+phi)
Define the array of frequencies for which to compute the periodogram:
>>> f = np.linspace(0.01, 10, nout)
Calculate Lomb-Scargle periodogram:
>>> import scipy.signal as signal
>>> pgram = signal.lombscargle(x, y, f, normalize=True)
Now make a plot of the input data:
>>> plt.subplot(2, 1, 1)
>>> plt.plot(x, y, 'b+')
Then plot the normalized periodogram:
>>> plt.subplot(2, 1, 2)
>>> plt.plot(f, pgram)
>>> plt.show()
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
freqs = np.asarray(freqs, dtype=np.float64)
if x.ndim != 1 or y.ndim != 1 or freqs.ndim != 1:
raise ValueError("x, y and freqs must all be one-dimensional")
if precenter:
pgram = _lombscargle(x, y - y.mean(), freqs)
else:
pgram = _lombscargle(x, y, freqs)
if normalize:
pgram *= 2 / np.dot(y, y)
return pgram
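# Hedged sketch of the normalization relationship implemented above
# (hypothetical `_demo_lombscargle_normalization` added for illustration):
# the normalized periodogram is the unnormalized one scaled by 2 / (y . y).
def _demo_lombscargle_normalization():
    x = np.sort(10 * np.random.rand(100))
    y = np.sin(3.0 * x)
    freqs = np.linspace(0.5, 10, 50)
    raw = lombscargle(x, y, freqs)
    norm = lombscargle(x, y, freqs, normalize=True)
    assert np.allclose(norm, raw * 2 / np.dot(y, y))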
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to 'boxcar'.
nfft : int, optional
Length of the FFT used. If `None` the length of `x` will be
used.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[25000:])
0.00099728892368242854
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[tuple(s)]  # index with a tuple; indexing with a list is deprecated
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
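# Hedged equivalence sketch (hypothetical `_demo_periodogram_is_welch` added
# for illustration): as the delegation above shows, a periodogram is Welch's
# method with a single, non-overlapping segment spanning the whole signal.
def _demo_periodogram_is_welch(x, fs=1.0):
    f1, P1 = periodogram(x, fs)
    f2, P2 = welch(x, fs, window='boxcar', nperseg=len(x), noverlap=0)
    return np.allclose(f1, f2) and np.allclose(P1, P2)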
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1):
r"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral
density by dividing the data into overlapping segments, computing a
modified periodogram for each segment and averaging the
periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method
[2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
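# Hedged sketch of the relation used above (hypothetical
# `_demo_welch_is_csd_diagonal` added for illustration): Welch's PSD is the
# real part of the cross spectral density of a signal with itself.
def _demo_welch_is_csd_diagonal(x, fs=1.0, nperseg=256):
    f1, Pxx = welch(x, fs, nperseg=nperseg)
    f2, Pxx_csd = csd(x, x, fs, nperseg=nperseg)
    return np.allclose(Pxx, Pxx_csd.real)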
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
r"""
Estimate the cross power spectral density, Pxy, using Welch's
method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and `fs` is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to
csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
-----
By convention, Pxy is computed with the conjugate FFT of X
multiplied by the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 8``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Sxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'.
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are
['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
equivalent to the output of `stft` with no padding or boundary
extension. 'magnitude' returns the absolute magnitude of the
STFT. 'angle' and 'phase' return the complex angle of the STFT,
with and without unwrapping, respectively.
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds
to the segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the
entire data stream is averaged over, one may wish to use a smaller
overlap (or perhaps none at all) when computing a spectrogram, to
maintain some statistical independence between individual segments.
It is for this reason that the default window is a Tukey window with
1/8th of a window's length overlap at each end.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
if mode not in modelist:
raise ValueError('unknown value for mode {}, must be one of {}'
.format(mode, modelist))
# need to set default for nperseg before setting default for noverlap below
window, nperseg = _triage_segments(window, nperseg,
input_length=x.shape[axis])
# Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
if mode == 'psd':
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='psd')
else:
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='stft')
if mode == 'magnitude':
Sxx = np.abs(Sxx)
elif mode in ['angle', 'phase']:
Sxx = np.angle(Sxx)
if mode == 'phase':
# Sxx has one additional dimension for time strides
if axis < 0:
axis -= 1
Sxx = np.unwrap(Sxx, axis=axis)
# mode =='complex' is same as `stft`, doesn't need modification
return freqs, time, Sxx
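# Hedged sketch (hypothetical `_demo_spectrogram_vs_welch` added for
# illustration): with matching window parameters, averaging the 'psd'
# spectrogram over its time axis reproduces the Welch estimate, since
# `welch` averages the very segments the spectrogram keeps separate.
def _demo_spectrogram_vs_welch(x, fs=1.0, nperseg=256):
    f_w, Pxx = welch(x, fs, window='hann', nperseg=nperseg,
                     noverlap=nperseg // 2)
    f_s, t, Sxx = spectrogram(x, fs, window='hann', nperseg=nperseg,
                              noverlap=nperseg // 2)
    return np.allclose(Pxx, Sxx.mean(axis=-1))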
def check_COLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Constant OverLap Add (COLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The allowed variance of a bin's weighted sum from the median bin
sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies COLA within `tol`,
`False` otherwise
See Also
--------
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction.
Some examples of windows that satisfy COLA:
- Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
- Bartlett window at overlap of 1/2, 3/4, 5/6, ...
- Hann window at 1/2, 2/3, 3/4, ...
- Any Blackman family window at 2/3 overlap
- Any window with ``noverlap = nperseg-1``
A very comprehensive list of other windows may be found in [2]_,
wherein the COLA condition is satisfied when the "Amplitude
Flatness" is unity.
.. versionadded:: 0.19.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
Publishing, 2011, ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm COLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_COLA(signal.boxcar(100), 100, 75)
True
COLA is not true for 25% (1/4) overlap, though:
>>> signal.check_COLA(signal.boxcar(100), 100, 25)
False
"Symmetrical" Hann window (for filter design) is not COLA:
>>> signal.check_COLA(signal.hann(120, sym=True), 120, 60)
False
"Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
overlap of 1/2, 2/3, 3/4, etc.:
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 60)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 80)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 90)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
noverlap = int(noverlap)
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step))
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]
deviation = binsums - np.median(binsums)
return np.max(np.abs(deviation)) < tol
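# Hedged sketch of the check above (hypothetical `_demo_cola_sum` added for
# illustration): for a COLA-compliant window, the shifted window copies sum
# to a constant, which is exactly what the `binsums` computation verifies.
def _demo_cola_sum(nperseg=128, noverlap=64):
    win = get_window('hann', nperseg)          # DFT-even Hann window
    step = nperseg - noverlap
    total = np.zeros(nperseg + 4 * step)
    for start in range(0, len(total) - nperseg + 1, step):
        total[start:start + nperseg] += win
    middle = total[nperseg:-nperseg]           # ignore the un-overlapped edges
    return np.allclose(middle, middle[0])      # constant sum -> COLA holds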
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend=False, return_onesided=True, boundary='zeros', padded=True,
axis=-1):
r"""
Compute the Short Time Fourier Transform (STFT).
STFTs can be used as a way of quantifying the change of a
nonstationary signal's frequency and phase content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`. When
specified, the COLA constraint must be met (see Notes below).
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to `False`.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned. Defaults to
`True`.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `True`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`, as is the
default.
axis : int, optional
Axis along which the STFT is computed; the default is over the
last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Zxx : ndarray
STFT of `x`. By default, the last axis of `Zxx` corresponds
to the segment times.
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
welch: Power spectral density by Welch's method.
spectrogram: Spectrogram by Welch's method.
csd: Cross spectral density by Welch's method.
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA), and the input signal must have complete
windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
(nperseg-noverlap) == 0``). The `padded` argument may be used to
accomplish this.
The COLA constraint ensures that every point in the input data is
equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the STFT's magnitude.
>>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided,
scaling='spectrum', axis=axis,
mode='stft', boundary=boundary,
padded=padded)
return freqs, time, Zxx
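# Hedged round-trip sketch (hypothetical `_demo_stft_roundtrip` added for
# illustration; assumes `istft`, defined below): with a COLA-compliant
# window and a signal length that fills whole segments, `istft` inverts
# `stft` up to numerical precision.
def _demo_stft_roundtrip():
    x = np.random.randn(1024)
    f, t, Zxx = stft(x, nperseg=128, noverlap=64)   # Hann, COLA at 1/2 overlap
    t_rec, x_rec = istft(Zxx, nperseg=128, noverlap=64)
    return np.allclose(x, x_rec[:len(x)])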
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
r"""
Perform the inverse Short Time Fourier transform (iSTFT).
Parameters
----------
Zxx : array_like
STFT of the signal to be reconstructed. If a purely real array
is passed, it will be cast to a complex data type.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window. Must match the window used to generate the
STFT for faithful inversion.
nperseg : int, optional
Number of data points corresponding to each STFT segment. This
parameter must be specified if the number of data points per
segment is odd, or if the STFT was padded via ``nfft >
nperseg``. If `None`, the value depends on the shape of
`Zxx` and `input_onesided`. If `input_onesided` is True,
``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
noverlap : int, optional
Number of points to overlap between segments. If `None`, half
        of the segment length. When specified, the COLA constraint
        must be met (see Notes below), and should match the parameter
        used to generate the STFT. Defaults to `None`.
nfft : int, optional
Number of FFT points corresponding to each STFT segment. This
parameter must be specified if the STFT was padded via ``nfft >
nperseg``. If `None`, the default values are the same as for
`nperseg`, detailed above, with one exception: if
`input_onesided` is True and
``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
that value. This case allows the proper inversion of an
odd-length unpadded STFT using ``nfft=None``. Defaults to
`None`.
input_onesided : bool, optional
If `True`, interpret the input array as one-sided FFTs, such
as is returned by `stft` with ``return_onesided=True`` and
        `numpy.fft.rfft`. If `False`, interpret the input as a
        two-sided FFT. Defaults to `True`.
boundary : bool, optional
Specifies whether the input signal was extended at its
boundaries by supplying a non-`None` ``boundary`` argument to
`stft`. Defaults to `True`.
time_axis : int, optional
        Where the time segments of the STFT are located; the default is
the last axis (i.e. ``axis=-1``).
freq_axis : int, optional
Where the frequency axis of the STFT is located; the default is
the penultimate axis (i.e. ``axis=-2``).
Returns
-------
t : ndarray
Array of output data times.
x : ndarray
iSTFT of `Zxx`.
See Also
--------
stft: Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
Notes
-----
In order to enable inversion of an STFT via the inverse STFT with
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`, by using ``nperseg = Zxx.shape[freq_axis]``.
An STFT which has been modified (via masking or otherwise) is not
    guaranteed to correspond to an exactly realizable signal. This
    function implements the iSTFT via the least-squares estimation
algorithm detailed in [2]_, which produces a signal that minimizes
the mean squared error between the STFT of the returned signal and
the modified STFT.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
    .. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
           Modified Short-Time Fourier Transform", IEEE Trans. ASSP,
           1984, 10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
0.001 V**2/Hz of white noise sampled at 1024 Hz.
>>> fs = 1024
>>> N = 10*fs
>>> nperseg = 512
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> carrier = amp * np.sin(2*np.pi*50*time)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> x = carrier + noise
Compute the STFT, and plot its magnitude
>>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
>>> plt.figure()
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.ylim([f[1], f[-1]])
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.yscale('log')
>>> plt.show()
Zero the components that are 10% or less of the carrier magnitude,
then convert back to a time series via inverse STFT
>>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
>>> _, xrec = signal.istft(Zxx, fs)
Compare the cleaned signal with the original and true carrier signals.
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([2, 2.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
Note that the cleaned signal does not start as abruptly as the original,
since some of the coefficients of the transient were also removed:
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([0, 0.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
"""
# Make sure input is an ndarray of appropriate complex dtype
Zxx = np.asarray(Zxx) + 0j
freq_axis = int(freq_axis)
time_axis = int(time_axis)
if Zxx.ndim < 2:
raise ValueError('Input stft must be at least 2d!')
if freq_axis == time_axis:
raise ValueError('Must specify differing time and frequency axes!')
nseg = Zxx.shape[time_axis]
if input_onesided:
# Assume even segment length
n_default = 2*(Zxx.shape[freq_axis] - 1)
else:
n_default = Zxx.shape[freq_axis]
# Check windowing parameters
if nperseg is None:
nperseg = n_default
else:
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
if (input_onesided) and (nperseg == n_default + 1):
# Odd nperseg, no FFT padding
nfft = nperseg
else:
nfft = n_default
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
if not check_COLA(window, nperseg, noverlap):
raise ValueError('Window, STFT shape and noverlap do not satisfy the '
'COLA constraint.')
# Rearrange axes if necessary
if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
# Turn negative indices to positive for the call to transpose
if freq_axis < 0:
freq_axis = Zxx.ndim + freq_axis
if time_axis < 0:
time_axis = Zxx.ndim + time_axis
zouter = list(range(Zxx.ndim))
for ax in sorted([time_axis, freq_axis], reverse=True):
zouter.pop(ax)
Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
# Get window as array
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of {0}'.format(nperseg))
if input_onesided:
ifunc = np.fft.irfft
else:
ifunc = fftpack.ifft
xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
# Initialize output and normalization arrays
outputlength = nperseg + (nseg-1)*nstep
x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
norm = np.zeros(outputlength, dtype=xsubs.dtype)
if np.result_type(win, xsubs) != xsubs.dtype:
win = win.astype(xsubs.dtype)
xsubs *= win.sum() # This takes care of the 'spectrum' scaling
# Construct the output from the ifft segments
# This loop could perhaps be vectorized/strided somehow...
for ii in range(nseg):
# Window the ifft
x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
norm[..., ii*nstep:ii*nstep+nperseg] += win**2
# Divide out normalization where non-tiny
x /= np.where(norm > 1e-10, norm, 1.0)
# Remove extension points
if boundary:
x = x[..., nperseg//2:-(nperseg//2)]
if input_onesided:
x = x.real
# Put axes back
if x.ndim > 1:
if time_axis != Zxx.ndim-1:
if freq_axis < time_axis:
time_axis -= 1
x = np.rollaxis(x, -1, time_axis)
time = np.arange(x.shape[0])/float(fs)
return time, x
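# --- Illustrative sketch (not part of scipy): recovering the segment length
# from a one-sided STFT, as described in the `nperseg` notes above. Assumes
# the STFT was taken with an even segment length (the default case).
def _demo_istft_nperseg():
    x = np.zeros(768)
    _, _, Zxx = stft(x, nperseg=256)       # one-sided output by default
    nperseg = 2 * (Zxx.shape[-2] - 1)      # freq_axis is -2 by default
    assert nperseg == 256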
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', axis=-1):
r"""
    Estimate the magnitude squared coherence, Cxy, of discrete-time
    signals X and Y using Welch's method.
``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
spectral density estimates of X and Y, and `Pxy` is the cross
spectral density estimate of X and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
    noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
    -----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
    signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
Signals" Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
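# --- Illustrative sketch (not part of scipy): the identity computed above,
# ``Cxy = abs(Pxy)**2 / (Pxx * Pyy)``, can be verified directly against
# `welch` and `csd`. Signal and segment parameters are arbitrary.
def _demo_coherence_identity():
    rng = np.random.RandomState(1)
    x = rng.randn(2048)
    y = x + 0.5 * rng.randn(2048)
    _, Cxy = coherence(x, y, nperseg=256)
    _, Pxx = welch(x, nperseg=256)
    _, Pyy = welch(y, nperseg=256)
    _, Pxy = csd(x, y, nperseg=256)
    assert np.allclose(Cxy, np.abs(Pxy)**2 / (Pxx * Pyy))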
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='spectrum', axis=-1, mode='psd', boundary=None,
padded=False):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between
the stft, psd, csd, and spectrogram functions. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as `x` (i.e. ``_spectral_helper(x,
x, ...)``), the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross
spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
and `y` are measured in V and `fs` is measured in Hz.
        Defaults to 'density'.
axis : int, optional
Axis along which the FFTs are computed; the default is over the
last axis (i.e. ``axis=-1``).
    mode : str {'psd', 'stft'}, optional
Defines what kind of return values are expected. Defaults to
'psd'.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
`None`.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
        Defaults to `False`. Padding occurs after boundary extension if
        `boundary` is not `None` and `padded` is `True`.
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
        Array of times corresponding to each data segment.
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
    # Parse window; if array_like, then set nperseg = len(win)
    win, nperseg = _triage_segments(window, nperseg,
                                    input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
    if np.result_type(win, np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if mode == 'stft':
scale = np.sqrt(scale)
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = fftpack.fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = np.fft.rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for time/window index, so a
# negative axis index shifts down one
if axis < 0:
axis -= 1
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
return freqs, time, result
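# --- Illustrative sketch (not part of scipy's public API): `welch` is the
# per-segment PSD returned by this helper, averaged over the segment axis
# (a mean, in this version of `csd`). Parameters are arbitrary; note the
# helper defaults to scaling='spectrum', so 'density' is passed explicitly.
def _demo_helper_vs_welch():
    rng = np.random.RandomState(2)
    x = rng.randn(1024)
    f_w, Pxx = welch(x, nperseg=256)
    f_h, _, P = _spectral_helper(x, x, nperseg=256, scaling='density',
                                 mode='psd')
    assert np.allclose(f_w, f_h)
    assert np.allclose(Pxx, P.mean(axis=-1))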
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
"""
Calculate windowed FFT, for internal use by
scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
`_spectral helper`. All input validation is performed there, and the
data axis is assumed to be the last axis of x. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Returns
-------
result : ndarray
Array of FFT data
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
    # Create a strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
# http://stackoverflow.com/a/5568169
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = fftpack.fft
else:
result = result.real
func = np.fft.rfft
result = func(result, n=nfft)
return result
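# --- Illustrative sketch (not part of scipy): the as_strided trick used in
# `_fft_helper` builds overlapping segments as views into the original
# buffer, without copying. Toy sizes (nperseg=4, noverlap=2) for clarity.
def _demo_strided_segments():
    x = np.arange(8.0)
    nperseg, noverlap = 4, 2
    step = nperseg - noverlap
    shape = x.shape[:-1] + ((x.shape[-1] - noverlap) // step, nperseg)
    strides = x.strides[:-1] + (step * x.strides[-1], x.strides[-1])
    segs = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
    assert np.array_equal(segs, [[0., 1., 2., 3.],
                                 [2., 3., 4., 5.],
                                 [4., 5., 6., 7.]])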
def _triage_segments(window, nperseg, input_length):
"""
Parses window and nperseg arguments for spectrogram and _spectral_helper.
This is a helper function, not meant to be called externally.
Parameters
    ----------
window : string, tuple, or ndarray
If window is specified by a string or tuple and nperseg is not
        specified, nperseg is set to the default of 256 and a window of
        that length is returned.
If instead the window is array_like and nperseg is not specified, then
nperseg is set to the length of the window. A ValueError is raised if
the user supplies both an array_like window and a value for nperseg but
nperseg does not equal the length of the window.
nperseg : int
Length of each segment
input_length: int
Length of input signal, i.e. x.shape[-1]. Used to test for errors.
Returns
-------
win : ndarray
        window. If the function was called with a string or tuple, then
        this will hold the actual array used as a window.
nperseg : int
Length of each segment. If window is str or tuple, nperseg is set to
256. If window is array_like, nperseg is set to the length of the
        window.
"""
    # Parse window; if array_like, then set nperseg = len(win)
if isinstance(window, string_types) or isinstance(window, tuple):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
            warnings.warn('nperseg = {0:d} is greater than input length '
                          '= {1:d}, using nperseg = {1:d}'
.format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
if nperseg is None:
nperseg = win.shape[0]
elif nperseg is not None:
if nperseg != win.shape[0]:
raise ValueError("value specified for nperseg is different from"
" length of window")
return win, nperseg
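# --- Illustrative sketch (not part of scipy): the two main triage cases.
# A string/tuple window with no nperseg falls back to the default of 256;
# an array_like window forces nperseg to the window length.
def _demo_triage_segments():
    win, nperseg = _triage_segments('hann', None, input_length=512)
    assert nperseg == 256 and win.shape == (256,)
    win, nperseg = _triage_segments(np.ones(128), None, input_length=512)
    assert nperseg == 128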
| 66,089 | 35.921788 | 80 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/waveforms.py |
# Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
from scipy._lib.six import string_types
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
'unit_impulse']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
    # Broadcast w and t against each other so either may be an array.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
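# --- Illustrative sketch (not part of scipy): spot-checking the two branches
# of the piecewise formula above for a triangle wave (width=0.5). The peak
# of value 1 sits at t = width*2*pi, i.e. at pi here.
def _demo_sawtooth_branches():
    assert np.isclose(sawtooth(0.5 * pi, width=0.5), 0.0)  # rising branch
    assert np.isclose(sawtooth(pi, width=0.5), 1.0)        # peak
    assert np.isclose(sawtooth(1.5 * pi, width=0.5), 0.0)  # falling branch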
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if isinstance(t, string_types):
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
else:
raise ValueError("If `t` is a string, it must be 'cutoff'")
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
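# --- Illustrative sketch (not part of scipy): the 'cutoff' branch returns
# the time at which the Gaussian envelope decays to the `tpr` level, so
# evaluating the envelope there recovers 10**(tpr/20).
def _demo_gausspulse_cutoff():
    tc = gausspulse('cutoff', fc=1000, bw=0.5, tpr=-60)
    _, env = gausspulse(tc, fc=1000, bw=0.5, retenv=True)
    assert np.isclose(env, 10 ** (-60 / 20.0))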
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
Examples
--------
The following will be used in the examples:
>>> from scipy.signal import chirp, spectrogram
>>> import matplotlib.pyplot as plt
For the first example, we'll plot the waveform for a linear chirp
from 6 Hz to 1 Hz over 10 seconds:
>>> t = np.linspace(0, 10, 5001)
>>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')
>>> plt.plot(t, w)
>>> plt.title("Linear Chirp, f(0)=6, f(10)=1")
>>> plt.xlabel('t (sec)')
>>> plt.show()
For the remaining examples, we'll use higher frequency ranges,
and demonstrate the result using `scipy.signal.spectrogram`.
We'll use a 10 second interval sampled at 8000 Hz.
>>> fs = 8000
>>> T = 10
>>> t = np.linspace(0, T, T*fs, endpoint=False)
Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds
(vertex of the parabolic curve of the frequency is at t=0):
>>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic')
>>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512,
... nfft=2048)
>>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r')
>>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250')
>>> plt.xlabel('t (sec)')
>>> plt.ylabel('Frequency (Hz)')
>>> plt.grid()
>>> plt.show()
Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds
(vertex of the parabolic curve of the frequency is at t=10):
>>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic',
... vertex_zero=False)
>>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512,
... nfft=2048)
>>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r')
    >>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250\\n' +
... '(vertex_zero=False)')
>>> plt.xlabel('t (sec)')
>>> plt.ylabel('Frequency (Hz)')
>>> plt.grid()
>>> plt.show()
Logarithmic chirp from 1500 Hz to 250 Hz over 10 seconds:
>>> w = chirp(t, f0=1500, f1=250, t1=10, method='logarithmic')
>>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512,
... nfft=2048)
>>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r')
>>> plt.title('Logarithmic Chirp, f(0)=1500, f(10)=250')
>>> plt.xlabel('t (sec)')
>>> plt.ylabel('Frequency (Hz)')
>>> plt.grid()
>>> plt.show()
Hyperbolic chirp from 1500 Hz to 250 Hz over 10 seconds:
>>> w = chirp(t, f0=1500, f1=250, t1=10, method='hyperbolic')
>>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512,
... nfft=2048)
>>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r')
>>> plt.title('Hyperbolic Chirp, f(0)=1500, f(10)=250')
>>> plt.xlabel('t (sec)')
>>> plt.ylabel('Frequency (Hz)')
>>> plt.grid()
>>> plt.show()
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
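# --- Illustrative sketch (not part of scipy): the instantaneous frequency is
# the derivative of the phase divided by 2*pi. Checking the linear sweep
# numerically against f(t) = f0 + (f1 - f0)*t/t1 with arbitrary parameters.
def _demo_chirp_phase_linear():
    t = np.linspace(0, 10, 100001)
    phase = _chirp_phase(t, f0=6, t1=10, f1=1, method='linear')
    f_inst = np.gradient(phase, t) / (2 * pi)
    assert np.allclose(f_inst, 6 + (1 - 6) * t / 10, atol=1e-3)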
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
Examples
--------
Compute the waveform with instantaneous frequency::
f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2
over the interval 0 <= t <= 10.
>>> from scipy.signal import sweep_poly
>>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
>>> t = np.linspace(0, 10, 5001)
>>> w = sweep_poly(t, p)
Plot it:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, w)
>>> plt.title("Sweep Poly\\nwith frequency " +
... "$f(t) = 0.025t^3 - 0.36t^2 + 1.25t + 2$")
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, p(t), 'r', label='f(t)')
>>> plt.legend()
>>> plt.xlabel('t')
>>> plt.tight_layout()
>>> plt.show()
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
def unit_impulse(shape, idx=None, dtype=float):
"""
Unit impulse signal (discrete delta function) or unit basis vector.
Parameters
----------
shape : int or tuple of int
Number of samples in the output (1-D), or a tuple that represents the
shape of the output (N-D).
idx : None or int or tuple of int or 'mid', optional
Index at which the value is 1. If None, defaults to the 0th element.
If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in
all dimensions. If an int, the impulse will be at `idx` in all
dimensions.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
Returns
-------
y : ndarray
Output array containing an impulse signal.
Notes
-----
The 1D case is also known as the Kronecker delta.
.. versionadded:: 0.19.0
Examples
--------
An impulse at the 0th element (:math:`\\delta[n]`):
>>> from scipy import signal
>>> signal.unit_impulse(8)
array([ 1., 0., 0., 0., 0., 0., 0., 0.])
Impulse offset by 2 samples (:math:`\\delta[n-2]`):
>>> signal.unit_impulse(7, 2)
array([ 0., 0., 1., 0., 0., 0., 0.])
2-dimensional impulse, centered:
>>> signal.unit_impulse((3, 3), 'mid')
array([[ 0., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 0.]])
Impulse at (2, 2), using broadcasting:
>>> signal.unit_impulse((4, 4), 2)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 0.]])
Plot the impulse response of a 4th-order Butterworth lowpass filter:
>>> imp = signal.unit_impulse(100, 'mid')
>>> b, a = signal.butter(4, 0.2)
>>> response = signal.lfilter(b, a, imp)
>>> import matplotlib.pyplot as plt
>>> plt.plot(np.arange(-50, 50), imp)
>>> plt.plot(np.arange(-50, 50), response)
>>> plt.margins(0.1, 0.1)
>>> plt.xlabel('Time [samples]')
>>> plt.ylabel('Amplitude')
>>> plt.grid(True)
>>> plt.show()
"""
out = zeros(shape, dtype)
shape = np.atleast_1d(shape)
if idx is None:
idx = (0,) * len(shape)
elif idx == 'mid':
idx = tuple(shape // 2)
elif not hasattr(idx, "__iter__"):
idx = (idx,) * len(shape)
out[idx] = 1
return out
| 21,039 | 29.895742 | 82 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/__init__.py |
"""
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-dimensional convolution.
correlate -- N-dimensional correlation.
fftconvolve -- N-dimensional convolution using the FFT.
convolve2d -- 2-dimensional convolution (more options).
correlate2d -- 2-dimensional correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
choose_conv_method -- Chooses faster of FFT and direct convolution methods.
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
cubic -- B-spline basis function of order 3.
quadratic -- B-spline basis function of order 2.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
cspline1d_eval -- Evaluate a cubic spline at the given points.
qspline1d_eval -- Evaluate a quadratic spline at the given points.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-dimensional order filter.
medfilt -- N-dimensional median filter.
medfilt2d -- 2-dimensional median filter (faster).
wiener -- N-dimensional wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-dimensional FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
deconvolve -- 1-d deconvolution using lfilter.
sosfilt -- 1-dimensional IIR digital linear filtering using
-- a second-order sections filter representation.
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
-- corresponds to the steady state of the step response.
sosfiltfilt -- A forward-backward filter for second-order sections.
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
resample_poly -- Resample using polyphase filtering method.
upfirdn -- Upsample, apply FIR filter, downsample.
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
bilinear_zpk -- Digital filter from an analog filter using
-- the bilinear transform.
findfreqs -- Find array of frequencies for computing filter response.
firls -- FIR filter design using least-squares error minimization.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response from TF coefficients.
freqs_zpk -- Analog filter frequency response from ZPK coefficients.
freqz -- Digital filter frequency response from TF coefficients.
freqz_zpk -- Digital filter frequency response from ZPK coefficients.
sosfreqz -- Digital filter frequency response for SOS format filter.
group_delay -- Digital filter group delay.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
minimum_phase -- Convert a linear phase FIR filter to minimum phase.
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
-- filter.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion for analog filter.
invresz -- Inverse partial fraction expansion for digital filter.
BadCoefficients -- Warning on badly conditioned filter coefficients
Lower-level filter design functions:
.. autosummary::
:toctree: generated/
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
band_stop_obj -- Band Stop Objective Function for order minimization.
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
cmplx_sort -- Sort roots based on magnitude.
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
lp2bp_zpk -- Transform a lowpass filter prototype to a bandpass filter.
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
lp2bs_zpk -- Transform a lowpass filter prototype to a bandstop filter.
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
lp2hp_zpk -- Transform a lowpass filter prototype to a highpass filter.
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
lp2lp_zpk -- Transform a lowpass filter prototype to a lowpass filter.
normalize -- Normalize polynomial representation of a transfer function.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
   bessel -- Bessel (no order selection available -- try buttord)
iirnotch -- Design second-order IIR notch digital filter.
iirpeak -- Design second-order IIR peak (resonant) digital filter.
Continuous-Time Linear Systems
==============================
.. autosummary::
:toctree: generated/
lti -- Continuous-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
lsim -- continuous-time simulation of output to linear system.
lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
impulse -- impulse response of linear, time-invariant (LTI) system.
impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
   step -- step response of continuous-time LTI system.
step2 -- like step, but `scipy.integrate.odeint` is used.
freqresp -- frequency response of a continuous-time LTI system.
bode -- Bode magnitude and phase data (continuous-time LTI).
Discrete-Time Linear Systems
============================
.. autosummary::
:toctree: generated/
dlti -- Discrete-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
dlsim -- simulation of output to a discrete-time linear system.
dimpulse -- impulse response of a discrete-time LTI system.
dstep -- step response of a discrete-time LTI system.
dfreqresp -- frequency response of a discrete-time LTI system.
dbode -- Bode magnitude and phase data (discrete-time LTI).
LTI Representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- transfer function to zero-pole-gain.
tf2sos -- transfer function to second-order sections.
tf2ss -- transfer function to state-space.
zpk2tf -- zero-pole-gain to transfer function.
zpk2sos -- zero-pole-gain to second-order sections.
zpk2ss -- zero-pole-gain to state-space.
   ss2tf -- state-space to transfer function.
   ss2zpk -- state-space to zero-pole-gain.
sos2zpk -- second-order sections to zero-pole-gain.
sos2tf -- second-order sections to transfer function.
cont2discrete -- continuous-time to discrete-time LTI conversion.
place_poles -- pole placement.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid
max_len_seq -- Maximum length sequence
sawtooth -- Periodic sawtooth
square -- Square wave
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
unit_impulse -- Discrete unit impulse
Window functions
================
Most window functions are available in the `scipy.signal.windows` namespace,
but we list them here for convenience:
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
windows.barthann -- Bartlett-Hann window
windows.bartlett -- Bartlett window
windows.blackman -- Blackman window
windows.blackmanharris -- Minimum 4-term Blackman-Harris window
windows.bohman -- Bohman window
windows.boxcar -- Boxcar window
windows.chebwin -- Dolph-Chebyshev window
windows.cosine -- Cosine window
windows.dpss -- Discrete prolate spheroidal sequences
windows.exponential -- Exponential window
windows.flattop -- Flat top window
windows.gaussian -- Gaussian window
windows.general_cosine -- Generalized Cosine window
windows.general_gaussian -- Generalized Gaussian window
windows.general_hamming -- Generalized Hamming window
windows.hamming -- Hamming window
windows.hann -- Hann window
windows.hanning -- Hann window
windows.kaiser -- Kaiser window
windows.nuttall -- Nuttall's minimum 4-term Blackman-Harris window
windows.parzen -- Parzen window
windows.slepian -- Slepian window
windows.triang -- Triangular window
windows.tukey -- Tukey window
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- compute scaling function and wavelet from coefficients
   daub -- return coefficients of the Daubechies low-pass filter
   morlet -- Complex Morlet wavelet.
   qmf -- return quadrature mirror filter from low-pass
   ricker -- return Ricker wavelet
cwt -- perform continuous wavelet transform
Peak finding
============
.. autosummary::
:toctree: generated/
argrelmin -- Calculate the relative minima of data
argrelmax -- Calculate the relative maxima of data
argrelextrema -- Calculate the relative extrema of data
find_peaks -- Find a subset of peaks inside a signal.
find_peaks_cwt -- Find peaks in a 1-D array with wavelet transformation.
peak_prominences -- Calculate the prominence of each peak in a signal.
peak_widths -- Calculate the width of each peak in a signal.
Spectral Analysis
=================
.. autosummary::
:toctree: generated/
periodogram -- Compute a (modified) periodogram
welch -- Compute a periodogram using Welch's method
csd -- Compute the cross spectral density, using Welch's method
coherence -- Compute the magnitude squared coherence, using Welch's method
spectrogram -- Compute the spectrogram
lombscargle -- Computes the Lomb-Scargle periodogram
vectorstrength -- Computes the vector strength
stft -- Compute the Short Time Fourier Transform
istft -- Compute the Inverse Short Time Fourier Transform
check_COLA -- Check the COLA constraint for iSTFT reconstruction
"""
from __future__ import division, print_function, absolute_import
from . import sigtools, windows
from .waveforms import *
from ._max_len_seq import max_len_seq
from ._upfirdn import upfirdn
# The spline module (a C extension) provides:
# cspline2d, qspline2d, sepfir2d, symiirord1, symiirord2
from .spline import *
from .bsplines import *
from .filter_design import *
from .fir_filter_design import *
from .ltisys import *
from .lti_conversion import *
from .signaltools import *
from ._savitzky_golay import savgol_coeffs, savgol_filter
from .spectral import *
from .wavelets import *
from ._peak_finding import *
from .windows import get_window # keep this one in signal namespace
# deal with * -> windows.* doc-only soft-deprecation
deprecated_windows = ('boxcar', 'triang', 'parzen', 'bohman', 'blackman',
'nuttall', 'blackmanharris', 'flattop', 'bartlett',
'barthann', 'hamming', 'kaiser', 'gaussian',
'general_gaussian', 'chebwin', 'slepian', 'cosine',
'hann', 'exponential', 'tukey')
# backward compatibility imports for actually deprecated windows not
# in the above list
from .windows import hanning
def deco(name):
f = getattr(windows, name)
# Add deprecation to docstring
def wrapped(*args, **kwargs):
return f(*args, **kwargs)
wrapped.__name__ = name
if f.__doc__ is not None:
lines = f.__doc__.splitlines()
for li, line in enumerate(lines):
if line.strip() == 'Parameters':
break
else:
raise RuntimeError('dev error: badly formatted doc')
spacing = ' ' * line.find('P')
lines.insert(li, ('{0}.. warning:: scipy.signal.{1} is deprecated,\n'
'{0} use scipy.signal.windows.{1} '
'instead.\n'.format(spacing, name)))
wrapped.__doc__ = '\n'.join(lines)
return wrapped
for name in deprecated_windows:
locals()[name] = deco(name)
del deprecated_windows, name, deco
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 15,629 | 40.023622 | 82 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/_upfirdn.py |
# Code adapted from "upfirdn" python library with permission:
#
# Copyright (c) 2009, Motorola, Inc
#
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Motorola nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from ._upfirdn_apply import _output_len, _apply
__all__ = ['upfirdn', '_output_len']
def _pad_h(h, up):
"""Store coefficients in a transposed, flipped arrangement.
    For example, suppose ``up`` is 3, and the
input number of coefficients is 10, represented as h[0], ..., h[9].
Then the internal buffer will look like this::
h[9], h[6], h[3], h[0], // flipped phase 0 coefs
0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded)
0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded)
"""
h_padlen = len(h) + (-len(h) % up)
h_full = np.zeros(h_padlen, h.dtype)
h_full[:len(h)] = h
h_full = h_full.reshape(-1, up).T[:, ::-1].ravel()
return h_full
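# A minimal sanity check of the layout described in the docstring above,
# assuming up=3 and a 10-tap filter as in that example. The helper name
# `_demo_pad_h` is ours, for illustration only, not part of the original
# module.
def _demo_pad_h():
    h = np.arange(10, dtype=float)          # h[0], ..., h[9]
    phases = _pad_h(h, up=3).reshape(3, -1)
    # Each row is one flipped, zero-padded polyphase branch:
    #   row 0 -> [9., 6., 3., 0.]
    #   row 1 -> [0., 7., 4., 1.]
    #   row 2 -> [0., 8., 5., 2.]
    return phases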
class _UpFIRDn(object):
def __init__(self, h, x_dtype, up, down):
"""Helper for resampling"""
h = np.asarray(h)
if h.ndim != 1 or h.size == 0:
raise ValueError('h must be 1D with non-zero length')
self._output_type = np.result_type(h.dtype, x_dtype, np.float32)
h = np.asarray(h, self._output_type)
self._up = int(up)
self._down = int(down)
if self._up < 1 or self._down < 1:
raise ValueError('Both up and down must be >= 1')
# This both transposes, and "flips" each phase for filtering
self._h_trans_flip = _pad_h(h, self._up)
self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip)
def apply_filter(self, x, axis=-1):
"""Apply the prepared filter to the specified axis of a nD signal x"""
output_len = _output_len(len(self._h_trans_flip), x.shape[axis],
self._up, self._down)
output_shape = np.asarray(x.shape)
output_shape[axis] = output_len
out = np.zeros(output_shape, dtype=self._output_type, order='C')
axis = axis % x.ndim
_apply(np.asarray(x, self._output_type),
self._h_trans_flip, out,
self._up, self._down, axis)
return out
def upfirdn(h, x, up=1, down=1, axis=-1):
"""Upsample, FIR filter, and downsample
Parameters
----------
h : array_like
1-dimensional FIR (finite-impulse response) filter coefficients.
x : array_like
Input signal array.
up : int, optional
Upsampling rate. Default is 1.
down : int, optional
Downsampling rate. Default is 1.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
Returns
-------
y : ndarray
The output signal array. Dimensions will be the same as `x` except
for along `axis`, which will change size according to the `h`,
`up`, and `down` parameters.
Notes
-----
The algorithm is an implementation of the block diagram shown on page 129
of the Vaidyanathan text [1]_ (Figure 4.3-8d).
.. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,
Prentice Hall, 1993.
The direct approach of upsampling by factor of P with zero insertion,
FIR filtering of length ``N``, and downsampling by factor of Q is
O(N*Q) per output sample. The polyphase implementation used here is
O(N/P).
.. versionadded:: 0.18
Examples
--------
Simple operations:
>>> from scipy.signal import upfirdn
>>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter
array([ 1., 2., 3., 2., 1.])
>>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion
array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.])
>>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold
array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.])
>>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation
array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ])
>>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3
array([ 0., 3., 6., 9.])
>>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3
array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. ])
Apply a single filter to multiple signals:
>>> x = np.reshape(np.arange(8), (4, 2))
>>> x
array([[0, 1],
[2, 3],
[4, 5],
[6, 7]])
Apply along the last dimension of ``x``:
>>> h = [1, 1]
>>> upfirdn(h, x, 2)
array([[ 0., 0., 1., 1.],
[ 2., 2., 3., 3.],
[ 4., 4., 5., 5.],
[ 6., 6., 7., 7.]])
Apply along the 0th dimension of ``x``:
>>> upfirdn(h, x, 2, axis=0)
array([[ 0., 1.],
[ 0., 1.],
[ 2., 3.],
[ 2., 3.],
[ 4., 5.],
[ 4., 5.],
[ 6., 7.],
[ 6., 7.]])
"""
x = np.asarray(x)
ufd = _UpFIRDn(h, x.dtype, up, down)
# This is equivalent to (but faster than) using np.apply_along_axis
return ufd.apply_filter(x, axis)
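# A hedged cross-check of the polyphase claim in the Notes above: for 1-D
# input, upfirdn should agree with the naive zero-insertion -> FIR filter ->
# downsample pipeline. `_naive_upfirdn_check` is an illustrative helper of
# ours, not part of scipy's public API.
def _naive_upfirdn_check(h, x, up=1, down=1):
    h = np.asarray(h, dtype=float)
    x = np.asarray(x, dtype=float)
    x_up = np.zeros(len(x) * up)
    x_up[::up] = x                          # upsample by zero insertion
    naive = np.convolve(x_up, h)[::down]    # FIR filter, then downsample
    fast = upfirdn(h, x, up=up, down=down)
    # The two paths may differ by a few trailing zeros, so compare only the
    # overlapping prefix.
    n = min(len(naive), len(fast))
    return np.allclose(naive[:n], fast[:n])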
| 6,587 | 34.804348 | 78 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/_arraytools.py
|
"""
Functions for acting on a axis of an array.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
"""Take a slice along axis 'axis' from 'a'.
Parameters
----------
a : numpy.ndarray
The array to be sliced.
start, stop, step : int or None
The slice parameters.
axis : int, optional
The axis of `a` to be sliced.
Examples
--------
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> axis_slice(a, start=0, stop=1, axis=1)
array([[1],
[4],
[7]])
>>> axis_slice(a, start=1, axis=0)
array([[4, 5, 6],
[7, 8, 9]])
Notes
-----
The keyword arguments start, stop and step are used by calling
slice(start, stop, step). This implies axis_slice() does not
    handle its arguments exactly the same as indexing. To select
a single index k, for example, use
axis_slice(a, start=k, stop=k+1)
In this case, the length of the axis 'axis' in the result will
be 1; the trivial dimension is not removed. (Use numpy.squeeze()
to remove trivial axes.)
"""
a_slice = [slice(None)] * a.ndim
a_slice[axis] = slice(start, stop, step)
    b = a[tuple(a_slice)]
return b
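# A small illustration of the single-index trick from the Notes above; the
# name `_demo_axis_slice` is ours, not part of the module.
def _demo_axis_slice():
    a = np.arange(12).reshape(3, 4)
    k = 2
    # Select column k while keeping the trivial axis: shape is (3, 1).
    col = axis_slice(a, start=k, stop=k + 1, axis=1)
    return col.shape == (3, 1)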
def axis_reverse(a, axis=-1):
"""Reverse the 1-d slices of `a` along axis `axis`.
Returns axis_slice(a, step=-1, axis=axis).
"""
return axis_slice(a, step=-1, axis=axis)
def odd_ext(x, n, axis=-1):
"""
Odd extension at the boundaries of an array
Generate a new ndarray by making an odd extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import odd_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> odd_ext(a, 2)
array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
[-4, -1, 0, 1, 4, 9, 16, 23, 28]])
Odd extension is a "180 degree rotation" at the endpoints of the original
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = odd_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_end = axis_slice(x, start=0, stop=1, axis=axis)
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((2 * left_end - left_ext,
x,
2 * right_end - right_ext),
axis=axis)
return ext
def even_ext(x, n, axis=-1):
"""
Even extension at the boundaries of an array
Generate a new ndarray by making an even extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import even_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> even_ext(a, 2)
array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3],
[ 4, 1, 0, 1, 4, 9, 16, 9, 4]])
Even extension is a "mirror image" at the boundaries of the original array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = even_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def const_ext(x, n, axis=-1):
"""
Constant extension at the boundaries of an array
Generate a new ndarray that is a constant extension of `x` along an axis.
The extension repeats the values at the first and last element of
the axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import const_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> const_ext(a, 2)
array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5],
[ 0, 0, 0, 1, 4, 9, 16, 16, 16]])
Constant extension continues with the same values as the endpoints of the
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = const_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
left_end = axis_slice(x, start=0, stop=1, axis=axis)
ones_shape = [1] * x.ndim
ones_shape[axis] = n
ones = np.ones(ones_shape, dtype=x.dtype)
left_ext = ones * left_end
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = ones * right_end
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def zero_ext(x, n, axis=-1):
"""
Zero padding at the boundaries of an array
Generate a new ndarray that is a zero padded extension of `x` along
an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the
axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import zero_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> zero_ext(a, 2)
array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0],
[ 0, 0, 0, 1, 4, 9, 16, 0, 0]])
"""
if n < 1:
return x
zeros_shape = list(x.shape)
zeros_shape[axis] = n
zeros = np.zeros(zeros_shape, dtype=x.dtype)
ext = np.concatenate((zeros, x, zeros), axis=axis)
return ext
| 7,553 | 29.959016 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/_savitzky_golay.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import lstsq
from math import factorial
from scipy.ndimage import convolve1d
from ._arraytools import axis_slice
def savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None,
use="conv"):
"""Compute the coefficients for a 1-d Savitzky-Golay FIR filter.
Parameters
----------
window_length : int
The length of the filter window (i.e. the number of coefficients).
`window_length` must be an odd positive integer.
polyorder : int
The order of the polynomial used to fit the samples.
`polyorder` must be less than `window_length`.
deriv : int, optional
The order of the derivative to compute. This must be a
nonnegative integer. The default is 0, which means to filter
the data without differentiating.
delta : float, optional
The spacing of the samples to which the filter will be applied.
This is only used if deriv > 0.
pos : int or None, optional
If pos is not None, it specifies evaluation position within the
window. The default is the middle of the window.
use : str, optional
Either 'conv' or 'dot'. This argument chooses the order of the
coefficients. The default is 'conv', which means that the
coefficients are ordered to be used in a convolution. With
use='dot', the order is reversed, so the filter is applied by
dotting the coefficients with the data set.
Returns
-------
coeffs : 1-d ndarray
The filter coefficients.
References
----------
A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by
Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8),
pp 1627-1639.
See Also
--------
savgol_filter
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.signal import savgol_coeffs
>>> savgol_coeffs(5, 2)
array([-0.08571429, 0.34285714, 0.48571429, 0.34285714, -0.08571429])
>>> savgol_coeffs(5, 2, deriv=1)
array([ 2.00000000e-01, 1.00000000e-01, 2.00607895e-16,
-1.00000000e-01, -2.00000000e-01])
Note that use='dot' simply reverses the coefficients.
>>> savgol_coeffs(5, 2, pos=3)
array([ 0.25714286, 0.37142857, 0.34285714, 0.17142857, -0.14285714])
>>> savgol_coeffs(5, 2, pos=3, use='dot')
array([-0.14285714, 0.17142857, 0.34285714, 0.37142857, 0.25714286])
`x` contains data from the parabola x = t**2, sampled at
t = -1, 0, 1, 2, 3. `c` holds the coefficients that will compute the
derivative at the last position. When dotted with `x` the result should
be 6.
>>> x = np.array([1, 0, 1, 4, 9])
>>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot')
>>> c.dot(x)
6.0000000000000018
"""
# An alternative method for finding the coefficients when deriv=0 is
# t = np.arange(window_length)
# unit = (t == pos).astype(int)
# coeffs = np.polyval(np.polyfit(t, unit, polyorder), t)
# The method implemented here is faster.
# To recreate the table of sample coefficients shown in the chapter on
    # the Savitzky-Golay filter in the Numerical Recipes book, use
# window_length = nL + nR + 1
# pos = nL + 1
# c = savgol_coeffs(window_length, M, pos=pos, use='dot')
if polyorder >= window_length:
raise ValueError("polyorder must be less than window_length.")
halflen, rem = divmod(window_length, 2)
if rem == 0:
raise ValueError("window_length must be odd.")
if pos is None:
pos = halflen
if not (0 <= pos < window_length):
raise ValueError("pos must be nonnegative and less than "
"window_length.")
if use not in ['conv', 'dot']:
raise ValueError("`use` must be 'conv' or 'dot'")
# Form the design matrix A. The columns of A are powers of the integers
# from -pos to window_length - pos - 1. The powers (i.e. rows) range
    # from 0 to polyorder. (That is, A is a Vandermonde matrix, but not
# necessarily square.)
x = np.arange(-pos, window_length - pos, dtype=float)
if use == "conv":
# Reverse so that result can be used in a convolution.
x = x[::-1]
order = np.arange(polyorder + 1).reshape(-1, 1)
A = x ** order
# y determines which order derivative is returned.
y = np.zeros(polyorder + 1)
# The coefficient assigned to y[deriv] scales the result to take into
# account the order of the derivative and the sample spacing.
y[deriv] = factorial(deriv) / (delta ** deriv)
# Find the least-squares solution of A*c = y
coeffs, _, _, _ = lstsq(A, y)
return coeffs
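# A hedged sketch of the alternative polyfit-based construction mentioned in
# the comments above (deriv=0 only). `_savgol_coeffs_polyfit` is an
# illustrative name of ours; by the symmetry of the least-squares hat matrix
# it should agree with savgol_coeffs(..., use='dot') up to round-off.
def _savgol_coeffs_polyfit(window_length, polyorder, pos=None):
    t = np.arange(window_length)
    if pos is None:
        pos = window_length // 2
    unit = (t == pos).astype(float)
    return np.polyval(np.polyfit(t, unit, polyorder), t)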
def _polyder(p, m):
"""Differentiate polynomials represented with coefficients.
p must be a 1D or 2D array. In the 2D case, each column gives
the coefficients of a polynomial; the first row holds the coefficients
associated with the highest power. m must be a nonnegative integer.
(numpy.polyder doesn't handle the 2D case.)
"""
if m == 0:
result = p
else:
n = len(p)
if n <= m:
result = np.zeros_like(p[:1, ...])
else:
dp = p[:-m].copy()
for k in range(m):
rng = np.arange(n - k - 1, m - k - 1, -1)
dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1))
result = dp
return result
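# A quick sanity sketch (ours, not part of the module): on a 2-D coefficient
# array, _polyder should act column-wise the way numpy.polyder acts on 1-D
# arrays.
def _demo_polyder_2d():
    p = np.array([[1., 2.],
                  [3., 4.],
                  [5., 6.]])    # columns: x**2 + 3x + 5 and 2x**2 + 4x + 6
    ref = np.column_stack([np.polyder(p[:, j], 1) for j in range(p.shape[1])])
    return np.allclose(_polyder(p, 1), ref)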
def _fit_edge(x, window_start, window_stop, interp_start, interp_stop,
axis, polyorder, deriv, delta, y):
"""
Given an n-d array `x` and the specification of a slice of `x` from
    `window_start` to `window_stop` along `axis`, fit an interpolating
    polynomial to each 1-d slice, and evaluate that polynomial over the slice
from `interp_start` to `interp_stop`. Put the result into the
corresponding slice of `y`.
"""
# Get the edge into a (window_length, -1) array.
x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis)
if axis == 0 or axis == -x.ndim:
xx_edge = x_edge
swapped = False
else:
xx_edge = x_edge.swapaxes(axis, 0)
swapped = True
xx_edge = xx_edge.reshape(xx_edge.shape[0], -1)
# Fit the edges. poly_coeffs has shape (polyorder + 1, -1),
# where '-1' is the same as in xx_edge.
poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start),
xx_edge, polyorder)
if deriv > 0:
poly_coeffs = _polyder(poly_coeffs, deriv)
# Compute the interpolated values for the edge.
i = np.arange(interp_start - window_start, interp_stop - window_start)
values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / (delta ** deriv)
# Now put the values into the appropriate slice of y.
# First reshape values to match y.
shp = list(y.shape)
shp[0], shp[axis] = shp[axis], shp[0]
values = values.reshape(interp_stop - interp_start, *shp[1:])
if swapped:
values = values.swapaxes(0, axis)
# Get a view of the data to be replaced by values.
y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis)
y_edge[...] = values
def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y):
"""
Use polynomial interpolation of x at the low and high ends of the axis
to fill in the halflen values in y.
This function just calls _fit_edge twice, once for each end of the axis.
"""
halflen = window_length // 2
_fit_edge(x, 0, window_length, 0, halflen, axis,
polyorder, deriv, delta, y)
n = x.shape[axis]
_fit_edge(x, n - window_length, n, n - halflen, n, axis,
polyorder, deriv, delta, y)
def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0,
axis=-1, mode='interp', cval=0.0):
""" Apply a Savitzky-Golay filter to an array.
This is a 1-d filter. If `x` has dimension greater than 1, `axis`
determines the axis along which the filter is applied.
Parameters
----------
x : array_like
The data to be filtered. If `x` is not a single or double precision
floating point array, it will be converted to type `numpy.float64`
before filtering.
window_length : int
The length of the filter window (i.e. the number of coefficients).
`window_length` must be a positive odd integer. If `mode` is 'interp',
`window_length` must be less than or equal to the size of `x`.
polyorder : int
The order of the polynomial used to fit the samples.
`polyorder` must be less than `window_length`.
deriv : int, optional
The order of the derivative to compute. This must be a
nonnegative integer. The default is 0, which means to filter
the data without differentiating.
delta : float, optional
The spacing of the samples to which the filter will be applied.
This is only used if deriv > 0. Default is 1.0.
axis : int, optional
The axis of the array `x` along which the filter is to be applied.
Default is -1.
mode : str, optional
Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This
determines the type of extension to use for the padded signal to
which the filter is applied. When `mode` is 'constant', the padding
value is given by `cval`. See the Notes for more details on 'mirror',
'constant', 'wrap', and 'nearest'.
When the 'interp' mode is selected (the default), no extension
is used. Instead, a degree `polyorder` polynomial is fit to the
last `window_length` values of the edges, and this polynomial is
used to evaluate the last `window_length // 2` output values.
cval : scalar, optional
Value to fill past the edges of the input if `mode` is 'constant'.
Default is 0.0.
Returns
-------
y : ndarray, same shape as `x`
The filtered data.
See Also
--------
savgol_coeffs
Notes
-----
Details on the `mode` options:
'mirror':
Repeats the values at the edges in reverse order. The value
closest to the edge is not included.
'nearest':
The extension contains the nearest input value.
'constant':
The extension contains the value given by the `cval` argument.
'wrap':
The extension contains the values from the other end of the array.
For example, if the input is [1, 2, 3, 4, 5, 6, 7, 8], and
`window_length` is 7, the following shows the extended data for
the various `mode` options (assuming `cval` is 0)::
mode | Ext | Input | Ext
-----------+---------+------------------------+---------
'mirror' | 4 3 2 | 1 2 3 4 5 6 7 8 | 7 6 5
'nearest' | 1 1 1 | 1 2 3 4 5 6 7 8 | 8 8 8
'constant' | 0 0 0 | 1 2 3 4 5 6 7 8 | 0 0 0
'wrap' | 6 7 8 | 1 2 3 4 5 6 7 8 | 1 2 3
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.signal import savgol_filter
>>> np.set_printoptions(precision=2) # For compact display.
>>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9])
Filter with a window length of 5 and a degree 2 polynomial. Use
the defaults for all other parameters.
>>> savgol_filter(x, 5, 2)
array([ 1.66, 3.17, 3.54, 2.86, 0.66, 0.17, 1. , 4. , 9. ])
Note that the last five values in x are samples of a parabola, so
when mode='interp' (the default) is used with polyorder=2, the last
three values are unchanged. Compare that to, for example,
`mode='nearest'`:
>>> savgol_filter(x, 5, 2, mode='nearest')
array([ 1.74, 3.03, 3.54, 2.86, 0.66, 0.17, 1. , 4.6 , 7.97])
"""
if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]:
raise ValueError("mode must be 'mirror', 'constant', 'nearest' "
"'wrap' or 'interp'.")
x = np.asarray(x)
# Ensure that x is either single or double precision floating point.
if x.dtype != np.float64 and x.dtype != np.float32:
x = x.astype(np.float64)
coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)
if mode == "interp":
if window_length > x.size:
raise ValueError("If mode is 'interp', window_length must be less "
"than or equal to the size of x.")
# Do not pad. Instead, for the elements within `window_length // 2`
# of the ends of the sequence, use the polynomial that is fitted to
# the last `window_length` elements.
y = convolve1d(x, coeffs, axis=axis, mode="constant")
_fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y)
else:
# Any mode other than 'interp' is passed on to ndimage.convolve1d.
y = convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval)
return y
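# A hedged usage sketch (ours): with deriv=1 and the sample spacing passed
# via delta, savgol_filter estimates dy/dt instead of smoothing y. Since a
# polyorder-2 fit reproduces a quadratic exactly, the derivative estimate
# below should match 2*t up to round-off.
def _demo_savgol_derivative():
    t = np.arange(0.0, 1.0, 0.1)
    y = t ** 2
    dy = savgol_filter(y, window_length=5, polyorder=2, deriv=1, delta=0.1)
    return np.allclose(dy, 2 * t)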
| 13,193 | 36.697143 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_array_tools.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_equal
from pytest import raises as assert_raises
from scipy.signal._arraytools import (axis_slice, axis_reverse,
odd_ext, even_ext, const_ext, zero_ext)
class TestArrayTools(object):
def test_axis_slice(self):
a = np.arange(12).reshape(3, 4)
s = axis_slice(a, start=0, stop=1, axis=0)
assert_array_equal(s, a[0:1, :])
s = axis_slice(a, start=-1, axis=0)
assert_array_equal(s, a[-1:, :])
s = axis_slice(a, start=0, stop=1, axis=1)
assert_array_equal(s, a[:, 0:1])
s = axis_slice(a, start=-1, axis=1)
assert_array_equal(s, a[:, -1:])
s = axis_slice(a, start=0, step=2, axis=0)
assert_array_equal(s, a[::2, :])
s = axis_slice(a, start=0, step=2, axis=1)
assert_array_equal(s, a[:, ::2])
def test_axis_reverse(self):
a = np.arange(12).reshape(3, 4)
r = axis_reverse(a, axis=0)
assert_array_equal(r, a[::-1, :])
r = axis_reverse(a, axis=1)
assert_array_equal(r, a[:, ::-1])
def test_odd_ext(self):
a = np.array([[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5]])
odd = odd_ext(a, 2, axis=1)
expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
[11, 10, 9, 8, 7, 6, 5, 4, 3]])
assert_array_equal(odd, expected)
odd = odd_ext(a, 1, axis=0)
expected = np.array([[-7, -4, -1, 2, 5],
[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5],
[17, 14, 11, 8, 5]])
assert_array_equal(odd, expected)
assert_raises(ValueError, odd_ext, a, 2, axis=0)
assert_raises(ValueError, odd_ext, a, 5, axis=1)
def test_even_ext(self):
a = np.array([[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5]])
even = even_ext(a, 2, axis=1)
expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3],
[7, 8, 9, 8, 7, 6, 5, 6, 7]])
assert_array_equal(even, expected)
even = even_ext(a, 1, axis=0)
expected = np.array([[9, 8, 7, 6, 5],
[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5],
[1, 2, 3, 4, 5]])
assert_array_equal(even, expected)
assert_raises(ValueError, even_ext, a, 2, axis=0)
assert_raises(ValueError, even_ext, a, 5, axis=1)
def test_const_ext(self):
a = np.array([[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5]])
const = const_ext(a, 2, axis=1)
expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5],
[9, 9, 9, 8, 7, 6, 5, 5, 5]])
assert_array_equal(const, expected)
const = const_ext(a, 1, axis=0)
expected = np.array([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5],
[9, 8, 7, 6, 5]])
assert_array_equal(const, expected)
def test_zero_ext(self):
a = np.array([[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5]])
zero = zero_ext(a, 2, axis=1)
expected = np.array([[0, 0, 1, 2, 3, 4, 5, 0, 0],
[0, 0, 9, 8, 7, 6, 5, 0, 0]])
assert_array_equal(zero, expected)
zero = zero_ext(a, 1, axis=0)
expected = np.array([[0, 0, 0, 0, 0],
[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5],
[0, 0, 0, 0, 0]])
assert_array_equal(zero, expected)
| 3,706 | 31.517544 | 64 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_dltisys.py
|
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
# April 4, 2011
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_equal,
assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_, assert_almost_equal)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy.signal import (dlsim, dstep, dimpulse, tf2zpk, lti, dlti,
StateSpace, TransferFunction, ZerosPolesGain,
dfreqresp, dbode, BadCoefficients)
class TestDLTI(object):
def test_dlsim(self):
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
c = np.asarray([[0.1, 0.3]])
d = np.asarray([[0.0, -0.1, 0.0]])
dt = 0.5
# Create an input matrix with inputs down the columns (3 cols) and its
# respective time input vector
u = np.hstack((np.asmatrix(np.linspace(0, 4.0, num=5)).transpose(),
0.01 * np.ones((5, 1)),
-0.002 * np.ones((5, 1))))
t_in = np.linspace(0, 2.0, num=5)
# Define the known result
yout_truth = np.asmatrix([-0.001,
-0.00073,
0.039446,
0.0915387,
0.13195948]).transpose()
xout_truth = np.asarray([[0, 0],
[0.0012, 0.0005],
[0.40233, 0.00071],
[1.163368, -0.079327],
[2.2402985, -0.3035679]])
tout, yout, xout = dlsim((a, b, c, d, dt), u, t_in)
assert_array_almost_equal(yout_truth, yout)
assert_array_almost_equal(xout_truth, xout)
assert_array_almost_equal(t_in, tout)
# Make sure input with single-dimension doesn't raise error
dlsim((1, 2, 3), 4)
# Interpolated control - inputs should have different time steps
# than the discrete model uses internally
u_sparse = u[[0, 4], :]
t_sparse = np.asarray([0.0, 2.0])
tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, t_sparse)
assert_array_almost_equal(yout_truth, yout)
assert_array_almost_equal(xout_truth, xout)
assert_equal(len(tout), yout.shape[0])
# Transfer functions (assume dt = 0.5)
num = np.asarray([1.0, -0.1])
den = np.asarray([0.3, 1.0, 0.2])
yout_truth = np.asmatrix([0.0,
0.0,
3.33333333333333,
-4.77777777777778,
23.0370370370370]).transpose()
# Assume use of the first column of the control input built earlier
tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in)
assert_array_almost_equal(yout, yout_truth)
assert_array_almost_equal(t_in, tout)
# Retest the same with a 1-D input vector
uflat = np.asarray(u[:, 0])
uflat = uflat.reshape((5,))
tout, yout = dlsim((num, den, 0.5), uflat, t_in)
assert_array_almost_equal(yout, yout_truth)
assert_array_almost_equal(t_in, tout)
# zeros-poles-gain representation
zd = np.array([0.5, -0.5])
pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
k = 1.0
yout_truth = np.asmatrix([0.0, 1.0, 2.0, 2.25, 2.5]).transpose()
tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in)
assert_array_almost_equal(yout, yout_truth)
assert_array_almost_equal(t_in, tout)
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dlsim, system, u)
def test_dstep(self):
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
c = np.asarray([[0.1, 0.3]])
d = np.asarray([[0.0, -0.1, 0.0]])
dt = 0.5
# Because b.shape[1] == 3, dstep should result in a tuple of three
# result vectors
yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956,
-0.036324, -0.093318, -0.15782348,
-0.226628324, -0.2969374948]),
np.asarray([-0.1, -0.075, -0.058, -0.04815,
-0.04453, -0.0461895, -0.0521812,
-0.061588875, -0.073549579,
-0.08727047595]),
np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239,
0.009081, 0.0233295, 0.03945587,
0.056657081, 0.0742343737]))
tout, yout = dstep((a, b, c, d, dt), n=10)
assert_equal(len(yout), 3)
for i in range(0, len(yout)):
assert_equal(yout[i].shape[0], 10)
assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i])
# Check that the other two inputs (tf, zpk) will work as well
tfin = ([1.0], [1.0, 1.0], 0.5)
yout_tfstep = np.asarray([0.0, 1.0, 0.0])
tout, yout = dstep(tfin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
tout, yout = dstep(zpkin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dstep, system)
def test_dimpulse(self):
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
c = np.asarray([[0.1, 0.3]])
d = np.asarray([[0.0, -0.1, 0.0]])
dt = 0.5
# Because b.shape[1] == 3, dimpulse should result in a tuple of three
# result vectors
yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084,
-0.045884, -0.056994, -0.06450548,
-0.068804844, -0.0703091708]),
np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362,
-0.0016595, -0.0059917, -0.009407675,
-0.011960704, -0.01372089695]),
np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771,
0.011471, 0.0142485, 0.01612637,
0.017201211, 0.0175772927]))
tout, yout = dimpulse((a, b, c, d, dt), n=10)
assert_equal(len(yout), 3)
for i in range(0, len(yout)):
assert_equal(yout[i].shape[0], 10)
assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i])
# Check that the other two inputs (tf, zpk) will work as well
tfin = ([1.0], [1.0, 1.0], 0.5)
yout_tfimpulse = np.asarray([0.0, 1.0, -1.0])
tout, yout = dimpulse(tfin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
tout, yout = dimpulse(zpkin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dimpulse, system)
def test_dlsim_trivial(self):
a = np.array([[0.0]])
b = np.array([[0.0]])
c = np.array([[0.0]])
d = np.array([[0.0]])
n = 5
u = np.zeros(n).reshape(-1, 1)
tout, yout, xout = dlsim((a, b, c, d, 1), u)
assert_array_equal(tout, np.arange(float(n)))
assert_array_equal(yout, np.zeros((n, 1)))
assert_array_equal(xout, np.zeros((n, 1)))
def test_dlsim_simple1d(self):
a = np.array([[0.5]])
b = np.array([[0.0]])
c = np.array([[1.0]])
d = np.array([[0.0]])
n = 5
u = np.zeros(n).reshape(-1, 1)
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
assert_array_equal(tout, np.arange(float(n)))
expected = (0.5 ** np.arange(float(n))).reshape(-1, 1)
assert_array_equal(yout, expected)
assert_array_equal(xout, expected)
def test_dlsim_simple2d(self):
lambda1 = 0.5
lambda2 = 0.25
a = np.array([[lambda1, 0.0],
[0.0, lambda2]])
b = np.array([[0.0],
[0.0]])
c = np.array([[1.0, 0.0],
[0.0, 1.0]])
d = np.array([[0.0],
[0.0]])
n = 5
u = np.zeros(n).reshape(-1, 1)
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
assert_array_equal(tout, np.arange(float(n)))
# The analytical solution:
expected = (np.array([lambda1, lambda2]) **
np.arange(float(n)).reshape(-1, 1))
assert_array_equal(yout, expected)
assert_array_equal(xout, expected)
def test_more_step_and_impulse(self):
lambda1 = 0.5
lambda2 = 0.75
a = np.array([[lambda1, 0.0],
[0.0, lambda2]])
b = np.array([[1.0, 0.0],
[0.0, 1.0]])
c = np.array([[1.0, 1.0]])
d = np.array([[0.0, 0.0]])
n = 10
# Check a step response.
ts, ys = dstep((a, b, c, d, 1), n=n)
# Create the exact step response.
stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n))
stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n))
assert_allclose(ys[0][:, 0], stp0)
assert_allclose(ys[1][:, 0], stp1)
# Check an impulse response with an initial condition.
x0 = np.array([1.0, 1.0])
ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0)
# Create the exact impulse response.
imp = (np.array([lambda1, lambda2]) **
np.arange(-1, n + 1).reshape(-1, 1))
imp[0, :] = 0.0
# Analytical solution to impulse response
y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0)
y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0)
assert_allclose(yi[0][:, 0], y0)
assert_allclose(yi[1][:, 0], y1)
# Check that dt=0.1, n=3 gives 3 time values.
system = ([1.0], [1.0, -0.5], 0.1)
t, (y,) = dstep(system, n=3)
assert_allclose(t, [0, 0.1, 0.2])
assert_array_equal(y.T, [[0, 1.0, 1.5]])
t, (y,) = dimpulse(system, n=3)
assert_allclose(t, [0, 0.1, 0.2])
assert_array_equal(y.T, [[0, 1, 0.5]])
class TestDlti(object):
def test_dlti_instantiation(self):
# Test that lti can be instantiated.
dt = 0.05
# TransferFunction
s = dlti([1], [-1], dt=dt)
assert_(isinstance(s, TransferFunction))
assert_(isinstance(s, dlti))
assert_(not isinstance(s, lti))
assert_equal(s.dt, dt)
# ZerosPolesGain
s = dlti(np.array([]), np.array([-1]), 1, dt=dt)
assert_(isinstance(s, ZerosPolesGain))
assert_(isinstance(s, dlti))
assert_(not isinstance(s, lti))
assert_equal(s.dt, dt)
# StateSpace
s = dlti([1], [-1], 1, 3, dt=dt)
assert_(isinstance(s, StateSpace))
assert_(isinstance(s, dlti))
assert_(not isinstance(s, lti))
assert_equal(s.dt, dt)
# Number of inputs
assert_raises(ValueError, dlti, 1)
assert_raises(ValueError, dlti, 1, 1, 1, 1, 1)
class TestStateSpaceDisc(object):
def test_initialization(self):
# Check that all initializations work
dt = 0.05
s = StateSpace(1, 1, 1, 1, dt=dt)
s = StateSpace([1], [2], [3], [4], dt=dt)
s = StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]),
np.array([[1, 0]]), np.array([[0]]), dt=dt)
s = StateSpace(1, 1, 1, 1, dt=True)
def test_conversion(self):
# Check the conversion functions
s = StateSpace(1, 2, 3, 4, dt=0.05)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(StateSpace(s) is not s)
assert_(s.to_ss() is not s)
def test_properties(self):
# Test setters/getters for cross class properties.
# This implicitly tests to_tf() and to_zpk()
# Getters
s = StateSpace(1, 1, 1, 1, dt=0.05)
assert_equal(s.poles, [1])
assert_equal(s.zeros, [0])
class TestTransferFunction(object):
def test_initialization(self):
# Check that all initializations work
dt = 0.05
s = TransferFunction(1, 1, dt=dt)
s = TransferFunction([1], [2], dt=dt)
s = TransferFunction(np.array([1]), np.array([2]), dt=dt)
s = TransferFunction(1, 1, dt=True)
def test_conversion(self):
# Check the conversion functions
s = TransferFunction([1, 0], [1, -1], dt=0.05)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(TransferFunction(s) is not s)
assert_(s.to_tf() is not s)
def test_properties(self):
# Test setters/getters for cross class properties.
# This implicitly tests to_ss() and to_zpk()
# Getters
s = TransferFunction([1, 0], [1, -1], dt=0.05)
assert_equal(s.poles, [1])
assert_equal(s.zeros, [0])
class TestZerosPolesGain(object):
def test_initialization(self):
# Check that all initializations work
dt = 0.05
s = ZerosPolesGain(1, 1, 1, dt=dt)
s = ZerosPolesGain([1], [2], 1, dt=dt)
s = ZerosPolesGain(np.array([1]), np.array([2]), 1, dt=dt)
s = ZerosPolesGain(1, 1, 1, dt=True)
def test_conversion(self):
# Check the conversion functions
s = ZerosPolesGain(1, 2, 3, dt=0.05)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(ZerosPolesGain(s) is not s)
assert_(s.to_zpk() is not s)
class Test_dfreqresp(object):
def test_manual(self):
# Test dfreqresp() real part calculation (manual sanity check).
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
system = TransferFunction(1, [1, -0.2], dt=0.1)
w = [0.1, 1, 10]
w, H = dfreqresp(system, w=w)
# test real
expected_re = [1.2383, 0.4130, -0.7553]
assert_almost_equal(H.real, expected_re, decimal=4)
# test imag
expected_im = [-0.1555, -1.0214, 0.3955]
assert_almost_equal(H.imag, expected_im, decimal=4)
def test_auto(self):
# Test dfreqresp() real part calculation.
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
system = TransferFunction(1, [1, -0.2], dt=0.1)
w = [0.1, 1, 10, 100]
w, H = dfreqresp(system, w=w)
jw = np.exp(w * 1j)
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
# test real
expected_re = y.real
assert_almost_equal(H.real, expected_re)
# test imag
expected_im = y.imag
assert_almost_equal(H.imag, expected_im)
def test_freq_range(self):
        # Test that dfreqresp() finds a reasonable frequency range.
        # 1st order low-pass filter: H(z) = 1 / (z - 0.2)
        # Expected range is from 0 to pi.
system = TransferFunction(1, [1, -0.2], dt=0.1)
n = 10
expected_w = np.linspace(0, np.pi, 10, endpoint=False)
w, H = dfreqresp(system, n=n)
assert_almost_equal(w, expected_w)
def test_pole_one(self):
        # Test that dfreqresp() doesn't fail on a system with a pole at one.
        # discrete integrator, pole at z = 1: H(z) = 1 / (z - 1)
system = TransferFunction([1], [1, -1], dt=0.1)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, message="divide by zero")
sup.filter(RuntimeWarning, message="invalid value encountered")
w, H = dfreqresp(system, n=2)
assert_equal(w[0], 0.) # a fail would give not-a-number
def test_error(self):
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dfreqresp, system)
def test_from_state_space(self):
        # H(z) = 2 / (z**3 - 0.5 * z**2)
system_TF = dlti([2], [1, -0.5, 0, 0])
A = np.array([[0.5, 0, 0],
[1, 0, 0],
[0, 1, 0]])
B = np.array([[1, 0, 0]]).T
C = np.array([[0, 0, 2]])
D = 0
system_SS = dlti(A, B, C, D)
w = 10.0**np.arange(-3,0,.5)
with suppress_warnings() as sup:
sup.filter(BadCoefficients)
w1, H1 = dfreqresp(system_TF, w=w)
w2, H2 = dfreqresp(system_SS, w=w)
assert_almost_equal(H1, H2)
def test_from_zpk(self):
        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
system_ZPK = dlti([],[0.2],0.3)
system_TF = dlti(0.3, [1, -0.2])
w = [0.1, 1, 10, 100]
w1, H1 = dfreqresp(system_ZPK, w=w)
w2, H2 = dfreqresp(system_TF, w=w)
assert_almost_equal(H1, H2)
class Test_bode(object):
def test_manual(self):
        # Test dbode() magnitude calculation (manual sanity check).
        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
dt = 0.1
system = TransferFunction(0.3, [1, -0.2], dt=dt)
w = [0.1, 0.5, 1, np.pi]
w2, mag, phase = dbode(system, w=w)
# Test mag
expected_mag = [-8.5329, -8.8396, -9.6162, -12.0412]
assert_almost_equal(mag, expected_mag, decimal=4)
# Test phase
expected_phase = [-7.1575, -35.2814, -67.9809, -180.0000]
assert_almost_equal(phase, expected_phase, decimal=4)
# Test frequency
assert_equal(np.array(w) / dt, w2)
def test_auto(self):
        # Test dbode() magnitude calculation.
        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
w = np.array([0.1, 0.5, 1, np.pi])
w2, mag, phase = dbode(system, w=w)
jw = np.exp(w * 1j)
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
# Test mag
expected_mag = 20.0 * np.log10(abs(y))
assert_almost_equal(mag, expected_mag)
# Test phase
expected_phase = np.rad2deg(np.angle(y))
assert_almost_equal(phase, expected_phase)
def test_range(self):
        # Test that dbode() finds a reasonable frequency range.
        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
dt = 0.1
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
n = 10
        # Expected range is from 0 to pi/dt.
expected_w = np.linspace(0, np.pi, n, endpoint=False) / dt
w, mag, phase = dbode(system, n=n)
assert_almost_equal(w, expected_w)
def test_pole_one(self):
        # Test that dbode() doesn't fail on a system with a pole at one.
        # discrete integrator, pole at z = 1: H(z) = 1 / (z - 1)
system = TransferFunction([1], [1, -1], dt=0.1)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, message="divide by zero")
sup.filter(RuntimeWarning, message="invalid value encountered")
w, mag, phase = dbode(system, n=2)
assert_equal(w[0], 0.) # a fail would give not-a-number
def test_imaginary(self):
        # dbode() should not fail on a system with pure imaginary poles.
        # The test passes if dbode doesn't raise an exception.
system = TransferFunction([1], [1, 0, 100], dt=0.1)
dbode(system, n=2)
def test_error(self):
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dbode, system)
class TestTransferFunctionZConversion(object):
"""Test private conversions between 'z' and 'z**-1' polynomials."""
def test_full(self):
# Numerator and denominator same order
num = [2, 3, 4]
den = [5, 6, 7]
num2, den2 = TransferFunction._z_to_zinv(num, den)
assert_equal(num, num2)
assert_equal(den, den2)
num2, den2 = TransferFunction._zinv_to_z(num, den)
assert_equal(num, num2)
assert_equal(den, den2)
def test_numerator(self):
# Numerator lower order than denominator
num = [2, 3]
den = [5, 6, 7]
num2, den2 = TransferFunction._z_to_zinv(num, den)
assert_equal([0, 2, 3], num2)
assert_equal(den, den2)
num2, den2 = TransferFunction._zinv_to_z(num, den)
assert_equal([2, 3, 0], num2)
assert_equal(den, den2)
def test_denominator(self):
# Numerator higher order than denominator
num = [2, 3, 4]
den = [5, 6]
num2, den2 = TransferFunction._z_to_zinv(num, den)
assert_equal(num, num2)
assert_equal([0, 5, 6], den2)
num2, den2 = TransferFunction._zinv_to_z(num, den)
assert_equal(num, num2)
assert_equal([5, 6, 0], den2)
| 21,810 | 35.291181 | 78 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_signaltools.py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import sys
from decimal import Decimal
from itertools import product
import warnings
import pytest
from pytest import raises as assert_raises
from numpy.testing import (
assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns, assert_array_less)
from scipy._lib._numpy_compat import suppress_warnings
from numpy import array, arange
import numpy as np
from scipy.ndimage.filters import correlate1d
from scipy.optimize import fmin
from scipy import signal
from scipy.signal import (
correlate, convolve, convolve2d, fftconvolve, choose_conv_method,
hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos,
invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt,
sosfilt_zi, tf2zpk, BadCoefficients)
from scipy.signal.windows import hann
from scipy.signal.signaltools import _filtfilt_gust
if sys.version_info >= (3, 5):
from math import gcd
else:
from fractions import gcd
class _TestConvolve(object):
def test_basic(self):
a = [3, 4, 5, 6, 5, 4]
b = [1, 2, 3]
c = convolve(a, b)
assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12]))
def test_same(self):
a = [3, 4, 5]
b = [1, 2, 3, 4]
c = convolve(a, b, mode="same")
assert_array_equal(c, array([10, 22, 34]))
def test_same_eq(self):
a = [3, 4, 5]
b = [1, 2, 3]
c = convolve(a, b, mode="same")
assert_array_equal(c, array([10, 22, 22]))
def test_complex(self):
x = array([1 + 1j, 2 + 1j, 3 + 1j])
y = array([1 + 1j, 2 + 1j])
z = convolve(x, y)
assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j]))
def test_zero_rank(self):
a = 1289
b = 4567
c = convolve(a, b)
assert_equal(c, a * b)
def test_single_element(self):
a = array([4967])
b = array([3920])
c = convolve(a, b)
assert_equal(c, a * b)
def test_2d_arrays(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve(a, b)
d = array([[2, 7, 16, 17, 12],
[10, 30, 62, 58, 38],
[12, 31, 58, 49, 30]])
assert_array_equal(c, d)
def test_input_swapping(self):
small = arange(8).reshape(2, 2, 2)
big = 1j * arange(27).reshape(3, 3, 3)
big += arange(27)[::-1].reshape(3, 3, 3)
out_array = array(
[[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j],
[52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j],
[46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j],
[40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]],
[[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j],
[282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j],
[246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j],
[142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]],
[[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j],
[174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j],
[138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j],
[70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]],
[[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j],
[68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j],
[38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j],
[12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]])
assert_array_equal(convolve(small, big, 'full'), out_array)
assert_array_equal(convolve(big, small, 'full'), out_array)
assert_array_equal(convolve(small, big, 'same'),
out_array[1:3, 1:3, 1:3])
assert_array_equal(convolve(big, small, 'same'),
out_array[0:3, 0:3, 0:3])
assert_array_equal(convolve(small, big, 'valid'),
out_array[1:3, 1:3, 1:3])
assert_array_equal(convolve(big, small, 'valid'),
out_array[1:3, 1:3, 1:3])
def test_invalid_params(self):
a = [3, 4, 5]
b = [1, 2, 3]
assert_raises(ValueError, convolve, a, b, mode='spam')
assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft')
assert_raises(ValueError, convolve, a, b, mode='ham', method='direct')
assert_raises(ValueError, convolve, a, b, mode='full', method='bacon')
assert_raises(ValueError, convolve, a, b, mode='same', method='bacon')
class TestConvolve(_TestConvolve):
def test_valid_mode2(self):
# See gh-5897
a = [1, 2, 3, 6, 5, 3]
b = [2, 3, 4, 5, 3, 4, 2, 2, 1]
expected = [70, 78, 73, 65]
out = convolve(a, b, 'valid')
assert_array_equal(out, expected)
out = convolve(b, a, 'valid')
assert_array_equal(out, expected)
a = [1 + 5j, 2 - 1j, 3 + 0j]
b = [2 - 3j, 1 + 0j]
expected = [2 - 3j, 8 - 10j]
out = convolve(a, b, 'valid')
assert_array_equal(out, expected)
out = convolve(b, a, 'valid')
assert_array_equal(out, expected)
def test_same_mode(self):
a = [1, 2, 3, 3, 1, 2]
b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3]
c = convolve(a, b, 'same')
d = array([57, 61, 63, 57, 45, 36])
assert_array_equal(c, d)
def test_invalid_shapes(self):
        # By "invalid," we mean that neither array has dimensions that are
        # all at least as large as the corresponding dimensions of the other
        # array. This setup should raise a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'})
def test_convolve_method(self, n=100):
types = sum([t for _, t in np.sctypes.items()], [])
types = {np.dtype(t).name for t in types}
# These types include 'bool' and all precisions (int8, float32, etc)
# The removed types throw errors in correlate or fftconvolve
for dtype in ['complex256', 'complex192', 'float128', 'float96',
'str', 'void', 'bytes', 'object', 'unicode', 'string']:
if dtype in types:
types.remove(dtype)
args = [(t1, t2, mode) for t1 in types for t2 in types
for mode in ['valid', 'full', 'same']]
        # These are random arrays, which makes this test much stronger than
        # convolving two np.ones arrays would be.
np.random.seed(42)
array_types = {'i': np.random.choice([0, 1], size=n),
'f': np.random.randn(n)}
array_types['b'] = array_types['u'] = array_types['i']
array_types['c'] = array_types['f'] + 0.5j*array_types['f']
for t1, t2, mode in args:
x1 = array_types[np.dtype(t1).kind].astype(t1)
x2 = array_types[np.dtype(t2).kind].astype(t2)
results = {key: convolve(x1, x2, method=key, mode=mode)
for key in ['fft', 'direct']}
assert_equal(results['fft'].dtype, results['direct'].dtype)
if 'bool' in t1 and 'bool' in t2:
assert_equal(choose_conv_method(x1, x2), 'direct')
continue
            # Found by experiment: approximately the smallest (rtol, atol)
            # thresholds for which the tests pass.
if any([t in {'complex64', 'float32'} for t in [t1, t2]]):
kwargs = {'rtol': 1.0e-4, 'atol': 1e-6}
elif 'float16' in [t1, t2]:
# atol is default for np.allclose
kwargs = {'rtol': 1e-3, 'atol': 1e-8}
else:
# defaults for np.allclose (different from assert_allclose)
kwargs = {'rtol': 1e-5, 'atol': 1e-8}
assert_allclose(results['fft'], results['direct'], **kwargs)
def test_convolve_method_large_input(self):
        # This is really a test that convolving two large integers goes to
        # the direct method even if the fft method would otherwise be chosen.
for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]:
z = np.array([2**n], dtype=np.int64)
fft = convolve(z, z, method='fft')
direct = convolve(z, z, method='direct')
            # this is where integer precision limits start to matter;
            # issue #6076 has more detail, hopefully more tests once resolved
if n < 50:
assert_equal(fft, direct)
assert_equal(fft, 2**(2*n))
assert_equal(direct, 2**(2*n))
def test_mismatched_dims(self):
# Input arrays should have the same number of dimensions
assert_raises(ValueError, convolve, [1], 2, method='direct')
assert_raises(ValueError, convolve, 1, [2], method='direct')
assert_raises(ValueError, convolve, [1], 2, method='fft')
assert_raises(ValueError, convolve, 1, [2], method='fft')
assert_raises(ValueError, convolve, [1], [[2]])
assert_raises(ValueError, convolve, [3], 2)
class _TestConvolve2d(object):
def test_2d_arrays(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
d = array([[2, 7, 16, 17, 12],
[10, 30, 62, 58, 38],
[12, 31, 58, 49, 30]])
e = convolve2d(a, b)
assert_array_equal(e, d)
def test_valid_mode(self):
e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
f = [[1, 2, 3], [3, 4, 5]]
h = array([[62, 80, 98, 116, 134]])
g = convolve2d(e, f, 'valid')
assert_array_equal(g, h)
# See gh-5897
g = convolve2d(f, e, 'valid')
assert_array_equal(g, h)
def test_valid_mode_complx(self):
e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j
h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]])
g = convolve2d(e, f, 'valid')
assert_array_almost_equal(g, h)
# See gh-5897
g = convolve2d(f, e, 'valid')
assert_array_equal(g, h)
def test_fillvalue(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
fillval = 1
c = convolve2d(a, b, 'full', 'fill', fillval)
d = array([[24, 26, 31, 34, 32],
[28, 40, 62, 64, 52],
[32, 46, 67, 62, 48]])
assert_array_equal(c, d)
def test_fillvalue_deprecations(self):
# Deprecated 2017-07, scipy version 1.0.0
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, "Casting complex values to real")
r = sup.record(DeprecationWarning, "could not cast `fillvalue`")
convolve2d([[1]], [[1, 2]], fillvalue=1j)
assert_(len(r) == 1)
warnings.filterwarnings(
"error", message="could not cast `fillvalue`",
category=DeprecationWarning)
assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=1j)
with suppress_warnings():
warnings.filterwarnings(
"always", message="`fillvalue` must be scalar or an array ",
category=DeprecationWarning)
assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=[1, 2])
warnings.filterwarnings(
"error", message="`fillvalue` must be scalar or an array ",
category=DeprecationWarning)
assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=[1, 2])
def test_fillvalue_empty(self):
# Check that fillvalue being empty raises an error:
assert_raises(ValueError, convolve2d, [[1]], [[1, 2]],
fillvalue=[])
def test_wrap_boundary(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve2d(a, b, 'full', 'wrap')
d = array([[80, 80, 74, 80, 80],
[68, 68, 62, 68, 68],
[80, 80, 74, 80, 80]])
assert_array_equal(c, d)
def test_sym_boundary(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve2d(a, b, 'full', 'symm')
d = array([[34, 30, 44, 62, 66],
[52, 48, 62, 80, 84],
[82, 78, 92, 110, 114]])
assert_array_equal(c, d)
def test_invalid_shapes(self):
        # By "invalid," we mean that neither array has dimensions that are
        # all at least as large as the corresponding dimensions of the other
        # array. This setup should raise a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'})
class TestConvolve2d(_TestConvolve2d):
def test_same_mode(self):
e = [[1, 2, 3], [3, 4, 5]]
f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
g = convolve2d(e, f, 'same')
h = array([[22, 28, 34],
[80, 98, 116]])
assert_array_equal(g, h)
def test_valid_mode2(self):
# See gh-5897
e = [[1, 2, 3], [3, 4, 5]]
f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
expected = [[62, 80, 98, 116, 134]]
out = convolve2d(e, f, 'valid')
assert_array_equal(out, expected)
out = convolve2d(f, e, 'valid')
assert_array_equal(out, expected)
e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]]
f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]]
expected = [[27 - 1j, 46. + 2j]]
out = convolve2d(e, f, 'valid')
assert_array_equal(out, expected)
# See gh-5897
out = convolve2d(f, e, 'valid')
assert_array_equal(out, expected)
def test_consistency_convolve_funcs(self):
# Compare np.convolve, signal.convolve, signal.convolve2d
a = np.arange(5)
b = np.array([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
assert_almost_equal(np.convolve(a, b, mode=mode),
signal.convolve(a, b, mode=mode))
assert_almost_equal(np.squeeze(
signal.convolve2d([a], [b], mode=mode)),
signal.convolve(a, b, mode=mode))
def test_invalid_dims(self):
assert_raises(ValueError, convolve2d, 3, 4)
assert_raises(ValueError, convolve2d, [3], [4])
assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]])
class TestFFTConvolve(object):
def test_real(self):
x = array([1, 2, 3])
assert_array_almost_equal(signal.fftconvolve(x, x), [1, 4, 10, 12, 9.])
def test_complex(self):
x = array([1 + 1j, 2 + 2j, 3 + 3j])
assert_array_almost_equal(signal.fftconvolve(x, x),
[0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
def test_2d_real_same(self):
a = array([[1, 2, 3], [4, 5, 6]])
assert_array_almost_equal(signal.fftconvolve(a, a),
array([[1, 4, 10, 12, 9],
[8, 26, 56, 54, 36],
[16, 40, 73, 60, 36]]))
def test_2d_complex_same(self):
a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]])
c = fftconvolve(a, a)
d = array([[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
[10j, 44j, 118j, 156j, 122j],
[3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]])
assert_array_almost_equal(c, d)
def test_real_same_mode(self):
a = array([1, 2, 3])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
c = fftconvolve(a, b, 'same')
d = array([35., 41., 47.])
assert_array_almost_equal(c, d)
def test_real_same_mode2(self):
a = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
b = array([1, 2, 3])
c = fftconvolve(a, b, 'same')
d = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])
assert_array_almost_equal(c, d)
def test_valid_mode(self):
# See gh-5897
a = array([3, 2, 1])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = array([24., 31., 41., 43., 49., 25., 12.])
out = fftconvolve(a, b, 'valid')
assert_array_almost_equal(out, expected)
out = fftconvolve(b, a, 'valid')
assert_array_almost_equal(out, expected)
a = array([3 - 1j, 2 + 7j, 1 + 0j])
b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j])
out = fftconvolve(a, b, 'valid')
assert_array_almost_equal(out, expected)
out = fftconvolve(b, a, 'valid')
assert_array_almost_equal(out, expected)
def test_real_valid_mode(self):
a = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
b = array([3, 2, 1])
d = array([24., 31., 41., 43., 49., 25., 12.])
c = fftconvolve(a, b, 'valid')
assert_array_almost_equal(c, d)
# See gh-5897
c = fftconvolve(b, a, 'valid')
assert_array_almost_equal(c, d)
def test_empty(self):
# Regression test for #1745: crashes with 0-length input.
assert_(fftconvolve([], []).size == 0)
assert_(fftconvolve([5, 6], []).size == 0)
assert_(fftconvolve([], [7]).size == 0)
def test_zero_rank(self):
a = array(4967)
b = array(3920)
c = fftconvolve(a, b)
assert_equal(c, a * b)
def test_single_element(self):
a = array([4967])
b = array([3920])
c = fftconvolve(a, b)
assert_equal(c, a * b)
def test_random_data(self):
np.random.seed(1234)
a = np.random.rand(1233) + 1j * np.random.rand(1233)
b = np.random.rand(1321) + 1j * np.random.rand(1321)
c = fftconvolve(a, b, 'full')
d = np.convolve(a, b, 'full')
assert_(np.allclose(c, d, rtol=1e-10))
@pytest.mark.slow
def test_many_sizes(self):
np.random.seed(1234)
def ns():
for j in range(1, 100):
yield j
for j in range(1000, 1500):
yield j
for k in range(50):
yield np.random.randint(1001, 10000)
for n in ns():
msg = 'n=%d' % (n,)
a = np.random.rand(n) + 1j * np.random.rand(n)
b = np.random.rand(n) + 1j * np.random.rand(n)
c = fftconvolve(a, b, 'full')
d = np.convolve(a, b, 'full')
assert_allclose(c, d, atol=1e-10, err_msg=msg)
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, fftconvolve, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, fftconvolve, *(b, a), **{'mode': 'valid'})
def test_mismatched_dims(self):
assert_raises(ValueError, fftconvolve, [1], 2)
assert_raises(ValueError, fftconvolve, 1, [2])
assert_raises(ValueError, fftconvolve, [1], [[2]])
assert_raises(ValueError, fftconvolve, [3], 2)
def test_invalid_flags(self):
assert_raises(ValueError, fftconvolve, [1], [2], mode='chips')
class TestMedFilt(object):
def test_basic(self):
f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46],
[50, 50, 50, 50, 50, 0, 72, 77, 68, 66],
[50, 50, 50, 50, 50, 46, 47, 19, 64, 77],
[50, 50, 50, 50, 50, 42, 15, 29, 95, 35],
[50, 50, 50, 50, 50, 46, 34, 9, 21, 66],
[70, 97, 28, 68, 78, 77, 61, 58, 71, 42],
[64, 53, 44, 29, 68, 32, 19, 68, 24, 84],
[3, 33, 53, 67, 1, 78, 74, 55, 12, 83],
[7, 11, 46, 70, 60, 47, 24, 43, 61, 26],
[32, 61, 88, 7, 39, 4, 92, 64, 45, 61]]
d = signal.medfilt(f, [7, 3])
e = signal.medfilt2d(np.array(f, float), [7, 3])
assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0],
[0, 50, 50, 50, 50, 42, 19, 21, 29, 0],
[50, 50, 50, 50, 50, 47, 34, 34, 46, 35],
[50, 50, 50, 50, 50, 50, 42, 47, 64, 42],
[50, 50, 50, 50, 50, 50, 46, 55, 64, 35],
[33, 50, 50, 50, 50, 47, 46, 43, 55, 26],
[32, 50, 50, 50, 50, 47, 46, 45, 55, 26],
[7, 46, 50, 50, 47, 46, 46, 43, 45, 21],
[0, 32, 33, 39, 32, 32, 43, 43, 43, 0],
[0, 7, 11, 7, 4, 4, 19, 19, 24, 0]])
assert_array_equal(d, e)
def test_none(self):
# Ticket #1124. Ensure this does not segfault.
try:
signal.medfilt(None)
except Exception:
pass
# Expand on this test to guard against a regression with possibly
# contiguous numpy arrays that have odd strides. The stride value below
# points into wrong memory if used (but it does not need to be used).
dummy = np.arange(10, dtype=np.float64)
a = dummy[5:6]
a.strides = 16
assert_(signal.medfilt(a, 1) == 5.)
def test_refcounting(self):
# Check a refcounting-related crash
a = Decimal(123)
x = np.array([a, a], dtype=object)
if hasattr(sys, 'getrefcount'):
n = 2 * sys.getrefcount(a)
else:
n = 10
# Shouldn't segfault:
for j in range(n):
signal.medfilt(x)
if hasattr(sys, 'getrefcount'):
assert_(sys.getrefcount(a) < n)
assert_equal(x, [a, a])
class TestWiener(object):
def test_basic(self):
g = array([[5, 6, 4, 3],
[3, 5, 6, 2],
[2, 3, 5, 6],
[1, 6, 9, 7]], 'd')
h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667],
[2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],
[2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],
[1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]])
assert_array_almost_equal(signal.wiener(g), h, decimal=6)
assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6)
class TestResample(object):
def test_basic(self):
# Some basic tests
# Regression test for issue #3603.
# window.shape must equal sig.shape[0]
sig = np.arange(128)
num = 256
win = signal.get_window(('kaiser', 8.0), 160)
assert_raises(ValueError, signal.resample, sig, num, window=win)
# Other degenerate conditions
assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1)
assert_raises(ValueError, signal.resample_poly, sig, 1, 0)
# test for issue #6505 - should not modify window.shape when axis ≠ 0
sig2 = np.tile(np.arange(160), (2,1))
signal.resample(sig2, num, axis=-1, window=win)
assert_(win.shape == (160,))
def test_fft(self):
# Test FFT-based resampling
self._test_data(method='fft')
def test_polyphase(self):
# Test polyphase resampling
self._test_data(method='polyphase')
def test_polyphase_extfilter(self):
# Test external specification of downsampling filter
self._test_data(method='polyphase', ext=True)
def test_mutable_window(self):
# Test that a mutable window is not modified
impulse = np.zeros(3)
window = np.random.RandomState(0).randn(2)
window_orig = window.copy()
signal.resample_poly(impulse, 5, 1, window=window)
assert_array_equal(window, window_orig)
def _test_data(self, method, ext=False):
# Test resampling of sinusoids and random noise (1-sec)
rate = 100
rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201]
# Sinusoids, windowed to avoid edge artifacts
t = np.arange(rate) / float(rate)
freqs = np.array((1., 10., 40.))[:, np.newaxis]
x = np.sin(2 * np.pi * freqs * t) * hann(rate)
for rate_to in rates_to:
t_to = np.arange(rate_to) / float(rate_to)
y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to)
if method == 'fft':
y_resamps = signal.resample(x, rate_to, axis=-1)
else:
if ext and rate_to != rate:
# Match resample_poly's default window design (see the sketch after this class)
g = gcd(rate_to, rate)
up = rate_to // g
down = rate // g
max_rate = max(up, down)
f_c = 1. / max_rate
half_len = 10 * max_rate
window = signal.firwin(2 * half_len + 1, f_c,
window=('kaiser', 5.0))
polyargs = {'window': window}
else:
polyargs = {}
y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1,
**polyargs)
for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs):
if freq >= 0.5 * rate_to:
y_to.fill(0.) # mostly low-passed away
assert_allclose(y_resamp, y_to, atol=1e-3)
else:
assert_array_equal(y_to.shape, y_resamp.shape)
corr = np.corrcoef(y_to, y_resamp)[0, 1]
assert_(corr > 0.99, msg=(corr, rate, rate_to))
# Random data
rng = np.random.RandomState(0)
x = hann(rate) * np.cumsum(rng.randn(rate))  # low-passed, windowed noise
for rate_to in rates_to:
# random data
t_to = np.arange(rate_to) / float(rate_to)
y_to = np.interp(t_to, t, x)
if method == 'fft':
y_resamp = signal.resample(x, rate_to)
else:
y_resamp = signal.resample_poly(x, rate_to, rate)
assert_array_equal(y_to.shape, y_resamp.shape)
corr = np.corrcoef(y_to, y_resamp)[0, 1]
assert_(corr > 0.99, msg=corr)
# More tests of the fft method (master as of 0.18.1 failed these)
if method == 'fft':
x1 = np.array([1. + 0.j, 0. + 0.j])
y1_test = signal.resample(x1, 4)
y1_true = np.array([1. + 0.j, 0.5 + 0.j, 0. + 0.j, 0.5 + 0.j])  # upsampled complex
assert_allclose(y1_test, y1_true, atol=1e-12)
x2 = np.array([1., 0.5, 0., 0.5])
y2_test = signal.resample(x2, 2)  # downsampling a real array
y2_true = np.array([1., 0.])
assert_allclose(y2_test, y2_true, atol=1e-12)
def test_poly_vs_filtfilt(self):
# Check that up=1.0 gives same answer as filtfilt + slicing
random_state = np.random.RandomState(17)
try_types = (int, np.float32, np.complex64, float, complex)
size = 10000
down_factors = [2, 11, 79]
for dtype in try_types:
x = random_state.randn(size).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * random_state.randn(size)
# resample_poly assumes zeros outside of signal, whereas filtfilt
# can only constant-pad. Make them equivalent:
x[0] = 0
x[-1] = 0
for down in down_factors:
h = signal.firwin(31, 1. / down, window='hamming')
yf = filtfilt(h, 1.0, x, padtype='constant')[::down]
# Need to pass convolved version of filter to resample_poly,
# since filtfilt does forward and backward, but resample_poly
# only goes forward
hc = convolve(h, h[::-1])
y = signal.resample_poly(x, 1, down, window=hc)
assert_allclose(yf, y, atol=1e-7, rtol=1e-7)
def test_correlate1d(self):
for down in [2, 4]:
for nx in range(1, 40, down):
for nweights in (32, 33):
x = np.random.random((nx,))
weights = np.random.random((nweights,))
y_g = correlate1d(x, weights[::-1], mode='constant')
y_s = signal.resample_poly(x, up=1, down=down, window=weights)
assert_allclose(y_g[::down], y_s)
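# A minimal sketch (helper name illustrative, not scipy API) of the
# "match default window design" branch in _test_data above: for a rate
# change of up/down, resample_poly's default anti-aliasing filter is a
# Kaiser-windowed FIR lowpass with normalized cutoff 1/max(up, down) and
# half-length 10 * max(up, down).
def _demo_default_poly_window(rate_to=49, rate=100):
    from math import gcd
    from scipy import signal
    g = gcd(rate_to, rate)
    up, down = rate_to // g, rate // g
    max_rate = max(up, down)
    f_c = 1. / max_rate        # anti-aliasing cutoff
    half_len = 10 * max_rate   # filter half-length
    return signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0))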
class TestCSpline1DEval(object):
def test_basic(self):
y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
x = arange(len(y))
dx = x[1] - x[0]
cj = signal.cspline1d(y)
x2 = arange(len(y) * 10.0) / 10.0
y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0])
# make sure interpolated values are on knot points
assert_array_almost_equal(y2[::10], y, decimal=5)
def test_complex(self):
# create some smoothly varying complex signal to interpolate
x = np.arange(2)
y = np.zeros(x.shape, dtype=np.complex64)
T = 10.0
f = 1.0 / T
y = np.exp(2.0J * np.pi * f * x)
# get the cspline transform
cy = signal.cspline1d(y)
# determine new test x value and interpolate
xnew = np.array([0.5])
ynew = signal.cspline1d_eval(cy, xnew)
assert_equal(ynew.dtype, y.dtype)
class TestOrderFilt(object):
def test_basic(self):
assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1),
[2, 3, 2])
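# How the expected values above arise (a by-hand sketch; the helper name
# is made up): order_filter slides the nonzero entries of 'domain' over
# the zero-padded input and keeps the element of the given rank (0 is the
# smallest). With domain [1, 0, 1] only the two neighbours are kept, so
# at index 1 the window is {1, 3} and rank 1 picks 3.
def _demo_order_filter_rank():
    from scipy.signal import order_filter
    out = order_filter([1, 2, 3], [1, 0, 1], 1)
    assert list(out) == [2, 3, 2]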
class _TestLinearFilter(object):
def generate(self, shape):
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
return self.convert_dtype(x)
def convert_dtype(self, arr):
if self.dtype == np.dtype('O'):
arr = np.asarray(arr)
out = np.empty(arr.shape, self.dtype)
it = np.nditer([arr, out], ['refs_ok', 'zerosize_ok'],
[['readonly'], ['writeonly']])
for x, y in it:
y[...] = self.type(x[()])
return out
else:
return np.array(arr, self.dtype, copy=False)
def test_rank_1_IIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, -0.5])
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
assert_array_almost_equal(lfilter(b, a, x), y_r)
def test_rank_1_FIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, 1])
a = self.convert_dtype([1])
y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.])
assert_array_almost_equal(lfilter(b, a, x), y_r)
def test_rank_1_IIR_init_cond(self):
x = self.generate((6,))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([0.5, -0.5])
zi = self.convert_dtype([1, 2])
y_r = self.convert_dtype([1, 5, 9, 13, 17, 21])
zf_r = self.convert_dtype([13, -10])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_1_FIR_init_cond(self):
x = self.generate((6,))
b = self.convert_dtype([1, 1, 1])
a = self.convert_dtype([1])
zi = self.convert_dtype([1, 1])
y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.])
zf_r = self.convert_dtype([9, 5])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_2_IIR_axis_0(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4],
[6, 4, 2]])
y = lfilter(b, a, x, axis=0)
assert_array_almost_equal(y_r2_a0, y)
def test_rank_2_IIR_axis_1(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12],
[18, -16, 18]])
y = lfilter(b, a, x, axis=1)
assert_array_almost_equal(y_r2_a1, y)
def test_rank_2_IIR_axis_0_init_cond(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
zi = self.convert_dtype(np.ones((4,1)))
y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13],
[19, -17, 19]])
zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis]
y, zf = lfilter(b, a, x, axis=1, zi=zi)
assert_array_almost_equal(y_r2_a0_1, y)
assert_array_almost_equal(zf, zf_r)
def test_rank_2_IIR_axis_1_init_cond(self):
x = self.generate((4,3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
zi = self.convert_dtype(np.ones((1,3)))
y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1],
[1, 3, 5], [5, 3, 1]])
zf_r = self.convert_dtype([[-23, -23, -23]])
y, zf = lfilter(b, a, x, axis=0, zi=zi)
assert_array_almost_equal(y_r2_a0_0, y)
assert_array_almost_equal(zf, zf_r)
def test_rank_3_IIR(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
for axis in range(x.ndim):
y = lfilter(b, a, x, axis)
y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
assert_array_almost_equal(y, y_r)
def test_rank_3_IIR_init_cond(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
for axis in range(x.ndim):
zi_shape = list(x.shape)
zi_shape[axis] = 1
zi = self.convert_dtype(np.ones(zi_shape))
zi1 = self.convert_dtype([1])
y, zf = lfilter(b, a, x, axis, zi)
lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
y_r = np.apply_along_axis(lf0, axis, x)
zf_r = np.apply_along_axis(lf1, axis, x)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_3_FIR(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
for axis in range(x.ndim):
y = lfilter(b, a, x, axis)
y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
assert_array_almost_equal(y, y_r)
def test_rank_3_FIR_init_cond(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
for axis in range(x.ndim):
zi_shape = list(x.shape)
zi_shape[axis] = 2
zi = self.convert_dtype(np.ones(zi_shape))
zi1 = self.convert_dtype([1, 1])
y, zf = lfilter(b, a, x, axis, zi)
lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
y_r = np.apply_along_axis(lf0, axis, x)
zf_r = np.apply_along_axis(lf1, axis, x)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_zi_pseudobroadcast(self):
x = self.generate((4, 5, 20))
b,a = signal.butter(8, 0.2, output='ba')
b = self.convert_dtype(b)
a = self.convert_dtype(a)
zi_size = b.shape[0] - 1
# lfilter requires x.ndim == zi.ndim exactly; however, zi can have
# length-1 dimensions, which are expanded (see the sketch after this test).
zi_full = self.convert_dtype(np.ones((4, 5, zi_size)))
zi_sing = self.convert_dtype(np.ones((1, 1, zi_size)))
y_full, zf_full = lfilter(b, a, x, zi=zi_full)
y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing)
assert_array_almost_equal(y_sing, y_full)
assert_array_almost_equal(zf_full, zf_sing)
# lfilter does not prepend ones
assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size))
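# Minimal sketch of the rule tested above (assumes zf is returned with the
# expanded shape, as the equality assertions imply): zi must have the same
# ndim as x, but singleton axes are broadcast across the batch.
def _demo_zi_pseudobroadcast():
    import numpy as np
    from scipy.signal import butter, lfilter
    b, a = butter(8, 0.2)
    x = np.random.RandomState(0).randn(2, 3, 20)
    zi = np.ones((1, 1, len(a) - 1))  # expands across the (2, 3) batch
    y, zf = lfilter(b, a, x, zi=zi)
    assert y.shape == x.shape and zf.shape == (2, 3, len(a) - 1)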
def test_scalar_a(self):
# a can be a scalar.
x = self.generate(6)
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
y_r = self.convert_dtype([0, 1, 2, 2, 2, 2])
y = lfilter(b, a[0], x)
assert_array_almost_equal(y, y_r)
def test_zi_some_singleton_dims(self):
# lfilter doesn't really broadcast (no prepending of 1's). But does
# do singleton expansion if x and zi have the same ndim. This was
# broken only if a subset of the axes were singletons (gh-4681).
x = self.convert_dtype(np.zeros((3,2,5), 'l'))
b = self.convert_dtype(np.ones(5, 'l'))
a = self.convert_dtype(np.array([1,0,0]))
zi = np.ones((3,1,4), 'l')
zi[1,:,:] *= 2
zi[2,:,:] *= 3
zi = self.convert_dtype(zi)
zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l'))
y_expected = np.zeros((3,2,5), 'l')
y_expected[:,:,:4] = [[[1]], [[2]], [[3]]]
y_expected = self.convert_dtype(y_expected)
# IIR
y_iir, zf_iir = lfilter(b, a, x, -1, zi)
assert_array_almost_equal(y_iir, y_expected)
assert_array_almost_equal(zf_iir, zf_expected)
# FIR
y_fir, zf_fir = lfilter(b, a[0], x, -1, zi)
assert_array_almost_equal(y_fir, y_expected)
assert_array_almost_equal(zf_fir, zf_expected)
def base_bad_size_zi(self, b, a, x, axis, zi):
b = self.convert_dtype(b)
a = self.convert_dtype(a)
x = self.convert_dtype(x)
zi = self.convert_dtype(zi)
assert_raises(ValueError, lfilter, b, a, x, axis, zi)
def test_bad_size_zi(self):
# rank 1
x1 = np.arange(6)
self.base_bad_size_zi([1], [1], x1, -1, [1])
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1])
self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]])
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]])
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1])
self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]])
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3])
# rank 2
x2 = np.arange(12).reshape((4,3))
# for axis=0, zi.shape must equal (max(len(a), len(b)) - 1, 3); see the helper sketch after this test
self.base_bad_size_zi([1], [1], x2, 0, [0])
# for each of these there are 5 cases tested (in this order):
# 1. not deep enough, right # elements
# 2. too deep, right # elements
# 3. right depth, right # elements, transposed
# 4. right depth, too few elements
# 5. right depth, too many elements
self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
# for axis=1, zi.shape must equal (4, max(len(a), len(b)) - 1)
self.base_bad_size_zi([1], [1], x2, 1, [0])
self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
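# The shape rule exercised case-by-case above, stated once (illustrative
# helper, not scipy API): along the filtered axis zi holds
# max(len(a), len(b)) - 1 values, and every other axis must match x.
def _expected_zi_shape(b, a, x_shape, axis):
    shape = list(x_shape)
    shape[axis] = max(len(a), len(b)) - 1
    return tuple(shape)
# e.g. _expected_zi_shape([1, 1, 1], [1], (4, 3), 0) == (2, 3)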
def test_empty_zi(self):
# Regression test for #880: empty array for zi crashes.
x = self.generate((5,))
a = self.convert_dtype([1])
b = self.convert_dtype([1])
zi = self.convert_dtype([])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, x)
assert_equal(zf.dtype, self.dtype)
assert_equal(zf.size, 0)
def test_lfiltic_bad_zi(self):
# Regression test for #3699: bad initial conditions
a = self.convert_dtype([1])
b = self.convert_dtype([1])
# "y" sets the datatype of zi, so it truncates if int
zi = lfiltic(b, a, [1., 0])
zi_1 = lfiltic(b, a, [1, 0])
zi_2 = lfiltic(b, a, [True, False])
assert_array_equal(zi, zi_1)
assert_array_equal(zi, zi_2)
def test_short_x_FIR(self):
# regression test for #5116
# x shorter than b, with non-None zi, used to fail
a = self.convert_dtype([1])
b = self.convert_dtype([1, 0, -1])
zi = self.convert_dtype([2, 7])
x = self.convert_dtype([72])
ye = self.convert_dtype([74])
zfe = self.convert_dtype([7, -72])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_short_x_IIR(self):
# regression test for #5116
# x shorter than b, with non-None zi, used to fail
a = self.convert_dtype([1, 1])
b = self.convert_dtype([1, 0, -1])
zi = self.convert_dtype([2, 7])
x = self.convert_dtype([72])
ye = self.convert_dtype([74])
zfe = self.convert_dtype([-67, -72])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_do_not_modify_a_b_IIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, -1])
b0 = b.copy()
a = self.convert_dtype([0.5, -0.5])
a0 = a.copy()
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
assert_equal(b, b0)
assert_equal(a, a0)
def test_do_not_modify_a_b_FIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, 0, 1])
b0 = b.copy()
a = self.convert_dtype([2])
a0 = a.copy()
y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.])
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
assert_equal(b, b0)
assert_equal(a, a0)
class TestLinearFilterFloat32(_TestLinearFilter):
dtype = np.dtype('f')
class TestLinearFilterFloat64(_TestLinearFilter):
dtype = np.dtype('d')
class TestLinearFilterFloatExtended(_TestLinearFilter):
dtype = np.dtype('g')
class TestLinearFilterComplex64(_TestLinearFilter):
dtype = np.dtype('F')
class TestLinearFilterComplex128(_TestLinearFilter):
dtype = np.dtype('D')
class TestLinearFilterComplexExtended(_TestLinearFilter):
dtype = np.dtype('G')
class TestLinearFilterDecimal(_TestLinearFilter):
dtype = np.dtype('O')
def type(self, x):
return Decimal(str(x))
class TestLinearFilterObject(_TestLinearFilter):
dtype = np.dtype('O')
type = float
def test_lfilter_bad_object():
# lfilter: object arrays with non-numeric objects raise TypeError.
# Regression test for ticket #1452.
assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0])
assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0])
assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0])
def test_lfilter_notimplemented_input():
# Should not crash, gh-7991
assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5])
@pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short,
np.uint, int, np.ulonglong, np.longlong,
np.float32, np.float64, np.longdouble,
Decimal])
class TestCorrelateReal(object):
def _setup_rank1(self, dt):
a = np.linspace(0, 3, 4).astype(dt)
b = np.linspace(1, 2, 2).astype(dt)
y_r = np.array([0, 2, 5, 8, 3]).astype(dt)
return a, b, y_r
def equal_tolerance(self, res_dt):
# Default of assert_array_almost_equal's 'decimal' keyword.
decimal = 6
try:
dt_info = np.finfo(res_dt)
if hasattr(dt_info, 'resolution'):
decimal = int(-0.5*np.log10(dt_info.resolution))
except Exception:
pass
return decimal
def test_method(self, dt):
if dt == Decimal:
method = choose_conv_method([Decimal(4)], [Decimal(3)])
assert_equal(method, 'direct')
else:
a, b, y_r = self._setup_rank3(dt)
y_fft = correlate(a, b, method='fft')
y_direct = correlate(a, b, method='direct')
assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance(y_fft.dtype))
assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_fft.dtype))
assert_equal(y_fft.dtype, dt)
assert_equal(y_direct.dtype, dt)
def test_rank1_valid(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'valid')
assert_array_almost_equal(y, y_r[1:4])
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, 'valid')
assert_array_almost_equal(y, y_r[1:4][::-1])
assert_equal(y.dtype, dt)
def test_rank1_same(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'same')
assert_array_almost_equal(y, y_r[:-1])
assert_equal(y.dtype, dt)
def test_rank1_full(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r)
assert_equal(y.dtype, dt)
def _setup_rank3(self, dt):
a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(
dt)
b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(
dt)
y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.],
[46., 432., 1062., 1840., 2672., 1698., 864., 266.],
[134., 736., 1662., 2768., 3920., 2418., 1168., 314.],
[260., 952., 1932., 3056., 4208., 2580., 1240., 332.],
[202., 664., 1290., 1984., 2688., 1590., 712., 150.],
[114., 344., 642., 960., 1280., 726., 296., 38.]],
[[23., 400., 1035., 1832., 2696., 1737., 904., 293.],
[134., 920., 2166., 3680., 5280., 3306., 1640., 474.],
[325., 1544., 3369., 5512., 7720., 4683., 2192., 535.],
[571., 1964., 3891., 6064., 8272., 4989., 2324., 565.],
[434., 1360., 2586., 3920., 5264., 3054., 1312., 230.],
[241., 700., 1281., 1888., 2496., 1383., 532., 39.]],
[[22., 214., 528., 916., 1332., 846., 430., 132.],
[86., 484., 1098., 1832., 2600., 1602., 772., 206.],
[188., 802., 1698., 2732., 3788., 2256., 1018., 218.],
[308., 1006., 1950., 2996., 4052., 2400., 1078., 230.],
[230., 692., 1290., 1928., 2568., 1458., 596., 78.],
[126., 354., 636., 924., 1212., 654., 234., 0.]]],
dtype=dt)
return a, b, y_r
def test_rank3_valid(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b, "valid")
assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, "valid")
assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1])
assert_equal(y.dtype, dt)
def test_rank3_same(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b, "same")
assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])
assert_equal(y.dtype, dt)
def test_rank3_all(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b)
assert_array_almost_equal(y, y_r)
assert_equal(y.dtype, dt)
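# The tolerance heuristic above in concrete numbers (sketch only): keeping
# half the decimal digits of the dtype's resolution guards against
# round-off accumulation in the FFT path.
def _demo_equal_tolerance():
    import numpy as np
    assert int(-0.5 * np.log10(np.finfo(np.float64).resolution)) == 7
    assert int(-0.5 * np.log10(np.finfo(np.float32).resolution)) == 3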
class TestCorrelate(object):
# Tests that don't depend on dtype
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'})
def test_invalid_params(self):
a = [3, 4, 5]
b = [1, 2, 3]
assert_raises(ValueError, correlate, a, b, mode='spam')
assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft')
assert_raises(ValueError, correlate, a, b, mode='ham', method='direct')
assert_raises(ValueError, correlate, a, b, mode='full', method='bacon')
assert_raises(ValueError, correlate, a, b, mode='same', method='bacon')
def test_mismatched_dims(self):
# Input arrays should have the same number of dimensions
assert_raises(ValueError, correlate, [1], 2, method='direct')
assert_raises(ValueError, correlate, 1, [2], method='direct')
assert_raises(ValueError, correlate, [1], 2, method='fft')
assert_raises(ValueError, correlate, 1, [2], method='fft')
assert_raises(ValueError, correlate, [1], [[2]])
assert_raises(ValueError, correlate, [3], 2)
def test_numpy_fastpath(self):
a = [1, 2, 3]
b = [4, 5]
assert_allclose(correlate(a, b, mode='same'), [5, 14, 23])
a = [1, 2, 3]
b = [4, 5, 6]
assert_allclose(correlate(a, b, mode='same'), [17, 32, 23])
assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12])
assert_allclose(correlate(a, b, mode='valid'), [32])
@pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble])
class TestCorrelateComplex(object):
# The decimal precision to be used for comparing results.
# This value will be passed as the 'decimal' keyword argument of
# assert_array_almost_equal().
def decimal(self, dt):
return int(2 * np.finfo(dt).precision / 3)
def _setup_rank1(self, dt, mode):
np.random.seed(9)
a = np.random.randn(10).astype(dt)
a += 1j * np.random.randn(10).astype(dt)
b = np.random.randn(8).astype(dt)
b += 1j * np.random.randn(8).astype(dt)
y_r = (correlate(a.real, b.real, mode=mode) +
correlate(a.imag, b.imag, mode=mode)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag, mode=mode) +
correlate(a.imag, b.real, mode=mode))
return a, b, y_r
def test_rank1_valid(self, dt):
a, b, y_r = self._setup_rank1(dt, 'valid')
y = correlate(a, b, 'valid')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, 'valid')
assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_rank1_same(self, dt):
a, b, y_r = self._setup_rank1(dt, 'same')
y = correlate(a, b, 'same')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_rank1_full(self, dt):
a, b, y_r = self._setup_rank1(dt, 'full')
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_swap_full(self, dt):
d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt)
k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt)
y = correlate(d, k)
assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j])
def test_swap_same(self, dt):
d = [0.+0.j, 1.+1.j, 2.+2.j]
k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j]
y = correlate(d, k, mode="same")
assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j])
def test_rank3(self, dt):
a = np.random.randn(10, 8, 6).astype(dt)
a += 1j * np.random.randn(10, 8, 6).astype(dt)
b = np.random.randn(8, 6, 4).astype(dt)
b += 1j * np.random.randn(8, 6, 4).astype(dt)
y_r = (correlate(a.real, b.real)
+ correlate(a.imag, b.imag)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)
assert_equal(y.dtype, dt)
def test_rank0(self, dt):
a = np.array(np.random.randn()).astype(dt)
a += 1j * np.array(np.random.randn()).astype(dt)
b = np.array(np.random.randn()).astype(dt)
b += 1j * np.array(np.random.randn()).astype(dt)
y_r = (correlate(a.real, b.real)
+ correlate(a.imag, b.imag)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)
assert_equal(y.dtype, dt)
assert_equal(correlate([1], [2j]), correlate(1, 2j))
assert_equal(correlate([2j], [3j]), correlate(2j, 3j))
assert_equal(correlate([3j], [4]), correlate(3j, 4))
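# The decomposition assembled in _setup_rank1, spelled out (sketch; the
# helper name is made up): correlate conjugates its second argument, so
# (ar + j*ai) against (br + j*bi) splits into
# (ar*br + ai*bi) + j*(ai*br - ar*bi) termwise.
def _demo_complex_correlate_split():
    import numpy as np
    from scipy.signal import correlate
    rng = np.random.RandomState(1)
    a = rng.randn(6) + 1j * rng.randn(6)
    b = rng.randn(4) + 1j * rng.randn(4)
    y = (correlate(a.real, b.real) + correlate(a.imag, b.imag)
         + 1j * (correlate(a.imag, b.real) - correlate(a.real, b.imag)))
    assert np.allclose(y, correlate(a, b))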
class TestCorrelate2d(object):
def test_consistency_correlate_funcs(self):
# Compare np.correlate, signal.correlate, signal.correlate2d
a = np.arange(5)
b = np.array([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
assert_almost_equal(np.correlate(a, b, mode=mode),
signal.correlate(a, b, mode=mode))
assert_almost_equal(np.squeeze(signal.correlate2d([a], [b],
mode=mode)),
signal.correlate(a, b, mode=mode))
# See gh-5897
if mode == 'valid':
assert_almost_equal(np.correlate(b, a, mode=mode),
signal.correlate(b, a, mode=mode))
assert_almost_equal(np.squeeze(signal.correlate2d([b], [a],
mode=mode)),
signal.correlate(b, a, mode=mode))
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'})
def test_complex_input(self):
assert_equal(signal.correlate2d([[1]], [[2j]]), -2j)
assert_equal(signal.correlate2d([[2j]], [[3j]]), 6)
assert_equal(signal.correlate2d([[3j]], [[4]]), 12j)
class TestLFilterZI(object):
def test_basic(self):
a = np.array([1.0, -1.0, 0.5])
b = np.array([1.0, 0.0, 2.0])
zi_expected = np.array([5.0, -1.0])
zi = lfilter_zi(b, a)
assert_array_almost_equal(zi, zi_expected)
def test_scale_invariance(self):
# Regression test. There was a bug in which b was not correctly
# rescaled when a[0] was nonzero.
b = np.array([2, 8, 5])
a = np.array([1, 1, 8])
zi1 = lfilter_zi(b, a)
zi2 = lfilter_zi(2*b, 2*a)
assert_allclose(zi2, zi1, rtol=1e-12)
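# What lfilter_zi buys you, as a usage sketch: it returns the internal
# state matching the steady state of a unit step, so a step passes through
# a unity-DC-gain filter with no start-up transient.
def _demo_lfilter_zi_step():
    import numpy as np
    from scipy.signal import butter, lfilter, lfilter_zi
    b, a = butter(5, 0.25)  # lowpass, DC gain 1
    y, _ = lfilter(b, a, np.ones(10), zi=lfilter_zi(b, a))
    assert np.allclose(y, 1.0)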
class TestFiltFilt(object):
filtfilt_kind = 'tf'
def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None,
method='pad', irlen=None):
if self.filtfilt_kind == 'tf':
b, a = zpk2tf(*zpk)
return filtfilt(b, a, x, axis, padtype, padlen, method, irlen)
elif self.filtfilt_kind == 'sos':
sos = zpk2sos(*zpk)
return sosfiltfilt(sos, x, axis, padtype, padlen)
def test_basic(self):
zpk = tf2zpk([1, 2, 3], [1, 2, 3])
out = self.filtfilt(zpk, np.arange(12))
assert_allclose(out, arange(12), atol=1e-11)
def test_sine(self):
rate = 2000
t = np.linspace(0, 1.0, rate + 1)
# A signal with low frequency and a high frequency.
xlow = np.sin(5 * 2 * np.pi * t)
xhigh = np.sin(250 * 2 * np.pi * t)
x = xlow + xhigh
zpk = butter(8, 0.125, output='zpk')
# r is the magnitude of the largest pole.
r = np.abs(zpk[1]).max()
eps = 1e-5
# n estimates the number of steps for the
# transient to decay by a factor of eps.
n = int(np.ceil(np.log(eps) / np.log(r)))
# High order lowpass filter...
y = self.filtfilt(zpk, x, padlen=n)
# Result should be just xlow.
err = np.abs(y - xlow).max()
assert_(err < 1e-4)
# A 2D case.
x2d = np.vstack([xlow, xlow + xhigh])
y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1)
assert_equal(y2d.shape, x2d.shape)
err = np.abs(y2d - xlow).max()
assert_(err < 1e-4)
# Use the previous result to check the use of the axis keyword.
# (Regression test for ticket #1620)
y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0)
assert_equal(y2d, y2dt.T)
def test_axis(self):
# Test the 'axis' keyword on a 3D array.
x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12)
zpk = butter(3, 0.125, output='zpk')
y0 = self.filtfilt(zpk, x, padlen=0, axis=0)
y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1)
assert_array_equal(y0, np.swapaxes(y1, 0, 1))
y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2)
assert_array_equal(y0, np.swapaxes(y2, 0, 2))
def test_acoeff(self):
if self.filtfilt_kind != 'tf':
return # only necessary for TF
# test for 'a' coefficient as single number
out = signal.filtfilt([.5, .5], 1, np.arange(10))
assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14)
def test_gust_simple(self):
if self.filtfilt_kind != 'tf':
pytest.skip('gust only implemented for TF systems')
# The input array has length 2. The exact solution for this case
# was computed "by hand".
x = np.array([1.0, 2.0])
b = np.array([0.5])
a = np.array([1.0, -0.5])
y, z1, z2 = _filtfilt_gust(b, a, x)
assert_allclose([z1[0], z2[0]],
[0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]])
assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1],
0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]])
def test_gust_scalars(self):
if self.filtfilt_kind != 'tf':
pytest.skip('gust only implemented for TF systems')
# The filter coefficients are both scalars, so the filter simply
# multiplies its input by b/a. When it is used in filtfilt, the
# factor is (b/a)**2.
x = np.arange(12)
b = 3.0
a = 2.0
y = filtfilt(b, a, x, method="gust")
expected = (b/a)**2 * x
assert_allclose(y, expected)
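# The padlen heuristic used in test_sine above, factored out (illustrative
# helper): a pole of magnitude r < 1 decays like r**n, so solving
# r**n = eps gives n = ceil(log(eps) / log(r)) samples for the transient
# to shrink by a factor eps.
def _transient_len(poles, eps=1e-5):
    import numpy as np
    r = np.abs(poles).max()
    return int(np.ceil(np.log(eps) / np.log(r)))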
class TestSOSFiltFilt(TestFiltFilt):
filtfilt_kind = 'sos'
def test_equivalence(self):
"""Test equivalence between sosfiltfilt and filtfilt"""
x = np.random.RandomState(0).randn(1000)
for order in range(1, 6):
zpk = signal.butter(order, 0.35, output='zpk')
b, a = zpk2tf(*zpk)
sos = zpk2sos(*zpk)
y = filtfilt(b, a, x)
y_sos = sosfiltfilt(sos, x)
assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order)
def filtfilt_gust_opt(b, a, x):
"""
An alternative implementation of filtfilt with Gustafsson edges.
This function computes the same result as
`scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays
are accepted. The problem is solved using `fmin` from `scipy.optimize`.
`_filtfilt_gust` is significantly faster than this implementation.
"""
def filtfilt_gust_opt_func(ics, b, a, x):
"""Objective function used in filtfilt_gust_opt."""
m = max(len(a), len(b)) - 1
z0f = ics[:m]
z0b = ics[m:]
y_f = lfilter(b, a, x, zi=z0f)[0]
y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1]
y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
y_bf = lfilter(b, a, y_b, zi=z0f)[0]
value = np.sum((y_fb - y_bf)**2)
return value
m = max(len(a), len(b)) - 1
zi = lfilter_zi(b, a)
ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi))
result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x),
xtol=1e-10, ftol=1e-12,
maxfun=10000, maxiter=10000,
full_output=True, disp=False)
opt, fopt, niter, funcalls, warnflag = result
if warnflag > 0:
raise RuntimeError("minimization failed in filtfilt_gust_opt: "
"warnflag=%d" % warnflag)
z0f = opt[:m]
z0b = opt[m:]
# Apply the forward-backward filter using the computed initial
# conditions.
y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
y = lfilter(b, a, y_b, zi=z0f)[0]
return y, z0f, z0b
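# Usage sketch for the reference implementation above (inputs are
# arbitrary): the optimizer recovers the forward and backward initial
# conditions that make the two filtering orders agree.
def _demo_filtfilt_gust_opt():
    import numpy as np
    from scipy.signal import butter
    b, a = butter(3, 0.2)
    x = np.random.RandomState(0).randn(50)
    y, z0f, z0b = filtfilt_gust_opt(b, a, x)
    assert y.shape == x.shape and len(z0f) == len(z0b) == 3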
def check_filtfilt_gust(b, a, shape, axis, irlen=None):
# Generate x, the data to be filtered.
np.random.seed(123)
x = np.random.randn(*shape)
# Apply filtfilt to x. This is the main calculation to be checked.
y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen)
# Also call the private function so we can test the ICs.
yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
# filtfilt_gust_opt is an independent implementation that gives the
# expected result, but it only handles 1-d arrays, so use some looping
# and reshaping shenanigans to create the expected output arrays.
xx = np.swapaxes(x, axis, -1)
out_shape = xx.shape[:-1]
yo = np.empty_like(xx)
m = max(len(a), len(b)) - 1
zo1 = np.empty(out_shape + (m,))
zo2 = np.empty(out_shape + (m,))
for indx in product(*[range(d) for d in out_shape]):
yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx])
yo = np.swapaxes(yo, -1, axis)
zo1 = np.swapaxes(zo1, -1, axis)
zo2 = np.swapaxes(zo2, -1, axis)
assert_allclose(y, yo, rtol=1e-9, atol=1e-10)
assert_allclose(yg, yo, rtol=1e-9, atol=1e-10)
assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10)
assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10)
def test_choose_conv_method():
for mode in ['valid', 'same', 'full']:
for ndims in [1, 2]:
n, k, true_method = 8, 6, 'direct'
x = np.random.randn(*((n,) * ndims))
h = np.random.randn(*((k,) * ndims))
method = choose_conv_method(x, h, mode=mode)
assert_equal(method, true_method)
method_try, times = choose_conv_method(x, h, mode=mode, measure=True)
assert_(method_try in {'fft', 'direct'})
assert_(type(times) is dict)
assert_('fft' in times.keys() and 'direct' in times.keys())
n = 10
for not_fft_conv_supp in ["complex256", "complex192"]:
if hasattr(np, not_fft_conv_supp):
x = np.ones(n, dtype=not_fft_conv_supp)
h = x.copy()
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
x = np.array([2**51], dtype=np.int64)
h = x.copy()
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
x = [Decimal(3), Decimal(2)]
h = [Decimal(1), Decimal(4)]
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
def test_filtfilt_gust():
# Design a filter.
z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk')
# Find the approximate impulse response length of the filter.
eps = 1e-10
r = np.max(np.abs(p))
approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
np.random.seed(123)
b, a = zpk2tf(z, p, k)
for irlen in [None, approx_impulse_len]:
signal_len = 5 * approx_impulse_len
# 1-d test case
check_filtfilt_gust(b, a, (signal_len,), 0, irlen)
# 3-d test case; test each axis.
for axis in range(3):
shape = [2, 2, 2]
shape[axis] = signal_len
check_filtfilt_gust(b, a, shape, axis, irlen)
# Test case with length less than 2*approx_impulse_len.
# In this case, `filtfilt_gust` should behave the same as if
# `irlen=None` was given.
length = 2*approx_impulse_len - 50
check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len)
class TestDecimate(object):
def test_bad_args(self):
x = np.arange(12)
assert_raises(TypeError, signal.decimate, x, q=0.5, n=1)
assert_raises(TypeError, signal.decimate, x, q=2, n=0.5)
def test_basic_IIR(self):
x = np.arange(12)
y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round()
assert_array_equal(y, x[::2])
def test_basic_FIR(self):
x = np.arange(12)
y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round()
assert_array_equal(y, x[::2])
def test_shape(self):
# Regression test for ticket #1480.
z = np.zeros((30, 30))
d0 = signal.decimate(z, 2, axis=0, zero_phase=False)
assert_equal(d0.shape, (15, 30))
d1 = signal.decimate(z, 2, axis=1, zero_phase=False)
assert_equal(d1.shape, (30, 15))
def test_phaseshift_FIR(self):
with suppress_warnings() as sup:
sup.filter(BadCoefficients, "Badly conditioned filter")
self._test_phaseshift(method='fir', zero_phase=False)
def test_zero_phase_FIR(self):
with suppress_warnings() as sup:
sup.filter(BadCoefficients, "Badly conditioned filter")
self._test_phaseshift(method='fir', zero_phase=True)
def test_phaseshift_IIR(self):
self._test_phaseshift(method='iir', zero_phase=False)
def test_zero_phase_IIR(self):
self._test_phaseshift(method='iir', zero_phase=True)
def _test_phaseshift(self, method, zero_phase):
rate = 120
rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3
t_tot = int(100) # Need to let antialiasing filters settle
t = np.arange(rate*t_tot+1) / float(rate)
# Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts
freqs = np.array(rates_to) * 0.8 / 2
d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t)
* signal.windows.tukey(t.size, 0.1))
for rate_to in rates_to:
q = rate // rate_to
t_to = np.arange(rate_to*t_tot+1) / float(rate_to)
d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to)
* signal.windows.tukey(t_to.size, 0.1))
# Set up downsampling filters, match v0.17 defaults
if method == 'fir':
n = 30
system = signal.dlti(signal.firwin(n + 1, 1. / q,
window='hamming'), 1.)
elif method == 'iir':
n = 8
wc = 0.8*np.pi/q
system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi))
# Calculate expected phase response, as unit complex vector
if zero_phase is False:
_, h_resps = signal.freqz(system.num, system.den,
freqs/rate*2*np.pi)
h_resps /= np.abs(h_resps)
else:
h_resps = np.ones_like(freqs)
y_resamps = signal.decimate(d.real, q, n, ftype=system,
zero_phase=zero_phase)
# Get phase from complex inner product, like CSD
h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1)
h_resamps /= np.abs(h_resamps)
subnyq = freqs < 0.5*rate_to
# Complex vectors should be aligned, only compare below nyquist
assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0,
atol=1e-3, rtol=1e-3)
def test_auto_n(self):
# Test that our value of n is a reasonable choice (depends on
# the downsampling factor)
sfreq = 100.
n = 1000
t = np.arange(n) / sfreq
# will alias for decimations (>= 15)
x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) * t)
assert_allclose(np.linalg.norm(x), 1., rtol=1e-3)
x_out = signal.decimate(x, 30, ftype='fir')
assert_array_less(np.linalg.norm(x_out), 0.01)
class TestHilbert(object):
def test_bad_args(self):
x = np.array([1.0 + 0.0j])
assert_raises(ValueError, hilbert, x)
x = np.arange(8.0)
assert_raises(ValueError, hilbert, x, N=0)
def test_hilbert_theoretical(self):
# test cases by Ariel Rokem
decimal = 14
pi = np.pi
t = np.arange(0, 2 * pi, pi / 256)
a0 = np.sin(t)
a1 = np.cos(t)
a2 = np.sin(2 * t)
a3 = np.cos(2 * t)
a = np.vstack([a0, a1, a2, a3])
h = hilbert(a)
h_abs = np.abs(h)
h_angle = np.angle(h)
h_real = np.real(h)
# The real part should be equal to the original signals:
assert_almost_equal(h_real, a, decimal)
# The absolute value should be one everywhere, for this input:
assert_almost_equal(h_abs, np.ones(a.shape), decimal)
# For the 'slow' sine - the phase should go from -pi/2 to pi/2 in
# the first 256 bins:
assert_almost_equal(h_angle[0, :256],
np.arange(-pi / 2, pi / 2, pi / 256),
decimal)
# For the 'slow' cosine - the phase should go from 0 to pi in the
# same interval:
assert_almost_equal(
h_angle[1, :256], np.arange(0, pi, pi / 256), decimal)
# The 'fast' sine should make this phase transition in half the time:
assert_almost_equal(h_angle[2, :128],
np.arange(-pi / 2, pi / 2, pi / 128),
decimal)
# Ditto for the 'fast' cosine:
assert_almost_equal(
h_angle[3, :128], np.arange(0, pi, pi / 128), decimal)
# The imaginary part of hilbert(cos(t)) is sin(t) (cf. Wikipedia):
assert_almost_equal(h[1].imag, a0, decimal)
def test_hilbert_axisN(self):
# tests for axis and N arguments
a = np.arange(18).reshape(3, 6)
# test axis
aa = hilbert(a, axis=-1)
assert_equal(hilbert(a.T, axis=0), aa.T)
# test 1d
assert_almost_equal(hilbert(a[0]), aa[0], 14)
# test N
aan = hilbert(a, N=20, axis=-1)
assert_equal(aan.shape, [3, 20])
assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3])
# the next test is just a regression test,
# no idea whether numbers make sense
a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j,
1.000000000000000e+00 - 2.047794505137069j,
1.999999999999999e+00 - 2.244055555687583j,
3.000000000000000e+00 - 1.262750302935009j,
4.000000000000000e+00 - 1.066489252384493j,
5.000000000000000e+00 + 2.918022706971047j,
8.881784197001253e-17 + 3.845658908989067j,
-9.444121133484362e-17 + 0.985044202202061j,
-1.776356839400251e-16 + 1.332257797702019j,
-3.996802888650564e-16 + 0.501905089898885j,
1.332267629550188e-16 + 0.668696078880782j,
-1.192678053963799e-16 + 0.235487067862679j,
-1.776356839400251e-16 + 0.286439612812121j,
3.108624468950438e-16 + 0.031676888064907j,
1.332267629550188e-16 - 0.019275656884536j,
-2.360035624836702e-16 - 0.1652588660287j,
0.000000000000000e+00 - 0.332049855010597j,
3.552713678800501e-16 - 0.403810179797771j,
8.881784197001253e-17 - 0.751023775297729j,
9.444121133484362e-17 - 0.79252210110103j])
assert_almost_equal(aan[0], a0hilb, 14, 'N regression')
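# The property the theoretical test relies on, as a tiny standalone
# sketch: over an integer number of periods the analytic signal of cos(t)
# is exp(1j*t), so its magnitude is the (unit) envelope and its imaginary
# part is the Hilbert transform sin(t).
def _demo_analytic_signal():
    import numpy as np
    from scipy.signal import hilbert
    t = np.arange(0, 2 * np.pi, np.pi / 256)  # exactly one period
    z = hilbert(np.cos(t))
    assert np.allclose(np.abs(z), 1.0) and np.allclose(z.imag, np.sin(t))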
class TestHilbert2(object):
def test_bad_args(self):
# x must be real.
x = np.array([[1.0 + 0.0j]])
assert_raises(ValueError, hilbert2, x)
# x must be rank 2.
x = np.arange(24).reshape(2, 3, 4)
assert_raises(ValueError, hilbert2, x)
# Bad value for N.
x = np.arange(16).reshape(4, 4)
assert_raises(ValueError, hilbert2, x, N=0)
assert_raises(ValueError, hilbert2, x, N=(2, 0))
assert_raises(ValueError, hilbert2, x, N=(2,))
class TestPartialFractionExpansion(object):
def test_invresz_one_coefficient_bug(self):
# Regression test for issue in gh-4646.
r = [1]
p = [2]
k = [0]
a_expected = [1.0, 0.0]
b_expected = [1.0, -2.0]
a_observed, b_observed = invresz(r, p, k)
assert_allclose(a_observed, a_expected)
assert_allclose(b_observed, b_expected)
def test_invres_distinct_roots(self):
# This test was inspired by github issue 2496.
r = [3 / 10, -1 / 6, -2 / 15]
p = [0, -2, -5]
k = []
a_expected = [1, 3]
b_expected = [1, 7, 10, 0]
a_observed, b_observed = invres(r, p, k)
assert_allclose(a_observed, a_expected)
assert_allclose(b_observed, b_expected)
rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum')
# With the default tolerance, the rtype does not matter
# for this example.
for rtype in rtypes:
a_observed, b_observed = invres(r, p, k, rtype=rtype)
assert_allclose(a_observed, a_expected)
assert_allclose(b_observed, b_expected)
# With unrealistically large tolerances, repeated roots may be inferred
# and the rtype comes into play.
ridiculous_tolerance = 1e10
for rtype in rtypes:
a, b = invres(r, p, k, tol=ridiculous_tolerance, rtype=rtype)
def test_invres_repeated_roots(self):
r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]
p = [0, -2, -2, -5]
k = []
a_expected = [1, 3]
b_expected = [1, 9, 24, 20, 0]
rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum')
for rtype in rtypes:
a_observed, b_observed = invres(r, p, k, rtype=rtype)
assert_allclose(a_observed, a_expected)
assert_allclose(b_observed, b_expected)
def test_invres_bad_rtype(self):
r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]
p = [0, -2, -2, -5]
k = []
assert_raises(ValueError, invres, r, p, k, rtype='median')
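# Cross-check of the distinct-roots case by polynomial algebra (sketch;
# the helper name is made up): summing 3/10/s - 1/6/(s+2) - 2/15/(s+5)
# over the common denominator s(s+2)(s+5) leaves the numerator s + 3,
# i.e. exactly a_expected / b_expected above.
def _demo_invres_by_hand():
    import numpy as np
    num = (3 / 10 * np.polymul([1, 2], [1, 5])
           - 1 / 6 * np.polymul([1, 0], [1, 5])
           - 2 / 15 * np.polymul([1, 0], [1, 2]))
    den = np.polymul([1, 0], np.polymul([1, 2], [1, 5]))
    assert np.allclose(num, [0, 1, 3]) and np.allclose(den, [1, 7, 10, 0])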
class TestVectorstrength(object):
def test_single_1dperiod(self):
events = np.array([.5])
period = 5.
targ_strength = 1.
targ_phase = .1
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_single_2dperiod(self):
events = np.array([.5])
period = [1, 2, 5.]
targ_strength = [1.] * 3
targ_phase = np.array([.5, .25, .1])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_array_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_equal_1dperiod(self):
events = np.array([.25, .25, .25, .25, .25, .25])
period = 2
targ_strength = 1.
targ_phase = .125
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_equal_2dperiod(self):
events = np.array([.25, .25, .25, .25, .25, .25])
period = [1, 2, ]
targ_strength = [1.] * 2
targ_phase = np.array([.25, .125])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_spaced_1dperiod(self):
events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
period = 1
targ_strength = 1.
targ_phase = .1
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_spaced_2dperiod(self):
events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
period = [1, .5]
targ_strength = [1.] * 2
targ_phase = np.array([.1, .2])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_partial_1dperiod(self):
events = np.array([.25, .5, .75])
period = 1
targ_strength = 1. / 3.
targ_phase = .5
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_partial_2dperiod(self):
events = np.array([.25, .5, .75])
period = [1., 1., 1., 1.]
targ_strength = [1. / 3.] * 4
targ_phase = np.array([.5, .5, .5, .5])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_opposite_1dperiod(self):
events = np.array([0, .25, .5, .75])
period = 1.
targ_strength = 0
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
def test_opposite_2dperiod(self):
events = np.array([0, .25, .5, .75])
period = [1.] * 10
targ_strength = [0.] * 10
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
def test_2d_events_ValueError(self):
events = np.array([[1, 2]])
period = 1.
assert_raises(ValueError, vectorstrength, events, period)
def test_2d_period_ValueError(self):
events = 1.
period = np.array([[1]])
assert_raises(ValueError, vectorstrength, events, period)
def test_zero_period_ValueError(self):
events = 1.
period = 0
assert_raises(ValueError, vectorstrength, events, period)
def test_negative_period_ValueError(self):
events = 1.
period = -1
assert_raises(ValueError, vectorstrength, events, period)
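# The quantity these tests pin down, computed directly (sketch): map each
# event time onto a unit phasor exp(2j*pi*t/period); the vector strength
# is the magnitude of the mean phasor and the phase is its angle.
def _demo_vectorstrength_by_hand():
    import numpy as np
    from scipy.signal import vectorstrength
    events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
    phasors = np.exp(2j * np.pi * events / 1.0)
    strength, phase = vectorstrength(events, 1.0)
    assert np.allclose(strength, np.abs(phasors.mean()))
    assert np.allclose(phase, np.angle(phasors.mean()))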
class TestSOSFilt(object):
# For sosfilt we only test a single datatype. Since sosfilt wraps
# to lfilter under the hood, it's hopefully good enough to ensure
# lfilter is extensively tested.
dt = np.float64
# The test_rank* tests are pulled from _TestLinearFilter
def test_rank1(self):
x = np.linspace(0, 5, 6).astype(self.dt)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, -0.5]).astype(self.dt)
# Test simple IIR
y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt)
assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r)
# Test simple FIR
b = np.array([1, 1]).astype(self.dt)
# NOTE: changed relative to _TestLinearFilter to add a pole at zero:
a = np.array([1, 0]).astype(self.dt)
y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt)
assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r)
b = [1, 1, 0]
a = [1, 0, 0]
x = np.ones(8)
sos = np.concatenate((b, a))
sos.shape = (1, 6)
y = sosfilt(sos, x)
assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2])
def test_rank2(self):
shape = (4, 3)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
x = x.astype(self.dt)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, 0.5]).astype(self.dt)
y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]],
dtype=self.dt)
y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12],
[18, -16, 18]], dtype=self.dt)
y = sosfilt(tf2sos(b, a), x, axis=0)
assert_array_almost_equal(y_r2_a0, y)
y = sosfilt(tf2sos(b, a), x, axis=1)
assert_array_almost_equal(y_r2_a1, y)
def test_rank3(self):
shape = (4, 3, 2)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, 0.5]).astype(self.dt)
# Test last axis
y = sosfilt(tf2sos(b, a), x)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j]))
def test_initial_conditions(self):
b1, a1 = signal.butter(2, 0.25, 'low')
b2, a2 = signal.butter(2, 0.75, 'low')
b3, a3 = signal.butter(2, 0.75, 'low')
b = np.convolve(np.convolve(b1, b2), b3)
a = np.convolve(np.convolve(a1, a2), a3)
sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3]))
x = np.random.rand(50)
# Stopping filtering and continuing
y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6))
y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]]
assert_allclose(y_true, lfilter(b, a, x))
y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2)))
y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]]
assert_allclose(y_true, y_sos)
# Use a step function
zi = sosfilt_zi(sos)
x = np.ones(8)
y, zf = sosfilt(sos, x, zi=zi)
assert_allclose(y, np.ones(8))
assert_allclose(zf, zi)
# Initial condition shape matching
x.shape = (1, 1) + x.shape # 3D
assert_raises(ValueError, sosfilt, sos, x, zi=zi)
zi_nd = zi.copy()
zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1])
assert_raises(ValueError, sosfilt, sos, x,
zi=zi_nd[:, :, :, [0, 1, 1]])
y, zf = sosfilt(sos, x, zi=zi_nd)
assert_allclose(y[0, 0], np.ones(8))
assert_allclose(zf[:, 0, 0, :], zi)
def test_initial_conditions_3d_axis1(self):
# Test the use of zi when sosfilt is applied to axis 1 of a 3-d input.
# Input array is x.
x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3))
# Design a filter in ZPK format and convert to SOS
zpk = signal.butter(6, 0.35, output='zpk')
sos = zpk2sos(*zpk)
nsections = sos.shape[0]
# Filter along this axis.
axis = 1
# Initial conditions, all zeros.
shp = list(x.shape)
shp[axis] = 2
shp = [nsections] + shp
z0 = np.zeros(shp)
# Apply the filter to x.
yf, zf = sosfilt(sos, x, axis=axis, zi=z0)
# Apply the filter to x in two stages.
y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0)
y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1)
# y should equal yf, and z2 should equal zf.
y = np.concatenate((y1, y2), axis=axis)
assert_allclose(y, yf, rtol=1e-10, atol=1e-13)
assert_allclose(z2, zf, rtol=1e-10, atol=1e-13)
# let's try the "step" initial condition
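        # Scaling the unit-step state by the first input sample makes each
        # signal start in steady state for a constant input at that level.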
zi = sosfilt_zi(sos)
zi.shape = [nsections, 1, 2, 1]
zi = zi * x[:, 0:1, :]
y = sosfilt(sos, x, axis=axis, zi=zi)[0]
# check it against the TF form
b, a = zpk2tf(*zpk)
zi = lfilter_zi(b, a)
zi.shape = [1, zi.size, 1]
zi = zi * x[:, 0:1, :]
y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0]
assert_allclose(y, y_tf, rtol=1e-10, atol=1e-13)
def test_bad_zi_shape(self):
# The shape of zi is checked before using any values in the
# arguments, so np.empty is fine for creating the arguments.
x = np.empty((3, 15, 3))
sos = np.empty((4, 6))
zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3)
assert_raises(ValueError, sosfilt, sos, x, zi=zi, axis=1)
def test_sosfilt_zi(self):
sos = signal.butter(6, 0.2, output='sos')
zi = sosfilt_zi(sos)
y, zf = sosfilt(sos, np.ones(40), zi=zi)
assert_allclose(zf, zi, rtol=1e-13)
# Expected steady state value of the step response of this filter:
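        # (the DC gain of each biquad is sum(b) / sum(a); gains of cascaded
        # sections multiply)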
ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1))
assert_allclose(y, ss, rtol=1e-13)
class TestDeconvolve(object):
def test_basic(self):
# From docstring example
original = [0, 1, 0, 0, 1, 1, 0, 0]
impulse_response = [2, 1]
recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0]
recovered, remainder = signal.deconvolve(recorded, impulse_response)
assert_allclose(recovered, original)
| 90,590 | 36.983648 | 95 | py |
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_ltisys.py
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
assert_)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy.signal import (ss2tf, tf2ss, lsim2, impulse2, step2, lti,
dlti, bode, freqresp, lsim, impulse, step,
abcd_normalize, place_poles,
TransferFunction, StateSpace, ZerosPolesGain)
from scipy.signal.filter_design import BadCoefficients
import scipy.linalg as linalg
import scipy._lib.six as six
def _assert_poles_close(P1,P2, rtol=1e-8, atol=1e-8):
"""
Check each pole in P1 is close to a pole in P2 with a 1e-8
relative tolerance or 1e-8 absolute tolerance (useful for zero poles).
These tolerances are very strict but the systems tested are known to
accept these poles so we should not be far from what is requested.
"""
P2 = P2.copy()
for p1 in P1:
found = False
for p2_idx in range(P2.shape[0]):
if np.allclose([np.real(p1), np.imag(p1)],
[np.real(P2[p2_idx]), np.imag(P2[p2_idx])],
rtol, atol):
found = True
np.delete(P2, p2_idx)
break
if not found:
raise ValueError("Can't find pole " + str(p1) + " in " + str(P2))
class TestPlacePoles(object):
def _check(self, A, B, P, **kwargs):
"""
Perform the most common tests on the poles computed by place_poles
and return the Bunch object for further specific tests
"""
fsf = place_poles(A, B, P, **kwargs)
expected, _ = np.linalg.eig(A - np.dot(B, fsf.gain_matrix))
_assert_poles_close(expected,fsf.requested_poles)
_assert_poles_close(expected,fsf.computed_poles)
_assert_poles_close(P,fsf.requested_poles)
return fsf
def test_real(self):
# Test real pole placement using KNV and YT0 algorithm and example 1 in
# section 4 of the reference publication (see place_poles docstring)
A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0,
0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273,
1.343, -2.104]).reshape(4, 4)
B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146,0]).reshape(4, 2)
P = np.array([-0.2, -0.5, -5.0566, -8.6659])
# Check that both KNV and YT compute correct K matrix
self._check(A, B, P, method='KNV0')
self._check(A, B, P, method='YT')
# Try to reach the specific case in _YT_real where two singular
# values are almost equal. This is to improve code coverage but I
# have no way to be sure this code is really reached
# on some architectures this can lead to a RuntimeWarning invalid
# value in divide (see gh-7590), so suppress it for now
with np.errstate(invalid='ignore'):
self._check(A, B, (2,2,3,3))
def test_complex(self):
# Test complex pole placement on a linearized car model, taken from L.
# Jaulin, Automatique pour la robotique, Cours et Exercices, iSTE
# editions p 184/185
A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0]).reshape(4,4)
B = np.array([0,0,0,0,1,0,0,1]).reshape(4,2)
# Test complex poles on YT
P = np.array([-3, -1, -2-1j, -2+1j])
self._check(A, B, P)
# Try to reach the specific case in _YT_complex where two singular
# values are almost equal. This is to improve code coverage but I
# have no way to be sure this code is really reached
P = [0-1e-6j,0+1e-6j,-10,10]
self._check(A, B, P, maxiter=1000)
# Try to reach the specific case in _YT_complex where the rank two
# update yields two null vectors. This test was found via Monte Carlo.
A = np.array(
[-2148,-2902, -2267, -598, -1722, -1829, -165, -283, -2546,
-167, -754, -2285, -543, -1700, -584, -2978, -925, -1300,
-1583, -984, -386, -2650, -764, -897, -517, -1598, 2, -1709,
-291, -338, -153, -1804, -1106, -1168, -867, -2297]
).reshape(6,6)
B = np.array(
[-108, -374, -524, -1285, -1232, -161, -1204, -672, -637,
-15, -483, -23, -931, -780, -1245, -1129, -1290, -1502,
-952, -1374, -62, -964, -930, -939, -792, -756, -1437,
-491, -1543, -686]
).reshape(6,5)
P = [-25.-29.j, -25.+29.j, 31.-42.j, 31.+42.j, 33.-41.j, 33.+41.j]
self._check(A, B, P)
# Use a lot of poles to go through all cases for update_order
# in _YT_loop
big_A = np.ones((11,11))-np.eye(11)
big_B = np.ones((11,10))-np.diag([1]*10,1)[:,1:]
big_A[:6,:6] = A
big_B[:6,:5] = B
P = [-10,-20,-30,40,50,60,70,-20-5j,-20+5j,5+3j,5-3j]
self._check(big_A, big_B, P)
#check with only complex poles and only real poles
P = [-10,-20,-30,-40,-50,-60,-70,-80,-90,-100]
self._check(big_A[:-1,:-1], big_B[:-1,:-1], P)
P = [-10+10j,-20+20j,-30+30j,-40+40j,-50+50j,
-10-10j,-20-20j,-30-30j,-40-40j,-50-50j]
self._check(big_A[:-1,:-1], big_B[:-1,:-1], P)
# need a 5x5 array to ensure YT handles properly when there
# is only one real pole and several complex
A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0,
0,0,0,5,0,0,0,0,9]).reshape(5,5)
B = np.array([0,0,0,0,1,0,0,1,2,3]).reshape(5,2)
P = np.array([-2, -3+1j, -3-1j, -1+1j, -1-1j])
place_poles(A, B, P)
# same test with an odd number of real poles > 1
# this is another specific case of YT
P = np.array([-2, -3, -4, -1+1j, -1-1j])
self._check(A, B, P)
def test_tricky_B(self):
        # check that we correctly handle B matrices with one column and
        # with n columns (where A has shape (n, n))
A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0,
0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273,
1.343, -2.104]).reshape(4, 4)
B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146, 0, 1, 2, 3, 4,
5, 6, 7, 8]).reshape(4, 4)
# KNV or YT are not called here, it's a specific case with only
# one unique solution
P = np.array([-0.2, -0.5, -5.0566, -8.6659])
fsf = self._check(A, B, P)
# rtol and nb_iter should be set to np.nan as the identity can be
# used as transfer matrix
assert_equal(fsf.rtol, np.nan)
assert_equal(fsf.nb_iter, np.nan)
# check with complex poles too as they trigger a specific case in
# the specific case :-)
P = np.array((-2+1j,-2-1j,-3,-2))
fsf = self._check(A, B, P)
assert_equal(fsf.rtol, np.nan)
assert_equal(fsf.nb_iter, np.nan)
#now test with a B matrix with only one column (no optimisation)
B = B[:,0].reshape(4,1)
P = np.array((-2+1j,-2-1j,-3,-2))
fsf = self._check(A, B, P)
# we can't optimize anything, check they are set to 0 as expected
assert_equal(fsf.rtol, 0)
assert_equal(fsf.nb_iter, 0)
def test_errors(self):
# Test input mistakes from user
A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0]).reshape(4,4)
B = np.array([0,0,0,0,1,0,0,1]).reshape(4,2)
#should fail as the method keyword is invalid
assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
method="foo")
#should fail as poles are not 1D array
assert_raises(ValueError, place_poles, A, B,
np.array((-2.1,-2.2,-2.3,-2.4)).reshape(4,1))
#should fail as A is not a 2D array
assert_raises(ValueError, place_poles, A[:,:,np.newaxis], B,
(-2.1,-2.2,-2.3,-2.4))
#should fail as B is not a 2D array
assert_raises(ValueError, place_poles, A, B[:,:,np.newaxis],
(-2.1,-2.2,-2.3,-2.4))
#should fail as there are too many poles
assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4,-3))
#should fail as there are not enough poles
assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3))
#should fail as the rtol is greater than 1
assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
rtol=42)
#should fail as maxiter is smaller than 1
assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
maxiter=-42)
# should fail as rank(B) is two
assert_raises(ValueError, place_poles, A, B, (-2,-2,-2,-2))
        # uncontrollable system
assert_raises(ValueError, place_poles, np.ones((4,4)),
np.ones((4,2)), (1,2,3,4))
# Should not raise ValueError as the poles can be placed but should
# raise a warning as the convergence is not reached
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
fsf = place_poles(A, B, (-1,-2,-3,-4), rtol=1e-16, maxiter=42)
assert_(len(w) == 1)
assert_(issubclass(w[-1].category, UserWarning))
assert_("Convergence was not reached after maxiter iterations"
in str(w[-1].message))
assert_equal(fsf.nb_iter, 42)
# should fail as a complex misses its conjugate
assert_raises(ValueError, place_poles, A, B, (-2+1j,-2-1j,-2+3j,-2))
# should fail as A is not square
assert_raises(ValueError, place_poles, A[:,:3], B, (-2,-3,-4,-5))
        # should fail as B does not have the same number of rows as A
assert_raises(ValueError, place_poles, A, B[:3,:], (-2,-3,-4,-5))
# should fail as KNV0 does not support complex poles
assert_raises(ValueError, place_poles, A, B,
(-2+1j,-2-1j,-2+3j,-2-3j), method="KNV0")
class TestSS2TF:
def check_matrix_shapes(self, p, q, r):
ss2tf(np.zeros((p, p)),
np.zeros((p, q)),
np.zeros((r, p)),
np.zeros((r, q)), 0)
def test_shapes(self):
# Each tuple holds:
# number of states, number of inputs, number of outputs
for p, q, r in [(3, 3, 3), (1, 3, 3), (1, 1, 1)]:
self.check_matrix_shapes(p, q, r)
def test_basic(self):
# Test a round trip through tf2ss and ss2tf.
b = np.array([1.0, 3.0, 5.0])
a = np.array([1.0, 2.0, 3.0])
A, B, C, D = tf2ss(b, a)
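        # tf2ss returns the controller canonical form, so the first row of A
        # is -a[1:] and the rows below form a shifted identity.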
assert_allclose(A, [[-2, -3], [1, 0]], rtol=1e-13)
assert_allclose(B, [[1], [0]], rtol=1e-13)
assert_allclose(C, [[1, 2]], rtol=1e-13)
assert_allclose(D, [[1]], rtol=1e-14)
bb, aa = ss2tf(A, B, C, D)
assert_allclose(bb[0], b, rtol=1e-13)
assert_allclose(aa, a, rtol=1e-13)
def test_zero_order_round_trip(self):
# See gh-5760
tf = (2, 1)
A, B, C, D = tf2ss(*tf)
assert_allclose(A, [[0]], rtol=1e-13)
assert_allclose(B, [[0]], rtol=1e-13)
assert_allclose(C, [[0]], rtol=1e-13)
assert_allclose(D, [[2]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
assert_allclose(num, [[2, 0]], rtol=1e-13)
assert_allclose(den, [1, 0], rtol=1e-13)
tf = ([[5], [2]], 1)
A, B, C, D = tf2ss(*tf)
assert_allclose(A, [[0]], rtol=1e-13)
assert_allclose(B, [[0]], rtol=1e-13)
assert_allclose(C, [[0], [0]], rtol=1e-13)
assert_allclose(D, [[5], [2]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
assert_allclose(num, [[5, 0], [2, 0]], rtol=1e-13)
assert_allclose(den, [1, 0], rtol=1e-13)
def test_simo_round_trip(self):
# See gh-5753
tf = ([[1, 2], [1, 1]], [1, 2])
A, B, C, D = tf2ss(*tf)
assert_allclose(A, [[-2]], rtol=1e-13)
assert_allclose(B, [[1]], rtol=1e-13)
assert_allclose(C, [[0], [-1]], rtol=1e-13)
assert_allclose(D, [[1], [1]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
assert_allclose(num, [[1, 2], [1, 1]], rtol=1e-13)
assert_allclose(den, [1, 2], rtol=1e-13)
tf = ([[1, 0, 1], [1, 1, 1]], [1, 1, 1])
A, B, C, D = tf2ss(*tf)
assert_allclose(A, [[-1, -1], [1, 0]], rtol=1e-13)
assert_allclose(B, [[1], [0]], rtol=1e-13)
assert_allclose(C, [[-1, 0], [0, 0]], rtol=1e-13)
assert_allclose(D, [[1], [1]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
assert_allclose(num, [[1, 0, 1], [1, 1, 1]], rtol=1e-13)
assert_allclose(den, [1, 1, 1], rtol=1e-13)
tf = ([[1, 2, 3], [1, 2, 3]], [1, 2, 3, 4])
A, B, C, D = tf2ss(*tf)
assert_allclose(A, [[-2, -3, -4], [1, 0, 0], [0, 1, 0]], rtol=1e-13)
assert_allclose(B, [[1], [0], [0]], rtol=1e-13)
assert_allclose(C, [[1, 2, 3], [1, 2, 3]], rtol=1e-13)
assert_allclose(D, [[0], [0]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
assert_allclose(num, [[0, 1, 2, 3], [0, 1, 2, 3]], rtol=1e-13)
assert_allclose(den, [1, 2, 3, 4], rtol=1e-13)
tf = ([1, [2, 3]], [1, 6])
A, B, C, D = tf2ss(*tf)
        assert_allclose(A, [[-6]], rtol=1e-13)
        assert_allclose(B, [[1]], rtol=1e-13)
        assert_allclose(C, [[1], [-9]], rtol=1e-13)
        assert_allclose(D, [[0], [2]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
assert_allclose(num, [[0, 1], [2, 3]], rtol=1e-13)
assert_allclose(den, [1, 6], rtol=1e-13)
tf = ([[1, -3], [1, 2, 3]], [1, 6, 5])
A, B, C, D = tf2ss(*tf)
assert_allclose(A, [[-6, -5], [1, 0]], rtol=1e-13)
assert_allclose(B, [[1], [0]], rtol=1e-13)
assert_allclose(C, [[1, -3], [-4, -2]], rtol=1e-13)
assert_allclose(D, [[0], [1]], rtol=1e-13)
num, den = ss2tf(A, B, C, D)
assert_allclose(num, [[0, 1, -3], [1, 2, 3]], rtol=1e-13)
assert_allclose(den, [1, 6, 5], rtol=1e-13)
def test_multioutput(self):
# Regression test for gh-2669.
# 4 states
A = np.array([[-1.0, 0.0, 1.0, 0.0],
[-1.0, 0.0, 2.0, 0.0],
[-4.0, 0.0, 3.0, 0.0],
[-8.0, 8.0, 0.0, 4.0]])
# 1 input
B = np.array([[0.3],
[0.0],
[7.0],
[0.0]])
# 3 outputs
C = np.array([[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
[8.0, 8.0, 0.0, 0.0]])
D = np.array([[0.0],
[0.0],
[1.0]])
# Get the transfer functions for all the outputs in one call.
b_all, a = ss2tf(A, B, C, D)
# Get the transfer functions for each output separately.
b0, a0 = ss2tf(A, B, C[0], D[0])
b1, a1 = ss2tf(A, B, C[1], D[1])
b2, a2 = ss2tf(A, B, C[2], D[2])
# Check that we got the same results.
assert_allclose(a0, a, rtol=1e-13)
assert_allclose(a1, a, rtol=1e-13)
assert_allclose(a2, a, rtol=1e-13)
assert_allclose(b_all, np.vstack((b0, b1, b2)), rtol=1e-13, atol=1e-14)
class TestLsim(object):
def lti_nowarn(self, *args):
with suppress_warnings() as sup:
sup.filter(BadCoefficients)
system = lti(*args)
return system
def test_first_order(self):
# y' = -y
# exact solution is y(t) = exp(-t)
system = self.lti_nowarn(-1.,1.,1.,0.)
t = np.linspace(0,5)
u = np.zeros_like(t)
tout, y, x = lsim(system, u, t, X0=[1.0])
expected_x = np.exp(-tout)
assert_almost_equal(x, expected_x)
assert_almost_equal(y, expected_x)
def test_integrator(self):
# integrator: y' = u
system = self.lti_nowarn(0., 1., 1., 0.)
t = np.linspace(0,5)
u = t
tout, y, x = lsim(system, u, t)
expected_x = 0.5 * tout**2
assert_almost_equal(x, expected_x)
assert_almost_equal(y, expected_x)
def test_double_integrator(self):
# double integrator: y'' = 2u
A = np.mat("0. 1.; 0. 0.")
B = np.mat("0.; 1.")
C = np.mat("2. 0.")
system = self.lti_nowarn(A, B, C, 0.)
t = np.linspace(0,5)
u = np.ones_like(t)
tout, y, x = lsim(system, u, t)
expected_x = np.transpose(np.array([0.5 * tout**2, tout]))
expected_y = tout**2
assert_almost_equal(x, expected_x)
assert_almost_equal(y, expected_y)
def test_jordan_block(self):
# Non-diagonalizable A matrix
# x1' + x1 = x2
# x2' + x2 = u
# y = x1
# Exact solution with u = 0 is y(t) = t exp(-t)
A = np.mat("-1. 1.; 0. -1.")
B = np.mat("0.; 1.")
C = np.mat("1. 0.")
system = self.lti_nowarn(A, B, C, 0.)
t = np.linspace(0,5)
u = np.zeros_like(t)
tout, y, x = lsim(system, u, t, X0=[0.0, 1.0])
expected_y = tout * np.exp(-tout)
assert_almost_equal(y, expected_y)
def test_miso(self):
# A system with two state variables, two inputs, and one output.
A = np.array([[-1.0, 0.0], [0.0, -2.0]])
B = np.array([[1.0, 0.0], [0.0, 1.0]])
C = np.array([1.0, 0.0])
D = np.zeros((1,2))
system = self.lti_nowarn(A, B, C, D)
t = np.linspace(0, 5.0, 101)
u = np.zeros_like(t)
tout, y, x = lsim(system, u, t, X0=[1.0, 1.0])
expected_y = np.exp(-tout)
expected_x0 = np.exp(-tout)
expected_x1 = np.exp(-2.0*tout)
assert_almost_equal(y, expected_y)
assert_almost_equal(x[:,0], expected_x0)
assert_almost_equal(x[:,1], expected_x1)
def test_nonzero_initial_time(self):
system = self.lti_nowarn(-1.,1.,1.,0.)
t = np.linspace(1,2)
u = np.zeros_like(t)
tout, y, x = lsim(system, u, t, X0=[1.0])
expected_y = np.exp(-tout)
assert_almost_equal(y, expected_y)
class Test_lsim2(object):
def test_01(self):
t = np.linspace(0,10,1001)
u = np.zeros_like(t)
# First order system: x'(t) + x(t) = u(t), x(0) = 1.
# Exact solution is x(t) = exp(-t).
system = ([1.0],[1.0,1.0])
tout, y, x = lsim2(system, u, t, X0=[1.0])
expected_x = np.exp(-tout)
assert_almost_equal(x[:,0], expected_x)
def test_02(self):
t = np.array([0.0, 1.0, 1.0, 3.0])
u = np.array([0.0, 0.0, 1.0, 1.0])
# Simple integrator: x'(t) = u(t)
system = ([1.0],[1.0,0.0])
tout, y, x = lsim2(system, u, t, X0=[1.0])
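        # x(0) = 1 and u(t) steps from 0 to 1 at t = 1, so x stays at 1
        # until t = 1 and then grows linearly, i.e. x(t) = max(1, t).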
expected_x = np.maximum(1.0, tout)
assert_almost_equal(x[:,0], expected_x)
def test_03(self):
t = np.array([0.0, 1.0, 1.0, 1.1, 1.1, 2.0])
u = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0])
# Simple integrator: x'(t) = u(t)
system = ([1.0],[1.0, 0.0])
tout, y, x = lsim2(system, u, t, hmax=0.01)
expected_x = np.array([0.0, 0.0, 0.0, 0.1, 0.1, 0.1])
assert_almost_equal(x[:,0], expected_x)
def test_04(self):
t = np.linspace(0, 10, 1001)
u = np.zeros_like(t)
        # Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = 0.
        # With initial conditions x(0)=1.0 and x'(0)=0.0, the exact solution
# is (1-t)*exp(-t).
system = ([1.0], [1.0, 2.0, 1.0])
tout, y, x = lsim2(system, u, t, X0=[1.0, 0.0])
expected_x = (1.0 - tout) * np.exp(-tout)
assert_almost_equal(x[:,0], expected_x)
def test_05(self):
# The call to lsim2 triggers a "BadCoefficients" warning from
# scipy.signal.filter_design, but the test passes. I think the warning
# is related to the incomplete handling of multi-input systems in
# scipy.signal.
# A system with two state variables, two inputs, and one output.
A = np.array([[-1.0, 0.0], [0.0, -2.0]])
B = np.array([[1.0, 0.0], [0.0, 1.0]])
C = np.array([1.0, 0.0])
D = np.zeros((1, 2))
t = np.linspace(0, 10.0, 101)
with suppress_warnings() as sup:
sup.filter(BadCoefficients)
tout, y, x = lsim2((A,B,C,D), T=t, X0=[1.0, 1.0])
expected_y = np.exp(-tout)
expected_x0 = np.exp(-tout)
expected_x1 = np.exp(-2.0 * tout)
assert_almost_equal(y, expected_y)
assert_almost_equal(x[:,0], expected_x0)
assert_almost_equal(x[:,1], expected_x1)
def test_06(self):
# Test use of the default values of the arguments `T` and `U`.
        # Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = 0.
        # With initial conditions x(0)=1.0 and x'(0)=0.0, the exact solution
# is (1-t)*exp(-t).
system = ([1.0], [1.0, 2.0, 1.0])
tout, y, x = lsim2(system, X0=[1.0, 0.0])
expected_x = (1.0 - tout) * np.exp(-tout)
assert_almost_equal(x[:,0], expected_x)
class _TestImpulseFuncs(object):
# Common tests for impulse/impulse2 (= self.func)
def test_01(self):
# First order system: x'(t) + x(t) = u(t)
# Exact impulse response is x(t) = exp(-t).
system = ([1.0], [1.0,1.0])
tout, y = self.func(system)
expected_y = np.exp(-tout)
assert_almost_equal(y, expected_y)
def test_02(self):
# Specify the desired time values for the output.
# First order system: x'(t) + x(t) = u(t)
# Exact impulse response is x(t) = exp(-t).
system = ([1.0], [1.0,1.0])
n = 21
t = np.linspace(0, 2.0, n)
tout, y = self.func(system, T=t)
assert_equal(tout.shape, (n,))
assert_almost_equal(tout, t)
expected_y = np.exp(-t)
assert_almost_equal(y, expected_y)
def test_03(self):
# Specify an initial condition as a scalar.
# First order system: x'(t) + x(t) = u(t), x(0)=3.0
# Exact impulse response is x(t) = 4*exp(-t).
system = ([1.0], [1.0,1.0])
tout, y = self.func(system, X0=3.0)
expected_y = 4.0 * np.exp(-tout)
assert_almost_equal(y, expected_y)
def test_04(self):
# Specify an initial condition as a list.
# First order system: x'(t) + x(t) = u(t), x(0)=3.0
# Exact impulse response is x(t) = 4*exp(-t).
system = ([1.0], [1.0,1.0])
tout, y = self.func(system, X0=[3.0])
expected_y = 4.0 * np.exp(-tout)
assert_almost_equal(y, expected_y)
def test_05(self):
# Simple integrator: x'(t) = u(t)
system = ([1.0], [1.0,0.0])
tout, y = self.func(system)
expected_y = np.ones_like(tout)
assert_almost_equal(y, expected_y)
def test_06(self):
# Second order system with a repeated root:
        # x''(t) + 2*x'(t) + x(t) = u(t)
# The exact impulse response is t*exp(-t).
system = ([1.0], [1.0, 2.0, 1.0])
tout, y = self.func(system)
expected_y = tout * np.exp(-tout)
assert_almost_equal(y, expected_y)
def test_array_like(self):
# Test that function can accept sequences, scalars.
system = ([1.0], [1.0, 2.0, 1.0])
# TODO: add meaningful test where X0 is a list
tout, y = self.func(system, X0=[3], T=[5, 6])
tout, y = self.func(system, X0=[3], T=[5])
def test_array_like2(self):
system = ([1.0], [1.0, 2.0, 1.0])
tout, y = self.func(system, X0=3, T=5)
class TestImpulse2(_TestImpulseFuncs):
def setup_method(self):
self.func = impulse2
class TestImpulse(_TestImpulseFuncs):
def setup_method(self):
self.func = impulse
class _TestStepFuncs(object):
def test_01(self):
# First order system: x'(t) + x(t) = u(t)
# Exact step response is x(t) = 1 - exp(-t).
system = ([1.0], [1.0,1.0])
tout, y = self.func(system)
expected_y = 1.0 - np.exp(-tout)
assert_almost_equal(y, expected_y)
def test_02(self):
# Specify the desired time values for the output.
# First order system: x'(t) + x(t) = u(t)
# Exact step response is x(t) = 1 - exp(-t).
system = ([1.0], [1.0,1.0])
n = 21
t = np.linspace(0, 2.0, n)
tout, y = self.func(system, T=t)
assert_equal(tout.shape, (n,))
assert_almost_equal(tout, t)
expected_y = 1 - np.exp(-t)
assert_almost_equal(y, expected_y)
def test_03(self):
# Specify an initial condition as a scalar.
# First order system: x'(t) + x(t) = u(t), x(0)=3.0
# Exact step response is x(t) = 1 + 2*exp(-t).
system = ([1.0], [1.0,1.0])
tout, y = self.func(system, X0=3.0)
expected_y = 1 + 2.0*np.exp(-tout)
assert_almost_equal(y, expected_y)
def test_04(self):
# Specify an initial condition as a list.
# First order system: x'(t) + x(t) = u(t), x(0)=3.0
# Exact step response is x(t) = 1 + 2*exp(-t).
system = ([1.0], [1.0,1.0])
tout, y = self.func(system, X0=[3.0])
expected_y = 1 + 2.0*np.exp(-tout)
assert_almost_equal(y, expected_y)
def test_05(self):
# Simple integrator: x'(t) = u(t)
# Exact step response is x(t) = t.
system = ([1.0],[1.0,0.0])
tout, y = self.func(system)
expected_y = tout
assert_almost_equal(y, expected_y)
def test_06(self):
# Second order system with a repeated root:
        # x''(t) + 2*x'(t) + x(t) = u(t)
# The exact step response is 1 - (1 + t)*exp(-t).
system = ([1.0], [1.0, 2.0, 1.0])
tout, y = self.func(system)
expected_y = 1 - (1 + tout) * np.exp(-tout)
assert_almost_equal(y, expected_y)
def test_array_like(self):
# Test that function can accept sequences, scalars.
system = ([1.0], [1.0, 2.0, 1.0])
# TODO: add meaningful test where X0 is a list
tout, y = self.func(system, T=[5, 6])
class TestStep2(_TestStepFuncs):
def setup_method(self):
self.func = step2
def test_05(self):
# This test is almost the same as the one it overwrites in the base
# class. The only difference is the tolerances passed to step2:
# the default tolerances are not accurate enough for this test
# Simple integrator: x'(t) = u(t)
# Exact step response is x(t) = t.
system = ([1.0], [1.0,0.0])
tout, y = self.func(system, atol=1e-10, rtol=1e-8)
expected_y = tout
assert_almost_equal(y, expected_y)
class TestStep(_TestStepFuncs):
def setup_method(self):
self.func = step
def test_complex_input(self):
# Test that complex input doesn't raise an error.
# `step` doesn't seem to have been designed for complex input, but this
# works and may be used, so add regression test. See gh-2654.
step(([], [-1], 1+0j))
class TestLti(object):
def test_lti_instantiation(self):
# Test that lti can be instantiated with sequences, scalars.
# See PR-225.
# TransferFunction
s = lti([1], [-1])
assert_(isinstance(s, TransferFunction))
assert_(isinstance(s, lti))
assert_(not isinstance(s, dlti))
assert_(s.dt is None)
# ZerosPolesGain
s = lti(np.array([]), np.array([-1]), 1)
assert_(isinstance(s, ZerosPolesGain))
assert_(isinstance(s, lti))
assert_(not isinstance(s, dlti))
assert_(s.dt is None)
# StateSpace
s = lti([], [-1], 1)
s = lti([1], [-1], 1, 3)
assert_(isinstance(s, StateSpace))
assert_(isinstance(s, lti))
assert_(not isinstance(s, dlti))
assert_(s.dt is None)
class TestStateSpace(object):
def test_initialization(self):
# Check that all initializations work
s = StateSpace(1, 1, 1, 1)
s = StateSpace([1], [2], [3], [4])
s = StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]),
np.array([[1, 0]]), np.array([[0]]))
def test_conversion(self):
# Check the conversion functions
s = StateSpace(1, 2, 3, 4)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(StateSpace(s) is not s)
assert_(s.to_ss() is not s)
def test_properties(self):
# Test setters/getters for cross class properties.
# This implicitly tests to_tf() and to_zpk()
# Getters
s = StateSpace(1, 1, 1, 1)
assert_equal(s.poles, [1])
assert_equal(s.zeros, [0])
assert_(s.dt is None)
def test_operators(self):
# Test +/-/* operators on systems
class BadType(object):
pass
s1 = StateSpace(np.array([[-0.5, 0.7], [0.3, -0.8]]),
np.array([[1], [0]]),
np.array([[1, 0]]),
np.array([[0]]),
)
s2 = StateSpace(np.array([[-0.2, -0.1], [0.4, -0.1]]),
np.array([[1], [0]]),
np.array([[1, 0]]),
np.array([[0]])
)
s_discrete = s1.to_discrete(0.1)
s2_discrete = s2.to_discrete(0.2)
# Impulse response
t = np.linspace(0, 1, 100)
u = np.zeros_like(t)
u[0] = 1
# Test multiplication
for typ in six.integer_types + (float, complex, np.float32,
np.complex128, np.array):
assert_allclose(lsim(typ(2) * s1, U=u, T=t)[1],
typ(2) * lsim(s1, U=u, T=t)[1])
assert_allclose(lsim(s1 * typ(2), U=u, T=t)[1],
lsim(s1, U=u, T=t)[1] * typ(2))
assert_allclose(lsim(s1 / typ(2), U=u, T=t)[1],
lsim(s1, U=u, T=t)[1] / typ(2))
with assert_raises(TypeError):
typ(2) / s1
assert_allclose(lsim(s1 * 2, U=u, T=t)[1],
lsim(s1, U=2 * u, T=t)[1])
assert_allclose(lsim(s1 * s2, U=u, T=t)[1],
lsim(s1, U=lsim(s2, U=u, T=t)[1], T=t)[1],
atol=1e-5)
with assert_raises(TypeError):
s1 / s1
with assert_raises(TypeError):
s1 * s_discrete
with assert_raises(TypeError):
# Check different discretization constants
s_discrete * s2_discrete
with assert_raises(TypeError):
s1 * BadType()
with assert_raises(TypeError):
BadType() * s1
with assert_raises(TypeError):
s1 / BadType()
with assert_raises(TypeError):
BadType() / s1
# Test addition
assert_allclose(lsim(s1 + 2, U=u, T=t)[1],
2 * u + lsim(s1, U=u, T=t)[1])
        # Check for dimension mismatch
with assert_raises(ValueError):
s1 + np.array([1, 2])
with assert_raises(ValueError):
np.array([1, 2]) + s1
with assert_raises(TypeError):
s1 + s_discrete
with assert_raises(ValueError):
s1 / np.array([[1, 2], [3, 4]])
with assert_raises(TypeError):
# Check different discretization constants
s_discrete + s2_discrete
with assert_raises(TypeError):
s1 + BadType()
with assert_raises(TypeError):
BadType() + s1
assert_allclose(lsim(s1 + s2, U=u, T=t)[1],
lsim(s1, U=u, T=t)[1] + lsim(s2, U=u, T=t)[1])
        # Test subtraction
assert_allclose(lsim(s1 - 2, U=u, T=t)[1],
-2 * u + lsim(s1, U=u, T=t)[1])
assert_allclose(lsim(2 - s1, U=u, T=t)[1],
2 * u + lsim(-s1, U=u, T=t)[1])
assert_allclose(lsim(s1 - s2, U=u, T=t)[1],
lsim(s1, U=u, T=t)[1] - lsim(s2, U=u, T=t)[1])
with assert_raises(TypeError):
s1 - BadType()
with assert_raises(TypeError):
BadType() - s1
class TestTransferFunction(object):
def test_initialization(self):
# Check that all initializations work
s = TransferFunction(1, 1)
s = TransferFunction([1], [2])
s = TransferFunction(np.array([1]), np.array([2]))
def test_conversion(self):
# Check the conversion functions
s = TransferFunction([1, 0], [1, -1])
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(TransferFunction(s) is not s)
assert_(s.to_tf() is not s)
def test_properties(self):
# Test setters/getters for cross class properties.
# This implicitly tests to_ss() and to_zpk()
# Getters
s = TransferFunction([1, 0], [1, -1])
assert_equal(s.poles, [1])
assert_equal(s.zeros, [0])
class TestZerosPolesGain(object):
def test_initialization(self):
# Check that all initializations work
s = ZerosPolesGain(1, 1, 1)
s = ZerosPolesGain([1], [2], 1)
s = ZerosPolesGain(np.array([1]), np.array([2]), 1)
def test_conversion(self):
#Check the conversion functions
s = ZerosPolesGain(1, 2, 3)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(ZerosPolesGain(s) is not s)
assert_(s.to_zpk() is not s)
class Test_abcd_normalize(object):
def setup_method(self):
self.A = np.array([[1.0, 2.0], [3.0, 4.0]])
self.B = np.array([[-1.0], [5.0]])
self.C = np.array([[4.0, 5.0]])
self.D = np.array([[2.5]])
def test_no_matrix_fails(self):
assert_raises(ValueError, abcd_normalize)
def test_A_nosquare_fails(self):
assert_raises(ValueError, abcd_normalize, [1, -1],
self.B, self.C, self.D)
def test_AB_mismatch_fails(self):
assert_raises(ValueError, abcd_normalize, self.A, [-1, 5],
self.C, self.D)
def test_AC_mismatch_fails(self):
assert_raises(ValueError, abcd_normalize, self.A, self.B,
[[4.0], [5.0]], self.D)
def test_CD_mismatch_fails(self):
assert_raises(ValueError, abcd_normalize, self.A, self.B,
self.C, [2.5, 0])
def test_BD_mismatch_fails(self):
assert_raises(ValueError, abcd_normalize, self.A, [-1, 5],
self.C, self.D)
def test_normalized_matrices_unchanged(self):
A, B, C, D = abcd_normalize(self.A, self.B, self.C, self.D)
assert_equal(A, self.A)
assert_equal(B, self.B)
assert_equal(C, self.C)
assert_equal(D, self.D)
def test_shapes(self):
A, B, C, D = abcd_normalize(self.A, self.B, [1, 0], 0)
assert_equal(A.shape[0], A.shape[1])
assert_equal(A.shape[0], B.shape[0])
assert_equal(A.shape[0], C.shape[1])
assert_equal(C.shape[0], D.shape[0])
assert_equal(B.shape[1], D.shape[1])
def test_zero_dimension_is_not_none1(self):
B_ = np.zeros((2, 0))
D_ = np.zeros((0, 0))
A, B, C, D = abcd_normalize(A=self.A, B=B_, D=D_)
assert_equal(A, self.A)
assert_equal(B, B_)
assert_equal(D, D_)
assert_equal(C.shape[0], D_.shape[0])
assert_equal(C.shape[1], self.A.shape[0])
def test_zero_dimension_is_not_none2(self):
B_ = np.zeros((2, 0))
C_ = np.zeros((0, 2))
A, B, C, D = abcd_normalize(A=self.A, B=B_, C=C_)
assert_equal(A, self.A)
assert_equal(B, B_)
assert_equal(C, C_)
assert_equal(D.shape[0], C_.shape[0])
assert_equal(D.shape[1], B_.shape[1])
def test_missing_A(self):
A, B, C, D = abcd_normalize(B=self.B, C=self.C, D=self.D)
assert_equal(A.shape[0], A.shape[1])
assert_equal(A.shape[0], B.shape[0])
assert_equal(A.shape, (self.B.shape[0], self.B.shape[0]))
def test_missing_B(self):
A, B, C, D = abcd_normalize(A=self.A, C=self.C, D=self.D)
assert_equal(B.shape[0], A.shape[0])
assert_equal(B.shape[1], D.shape[1])
assert_equal(B.shape, (self.A.shape[0], self.D.shape[1]))
def test_missing_C(self):
A, B, C, D = abcd_normalize(A=self.A, B=self.B, D=self.D)
assert_equal(C.shape[0], D.shape[0])
assert_equal(C.shape[1], A.shape[0])
assert_equal(C.shape, (self.D.shape[0], self.A.shape[0]))
def test_missing_D(self):
A, B, C, D = abcd_normalize(A=self.A, B=self.B, C=self.C)
assert_equal(D.shape[0], C.shape[0])
assert_equal(D.shape[1], B.shape[1])
assert_equal(D.shape, (self.C.shape[0], self.B.shape[1]))
def test_missing_AB(self):
A, B, C, D = abcd_normalize(C=self.C, D=self.D)
assert_equal(A.shape[0], A.shape[1])
assert_equal(A.shape[0], B.shape[0])
assert_equal(B.shape[1], D.shape[1])
assert_equal(A.shape, (self.C.shape[1], self.C.shape[1]))
assert_equal(B.shape, (self.C.shape[1], self.D.shape[1]))
def test_missing_AC(self):
A, B, C, D = abcd_normalize(B=self.B, D=self.D)
assert_equal(A.shape[0], A.shape[1])
assert_equal(A.shape[0], B.shape[0])
assert_equal(C.shape[0], D.shape[0])
assert_equal(C.shape[1], A.shape[0])
assert_equal(A.shape, (self.B.shape[0], self.B.shape[0]))
assert_equal(C.shape, (self.D.shape[0], self.B.shape[0]))
def test_missing_AD(self):
A, B, C, D = abcd_normalize(B=self.B, C=self.C)
assert_equal(A.shape[0], A.shape[1])
assert_equal(A.shape[0], B.shape[0])
assert_equal(D.shape[0], C.shape[0])
assert_equal(D.shape[1], B.shape[1])
assert_equal(A.shape, (self.B.shape[0], self.B.shape[0]))
assert_equal(D.shape, (self.C.shape[0], self.B.shape[1]))
def test_missing_BC(self):
A, B, C, D = abcd_normalize(A=self.A, D=self.D)
assert_equal(B.shape[0], A.shape[0])
assert_equal(B.shape[1], D.shape[1])
assert_equal(C.shape[0], D.shape[0])
assert_equal(C.shape[1], A.shape[0])
assert_equal(B.shape, (self.A.shape[0], self.D.shape[1]))
assert_equal(C.shape, (self.D.shape[0], self.A.shape[0]))
def test_missing_ABC_fails(self):
assert_raises(ValueError, abcd_normalize, D=self.D)
def test_missing_BD_fails(self):
assert_raises(ValueError, abcd_normalize, A=self.A, C=self.C)
def test_missing_CD_fails(self):
assert_raises(ValueError, abcd_normalize, A=self.A, B=self.B)
class Test_bode(object):
def test_01(self):
# Test bode() magnitude calculation (manual sanity check).
# 1st order low-pass filter: H(s) = 1 / (s + 1),
# cutoff: 1 rad/s, slope: -20 dB/decade
# H(s=0.1) ~= 0 dB
# H(s=1) ~= -3 dB
# H(s=10) ~= -20 dB
# H(s=100) ~= -40 dB
system = lti([1], [1, 1])
w = [0.1, 1, 10, 100]
w, mag, phase = bode(system, w=w)
expected_mag = [0, -3, -20, -40]
assert_almost_equal(mag, expected_mag, decimal=1)
def test_02(self):
# Test bode() phase calculation (manual sanity check).
# 1st order low-pass filter: H(s) = 1 / (s + 1),
# angle(H(s=0.1)) ~= -5.7 deg
# angle(H(s=1)) ~= -45 deg
# angle(H(s=10)) ~= -84.3 deg
system = lti([1], [1, 1])
w = [0.1, 1, 10]
w, mag, phase = bode(system, w=w)
expected_phase = [-5.7, -45, -84.3]
assert_almost_equal(phase, expected_phase, decimal=1)
def test_03(self):
# Test bode() magnitude calculation.
# 1st order low-pass filter: H(s) = 1 / (s + 1)
system = lti([1], [1, 1])
w = [0.1, 1, 10, 100]
w, mag, phase = bode(system, w=w)
jw = w * 1j
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
expected_mag = 20.0 * np.log10(abs(y))
assert_almost_equal(mag, expected_mag)
def test_04(self):
# Test bode() phase calculation.
# 1st order low-pass filter: H(s) = 1 / (s + 1)
system = lti([1], [1, 1])
w = [0.1, 1, 10, 100]
w, mag, phase = bode(system, w=w)
jw = w * 1j
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
expected_phase = np.arctan2(y.imag, y.real) * 180.0 / np.pi
assert_almost_equal(phase, expected_phase)
def test_05(self):
# Test that bode() finds a reasonable frequency range.
# 1st order low-pass filter: H(s) = 1 / (s + 1)
system = lti([1], [1, 1])
n = 10
# Expected range is from 0.01 to 10.
expected_w = np.logspace(-2, 1, n)
w, mag, phase = bode(system, n=n)
assert_almost_equal(w, expected_w)
def test_06(self):
# Test that bode() doesn't fail on a system with a pole at 0.
# integrator, pole at zero: H(s) = 1 / s
system = lti([1], [1, 0])
w, mag, phase = bode(system, n=2)
assert_equal(w[0], 0.01) # a fail would give not-a-number
def test_07(self):
# bode() should not fail on a system with pure imaginary poles.
# The test passes if bode doesn't raise an exception.
system = lti([1], [1, 0, 100])
w, mag, phase = bode(system, n=2)
def test_08(self):
        # Test that bode() returns a continuous phase; see gh-2331.
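        # Five real left-half-plane poles each contribute -90 deg of phase at
        # high frequency, so the unwrapped phase should approach -450 deg
        # instead of wrapping back into (-180, 180].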
system = lti([], [-10, -30, -40, -60, -70], 1)
w, mag, phase = system.bode(w=np.logspace(-3, 40, 100))
assert_almost_equal(min(phase), -450, decimal=15)
def test_from_state_space(self):
# Ensure that bode works with a system that was created from the
# state space representation matrices A, B, C, D. In this case,
# system.num will be a 2-D array with shape (1, n+1), where (n,n)
# is the shape of A.
# A Butterworth lowpass filter is used, so we know the exact
# frequency response.
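        # For an nth-order Butterworth low-pass, |H(jw)|**2 = 1 / (1 + w**(2*n));
        # here n = 3, which gives the w**6 term below.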
a = np.array([1.0, 2.0, 2.0, 1.0])
A = linalg.companion(a).T
B = np.array([[0.0], [0.0], [1.0]])
C = np.array([[1.0, 0.0, 0.0]])
D = np.array([[0.0]])
with suppress_warnings() as sup:
sup.filter(BadCoefficients)
system = lti(A, B, C, D)
w, mag, phase = bode(system, n=100)
expected_magnitude = 20 * np.log10(np.sqrt(1.0 / (1.0 + w**6)))
assert_almost_equal(mag, expected_magnitude)
class Test_freqresp(object):
def test_output_manual(self):
# Test freqresp() output calculation (manual sanity check).
# 1st order low-pass filter: H(s) = 1 / (s + 1),
# re(H(s=0.1)) ~= 0.99
# re(H(s=1)) ~= 0.5
# re(H(s=10)) ~= 0.0099
system = lti([1], [1, 1])
w = [0.1, 1, 10]
w, H = freqresp(system, w=w)
expected_re = [0.99, 0.5, 0.0099]
expected_im = [-0.099, -0.5, -0.099]
assert_almost_equal(H.real, expected_re, decimal=1)
assert_almost_equal(H.imag, expected_im, decimal=1)
def test_output(self):
# Test freqresp() output calculation.
# 1st order low-pass filter: H(s) = 1 / (s + 1)
system = lti([1], [1, 1])
w = [0.1, 1, 10, 100]
w, H = freqresp(system, w=w)
s = w * 1j
expected = np.polyval(system.num, s) / np.polyval(system.den, s)
assert_almost_equal(H.real, expected.real)
assert_almost_equal(H.imag, expected.imag)
def test_freq_range(self):
# Test that freqresp() finds a reasonable frequency range.
# 1st order low-pass filter: H(s) = 1 / (s + 1)
# Expected range is from 0.01 to 10.
system = lti([1], [1, 1])
n = 10
expected_w = np.logspace(-2, 1, n)
w, H = freqresp(system, n=n)
assert_almost_equal(w, expected_w)
def test_pole_zero(self):
# Test that freqresp() doesn't fail on a system with a pole at 0.
# integrator, pole at zero: H(s) = 1 / s
system = lti([1], [1, 0])
w, H = freqresp(system, n=2)
assert_equal(w[0], 0.01) # a fail would give not-a-number
def test_from_state_space(self):
# Ensure that freqresp works with a system that was created from the
# state space representation matrices A, B, C, D. In this case,
# system.num will be a 2-D array with shape (1, n+1), where (n,n) is
# the shape of A.
# A Butterworth lowpass filter is used, so we know the exact
# frequency response.
a = np.array([1.0, 2.0, 2.0, 1.0])
A = linalg.companion(a).T
B = np.array([[0.0],[0.0],[1.0]])
C = np.array([[1.0, 0.0, 0.0]])
D = np.array([[0.0]])
with suppress_warnings() as sup:
sup.filter(BadCoefficients)
system = lti(A, B, C, D)
w, H = freqresp(system, n=100)
s = w * 1j
expected = (1.0 / (1.0 + 2*s + 2*s**2 + s**3))
assert_almost_equal(H.real, expected.real)
assert_almost_equal(H.imag, expected.imag)
def test_from_zpk(self):
        # 4th order low-pass filter: H(s) = 1 / (s + 1)**4
system = lti([],[-1]*4,[1])
w = [0.1, 1, 10, 100]
w, H = freqresp(system, w=w)
s = w * 1j
expected = 1 / (s + 1)**4
assert_almost_equal(H.real, expected.real)
assert_almost_equal(H.imag, expected.imag)
| 46,336 | 35.688044 | 79 | py |
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_peak_finding.py
from __future__ import division, print_function, absolute_import
import copy
import numpy as np
from numpy.testing import (assert_, assert_equal, assert_allclose,
assert_array_equal)
import pytest
from pytest import raises
from scipy._lib.six import xrange
from scipy.signal._peak_finding import (argrelmax, argrelmin,
peak_prominences, peak_widths, _unpack_condition_args, find_peaks,
find_peaks_cwt, _identify_ridge_lines)
from scipy.signal._peak_finding_utils import _argmaxima1d
def _gen_gaussians(center_locs, sigmas, total_length):
xdata = np.arange(0, total_length).astype(float)
out_data = np.zeros(total_length, dtype=float)
for ind, sigma in enumerate(sigmas):
tmp = (xdata - center_locs[ind]) / sigma
out_data += np.exp(-(tmp**2))
return out_data
def _gen_gaussians_even(sigmas, total_length):
num_peaks = len(sigmas)
delta = total_length / (num_peaks + 1)
center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int)
out_data = _gen_gaussians(center_locs, sigmas, total_length)
return out_data, center_locs
def _gen_ridge_line(start_locs, max_locs, length, distances, gaps):
"""
Generate coordinates for a ridge line.
    Will be a series of coordinates, starting at start_loc (length 2).
    The maximum distance between any adjacent columns will be
    `max_distance`, the max distance between adjacent rows
    will be `max_gap`.
`max_locs` should be the size of the intended matrix. The
ending coordinates are guaranteed to be less than `max_locs`,
although they may not approach `max_locs` at all.
"""
def keep_bounds(num, max_val):
out = max(num, 0)
out = min(out, max_val)
return out
gaps = copy.deepcopy(gaps)
distances = copy.deepcopy(distances)
locs = np.zeros([length, 2], dtype=int)
locs[0, :] = start_locs
total_length = max_locs[0] - start_locs[0] - sum(gaps)
if total_length < length:
raise ValueError('Cannot generate ridge line according to constraints')
dist_int = length / len(distances) - 1
gap_int = length / len(gaps) - 1
for ind in xrange(1, length):
nextcol = locs[ind - 1, 1]
nextrow = locs[ind - 1, 0] + 1
if (ind % dist_int == 0) and (len(distances) > 0):
nextcol += ((-1)**ind)*distances.pop()
if (ind % gap_int == 0) and (len(gaps) > 0):
nextrow += gaps.pop()
nextrow = keep_bounds(nextrow, max_locs[0])
nextcol = keep_bounds(nextcol, max_locs[1])
locs[ind, :] = [nextrow, nextcol]
return [locs[:, 0], locs[:, 1]]
class TestArgmaxima1d(object):
def test_empty(self):
"""Test with empty signal."""
x = np.array([], dtype=np.float64)
maxima = _argmaxima1d(x)
assert_equal(maxima, np.array([]))
assert_(maxima.base is None)
def test_linear(self):
"""Test with linear signal."""
x = np.linspace(0, 100)
maxima = _argmaxima1d(x)
assert_equal(maxima, np.array([]))
assert_(maxima.base is None)
def test_simple(self):
"""Test with simple signal."""
x = np.linspace(-10, 10, 50)
x[2::3] += 1
maxima = _argmaxima1d(x)
assert_equal(maxima, np.arange(2, 50, 3))
assert_(maxima.base is None)
def test_flat_maxima(self):
"""Test if flat maxima are detected correctly."""
x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 0, 4, 4, 4, 4, 0, 5])
maxima = _argmaxima1d(x)
assert_equal(maxima, np.array([2, 4, 8, 12]))
assert_(maxima.base is None)
@pytest.mark.parametrize(
'x', [np.array([1., 0, 2]), np.array([3., 3, 0, 4, 4]),
np.array([5., 5, 5, 0, 6, 6, 6])])
def test_signal_edges(self, x):
"""Test if correct behavior on signal edges."""
maxima = _argmaxima1d(x)
assert_equal(maxima, np.array([]))
assert_(maxima.base is None)
def test_exceptions(self):
"""Test input validation and raised exceptions."""
with raises(ValueError, match="wrong number of dimensions"):
_argmaxima1d(np.ones((1, 1)))
with raises(ValueError, match="expected 'float64_t'"):
_argmaxima1d(np.ones(1, dtype=int))
with raises(TypeError, match="list"):
_argmaxima1d([1., 2.])
with raises(TypeError, match="'x' must not be None"):
_argmaxima1d(None)
class TestRidgeLines(object):
def test_empty(self):
test_matr = np.zeros([20, 100])
lines = _identify_ridge_lines(test_matr, 2*np.ones(20), 1)
assert_(len(lines) == 0)
def test_minimal(self):
test_matr = np.zeros([20, 100])
test_matr[0, 10] = 1
lines = _identify_ridge_lines(test_matr, 2*np.ones(20), 1)
assert_(len(lines) == 1)
test_matr = np.zeros([20, 100])
test_matr[0:2, 10] = 1
lines = _identify_ridge_lines(test_matr, 2*np.ones(20), 1)
assert_(len(lines) == 1)
def test_single_pass(self):
distances = [0, 1, 2, 5]
gaps = [0, 1, 2, 0, 1]
test_matr = np.zeros([20, 50]) + 1e-12
length = 12
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_distances = max(distances)*np.ones(20)
identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1)
assert_array_equal(identified_lines, [line])
def test_single_bigdist(self):
distances = [0, 1, 2, 5]
gaps = [0, 1, 2, 4]
test_matr = np.zeros([20, 50])
length = 12
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_dist = 3
max_distances = max_dist*np.ones(20)
#This should get 2 lines, since the distance is too large
identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1)
assert_(len(identified_lines) == 2)
for iline in identified_lines:
adists = np.diff(iline[1])
np.testing.assert_array_less(np.abs(adists), max_dist)
agaps = np.diff(iline[0])
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
def test_single_biggap(self):
distances = [0, 1, 2, 5]
max_gap = 3
gaps = [0, 4, 2, 1]
test_matr = np.zeros([20, 50])
length = 12
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_dist = 6
max_distances = max_dist*np.ones(20)
#This should get 2 lines, since the gap is too large
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
assert_(len(identified_lines) == 2)
for iline in identified_lines:
adists = np.diff(iline[1])
np.testing.assert_array_less(np.abs(adists), max_dist)
agaps = np.diff(iline[0])
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
def test_single_biggaps(self):
distances = [0]
max_gap = 1
gaps = [3, 6]
test_matr = np.zeros([50, 50])
length = 30
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_dist = 1
max_distances = max_dist*np.ones(50)
#This should get 3 lines, since the gaps are too large
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
assert_(len(identified_lines) == 3)
for iline in identified_lines:
adists = np.diff(iline[1])
np.testing.assert_array_less(np.abs(adists), max_dist)
agaps = np.diff(iline[0])
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
class TestArgrel(object):
def test_empty(self):
# Regression test for gh-2832.
# When there are no relative extrema, make sure that
# the number of empty arrays returned matches the
# dimension of the input.
empty_array = np.array([], dtype=int)
z1 = np.zeros(5)
i = argrelmin(z1)
assert_equal(len(i), 1)
assert_array_equal(i[0], empty_array)
z2 = np.zeros((3,5))
row, col = argrelmin(z2, axis=0)
assert_array_equal(row, empty_array)
assert_array_equal(col, empty_array)
row, col = argrelmin(z2, axis=1)
assert_array_equal(row, empty_array)
assert_array_equal(col, empty_array)
def test_basic(self):
# Note: the docstrings for the argrel{min,max,extrema} functions
# do not give a guarantee of the order of the indices, so we'll
# sort them before testing.
x = np.array([[1, 2, 2, 3, 2],
[2, 1, 2, 2, 3],
[3, 2, 1, 2, 2],
[2, 3, 2, 1, 2],
[1, 2, 3, 2, 1]])
row, col = argrelmax(x, axis=0)
order = np.argsort(row)
assert_equal(row[order], [1, 2, 3])
assert_equal(col[order], [4, 0, 1])
row, col = argrelmax(x, axis=1)
order = np.argsort(row)
assert_equal(row[order], [0, 3, 4])
assert_equal(col[order], [3, 1, 2])
row, col = argrelmin(x, axis=0)
order = np.argsort(row)
assert_equal(row[order], [1, 2, 3])
assert_equal(col[order], [1, 2, 3])
row, col = argrelmin(x, axis=1)
order = np.argsort(row)
assert_equal(row[order], [1, 2, 3])
assert_equal(col[order], [1, 2, 3])
def test_highorder(self):
order = 2
sigmas = [1.0, 2.0, 10.0, 5.0, 15.0]
test_data, act_locs = _gen_gaussians_even(sigmas, 500)
test_data[act_locs + order] = test_data[act_locs]*0.99999
test_data[act_locs - order] = test_data[act_locs]*0.99999
rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0]
assert_(len(rel_max_locs) == len(act_locs))
assert_((rel_max_locs == act_locs).all())
def test_2d_gaussians(self):
sigmas = [1.0, 2.0, 10.0]
test_data, act_locs = _gen_gaussians_even(sigmas, 100)
rot_factor = 20
rot_range = np.arange(0, len(test_data)) - rot_factor
test_data_2 = np.vstack([test_data, test_data[rot_range]])
rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1)
for rw in xrange(0, test_data_2.shape[0]):
inds = (rel_max_rows == rw)
assert_(len(rel_max_cols[inds]) == len(act_locs))
assert_((act_locs == (rel_max_cols[inds] - rot_factor*rw)).all())
class TestPeakProminences(object):
def test_empty(self):
"""
Test if an empty array is returned if no peaks are provided.
"""
out = peak_prominences([1, 2, 3], [])
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
assert_(arr.size == 0)
assert_(arr.dtype == dtype)
out = peak_prominences([], [])
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
assert_(arr.size == 0)
assert_(arr.dtype == dtype)
def test_basic(self):
"""
Test if height of prominences is correctly calculated in signal with
rising baseline (peak widths are 1 sample).
"""
# Prepare basic signal
x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1])
peaks = np.array([1, 2, 4, 6])
lbases = np.array([0, 0, 0, 5])
rbases = np.array([3, 3, 5, 7])
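        # Prominence is the peak height minus the higher of its two bases
        # (the lowest points separating the peak from higher terrain).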
proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0)
# Test if calculation matches handcrafted result
out = peak_prominences(x, peaks)
assert_equal(out[0], proms)
assert_equal(out[1], lbases)
assert_equal(out[2], rbases)
def test_edge_cases(self):
"""
Test edge cases.
"""
# Peaks have same height, prominence and bases
x = [0, 2, 1, 2, 1, 2, 0]
peaks = [1, 3, 5]
proms, lbases, rbases = peak_prominences(x, peaks)
assert_equal(proms, [2, 2, 2])
assert_equal(lbases, [0, 0, 0])
assert_equal(rbases, [6, 6, 6])
# Peaks have same height & prominence but different bases
x = [0, 1, 0, 1, 0, 1, 0]
peaks = np.array([1, 3, 5])
proms, lbases, rbases = peak_prominences(x, peaks)
assert_equal(proms, [1, 1, 1])
assert_equal(lbases, peaks - 1)
assert_equal(rbases, peaks + 1)
def test_non_contiguous(self):
"""
Test with non-C-contiguous input arrays.
"""
x = np.repeat([-9, 9, 9, 0, 3, 1], 2)
peaks = np.repeat([1, 2, 4], 2)
proms, lbases, rbases = peak_prominences(x[::2], peaks[::2])
assert_equal(proms, [9, 9, 2])
assert_equal(lbases, [0, 0, 3])
assert_equal(rbases, [3, 3, 5])
def test_wlen(self):
"""
Test if wlen actually shrinks the evaluation range correctly.
"""
x = [0, 1, 2, 3, 1, 0, -1]
peak = [3]
# Test rounding behavior of wlen
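        # wlen is rounded up to the nearest odd integer, so several values
        # below share the same effective window; i is the expected shrink
        # per side.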
assert_equal(peak_prominences(x, peak), [3., 0, 6])
for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]:
assert_equal(peak_prominences(x, peak, wlen), [3. - i, 0 + i, 6 - i])
def test_raises(self):
"""
        Verify that argument validation works as intended.
"""
# x with dimension > 1
with raises(ValueError, match='dimension'):
peak_prominences([[0, 1, 1, 0]], [1, 2])
# peaks with dimension > 1
with raises(ValueError, match='dimension'):
peak_prominences([0, 1, 1, 0], [[1, 2]])
# x with dimension < 1
with raises(ValueError, match='dimension'):
peak_prominences(3, [0,])
# empty x with peaks supplied
with raises(ValueError, match='not a valid peak'):
peak_prominences([], [1, 2])
# invalid peaks
for p in [-1, 0, 1, 2, 3]:
with raises(ValueError, match=str(p) + ' is not a valid peak'):
peak_prominences([1, 0, 2], [p,])
# peaks is not cast-able to np.intp
with raises(TypeError, match='Cannot safely cast'):
peak_prominences([0, 1, 1, 0], [1.1, 2.3])
# wlen < 3
with raises(ValueError, match='wlen'):
peak_prominences(np.arange(10), [3, 5], wlen=1)
class TestPeakWidths(object):
def test_empty(self):
"""
Test if an empty array is returned if no peaks are provided.
"""
widths = peak_widths([], [])[0]
assert_(isinstance(widths, np.ndarray))
assert_equal(widths.size, 0)
widths = peak_widths([1, 2, 3], [])[0]
assert_(isinstance(widths, np.ndarray))
assert_equal(widths.size, 0)
out = peak_widths([], [])
for arr in out:
assert_(isinstance(arr, np.ndarray))
assert_equal(arr.size, 0)
def test_basic(self):
"""
Test a simple use case with easy to verify results at different relative
heights.
"""
x = np.array([1, 0, 1, 2, 1, 0, -1])
prominence = 2
for rel_height, width_true, lip_true, rip_true in [
(0., 0., 3., 3.),
(0.25, 1., 2.5, 3.5),
(0.5, 2., 2., 4.),
(0.75, 3., 1.5, 4.5),
(1., 4., 1., 5.),
(2., 5., 1., 6.),
(3., 5., 1., 6.)
]:
width_calc, height, lip_calc, rip_calc = peak_widths(
x, [3], rel_height)
assert_allclose(width_calc, width_true)
assert_allclose(height, 2 - rel_height * prominence)
assert_allclose(lip_calc, lip_true)
assert_allclose(rip_calc, rip_true)
def test_non_contiguous(self):
"""
Test with non-C-contiguous input arrays.
"""
x = np.repeat([0, 100, 50], 4)
peaks = np.repeat([1], 3)
result = peak_widths(x[::4], peaks[::3])
assert_equal(result, [0.75, 75, 0.75, 1.5])
def test_exceptions(self):
"""
        Verify that argument validation works as intended.
"""
with raises(ValueError, match='dimension'):
# x with dimension > 1
peak_widths(np.zeros((3, 4)), np.ones(3))
with raises(ValueError, match='dimension'):
# x with dimension < 1
peak_widths(3, [0])
with raises(ValueError, match='dimension'):
# peaks with dimension > 1
peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp))
with raises(ValueError, match='dimension'):
# peaks with dimension < 1
peak_widths(np.arange(10), 3)
with raises(ValueError, match='not a valid peak'):
# peak pos exceeds x.size
peak_widths(np.arange(10), [8, 11])
with raises(ValueError, match='not a valid peak'):
# empty x with peaks supplied
peak_widths([], [1, 2])
with raises(TypeError, match='Cannot safely cast'):
# peak cannot be safely casted to intp
peak_widths(np.arange(10), [1.1, 2.3])
with raises(ValueError, match='rel_height'):
# rel_height is < 0
peak_widths(np.arange(10), [1, 2], rel_height=-1)
with raises(TypeError, match='None'):
# prominence data contains None
peak_widths([1, 2, 1], [1], prominence_data=(None, None, None))
def test_mismatching_prominence_data(self):
"""Test with mismatching peak and / or prominence data."""
x = [0, 1, 0]
peak = [1]
for i, (peaks, left_bases, right_bases) in enumerate([
((1.,), (-1,), (2,)), # left base not in x
((1.,), (0,), (3,)), # right base not in x
((1.,), (1,), (2,)), # left base same as peak
((1.,), (0,), (1,)), # right base same as peak
((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks
((1., 1.), (0,), (2,)), # arrays with different shapes
((1.,), (0, 0), (2,)), # arrays with different shapes
((1.,), (0,), (2, 2)) # arrays with different shapes
]):
            # Make sure the input matches the output of signal.peak_prominences
prominence_data = (np.array(peaks, dtype=np.float64),
np.array(left_bases, dtype=np.intp),
np.array(right_bases, dtype=np.intp))
# Test for correct exception
if i < 4:
match = "prominence data is invalid for peak"
else:
match = "arrays in `prominence_data` must have the same shape"
with raises(ValueError, match=match):
peak_widths(x, peak, prominence_data=prominence_data)
def test_intersection_rules(self):
"""Test if x == eval_height counts as an intersection."""
        # Flat peak with two possible intersection points if evaluated at 1
x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0]
# relative height is 0 -> width is 0 as well
assert_allclose(peak_widths(x, peaks=[5], rel_height=0),
[(0.,), (3.,), (5.,), (5.,)])
# width_height == x counts as intersection -> nearest 1 is chosen
assert_allclose(peak_widths(x, peaks=[5], rel_height=2/3),
[(4.,), (1.,), (3.,), (7.,)])
def test_unpack_condition_args():
"""
Verify parsing of condition arguments for `scipy.signal.find_peaks` function.
"""
x = np.arange(10)
amin_true = x
amax_true = amin_true + 10
peaks = amin_true[1::2]
# Test unpacking with None or interval
assert_((None, None) == _unpack_condition_args((None, None), x, peaks))
assert_((1, None) == _unpack_condition_args(1, x, peaks))
assert_((1, None) == _unpack_condition_args((1, None), x, peaks))
assert_((None, 2) == _unpack_condition_args((None, 2), x, peaks))
assert_((3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks))
# Test if borders are correctly reduced with `peaks`
amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks)
assert_equal(amin_calc, amin_true[peaks])
assert_equal(amax_calc, amax_true[peaks])
# Test raises if array borders don't match x
with raises(ValueError, match="array size of lower"):
_unpack_condition_args(amin_true, np.arange(11), peaks)
with raises(ValueError, match="array size of upper"):
_unpack_condition_args((None, amin_true), np.arange(11), peaks)
class TestFindPeaks(object):
# Keys of optionally returned properties
property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds',
'prominences', 'left_bases', 'right_bases', 'widths',
'width_heights', 'left_ips', 'right_ips'}
def test_constant(self):
"""
Test behavior for signal without local maxima.
"""
open_interval = (None, None)
peaks, props = find_peaks(np.ones(10),
height=open_interval, threshold=open_interval,
prominence=open_interval, width=open_interval)
assert_(peaks.size == 0)
for key in self.property_keys:
assert_(props[key].size == 0)
def test_height_condition(self):
"""
Test height condition for peaks.
"""
x = (0., 1/3, 0., 2.5, 0, 4., 0)
peaks, props = find_peaks(x, height=(None, None))
assert_equal(peaks, np.array([1, 3, 5]))
assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.]))
assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5]))
assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3]))
assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3]))
def test_threshold_condition(self):
"""
Test threshold condition for peaks.
"""
x = (0, 2, 1, 4, -1)
peaks, props = find_peaks(x, threshold=(None, None))
assert_equal(peaks, np.array([1, 3]))
assert_equal(props['left_thresholds'], np.array([2, 3]))
assert_equal(props['right_thresholds'], np.array([1, 5]))
assert_equal(find_peaks(x, threshold=2)[0], np.array([3]))
assert_equal(find_peaks(x, threshold=3.5)[0], np.array([]))
assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3]))
assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1]))
assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([]))
def test_distance_condition(self):
"""
Test distance condition for peaks.
"""
# Peaks of different height with constant distance 3
peaks_all = np.arange(1, 21, 3)
x = np.zeros(21)
x[peaks_all] += np.linspace(1, 2, peaks_all.size)
# Test if peaks with "minimal" distance are still selected (distance = 3)
assert_equal(find_peaks(x, distance=3)[0], peaks_all)
# Select every second peak (distance > 3)
peaks_subset = find_peaks(x, distance=3.0001)[0]
# Test if peaks_subset is subset of peaks_all
assert_(
np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0
)
# Test if every second peak was removed
assert_equal(np.diff(peaks_subset), 6)
# Test priority of peak removal
x = [-2, 1, -1, 0, -3]
peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size
assert_(peaks_subset.size == 1 and peaks_subset[0] == 1)
def test_prominence_condition(self):
"""
Test prominence condition for peaks.
"""
x = np.linspace(0, 10, 100)
peaks_true = np.arange(1, 99, 2)
offset = np.linspace(1, 10, peaks_true.size)
x[peaks_true] += offset
prominences = x[peaks_true] - x[peaks_true + 1]
interval = (3, 9)
keep = np.where(
(interval[0] <= prominences) & (prominences <= interval[1]))
peaks_calc, properties = find_peaks(x, prominence=interval)
assert_equal(peaks_calc, peaks_true[keep])
assert_equal(properties['prominences'], prominences[keep])
assert_equal(properties['left_bases'], 0)
assert_equal(properties['right_bases'], peaks_true[keep] + 1)
def test_width_condition(self):
"""
Test width condition for peaks.
"""
x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0])
peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75)
assert_equal(peaks.size, 1)
assert_equal(peaks, 7)
assert_allclose(props['widths'], 1.35)
assert_allclose(props['width_heights'], 1.)
assert_allclose(props['left_ips'], 6.4)
assert_allclose(props['right_ips'], 7.75)
def test_properties(self):
"""
Test returned properties.
"""
open_interval = (None, None)
x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9]
peaks, props = find_peaks(x,
height=open_interval, threshold=open_interval,
prominence=open_interval, width=open_interval)
assert_(len(props) == len(self.property_keys))
for key in self.property_keys:
assert_(peaks.size == props[key].size)
def test_raises(self):
"""
Test exceptions raised by function.
"""
with raises(ValueError, match="dimension"):
find_peaks(np.array(1))
with raises(ValueError, match="dimension"):
find_peaks(np.ones((2, 2)))
with raises(ValueError, match="distance"):
find_peaks(np.arange(10), distance=-1)
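# Illustrative sketch (not part of the original suite): when `distance` removes
# peaks, the higher peaks take priority and smaller neighbours closer than
# `distance` samples are discarded, as `test_distance_condition` above checks.
def _example_distance_priority():
    x = [0, 2, 0, 1, 0]                   # two peaks, 2 samples apart
    peaks, _ = find_peaks(x, distance=3)  # only the higher peak survives
    return peaks                          # -> array([1])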
class TestFindPeaksCwt(object):
def test_find_peaks_exact(self):
"""
Generate a series of gaussians and attempt to find the peak locations.
"""
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
num_points = 500
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
widths = np.arange(0.1, max(sigmas))
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,
min_length=None)
np.testing.assert_array_equal(found_locs, act_locs,
"Found maximum locations did not equal those expected")
def test_find_peaks_withnoise(self):
"""
Verify that peak locations are (approximately) found
for a series of gaussians with added noise.
"""
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
num_points = 500
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
widths = np.arange(0.1, max(sigmas))
noise_amp = 0.07
np.random.seed(18181911)
test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
found_locs = find_peaks_cwt(test_data, widths, min_length=15,
gap_thresh=1, min_snr=noise_amp / 5)
        np.testing.assert_equal(len(found_locs), len(act_locs), 'Different number '
                                'of peaks found than expected')
diffs = np.abs(found_locs - act_locs)
max_diffs = np.array(sigmas) / 5
        np.testing.assert_array_less(diffs, max_diffs, 'Maximum location differed '
                                     'by more than %s' % (max_diffs))
def test_find_peaks_nopeak(self):
"""
Verify that no peak is found in
data that's just noise.
"""
noise_amp = 1.0
num_points = 100
np.random.seed(181819141)
test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp)
widths = np.arange(10, 50)
found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)
np.testing.assert_equal(len(found_locs), 0)
| 28,103 | 37.288828 | 89 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_spectral.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_approx_equal,
assert_allclose, assert_array_equal, assert_equal,
assert_array_almost_equal_nulp)
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy import signal, fftpack
from scipy.signal import (periodogram, welch, lombscargle, csd, coherence,
spectrogram, stft, istft, check_COLA)
from scipy.signal.spectral import _spectral_helper
class TestPeriodogram(object):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_odd(self):
x = np.zeros(15)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = np.ones(16)/16.0
q[0] = 0
assert_allclose(p, q)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, scaling='spectrum')
g, q = periodogram(x, scaling='density')
assert_allclose(f, np.linspace(0, 0.5, 9))
assert_allclose(p, q/16.0)
def test_integer_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_integer_odd(self):
x = np.zeros(15, dtype=int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = np.ones(16)/16.0
q[0] = 0
assert_allclose(p, q)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = 5.0*np.ones(16)/16.0
q[0] = 0
assert_allclose(p, q)
def test_unk_scaling(self):
assert_raises(ValueError, periodogram, np.zeros(4, np.complex128),
scaling='foo')
def test_nd_axis_m1(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((2,1,10))
x[:,:,0] = 1.0
f, p = periodogram(x)
assert_array_equal(p.shape, (2, 1, 6))
assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60)
f0, p0 = periodogram(x[0,0,:])
assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60)
def test_nd_axis_0(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((10,2,1))
x[0,:,:] = 1.0
f, p = periodogram(x, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60)
f0, p0 = periodogram(x[:,0,0])
assert_array_almost_equal_nulp(p0, p[:,1,0])
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, 10, 'hann')
win = signal.get_window('hann', 16)
fe, pe = periodogram(x, 10, win)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, periodogram, x,
10, win_err) # win longer than signal
def test_padded_fft(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
fp, pp = periodogram(x, nfft=32)
assert_allclose(f, fp[::2])
assert_allclose(p, pp[::2])
assert_array_equal(pp.shape, (17,))
def test_empty_input(self):
f, p = periodogram([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_nfft(self):
x = np.zeros(18)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_nfft_is_xshape(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9, 'f')
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(15, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8, 'f')
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = np.ones(16, 'f')/16.0
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = 5.0*np.ones(16, 'f')/16.0
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
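# Illustrative sketch (not part of the original suite): with the default
# 'density' scaling the one-sided periodogram integrates approximately to the
# mean square of the (detrended) signal, which is what the q-arrays above
# encode for a unit impulse.
def _example_periodogram_parseval():
    rng = np.random.RandomState(0)
    x = rng.randn(256)
    f, p = periodogram(x)
    # The two returned numbers agree up to the trapezoidal rule and the
    # constant detrend applied by default.
    return np.trapz(p, f), np.mean(x**2)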
class TestWelch(object):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
0.02083333])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.38194444])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_unk_scaling(self):
assert_raises(ValueError, welch, np.zeros(4, np.complex128),
scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = welch(x, nperseg=10, detrend=False)
f2, p2 = welch(x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
x = np.rollaxis(x, 2, 0)
f, p = welch(x, nperseg=10, axis=0,
detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_nd_axis_m1(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
f, p = welch(x, nperseg=10)
assert_array_equal(p.shape, (2, 1, 6))
assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[0,0,:], nperseg=10)
assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
def test_nd_axis_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((10,2,1))
f, p = welch(x, nperseg=10, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[:,0,0], nperseg=10)
assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, 10, 'hann', nperseg=8)
win = signal.get_window('hann', 8)
fe, pe = welch(x, 10, win, nperseg=None)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
assert_array_equal(fe.shape, (5,)) # because win length used as nperseg
assert_array_equal(pe.shape, (5,))
assert_raises(ValueError, welch, x,
10, win, nperseg=4) # because nperseg != win.shape[-1]
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, welch, x,
10, win_err, nperseg=None) # win longer than signal
def test_empty_input(self):
f, p = welch([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = welch(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = welch(np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_data(self):
x = np.zeros(8)
x[0] = 1
        # For a string-like window, an input signal shorter than nperseg
        # raises a UserWarning and nperseg is set to x.shape[-1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8")
f, p = welch(x,window='hann') # default nperseg
f1, p1 = welch(x,window='hann', nperseg=256) # user-specified nperseg
f2, p2 = welch(x, nperseg=8) # valid nperseg, doesn't give warning
assert_allclose(f, f2)
assert_allclose(p, p2)
assert_allclose(f1, f2)
assert_allclose(p1, p2)
def test_window_long_or_nd(self):
assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1]))
assert_raises(ValueError, welch, np.zeros(4), 1,
np.arange(6).reshape((2,3)))
def test_nondefault_noverlap(self):
x = np.zeros(64)
x[::8] = 1
f, p = welch(x, nperseg=16, noverlap=4)
q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
1./6.])
assert_allclose(p, q, atol=1e-12)
def test_bad_noverlap(self):
assert_raises(ValueError, welch, np.zeros(4), 1, 'hann', 2, 7)
def test_nfft_too_short(self):
assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,
0.17072113], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111,
0.07638889], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype,
'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
def test_padded_freqs(self):
x = np.zeros(12)
nfft = 24
f = fftpack.fftfreq(nfft, 1.0)[:nfft//2+1]
f[-1] *= -1
fodd, _ = welch(x, nperseg=5, nfft=nfft)
feven, _ = welch(x, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
nfft = 25
f = fftpack.fftfreq(nfft, 1.0)[:(nfft + 1)//2]
fodd, _ = welch(x, nperseg=5, nfft=nfft)
feven, _ = welch(x, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
def test_window_correction(self):
A = 20
fs = 1e4
nperseg = int(fs//10)
fsig = 300
ii = int(fsig*nperseg//fs) # Freq index of fsig
tt = np.arange(fs)/fs
x = A*np.sin(2*np.pi*fsig*tt)
for window in ['hann', 'bartlett', ('tukey', 0.1), 'flattop']:
_, p_spec = welch(x, fs=fs, nperseg=nperseg, window=window,
scaling='spectrum')
freq, p_dens = welch(x, fs=fs, nperseg=nperseg, window=window,
scaling='density')
# Check peak height at signal frequency for 'spectrum'
assert_allclose(p_spec[ii], A**2/2.0)
# Check integrated spectrum RMS for 'density'
assert_allclose(np.sqrt(np.trapz(p_dens, freq)), A*np.sqrt(2)/2,
rtol=1e-3)
def test_axis_rolling(self):
np.random.seed(1234)
x_flat = np.random.randn(1024)
_, p_flat = welch(x_flat)
for a in range(3):
newshape = [1,]*3
newshape[a] = -1
x = x_flat.reshape(newshape)
_, p_plus = welch(x, axis=a) # Positive axis index
_, p_minus = welch(x, axis=a-x.ndim) # Negative axis index
assert_equal(p_flat, p_plus.squeeze(), err_msg=a)
assert_equal(p_flat, p_minus.squeeze(), err_msg=a-x.ndim)
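# Illustrative sketch (not part of the original suite): `csd(x, x, ...)`
# reduces to `welch(x, ...)` up to a zero imaginary part, which is why the
# expected q-arrays in TestCSD below mirror those in TestWelch above.
def _example_csd_welch_equivalence():
    x = np.zeros(16)
    x[0] = 1
    x[8] = 1
    f_w, p_w = welch(x, nperseg=8)
    f_c, p_c = csd(x, x, nperseg=8)
    assert_allclose(f_w, f_c)
    assert_allclose(p_w, np.real(p_c))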
class TestCSD:
def test_pad_shorter_x(self):
x = np.zeros(8)
y = np.zeros(12)
f = np.linspace(0, 0.5, 7)
c = np.zeros(7,dtype=np.complex128)
f1, c1 = csd(x, y, nperseg=12)
assert_allclose(f, f1)
assert_allclose(c, c1)
def test_pad_shorter_y(self):
x = np.zeros(12)
y = np.zeros(8)
f = np.linspace(0, 0.5, 7)
c = np.zeros(7,dtype=np.complex128)
f1, c1 = csd(x, y, nperseg=12)
assert_allclose(f, f1)
assert_allclose(c, c1)
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
0.02083333])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.38194444])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_unk_scaling(self):
assert_raises(ValueError, csd, np.zeros(4, np.complex128),
np.ones(4, np.complex128), scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = csd(x, x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = csd(x, x, nperseg=10, detrend=False)
f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = csd(x, x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = csd(x, x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
x = np.rollaxis(x, 2, 0)
f, p = csd(x, x, nperseg=10, axis=0,
detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_nd_axis_m1(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
f, p = csd(x, x, nperseg=10)
assert_array_equal(p.shape, (2, 1, 6))
assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
f0, p0 = csd(x[0,0,:], x[0,0,:], nperseg=10)
assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
def test_nd_axis_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((10,2,1))
f, p = csd(x, x, nperseg=10, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
f0, p0 = csd(x[:,0,0], x[:,0,0], nperseg=10)
assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, 10, 'hann', 8)
win = signal.get_window('hann', 8)
fe, pe = csd(x, x, 10, win, nperseg=None)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
assert_array_equal(fe.shape, (5,)) # because win length used as nperseg
assert_array_equal(pe.shape, (5,))
assert_raises(ValueError, csd, x, x,
10, win, nperseg=256) # because nperseg != win.shape[-1]
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, csd, x, x,
10, win_err, nperseg=None) # because win longer than signal
def test_empty_input(self):
f, p = csd([],np.zeros(10))
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
f, p = csd(np.zeros(10),[])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = csd(np.empty(shape), np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
f, p = csd(np.ones(10), np.empty((5,0)))
assert_array_equal(f.shape, (5,0))
assert_array_equal(p.shape, (5,0))
f, p = csd(np.empty((5,0)), np.ones(10))
assert_array_equal(f.shape, (5,0))
assert_array_equal(p.shape, (5,0))
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = csd(np.empty(shape), np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
f, p = csd(np.empty((10,10,3)), np.zeros((10,0,1)), axis=1)
assert_array_equal(f.shape, (10,0,3))
assert_array_equal(p.shape, (10,0,3))
f, p = csd(np.empty((10,0,1)), np.zeros((10,10,3)), axis=1)
assert_array_equal(f.shape, (10,0,3))
assert_array_equal(p.shape, (10,0,3))
def test_short_data(self):
x = np.zeros(8)
x[0] = 1
        # For a string-like window, an input signal shorter than nperseg
        # raises a UserWarning and nperseg is set to x.shape[-1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8")
f, p = csd(x, x, window='hann') # default nperseg
f1, p1 = csd(x, x, window='hann', nperseg=256) # user-specified nperseg
f2, p2 = csd(x, x, nperseg=8) # valid nperseg, doesn't give warning
assert_allclose(f, f2)
assert_allclose(p, p2)
assert_allclose(f1, f2)
assert_allclose(p1, p2)
def test_window_long_or_nd(self):
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,
np.array([1,1,1,1,1]))
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,
np.arange(6).reshape((2,3)))
def test_nondefault_noverlap(self):
x = np.zeros(64)
x[::8] = 1
f, p = csd(x, x, nperseg=16, noverlap=4)
q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
1./6.])
assert_allclose(p, q, atol=1e-12)
def test_bad_noverlap(self):
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, 'hann',
2, 7)
def test_nfft_too_short(self):
assert_raises(ValueError, csd, np.ones(12), np.zeros(12), nfft=3,
nperseg=4)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,
0.17072113], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111,
0.07638889], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype,
'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
def test_padded_freqs(self):
x = np.zeros(12)
y = np.ones(12)
nfft = 24
f = fftpack.fftfreq(nfft, 1.0)[:nfft//2+1]
f[-1] *= -1
fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
feven, _ = csd(x, y, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
nfft = 25
f = fftpack.fftfreq(nfft, 1.0)[:(nfft + 1)//2]
fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
feven, _ = csd(x, y, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
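# Illustrative sketch (not part of the original suite): magnitude-squared
# coherence is Cxy = |Pxy|**2 / (Pxx * Pyy), so any y that is a scaled copy of
# x gives Cxy == 1 at every frequency, as the two tests below verify.
def _example_coherence_definition():
    rng = np.random.RandomState(0)
    x = rng.randn(64)
    y = -x
    _, pxy = csd(x, y, nperseg=16)
    _, pxx = welch(x, nperseg=16)
    _, pyy = welch(y, nperseg=16)
    cxy = np.abs(pxy)**2 / (pxx * pyy)
    _, cxy_ref = coherence(x, y, nperseg=16)
    assert_allclose(cxy, cxy_ref)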
class TestCoherence(object):
def test_identical_input(self):
x = np.random.randn(20)
y = np.copy(x) # So `y is x` -> False
f = np.linspace(0, 0.5, 6)
C = np.ones(6)
f1, C1 = coherence(x, y, nperseg=10)
assert_allclose(f, f1)
assert_allclose(C, C1)
def test_phase_shifted_input(self):
x = np.random.randn(20)
y = -x
f = np.linspace(0, 0.5, 6)
C = np.ones(6)
f1, C1 = coherence(x, y, nperseg=10)
assert_allclose(f, f1)
assert_allclose(C, C1)
class TestSpectrogram(object):
def test_average_all_segments(self):
x = np.random.randn(1024)
fs = 1.0
window = ('tukey', 0.25)
nperseg = 16
noverlap = 2
f, _, P = spectrogram(x, fs, window, nperseg, noverlap)
fw, Pw = welch(x, fs, window, nperseg, noverlap)
assert_allclose(f, fw)
assert_allclose(np.mean(P, axis=-1), Pw)
def test_window_external(self):
x = np.random.randn(1024)
fs = 1.0
window = ('tukey', 0.25)
nperseg = 16
noverlap = 2
f, _, P = spectrogram(x, fs, window, nperseg, noverlap)
win = signal.get_window(('tukey', 0.25), 16)
fe, _, Pe = spectrogram(x, fs, win, nperseg=None, noverlap=2)
assert_array_equal(fe.shape, (9,)) # because win length used as nperseg
assert_array_equal(Pe.shape, (9,73))
assert_raises(ValueError, spectrogram, x,
fs, win, nperseg=8) # because nperseg != win.shape[-1]
win_err = signal.get_window(('tukey', 0.25), 2048)
assert_raises(ValueError, spectrogram, x,
fs, win_err, nperseg=None) # win longer than signal
def test_short_data(self):
x = np.random.randn(1024)
fs = 1.0
        # For a string-like window, an input signal shorter than nperseg
        # raises a UserWarning and nperseg is set to x.shape[-1]
f, _, p = spectrogram(x, fs, window=('tukey',0.25)) # default nperseg
with suppress_warnings() as sup:
sup.filter(UserWarning,
"nperseg = 1025 is greater than input length = 1024, using nperseg = 1024")
f1, _, p1 = spectrogram(x, fs, window=('tukey',0.25),
nperseg=1025) # user-specified nperseg
f2, _, p2 = spectrogram(x, fs, nperseg=256) # to compare w/default
f3, _, p3 = spectrogram(x, fs, nperseg=1024) # compare w/user-spec'd
assert_allclose(f, f2)
assert_allclose(p, p2)
assert_allclose(f1, f3)
assert_allclose(p1, p3)
class TestLombscargle(object):
def test_frequency(self):
"""Test if frequency location of peak corresponds to frequency of
generated input signal.
"""
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
        # Generate a sine wave at the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
P = lombscargle(t, x, f)
# Check if difference between found frequency maximum and input
# frequency is less than accuracy
delta = f[1] - f[0]
assert_(w - f[np.argmax(P)] < (delta/2.))
def test_amplitude(self):
# Test if height of peak in normalized Lomb-Scargle periodogram
# corresponds to amplitude of the generated input signal.
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
        # Generate a sine wave at the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f)
# Normalize
pgram = np.sqrt(4 * pgram / t.shape[0])
        # Check if the height of the normalized peak matches the input
        # amplitude within the requested precision
assert_approx_equal(np.max(pgram), ampl, significant=2)
def test_precenter(self):
# Test if precenter gives the same result as manually precentering.
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
offset = 0.15 # Offset to be subtracted in pre-centering
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
        # Generate a sine wave at the selected times
x = ampl * np.sin(w*t + phi) + offset
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f, precenter=True)
pgram2 = lombscargle(t, x - x.mean(), f, precenter=False)
# check if centering worked
assert_allclose(pgram, pgram2)
def test_normalize(self):
        # Test normalize option of Lomb-Scargle.
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
        # Generate a sine wave at the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f)
pgram2 = lombscargle(t, x, f, normalize=True)
# check if normalization works as expected
assert_allclose(pgram * 2 / np.dot(x, x), pgram2)
assert_approx_equal(np.max(pgram2), 1.0, significant=2)
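    # Illustrative note (not part of the original suite): with normalize=True
    # the periodogram is divided by half of the sum of squares of the input,
    # i.e. pgram2 == pgram * 2 / np.dot(x, x), so a pure sinusoid peaks near 1.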
def test_wrong_shape(self):
t = np.linspace(0, 1, 1)
x = np.linspace(0, 1, 2)
f = np.linspace(0, 1, 3)
assert_raises(ValueError, lombscargle, t, x, f)
def test_zero_division(self):
t = np.zeros(1)
x = np.zeros(1)
f = np.zeros(1)
assert_raises(ZeroDivisionError, lombscargle, t, x, f)
def test_lombscargle_atan_vs_atan2(self):
# https://github.com/scipy/scipy/issues/3787
# This raised a ZeroDivisionError.
t = np.linspace(0, 10, 1000, endpoint=False)
x = np.sin(4*t)
f = np.linspace(0, 50, 500, endpoint=False) + 0.1
q = lombscargle(t, x, f*2*np.pi)
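# Illustrative sketch (not part of the original suite): the basic stft -> istft
# round trip that the class below exercises exhaustively; the default 'hann'
# window with 50% overlap satisfies COLA, so reconstruction is exact up to
# floating point error.
def _example_stft_roundtrip():
    rng = np.random.RandomState(0)
    x = rng.randn(1024)
    _, _, Z = stft(x, nperseg=256, noverlap=128)
    _, xr = istft(Z, nperseg=256, noverlap=128)
    assert_allclose(x, xr[:x.size], atol=1e-10)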
class TestSTFT(object):
def test_input_validation(self):
assert_raises(ValueError, check_COLA, 'hann', -10, 0)
assert_raises(ValueError, check_COLA, 'hann', 10, 20)
assert_raises(ValueError, check_COLA, np.ones((2,2)), 10, 0)
assert_raises(ValueError, check_COLA, np.ones(20), 10, 0)
x = np.empty(1024)
z = stft(x)
assert_raises(ValueError, stft, x, window=np.ones((2,2)))
assert_raises(ValueError, stft, x, window=np.ones(10), nperseg=256)
assert_raises(ValueError, stft, x, nperseg=-256)
assert_raises(ValueError, stft, x, nperseg=256, noverlap=1024)
assert_raises(ValueError, stft, x, nperseg=256, nfft=8)
assert_raises(ValueError, istft, x) # Not 2d
assert_raises(ValueError, istft, z, window=np.ones((2,2)))
assert_raises(ValueError, istft, z, window=np.ones(10), nperseg=256)
assert_raises(ValueError, istft, z, nperseg=-256)
assert_raises(ValueError, istft, z, nperseg=256, noverlap=1024)
assert_raises(ValueError, istft, z, nperseg=256, nfft=8)
assert_raises(ValueError, istft, z, nperseg=256, noverlap=0,
window='hann') # Doesn't meet COLA
assert_raises(ValueError, istft, z, time_axis=0, freq_axis=0)
assert_raises(ValueError, _spectral_helper, x, x, mode='foo')
assert_raises(ValueError, _spectral_helper, x[:512], x[512:],
mode='stft')
assert_raises(ValueError, _spectral_helper, x, x, boundary='foo')
def test_check_COLA(self):
settings = [
('boxcar', 10, 0),
('boxcar', 10, 9),
('bartlett', 51, 26),
('hann', 256, 128),
('hann', 256, 192),
('blackman', 300, 200),
(('tukey', 0.5), 256, 64),
('hann', 256, 255),
]
        for setting in settings:
            msg = '{0}, {1}, {2}'.format(*setting)
            assert_equal(True, check_COLA(*setting), err_msg=msg)
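        # Illustrative note (not part of the original suite): these settings
        # satisfy the constant-overlap-add (COLA) constraint, i.e.
        # sum_k w[n - k*(nperseg - noverlap)] is the same for every n, which
        # is what makes the istft reconstructions in the round-trip tests
        # below exact up to floating point error.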
def test_average_all_segments(self):
np.random.seed(1234)
x = np.random.randn(1024)
fs = 1.0
window = 'hann'
nperseg = 16
noverlap = 8
# Compare twosided, because onesided welch doubles non-DC terms to
# account for power at negative frequencies. stft doesn't do this,
# because it breaks invertibility.
f, _, Z = stft(x, fs, window, nperseg, noverlap, padded=False,
return_onesided=False, boundary=None)
fw, Pw = welch(x, fs, window, nperseg, noverlap, return_onesided=False,
scaling='spectrum', detrend=False)
assert_allclose(f, fw)
assert_allclose(np.mean(np.abs(Z)**2, axis=-1), Pw)
def test_permute_axes(self):
np.random.seed(1234)
x = np.random.randn(1024)
fs = 1.0
window = 'hann'
nperseg = 16
noverlap = 8
f1, t1, Z1 = stft(x, fs, window, nperseg, noverlap)
f2, t2, Z2 = stft(x.reshape((-1, 1, 1)), fs, window, nperseg, noverlap,
axis=0)
t3, x1 = istft(Z1, fs, window, nperseg, noverlap)
t4, x2 = istft(Z2.T, fs, window, nperseg, noverlap, time_axis=0,
freq_axis=-1)
assert_allclose(f1, f2)
assert_allclose(t1, t2)
assert_allclose(t3, t4)
assert_allclose(Z1, Z2[:, 0, 0, :])
assert_allclose(x1, x2[:, 0, 0])
def test_roundtrip_real(self):
np.random.seed(1234)
settings = [
('boxcar', 100, 10, 0), # Test no overlap
('boxcar', 100, 10, 9), # Test high overlap
('bartlett', 101, 51, 26), # Test odd nperseg
('hann', 1024, 256, 128), # Test defaults
(('tukey', 0.5), 1152, 256, 64), # Test Tukey
('hann', 1024, 256, 255), # Test overlapped hann
]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=False)
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window)
msg = '{0}, {1}'.format(window, noverlap)
assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg)
@pytest.mark.xfail(reason="Needs complex rfft from fftpack, see gh-2487 + gh-6058")
def test_roundtrip_float32(self):
np.random.seed(1234)
settings = [('hann', 1024, 256, 128)]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
x = x.astype(np.float32)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=False)
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window)
msg = '{0}, {1}'.format(window, noverlap)
            assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg, rtol=1e-4)
assert_(x.dtype == xr.dtype)
def test_roundtrip_complex(self):
np.random.seed(1234)
settings = [
('boxcar', 100, 10, 0), # Test no overlap
('boxcar', 100, 10, 9), # Test high overlap
('bartlett', 101, 51, 26), # Test odd nperseg
('hann', 1024, 256, 128), # Test defaults
(('tukey', 0.5), 1152, 256, 64), # Test Tukey
('hann', 1024, 256, 255), # Test overlapped hann
]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size) + 10j*np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=False,
return_onesided=False)
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window, input_onesided=False)
msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)
assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg)
# Check that asking for onesided switches to twosided
with suppress_warnings() as sup:
sup.filter(UserWarning,
"Input data is complex, switching to return_onesided=False")
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=False,
return_onesided=True)
tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
window=window, input_onesided=False)
msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)
assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg)
def test_roundtrip_boundary_extension(self):
np.random.seed(1234)
# Test against boxcar, since window is all ones, and thus can be fully
# recovered with no boundary extension
settings = [
('boxcar', 100, 10, 0), # Test no overlap
('boxcar', 100, 10, 9), # Test high overlap
]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=True,
boundary=None)
_, xr = istft(zz, noverlap=noverlap, window=window, boundary=False)
for boundary in ['even', 'odd', 'constant', 'zeros']:
_, _, zz_ext = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=True,
boundary=boundary)
_, xr_ext = istft(zz_ext, noverlap=noverlap, window=window,
boundary=True)
msg = '{0}, {1}, {2}'.format(window, noverlap, boundary)
assert_allclose(x, xr, err_msg=msg)
assert_allclose(x, xr_ext, err_msg=msg)
def test_roundtrip_padded_signal(self):
np.random.seed(1234)
settings = [
('boxcar', 101, 10, 0),
('hann', 1000, 256, 128),
]
for window, N, nperseg, noverlap in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
_, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
window=window, detrend=None, padded=True)
tr, xr = istft(zz, noverlap=noverlap, window=window)
msg = '{0}, {1}'.format(window, noverlap)
# Account for possible zero-padding at the end
assert_allclose(t, tr[:t.size], err_msg=msg)
assert_allclose(x, xr[:x.size], err_msg=msg)
def test_roundtrip_padded_FFT(self):
np.random.seed(1234)
settings = [
('hann', 1024, 256, 128, 512),
('hann', 1024, 256, 128, 501),
('boxcar', 100, 10, 0, 33),
(('tukey', 0.5), 1152, 256, 64, 1024),
]
for window, N, nperseg, noverlap, nfft in settings:
t = np.arange(N)
x = 10*np.random.randn(t.size)
xc = x*np.exp(1j*np.pi/4)
# real signal
_, _, z = stft(x, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
window=window, detrend=None, padded=True)
# complex signal
_, _, zc = stft(xc, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
window=window, detrend=None, padded=True,
return_onesided=False)
tr, xr = istft(z, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
window=window)
tr, xcr = istft(zc, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
window=window, input_onesided=False)
msg = '{0}, {1}'.format(window, noverlap)
assert_allclose(t, tr, err_msg=msg)
assert_allclose(x, xr, err_msg=msg)
assert_allclose(xc, xcr, err_msg=msg)
def test_axis_rolling(self):
np.random.seed(1234)
x_flat = np.random.randn(1024)
_, _, z_flat = stft(x_flat)
for a in range(3):
newshape = [1,]*3
newshape[a] = -1
x = x_flat.reshape(newshape)
_, _, z_plus = stft(x, axis=a) # Positive axis index
_, _, z_minus = stft(x, axis=a-x.ndim) # Negative axis index
assert_equal(z_flat, z_plus.squeeze(), err_msg=a)
assert_equal(z_flat, z_minus.squeeze(), err_msg=a-x.ndim)
# z_flat has shape [n_freq, n_time]
# Test vs. transpose
_, x_transpose_m = istft(z_flat.T, time_axis=-2, freq_axis=-1)
_, x_transpose_p = istft(z_flat.T, time_axis=0, freq_axis=1)
assert_allclose(x_flat, x_transpose_m, err_msg='istft transpose minus')
assert_allclose(x_flat, x_transpose_p, err_msg='istft transpose plus')
| 49,326 | 35.32327 | 105 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_wavelets.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, \
assert_array_equal, assert_array_almost_equal, assert_array_less, assert_
from scipy._lib.six import xrange
from scipy.signal import wavelets
class TestWavelets(object):
def test_qmf(self):
assert_array_equal(wavelets.qmf([1, 1]), [1, -1])
def test_daub(self):
for i in xrange(1, 15):
assert_equal(len(wavelets.daub(i)), i * 2)
def test_cascade(self):
for J in xrange(1, 7):
for i in xrange(1, 5):
lpcoef = wavelets.daub(i)
k = len(lpcoef)
x, phi, psi = wavelets.cascade(lpcoef, J)
assert_(len(x) == len(phi) == len(psi))
assert_equal(len(x), (k - 1) * 2 ** J)
def test_morlet(self):
x = wavelets.morlet(50, 4.1, complete=True)
y = wavelets.morlet(50, 4.1, complete=False)
# Test if complete and incomplete wavelet have same lengths:
assert_equal(len(x), len(y))
# Test if complete wavelet is less than incomplete wavelet:
assert_array_less(x, y)
x = wavelets.morlet(10, 50, complete=False)
y = wavelets.morlet(10, 50, complete=True)
# For large widths complete and incomplete wavelets should be
# identical within numerical precision:
assert_equal(x, y)
# miscellaneous tests:
x = np.array([1.73752399e-09 + 9.84327394e-25j,
6.49471756e-01 + 0.00000000e+00j,
1.73752399e-09 - 9.84327394e-25j])
y = wavelets.morlet(3, w=2, complete=True)
assert_array_almost_equal(x, y)
x = np.array([2.00947715e-09 + 9.84327394e-25j,
7.51125544e-01 + 0.00000000e+00j,
2.00947715e-09 - 9.84327394e-25j])
y = wavelets.morlet(3, w=2, complete=False)
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, s=4, complete=True)
y = wavelets.morlet(20000, s=8, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, s=4, complete=False)
assert_array_almost_equal(y, x, decimal=2)
y = wavelets.morlet(20000, s=8, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=3, s=5, complete=True)
y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=3, s=5, complete=False)
assert_array_almost_equal(y, x, decimal=2)
y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=7, s=10, complete=True)
y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=7, s=10, complete=False)
assert_array_almost_equal(x, y, decimal=2)
y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
def test_ricker(self):
w = wavelets.ricker(1.0, 1)
expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25))
assert_array_equal(w, expected)
lengths = [5, 11, 15, 51, 101]
for length in lengths:
w = wavelets.ricker(length, 1.0)
assert_(len(w) == length)
max_loc = np.argmax(w)
assert_(max_loc == (length // 2))
points = 100
w = wavelets.ricker(points, 2.0)
half_vec = np.arange(0, points // 2)
        # Wavelet should be symmetric
assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
        # Check zeros
aas = [5, 10, 15, 20, 30]
points = 99
for a in aas:
w = wavelets.ricker(points, a)
vec = np.arange(0, points) - (points - 1.0) / 2
exp_zero1 = np.argmin(np.abs(vec - a))
exp_zero2 = np.argmin(np.abs(vec + a))
assert_array_almost_equal(w[exp_zero1], 0)
assert_array_almost_equal(w[exp_zero2], 0)
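        # Illustrative note (not part of the original suite): the closed form
        # checked above is the Mexican-hat wavelet
        #     A * (1 - (t/a)**2) * exp(-t**2 / (2*a**2)),
        # with A = 2/(sqrt(3*a)*pi**0.25), which is why w is zero at
        # t = +/- a and maximal at the midpoint.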
def test_cwt(self):
widths = [1.0]
delta_wavelet = lambda s, t: np.array([1])
len_data = 100
test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0)
        # Test that a delta-function wavelet returns the input data unchanged
cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths)
assert_(cwt_dat.shape == (len(widths), len_data))
assert_array_almost_equal(test_data, cwt_dat.flatten())
        # Check proper shape of output
widths = [1, 3, 4, 5, 10]
cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths)
assert_(cwt_dat.shape == (len(widths), len_data))
widths = [len_data * 10]
        # Note: this wavelet isn't defined quite right, but is fine for this test
flat_wavelet = lambda l, w: np.ones(w) / w
cwt_dat = wavelets.cwt(test_data, flat_wavelet, widths)
assert_array_almost_equal(cwt_dat, np.mean(test_data))
| 5,192 | 38.045113 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_windows.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose,
assert_equal, assert_, assert_array_less)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy import fftpack
from scipy.signal import windows, get_window, resample, hann as dep_hann
window_funcs = [
('boxcar', ()),
('triang', ()),
('parzen', ()),
('bohman', ()),
('blackman', ()),
('nuttall', ()),
('blackmanharris', ()),
('flattop', ()),
('bartlett', ()),
('hanning', ()),
('barthann', ()),
('hamming', ()),
('kaiser', (1,)),
('dpss', (2,)),
('gaussian', (0.5,)),
('general_gaussian', (1.5, 2)),
('chebwin', (1,)),
('slepian', (2,)),
('cosine', ()),
('hann', ()),
('exponential', ()),
('tukey', (0.5,)),
]
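# Illustrative sketch (not part of the original suite): each (name, args) pair
# above can be handed to `get_window` -- parameterless windows as a plain
# string, parametrized ones as a tuple of the name plus its parameters.
def _example_get_window_from_specs():
    out = {}
    for name, args in [('hann', ()), ('kaiser', (1,)), ('tukey', (0.5,))]:
        spec = (name,) + args if args else name
        out[name] = get_window(spec, 8)
    return out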
class TestBartHann(object):
def test_basic(self):
assert_allclose(windows.barthann(6, sym=True),
[0, 0.35857354213752, 0.8794264578624801,
0.8794264578624801, 0.3585735421375199, 0])
assert_allclose(windows.barthann(7),
[0, 0.27, 0.73, 1.0, 0.73, 0.27, 0])
assert_allclose(windows.barthann(6, False),
[0, 0.27, 0.73, 1.0, 0.73, 0.27])
class TestBartlett(object):
def test_basic(self):
assert_allclose(windows.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0])
assert_allclose(windows.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0])
assert_allclose(windows.bartlett(6, False),
[0, 1/3, 2/3, 1.0, 2/3, 1/3])
class TestBlackman(object):
def test_basic(self):
assert_allclose(windows.blackman(6, sym=False),
[0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14)
assert_allclose(windows.blackman(7, sym=False),
[0, 0.09045342435412804, 0.4591829575459636,
0.9203636180999081, 0.9203636180999081,
0.4591829575459636, 0.09045342435412804], atol=1e-8)
assert_allclose(windows.blackman(6),
[0, 0.2007701432625305, 0.8492298567374694,
0.8492298567374694, 0.2007701432625305, 0],
atol=1e-14)
assert_allclose(windows.blackman(7, True),
[0, 0.13, 0.63, 1.0, 0.63, 0.13, 0], atol=1e-14)
class TestBlackmanHarris(object):
def test_basic(self):
assert_allclose(windows.blackmanharris(6, False),
[6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645])
assert_allclose(windows.blackmanharris(7, sym=False),
[6.0e-05, 0.03339172347815117, 0.332833504298565,
0.8893697722232837, 0.8893697722232838,
0.3328335042985652, 0.03339172347815122])
assert_allclose(windows.blackmanharris(6),
[6.0e-05, 0.1030114893456638, 0.7938335106543362,
0.7938335106543364, 0.1030114893456638, 6.0e-05])
assert_allclose(windows.blackmanharris(7, sym=True),
[6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645,
6.0e-05])
class TestBohman(object):
def test_basic(self):
assert_allclose(windows.bohman(6),
[0, 0.1791238937062839, 0.8343114522576858,
0.8343114522576858, 0.1791238937062838, 0])
assert_allclose(windows.bohman(7, sym=True),
[0, 0.1089977810442293, 0.6089977810442293, 1.0,
0.6089977810442295, 0.1089977810442293, 0])
assert_allclose(windows.bohman(6, False),
[0, 0.1089977810442293, 0.6089977810442293, 1.0,
0.6089977810442295, 0.1089977810442293])
class TestBoxcar(object):
def test_basic(self):
assert_allclose(windows.boxcar(6), [1, 1, 1, 1, 1, 1])
assert_allclose(windows.boxcar(7), [1, 1, 1, 1, 1, 1, 1])
assert_allclose(windows.boxcar(6, False), [1, 1, 1, 1, 1, 1])
cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348,
0.198891, 0.235450, 0.274846, 0.316836,
0.361119, 0.407338, 0.455079, 0.503883,
0.553248, 0.602637, 0.651489, 0.699227,
0.745266, 0.789028, 0.829947, 0.867485,
0.901138, 0.930448, 0.955010, 0.974482,
0.988591, 0.997138, 1.000000, 0.997138,
0.988591, 0.974482, 0.955010, 0.930448,
0.901138, 0.867485, 0.829947, 0.789028,
0.745266, 0.699227, 0.651489, 0.602637,
0.553248, 0.503883, 0.455079, 0.407338,
0.361119, 0.316836, 0.274846, 0.235450,
0.198891, 0.165348, 0.134941, 0.107729,
0.200938])
cheb_even_true = array([0.203894, 0.107279, 0.133904,
0.163608, 0.196338, 0.231986,
0.270385, 0.311313, 0.354493,
0.399594, 0.446233, 0.493983,
0.542378, 0.590916, 0.639071,
0.686302, 0.732055, 0.775783,
0.816944, 0.855021, 0.889525,
0.920006, 0.946060, 0.967339,
0.983557, 0.994494, 1.000000,
1.000000, 0.994494, 0.983557,
0.967339, 0.946060, 0.920006,
0.889525, 0.855021, 0.816944,
0.775783, 0.732055, 0.686302,
0.639071, 0.590916, 0.542378,
0.493983, 0.446233, 0.399594,
0.354493, 0.311313, 0.270385,
0.231986, 0.196338, 0.163608,
0.133904, 0.107279, 0.203894])
class TestChebWin(object):
def test_basic(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
assert_allclose(windows.chebwin(6, 100),
[0.1046401879356917, 0.5075781475823447, 1.0, 1.0,
0.5075781475823447, 0.1046401879356917])
assert_allclose(windows.chebwin(7, 100),
[0.05650405062850233, 0.316608530648474,
0.7601208123539079, 1.0, 0.7601208123539079,
0.316608530648474, 0.05650405062850233])
assert_allclose(windows.chebwin(6, 10),
[1.0, 0.6071201674458373, 0.6808391469897297,
0.6808391469897297, 0.6071201674458373, 1.0])
assert_allclose(windows.chebwin(7, 10),
[1.0, 0.5190521247588651, 0.5864059018130382,
0.6101519801307441, 0.5864059018130382,
0.5190521247588651, 1.0])
assert_allclose(windows.chebwin(6, 10, False),
[1.0, 0.5190521247588651, 0.5864059018130382,
0.6101519801307441, 0.5864059018130382,
0.5190521247588651])
def test_cheb_odd_high_attenuation(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
cheb_odd = windows.chebwin(53, at=-40)
assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4)
def test_cheb_even_high_attenuation(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
cheb_even = windows.chebwin(54, at=40)
assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)
def test_cheb_odd_low_attenuation(self):
cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405,
0.610151, 0.586405, 0.519052,
1.000000])
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
cheb_odd = windows.chebwin(7, at=10)
assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4)
def test_cheb_even_low_attenuation(self):
cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027,
0.541338, 0.541338, 0.51027,
0.451924, 1.000000])
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
cheb_even = windows.chebwin(8, at=-10)
assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4)
exponential_data = {
(4, None, 0.2, False):
array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03]),
(4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988,
0.0820849986238988, 0.00055308437014783]),
(4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233]),
(4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342,
0.60653065971263342, 0.22313016014842982]),
(4, 2, 0.2, False):
array([4.53999297624848542e-05, 6.73794699908546700e-03,
1.00000000000000000e+00, 6.73794699908546700e-03]),
(4, 2, 0.2, True): None,
(4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233]),
(4, 2, 1.0, True): None,
(5, None, 0.2, True):
array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03, 4.53999297624848542e-05]),
(5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, 2, 0.2, True): None,
(5, 2, 1.0, True): None
}
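# Illustrative note (not part of the original suite): the reference values in
# exponential_data follow w[n] = exp(-abs(n - center) / tau).  Entries mapped
# to None are expected to raise, because a symmetric window (sym=True) may not
# be combined with an explicit center.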
def test_exponential():
for k, v in exponential_data.items():
if v is None:
assert_raises(ValueError, windows.exponential, *k)
else:
win = windows.exponential(*k)
assert_allclose(win, v, rtol=1e-14)
class TestFlatTop(object):
def test_basic(self):
assert_allclose(windows.flattop(6, sym=False),
[-0.000421051, -0.051263156, 0.19821053, 1.0,
0.19821053, -0.051263156])
assert_allclose(windows.flattop(7, sym=False),
[-0.000421051, -0.03684078115492348,
0.01070371671615342, 0.7808739149387698,
0.7808739149387698, 0.01070371671615342,
-0.03684078115492348])
assert_allclose(windows.flattop(6),
[-0.000421051, -0.0677142520762119, 0.6068721525762117,
0.6068721525762117, -0.0677142520762119,
-0.000421051])
assert_allclose(windows.flattop(7, True),
[-0.000421051, -0.051263156, 0.19821053, 1.0,
0.19821053, -0.051263156, -0.000421051])
class TestGaussian(object):
def test_basic(self):
assert_allclose(windows.gaussian(6, 1.0),
[0.04393693362340742, 0.3246524673583497,
0.8824969025845955, 0.8824969025845955,
0.3246524673583497, 0.04393693362340742])
assert_allclose(windows.gaussian(7, 1.2),
[0.04393693362340742, 0.2493522087772962,
0.7066482778577162, 1.0, 0.7066482778577162,
0.2493522087772962, 0.04393693362340742])
assert_allclose(windows.gaussian(7, 3),
[0.6065306597126334, 0.8007374029168081,
0.9459594689067654, 1.0, 0.9459594689067654,
0.8007374029168081, 0.6065306597126334])
assert_allclose(windows.gaussian(6, 3, False),
[0.6065306597126334, 0.8007374029168081,
0.9459594689067654, 1.0, 0.9459594689067654,
0.8007374029168081])
class TestGeneralCosine(object):
def test_basic(self):
assert_allclose(windows.general_cosine(5, [0.5, 0.3, 0.2]),
[0.4, 0.3, 1, 0.3, 0.4])
assert_allclose(windows.general_cosine(4, [0.5, 0.3, 0.2], sym=False),
[0.4, 0.3, 1, 0.3])
class TestGeneralHamming(object):
def test_basic(self):
assert_allclose(windows.general_hamming(5, 0.7),
[0.4, 0.7, 1.0, 0.7, 0.4])
assert_allclose(windows.general_hamming(5, 0.75, sym=False),
[0.5, 0.6727457514, 0.9522542486,
0.9522542486, 0.6727457514])
assert_allclose(windows.general_hamming(6, 0.75, sym=True),
[0.5, 0.6727457514, 0.9522542486,
0.9522542486, 0.6727457514, 0.5])
class TestHamming(object):
def test_basic(self):
assert_allclose(windows.hamming(6, False),
[0.08, 0.31, 0.77, 1.0, 0.77, 0.31])
assert_allclose(windows.hamming(7, sym=False),
[0.08, 0.2531946911449826, 0.6423596296199047,
0.9544456792351128, 0.9544456792351128,
0.6423596296199047, 0.2531946911449826])
assert_allclose(windows.hamming(6),
[0.08, 0.3978521825875242, 0.9121478174124757,
0.9121478174124757, 0.3978521825875242, 0.08])
assert_allclose(windows.hamming(7, sym=True),
[0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08])
class TestHann(object):
def test_basic(self):
assert_allclose(windows.hann(6, sym=False),
[0, 0.25, 0.75, 1.0, 0.75, 0.25])
assert_allclose(windows.hann(7, sym=False),
[0, 0.1882550990706332, 0.6112604669781572,
0.9504844339512095, 0.9504844339512095,
0.6112604669781572, 0.1882550990706332])
assert_allclose(windows.hann(6, True),
[0, 0.3454915028125263, 0.9045084971874737,
0.9045084971874737, 0.3454915028125263, 0])
assert_allclose(windows.hann(7),
[0, 0.25, 0.75, 1.0, 0.75, 0.25, 0])
class TestKaiser(object):
def test_basic(self):
assert_allclose(windows.kaiser(6, 0.5),
[0.9403061933191572, 0.9782962393705389,
0.9975765035372042, 0.9975765035372042,
0.9782962393705389, 0.9403061933191572])
assert_allclose(windows.kaiser(7, 0.5),
[0.9403061933191572, 0.9732402256999829,
0.9932754654413773, 1.0, 0.9932754654413773,
0.9732402256999829, 0.9403061933191572])
assert_allclose(windows.kaiser(6, 2.7),
[0.2603047507678832, 0.6648106293528054,
0.9582099802511439, 0.9582099802511439,
0.6648106293528054, 0.2603047507678832])
assert_allclose(windows.kaiser(7, 2.7),
[0.2603047507678832, 0.5985765418119844,
0.8868495172060835, 1.0, 0.8868495172060835,
0.5985765418119844, 0.2603047507678832])
assert_allclose(windows.kaiser(6, 2.7, False),
[0.2603047507678832, 0.5985765418119844,
0.8868495172060835, 1.0, 0.8868495172060835,
0.5985765418119844])
class TestNuttall(object):
def test_basic(self):
assert_allclose(windows.nuttall(6, sym=False),
[0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
0.0613345])
assert_allclose(windows.nuttall(7, sym=False),
[0.0003628, 0.03777576895352025, 0.3427276199688195,
0.8918518610776603, 0.8918518610776603,
0.3427276199688196, 0.0377757689535203])
assert_allclose(windows.nuttall(6),
[0.0003628, 0.1105152530498718, 0.7982580969501282,
0.7982580969501283, 0.1105152530498719, 0.0003628])
assert_allclose(windows.nuttall(7, True),
[0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
0.0613345, 0.0003628])
class TestParzen(object):
def test_basic(self):
assert_allclose(windows.parzen(6),
[0.009259259259259254, 0.25, 0.8611111111111112,
0.8611111111111112, 0.25, 0.009259259259259254])
assert_allclose(windows.parzen(7, sym=True),
[0.00583090379008747, 0.1574344023323616,
0.6501457725947521, 1.0, 0.6501457725947521,
0.1574344023323616, 0.00583090379008747])
assert_allclose(windows.parzen(6, False),
[0.00583090379008747, 0.1574344023323616,
0.6501457725947521, 1.0, 0.6501457725947521,
0.1574344023323616])
class TestTriang(object):
def test_basic(self):
assert_allclose(windows.triang(6, True),
[1/6, 1/2, 5/6, 5/6, 1/2, 1/6])
assert_allclose(windows.triang(7),
[1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4])
assert_allclose(windows.triang(6, sym=False),
[1/4, 1/2, 3/4, 1, 3/4, 1/2])
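# Keys are the (M, alpha[, sym]) arguments to windows.tukey; entries with
# omitted arguments exercise the defaults.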
tukey_data = {
(4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]),
(4, 0.9, True): array([0.0, 0.84312081893436686,
0.84312081893436686, 0.0]),
(4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]),
(4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]),
(4, 0.9, False): array([0.0, 0.58682408883346526,
1.0, 0.58682408883346526]),
(4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]),
(5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]),
(5, 0.8, True): array([0.0, 0.69134171618254492,
1.0, 0.69134171618254492, 0.0]),
(5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]),
(6, 0): [1, 1, 1, 1, 1, 1],
(7, 0): [1, 1, 1, 1, 1, 1, 1],
(6, .25): [0, 1, 1, 1, 1, 0],
(7, .25): [0, 1, 1, 1, 1, 1, 0],
(6,): [0, 0.9045084971874737, 1.0, 1.0, 0.9045084971874735, 0],
(7,): [0, 0.75, 1.0, 1.0, 1.0, 0.75, 0],
(6, .75): [0, 0.5522642316338269, 1.0, 1.0, 0.5522642316338267, 0],
(7, .75): [0, 0.4131759111665348, 0.9698463103929542, 1.0,
0.9698463103929542, 0.4131759111665347, 0],
(6, 1): [0, 0.3454915028125263, 0.9045084971874737, 0.9045084971874737,
0.3454915028125263, 0],
(7, 1): [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0],
}
class TestTukey(object):
def test_basic(self):
# Test against hardcoded data
for k, v in tukey_data.items():
if v is None:
assert_raises(ValueError, windows.tukey, *k)
else:
win = windows.tukey(*k)
assert_allclose(win, v, rtol=1e-14)
def test_extremes(self):
# Test extremes of alpha correspond to boxcar and hann
tuk0 = windows.tukey(100, 0)
box0 = windows.boxcar(100)
assert_array_almost_equal(tuk0, box0)
tuk1 = windows.tukey(100, 1)
han1 = windows.hann(100)
assert_array_almost_equal(tuk1, han1)
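# Keys are the (M, NW, Kmax) arguments to windows.dpss; values are the
# expected (tapers, concentration ratios).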
dpss_data = {
# All values from MATLAB:
# * taper[1] of (3, 1.4, 3) sign-flipped
# * taper[3] of (5, 1.5, 5) sign-flipped
(4, 0.1, 2): ([[0.497943898, 0.502047681, 0.502047681, 0.497943898], [0.670487993, 0.224601537, -0.224601537, -0.670487993]], [0.197961815, 0.002035474]), # noqa
(3, 1.4, 3): ([[0.410233151, 0.814504464, 0.410233151], [0.707106781, 0.0, -0.707106781], [0.575941629, -0.580157287, 0.575941629]], [0.999998093, 0.998067480, 0.801934426]), # noqa
(5, 1.5, 5): ([[0.1745071052, 0.4956749177, 0.669109327, 0.495674917, 0.174507105], [0.4399493348, 0.553574369, 0.0, -0.553574369, -0.439949334], [0.631452756, 0.073280238, -0.437943884, 0.073280238, 0.631452756], [0.553574369, -0.439949334, 0.0, 0.439949334, -0.553574369], [0.266110290, -0.498935248, 0.600414741, -0.498935248, 0.266110290147157]], [0.999728571, 0.983706916, 0.768457889, 0.234159338, 0.013947282907567]), # noqa: E501
(100, 2, 4): ([[0.0030914414, 0.0041266922, 0.005315076, 0.006665149, 0.008184854, 0.0098814158, 0.011761239, 0.013829809, 0.016091597, 0.018549973, 0.02120712, 0.02406396, 0.027120092, 0.030373728, 0.033821651, 0.037459181, 0.041280145, 0.045276872, 0.049440192, 0.053759447, 0.058222524, 0.062815894, 0.067524661, 0.072332638, 0.077222418, 0.082175473, 0.087172252, 0.092192299, 0.097214376, 0.1022166, 0.10717657, 0.11207154, 0.11687856, 0.12157463, 0.12613686, 0.13054266, 0.13476986, 0.13879691, 0.14260302, 0.14616832, 0.14947401, 0.1525025, 0.15523755, 0.15766438, 0.15976981, 0.16154233, 0.16297223, 0.16405162, 0.16477455, 0.16513702, 0.16513702, 0.16477455, 0.16405162, 0.16297223, 0.16154233, 0.15976981, 0.15766438, 0.15523755, 0.1525025, 0.14947401, 0.14616832, 0.14260302, 0.13879691, 0.13476986, 0.13054266, 0.12613686, 0.12157463, 0.11687856, 0.11207154, 0.10717657, 0.1022166, 0.097214376, 0.092192299, 0.087172252, 0.082175473, 0.077222418, 0.072332638, 0.067524661, 0.062815894, 0.058222524, 0.053759447, 0.049440192, 0.045276872, 0.041280145, 0.037459181, 0.033821651, 0.030373728, 0.027120092, 0.02406396, 0.02120712, 0.018549973, 0.016091597, 0.013829809, 0.011761239, 0.0098814158, 0.008184854, 0.006665149, 0.005315076, 0.0041266922, 0.0030914414], [0.018064449, 0.022040342, 0.026325013, 0.030905288, 0.035764398, 0.040881982, 0.046234148, 0.051793558, 0.057529559, 0.063408356, 0.069393216, 0.075444716, 0.081521022, 0.087578202, 0.093570567, 0.099451049, 0.10517159, 0.11068356, 0.11593818, 0.12088699, 0.12548227, 0.12967752, 0.1334279, 0.13669069, 0.13942569, 0.1415957, 0.14316686, 0.14410905, 0.14439626, 0.14400686, 0.14292389, 0.1411353, 0.13863416, 0.13541876, 0.13149274, 0.12686516, 0.12155045, 0.1155684, 0.10894403, 0.10170748, 0.093893752, 0.08554251, 0.076697768, 0.067407559, 0.057723559, 0.04770068, 0.037396627, 0.026871428, 0.016186944, 0.0054063557, -0.0054063557, -0.016186944, -0.026871428, -0.037396627, -0.04770068, -0.057723559, -0.067407559, -0.076697768, -0.08554251, -0.093893752, -0.10170748, -0.10894403, -0.1155684, -0.12155045, -0.12686516, -0.13149274, -0.13541876, -0.13863416, -0.1411353, -0.14292389, -0.14400686, -0.14439626, -0.14410905, -0.14316686, -0.1415957, -0.13942569, -0.13669069, -0.1334279, -0.12967752, -0.12548227, -0.12088699, -0.11593818, -0.11068356, -0.10517159, -0.099451049, -0.093570567, -0.087578202, -0.081521022, -0.075444716, -0.069393216, -0.063408356, -0.057529559, -0.051793558, -0.046234148, -0.040881982, -0.035764398, -0.030905288, -0.026325013, -0.022040342, -0.018064449], [0.064817553, 0.072567801, 0.080292992, 0.087918235, 0.095367076, 0.10256232, 0.10942687, 0.1158846, 0.12186124, 0.12728523, 0.13208858, 0.13620771, 0.13958427, 0.14216587, 0.14390678, 0.14476863, 0.1447209, 0.14374148, 0.14181704, 0.13894336, 0.13512554, 0.13037812, 0.1247251, 0.11819984, 0.11084487, 0.10271159, 0.093859853, 0.084357497, 0.074279719, 0.063708406, 0.052731374, 0.041441525, 0.029935953, 0.018314987, 0.0066811877, -0.0048616765, -0.016209689, -0.027259848, -0.037911124, -0.048065512, -0.05762905, -0.066512804, -0.0746338, -0.081915903, -0.088290621, -0.09369783, -0.098086416, -0.10141482, -0.10365146, -0.10477512, -0.10477512, -0.10365146, -0.10141482, -0.098086416, -0.09369783, -0.088290621, -0.081915903, -0.0746338, -0.066512804, -0.05762905, -0.048065512, -0.037911124, -0.027259848, -0.016209689, -0.0048616765, 0.0066811877, 0.018314987, 0.029935953, 0.041441525, 0.052731374, 0.063708406, 0.074279719, 0.084357497, 0.093859853, 0.10271159, 0.11084487, 
0.11819984, 0.1247251, 0.13037812, 0.13512554, 0.13894336, 0.14181704, 0.14374148, 0.1447209, 0.14476863, 0.14390678, 0.14216587, 0.13958427, 0.13620771, 0.13208858, 0.12728523, 0.12186124, 0.1158846, 0.10942687, 0.10256232, 0.095367076, 0.087918235, 0.080292992, 0.072567801, 0.064817553], [0.14985551, 0.15512305, 0.15931467, 0.16236806, 0.16423291, 0.16487165, 0.16426009, 0.1623879, 0.1592589, 0.15489114, 0.14931693, 0.14258255, 0.13474785, 0.1258857, 0.11608124, 0.10543095, 0.094041635, 0.082029213, 0.069517411, 0.056636348, 0.043521028, 0.030309756, 0.017142511, 0.0041592774, -0.0085016282, -0.020705223, -0.032321494, -0.043226982, -0.053306291, -0.062453515, -0.070573544, -0.077583253, -0.083412547, -0.088005244, -0.091319802, -0.093329861, -0.094024602, -0.093408915, -0.091503383, -0.08834406, -0.08398207, -0.078483012, -0.071926192, -0.064403681, -0.056019215, -0.046886954, -0.037130106, -0.026879442, -0.016271713, -0.005448, 0.005448, 0.016271713, 0.026879442, 0.037130106, 0.046886954, 0.056019215, 0.064403681, 0.071926192, 0.078483012, 0.08398207, 0.08834406, 0.091503383, 0.093408915, 0.094024602, 0.093329861, 0.091319802, 0.088005244, 0.083412547, 0.077583253, 0.070573544, 0.062453515, 0.053306291, 0.043226982, 0.032321494, 0.020705223, 0.0085016282, -0.0041592774, -0.017142511, -0.030309756, -0.043521028, -0.056636348, -0.069517411, -0.082029213, -0.094041635, -0.10543095, -0.11608124, -0.1258857, -0.13474785, -0.14258255, -0.14931693, -0.15489114, -0.1592589, -0.1623879, -0.16426009, -0.16487165, -0.16423291, -0.16236806, -0.15931467, -0.15512305, -0.14985551]], [0.999943140, 0.997571533, 0.959465463, 0.721862496]), # noqa: E501
}
class TestDPSS(object):
def test_basic(self):
# Test against hardcoded data
for k, v in dpss_data.items():
win, ratios = windows.dpss(*k, return_ratios=True)
assert_allclose(win, v[0], atol=1e-7, err_msg=k)
assert_allclose(ratios, v[1], rtol=1e-5, atol=1e-7, err_msg=k)
def test_unity(self):
# Test unity value handling (gh-2221)
for M in range(1, 21):
# corrected w/approximation (default)
win = windows.dpss(M, M / 2.1)
expected = M % 2 # one for odd, none for even
assert_equal(np.isclose(win, 1.).sum(), expected,
err_msg='%s' % (win,))
# corrected w/subsample delay (slower)
win_sub = windows.dpss(M, M / 2.1, norm='subsample')
if M > 2:
# @M=2 the subsample doesn't do anything
assert_equal(np.isclose(win_sub, 1.).sum(), expected,
err_msg='%s' % (win_sub,))
assert_allclose(win, win_sub, rtol=0.03) # within 3%
            # norm=2 applies plain l2 normalization, so only the trivial
            # M == 1 window has a tap equal to 1
win_2 = windows.dpss(M, M / 2.1, norm=2)
expected = 1 if M == 1 else 0
assert_equal(np.isclose(win_2, 1.).sum(), expected,
err_msg='%s' % (win_2,))
def test_extremes(self):
# Test extremes of alpha
lam = windows.dpss(31, 6, 4, return_ratios=True)[1]
assert_array_almost_equal(lam, 1.)
lam = windows.dpss(31, 7, 4, return_ratios=True)[1]
assert_array_almost_equal(lam, 1.)
lam = windows.dpss(31, 8, 4, return_ratios=True)[1]
assert_array_almost_equal(lam, 1.)
def test_degenerate(self):
# Test failures
assert_raises(ValueError, windows.dpss, 4, 1.5, -1) # Bad Kmax
assert_raises(ValueError, windows.dpss, 4, 1.5, -5)
assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1)
assert_raises(ValueError, windows.dpss, 3, 1.5, 3) # NW must be < N/2.
assert_raises(ValueError, windows.dpss, 3, -1, 3) # NW must be pos
assert_raises(ValueError, windows.dpss, 3, 0, 3)
assert_raises(ValueError, windows.dpss, -1, 1, 3) # negative M
class TestGetWindow(object):
def test_boxcar(self):
w = windows.get_window('boxcar', 12)
assert_array_equal(w, np.ones_like(w))
# window is a tuple of len 1
w = windows.get_window(('boxcar',), 16)
assert_array_equal(w, np.ones_like(w))
def test_cheb_odd(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
w = windows.get_window(('chebwin', -40), 53, fftbins=False)
assert_array_almost_equal(w, cheb_odd_true, decimal=4)
def test_cheb_even(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
w = windows.get_window(('chebwin', 40), 54, fftbins=False)
assert_array_almost_equal(w, cheb_even_true, decimal=4)
def test_kaiser_float(self):
win1 = windows.get_window(7.2, 64)
win2 = windows.kaiser(64, 7.2, False)
assert_allclose(win1, win2)
def test_invalid_inputs(self):
# Window is not a float, tuple, or string
assert_raises(ValueError, windows.get_window, set('hann'), 8)
# Unknown window type error
assert_raises(ValueError, windows.get_window, 'broken', 4)
def test_array_as_window(self):
# github issue 3603
osfactor = 128
sig = np.arange(128)
win = windows.get_window(('kaiser', 8.0), osfactor // 2)
assert_raises(ValueError, resample,
(sig, len(sig) * osfactor), {'window': win})
def test_windowfunc_basics():
for window_name, params in window_funcs:
window = getattr(windows, window_name)
with suppress_warnings() as sup:
sup.filter(UserWarning, "This window is not suitable")
if window_name in ('slepian', 'hanning'):
sup.filter(DeprecationWarning)
# Check symmetry for odd and even lengths
w1 = window(8, *params, sym=True)
w2 = window(7, *params, sym=False)
assert_array_almost_equal(w1[:-1], w2)
w1 = window(9, *params, sym=True)
w2 = window(8, *params, sym=False)
assert_array_almost_equal(w1[:-1], w2)
# Check that functions run and output lengths are correct
assert_equal(len(window(6, *params, sym=True)), 6)
assert_equal(len(window(6, *params, sym=False)), 6)
assert_equal(len(window(7, *params, sym=True)), 7)
assert_equal(len(window(7, *params, sym=False)), 7)
# Check invalid lengths
assert_raises(ValueError, window, 5.5, *params)
assert_raises(ValueError, window, -7, *params)
# Check degenerate cases
assert_array_equal(window(0, *params, sym=True), [])
assert_array_equal(window(0, *params, sym=False), [])
assert_array_equal(window(1, *params, sym=True), [1])
assert_array_equal(window(1, *params, sym=False), [1])
# Check dtype
assert_(window(0, *params, sym=True).dtype == 'float')
assert_(window(0, *params, sym=False).dtype == 'float')
assert_(window(1, *params, sym=True).dtype == 'float')
assert_(window(1, *params, sym=False).dtype == 'float')
assert_(window(6, *params, sym=True).dtype == 'float')
assert_(window(6, *params, sym=False).dtype == 'float')
# Check normalization
assert_array_less(window(10, *params, sym=True), 1.01)
assert_array_less(window(10, *params, sym=False), 1.01)
assert_array_less(window(9, *params, sym=True), 1.01)
assert_array_less(window(9, *params, sym=False), 1.01)
# Check that DFT-even spectrum is purely real for odd and even
assert_allclose(fftpack.fft(window(10, *params, sym=False)).imag,
0, atol=1e-14)
assert_allclose(fftpack.fft(window(11, *params, sym=False)).imag,
0, atol=1e-14)
def test_needs_params():
for winstr in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'slepian', 'optimal', 'slep', 'dss', 'dpss',
'chebwin', 'cheb', 'exponential', 'poisson', 'tukey',
                   'tuk']:
assert_raises(ValueError, get_window, winstr, 7)
def test_deprecation():
if dep_hann.__doc__ is not None: # can be None with `-OO` mode
assert_('signal.hann is deprecated' in dep_hann.__doc__)
assert_('deprecated' not in windows.hann.__doc__)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_upfirdn.py
# Code adapted from "upfirdn" python library with permission:
#
# Copyright (c) 2009, Motorola, Inc
#
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Motorola nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from itertools import product
from numpy.testing import assert_equal, assert_allclose
from pytest import raises as assert_raises
from scipy.signal import upfirdn, firwin, lfilter
from scipy.signal._upfirdn import _output_len
def upfirdn_naive(x, h, up=1, down=1):
"""Naive upfirdn processing in Python
Note: arg order (x, h) differs to facilitate apply_along_axis use.
"""
h = np.asarray(h)
out = np.zeros(len(x) * up, x.dtype)
out[::up] = x
out = np.convolve(h, out)[::down][:_output_len(len(h), len(x), up, down)]
return out
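# A hand-worked sketch of what upfirdn_naive computes: for x = [1, 2, 3],
# h = [1, 1], up = 2, down = 1, the zero-stuffed signal is
# [1, 0, 2, 0, 3, 0]; convolving with h gives [1, 1, 2, 2, 3, 3, 0], and
# _output_len trims this to the first 6 samples: [1, 1, 2, 2, 3, 3].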
class UpFIRDnCase(object):
"""Test _UpFIRDn object"""
def __init__(self, up, down, h, x_dtype):
self.up = up
self.down = down
self.h = np.atleast_1d(h)
self.x_dtype = x_dtype
self.rng = np.random.RandomState(17)
def __call__(self):
# tiny signal
self.scrub(np.ones(1, self.x_dtype))
        # ones
        self.scrub(np.ones(10, self.x_dtype))
# randn
x = self.rng.randn(10).astype(self.x_dtype)
if self.x_dtype in (np.complex64, np.complex128):
x += 1j * self.rng.randn(10)
self.scrub(x)
# ramp
self.scrub(np.arange(10).astype(self.x_dtype))
# 3D, random
size = (2, 3, 5)
x = self.rng.randn(*size).astype(self.x_dtype)
if self.x_dtype in (np.complex64, np.complex128):
x += 1j * self.rng.randn(*size)
for axis in range(len(size)):
self.scrub(x, axis=axis)
x = x[:, ::2, 1::3].T
for axis in range(len(size)):
self.scrub(x, axis=axis)
def scrub(self, x, axis=-1):
yr = np.apply_along_axis(upfirdn_naive, axis, x,
self.h, self.up, self.down)
y = upfirdn(self.h, x, self.up, self.down, axis=axis)
dtypes = (self.h.dtype, x.dtype)
if all(d == np.complex64 for d in dtypes):
assert_equal(y.dtype, np.complex64)
elif np.complex64 in dtypes and np.float32 in dtypes:
assert_equal(y.dtype, np.complex64)
elif all(d == np.float32 for d in dtypes):
assert_equal(y.dtype, np.float32)
elif np.complex128 in dtypes or np.complex64 in dtypes:
assert_equal(y.dtype, np.complex128)
else:
assert_equal(y.dtype, np.float64)
assert_allclose(yr, y)
class TestUpfirdn(object):
def test_valid_input(self):
assert_raises(ValueError, upfirdn, [1], [1], 1, 0) # up or down < 1
assert_raises(ValueError, upfirdn, [], [1], 1, 1) # h.ndim != 1
assert_raises(ValueError, upfirdn, [[1]], [1], 1, 1)
def test_vs_lfilter(self):
# Check that up=1.0 gives same answer as lfilter + slicing
random_state = np.random.RandomState(17)
try_types = (int, np.float32, np.complex64, float, complex)
size = 10000
down_factors = [2, 11, 79]
for dtype in try_types:
x = random_state.randn(size).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * random_state.randn(size)
for down in down_factors:
h = firwin(31, 1. / down, window='hamming')
yl = lfilter(h, 1.0, x)[::down]
y = upfirdn(h, x, up=1, down=down)
assert_allclose(yl, y[:yl.size], atol=1e-7, rtol=1e-7)
def test_vs_naive(self):
tests = []
try_types = (int, np.float32, np.complex64, float, complex)
# Simple combinations of factors
for x_dtype, h in product(try_types, (1., 1j)):
tests.append(UpFIRDnCase(1, 1, h, x_dtype))
tests.append(UpFIRDnCase(2, 2, h, x_dtype))
tests.append(UpFIRDnCase(3, 2, h, x_dtype))
tests.append(UpFIRDnCase(2, 3, h, x_dtype))
# mixture of big, small, and both directions (net up and net down)
# use all combinations of data and filter dtypes
factors = (100, 10) # up/down factors
cases = product(factors, factors, try_types, try_types)
for case in cases:
tests += self._random_factors(*case)
for test in tests:
test()
def _random_factors(self, p_max, q_max, h_dtype, x_dtype):
n_rep = 3
longest_h = 25
random_state = np.random.RandomState(17)
tests = []
for _ in range(n_rep):
# Randomize the up/down factors somewhat
p_add = q_max if p_max > q_max else 1
q_add = p_max if q_max > p_max else 1
p = random_state.randint(p_max) + p_add
q = random_state.randint(q_max) + q_add
            # Generate random FIR coefficients of length len_h
            len_h = random_state.randint(longest_h) + 1
            h = np.atleast_1d(random_state.randint(1, 5, size=len_h))
            h = h.astype(h_dtype)
            if h_dtype == complex:
                h = h + 1j * random_state.randint(1, 5, size=len_h)
tests.append(UpFIRDnCase(p, q, h, x_dtype))
return tests
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/__init__.py
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_savitzky_golay.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_almost_equal, assert_array_equal,
assert_array_almost_equal)
from scipy.ndimage import convolve1d
from scipy.signal import savgol_coeffs, savgol_filter
from scipy.signal._savitzky_golay import _polyder
def check_polyder(p, m, expected):
dp = _polyder(p, m)
assert_array_equal(dp, expected)
def test_polyder():
cases = [
([5], 0, [5]),
([5], 1, [0]),
([3, 2, 1], 0, [3, 2, 1]),
([3, 2, 1], 1, [6, 2]),
([3, 2, 1], 2, [6]),
([3, 2, 1], 3, [0]),
([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
]
for p, m, expected in cases:
check_polyder(np.array(p).T, m, np.array(expected).T)
#--------------------------------------------------------------------
# savgol_coeffs tests
#--------------------------------------------------------------------
def alt_sg_coeffs(window_length, polyorder, pos):
"""This is an alternative implementation of the SG coefficients.
It uses numpy.polyfit and numpy.polyval. The results should be
equivalent to those of savgol_coeffs(), but this implementation
is slower.
window_length should be odd.
"""
if pos is None:
pos = window_length // 2
t = np.arange(window_length)
unit = (t == pos).astype(int)
h = np.polyval(np.polyfit(t, unit, polyorder), t)
return h
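# Both implementations reproduce, e.g., the classic 5-point quadratic
# smoothing kernel savgol_coeffs(5, 2) == [-3, 12, 17, 12, -3] / 35
# (symmetric, so the 'conv' and 'dot' orderings coincide).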
def test_sg_coeffs_trivial():
# Test a trivial case of savgol_coeffs: polyorder = window_length - 1
h = savgol_coeffs(1, 0)
assert_allclose(h, [1])
h = savgol_coeffs(3, 2)
assert_allclose(h, [0, 1, 0], atol=1e-10)
h = savgol_coeffs(5, 4)
assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10)
h = savgol_coeffs(5, 4, pos=1)
assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10)
h = savgol_coeffs(5, 4, pos=1, use='dot')
assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10)
def compare_coeffs_to_alt(window_length, order):
# For the given window_length and order, compare the results
# of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.
# Also include pos=None.
for pos in [None] + list(range(window_length)):
h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')
h2 = alt_sg_coeffs(window_length, order, pos=pos)
assert_allclose(h1, h2, atol=1e-10,
err_msg=("window_length = %d, order = %d, pos = %s" %
(window_length, order, pos)))
def test_sg_coeffs_compare():
# Compare savgol_coeffs() to alt_sg_coeffs().
for window_length in range(1, 8, 2):
for order in range(window_length):
compare_coeffs_to_alt(window_length, order)
def test_sg_coeffs_exact():
polyorder = 4
window_length = 9
halflen = window_length // 2
x = np.linspace(0, 21, 43)
delta = x[1] - x[0]
# The data is a cubic polynomial. We'll use an order 4
# SG filter, so the filtered values should equal the input data
# (except within half window_length of the edges).
y = 0.5 * x ** 3 - x
h = savgol_coeffs(window_length, polyorder)
y0 = convolve1d(y, h)
assert_allclose(y0[halflen:-halflen], y[halflen:-halflen])
# Check the same input, but use deriv=1. dy is the exact result.
dy = 1.5 * x ** 2 - 1
h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)
y1 = convolve1d(y, h)
assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen])
# Check the same input, but use deriv=2. d2y is the exact result.
d2y = 3.0 * x
h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)
y2 = convolve1d(y, h)
assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen])
def test_sg_coeffs_deriv():
# The data in `x` is a sampled parabola, so using savgol_coeffs with an
# order 2 or higher polynomial should give exact results.
i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
x = i ** 2 / 4
dx = i / 2
d2x = 0.5 * np.ones_like(i)
for pos in range(x.size):
coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')
assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10)
coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)
assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10)
coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)
assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10)
def test_sg_coeffs_large():
# Test that for large values of window_length and polyorder the array of
# coefficients returned is symmetric. The aim is to ensure that
# no potential numeric overflow occurs.
coeffs0 = savgol_coeffs(31, 9)
assert_array_almost_equal(coeffs0, coeffs0[::-1])
coeffs1 = savgol_coeffs(31, 9, deriv=1)
assert_array_almost_equal(coeffs1, -coeffs1[::-1])
#--------------------------------------------------------------------
# savgol_filter tests
#--------------------------------------------------------------------
def test_sg_filter_trivial():
""" Test some trivial edge cases for savgol_filter()."""
x = np.array([1.0])
y = savgol_filter(x, 1, 0)
assert_equal(y, [1.0])
# Input is a single value. With a window length of 3 and polyorder 1,
# the value in y is from the straight-line fit of (-1,0), (0,3) and
# (1, 0) at 0. This is just the average of the three values, hence 1.0.
x = np.array([3.0])
y = savgol_filter(x, 3, 1, mode='constant')
assert_almost_equal(y, [1.0], decimal=15)
x = np.array([3.0])
y = savgol_filter(x, 3, 1, mode='nearest')
assert_almost_equal(y, [3.0], decimal=15)
x = np.array([1.0] * 3)
y = savgol_filter(x, 3, 1, mode='wrap')
assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)
def test_sg_filter_basic():
# Some basic test cases for savgol_filter().
x = np.array([1.0, 2.0, 1.0])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, [1.0, 4.0 / 3, 1.0])
y = savgol_filter(x, 3, 1, mode='mirror')
assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])
y = savgol_filter(x, 3, 1, mode='wrap')
assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])
def test_sg_filter_2d():
x = np.array([[1.0, 2.0, 1.0],
[2.0, 4.0, 2.0]])
expected = np.array([[1.0, 4.0 / 3, 1.0],
[2.0, 8.0 / 3, 2.0]])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, expected)
y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
assert_allclose(y, expected.T)
def test_sg_filter_interp_edges():
# Another test with low degree polynomial data, for which we can easily
# give the exact results. In this test, we use mode='interp', so
# savgol_filter should match the exact solution for the entire data set,
# including the edges.
t = np.linspace(-5, 5, 21)
delta = t[1] - t[0]
# Polynomial test data.
x = np.array([t,
3 * t ** 2,
t ** 3 - t])
dx = np.array([np.ones_like(t),
6 * t,
3 * t ** 2 - 1.0])
d2x = np.array([np.zeros_like(t),
6 * np.ones_like(t),
6 * t])
window_length = 7
y = savgol_filter(x, window_length, 3, axis=-1, mode='interp')
assert_allclose(y, x, atol=1e-12)
y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
deriv=1, delta=delta)
assert_allclose(y1, dx, atol=1e-12)
y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
deriv=2, delta=delta)
assert_allclose(y2, d2x, atol=1e-12)
# Transpose everything, and test again with axis=0.
x = x.T
dx = dx.T
d2x = d2x.T
y = savgol_filter(x, window_length, 3, axis=0, mode='interp')
assert_allclose(y, x, atol=1e-12)
y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
deriv=1, delta=delta)
assert_allclose(y1, dx, atol=1e-12)
y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
deriv=2, delta=delta)
assert_allclose(y2, d2x, atol=1e-12)
def test_sg_filter_interp_edges_3d():
# Test mode='interp' with a 3-D array.
t = np.linspace(-5, 5, 21)
delta = t[1] - t[0]
x1 = np.array([t, -t])
x2 = np.array([t ** 2, 3 * t ** 2 + 5])
x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])
dx1 = np.array([np.ones_like(t), -np.ones_like(t)])
dx2 = np.array([2 * t, 6 * t])
dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])
# z has shape (3, 2, 21)
z = np.array([x1, x2, x3])
dz = np.array([dx1, dx2, dx3])
y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
# z has shape (3, 21, 2)
z = np.array([x1.T, x2.T, x3.T])
dz = np.array([dx1.T, dx2.T, dx3.T])
y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
# z has shape (21, 3, 2)
z = z.swapaxes(0, 1).copy()
dz = dz.swapaxes(0, 1).copy()
y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/mpsig.py
"""
Some signal functions implemented using mpmath.
"""
from __future__ import division
try:
import mpmath
except ImportError:
mpmath = None
def _prod(seq):
"""Returns the product of the elements in the sequence `seq`."""
p = 1
for elem in seq:
p *= elem
return p
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles.
This is simply len(p) - len(z), which must be nonnegative.
A ValueError is raised if len(p) < len(z).
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
return degree
def _zpkbilinear(z, p, k, fs):
"""Bilinear transformation to convert a filter from analog to digital."""
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z]
p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p]
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z.extend([-1] * degree)
# Compensate for gain change
numer = _prod(fs2 - z1 for z1 in z)
denom = _prod(fs2 - p1 for p1 in p)
k_z = k * numer / denom
return z_z, p_z, k_z.real
def _zpklp2lp(z, p, k, wo=1):
"""Transform a lowpass filter to a different cutoff frequency."""
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = [wo * z1 for z1 in z]
p_lp = [wo * p1 for p1 in p]
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _butter_analog_poles(n):
"""
Poles of an analog Butterworth lowpass filter.
This is the same calculation as scipy.signal.buttap(n) or
scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used,
and only the poles are returned.
"""
poles = []
for k in range(-n+1, n, 2):
poles.append(-mpmath.exp(1j*mpmath.pi*k/(2*n)))
return poles
def butter_lp(n, Wn):
"""
Lowpass Butterworth digital filter design.
This computes the same result as scipy.signal.butter(n, Wn, output='zpk'),
but it uses mpmath, and the results are returned in lists instead of numpy
arrays.
"""
zeros = []
poles = _butter_analog_poles(n)
k = 1
fs = 2
warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs)
z, p, k = _zpklp2lp(zeros, poles, k, wo=warped)
z, p, k = _zpkbilinear(z, p, k, fs=fs)
return z, p, k
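# Usage sketch (assuming mpmath is installed): with mpmath.mp.dps = 50,
# butter_lp(4, 0.2) should agree with
# scipy.signal.butter(4, 0.2, output='zpk') to double precision.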
def zpkfreqz(z, p, k, worN=None):
"""
Frequency response of a filter in zpk format, using mpmath.
This is the same calculation as scipy.signal.freqz, but the input is in
    zpk format, the calculation is performed using mpmath, and the results are
returned in lists instead of numpy arrays.
"""
if worN is None or isinstance(worN, int):
N = worN or 512
ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)]
else:
ws = worN
h = []
for wk in ws:
zm1 = mpmath.exp(1j * wk)
numer = _prod([zm1 - t for t in z])
denom = _prod([zm1 - t for t in p])
hk = k * numer / denom
h.append(hk)
return ws, h
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_cont2discrete.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_almost_equal,
                           assert_allclose, assert_equal)
from scipy.signal import cont2discrete as c2d
from scipy.signal import dlsim, ss2tf, ss2zpk, lsim2, lti
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
# March 29, 2011
class TestC2D(object):
def test_zoh(self):
ac = np.eye(2)
bc = 0.5 * np.ones((2, 1))
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
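        # Zero-order hold discretizes as Ad = expm(A*dt) and
        # Bd = inv(A) @ (Ad - I) @ B; here A = I and dt = 0.5, so
        # Ad = exp(0.5)*I and Bd = (exp(0.5) - 1)*bc.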
ad_truth = 1.648721270700128 * np.eye(2)
bd_truth = 0.324360635350064 * np.ones((2, 1))
# c and d in discrete should be equal to their continuous counterparts
dt_requested = 0.5
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cc, cd)
assert_array_almost_equal(dc, dd)
assert_almost_equal(dt_requested, dt)
def test_gbt(self):
ac = np.eye(2)
bc = 0.5 * np.ones((2, 1))
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
alpha = 1.0 / 3.0
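        # gbt computes Ad = inv(I - alpha*dt*A) @ (I + (1 - alpha)*dt*A);
        # with A = I, dt = 0.5, alpha = 1/3 this gives
        # (1 + 1/3) / (1 - 1/6) = 1.6, matching ad_truth below.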
ad_truth = 1.6 * np.eye(2)
bd_truth = 0.3 * np.ones((2, 1))
cd_truth = np.array([[0.9, 1.2],
[1.2, 1.2],
[1.2, 0.3]])
dd_truth = np.array([[0.175],
[0.2],
[-0.205]])
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='gbt', alpha=alpha)
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
def test_euler(self):
ac = np.eye(2)
bc = 0.5 * np.ones((2, 1))
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
ad_truth = 1.5 * np.eye(2)
bd_truth = 0.25 * np.ones((2, 1))
cd_truth = np.array([[0.75, 1.0],
[1.0, 1.0],
[1.0, 0.25]])
dd_truth = dc
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='euler')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
def test_backward_diff(self):
ac = np.eye(2)
bc = 0.5 * np.ones((2, 1))
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
ad_truth = 2.0 * np.eye(2)
bd_truth = 0.5 * np.ones((2, 1))
cd_truth = np.array([[1.5, 2.0],
[2.0, 2.0],
[2.0, 0.5]])
dd_truth = np.array([[0.875],
[1.0],
[0.295]])
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='backward_diff')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
def test_bilinear(self):
ac = np.eye(2)
bc = 0.5 * np.ones((2, 1))
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
ad_truth = (5.0 / 3.0) * np.eye(2)
bd_truth = (1.0 / 3.0) * np.ones((2, 1))
cd_truth = np.array([[1.0, 4.0 / 3.0],
[4.0 / 3.0, 4.0 / 3.0],
[4.0 / 3.0, 1.0 / 3.0]])
dd_truth = np.array([[0.291666666666667],
[1.0 / 3.0],
[-0.121666666666667]])
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='bilinear')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
# Same continuous system again, but change sampling rate
ad_truth = 1.4 * np.eye(2)
bd_truth = 0.2 * np.ones((2, 1))
cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]])
dd_truth = np.array([[0.175], [0.2], [-0.205]])
dt_requested = 1.0 / 3.0
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='bilinear')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
def test_transferfunction(self):
numc = np.array([0.25, 0.25, 0.5])
denc = np.array([0.75, 0.75, 1.0])
numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]])
dend = np.array([1.0, -1.351394049721225, 0.606530659712634])
dt_requested = 0.5
num, den, dt = c2d((numc, denc), dt_requested, method='zoh')
assert_array_almost_equal(numd, num)
assert_array_almost_equal(dend, den)
assert_almost_equal(dt_requested, dt)
def test_zerospolesgain(self):
zeros_c = np.array([0.5, -0.5])
poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
k_c = 1.0
zeros_d = [1.23371727305860, 0.735356894461267]
        poles_d = [0.938148335039729 + 0.346233593780536j,
                   0.938148335039729 - 0.346233593780536j]
k_d = 1.0
dt_requested = 0.5
zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested,
method='zoh')
assert_array_almost_equal(zeros_d, zeros)
        assert_array_almost_equal(poles_d, poles)
assert_almost_equal(k_d, k)
assert_almost_equal(dt_requested, dt)
def test_gbt_with_sio_tf_and_zpk(self):
"""Test method='gbt' with alpha=0.25 for tf and zpk cases."""
# State space coefficients for the continuous SIO system.
A = -1.0
B = 1.0
C = 1.0
D = 0.5
# The continuous transfer function coefficients.
cnum, cden = ss2tf(A, B, C, D)
# Continuous zpk representation
cz, cp, ck = ss2zpk(A, B, C, D)
h = 1.0
alpha = 0.25
# Explicit formulas, in the scalar case.
Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A)
Bd = h * B / (1 - alpha * h * A)
Cd = C / (1 - alpha * h * A)
Dd = D + alpha * C * Bd
# Convert the explicit solution to tf
dnum, dden = ss2tf(Ad, Bd, Cd, Dd)
# Compute the discrete tf using cont2discrete.
c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha)
assert_allclose(dnum, c2dnum)
assert_allclose(dden, c2dden)
# Convert explicit solution to zpk.
dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd)
# Compute the discrete zpk using cont2discrete.
c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha)
assert_allclose(dz, c2dz)
assert_allclose(dp, c2dp)
assert_allclose(dk, c2dk)
def test_discrete_approx(self):
"""
Test that the solution to the discrete approximation of a continuous
system actually approximates the solution to the continuous system.
This is an indirect test of the correctness of the implementation
of cont2discrete.
"""
def u(t):
return np.sin(2.5 * t)
a = np.array([[-0.01]])
b = np.array([[1.0]])
c = np.array([[1.0]])
d = np.array([[0.2]])
x0 = 1.0
t = np.linspace(0, 10.0, 101)
dt = t[1] - t[0]
u1 = u(t)
# Use lsim2 to compute the solution to the continuous system.
t, yout, xout = lsim2((a, b, c, d), T=t, U=u1, X0=x0,
rtol=1e-9, atol=1e-11)
# Convert the continuous system to a discrete approximation.
dsys = c2d((a, b, c, d), dt, method='bilinear')
# Use dlsim with the pairwise averaged input to compute the output
# of the discrete system.
u2 = 0.5 * (u1[:-1] + u1[1:])
t2 = t[:-1]
td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0)
# ymid is the average of consecutive terms of the "exact" output
# computed by lsim2. This is what the discrete approximation
# actually approximates.
ymid = 0.5 * (yout[:-1] + yout[1:])
assert_allclose(yd2.ravel(), ymid, rtol=1e-4)
def test_simo_tf(self):
# See gh-5753
tf = ([[1, 0], [1, 1]], [1, 1])
num, den, dt = c2d(tf, 0.01)
assert_equal(dt, 0.01) # sanity check
assert_allclose(den, [1, -0.990404983], rtol=1e-3)
assert_allclose(num, [[1, -1], [1, -0.99004983]], rtol=1e-3)
def test_multioutput(self):
ts = 0.01 # time step
tf = ([[1, -3], [1, 5]], [1, 1])
num, den, dt = c2d(tf, ts)
tf1 = (tf[0][0], tf[1])
num1, den1, dt1 = c2d(tf1, ts)
tf2 = (tf[0][1], tf[1])
num2, den2, dt2 = c2d(tf2, ts)
# Sanity checks
assert_equal(dt, dt1)
assert_equal(dt, dt2)
# Check that we get the same results
assert_allclose(num, np.vstack((num1, num2)), rtol=1e-13)
# Single input, so the denominator should
# not be multidimensional like the numerator
assert_allclose(den, den1, rtol=1e-13)
assert_allclose(den, den2, rtol=1e-13)
class TestC2dLti(object):
def test_c2d_ss(self):
# StateSpace
A = np.array([[-0.3, 0.1], [0.2, -0.7]])
B = np.array([[0], [1]])
C = np.array([[1, 0]])
D = 0
A_res = np.array([[0.985136404135682, 0.004876671474795],
[0.009753342949590, 0.965629718236502]])
B_res = np.array([[0.000122937599964], [0.049135527547844]])
sys_ssc = lti(A, B, C, D)
sys_ssd = sys_ssc.to_discrete(0.05)
assert_allclose(sys_ssd.A, A_res)
assert_allclose(sys_ssd.B, B_res)
assert_allclose(sys_ssd.C, C)
assert_allclose(sys_ssd.D, D)
def test_c2d_tf(self):
sys = lti([0.5, 0.3], [1.0, 0.4])
sys = sys.to_discrete(0.005)
# Matlab results
num_res = np.array([0.5, -0.485149004980066])
den_res = np.array([1.0, -0.980198673306755])
# Somehow a lot of numerical errors
assert_allclose(sys.den, den_res, atol=0.02)
assert_allclose(sys.num, num_res, atol=0.02)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_fir_filter_design.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_equal, assert_,
assert_allclose, assert_warns)
from pytest import raises as assert_raises
from scipy.special import sinc
from scipy.signal import (kaiser_beta, kaiser_atten, kaiserord, firwin,
                          firwin2, freqz, remez, firls, minimum_phase)
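# kaiser_beta implements the standard piecewise Kaiser formula:
# beta = 0.1102*(a - 8.7) for a > 50,
# beta = 0.5842*(a - 21)**0.4 + 0.07886*(a - 21) for 21 < a <= 50,
# and beta = 0 otherwise; the spot checks below hit all three branches.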
def test_kaiser_beta():
b = kaiser_beta(58.7)
assert_almost_equal(b, 0.1102 * 50.0)
b = kaiser_beta(22.0)
assert_almost_equal(b, 0.5842 + 0.07886)
b = kaiser_beta(21.0)
assert_equal(b, 0.0)
b = kaiser_beta(10.0)
assert_equal(b, 0.0)
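# kaiser_atten evaluates a = 2.285*(numtaps - 1)*pi*width + 7.95, so a
# single tap gives exactly 7.95 and width = 1/pi adds 2.285 per extra tap.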
def test_kaiser_atten():
a = kaiser_atten(1, 1.0)
assert_equal(a, 7.95)
a = kaiser_atten(2, 1/np.pi)
assert_equal(a, 2.285 + 7.95)
def test_kaiserord():
assert_raises(ValueError, kaiserord, 1.0, 1.0)
numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi)
assert_equal((numtaps, beta), (2, 0.0))
class TestFirwin(object):
def check_response(self, h, expected_response, tol=.05):
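        # Evaluate the magnitude response |H| directly from the taps at
        # each freq (in units of the Nyquist frequency) and compare it
        # with the expected gain.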
N = len(h)
alpha = 0.5 * (N-1)
m = np.arange(0,N) - alpha # time indices of taps
for freq, expected in expected_response:
actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq)))
mse = abs(actual-expected)**2
assert_(mse < tol, 'response not as expected, mse=%g > %g'
% (mse, tol))
def test_response(self):
N = 51
f = .5
# increase length just to try even/odd
h = firwin(N, f) # low-pass from 0 to f
self.check_response(h, [(.25,1), (.75,0)])
h = firwin(N+1, f, window='nuttall') # specific window
self.check_response(h, [(.25,1), (.75,0)])
h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass
self.check_response(h, [(.25,0), (.75,1)])
f1, f2, f3, f4 = .2, .4, .6, .8
h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter
self.check_response(h, [(.1,0), (.3,1), (.5,0)])
h = firwin(N+4, [f1, f2]) # band-stop filter
self.check_response(h, [(.1,1), (.3,0), (.5,1)])
h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False)
self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)])
h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter
self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)])
h = firwin(N+7, 0.1, width=.03) # low-pass
self.check_response(h, [(.05,1), (.75,0)])
h = firwin(N+8, 0.1, pass_zero=False) # high-pass
self.check_response(h, [(.05,0), (.75,1)])
def mse(self, h, bands):
"""Compute mean squared error versus ideal response across frequency
band.
h -- coefficients
bands -- list of (left, right) tuples relative to 1==Nyquist of
passbands
"""
w, H = freqz(h, worN=1024)
f = w/np.pi
passIndicator = np.zeros(len(w), bool)
for left, right in bands:
passIndicator |= (f >= left) & (f < right)
Hideal = np.where(passIndicator, 1, 0)
mse = np.mean(abs(abs(H)-Hideal)**2)
return mse
def test_scaling(self):
"""
For one lowpass, bandpass, and highpass example filter, this test
checks two things:
- the mean squared error over the frequency domain of the unscaled
filter is smaller than the scaled filter (true for rectangular
window)
- the response of the scaled filter is exactly unity at the center
of the first passband
"""
N = 11
cases = [
([.5], True, (0, 1)),
([0.2, .6], False, (.4, 1)),
([.5], False, (1, 1)),
]
for cutoff, pass_zero, expected_response in cases:
h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones')
hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones')
if len(cutoff) == 1:
if pass_zero:
cutoff = [0] + cutoff
else:
cutoff = cutoff + [1]
assert_(self.mse(h, [cutoff]) < self.mse(hs, [cutoff]),
'least squares violation')
self.check_response(hs, [expected_response], 1e-12)
class TestFirWinMore(object):
"""Different author, different style, different tests..."""
def test_lowpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
taps = firwin(ntaps, cutoff=0.5, window=('kaiser', beta), scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
def test_highpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
# Ensure that ntaps is odd.
ntaps |= 1
taps = firwin(ntaps, cutoff=0.5, window=('kaiser', beta),
pass_zero=False, scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test_bandpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
taps = firwin(ntaps, cutoff=[0.3, 0.7], window=('kaiser', beta),
pass_zero=False, scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5,
0.7-width/2, 0.7+width/2, 0.8, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
def test_multi(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
taps = firwin(ntaps, cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta),
pass_zero=True, scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35,
0.5-width/2, 0.5+width/2, 0.65,
0.8-width/2, 0.8+width/2, 0.9, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
decimal=5)
def test_fs_nyq(self):
"""Test the fs and nyq keywords."""
nyquist = 1000
width = 40.0
relative_width = width/nyquist
ntaps, beta = kaiserord(120, relative_width)
taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
pass_zero=False, scale=False, fs=2*nyquist)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500,
700-width/2, 700+width/2, 800, 1000])
freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
taps2 = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
pass_zero=False, scale=False, nyq=nyquist)
assert_allclose(taps2, taps)
def test_bad_cutoff(self):
"""Test that invalid cutoff argument raises ValueError."""
# cutoff values must be greater than 0 and less than 1.
assert_raises(ValueError, firwin, 99, -0.5)
assert_raises(ValueError, firwin, 99, 1.5)
# Don't allow 0 or 1 in cutoff.
assert_raises(ValueError, firwin, 99, [0, 0.5])
assert_raises(ValueError, firwin, 99, [0.5, 1])
# cutoff values must be strictly increasing.
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2])
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5])
# Must have at least one cutoff value.
assert_raises(ValueError, firwin, 99, [])
# 2D array not allowed.
assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]])
# cutoff values must be less than nyq.
assert_raises(ValueError, firwin, 99, 50.0, nyq=40)
assert_raises(ValueError, firwin, 99, [10, 20, 30], nyq=25)
assert_raises(ValueError, firwin, 99, 50.0, fs=80)
assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50)
def test_even_highpass_raises_value_error(self):
"""Test that attempt to create a highpass filter with an even number
of taps raises a ValueError exception."""
assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
assert_raises(ValueError, firwin, 40, [.25, 0.5])
class TestFirwin2(object):
def test_invalid_args(self):
# `freq` and `gain` have different lengths.
assert_raises(ValueError, firwin2, 50, [0, 0.5, 1], [0.0, 1.0])
# `nfreqs` is less than `ntaps`.
assert_raises(ValueError, firwin2, 50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33)
# Decreasing value in `freq`
assert_raises(ValueError, firwin2, 50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0])
# Value in `freq` repeated more than once.
assert_raises(ValueError, firwin2, 50, [0, .1, .1, .1, 1.0],
[0.0, 0.5, 0.75, 1.0, 1.0])
# `freq` does not start at 0.0.
assert_raises(ValueError, firwin2, 50, [0.5, 1.0], [0.0, 1.0])
# Type II filter, but the gain at nyquist frequency is not zero.
assert_raises(ValueError, firwin2, 16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0])
        # Type III filter, but the gains at Nyquist and zero frequency are not zero.
assert_raises(ValueError, firwin2, 17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0],
antisymmetric=True)
assert_raises(ValueError, firwin2, 17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0],
antisymmetric=True)
assert_raises(ValueError, firwin2, 17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0],
antisymmetric=True)
        # Type IV filter, but the gain at zero frequency is not zero.
assert_raises(ValueError, firwin2, 16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0],
antisymmetric=True)
def test01(self):
width = 0.04
beta = 12.0
ntaps = 400
# Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w
# increases from w=0.5 to w=1 (w=1 is the Nyquist frequency).
freq = [0.0, 0.5, 1.0]
gain = [1.0, 1.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2,
0.75, 1.0-width/2])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5)
def test02(self):
width = 0.04
beta = 12.0
# ntaps must be odd for positive gain at Nyquist.
ntaps = 401
# An ideal highpass filter.
freq = [0.0, 0.5, 0.5, 1.0]
gain = [0.0, 0.0, 1.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test03(self):
width = 0.02
ntaps, beta = kaiserord(120, width)
# ntaps must be odd for positive gain at Nyquist.
ntaps = int(ntaps) | 1
freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0]
gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45,
0.5-width, 0.5+width, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test04(self):
"""Test firwin2 when window=None."""
ntaps = 5
# Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0]
freq = [0.0, 0.5, 0.5, 1.0]
gain = [1.0, 1.0, 0.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193)
alpha = 0.5 * (ntaps - 1)
m = np.arange(0, ntaps) - alpha
h = 0.5 * sinc(0.5 * m)
assert_array_almost_equal(h, taps)
def test05(self):
"""Test firwin2 for calculating Type IV filters"""
ntaps = 1500
freq = [0.0, 1.0]
gain = [0.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1])
freqs, response = freqz(taps, worN=2048)
assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4)
def test06(self):
"""Test firwin2 for calculating Type III filters"""
ntaps = 1501
freq = [0.0, 0.5, 0.55, 1.0]
gain = [0.0, 0.5, 0.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
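        # Type III (odd length, antisymmetric) filters always have a zero
        # center tap and odd symmetry about it, which is verified next.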
assert_equal(taps[ntaps // 2], 0.0)
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1])
freqs, response1 = freqz(taps, worN=2048)
response2 = np.interp(freqs / np.pi, freq, gain)
assert_array_almost_equal(abs(response1), response2, decimal=3)
def test_fs_nyq(self):
taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], fs=120.0)
assert_array_almost_equal(taps1, taps2)
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], nyq=60.0)
assert_array_almost_equal(taps1, taps2)
class TestRemez(object):
def test_bad_args(self):
assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka')
def test_hilbert(self):
N = 11 # number of taps in the filter
a = 0.1 # width of the transition band
        # design a unity-gain Hilbert bandpass filter from a to 0.5-a
        h = remez(N, [a, 0.5-a], [1], type='hilbert')
# make sure the filter has correct # of taps
assert_(len(h) == N, "Number of Taps")
# make sure it is type III (anti-symmetric tap coefficients)
assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
        # Since the requested response is symmetric, all even coefficients
        # should be zero (or in this case really small)
assert_((abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero")
# now check the frequency response
w, H = freqz(h, 1)
f = w/2/np.pi
Hmag = abs(H)
# should have a zero at 0 and pi (in this case close to zero)
assert_((Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi")
# check that the pass band is close to unity
idx = np.logical_and(f > a, f < 0.5-a)
assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity")
def test_compare(self):
# test comparison to MATLAB
k = [0.024590270518440, -0.041314581814658, -0.075943803756711,
-0.003530911231040, 0.193140296954975, 0.373400753484939,
0.373400753484939, 0.193140296954975, -0.003530911231040,
-0.075943803756711, -0.041314581814658, 0.024590270518440]
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], Hz=2.)
assert_allclose(h, k)
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
assert_allclose(h, k)
h = [-0.038976016082299, 0.018704846485491, -0.014644062687875,
0.002879152556419, 0.016849978528150, -0.043276706138248,
0.073641298245579, -0.103908158578635, 0.129770906801075,
-0.147163447297124, 0.153302248456347, -0.147163447297124,
0.129770906801075, -0.103908158578635, 0.073641298245579,
-0.043276706138248, 0.016849978528150, 0.002879152556419,
-0.014644062687875, 0.018704846485491, -0.038976016082299]
assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], Hz=2.), h)
assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.), h)
class TestFirls(object):
def test_bad_args(self):
# even numtaps
assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0])
# odd bands
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0])
# len(bands) != len(desired)
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0])
# non-monotonic bands
assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0])
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4)
assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4)
assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4)
# negative desired
assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1])
# len(weight) != len(pairs)
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [1, 2])
# negative weight
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [-1])
def test_firls(self):
N = 11 # number of taps in the filter
a = 0.1 # width of the transition band
# design a halfband symmetric low-pass filter
        h = firls(N, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], fs=1.0)
# make sure the filter has correct # of taps
assert_equal(len(h), N)
# make sure it is symmetric
midx = (N-1) // 2
assert_array_almost_equal(h[:midx], h[:-midx-1:-1])
# make sure the center tap is 0.5
assert_almost_equal(h[midx], 0.5)
# For halfband symmetric, odd coefficients (except the center)
# should be zero (really small)
hodd = np.hstack((h[1:midx:2], h[-midx+1::2]))
assert_array_almost_equal(hodd, 0)
# now check the frequency response
w, H = freqz(h, 1)
f = w/2/np.pi
Hmag = np.abs(H)
# check that the pass band is close to unity
idx = np.logical_and(f > 0, f < a)
assert_array_almost_equal(Hmag[idx], 1, decimal=3)
# check that the stop band is close to zero
idx = np.logical_and(f > 0.5-a, f < 0.5)
assert_array_almost_equal(Hmag[idx], 0, decimal=3)
def test_compare(self):
# compare to OCTAVE output
taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], [1, 2])
# >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]);
known_taps = [-6.26930101730182e-04, -1.03354450635036e-01,
-9.81576747564301e-03, 3.17271686090449e-01,
5.11409425599933e-01, 3.17271686090449e-01,
-9.81576747564301e-03, -1.03354450635036e-01,
-6.26930101730182e-04]
assert_allclose(taps, known_taps)
# compare to MATLAB output
taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], [1, 2])
# >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]);
known_taps = [
0.058545300496815, -0.014233383714318, -0.104688258464392,
0.012403323025279, 0.317930861136062, 0.488047220029700,
0.317930861136062, 0.012403323025279, -0.104688258464392,
-0.014233383714318, 0.058545300496815]
assert_allclose(taps, known_taps)
# With linear changes:
taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20)
# >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0])
known_taps = [
1.156090832768218, -4.1385894727395849, 7.5288619164321826,
-8.5530572592947856, 7.5288619164321826, -4.1385894727395849,
1.156090832768218]
assert_allclose(taps, known_taps)
taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], nyq=10)
assert_allclose(taps, known_taps)
class TestMinimumPhase(object):
def test_bad_args(self):
        # not enough taps
        assert_raises(ValueError, minimum_phase, [1.])
        assert_raises(ValueError, minimum_phase, [1., 1.])
        # complex coefficients are not supported
        assert_raises(ValueError, minimum_phase, np.ones(10) * 1j)
        # input must be a numeric sequence
        assert_raises(ValueError, minimum_phase, 'foo')
        # n_fft is too small for the number of taps
        assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8)
        # unknown method
        assert_raises(ValueError, minimum_phase, np.ones(10), method='foo')
assert_warns(RuntimeWarning, minimum_phase, np.arange(3))
def test_homomorphic(self):
# check that it can recover frequency responses of arbitrary
# linear-phase filters
# for some cases we can get the actual filter back
h = [1, -1]
h_new = minimum_phase(np.convolve(h, h[::-1]))
assert_allclose(h_new, h, rtol=0.05)
# but in general we only guarantee we get the magnitude back
rng = np.random.RandomState(0)
for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101):
h = rng.randn(n)
h_new = minimum_phase(np.convolve(h, h[::-1]))
assert_allclose(np.abs(np.fft.fft(h_new)),
np.abs(np.fft.fft(h)), rtol=1e-4)
def test_hilbert(self):
# compare to MATLAB output of reference implementation
# f=[0 0.3 0.5 1];
# a=[1 1 0 0];
# h=remez(11,f,a);
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
k = [0.349585548646686, 0.373552164395447, 0.326082685363438,
0.077152207480935, -0.129943946349364, -0.059355880509749]
m = minimum_phase(h, 'hilbert')
assert_allclose(m, k, rtol=2e-3)
# f=[0 0.8 0.9 1];
# a=[0 0 1 1];
# h=remez(20,f,a);
h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.)
k = [0.232486803906329, -0.133551833687071, 0.151871456867244,
-0.157957283165866, 0.151739294892963, -0.129293146705090,
0.100787844523204, -0.065832656741252, 0.035361328741024,
-0.014977068692269, -0.158416139047557]
m = minimum_phase(h, 'hilbert', n_fft=2**19)
assert_allclose(m, k, rtol=2e-3)
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_max_len_seq.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from pytest import raises as assert_raises
from numpy.fft import fft, ifft
from scipy.signal import max_len_seq
class TestMLS(object):
def test_mls_inputs(self):
# can't all be zero state
assert_raises(ValueError, max_len_seq,
10, state=np.zeros(10))
# wrong size state
assert_raises(ValueError, max_len_seq, 10,
state=np.ones(3))
# wrong length
assert_raises(ValueError, max_len_seq, 10, length=-1)
assert_array_equal(max_len_seq(10, length=0)[0], [])
# unknown taps
assert_raises(ValueError, max_len_seq, 64)
# bad taps
assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1])
def test_mls_output(self):
# define some alternate working taps
alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4],
8: [7, 5, 3]}
# assume the other bit levels work, too slow to test higher orders...
for nbits in range(2, 8):
for state in [None, np.round(np.random.rand(nbits))]:
for taps in [None, alt_taps[nbits]]:
if state is not None and np.all(state == 0):
state[0] = 1 # they can't all be zero
orig_m = max_len_seq(nbits, state=state,
taps=taps)[0]
m = 2. * orig_m - 1. # convert to +/- 1 representation
                    # First, make sure we got all 1's or -1's
err_msg = "mls had non binary terms"
assert_array_equal(np.abs(m), np.ones_like(m),
err_msg=err_msg)
                    # Test via circular cross-correlation, which is just
                    # multiplication in the frequency domain with one signal
                    # conjugated
tester = np.real(ifft(fft(m) * np.conj(fft(m))))
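                    # ifft(fft(m) * conj(fft(m))) is the circular
                    # autocorrelation of m, computed via the FFT.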
out_len = 2**nbits - 1
                    # impulse amplitude == out_len
err_msg = "mls impulse has incorrect value"
assert_allclose(tester[0], out_len, err_msg=err_msg)
# steady-state is -1
err_msg = "mls steady-state has incorrect value"
assert_allclose(tester[1:], -1 * np.ones(out_len - 1),
err_msg=err_msg)
                    # generate the same sequence in chunks, reusing the saved
                    # state between calls, for a couple of split points
for n in (1, 2**(nbits - 1)):
m1, s1 = max_len_seq(nbits, state=state, taps=taps,
length=n)
m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
length=1)
m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
length=out_len - n - 1)
new_m = np.concatenate((m1, m2, m3))
assert_array_equal(orig_m, new_m)
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_filter_design.py
from __future__ import division, print_function, absolute_import
import warnings
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import (assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_equal, assert_,
assert_allclose, assert_warns)
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from numpy import array, spacing, sin, pi, sort, sqrt
from scipy.signal import (BadCoefficients, bessel, besselap, bilinear, buttap,
butter, buttord, cheb1ap, cheb1ord, cheb2ap,
cheb2ord, cheby1, cheby2, ellip, ellipap, ellipord,
firwin, freqs_zpk, freqs, freqz, freqz_zpk,
group_delay, iirfilter, iirnotch, iirpeak, lp2bp,
lp2bs, lp2hp, lp2lp, normalize, sos2tf, sos2zpk,
sosfreqz, tf2sos, tf2zpk, zpk2sos, zpk2tf,
bilinear_zpk, lp2lp_zpk, lp2hp_zpk, lp2bp_zpk,
lp2bs_zpk)
from scipy.signal.filter_design import (_cplxreal, _cplxpair, _norm_factor,
_bessel_poly, _bessel_zeros)
try:
import mpmath
except ImportError:
mpmath = None
def mpmath_check(min_ver):
return pytest.mark.skipif(mpmath is None or
LooseVersion(mpmath.__version__) < LooseVersion(min_ver),
reason="mpmath version >= %s required" % min_ver)
class TestCplxPair(object):
def test_trivial_input(self):
assert_equal(_cplxpair([]).size, 0)
assert_equal(_cplxpair(1), 1)
def test_output_order(self):
assert_allclose(_cplxpair([1+1j, 1-1j]), [1-1j, 1+1j])
a = [1+1j, 1+1j, 1, 1-1j, 1-1j, 2]
b = [1-1j, 1+1j, 1-1j, 1+1j, 1, 2]
assert_allclose(_cplxpair(a), b)
# points spaced around the unit circle
z = np.exp(2j*pi*array([4, 3, 5, 2, 6, 1, 0])/7)
z1 = np.copy(z)
np.random.shuffle(z)
assert_allclose(_cplxpair(z), z1)
np.random.shuffle(z)
assert_allclose(_cplxpair(z), z1)
np.random.shuffle(z)
assert_allclose(_cplxpair(z), z1)
# Should be able to pair up all the conjugates
x = np.random.rand(10000) + 1j * np.random.rand(10000)
y = x.conj()
z = np.random.rand(10000)
x = np.concatenate((x, y, z))
np.random.shuffle(x)
c = _cplxpair(x)
# Every other element of head should be conjugates:
assert_allclose(c[0:20000:2], np.conj(c[1:20000:2]))
# Real parts of head should be in sorted order:
assert_allclose(c[0:20000:2].real, np.sort(c[0:20000:2].real))
# Tail should be sorted real numbers:
assert_allclose(c[20000:], np.sort(c[20000:]))
def test_real_integer_input(self):
assert_array_equal(_cplxpair([2, 0, 1]), [0, 1, 2])
def test_tolerances(self):
eps = spacing(1)
assert_allclose(_cplxpair([1j, -1j, 1+1j*eps], tol=2*eps),
[-1j, 1j, 1+1j*eps])
# sorting close to 0
assert_allclose(_cplxpair([-eps+1j, +eps-1j]), [-1j, +1j])
assert_allclose(_cplxpair([+eps+1j, -eps-1j]), [-1j, +1j])
assert_allclose(_cplxpair([+1j, -1j]), [-1j, +1j])
def test_unmatched_conjugates(self):
# 1+2j is unmatched
assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j])
# 1+2j and 1-3j are unmatched
assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j, 1-3j])
# 1+3j is unmatched
assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+3j])
# Not conjugates
assert_raises(ValueError, _cplxpair, [4+5j, 4+5j])
assert_raises(ValueError, _cplxpair, [1-7j, 1-7j])
# No pairs
assert_raises(ValueError, _cplxpair, [1+3j])
assert_raises(ValueError, _cplxpair, [1-3j])
class TestCplxReal(object):
def test_trivial_input(self):
assert_equal(_cplxreal([]), ([], []))
assert_equal(_cplxreal(1), ([], [1]))
def test_output_order(self):
zc, zr = _cplxreal(np.roots(array([1, 0, 0, 1])))
assert_allclose(np.append(zc, zr), [1/2 + 1j*sin(pi/3), -1])
eps = spacing(1)
a = [0+1j, 0-1j, eps + 1j, eps - 1j, -eps + 1j, -eps - 1j,
1, 4, 2, 3, 0, 0,
2+3j, 2-3j,
1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, # sorts out of order
3+1j, 3+1j, 3+1j, 3-1j, 3-1j, 3-1j,
2-3j, 2+3j]
zc, zr = _cplxreal(a)
assert_allclose(zc, [1j, 1j, 1j, 1+1j, 1+2j, 2+3j, 2+3j, 3+1j, 3+1j,
3+1j])
assert_allclose(zr, [0, 0, 1, 2, 3, 4])
z = array([1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, 1+eps+3j, 1-2*eps-3j,
0+1j, 0-1j, 2+4j, 2-4j, 2+3j, 2-3j, 3+7j, 3-7j, 4-eps+1j,
4+eps-2j, 4-1j, 4-eps+2j])
zc, zr = _cplxreal(z)
assert_allclose(zc, [1j, 1+1j, 1+2j, 1+3j, 2+3j, 2+4j, 3+7j, 4+1j,
4+2j])
assert_equal(zr, [])
def test_unmatched_conjugates(self):
# 1+2j is unmatched
assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j])
# 1+2j and 1-3j are unmatched
assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j, 1-3j])
# 1+3j is unmatched
assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+3j])
# No pairs
assert_raises(ValueError, _cplxreal, [1+3j])
assert_raises(ValueError, _cplxreal, [1-3j])
def test_real_integer_input(self):
zc, zr = _cplxreal([2, 0, 1, 4])
assert_array_equal(zc, [])
assert_array_equal(zr, [0, 1, 2, 4])
class TestTf2zpk(object):
def test_simple(self):
z_r = np.array([0.5, -0.5])
p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
# Sort the zeros/poles so that we don't fail the test if the order
# changes
z_r.sort()
p_r.sort()
b = np.poly(z_r)
a = np.poly(p_r)
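        # np.poly built the polynomial coefficients from the given roots, so
        # tf2zpk should recover exactly these zeros and poles.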
z, p, k = tf2zpk(b, a)
z.sort()
p.sort()
assert_array_almost_equal(z, z_r)
assert_array_almost_equal(p, p_r)
def test_bad_filter(self):
# Regression test for #651: better handling of badly conditioned
# filter coefficients.
with suppress_warnings():
warnings.simplefilter("error", BadCoefficients)
assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0])
class TestZpk2Tf(object):
def test_identity(self):
"""Test the identity transfer function."""
z = []
p = []
k = 1.
b, a = zpk2tf(z, p, k)
b_r = np.array([1.]) # desired result
a_r = np.array([1.]) # desired result
# The test for the *type* of the return values is a regression
# test for ticket #1095. In the case p=[], zpk2tf used to
# return the scalar 1.0 instead of array([1.0]).
assert_array_equal(b, b_r)
assert_(isinstance(b, np.ndarray))
assert_array_equal(a, a_r)
assert_(isinstance(a, np.ndarray))
class TestSos2Zpk(object):
def test_basic(self):
sos = [[1, 0, 1, 1, 0, -0.81],
[1, 0, 0, 1, 0, +0.49]]
z, p, k = sos2zpk(sos)
z2 = [1j, -1j, 0, 0]
p2 = [0.9, -0.9, 0.7j, -0.7j]
k2 = 1
assert_array_almost_equal(sort(z), sort(z2), decimal=4)
assert_array_almost_equal(sort(p), sort(p2), decimal=4)
assert_array_almost_equal(k, k2)
sos = [[1.00000, +0.61803, 1.0000, 1.00000, +0.60515, 0.95873],
[1.00000, -1.61803, 1.0000, 1.00000, -1.58430, 0.95873],
[1.00000, +1.00000, 0.0000, 1.00000, +0.97915, 0.00000]]
z, p, k = sos2zpk(sos)
z2 = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
0.8090 - 0.5878j, -1.0000 + 0.0000j, 0]
p2 = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
0.7922 - 0.5755j, -0.9791 + 0.0000j, 0]
k2 = 1
assert_array_almost_equal(sort(z), sort(z2), decimal=4)
assert_array_almost_equal(sort(p), sort(p2), decimal=4)
sos = array([[1, 2, 3, 1, 0.2, 0.3],
[4, 5, 6, 1, 0.4, 0.5]])
z = array([-1 - 1.41421356237310j, -1 + 1.41421356237310j,
-0.625 - 1.05326872164704j, -0.625 + 1.05326872164704j])
p = array([-0.2 - 0.678232998312527j, -0.2 + 0.678232998312527j,
-0.1 - 0.538516480713450j, -0.1 + 0.538516480713450j])
k = 4
z2, p2, k2 = sos2zpk(sos)
assert_allclose(_cplxpair(z2), z)
assert_allclose(_cplxpair(p2), p)
assert_allclose(k2, k)
class TestSos2Tf(object):
def test_basic(self):
sos = [[1, 1, 1, 1, 0, -1],
[-2, 3, 1, 1, 10, 1]]
b, a = sos2tf(sos)
assert_array_almost_equal(b, [-2, 1, 2, 4, 1])
assert_array_almost_equal(a, [1, 10, 0, -10, -1])
class TestTf2Sos(object):
def test_basic(self):
num = [2, 16, 44, 56, 32]
den = [3, 3, -15, 18, -12]
sos = tf2sos(num, den)
sos2 = [[0.6667, 4.0000, 5.3333, 1.0000, +2.0000, -4.0000],
[1.0000, 2.0000, 2.0000, 1.0000, -1.0000, +1.0000]]
assert_array_almost_equal(sos, sos2, decimal=4)
b = [1, -3, 11, -27, 18]
a = [16, 12, 2, -4, -1]
sos = tf2sos(b, a)
sos2 = [[0.0625, -0.1875, 0.1250, 1.0000, -0.2500, -0.1250],
[1.0000, +0.0000, 9.0000, 1.0000, +1.0000, +0.5000]]
# assert_array_almost_equal(sos, sos2, decimal=4)
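        # NOTE: the comparison above is left disabled, presumably because
        # tf2sos can order the second-order sections differently from this
        # reference output.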
class TestZpk2Sos(object):
def test_basic(self):
for pairing in ('nearest', 'keep_odd'):
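            # Each case below is checked under both pairing strategies; the
            # strategies only disagree where the code explicitly branches on
            # `pairing`.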
#
# Cases that match octave
#
z = [-1, -1]
p = [0.57149 + 0.29360j, 0.57149 - 0.29360j]
k = 1
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]] # octave & MATLAB
assert_array_almost_equal(sos, sos2, decimal=4)
z = [1j, -1j]
p = [0.9, -0.9, 0.7j, -0.7j]
k = 1
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[1, 0, 1, 1, 0, +0.49],
[1, 0, 0, 1, 0, -0.81]] # octave
# sos2 = [[0, 0, 1, 1, -0.9, 0],
# [1, 0, 1, 1, 0.9, 0]] # MATLAB
assert_array_almost_equal(sos, sos2, decimal=4)
z = []
p = [0.8, -0.5+0.25j, -0.5-0.25j]
k = 1.
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[1., 0., 0., 1., 1., 0.3125],
[1., 0., 0., 1., -0.8, 0.]] # octave, MATLAB fails
assert_array_almost_equal(sos, sos2, decimal=4)
z = [1., 1., 0.9j, -0.9j]
p = [0.99+0.01j, 0.99-0.01j, 0.1+0.9j, 0.1-0.9j]
k = 1
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[1, 0, 0.81, 1, -0.2, 0.82],
[1, -2, 1, 1, -1.98, 0.9802]] # octave
# sos2 = [[1, -2, 1, 1, -0.2, 0.82],
# [1, 0, 0.81, 1, -1.98, 0.9802]] # MATLAB
assert_array_almost_equal(sos, sos2, decimal=4)
z = [0.9+0.1j, 0.9-0.1j, -0.9]
p = [0.75+0.25j, 0.75-0.25j, 0.9]
k = 1
sos = zpk2sos(z, p, k, pairing=pairing)
if pairing == 'keep_odd':
sos2 = [[1, -1.8, 0.82, 1, -1.5, 0.625],
[1, 0.9, 0, 1, -0.9, 0]] # octave; MATLAB fails
assert_array_almost_equal(sos, sos2, decimal=4)
else: # pairing == 'nearest'
sos2 = [[1, 0.9, 0, 1, -1.5, 0.625],
[1, -1.8, 0.82, 1, -0.9, 0]] # our algorithm
assert_array_almost_equal(sos, sos2, decimal=4)
#
# Cases that differ from octave:
#
z = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
+0.8090 - 0.5878j, -1.0000 + 0.0000j]
p = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
+0.7922 - 0.5755j, -0.9791 + 0.0000j]
k = 1
sos = zpk2sos(z, p, k, pairing=pairing)
# sos2 = [[1, 0.618, 1, 1, 0.6052, 0.95870],
# [1, -1.618, 1, 1, -1.5844, 0.95878],
# [1, 1, 0, 1, 0.9791, 0]] # octave, MATLAB fails
sos2 = [[1, 1, 0, 1, +0.97915, 0],
[1, 0.61803, 1, 1, +0.60515, 0.95873],
[1, -1.61803, 1, 1, -1.58430, 0.95873]]
assert_array_almost_equal(sos, sos2, decimal=4)
z = [-1 - 1.4142j, -1 + 1.4142j,
-0.625 - 1.0533j, -0.625 + 1.0533j]
p = [-0.2 - 0.6782j, -0.2 + 0.6782j,
-0.1 - 0.5385j, -0.1 + 0.5385j]
k = 4
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[4, 8, 12, 1, 0.2, 0.3],
[1, 1.25, 1.5, 1, 0.4, 0.5]] # MATLAB
# sos2 = [[4, 8, 12, 1, 0.4, 0.5],
# [1, 1.25, 1.5, 1, 0.2, 0.3]] # octave
assert_allclose(sos, sos2, rtol=1e-4, atol=1e-4)
z = []
p = [0.2, -0.5+0.25j, -0.5-0.25j]
k = 1.
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[1., 0., 0., 1., -0.2, 0.],
[1., 0., 0., 1., 1., 0.3125]]
# sos2 = [[1., 0., 0., 1., 1., 0.3125],
# [1., 0., 0., 1., -0.2, 0]] # octave, MATLAB fails
assert_array_almost_equal(sos, sos2, decimal=4)
# The next two examples are adapted from Leland B. Jackson,
# "Digital Filters and Signal Processing (1995) p.400:
# http://books.google.com/books?id=VZ8uabI1pNMC&lpg=PA400&ots=gRD9pi8Jua&dq=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&pg=PA400#v=onepage&q=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&f=false
deg2rad = np.pi / 180.
k = 1.
# first example
thetas = [22.5, 45, 77.5]
mags = [0.8, 0.6, 0.9]
z = np.array([np.exp(theta * deg2rad * 1j) for theta in thetas])
z = np.concatenate((z, np.conj(z)))
p = np.array([mag * np.exp(theta * deg2rad * 1j)
for theta, mag in zip(thetas, mags)])
p = np.concatenate((p, np.conj(p)))
sos = zpk2sos(z, p, k)
# sos2 = [[1, -0.43288, 1, 1, -0.38959, 0.81], # octave,
# [1, -1.41421, 1, 1, -0.84853, 0.36], # MATLAB fails
# [1, -1.84776, 1, 1, -1.47821, 0.64]]
# Note that pole-zero pairing matches, but ordering is different
sos2 = [[1, -1.41421, 1, 1, -0.84853, 0.36],
[1, -1.84776, 1, 1, -1.47821, 0.64],
[1, -0.43288, 1, 1, -0.38959, 0.81]]
assert_array_almost_equal(sos, sos2, decimal=4)
# second example
z = np.array([np.exp(theta * deg2rad * 1j)
for theta in (85., 10.)])
z = np.concatenate((z, np.conj(z), [1, -1]))
sos = zpk2sos(z, p, k)
# sos2 = [[1, -0.17431, 1, 1, -0.38959, 0.81], # octave "wrong",
# [1, -1.96962, 1, 1, -0.84853, 0.36], # MATLAB fails
# [1, 0, -1, 1, -1.47821, 0.64000]]
# Our pole-zero pairing matches the text, Octave does not
sos2 = [[1, 0, -1, 1, -0.84853, 0.36],
[1, -1.96962, 1, 1, -1.47821, 0.64],
[1, -0.17431, 1, 1, -0.38959, 0.81]]
assert_array_almost_equal(sos, sos2, decimal=4)
class TestFreqs(object):
def test_basic(self):
_, h = freqs([1.0], [1.0], worN=8)
assert_array_almost_equal(h, np.ones(8))
def test_output(self):
# 1st order low-pass filter: H(s) = 1 / (s + 1)
w = [0.1, 1, 10, 100]
num = [1]
den = [1, 1]
w, H = freqs(num, den, worN=w)
s = w * 1j
expected = 1 / (s + 1)
assert_array_almost_equal(H.real, expected.real)
assert_array_almost_equal(H.imag, expected.imag)
def test_freq_range(self):
        # Test that freqs() finds a reasonable frequency range.
# 1st order low-pass filter: H(s) = 1 / (s + 1)
# Expected range is from 0.01 to 10.
num = [1]
den = [1, 1]
n = 10
expected_w = np.logspace(-2, 1, n)
w, H = freqs(num, den, worN=n)
assert_array_almost_equal(w, expected_w)
def test_plot(self):
def plot(w, h):
assert_array_almost_equal(h, np.ones(8))
assert_raises(ZeroDivisionError, freqs, [1.0], [1.0], worN=8,
plot=lambda w, h: 1 / 0)
freqs([1.0], [1.0], worN=8, plot=plot)
def test_backward_compat(self):
        # For backward compatibility, test that None acts as a wrapper for the default
w1, h1 = freqs([1.0], [1.0])
w2, h2 = freqs([1.0], [1.0], None)
assert_array_almost_equal(w1, w2)
assert_array_almost_equal(h1, h2)
class TestFreqs_zpk(object):
def test_basic(self):
_, h = freqs_zpk([1.0], [1.0], [1.0], worN=8)
assert_array_almost_equal(h, np.ones(8))
def test_output(self):
# 1st order low-pass filter: H(s) = 1 / (s + 1)
w = [0.1, 1, 10, 100]
z = []
p = [-1]
k = 1
w, H = freqs_zpk(z, p, k, worN=w)
s = w * 1j
expected = 1 / (s + 1)
assert_array_almost_equal(H.real, expected.real)
assert_array_almost_equal(H.imag, expected.imag)
def test_freq_range(self):
        # Test that freqs_zpk() finds a reasonable frequency range.
# 1st order low-pass filter: H(s) = 1 / (s + 1)
# Expected range is from 0.01 to 10.
z = []
p = [-1]
k = 1
n = 10
expected_w = np.logspace(-2, 1, n)
w, H = freqs_zpk(z, p, k, worN=n)
assert_array_almost_equal(w, expected_w)
def test_vs_freqs(self):
b, a = cheby1(4, 5, 100, analog=True, output='ba')
z, p, k = cheby1(4, 5, 100, analog=True, output='zpk')
w1, h1 = freqs(b, a)
w2, h2 = freqs_zpk(z, p, k)
assert_allclose(w1, w2)
assert_allclose(h1, h2, rtol=1e-6)
def test_backward_compat(self):
        # For backward compatibility, test that None acts as a wrapper for the default
w1, h1 = freqs_zpk([1.0], [1.0], [1.0])
w2, h2 = freqs_zpk([1.0], [1.0], [1.0], None)
assert_array_almost_equal(w1, w2)
assert_array_almost_equal(h1, h2)
class TestFreqz(object):
def test_ticket1441(self):
"""Regression test for ticket 1441."""
# Because freqz previously used arange instead of linspace,
# when N was large, it would return one more point than
# requested.
N = 100000
w, h = freqz([1.0], worN=N)
assert_equal(w.shape, (N,))
def test_basic(self):
w, h = freqz([1.0], worN=8)
assert_array_almost_equal(w, np.pi * np.arange(8) / 8.)
assert_array_almost_equal(h, np.ones(8))
w, h = freqz([1.0], worN=9)
assert_array_almost_equal(w, np.pi * np.arange(9) / 9.)
assert_array_almost_equal(h, np.ones(9))
for a in [1, np.ones(2)]:
w, h = freqz(np.ones(2), a, worN=0)
assert_equal(w.shape, (0,))
assert_equal(h.shape, (0,))
assert_equal(h.dtype, np.dtype('complex128'))
t = np.linspace(0, 1, 4, endpoint=False)
for b, a, h_whole in zip(
([1., 0, 0, 0], np.sin(2 * np.pi * t)),
([1., 0, 0, 0], [0.5, 0, 0, 0]),
([1., 1., 1., 1.], [0, -4j, 0, 4j])):
w, h = freqz(b, a, worN=4, whole=True)
expected_w = np.linspace(0, 2 * np.pi, 4, endpoint=False)
assert_array_almost_equal(w, expected_w)
assert_array_almost_equal(h, h_whole)
# simultaneously check int-like support
w, h = freqz(b, a, worN=np.int32(4), whole=True)
assert_array_almost_equal(w, expected_w)
assert_array_almost_equal(h, h_whole)
w, h = freqz(b, a, worN=w, whole=True)
assert_array_almost_equal(w, expected_w)
assert_array_almost_equal(h, h_whole)
def test_basic_whole(self):
w, h = freqz([1.0], worN=8, whole=True)
assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8)
assert_array_almost_equal(h, np.ones(8))
def test_plot(self):
def plot(w, h):
assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)
assert_array_almost_equal(h, np.ones(8))
assert_raises(ZeroDivisionError, freqz, [1.0], worN=8,
plot=lambda w, h: 1 / 0)
freqz([1.0], worN=8, plot=plot)
def test_fft_wrapping(self):
# Some simple real FIR filters
bs = list() # filters
as_ = list()
hs_whole = list()
hs_half = list()
# 3 taps
t = np.linspace(0, 1, 3, endpoint=False)
bs.append(np.sin(2 * np.pi * t))
as_.append(3.)
hs_whole.append([0, -0.5j, 0.5j])
hs_half.append([0, np.sqrt(1./12.), -0.5j])
# 4 taps
t = np.linspace(0, 1, 4, endpoint=False)
bs.append(np.sin(2 * np.pi * t))
as_.append(0.5)
hs_whole.append([0, -4j, 0, 4j])
hs_half.append([0, np.sqrt(8), -4j, -np.sqrt(8)])
del t
for ii, b in enumerate(bs):
# whole
a = as_[ii]
expected_w = np.linspace(0, 2 * np.pi, len(b), endpoint=False)
w, h = freqz(b, a, worN=expected_w, whole=True) # polyval
err_msg = 'b = %s, a=%s' % (b, a)
assert_array_almost_equal(w, expected_w, err_msg=err_msg)
assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg)
w, h = freqz(b, a, worN=len(b), whole=True) # FFT
assert_array_almost_equal(w, expected_w, err_msg=err_msg)
assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg)
# non-whole
expected_w = np.linspace(0, np.pi, len(b), endpoint=False)
w, h = freqz(b, a, worN=expected_w, whole=False) # polyval
assert_array_almost_equal(w, expected_w, err_msg=err_msg)
assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg)
w, h = freqz(b, a, worN=len(b), whole=False) # FFT
assert_array_almost_equal(w, expected_w, err_msg=err_msg)
assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg)
# some random FIR filters (real + complex)
# assume polyval is accurate
rng = np.random.RandomState(0)
for ii in range(2, 10): # number of taps
b = rng.randn(ii)
for kk in range(2):
a = rng.randn(1) if kk == 0 else rng.randn(3)
for jj in range(2):
if jj == 1:
b = b + rng.randn(ii) * 1j
# whole
expected_w = np.linspace(0, 2 * np.pi, ii, endpoint=False)
w, expected_h = freqz(b, a, worN=expected_w, whole=True)
assert_array_almost_equal(w, expected_w)
w, h = freqz(b, a, worN=ii, whole=True)
assert_array_almost_equal(w, expected_w)
assert_array_almost_equal(h, expected_h)
# half
expected_w = np.linspace(0, np.pi, ii, endpoint=False)
w, expected_h = freqz(b, a, worN=expected_w, whole=False)
assert_array_almost_equal(w, expected_w)
w, h = freqz(b, a, worN=ii, whole=False)
assert_array_almost_equal(w, expected_w)
assert_array_almost_equal(h, expected_h)
def test_broadcasting1(self):
# Test broadcasting with worN an integer or a 1-D array,
# b and a are n-dimensional arrays.
np.random.seed(123)
b = np.random.rand(3, 5, 1)
a = np.random.rand(2, 1)
for whole in [False, True]:
# Test with worN being integers (one fast for FFT and one not),
# a 1-D array, and an empty array.
for worN in [16, 17, np.linspace(0, 1, 10), np.array([])]:
w, h = freqz(b, a, worN=worN, whole=whole)
for k in range(b.shape[1]):
bk = b[:, k, 0]
ak = a[:, 0]
ww, hh = freqz(bk, ak, worN=worN, whole=whole)
assert_allclose(ww, w)
assert_allclose(hh, h[k])
def test_broadcasting2(self):
# Test broadcasting with worN an integer or a 1-D array,
# b is an n-dimensional array, and a is left at the default value.
np.random.seed(123)
b = np.random.rand(3, 5, 1)
for whole in [False, True]:
for worN in [16, 17, np.linspace(0, 1, 10)]:
w, h = freqz(b, worN=worN, whole=whole)
for k in range(b.shape[1]):
bk = b[:, k, 0]
ww, hh = freqz(bk, worN=worN, whole=whole)
assert_allclose(ww, w)
assert_allclose(hh, h[k])
def test_broadcasting3(self):
# Test broadcasting where b.shape[-1] is the same length
# as worN, and a is left at the default value.
np.random.seed(123)
N = 16
b = np.random.rand(3, N)
for whole in [False, True]:
for worN in [N, np.linspace(0, 1, N)]:
w, h = freqz(b, worN=worN, whole=whole)
assert_equal(w.size, N)
for k in range(N):
bk = b[:, k]
ww, hh = freqz(bk, worN=w[k], whole=whole)
assert_allclose(ww, w[k])
assert_allclose(hh, h[k])
def test_broadcasting4(self):
# Test broadcasting with worN a 2-D array.
np.random.seed(123)
b = np.random.rand(4, 2, 1, 1)
a = np.random.rand(5, 2, 1, 1)
for whole in [False, True]:
for worN in [np.random.rand(6, 7), np.empty((6, 0))]:
w, h = freqz(b, a, worN=worN, whole=whole)
assert_array_equal(w, worN)
assert_equal(h.shape, (2,) + worN.shape)
for k in range(2):
ww, hh = freqz(b[:, k, 0, 0], a[:, k, 0, 0], worN=worN.ravel(),
whole=whole)
assert_equal(ww, worN.ravel())
assert_allclose(hh, h[k, :, :].ravel())
def test_backward_compat(self):
        # For backward compatibility, test that None acts as a wrapper for the default
w1, h1 = freqz([1.0], 1)
w2, h2 = freqz([1.0], 1, None)
assert_array_almost_equal(w1, w2)
assert_array_almost_equal(h1, h2)
class TestSOSFreqz(object):
def test_sosfreqz_basic(self):
# Compare the results of freqz and sosfreqz for a low order
# Butterworth filter.
N = 500
b, a = butter(4, 0.2)
sos = butter(4, 0.2, output='sos')
w, h = freqz(b, a, worN=N)
w2, h2 = sosfreqz(sos, worN=N)
assert_equal(w2, w)
assert_allclose(h2, h, rtol=1e-10, atol=1e-14)
b, a = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass')
sos = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass', output='sos')
w, h = freqz(b, a, worN=N)
w2, h2 = sosfreqz(sos, worN=N)
assert_equal(w2, w)
assert_allclose(h2, h, rtol=1e-10, atol=1e-14)
# must have at least one section
assert_raises(ValueError, sosfreqz, sos[:0])
    def test_sosfreqz_design(self):
# Compare sosfreqz output against expected values for different
# filter types
# from cheb2ord
N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
sos = cheby2(N, 60, Wn, 'stop', output='sos')
w, h = sosfreqz(sos)
h = np.abs(h)
w /= np.pi
assert_allclose(20 * np.log10(h[w <= 0.1]), 0, atol=3.01)
assert_allclose(20 * np.log10(h[w >= 0.6]), 0., atol=3.01)
assert_allclose(h[(w >= 0.2) & (w <= 0.5)], 0., atol=1e-3) # <= -60 dB
N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 150)
sos = cheby2(N, 150, Wn, 'stop', output='sos')
w, h = sosfreqz(sos)
dB = 20*np.log10(np.abs(h))
w /= np.pi
assert_allclose(dB[w <= 0.1], 0, atol=3.01)
assert_allclose(dB[w >= 0.6], 0., atol=3.01)
assert_array_less(dB[(w >= 0.2) & (w <= 0.5)], -149.9)
# from cheb1ord
N, Wn = cheb1ord(0.2, 0.3, 3, 40)
sos = cheby1(N, 3, Wn, 'low', output='sos')
w, h = sosfreqz(sos)
h = np.abs(h)
w /= np.pi
assert_allclose(20 * np.log10(h[w <= 0.2]), 0, atol=3.01)
assert_allclose(h[w >= 0.3], 0., atol=1e-2) # <= -40 dB
N, Wn = cheb1ord(0.2, 0.3, 1, 150)
sos = cheby1(N, 1, Wn, 'low', output='sos')
w, h = sosfreqz(sos)
dB = 20*np.log10(np.abs(h))
w /= np.pi
assert_allclose(dB[w <= 0.2], 0, atol=1.01)
assert_array_less(dB[w >= 0.3], -149.9)
# adapted from ellipord
N, Wn = ellipord(0.3, 0.2, 3, 60)
sos = ellip(N, 0.3, 60, Wn, 'high', output='sos')
w, h = sosfreqz(sos)
h = np.abs(h)
w /= np.pi
assert_allclose(20 * np.log10(h[w >= 0.3]), 0, atol=3.01)
assert_allclose(h[w <= 0.1], 0., atol=1.5e-3) # <= -60 dB (approx)
# adapted from buttord
N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 40)
sos = butter(N, Wn, 'band', output='sos')
w, h = sosfreqz(sos)
h = np.abs(h)
w /= np.pi
assert_allclose(h[w <= 0.14], 0., atol=1e-2) # <= -40 dB
assert_allclose(h[w >= 0.6], 0., atol=1e-2) # <= -40 dB
assert_allclose(20 * np.log10(h[(w >= 0.2) & (w <= 0.5)]),
0, atol=3.01)
N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 100)
sos = butter(N, Wn, 'band', output='sos')
w, h = sosfreqz(sos)
dB = 20*np.log10(np.maximum(np.abs(h), 1e-10))
w /= np.pi
assert_array_less(dB[(w > 0) & (w <= 0.14)], -99.9)
assert_array_less(dB[w >= 0.6], -99.9)
assert_allclose(dB[(w >= 0.2) & (w <= 0.5)], 0, atol=3.01)
@pytest.mark.xfail
def test_sosfreqz_design_ellip(self):
N, Wn = ellipord(0.3, 0.1, 3, 60)
sos = ellip(N, 0.3, 60, Wn, 'high', output='sos')
w, h = sosfreqz(sos)
h = np.abs(h)
w /= np.pi
assert_allclose(20 * np.log10(h[w >= 0.3]), 0, atol=3.01)
assert_allclose(h[w <= 0.1], 0., atol=1.5e-3) # <= -60 dB (approx)
N, Wn = ellipord(0.3, 0.2, .5, 150)
sos = ellip(N, .5, 150, Wn, 'high', output='sos')
w, h = sosfreqz(sos)
dB = 20*np.log10(np.maximum(np.abs(h), 1e-10))
w /= np.pi
assert_allclose(dB[w >= 0.3], 0, atol=.55)
# this is not great (147 instead of 150, could be ellip[ord] problem?)
assert_array_less(dB[(w > 0) & (w <= 0.25)], -147)
@mpmath_check("0.10")
def test_sos_freqz_against_mp(self):
# Compare the result of sosfreqz applied to a high order Butterworth
# filter against the result computed using mpmath. (signal.freqz fails
# miserably with such high order filters.)
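        # (the coefficients of a 25th-order transfer function span many
        # orders of magnitude, so direct polynomial evaluation loses
        # precision; the second-order sections avoid this)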
from . import mpsig
N = 500
order = 25
Wn = 0.15
with mpmath.workdps(80):
z_mp, p_mp, k_mp = mpsig.butter_lp(order, Wn)
w_mp, h_mp = mpsig.zpkfreqz(z_mp, p_mp, k_mp, N)
w_mp = np.array([float(x) for x in w_mp])
h_mp = np.array([complex(x) for x in h_mp])
sos = butter(order, Wn, output='sos')
w, h = sosfreqz(sos, worN=N)
assert_allclose(w, w_mp, rtol=1e-12, atol=1e-14)
assert_allclose(h, h_mp, rtol=1e-12, atol=1e-14)
class TestFreqz_zpk(object):
def test_ticket1441(self):
"""Regression test for ticket 1441."""
# Because freqz previously used arange instead of linspace,
# when N was large, it would return one more point than
# requested.
N = 100000
w, h = freqz_zpk([0.5], [0.5], 1.0, worN=N)
assert_equal(w.shape, (N,))
def test_basic(self):
w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8)
assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)
assert_array_almost_equal(h, np.ones(8))
def test_basic_whole(self):
w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8, whole=True)
assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8)
assert_array_almost_equal(h, np.ones(8))
def test_vs_freqz(self):
b, a = cheby1(4, 5, 0.5, analog=False, output='ba')
z, p, k = cheby1(4, 5, 0.5, analog=False, output='zpk')
w1, h1 = freqz(b, a)
w2, h2 = freqz_zpk(z, p, k)
assert_allclose(w1, w2)
assert_allclose(h1, h2, rtol=1e-6)
def test_backward_compat(self):
        # For backward compatibility, test that None acts as a wrapper for the default
w1, h1 = freqz_zpk([0.5], [0.5], 1.0)
w2, h2 = freqz_zpk([0.5], [0.5], 1.0, None)
assert_array_almost_equal(w1, w2)
assert_array_almost_equal(h1, h2)
class TestNormalize(object):
def test_allclose(self):
"""Test for false positive on allclose in normalize() in
filter_design.py"""
        # Test to make sure the allclose call within signal.normalize does not
        # produce false positives. Then check against a known output from MATLAB
# to make sure the fix doesn't break anything.
# These are the coefficients returned from
        # `[b,a] = cheby1(8, 0.5, 0.048)`
# in MATLAB. There are at least 15 significant figures in each
# coefficient, so it makes sense to test for errors on the order of
# 1e-13 (this can always be relaxed if different platforms have
# different rounding errors)
b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10,
6.022052805239190e-10, 1.204410561047838e-09,
1.505513201309798e-09, 1.204410561047838e-09,
6.022052805239190e-10, 1.720586515782626e-10,
2.150733144728282e-11])
a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00,
2.654354569747454e+01, -5.182182531666387e+01,
6.334127355102684e+01, -4.963358186631157e+01,
2.434862182949389e+01, -6.836925348604676e+00,
8.412934944449140e-01])
# This is the input to signal.normalize after passing through the
# equivalent steps in signal.iirfilter as was done for MATLAB
b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05,
4.3520780422820447e-05, 8.7041560845640893e-05,
1.0880195105705122e-04, 8.7041560845640975e-05,
4.3520780422820447e-05, 1.2434508692234413e-05,
1.5543135865293012e-06])
a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05,
1.9182761917308895e+06, -3.7451128364682454e+06,
4.5776121393762771e+06, -3.5869706138592605e+06,
1.7596511818472347e+06, -4.9409793515707983e+05,
6.0799461347219651e+04])
b_output, a_output = normalize(b_norm_in, a_norm_in)
# The test on b works for decimal=14 but the one for a does not. For
# the sake of consistency, both of these are decimal=13. If something
# breaks on another platform, it is probably fine to relax this lower.
assert_array_almost_equal(b_matlab, b_output, decimal=13)
assert_array_almost_equal(a_matlab, a_output, decimal=13)
def test_errors(self):
"""Test the error cases."""
# all zero denominator
assert_raises(ValueError, normalize, [1, 2], 0)
# denominator not 1 dimensional
assert_raises(ValueError, normalize, [1, 2], [[1]])
# numerator too many dimensions
assert_raises(ValueError, normalize, [[[1, 2]]], 1)
class TestLp2lp(object):
def test_basic(self):
b = [1]
a = [1, np.sqrt(2), 1]
b_lp, a_lp = lp2lp(b, a, 0.38574256627112119)
assert_array_almost_equal(b_lp, [0.1488], decimal=4)
assert_array_almost_equal(a_lp, [1, 0.5455, 0.1488], decimal=4)
class TestLp2hp(object):
def test_basic(self):
b = [0.25059432325190018]
a = [1, 0.59724041654134863, 0.92834805757524175, 0.25059432325190018]
b_hp, a_hp = lp2hp(b, a, 2*np.pi*5000)
assert_allclose(b_hp, [1, 0, 0, 0])
assert_allclose(a_hp, [1, 1.1638e5, 2.3522e9, 1.2373e14], rtol=1e-4)
class TestLp2bp(object):
def test_basic(self):
b = [1]
a = [1, 2, 2, 1]
b_bp, a_bp = lp2bp(b, a, 2*np.pi*4000, 2*np.pi*2000)
assert_allclose(b_bp, [1.9844e12, 0, 0, 0], rtol=1e-6)
assert_allclose(a_bp, [1, 2.5133e4, 2.2108e9, 3.3735e13,
1.3965e18, 1.0028e22, 2.5202e26], rtol=1e-4)
class TestLp2bs(object):
def test_basic(self):
b = [1]
a = [1, 1]
b_bs, a_bs = lp2bs(b, a, 0.41722257286366754, 0.18460575326152251)
assert_array_almost_equal(b_bs, [1, 0, 0.17407], decimal=5)
assert_array_almost_equal(a_bs, [1, 0.18461, 0.17407], decimal=5)
class TestBilinear(object):
def test_basic(self):
b = [0.14879732743343033]
a = [1, 0.54552236880522209, 0.14879732743343033]
b_z, a_z = bilinear(b, a, 0.5)
assert_array_almost_equal(b_z, [0.087821, 0.17564, 0.087821],
decimal=5)
assert_array_almost_equal(a_z, [1, -1.0048, 0.35606], decimal=4)
b = [1, 0, 0.17407467530697837]
a = [1, 0.18460575326152251, 0.17407467530697837]
b_z, a_z = bilinear(b, a, 0.5)
assert_array_almost_equal(b_z, [0.86413, -1.2158, 0.86413],
decimal=4)
assert_array_almost_equal(a_z, [1, -1.2158, 0.72826],
decimal=4)
class TestLp2lp_zpk(object):
def test_basic(self):
z = []
p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)]
k = 1
z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 5)
assert_array_equal(z_lp, [])
assert_allclose(sort(p_lp), sort(p)*5)
assert_allclose(k_lp, 25)
# Pseudo-Chebyshev with both poles and zeros
z = [-2j, +2j]
p = [-0.75, -0.5-0.5j, -0.5+0.5j]
k = 3
z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 20)
assert_allclose(sort(z_lp), sort([-40j, +40j]))
assert_allclose(sort(p_lp), sort([-15, -10-10j, -10+10j]))
assert_allclose(k_lp, 60)
class TestLp2hp_zpk(object):
def test_basic(self):
z = []
p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)]
k = 1
z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 5)
assert_array_equal(z_hp, [0, 0])
assert_allclose(sort(p_hp), sort(p)*5)
assert_allclose(k_hp, 1)
z = [-2j, +2j]
p = [-0.75, -0.5-0.5j, -0.5+0.5j]
k = 3
z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 6)
assert_allclose(sort(z_hp), sort([-3j, 0, +3j]))
assert_allclose(sort(p_hp), sort([-8, -6-6j, -6+6j]))
assert_allclose(k_hp, 32)
class TestLp2bp_zpk(object):
def test_basic(self):
z = [-2j, +2j]
p = [-0.75, -0.5-0.5j, -0.5+0.5j]
k = 3
z_bp, p_bp, k_bp = lp2bp_zpk(z, p, k, 15, 8)
assert_allclose(sort(z_bp), sort([-25j, -9j, 0, +9j, +25j]))
assert_allclose(sort(p_bp), sort([-3 + 6j*sqrt(6),
-3 - 6j*sqrt(6),
+2j+sqrt(-8j-225)-2,
-2j+sqrt(+8j-225)-2,
+2j-sqrt(-8j-225)-2,
-2j-sqrt(+8j-225)-2, ]))
assert_allclose(k_bp, 24)
class TestLp2bs_zpk(object):
def test_basic(self):
z = [-2j, +2j]
p = [-0.75, -0.5-0.5j, -0.5+0.5j]
k = 3
z_bs, p_bs, k_bs = lp2bs_zpk(z, p, k, 35, 12)
assert_allclose(sort(z_bs), sort([+35j, -35j,
+3j+sqrt(1234)*1j,
-3j+sqrt(1234)*1j,
+3j-sqrt(1234)*1j,
-3j-sqrt(1234)*1j]))
assert_allclose(sort(p_bs), sort([+3j*sqrt(129) - 8,
-3j*sqrt(129) - 8,
(-6 + 6j) - sqrt(-1225 - 72j),
(-6 - 6j) - sqrt(-1225 + 72j),
(-6 + 6j) + sqrt(-1225 - 72j),
(-6 - 6j) + sqrt(-1225 + 72j), ]))
assert_allclose(k_bs, 32)
class TestBilinear_zpk(object):
def test_basic(self):
z = [-2j, +2j]
p = [-0.75, -0.5-0.5j, -0.5+0.5j]
k = 3
z_d, p_d, k_d = bilinear_zpk(z, p, k, 10)
assert_allclose(sort(z_d), sort([(20-2j)/(20+2j), (20+2j)/(20-2j),
-1]))
assert_allclose(sort(p_d), sort([77/83,
(1j/2 + 39/2) / (41/2 - 1j/2),
(39/2 - 1j/2) / (1j/2 + 41/2), ]))
assert_allclose(k_d, 9696/69803)
class TestPrototypeType(object):
def test_output_type(self):
# Prototypes should consistently output arrays, not lists
# https://github.com/scipy/scipy/pull/441
for func in (buttap,
besselap,
lambda N: cheb1ap(N, 1),
lambda N: cheb2ap(N, 20),
lambda N: ellipap(N, 1, 20)):
for N in range(7):
z, p, k = func(N)
assert_(isinstance(z, np.ndarray))
assert_(isinstance(p, np.ndarray))
def dB(x):
# Return magnitude in decibels, avoiding divide-by-zero warnings
# (and deal with some "not less-ordered" errors when -inf shows up)
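    # np.finfo(np.float64).tiny is the smallest positive normal double
    # (~2.2e-308), which keeps the log10 argument strictly positive.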
return 20 * np.log10(np.maximum(np.abs(x), np.finfo(np.float64).tiny))
class TestButtord(object):
def test_lowpass(self):
wp = 0.2
ws = 0.3
rp = 3
rs = 60
N, Wn = buttord(wp, ws, rp, rs, False)
b, a = butter(N, Wn, 'lowpass', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp, dB(h[w <= wp]))
assert_array_less(dB(h[ws <= w]), -rs)
assert_equal(N, 16)
assert_allclose(Wn, 2.0002776782743284e-01, rtol=1e-15)
def test_highpass(self):
wp = 0.3
ws = 0.2
rp = 3
rs = 70
N, Wn = buttord(wp, ws, rp, rs, False)
b, a = butter(N, Wn, 'highpass', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp, dB(h[wp <= w]))
assert_array_less(dB(h[w <= ws]), -rs)
assert_equal(N, 18)
assert_allclose(Wn, 2.9996603079132672e-01, rtol=1e-15)
def test_bandpass(self):
wp = [0.2, 0.5]
ws = [0.1, 0.6]
rp = 3
rs = 80
N, Wn = buttord(wp, ws, rp, rs, False)
b, a = butter(N, Wn, 'bandpass', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
-rs + 0.1)
assert_equal(N, 18)
assert_allclose(Wn, [1.9998742411409134e-01, 5.0002139595676276e-01],
rtol=1e-15)
def test_bandstop(self):
wp = [0.1, 0.6]
ws = [0.2, 0.5]
rp = 3
rs = 90
N, Wn = buttord(wp, ws, rp, rs, False)
b, a = butter(N, Wn, 'bandstop', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp,
dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
-rs)
assert_equal(N, 20)
assert_allclose(Wn, [1.4759432329294042e-01, 5.9997365985276407e-01],
rtol=1e-6)
def test_analog(self):
wp = 200
ws = 600
rp = 3
rs = 60
N, Wn = buttord(wp, ws, rp, rs, True)
b, a = butter(N, Wn, 'lowpass', True)
w, h = freqs(b, a)
assert_array_less(-rp, dB(h[w <= wp]))
assert_array_less(dB(h[ws <= w]), -rs)
assert_equal(N, 7)
assert_allclose(Wn, 2.0006785355671877e+02, rtol=1e-15)
n, Wn = buttord(1, 550/450, 1, 26, analog=True)
assert_equal(n, 19)
assert_allclose(Wn, 1.0361980524629517, rtol=1e-15)
assert_equal(buttord(1, 1.2, 1, 80, analog=True)[0], 55)
class TestCheb1ord(object):
def test_lowpass(self):
wp = 0.2
ws = 0.3
rp = 3
rs = 60
N, Wn = cheb1ord(wp, ws, rp, rs, False)
b, a = cheby1(N, rp, Wn, 'low', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[w <= wp]))
assert_array_less(dB(h[ws <= w]), -rs + 0.1)
assert_equal(N, 8)
assert_allclose(Wn, 0.2, rtol=1e-15)
def test_highpass(self):
wp = 0.3
ws = 0.2
rp = 3
rs = 70
N, Wn = cheb1ord(wp, ws, rp, rs, False)
b, a = cheby1(N, rp, Wn, 'high', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[wp <= w]))
assert_array_less(dB(h[w <= ws]), -rs + 0.1)
assert_equal(N, 9)
assert_allclose(Wn, 0.3, rtol=1e-15)
def test_bandpass(self):
wp = [0.2, 0.5]
ws = [0.1, 0.6]
rp = 3
rs = 80
N, Wn = cheb1ord(wp, ws, rp, rs, False)
b, a = cheby1(N, rp, Wn, 'band', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
-rs + 0.1)
assert_equal(N, 9)
assert_allclose(Wn, [0.2, 0.5], rtol=1e-15)
def test_bandstop(self):
wp = [0.1, 0.6]
ws = [0.2, 0.5]
rp = 3
rs = 90
N, Wn = cheb1ord(wp, ws, rp, rs, False)
b, a = cheby1(N, rp, Wn, 'stop', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
-rs + 0.1)
assert_equal(N, 10)
assert_allclose(Wn, [0.14758232569947785, 0.6], rtol=1e-5)
def test_analog(self):
wp = 700
ws = 100
rp = 3
rs = 70
N, Wn = cheb1ord(wp, ws, rp, rs, True)
b, a = cheby1(N, rp, Wn, 'high', True)
w, h = freqs(b, a)
assert_array_less(-rp - 0.1, dB(h[wp <= w]))
assert_array_less(dB(h[w <= ws]), -rs + 0.1)
assert_equal(N, 4)
assert_allclose(Wn, 700, rtol=1e-15)
assert_equal(cheb1ord(1, 1.2, 1, 80, analog=True)[0], 17)
class TestCheb2ord(object):
def test_lowpass(self):
wp = 0.2
ws = 0.3
rp = 3
rs = 60
N, Wn = cheb2ord(wp, ws, rp, rs, False)
b, a = cheby2(N, rs, Wn, 'lp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[w <= wp]))
assert_array_less(dB(h[ws <= w]), -rs + 0.1)
assert_equal(N, 8)
assert_allclose(Wn, 0.28647639976553163, rtol=1e-15)
def test_highpass(self):
wp = 0.3
ws = 0.2
rp = 3
rs = 70
N, Wn = cheb2ord(wp, ws, rp, rs, False)
b, a = cheby2(N, rs, Wn, 'hp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[wp <= w]))
assert_array_less(dB(h[w <= ws]), -rs + 0.1)
assert_equal(N, 9)
assert_allclose(Wn, 0.20697492182903282, rtol=1e-15)
def test_bandpass(self):
wp = [0.2, 0.5]
ws = [0.1, 0.6]
rp = 3
rs = 80
N, Wn = cheb2ord(wp, ws, rp, rs, False)
b, a = cheby2(N, rs, Wn, 'bp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
-rs + 0.1)
assert_equal(N, 9)
assert_allclose(Wn, [0.14876937565923479, 0.59748447842351482],
rtol=1e-15)
def test_bandstop(self):
wp = [0.1, 0.6]
ws = [0.2, 0.5]
rp = 3
rs = 90
N, Wn = cheb2ord(wp, ws, rp, rs, False)
b, a = cheby2(N, rs, Wn, 'bs', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
-rs + 0.1)
assert_equal(N, 10)
assert_allclose(Wn, [0.19926249974781743, 0.50125246585567362],
rtol=1e-6)
def test_analog(self):
wp = [20, 50]
ws = [10, 60]
rp = 3
rs = 80
N, Wn = cheb2ord(wp, ws, rp, rs, True)
b, a = cheby2(N, rs, Wn, 'bp', True)
w, h = freqs(b, a)
assert_array_less(-rp - 0.1,
dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
-rs + 0.1)
assert_equal(N, 11)
assert_allclose(Wn, [1.673740595370124e+01, 5.974641487254268e+01],
rtol=1e-15)
class TestEllipord(object):
def test_lowpass(self):
wp = 0.2
ws = 0.3
rp = 3
rs = 60
N, Wn = ellipord(wp, ws, rp, rs, False)
b, a = ellip(N, rp, rs, Wn, 'lp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[w <= wp]))
assert_array_less(dB(h[ws <= w]), -rs + 0.1)
assert_equal(N, 5)
assert_allclose(Wn, 0.2, rtol=1e-15)
def test_highpass(self):
wp = 0.3
ws = 0.2
rp = 3
rs = 70
N, Wn = ellipord(wp, ws, rp, rs, False)
b, a = ellip(N, rp, rs, Wn, 'hp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[wp <= w]))
assert_array_less(dB(h[w <= ws]), -rs + 0.1)
assert_equal(N, 6)
assert_allclose(Wn, 0.3, rtol=1e-15)
def test_bandpass(self):
wp = [0.2, 0.5]
ws = [0.1, 0.6]
rp = 3
rs = 80
N, Wn = ellipord(wp, ws, rp, rs, False)
b, a = ellip(N, rp, rs, Wn, 'bp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
-rs + 0.1)
assert_equal(N, 6)
assert_allclose(Wn, [0.2, 0.5], rtol=1e-15)
def test_bandstop(self):
wp = [0.1, 0.6]
ws = [0.2, 0.5]
rp = 3
rs = 90
N, Wn = ellipord(wp, ws, rp, rs, False)
b, a = ellip(N, rp, rs, Wn, 'bs', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
-rs + 0.1)
assert_equal(N, 7)
assert_allclose(Wn, [0.14758232794342988, 0.6], rtol=1e-5)
def test_analog(self):
wp = [1000, 6000]
ws = [2000, 5000]
rp = 3
rs = 90
N, Wn = ellipord(wp, ws, rp, rs, True)
b, a = ellip(N, rp, rs, Wn, 'bs', True)
w, h = freqs(b, a)
assert_array_less(-rp - 0.1,
dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
-rs + 0.1)
assert_equal(N, 8)
assert_allclose(Wn, [1666.6666, 6000])
assert_equal(ellipord(1, 1.2, 1, 80, analog=True)[0], 9)
class TestBessel(object):
def test_degenerate(self):
for norm in ('delay', 'phase', 'mag'):
# 0-order filter is just a passthrough
b, a = bessel(0, 1, analog=True, norm=norm)
assert_array_equal(b, [1])
assert_array_equal(a, [1])
            # 1st-order filter is the same for all norms
b, a = bessel(1, 1, analog=True, norm=norm)
assert_allclose(b, [1], rtol=1e-15)
assert_allclose(a, [1, 1], rtol=1e-15)
z, p, k = bessel(1, 0.3, analog=True, output='zpk', norm=norm)
assert_array_equal(z, [])
assert_allclose(p, [-0.3], rtol=1e-14)
assert_allclose(k, 0.3, rtol=1e-14)
def test_high_order(self):
# high even order, 'phase'
z, p, k = bessel(24, 100, analog=True, output='zpk')
z2 = []
p2 = [
-9.055312334014323e+01 + 4.844005815403969e+00j,
-8.983105162681878e+01 + 1.454056170018573e+01j,
-8.837357994162065e+01 + 2.426335240122282e+01j,
-8.615278316179575e+01 + 3.403202098404543e+01j,
-8.312326467067703e+01 + 4.386985940217900e+01j,
-7.921695461084202e+01 + 5.380628489700191e+01j,
-7.433392285433246e+01 + 6.388084216250878e+01j,
-6.832565803501586e+01 + 7.415032695116071e+01j,
-6.096221567378025e+01 + 8.470292433074425e+01j,
-5.185914574820616e+01 + 9.569048385258847e+01j,
-4.027853855197555e+01 + 1.074195196518679e+02j,
-2.433481337524861e+01 + 1.207298683731973e+02j,
]
k2 = 9.999999999999989e+47
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
assert_allclose(k, k2, rtol=1e-14)
# high odd order, 'phase'
z, p, k = bessel(23, 1000, analog=True, output='zpk')
z2 = []
p2 = [
-2.497697202208956e+02 + 1.202813187870698e+03j,
-4.126986617510172e+02 + 1.065328794475509e+03j,
-5.304922463809596e+02 + 9.439760364018479e+02j,
-9.027564978975828e+02 + 1.010534334242318e+02j,
-8.909283244406079e+02 + 2.023024699647598e+02j,
-8.709469394347836e+02 + 3.039581994804637e+02j,
-8.423805948131370e+02 + 4.062657947488952e+02j,
-8.045561642249877e+02 + 5.095305912401127e+02j,
-7.564660146766259e+02 + 6.141594859516342e+02j,
-6.965966033906477e+02 + 7.207341374730186e+02j,
-6.225903228776276e+02 + 8.301558302815096e+02j,
-9.066732476324988e+02]
k2 = 9.999999999999983e+68
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
assert_allclose(k, k2, rtol=1e-14)
        # high odd order, 'delay' (Orchard 1965 "The Roots of the
        # Maximally Flat-Delay Polynomials" Table 1)
z, p, k = bessel(31, 1, analog=True, output='zpk', norm='delay')
p2 = [-20.876706,
-20.826543 + 1.735732j,
-20.675502 + 3.473320j,
-20.421895 + 5.214702j,
-20.062802 + 6.961982j,
-19.593895 + 8.717546j,
-19.009148 + 10.484195j,
-18.300400 + 12.265351j,
-17.456663 + 14.065350j,
-16.463032 + 15.889910j,
-15.298849 + 17.746914j,
-13.934466 + 19.647827j,
-12.324914 + 21.610519j,
-10.395893 + 23.665701j,
- 8.005600 + 25.875019j,
- 4.792045 + 28.406037j,
]
assert_allclose(sorted(p, key=np.imag),
sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
        # high even order, 'delay'
z, p, k = bessel(30, 1, analog=True, output='zpk', norm='delay')
p2 = [-20.201029 + 0.867750j,
-20.097257 + 2.604235j,
-19.888485 + 4.343721j,
-19.572188 + 6.088363j,
-19.144380 + 7.840570j,
-18.599342 + 9.603147j,
-17.929195 + 11.379494j,
-17.123228 + 13.173901j,
-16.166808 + 14.992008j,
-15.039580 + 16.841580j,
-13.712245 + 18.733902j,
-12.140295 + 20.686563j,
-10.250119 + 22.729808j,
- 7.901170 + 24.924391j,
- 4.734679 + 27.435615j,
]
assert_allclose(sorted(p, key=np.imag),
sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
def test_refs(self):
# Compare to http://www.crbond.com/papers/bsf2.pdf
# "Delay Normalized Bessel Polynomial Coefficients"
bond_b = 10395
bond_a = [1, 21, 210, 1260, 4725, 10395, 10395]
b, a = bessel(6, 1, norm='delay', analog=True)
assert_allclose(bond_b, b)
assert_allclose(bond_a, a)
# "Delay Normalized Bessel Pole Locations"
bond_poles = {
1: [-1.0000000000],
2: [-1.5000000000 + 0.8660254038j],
3: [-1.8389073227 + 1.7543809598j, -2.3221853546],
4: [-2.1037893972 + 2.6574180419j, -2.8962106028 + 0.8672341289j],
5: [-2.3246743032 + 3.5710229203j, -3.3519563992 + 1.7426614162j,
-3.6467385953],
6: [-2.5159322478 + 4.4926729537j, -3.7357083563 + 2.6262723114j,
-4.2483593959 + 0.8675096732j],
7: [-2.6856768789 + 5.4206941307j, -4.0701391636 + 3.5171740477j,
-4.7582905282 + 1.7392860611j, -4.9717868585],
8: [-2.8389839489 + 6.3539112986j, -4.3682892172 + 4.4144425005j,
-5.2048407906 + 2.6161751526j, -5.5878860433 + 0.8676144454j],
9: [-2.9792607982 + 7.2914636883j, -4.6384398872 + 5.3172716754j,
-5.6044218195 + 3.4981569179j, -6.1293679043 + 1.7378483835j,
-6.2970191817],
10: [-3.1089162336 + 8.2326994591j, -4.8862195669 + 6.2249854825j,
-5.9675283286 + 4.3849471889j, -6.6152909655 + 2.6115679208j,
-6.9220449054 + 0.8676651955j]
}
for N in range(1, 11):
p1 = np.sort(bond_poles[N])
p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'delay')[1])))
assert_array_almost_equal(p1, p2, decimal=10)
# "Frequency Normalized Bessel Pole Locations"
bond_poles = {
1: [-1.0000000000],
2: [-1.1016013306 + 0.6360098248j],
3: [-1.0474091610 + 0.9992644363j, -1.3226757999],
4: [-0.9952087644 + 1.2571057395j, -1.3700678306 + 0.4102497175j],
5: [-0.9576765486 + 1.4711243207j, -1.3808773259 + 0.7179095876j,
-1.5023162714],
6: [-0.9306565229 + 1.6618632689j, -1.3818580976 + 0.9714718907j,
-1.5714904036 + 0.3208963742j],
7: [-0.9098677806 + 1.8364513530j, -1.3789032168 + 1.1915667778j,
-1.6120387662 + 0.5892445069j, -1.6843681793],
8: [-0.8928697188 + 1.9983258436j, -1.3738412176 + 1.3883565759j,
-1.6369394181 + 0.8227956251j, -1.7574084004 + 0.2728675751j],
9: [-0.8783992762 + 2.1498005243j, -1.3675883098 + 1.5677337122j,
-1.6523964846 + 1.0313895670j, -1.8071705350 + 0.5123837306j,
-1.8566005012],
10: [-0.8657569017 + 2.2926048310j, -1.3606922784 + 1.7335057427j,
-1.6618102414 + 1.2211002186j, -1.8421962445 + 0.7272575978j,
-1.9276196914 + 0.2416234710j]
}
for N in range(1, 11):
p1 = np.sort(bond_poles[N])
p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'mag')[1])))
assert_array_almost_equal(p1, p2, decimal=10)
# Compare to http://www.rane.com/note147.html
# "Table 1 - Bessel Crossovers of Second, Third, and Fourth-Order"
a = [1, 1, 1/3]
b2, a2 = bessel(2, 1, norm='delay', analog=True)
assert_allclose(a[::-1], a2/b2)
a = [1, 1, 2/5, 1/15]
b2, a2 = bessel(3, 1, norm='delay', analog=True)
assert_allclose(a[::-1], a2/b2)
a = [1, 1, 9/21, 2/21, 1/105]
b2, a2 = bessel(4, 1, norm='delay', analog=True)
assert_allclose(a[::-1], a2/b2)
a = [1, np.sqrt(3), 1]
b2, a2 = bessel(2, 1, norm='phase', analog=True)
assert_allclose(a[::-1], a2/b2)
# TODO: Why so inaccurate? Is reference flawed?
a = [1, 2.481, 2.463, 1.018]
b2, a2 = bessel(3, 1, norm='phase', analog=True)
assert_array_almost_equal(a[::-1], a2/b2, decimal=1)
# TODO: Why so inaccurate? Is reference flawed?
a = [1, 3.240, 4.5, 3.240, 1.050]
b2, a2 = bessel(4, 1, norm='phase', analog=True)
assert_array_almost_equal(a[::-1], a2/b2, decimal=1)
# Table of -3 dB factors:
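        # ('mag' and 'phase' prototypes differ only by a frequency scaling,
        # so the elementwise ratio of their pole arrays is a constant array
        # equal to that scale factor)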
N, scale = 2, 1.272
scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1]
assert_array_almost_equal(scale, scale2, decimal=3)
# TODO: Why so inaccurate? Is reference flawed?
N, scale = 3, 1.413
scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1]
assert_array_almost_equal(scale, scale2, decimal=2)
# TODO: Why so inaccurate? Is reference flawed?
N, scale = 4, 1.533
scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1]
assert_array_almost_equal(scale, scale2, decimal=1)

    def test_hardcoded(self):
# Compare to values from original hardcoded implementation
originals = {
0: [],
1: [-1],
2: [-.8660254037844386467637229 + .4999999999999999999999996j],
3: [-.9416000265332067855971980,
-.7456403858480766441810907 + .7113666249728352680992154j],
4: [-.6572111716718829545787788 + .8301614350048733772399715j,
-.9047587967882449459642624 + .2709187330038746636700926j],
5: [-.9264420773877602247196260,
-.8515536193688395541722677 + .4427174639443327209850002j,
-.5905759446119191779319432 + .9072067564574549539291747j],
6: [-.9093906830472271808050953 + .1856964396793046769246397j,
-.7996541858328288520243325 + .5621717346937317988594118j,
-.5385526816693109683073792 + .9616876881954277199245657j],
7: [-.9194871556490290014311619,
-.8800029341523374639772340 + .3216652762307739398381830j,
-.7527355434093214462291616 + .6504696305522550699212995j,
-.4966917256672316755024763 + 1.002508508454420401230220j],
8: [-.9096831546652910216327629 + .1412437976671422927888150j,
-.8473250802359334320103023 + .4259017538272934994996429j,
-.7111381808485399250796172 + .7186517314108401705762571j,
-.4621740412532122027072175 + 1.034388681126901058116589j],
9: [-.9154957797499037686769223,
-.8911217017079759323183848 + .2526580934582164192308115j,
-.8148021112269012975514135 + .5085815689631499483745341j,
-.6743622686854761980403401 + .7730546212691183706919682j,
-.4331415561553618854685942 + 1.060073670135929666774323j],
10: [-.9091347320900502436826431 + .1139583137335511169927714j,
-.8688459641284764527921864 + .3430008233766309973110589j,
-.7837694413101441082655890 + .5759147538499947070009852j,
-.6417513866988316136190854 + .8175836167191017226233947j,
-.4083220732868861566219785 + 1.081274842819124562037210j],
11: [-.9129067244518981934637318,
-.8963656705721166099815744 + .2080480375071031919692341j,
-.8453044014712962954184557 + .4178696917801248292797448j,
-.7546938934722303128102142 + .6319150050721846494520941j,
-.6126871554915194054182909 + .8547813893314764631518509j,
-.3868149510055090879155425 + 1.099117466763120928733632j],
12: [-.9084478234140682638817772 + 95506365213450398415258360e-27j,
-.8802534342016826507901575 + .2871779503524226723615457j,
-.8217296939939077285792834 + .4810212115100676440620548j,
-.7276681615395159454547013 + .6792961178764694160048987j,
-.5866369321861477207528215 + .8863772751320727026622149j,
-.3679640085526312839425808 + 1.114373575641546257595657j],
13: [-.9110914665984182781070663,
-.8991314665475196220910718 + .1768342956161043620980863j,
-.8625094198260548711573628 + .3547413731172988997754038j,
-.7987460692470972510394686 + .5350752120696801938272504j,
-.7026234675721275653944062 + .7199611890171304131266374j,
-.5631559842430199266325818 + .9135900338325109684927731j,
-.3512792323389821669401925 + 1.127591548317705678613239j],
14: [-.9077932138396487614720659 + 82196399419401501888968130e-27j,
-.8869506674916445312089167 + .2470079178765333183201435j,
-.8441199160909851197897667 + .4131653825102692595237260j,
-.7766591387063623897344648 + .5819170677377608590492434j,
-.6794256425119233117869491 + .7552857305042033418417492j,
-.5418766775112297376541293 + .9373043683516919569183099j,
-.3363868224902037330610040 + 1.139172297839859991370924j],
15: [-.9097482363849064167228581,
-.9006981694176978324932918 + .1537681197278439351298882j,
-.8731264620834984978337843 + .3082352470564267657715883j,
-.8256631452587146506294553 + .4642348752734325631275134j,
-.7556027168970728127850416 + .6229396358758267198938604j,
-.6579196593110998676999362 + .7862895503722515897065645j,
-.5224954069658330616875186 + .9581787261092526478889345j,
-.3229963059766444287113517 + 1.149416154583629539665297j],
16: [-.9072099595087001356491337 + 72142113041117326028823950e-27j,
-.8911723070323647674780132 + .2167089659900576449410059j,
-.8584264231521330481755780 + .3621697271802065647661080j,
-.8074790293236003885306146 + .5092933751171800179676218j,
-.7356166304713115980927279 + .6591950877860393745845254j,
-.6379502514039066715773828 + .8137453537108761895522580j,
-.5047606444424766743309967 + .9767137477799090692947061j,
-.3108782755645387813283867 + 1.158552841199330479412225j],
17: [-.9087141161336397432860029,
-.9016273850787285964692844 + .1360267995173024591237303j,
-.8801100704438627158492165 + .2725347156478803885651973j,
-.8433414495836129204455491 + .4100759282910021624185986j,
-.7897644147799708220288138 + .5493724405281088674296232j,
-.7166893842372349049842743 + .6914936286393609433305754j,
-.6193710717342144521602448 + .8382497252826992979368621j,
-.4884629337672704194973683 + .9932971956316781632345466j,
-.2998489459990082015466971 + 1.166761272925668786676672j],
18: [-.9067004324162775554189031 + 64279241063930693839360680e-27j,
-.8939764278132455733032155 + .1930374640894758606940586j,
-.8681095503628830078317207 + .3224204925163257604931634j,
-.8281885016242836608829018 + .4529385697815916950149364j,
-.7726285030739558780127746 + .5852778162086640620016316j,
-.6987821445005273020051878 + .7204696509726630531663123j,
-.6020482668090644386627299 + .8602708961893664447167418j,
-.4734268069916151511140032 + 1.008234300314801077034158j,
-.2897592029880489845789953 + 1.174183010600059128532230j],
19: [-.9078934217899404528985092,
-.9021937639390660668922536 + .1219568381872026517578164j,
-.8849290585034385274001112 + .2442590757549818229026280j,
-.8555768765618421591093993 + .3672925896399872304734923j,
-.8131725551578197705476160 + .4915365035562459055630005j,
-.7561260971541629355231897 + .6176483917970178919174173j,
-.6818424412912442033411634 + .7466272357947761283262338j,
-.5858613321217832644813602 + .8801817131014566284786759j,
-.4595043449730988600785456 + 1.021768776912671221830298j,
-.2804866851439370027628724 + 1.180931628453291873626003j],
20: [-.9062570115576771146523497 + 57961780277849516990208850e-27j,
-.8959150941925768608568248 + .1740317175918705058595844j,
-.8749560316673332850673214 + .2905559296567908031706902j,
-.8427907479956670633544106 + .4078917326291934082132821j,
-.7984251191290606875799876 + .5264942388817132427317659j,
-.7402780309646768991232610 + .6469975237605228320268752j,
-.6658120544829934193890626 + .7703721701100763015154510j,
-.5707026806915714094398061 + .8982829066468255593407161j,
-.4465700698205149555701841 + 1.034097702560842962315411j,
-.2719299580251652601727704 + 1.187099379810885886139638j],
21: [-.9072262653142957028884077,
-.9025428073192696303995083 + .1105252572789856480992275j,
-.8883808106664449854431605 + .2213069215084350419975358j,
-.8643915813643204553970169 + .3326258512522187083009453j,
-.8299435470674444100273463 + .4448177739407956609694059j,
-.7840287980408341576100581 + .5583186348022854707564856j,
-.7250839687106612822281339 + .6737426063024382240549898j,
-.6506315378609463397807996 + .7920349342629491368548074j,
-.5564766488918562465935297 + .9148198405846724121600860j,
-.4345168906815271799687308 + 1.045382255856986531461592j,
-.2640041595834031147954813 + 1.192762031948052470183960j],
22: [-.9058702269930872551848625 + 52774908289999045189007100e-27j,
-.8972983138153530955952835 + .1584351912289865608659759j,
-.8799661455640176154025352 + .2644363039201535049656450j,
-.8534754036851687233084587 + .3710389319482319823405321j,
-.8171682088462720394344996 + .4785619492202780899653575j,
-.7700332930556816872932937 + .5874255426351153211965601j,
-.7105305456418785989070935 + .6982266265924524000098548j,
-.6362427683267827226840153 + .8118875040246347267248508j,
-.5430983056306302779658129 + .9299947824439872998916657j,
-.4232528745642628461715044 + 1.055755605227545931204656j,
-.2566376987939318038016012 + 1.197982433555213008346532j],
23: [-.9066732476324988168207439,
-.9027564979912504609412993 + .1010534335314045013252480j,
-.8909283242471251458653994 + .2023024699381223418195228j,
-.8709469395587416239596874 + .3039581993950041588888925j,
-.8423805948021127057054288 + .4062657948237602726779246j,
-.8045561642053176205623187 + .5095305912227258268309528j,
-.7564660146829880581478138 + .6141594859476032127216463j,
-.6965966033912705387505040 + .7207341374753046970247055j,
-.6225903228771341778273152 + .8301558302812980678845563j,
-.5304922463810191698502226 + .9439760364018300083750242j,
-.4126986617510148836149955 + 1.065328794475513585531053j,
-.2497697202208956030229911 + 1.202813187870697831365338j],
24: [-.9055312363372773709269407 + 48440066540478700874836350e-27j,
-.8983105104397872954053307 + .1454056133873610120105857j,
-.8837358034555706623131950 + .2426335234401383076544239j,
-.8615278304016353651120610 + .3403202112618624773397257j,
-.8312326466813240652679563 + .4386985933597305434577492j,
-.7921695462343492518845446 + .5380628490968016700338001j,
-.7433392285088529449175873 + .6388084216222567930378296j,
-.6832565803536521302816011 + .7415032695091650806797753j,
-.6096221567378335562589532 + .8470292433077202380020454j,
-.5185914574820317343536707 + .9569048385259054576937721j,
-.4027853855197518014786978 + 1.074195196518674765143729j,
-.2433481337524869675825448 + 1.207298683731972524975429j],
25: [-.9062073871811708652496104,
-.9028833390228020537142561 + 93077131185102967450643820e-27j,
-.8928551459883548836774529 + .1863068969804300712287138j,
-.8759497989677857803656239 + .2798521321771408719327250j,
-.8518616886554019782346493 + .3738977875907595009446142j,
-.8201226043936880253962552 + .4686668574656966589020580j,
-.7800496278186497225905443 + .5644441210349710332887354j,
-.7306549271849967721596735 + .6616149647357748681460822j,
-.6704827128029559528610523 + .7607348858167839877987008j,
-.5972898661335557242320528 + .8626676330388028512598538j,
-.5073362861078468845461362 + .9689006305344868494672405j,
-.3934529878191079606023847 + 1.082433927173831581956863j,
-.2373280669322028974199184 + 1.211476658382565356579418j],
}
for N in originals:
p1 = sorted(np.union1d(originals[N],
np.conj(originals[N])), key=np.imag)
p2 = sorted(besselap(N)[1], key=np.imag)
assert_allclose(p1, p2, rtol=1e-14)

    def test_norm_phase(self):
# Test some orders and frequencies and see that they have the right
# phase at w0
for N in (1, 2, 3, 4, 5, 51, 72):
for w0 in (1, 100):
b, a = bessel(N, w0, analog=True, norm='phase')
w = np.linspace(0, w0, 100)
w, h = freqs(b, a, w)
phase = np.unwrap(np.angle(h))
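                # ('phase' normalization: the phase reaches half of its
                # asymptotic -N*pi/2 exactly at w0)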
assert_allclose(phase[[0, -1]], (0, -N*pi/4), rtol=1e-1)

    def test_norm_mag(self):
# Test some orders and frequencies and see that they have the right
# mag at w0
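        # ('mag' normalization places the -3 dB half-power point at w0)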
for N in (1, 2, 3, 4, 5, 51, 72):
for w0 in (1, 100):
b, a = bessel(N, w0, analog=True, norm='mag')
w = (0, w0)
w, h = freqs(b, a, w)
mag = abs(h)
assert_allclose(mag, (1, 1/np.sqrt(2)))

    def test_norm_delay(self):
# Test some orders and frequencies and see that they have the right
# delay at DC
for N in (1, 2, 3, 4, 5, 51, 72):
for w0 in (1, 100):
b, a = bessel(N, w0, analog=True, norm='delay')
w = np.linspace(0, 10*w0, 1000)
w, h = freqs(b, a, w)
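                # group delay is -d(phase)/dw, approximated by finite
                # differences of the unwrapped phase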
delay = -np.diff(np.unwrap(np.angle(h)))/np.diff(w)
assert_allclose(delay[0], 1/w0, rtol=1e-4)

    def test_norm_factor(self):
mpmath_values = {
1: 1, 2: 1.361654128716130520, 3: 1.755672368681210649,
4: 2.113917674904215843, 5: 2.427410702152628137,
6: 2.703395061202921876, 7: 2.951722147038722771,
8: 3.179617237510651330, 9: 3.391693138911660101,
10: 3.590980594569163482, 11: 3.779607416439620092,
12: 3.959150821144285315, 13: 4.130825499383535980,
14: 4.295593409533637564, 15: 4.454233021624377494,
16: 4.607385465472647917, 17: 4.755586548961147727,
18: 4.899289677284488007, 19: 5.038882681488207605,
20: 5.174700441742707423, 21: 5.307034531360917274,
22: 5.436140703250035999, 23: 5.562244783787878196,
24: 5.685547371295963521, 25: 5.806227623775418541,
50: 8.268963160013226298, 51: 8.352374541546012058,
}
for N in mpmath_values:
z, p, k = besselap(N, 'delay')
assert_allclose(mpmath_values[N], _norm_factor(p, k), rtol=1e-13)
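        # (these reference values can be reproduced in arbitrary precision;
        # a sketch, assuming mpmath is available: with H(s) the
        # delay-normalized prototype, solve abs(H(1j*w)) == 1/mpmath.sqrt(2)
        # for w using mpmath.findroot)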

    def test_bessel_poly(self):
assert_array_equal(_bessel_poly(5), [945, 945, 420, 105, 15, 1])
assert_array_equal(_bessel_poly(4, True), [1, 10, 45, 105, 105])
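        # (the x**k coefficient of the reverse Bessel polynomial is
        # (2*N - k)! / (2**(N - k) * k! * (N - k)!); e.g. N=5, k=0 gives
        # 10! / (32 * 120) = 945)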

    def test_bessel_zeros(self):
assert_array_equal(_bessel_zeros(0), [])

    def test_invalid(self):
assert_raises(ValueError, besselap, 5, 'nonsense')
assert_raises(ValueError, besselap, -5)
assert_raises(ValueError, besselap, 3.2)
assert_raises(ValueError, _bessel_poly, -3)
assert_raises(ValueError, _bessel_poly, 3.3)


class TestButter(object):
def test_degenerate(self):
# 0-order filter is just a passthrough
b, a = butter(0, 1, analog=True)
assert_array_equal(b, [1])
assert_array_equal(a, [1])
        # 1-order filter is the same for all types
b, a = butter(1, 1, analog=True)
assert_array_almost_equal(b, [1])
assert_array_almost_equal(a, [1, 1])
z, p, k = butter(1, 0.3, output='zpk')
assert_array_equal(z, [-1])
assert_allclose(p, [3.249196962329063e-01], rtol=1e-14)
assert_allclose(k, 3.375401518835469e-01, rtol=1e-14)

    def test_basic(self):
# analog s-plane
for N in range(25):
wn = 0.01
z, p, k = butter(N, wn, 'low', analog=True, output='zpk')
assert_array_almost_equal([], z)
assert_(len(p) == N)
# All poles should be at distance wn from origin
assert_array_almost_equal(wn, abs(p))
assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane
assert_array_almost_equal(wn**N, k)
# digital z-plane
for N in range(25):
wn = 0.01
z, p, k = butter(N, wn, 'high', analog=False, output='zpk')
assert_array_equal(np.ones(N), z) # All zeros exactly at DC
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle
b1, a1 = butter(2, 1, analog=True)
assert_array_almost_equal(b1, [1])
assert_array_almost_equal(a1, [1, np.sqrt(2), 1])
b2, a2 = butter(5, 1, analog=True)
assert_array_almost_equal(b2, [1])
assert_array_almost_equal(a2, [1, 3.2361, 5.2361,
5.2361, 3.2361, 1], decimal=4)
b3, a3 = butter(10, 1, analog=True)
assert_array_almost_equal(b3, [1])
assert_array_almost_equal(a3, [1, 6.3925, 20.4317, 42.8021, 64.8824,
74.2334, 64.8824, 42.8021, 20.4317,
6.3925, 1], decimal=4)
b2, a2 = butter(19, 1.0441379169150726, analog=True)
assert_array_almost_equal(b2, [2.2720], decimal=4)
assert_array_almost_equal(a2, 1.0e+004 * np.array([
0.0001, 0.0013, 0.0080, 0.0335, 0.1045, 0.2570,
0.5164, 0.8669, 1.2338, 1.5010, 1.5672, 1.4044,
1.0759, 0.6986, 0.3791, 0.1681, 0.0588, 0.0153,
0.0026, 0.0002]), decimal=0)
b, a = butter(5, 0.4)
assert_array_almost_equal(b, [0.0219, 0.1097, 0.2194,
0.2194, 0.1097, 0.0219], decimal=4)
assert_array_almost_equal(a, [1.0000, -0.9853, 0.9738,
-0.3864, 0.1112, -0.0113], decimal=4)
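        # The property behind the analog checks above: |H(jw)|**2 =
        # 1/(1 + w**(2*N)), so the gain at the cutoff is exactly 1/sqrt(2).
        # A short sketch:
        #
        #   b, a = butter(4, 1, analog=True)
        #   w, h = freqs(b, a, [1])
        #   assert_allclose(abs(h), [1/np.sqrt(2)])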

    def test_highpass(self):
# highpass, high even order
z, p, k = butter(28, 0.43, 'high', output='zpk')
z2 = np.ones(28)
p2 = [
2.068257195514592e-01 + 9.238294351481734e-01j,
2.068257195514592e-01 - 9.238294351481734e-01j,
1.874933103892023e-01 + 8.269455076775277e-01j,
1.874933103892023e-01 - 8.269455076775277e-01j,
1.717435567330153e-01 + 7.383078571194629e-01j,
1.717435567330153e-01 - 7.383078571194629e-01j,
1.588266870755982e-01 + 6.564623730651094e-01j,
1.588266870755982e-01 - 6.564623730651094e-01j,
1.481881532502603e-01 + 5.802343458081779e-01j,
1.481881532502603e-01 - 5.802343458081779e-01j,
1.394122576319697e-01 + 5.086609000582009e-01j,
1.394122576319697e-01 - 5.086609000582009e-01j,
1.321840881809715e-01 + 4.409411734716436e-01j,
1.321840881809715e-01 - 4.409411734716436e-01j,
1.262633413354405e-01 + 3.763990035551881e-01j,
1.262633413354405e-01 - 3.763990035551881e-01j,
1.214660449478046e-01 + 3.144545234797277e-01j,
1.214660449478046e-01 - 3.144545234797277e-01j,
1.104868766650320e-01 + 2.771505404367791e-02j,
1.104868766650320e-01 - 2.771505404367791e-02j,
1.111768629525075e-01 + 8.331369153155753e-02j,
1.111768629525075e-01 - 8.331369153155753e-02j,
1.125740630842972e-01 + 1.394219509611784e-01j,
1.125740630842972e-01 - 1.394219509611784e-01j,
1.147138487992747e-01 + 1.963932363793666e-01j,
1.147138487992747e-01 - 1.963932363793666e-01j,
1.176516491045901e-01 + 2.546021573417188e-01j,
1.176516491045901e-01 - 2.546021573417188e-01j,
]
k2 = 1.446671081817286e-06
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-7)
assert_allclose(k, k2, rtol=1e-10)
# highpass, high odd order
z, p, k = butter(27, 0.56, 'high', output='zpk')
z2 = np.ones(27)
p2 = [
-1.772572785680147e-01 + 9.276431102995948e-01j,
-1.772572785680147e-01 - 9.276431102995948e-01j,
-1.600766565322114e-01 + 8.264026279893268e-01j,
-1.600766565322114e-01 - 8.264026279893268e-01j,
-1.461948419016121e-01 + 7.341841939120078e-01j,
-1.461948419016121e-01 - 7.341841939120078e-01j,
-1.348975284762046e-01 + 6.493235066053785e-01j,
-1.348975284762046e-01 - 6.493235066053785e-01j,
-1.256628210712206e-01 + 5.704921366889227e-01j,
-1.256628210712206e-01 - 5.704921366889227e-01j,
-1.181038235962314e-01 + 4.966120551231630e-01j,
-1.181038235962314e-01 - 4.966120551231630e-01j,
-1.119304913239356e-01 + 4.267938916403775e-01j,
-1.119304913239356e-01 - 4.267938916403775e-01j,
-1.069237739782691e-01 + 3.602914879527338e-01j,
-1.069237739782691e-01 - 3.602914879527338e-01j,
-1.029178030691416e-01 + 2.964677964142126e-01j,
-1.029178030691416e-01 - 2.964677964142126e-01j,
-9.978747500816100e-02 + 2.347687643085738e-01j,
-9.978747500816100e-02 - 2.347687643085738e-01j,
-9.743974496324025e-02 + 1.747028739092479e-01j,
-9.743974496324025e-02 - 1.747028739092479e-01j,
-9.580754551625957e-02 + 1.158246860771989e-01j,
-9.580754551625957e-02 - 1.158246860771989e-01j,
-9.484562207782568e-02 + 5.772118357151691e-02j,
-9.484562207782568e-02 - 5.772118357151691e-02j,
-9.452783117928215e-02
]
k2 = 9.585686688851069e-09
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-8)
assert_allclose(k, k2)

    def test_bandpass(self):
z, p, k = butter(8, [0.25, 0.33], 'band', output='zpk')
z2 = [1, 1, 1, 1, 1, 1, 1, 1,
-1, -1, -1, -1, -1, -1, -1, -1]
p2 = [
4.979909925436156e-01 + 8.367609424799387e-01j,
4.979909925436156e-01 - 8.367609424799387e-01j,
4.913338722555539e-01 + 7.866774509868817e-01j,
4.913338722555539e-01 - 7.866774509868817e-01j,
5.035229361778706e-01 + 7.401147376726750e-01j,
5.035229361778706e-01 - 7.401147376726750e-01j,
5.307617160406101e-01 + 7.029184459442954e-01j,
5.307617160406101e-01 - 7.029184459442954e-01j,
5.680556159453138e-01 + 6.788228792952775e-01j,
5.680556159453138e-01 - 6.788228792952775e-01j,
6.100962560818854e-01 + 6.693849403338664e-01j,
6.100962560818854e-01 - 6.693849403338664e-01j,
6.904694312740631e-01 + 6.930501690145245e-01j,
6.904694312740631e-01 - 6.930501690145245e-01j,
6.521767004237027e-01 + 6.744414640183752e-01j,
6.521767004237027e-01 - 6.744414640183752e-01j,
]
k2 = 3.398854055800844e-08
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-13)
# bandpass analog
z, p, k = butter(4, [90.5, 110.5], 'bp', analog=True, output='zpk')
z2 = np.zeros(4)
p2 = [
-4.179137760733086e+00 + 1.095935899082837e+02j,
-4.179137760733086e+00 - 1.095935899082837e+02j,
-9.593598668443835e+00 + 1.034745398029734e+02j,
-9.593598668443835e+00 - 1.034745398029734e+02j,
-8.883991981781929e+00 + 9.582087115567160e+01j,
-8.883991981781929e+00 - 9.582087115567160e+01j,
-3.474530886568715e+00 + 9.111599925805801e+01j,
-3.474530886568715e+00 - 9.111599925805801e+01j,
]
k2 = 1.600000000000001e+05
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
assert_allclose(k, k2, rtol=1e-15)

    def test_bandstop(self):
z, p, k = butter(7, [0.45, 0.56], 'stop', output='zpk')
z2 = [-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j]
p2 = [-1.766850742887729e-01 + 9.466951258673900e-01j,
-1.766850742887729e-01 - 9.466951258673900e-01j,
1.467897662432886e-01 + 9.515917126462422e-01j,
1.467897662432886e-01 - 9.515917126462422e-01j,
-1.370083529426906e-01 + 8.880376681273993e-01j,
-1.370083529426906e-01 - 8.880376681273993e-01j,
1.086774544701390e-01 + 8.915240810704319e-01j,
1.086774544701390e-01 - 8.915240810704319e-01j,
-7.982704457700891e-02 + 8.506056315273435e-01j,
-7.982704457700891e-02 - 8.506056315273435e-01j,
5.238812787110331e-02 + 8.524011102699969e-01j,
5.238812787110331e-02 - 8.524011102699969e-01j,
-1.357545000491310e-02 + 8.382287744986582e-01j,
-1.357545000491310e-02 - 8.382287744986582e-01j]
k2 = 4.577122512960063e-01
assert_allclose(sorted(z, key=np.imag), sorted(z2, key=np.imag))
assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
assert_allclose(k, k2, rtol=1e-14)

    def test_ba_output(self):
b, a = butter(4, [100, 300], 'bandpass', analog=True)
b2 = [1.6e+09, 0, 0, 0, 0]
a2 = [1.000000000000000e+00, 5.226251859505511e+02,
2.565685424949238e+05, 6.794127417357160e+07,
1.519411254969542e+10, 2.038238225207147e+12,
2.309116882454312e+14, 1.411088002066486e+16,
8.099999999999991e+17]
assert_allclose(b, b2, rtol=1e-14)
assert_allclose(a, a2, rtol=1e-14)


class TestCheby1(object):
def test_degenerate(self):
# 0-order filter is just a passthrough
# Even-order filters have DC gain of -rp dB
b, a = cheby1(0, 10*np.log10(2), 1, analog=True)
assert_array_almost_equal(b, [1/np.sqrt(2)])
assert_array_equal(a, [1])
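        # The same -rp dB DC gain holds for any even order; a one-line
        # sketch of the check (not part of the original reference data):
        #
        #   b, a = cheby1(2, 10*np.log10(2), 1, analog=True)
        #   assert_allclose(b[-1]/a[-1], 1/np.sqrt(2))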
        # 1-order filter is the same for all types
b, a = cheby1(1, 10*np.log10(2), 1, analog=True)
assert_array_almost_equal(b, [1])
assert_array_almost_equal(a, [1, 1])
z, p, k = cheby1(1, 0.1, 0.3, output='zpk')
assert_array_equal(z, [-1])
assert_allclose(p, [-5.390126972799615e-01], rtol=1e-14)
assert_allclose(k, 7.695063486399808e-01, rtol=1e-14)

    def test_basic(self):
for N in range(25):
wn = 0.01
z, p, k = cheby1(N, 1, wn, 'low', analog=True, output='zpk')
assert_array_almost_equal([], z)
assert_(len(p) == N)
assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane
for N in range(25):
wn = 0.01
z, p, k = cheby1(N, 1, wn, 'high', analog=False, output='zpk')
assert_array_equal(np.ones(N), z) # All zeros exactly at DC
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle
# Same test as TestNormalize
b, a = cheby1(8, 0.5, 0.048)
assert_array_almost_equal(b, [
2.150733144728282e-11, 1.720586515782626e-10,
6.022052805239190e-10, 1.204410561047838e-09,
1.505513201309798e-09, 1.204410561047838e-09,
6.022052805239190e-10, 1.720586515782626e-10,
2.150733144728282e-11], decimal=14)
assert_array_almost_equal(a, [
1.000000000000000e+00, -7.782402035027959e+00,
2.654354569747454e+01, -5.182182531666387e+01,
6.334127355102684e+01, -4.963358186631157e+01,
2.434862182949389e+01, -6.836925348604676e+00,
8.412934944449140e-01], decimal=14)
b, a = cheby1(4, 1, [0.4, 0.7], btype='band')
assert_array_almost_equal(b, [0.0084, 0, -0.0335, 0, 0.0502, 0,
-0.0335, 0, 0.0084], decimal=4)
assert_array_almost_equal(a, [1.0, 1.1191, 2.862, 2.2986, 3.4137,
1.8653, 1.8982, 0.5676, 0.4103],
decimal=4)
b2, a2 = cheby1(5, 3, 1, analog=True)
assert_array_almost_equal(b2, [0.0626], decimal=4)
assert_array_almost_equal(a2, [1, 0.5745, 1.4150, 0.5489, 0.4080,
0.0626], decimal=4)
b, a = cheby1(8, 0.5, 0.1)
assert_array_almost_equal(b, 1.0e-006 * np.array([
0.00703924326028, 0.05631394608227, 0.19709881128793,
0.39419762257586, 0.49274702821983, 0.39419762257586,
0.19709881128793, 0.05631394608227, 0.00703924326028]),
decimal=13)
assert_array_almost_equal(a, [
1.00000000000000, -7.44912258934158, 24.46749067762108,
-46.27560200466141, 55.11160187999928, -42.31640010161038,
20.45543300484147, -5.69110270561444, 0.69770374759022],
decimal=13)
b, a = cheby1(8, 0.5, 0.25)
assert_array_almost_equal(b, 1.0e-003 * np.array([
0.00895261138923, 0.07162089111382, 0.25067311889837,
0.50134623779673, 0.62668279724591, 0.50134623779673,
0.25067311889837, 0.07162089111382, 0.00895261138923]),
decimal=13)
assert_array_almost_equal(a, [1.00000000000000, -5.97529229188545,
16.58122329202101, -27.71423273542923,
30.39509758355313, -22.34729670426879,
10.74509800434910, -3.08924633697497,
0.40707685889802], decimal=13)

    def test_highpass(self):
# high even order
z, p, k = cheby1(24, 0.7, 0.2, 'high', output='zpk')
z2 = np.ones(24)
p2 = [-6.136558509657073e-01 + 2.700091504942893e-01j,
-6.136558509657073e-01 - 2.700091504942893e-01j,
-3.303348340927516e-01 + 6.659400861114254e-01j,
-3.303348340927516e-01 - 6.659400861114254e-01j,
8.779713780557169e-03 + 8.223108447483040e-01j,
8.779713780557169e-03 - 8.223108447483040e-01j,
2.742361123006911e-01 + 8.356666951611864e-01j,
2.742361123006911e-01 - 8.356666951611864e-01j,
4.562984557158206e-01 + 7.954276912303594e-01j,
4.562984557158206e-01 - 7.954276912303594e-01j,
5.777335494123628e-01 + 7.435821817961783e-01j,
5.777335494123628e-01 - 7.435821817961783e-01j,
6.593260977749194e-01 + 6.955390907990932e-01j,
6.593260977749194e-01 - 6.955390907990932e-01j,
7.149590948466562e-01 + 6.559437858502012e-01j,
7.149590948466562e-01 - 6.559437858502012e-01j,
7.532432388188739e-01 + 6.256158042292060e-01j,
7.532432388188739e-01 - 6.256158042292060e-01j,
7.794365244268271e-01 + 6.042099234813333e-01j,
7.794365244268271e-01 - 6.042099234813333e-01j,
7.967253874772997e-01 + 5.911966597313203e-01j,
7.967253874772997e-01 - 5.911966597313203e-01j,
8.069756417293870e-01 + 5.862214589217275e-01j,
8.069756417293870e-01 - 5.862214589217275e-01j]
k2 = 6.190427617192018e-04
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-10)
assert_allclose(k, k2, rtol=1e-10)
# high odd order
z, p, k = cheby1(23, 0.8, 0.3, 'high', output='zpk')
z2 = np.ones(23)
p2 = [-7.676400532011010e-01,
-6.754621070166477e-01 + 3.970502605619561e-01j,
-6.754621070166477e-01 - 3.970502605619561e-01j,
-4.528880018446727e-01 + 6.844061483786332e-01j,
-4.528880018446727e-01 - 6.844061483786332e-01j,
-1.986009130216447e-01 + 8.382285942941594e-01j,
-1.986009130216447e-01 - 8.382285942941594e-01j,
2.504673931532608e-02 + 8.958137635794080e-01j,
2.504673931532608e-02 - 8.958137635794080e-01j,
2.001089429976469e-01 + 9.010678290791480e-01j,
2.001089429976469e-01 - 9.010678290791480e-01j,
3.302410157191755e-01 + 8.835444665962544e-01j,
3.302410157191755e-01 - 8.835444665962544e-01j,
4.246662537333661e-01 + 8.594054226449009e-01j,
4.246662537333661e-01 - 8.594054226449009e-01j,
4.919620928120296e-01 + 8.366772762965786e-01j,
4.919620928120296e-01 - 8.366772762965786e-01j,
5.385746917494749e-01 + 8.191616180796720e-01j,
5.385746917494749e-01 - 8.191616180796720e-01j,
5.855636993537203e-01 + 8.060680937701062e-01j,
5.855636993537203e-01 - 8.060680937701062e-01j,
5.688812849391721e-01 + 8.086497795114683e-01j,
5.688812849391721e-01 - 8.086497795114683e-01j]
k2 = 1.941697029206324e-05
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-10)
assert_allclose(k, k2, rtol=1e-10)
z, p, k = cheby1(10, 1, 1000, 'high', analog=True, output='zpk')
z2 = np.zeros(10)
p2 = [-3.144743169501551e+03 + 3.511680029092744e+03j,
-3.144743169501551e+03 - 3.511680029092744e+03j,
-5.633065604514602e+02 + 2.023615191183945e+03j,
-5.633065604514602e+02 - 2.023615191183945e+03j,
-1.946412183352025e+02 + 1.372309454274755e+03j,
-1.946412183352025e+02 - 1.372309454274755e+03j,
-7.987162953085479e+01 + 1.105207708045358e+03j,
-7.987162953085479e+01 - 1.105207708045358e+03j,
-2.250315039031946e+01 + 1.001723931471477e+03j,
-2.250315039031946e+01 - 1.001723931471477e+03j]
k2 = 8.912509381337453e-01
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-15)

    def test_bandpass(self):
z, p, k = cheby1(8, 1, [0.3, 0.4], 'bp', output='zpk')
z2 = [1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1]
p2 = [3.077784854851463e-01 + 9.453307017592942e-01j,
3.077784854851463e-01 - 9.453307017592942e-01j,
3.280567400654425e-01 + 9.272377218689016e-01j,
3.280567400654425e-01 - 9.272377218689016e-01j,
3.677912763284301e-01 + 9.038008865279966e-01j,
3.677912763284301e-01 - 9.038008865279966e-01j,
4.194425632520948e-01 + 8.769407159656157e-01j,
4.194425632520948e-01 - 8.769407159656157e-01j,
4.740921994669189e-01 + 8.496508528630974e-01j,
4.740921994669189e-01 - 8.496508528630974e-01j,
5.234866481897429e-01 + 8.259608422808477e-01j,
5.234866481897429e-01 - 8.259608422808477e-01j,
5.844717632289875e-01 + 8.052901363500210e-01j,
5.844717632289875e-01 - 8.052901363500210e-01j,
5.615189063336070e-01 + 8.100667803850766e-01j,
5.615189063336070e-01 - 8.100667803850766e-01j]
k2 = 5.007028718074307e-09
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-13)

    def test_bandstop(self):
z, p, k = cheby1(7, 1, [0.5, 0.6], 'stop', output='zpk')
z2 = [-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j]
p2 = [-8.942974551472813e-02 + 3.482480481185926e-01j,
-8.942974551472813e-02 - 3.482480481185926e-01j,
1.293775154041798e-01 + 8.753499858081858e-01j,
1.293775154041798e-01 - 8.753499858081858e-01j,
3.399741945062013e-02 + 9.690316022705607e-01j,
3.399741945062013e-02 - 9.690316022705607e-01j,
4.167225522796539e-04 + 9.927338161087488e-01j,
4.167225522796539e-04 - 9.927338161087488e-01j,
-3.912966549550960e-01 + 8.046122859255742e-01j,
-3.912966549550960e-01 - 8.046122859255742e-01j,
-3.307805547127368e-01 + 9.133455018206508e-01j,
-3.307805547127368e-01 - 9.133455018206508e-01j,
-3.072658345097743e-01 + 9.443589759799366e-01j,
-3.072658345097743e-01 - 9.443589759799366e-01j]
k2 = 3.619438310405028e-01
assert_allclose(sorted(z, key=np.imag),
sorted(z2, key=np.imag), rtol=1e-13)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-15)

    def test_ba_output(self):
# with transfer function conversion, without digital conversion
b, a = cheby1(5, 0.9, [210, 310], 'stop', analog=True)
b2 = [1.000000000000006e+00, 0,
3.255000000000020e+05, 0,
4.238010000000026e+10, 0,
2.758944510000017e+15, 0,
8.980364380050052e+19, 0,
1.169243442282517e+24
]
a2 = [1.000000000000000e+00, 4.630555945694342e+02,
4.039266454794788e+05, 1.338060988610237e+08,
5.844333551294591e+10, 1.357346371637638e+13,
3.804661141892782e+15, 5.670715850340080e+17,
1.114411200988328e+20, 8.316815934908471e+21,
1.169243442282517e+24
]
assert_allclose(b, b2, rtol=1e-14)
assert_allclose(a, a2, rtol=1e-14)


class TestCheby2(object):
def test_degenerate(self):
# 0-order filter is just a passthrough
# Stopband ripple factor doesn't matter
b, a = cheby2(0, 123.456, 1, analog=True)
assert_array_equal(b, [1])
assert_array_equal(a, [1])
        # 1-order filter is the same for all types
b, a = cheby2(1, 10*np.log10(2), 1, analog=True)
assert_array_almost_equal(b, [1])
assert_array_almost_equal(a, [1, 1])
z, p, k = cheby2(1, 50, 0.3, output='zpk')
assert_array_equal(z, [-1])
assert_allclose(p, [9.967826460175649e-01], rtol=1e-14)
assert_allclose(k, 1.608676991217512e-03, rtol=1e-14)

    def test_basic(self):
for N in range(25):
wn = 0.01
z, p, k = cheby2(N, 40, wn, 'low', analog=True, output='zpk')
assert_(len(p) == N)
assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane
for N in range(25):
wn = 0.01
z, p, k = cheby2(N, 40, wn, 'high', analog=False, output='zpk')
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle
B, A = cheby2(18, 100, 0.5)
assert_array_almost_equal(B, [
0.00167583914216, 0.01249479541868, 0.05282702120282,
0.15939804265706, 0.37690207631117, 0.73227013789108,
1.20191856962356, 1.69522872823393, 2.07598674519837,
2.21972389625291, 2.07598674519838, 1.69522872823395,
1.20191856962359, 0.73227013789110, 0.37690207631118,
0.15939804265707, 0.05282702120282, 0.01249479541868,
0.00167583914216], decimal=13)
assert_array_almost_equal(A, [
1.00000000000000, -0.27631970006174, 3.19751214254060,
-0.15685969461355, 4.13926117356269, 0.60689917820044,
2.95082770636540, 0.89016501910416, 1.32135245849798,
0.51502467236824, 0.38906643866660, 0.15367372690642,
0.07255803834919, 0.02422454070134, 0.00756108751837,
0.00179848550988, 0.00033713574499, 0.00004258794833,
0.00000281030149], decimal=13)

    def test_highpass(self):
# high even order
z, p, k = cheby2(26, 60, 0.3, 'high', output='zpk')
z2 = [9.981088955489852e-01 + 6.147058341984388e-02j,
9.981088955489852e-01 - 6.147058341984388e-02j,
9.832702870387426e-01 + 1.821525257215483e-01j,
9.832702870387426e-01 - 1.821525257215483e-01j,
9.550760158089112e-01 + 2.963609353922882e-01j,
9.550760158089112e-01 - 2.963609353922882e-01j,
9.162054748821922e-01 + 4.007087817803773e-01j,
9.162054748821922e-01 - 4.007087817803773e-01j,
8.700619897368064e-01 + 4.929423232136168e-01j,
8.700619897368064e-01 - 4.929423232136168e-01j,
5.889791753434985e-01 + 8.081482110427953e-01j,
5.889791753434985e-01 - 8.081482110427953e-01j,
5.984900456570295e-01 + 8.011302423760501e-01j,
5.984900456570295e-01 - 8.011302423760501e-01j,
6.172880888914629e-01 + 7.867371958365343e-01j,
6.172880888914629e-01 - 7.867371958365343e-01j,
6.448899971038180e-01 + 7.642754030030161e-01j,
6.448899971038180e-01 - 7.642754030030161e-01j,
6.804845629637927e-01 + 7.327624168637228e-01j,
6.804845629637927e-01 - 7.327624168637228e-01j,
8.202619107108660e-01 + 5.719881098737678e-01j,
8.202619107108660e-01 - 5.719881098737678e-01j,
7.228410452536148e-01 + 6.910143437705678e-01j,
7.228410452536148e-01 - 6.910143437705678e-01j,
7.702121399578629e-01 + 6.377877856007792e-01j,
7.702121399578629e-01 - 6.377877856007792e-01j]
p2 = [7.365546198286450e-01 + 4.842085129329526e-02j,
7.365546198286450e-01 - 4.842085129329526e-02j,
7.292038510962885e-01 + 1.442201672097581e-01j,
7.292038510962885e-01 - 1.442201672097581e-01j,
7.151293788040354e-01 + 2.369925800458584e-01j,
7.151293788040354e-01 - 2.369925800458584e-01j,
6.955051820787286e-01 + 3.250341363856910e-01j,
6.955051820787286e-01 - 3.250341363856910e-01j,
6.719122956045220e-01 + 4.070475750638047e-01j,
6.719122956045220e-01 - 4.070475750638047e-01j,
6.461722130611300e-01 + 4.821965916689270e-01j,
6.461722130611300e-01 - 4.821965916689270e-01j,
5.528045062872224e-01 + 8.162920513838372e-01j,
5.528045062872224e-01 - 8.162920513838372e-01j,
5.464847782492791e-01 + 7.869899955967304e-01j,
5.464847782492791e-01 - 7.869899955967304e-01j,
5.488033111260949e-01 + 7.520442354055579e-01j,
5.488033111260949e-01 - 7.520442354055579e-01j,
6.201874719022955e-01 + 5.500894392527353e-01j,
6.201874719022955e-01 - 5.500894392527353e-01j,
5.586478152536709e-01 + 7.112676877332921e-01j,
5.586478152536709e-01 - 7.112676877332921e-01j,
5.958145844148228e-01 + 6.107074340842115e-01j,
5.958145844148228e-01 - 6.107074340842115e-01j,
5.747812938519067e-01 + 6.643001536914696e-01j,
5.747812938519067e-01 - 6.643001536914696e-01j]
k2 = 9.932997786497189e-02
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-13)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-12)
assert_allclose(k, k2, rtol=1e-11)
# high odd order
z, p, k = cheby2(25, 80, 0.5, 'high', output='zpk')
z2 = [9.690690376586687e-01 + 2.467897896011971e-01j,
9.690690376586687e-01 - 2.467897896011971e-01j,
9.999999999999492e-01,
8.835111277191199e-01 + 4.684101698261429e-01j,
8.835111277191199e-01 - 4.684101698261429e-01j,
7.613142857900539e-01 + 6.483830335935022e-01j,
7.613142857900539e-01 - 6.483830335935022e-01j,
6.232625173626231e-01 + 7.820126817709752e-01j,
6.232625173626231e-01 - 7.820126817709752e-01j,
4.864456563413621e-01 + 8.737108351316745e-01j,
4.864456563413621e-01 - 8.737108351316745e-01j,
3.618368136816749e-01 + 9.322414495530347e-01j,
3.618368136816749e-01 - 9.322414495530347e-01j,
2.549486883466794e-01 + 9.669545833752675e-01j,
2.549486883466794e-01 - 9.669545833752675e-01j,
1.676175432109457e-01 + 9.858520980390212e-01j,
1.676175432109457e-01 - 9.858520980390212e-01j,
1.975218468277521e-03 + 9.999980492540941e-01j,
1.975218468277521e-03 - 9.999980492540941e-01j,
1.786959496651858e-02 + 9.998403260399917e-01j,
1.786959496651858e-02 - 9.998403260399917e-01j,
9.967933660557139e-02 + 9.950196127985684e-01j,
9.967933660557139e-02 - 9.950196127985684e-01j,
5.013970951219547e-02 + 9.987422137518890e-01j,
5.013970951219547e-02 - 9.987422137518890e-01j]
p2 = [4.218866331906864e-01,
4.120110200127552e-01 + 1.361290593621978e-01j,
4.120110200127552e-01 - 1.361290593621978e-01j,
3.835890113632530e-01 + 2.664910809911026e-01j,
3.835890113632530e-01 - 2.664910809911026e-01j,
3.399195570456499e-01 + 3.863983538639875e-01j,
3.399195570456499e-01 - 3.863983538639875e-01j,
2.855977834508353e-01 + 4.929444399540688e-01j,
2.855977834508353e-01 - 4.929444399540688e-01j,
2.255765441339322e-01 + 5.851631870205766e-01j,
2.255765441339322e-01 - 5.851631870205766e-01j,
1.644087535815792e-01 + 6.637356937277153e-01j,
1.644087535815792e-01 - 6.637356937277153e-01j,
-7.293633845273095e-02 + 9.739218252516307e-01j,
-7.293633845273095e-02 - 9.739218252516307e-01j,
1.058259206358626e-01 + 7.304739464862978e-01j,
1.058259206358626e-01 - 7.304739464862978e-01j,
-5.703971947785402e-02 + 9.291057542169088e-01j,
-5.703971947785402e-02 - 9.291057542169088e-01j,
5.263875132656864e-02 + 7.877974334424453e-01j,
5.263875132656864e-02 - 7.877974334424453e-01j,
-3.007943405982616e-02 + 8.846331716180016e-01j,
-3.007943405982616e-02 - 8.846331716180016e-01j,
6.857277464483946e-03 + 8.383275456264492e-01j,
6.857277464483946e-03 - 8.383275456264492e-01j]
k2 = 6.507068761705037e-03
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-13)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-12)
assert_allclose(k, k2, rtol=1e-11)

    def test_bandpass(self):
z, p, k = cheby2(9, 40, [0.07, 0.2], 'pass', output='zpk')
z2 = [-9.999999999999999e-01,
3.676588029658514e-01 + 9.299607543341383e-01j,
3.676588029658514e-01 - 9.299607543341383e-01j,
7.009689684982283e-01 + 7.131917730894889e-01j,
7.009689684982283e-01 - 7.131917730894889e-01j,
7.815697973765858e-01 + 6.238178033919218e-01j,
7.815697973765858e-01 - 6.238178033919218e-01j,
8.063793628819866e-01 + 5.913986160941200e-01j,
8.063793628819866e-01 - 5.913986160941200e-01j,
1.000000000000001e+00,
9.944493019920448e-01 + 1.052168511576739e-01j,
9.944493019920448e-01 - 1.052168511576739e-01j,
9.854674703367308e-01 + 1.698642543566085e-01j,
9.854674703367308e-01 - 1.698642543566085e-01j,
9.762751735919308e-01 + 2.165335665157851e-01j,
9.762751735919308e-01 - 2.165335665157851e-01j,
9.792277171575134e-01 + 2.027636011479496e-01j,
9.792277171575134e-01 - 2.027636011479496e-01j]
p2 = [8.143803410489621e-01 + 5.411056063397541e-01j,
8.143803410489621e-01 - 5.411056063397541e-01j,
7.650769827887418e-01 + 5.195412242095543e-01j,
7.650769827887418e-01 - 5.195412242095543e-01j,
6.096241204063443e-01 + 3.568440484659796e-01j,
6.096241204063443e-01 - 3.568440484659796e-01j,
6.918192770246239e-01 + 4.770463577106911e-01j,
6.918192770246239e-01 - 4.770463577106911e-01j,
6.986241085779207e-01 + 1.146512226180060e-01j,
6.986241085779207e-01 - 1.146512226180060e-01j,
8.654645923909734e-01 + 1.604208797063147e-01j,
8.654645923909734e-01 - 1.604208797063147e-01j,
9.164831670444591e-01 + 1.969181049384918e-01j,
9.164831670444591e-01 - 1.969181049384918e-01j,
9.630425777594550e-01 + 2.317513360702271e-01j,
9.630425777594550e-01 - 2.317513360702271e-01j,
9.438104703725529e-01 + 2.193509900269860e-01j,
9.438104703725529e-01 - 2.193509900269860e-01j]
k2 = 9.345352824659604e-03
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-13)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-11)

    def test_bandstop(self):
z, p, k = cheby2(6, 55, [0.1, 0.9], 'stop', output='zpk')
z2 = [6.230544895101009e-01 + 7.821784343111114e-01j,
6.230544895101009e-01 - 7.821784343111114e-01j,
9.086608545660115e-01 + 4.175349702471991e-01j,
9.086608545660115e-01 - 4.175349702471991e-01j,
9.478129721465802e-01 + 3.188268649763867e-01j,
9.478129721465802e-01 - 3.188268649763867e-01j,
-6.230544895100982e-01 + 7.821784343111109e-01j,
-6.230544895100982e-01 - 7.821784343111109e-01j,
-9.086608545660116e-01 + 4.175349702472088e-01j,
-9.086608545660116e-01 - 4.175349702472088e-01j,
-9.478129721465784e-01 + 3.188268649763897e-01j,
-9.478129721465784e-01 - 3.188268649763897e-01j]
p2 = [-9.464094036167638e-01 + 1.720048695084344e-01j,
-9.464094036167638e-01 - 1.720048695084344e-01j,
-8.715844103386737e-01 + 1.370665039509297e-01j,
-8.715844103386737e-01 - 1.370665039509297e-01j,
-8.078751204586425e-01 + 5.729329866682983e-02j,
-8.078751204586425e-01 - 5.729329866682983e-02j,
9.464094036167665e-01 + 1.720048695084332e-01j,
9.464094036167665e-01 - 1.720048695084332e-01j,
8.078751204586447e-01 + 5.729329866683007e-02j,
8.078751204586447e-01 - 5.729329866683007e-02j,
8.715844103386721e-01 + 1.370665039509331e-01j,
8.715844103386721e-01 - 1.370665039509331e-01j]
k2 = 2.917823332763358e-03
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-13)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-11)

    def test_ba_output(self):
# with transfer function conversion, without digital conversion
b, a = cheby2(5, 20, [2010, 2100], 'stop', True)
b2 = [1.000000000000000e+00, 0, # Matlab: 6.683253076978249e-12,
2.111512500000000e+07, 0, # Matlab: 1.134325604589552e-04,
1.782966433781250e+14, 0, # Matlab: 7.216787944356781e+02,
7.525901316990656e+20, 0, # Matlab: 2.039829265789886e+09,
1.587960565565748e+27, 0, # Matlab: 2.161236218626134e+15,
1.339913493808585e+33]
a2 = [1.000000000000000e+00, 1.849550755473371e+02,
2.113222918998538e+07, 3.125114149732283e+09,
1.785133457155609e+14, 1.979158697776348e+16,
7.535048322653831e+20, 5.567966191263037e+22,
1.589246884221346e+27, 5.871210648525566e+28,
1.339913493808590e+33]
assert_allclose(b, b2, rtol=1e-14)
assert_allclose(a, a2, rtol=1e-14)


class TestEllip(object):
def test_degenerate(self):
# 0-order filter is just a passthrough
# Even-order filters have DC gain of -rp dB
# Stopband ripple factor doesn't matter
b, a = ellip(0, 10*np.log10(2), 123.456, 1, analog=True)
assert_array_almost_equal(b, [1/np.sqrt(2)])
assert_array_equal(a, [1])
        # 1-order filter is the same for all types
b, a = ellip(1, 10*np.log10(2), 1, 1, analog=True)
assert_array_almost_equal(b, [1])
assert_array_almost_equal(a, [1, 1])
z, p, k = ellip(1, 1, 55, 0.3, output='zpk')
assert_allclose(z, [-9.999999999999998e-01], rtol=1e-14)
assert_allclose(p, [-6.660721153525525e-04], rtol=1e-10)
assert_allclose(k, 5.003330360576763e-01, rtol=1e-14)

    def test_basic(self):
for N in range(25):
wn = 0.01
z, p, k = ellip(N, 1, 40, wn, 'low', analog=True, output='zpk')
assert_(len(p) == N)
assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane
for N in range(25):
wn = 0.01
z, p, k = ellip(N, 1, 40, wn, 'high', analog=False, output='zpk')
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle
b3, a3 = ellip(5, 3, 26, 1, analog=True)
assert_array_almost_equal(b3, [0.1420, 0, 0.3764, 0,
0.2409], decimal=4)
assert_array_almost_equal(a3, [1, 0.5686, 1.8061, 0.8017, 0.8012,
0.2409], decimal=4)
b, a = ellip(3, 1, 60, [0.4, 0.7], 'stop')
assert_array_almost_equal(b, [0.3310, 0.3469, 1.1042, 0.7044, 1.1042,
0.3469, 0.3310], decimal=4)
assert_array_almost_equal(a, [1.0000, 0.6973, 1.1441, 0.5878, 0.7323,
0.1131, -0.0060], decimal=4)

    def test_highpass(self):
# high even order
z, p, k = ellip(24, 1, 80, 0.3, 'high', output='zpk')
z2 = [9.761875332501075e-01 + 2.169283290099910e-01j,
9.761875332501075e-01 - 2.169283290099910e-01j,
8.413503353963494e-01 + 5.404901600661900e-01j,
8.413503353963494e-01 - 5.404901600661900e-01j,
7.160082576305009e-01 + 6.980918098681732e-01j,
7.160082576305009e-01 - 6.980918098681732e-01j,
6.456533638965329e-01 + 7.636306264739803e-01j,
6.456533638965329e-01 - 7.636306264739803e-01j,
6.127321820971366e-01 + 7.902906256703928e-01j,
6.127321820971366e-01 - 7.902906256703928e-01j,
5.983607817490196e-01 + 8.012267936512676e-01j,
5.983607817490196e-01 - 8.012267936512676e-01j,
5.922577552594799e-01 + 8.057485658286990e-01j,
5.922577552594799e-01 - 8.057485658286990e-01j,
5.896952092563588e-01 + 8.076258788449631e-01j,
5.896952092563588e-01 - 8.076258788449631e-01j,
5.886248765538837e-01 + 8.084063054565607e-01j,
5.886248765538837e-01 - 8.084063054565607e-01j,
5.881802711123132e-01 + 8.087298490066037e-01j,
5.881802711123132e-01 - 8.087298490066037e-01j,
5.879995719101164e-01 + 8.088612386766461e-01j,
5.879995719101164e-01 - 8.088612386766461e-01j,
5.879354086709576e-01 + 8.089078780868164e-01j,
5.879354086709576e-01 - 8.089078780868164e-01j]
p2 = [-3.184805259081650e-01 + 4.206951906775851e-01j,
-3.184805259081650e-01 - 4.206951906775851e-01j,
1.417279173459985e-01 + 7.903955262836452e-01j,
1.417279173459985e-01 - 7.903955262836452e-01j,
4.042881216964651e-01 + 8.309042239116594e-01j,
4.042881216964651e-01 - 8.309042239116594e-01j,
5.128964442789670e-01 + 8.229563236799665e-01j,
5.128964442789670e-01 - 8.229563236799665e-01j,
5.569614712822724e-01 + 8.155957702908510e-01j,
5.569614712822724e-01 - 8.155957702908510e-01j,
5.750478870161392e-01 + 8.118633973883931e-01j,
5.750478870161392e-01 - 8.118633973883931e-01j,
5.825314018170804e-01 + 8.101960910679270e-01j,
5.825314018170804e-01 - 8.101960910679270e-01j,
5.856397379751872e-01 + 8.094825218722543e-01j,
5.856397379751872e-01 - 8.094825218722543e-01j,
5.869326035251949e-01 + 8.091827531557583e-01j,
5.869326035251949e-01 - 8.091827531557583e-01j,
5.874697218855733e-01 + 8.090593298213502e-01j,
5.874697218855733e-01 - 8.090593298213502e-01j,
5.876904783532237e-01 + 8.090127161018823e-01j,
5.876904783532237e-01 - 8.090127161018823e-01j,
5.877753105317594e-01 + 8.090050577978136e-01j,
5.877753105317594e-01 - 8.090050577978136e-01j]
k2 = 4.918081266957108e-02
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-4)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-4)
assert_allclose(k, k2, rtol=1e-3)
# high odd order
z, p, k = ellip(23, 1, 70, 0.5, 'high', output='zpk')
z2 = [9.999999999998661e-01,
6.603717261750994e-01 + 7.509388678638675e-01j,
6.603717261750994e-01 - 7.509388678638675e-01j,
2.788635267510325e-01 + 9.603307416968041e-01j,
2.788635267510325e-01 - 9.603307416968041e-01j,
1.070215532544218e-01 + 9.942567008268131e-01j,
1.070215532544218e-01 - 9.942567008268131e-01j,
4.049427369978163e-02 + 9.991797705105507e-01j,
4.049427369978163e-02 - 9.991797705105507e-01j,
1.531059368627931e-02 + 9.998827859909265e-01j,
1.531059368627931e-02 - 9.998827859909265e-01j,
5.808061438534933e-03 + 9.999831330689181e-01j,
5.808061438534933e-03 - 9.999831330689181e-01j,
2.224277847754599e-03 + 9.999975262909676e-01j,
2.224277847754599e-03 - 9.999975262909676e-01j,
8.731857107534554e-04 + 9.999996187732845e-01j,
8.731857107534554e-04 - 9.999996187732845e-01j,
3.649057346914968e-04 + 9.999999334218996e-01j,
3.649057346914968e-04 - 9.999999334218996e-01j,
1.765538109802615e-04 + 9.999999844143768e-01j,
1.765538109802615e-04 - 9.999999844143768e-01j,
1.143655290967426e-04 + 9.999999934602630e-01j,
1.143655290967426e-04 - 9.999999934602630e-01j]
p2 = [-6.322017026545028e-01,
-4.648423756662754e-01 + 5.852407464440732e-01j,
-4.648423756662754e-01 - 5.852407464440732e-01j,
-2.249233374627773e-01 + 8.577853017985717e-01j,
-2.249233374627773e-01 - 8.577853017985717e-01j,
-9.234137570557621e-02 + 9.506548198678851e-01j,
-9.234137570557621e-02 - 9.506548198678851e-01j,
-3.585663561241373e-02 + 9.821494736043981e-01j,
-3.585663561241373e-02 - 9.821494736043981e-01j,
-1.363917242312723e-02 + 9.933844128330656e-01j,
-1.363917242312723e-02 - 9.933844128330656e-01j,
-5.131505238923029e-03 + 9.975221173308673e-01j,
-5.131505238923029e-03 - 9.975221173308673e-01j,
-1.904937999259502e-03 + 9.990680819857982e-01j,
-1.904937999259502e-03 - 9.990680819857982e-01j,
-6.859439885466834e-04 + 9.996492201426826e-01j,
-6.859439885466834e-04 - 9.996492201426826e-01j,
-2.269936267937089e-04 + 9.998686250679161e-01j,
-2.269936267937089e-04 - 9.998686250679161e-01j,
-5.687071588789117e-05 + 9.999527573294513e-01j,
-5.687071588789117e-05 - 9.999527573294513e-01j,
-6.948417068525226e-07 + 9.999882737700173e-01j,
-6.948417068525226e-07 - 9.999882737700173e-01j]
k2 = 1.220910020289434e-02
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-4)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-4)
assert_allclose(k, k2, rtol=1e-3)

    def test_bandpass(self):
z, p, k = ellip(7, 1, 40, [0.07, 0.2], 'pass', output='zpk')
z2 = [-9.999999999999991e-01,
6.856610961780020e-01 + 7.279209168501619e-01j,
6.856610961780020e-01 - 7.279209168501619e-01j,
7.850346167691289e-01 + 6.194518952058737e-01j,
7.850346167691289e-01 - 6.194518952058737e-01j,
7.999038743173071e-01 + 6.001281461922627e-01j,
7.999038743173071e-01 - 6.001281461922627e-01j,
9.999999999999999e-01,
9.862938983554124e-01 + 1.649980183725925e-01j,
9.862938983554124e-01 - 1.649980183725925e-01j,
9.788558330548762e-01 + 2.045513580850601e-01j,
9.788558330548762e-01 - 2.045513580850601e-01j,
9.771155231720003e-01 + 2.127093189691258e-01j,
9.771155231720003e-01 - 2.127093189691258e-01j]
p2 = [8.063992755498643e-01 + 5.858071374778874e-01j,
8.063992755498643e-01 - 5.858071374778874e-01j,
8.050395347071724e-01 + 5.639097428109795e-01j,
8.050395347071724e-01 - 5.639097428109795e-01j,
8.113124936559144e-01 + 4.855241143973142e-01j,
8.113124936559144e-01 - 4.855241143973142e-01j,
8.665595314082394e-01 + 3.334049560919331e-01j,
8.665595314082394e-01 - 3.334049560919331e-01j,
9.412369011968871e-01 + 2.457616651325908e-01j,
9.412369011968871e-01 - 2.457616651325908e-01j,
9.679465190411238e-01 + 2.228772501848216e-01j,
9.679465190411238e-01 - 2.228772501848216e-01j,
9.747235066273385e-01 + 2.178937926146544e-01j,
9.747235066273385e-01 - 2.178937926146544e-01j]
k2 = 8.354782670263239e-03
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-4)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-4)
assert_allclose(k, k2, rtol=1e-3)
z, p, k = ellip(5, 1, 75, [90.5, 110.5], 'pass', True, 'zpk')
z2 = [-5.583607317695175e-14 + 1.433755965989225e+02j,
-5.583607317695175e-14 - 1.433755965989225e+02j,
5.740106416459296e-14 + 1.261678754570291e+02j,
5.740106416459296e-14 - 1.261678754570291e+02j,
-2.199676239638652e-14 + 6.974861996895196e+01j,
-2.199676239638652e-14 - 6.974861996895196e+01j,
-3.372595657044283e-14 + 7.926145989044531e+01j,
-3.372595657044283e-14 - 7.926145989044531e+01j,
0]
p2 = [-8.814960004852743e-01 + 1.104124501436066e+02j,
-8.814960004852743e-01 - 1.104124501436066e+02j,
-2.477372459140184e+00 + 1.065638954516534e+02j,
-2.477372459140184e+00 - 1.065638954516534e+02j,
-3.072156842945799e+00 + 9.995404870405324e+01j,
-3.072156842945799e+00 - 9.995404870405324e+01j,
-2.180456023925693e+00 + 9.379206865455268e+01j,
-2.180456023925693e+00 - 9.379206865455268e+01j,
-7.230484977485752e-01 + 9.056598800801140e+01j,
-7.230484977485752e-01 - 9.056598800801140e+01j]
k2 = 3.774571622827070e-02
assert_allclose(sorted(z, key=np.imag),
sorted(z2, key=np.imag), rtol=1e-4)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-6)
assert_allclose(k, k2, rtol=1e-3)

    def test_bandstop(self):
z, p, k = ellip(8, 1, 65, [0.2, 0.4], 'stop', output='zpk')
z2 = [3.528578094286510e-01 + 9.356769561794296e-01j,
3.528578094286510e-01 - 9.356769561794296e-01j,
3.769716042264783e-01 + 9.262248159096587e-01j,
3.769716042264783e-01 - 9.262248159096587e-01j,
4.406101783111199e-01 + 8.976985411420985e-01j,
4.406101783111199e-01 - 8.976985411420985e-01j,
5.539386470258847e-01 + 8.325574907062760e-01j,
5.539386470258847e-01 - 8.325574907062760e-01j,
6.748464963023645e-01 + 7.379581332490555e-01j,
6.748464963023645e-01 - 7.379581332490555e-01j,
7.489887970285254e-01 + 6.625826604475596e-01j,
7.489887970285254e-01 - 6.625826604475596e-01j,
7.913118471618432e-01 + 6.114127579150699e-01j,
7.913118471618432e-01 - 6.114127579150699e-01j,
7.806804740916381e-01 + 6.249303940216475e-01j,
7.806804740916381e-01 - 6.249303940216475e-01j]
p2 = [-1.025299146693730e-01 + 5.662682444754943e-01j,
-1.025299146693730e-01 - 5.662682444754943e-01j,
1.698463595163031e-01 + 8.926678667070186e-01j,
1.698463595163031e-01 - 8.926678667070186e-01j,
2.750532687820631e-01 + 9.351020170094005e-01j,
2.750532687820631e-01 - 9.351020170094005e-01j,
3.070095178909486e-01 + 9.457373499553291e-01j,
3.070095178909486e-01 - 9.457373499553291e-01j,
7.695332312152288e-01 + 2.792567212705257e-01j,
7.695332312152288e-01 - 2.792567212705257e-01j,
8.083818999225620e-01 + 4.990723496863960e-01j,
8.083818999225620e-01 - 4.990723496863960e-01j,
8.066158014414928e-01 + 5.649811440393374e-01j,
8.066158014414928e-01 - 5.649811440393374e-01j,
8.062787978834571e-01 + 5.855780880424964e-01j,
8.062787978834571e-01 - 5.855780880424964e-01j]
k2 = 2.068622545291259e-01
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-6)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-5)
assert_allclose(k, k2, rtol=1e-5)

    def test_ba_output(self):
# with transfer function conversion, without digital conversion
b, a = ellip(5, 1, 40, [201, 240], 'stop', True)
b2 = [
1.000000000000000e+00, 0, # Matlab: 1.743506051190569e-13,
2.426561778314366e+05, 0, # Matlab: 3.459426536825722e-08,
2.348218683400168e+10, 0, # Matlab: 2.559179747299313e-03,
1.132780692872241e+15, 0, # Matlab: 8.363229375535731e+01,
2.724038554089566e+19, 0, # Matlab: 1.018700994113120e+06,
2.612380874940186e+23
]
a2 = [
1.000000000000000e+00, 1.337266601804649e+02,
2.486725353510667e+05, 2.628059713728125e+07,
2.436169536928770e+10, 1.913554568577315e+12,
1.175208184614438e+15, 6.115751452473410e+16,
2.791577695211466e+19, 7.241811142725384e+20,
2.612380874940182e+23
]
assert_allclose(b, b2, rtol=1e-6)
assert_allclose(a, a2, rtol=1e-4)


def test_sos_consistency():
# Consistency checks of output='sos' for the specialized IIR filter
# design functions.
design_funcs = [(bessel, (0.1,)),
(butter, (0.1,)),
(cheby1, (45.0, 0.1)),
(cheby2, (0.087, 0.1)),
(ellip, (0.087, 45, 0.1))]
for func, args in design_funcs:
name = func.__name__
b, a = func(2, *args, output='ba')
sos = func(2, *args, output='sos')
assert_allclose(sos, [np.hstack((b, a))], err_msg="%s(2,...)" % name)
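        # (a 2nd-order design is a single biquad section, so its SOS array
        # is exactly the one row [b0, b1, b2, a0, a1, a2])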
zpk = func(3, *args, output='zpk')
sos = func(3, *args, output='sos')
assert_allclose(sos, zpk2sos(*zpk), err_msg="%s(3,...)" % name)
zpk = func(4, *args, output='zpk')
sos = func(4, *args, output='sos')
assert_allclose(sos, zpk2sos(*zpk), err_msg="%s(4,...)" % name)


class TestIIRNotch(object):
def test_ba_output(self):
        # Compare coefficients with Matlab ones
# for the equivalent input:
b, a = iirnotch(0.06, 30)
b2 = [
9.9686824e-01, -1.9584219e+00,
9.9686824e-01
]
a2 = [
1.0000000e+00, -1.9584219e+00,
9.9373647e-01
]
assert_allclose(b, b2, rtol=1e-8)
assert_allclose(a, a2, rtol=1e-8)
def test_frequency_response(self):
        # Get filter coefficients
b, a = iirnotch(0.3, 30)
# Get frequency response
w, h = freqz(b, a, 1000)
        # Pick 5 points
p = [200, # w0 = 0.200
295, # w0 = 0.295
300, # w0 = 0.300
305, # w0 = 0.305
400] # w0 = 0.400
        # Get the frequency response corresponding to each of those points
hp = h[p]
        # Check that the frequency response fulfills the specifications:
# hp[0] and hp[4] correspond to frequencies distant from
# w0 = 0.3 and should be close to 1
assert_allclose(abs(hp[0]), 1, rtol=1e-2)
assert_allclose(abs(hp[4]), 1, rtol=1e-2)
# hp[1] and hp[3] correspond to frequencies approximately
# on the edges of the passband and should be close to -3dB
assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
        # hp[2] corresponds to the frequency that should be removed;
        # the frequency response there should be very close to 0
assert_allclose(abs(hp[2]), 0, atol=1e-10)
def test_errors(self):
        # Exception should be raised if w0 > 1 or w0 < 0
assert_raises(ValueError, iirnotch, w0=2, Q=30)
assert_raises(ValueError, iirnotch, w0=-1, Q=30)
        # Exception should be raised if any of the parameters
        # is not a float (or cannot be converted to one)
assert_raises(ValueError, iirnotch, w0="blabla", Q=30)
assert_raises(TypeError, iirnotch, w0=-1, Q=[1, 2, 3])
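# --- Illustrative sketch (added for exposition; not part of the original
# test suite). The index arithmetic in test_frequency_response above relies
# on freqz(b, a, N) sampling the response at w = pi * k / N, so with
# N = 1000 the index 300 lands exactly on the normalized notch frequency
# w0 = 0.3 (in units of the Nyquist frequency).
def _demo_freqz_index_mapping():
    import numpy as np
    from scipy.signal import freqz, iirnotch
    b, a = iirnotch(0.3, 30)
    w, h = freqz(b, a, 1000)
    assert np.isclose(w[300], 0.3 * np.pi)  # index 300 <-> w0 = 0.3
    assert abs(h[300]) < 1e-9               # response is ~0 at the notch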
class TestIIRPeak(object):
def test_ba_output(self):
        # Compare coefficients with Matlab ones
# for the equivalent input:
b, a = iirpeak(0.06, 30)
b2 = [
3.131764229e-03, 0,
-3.131764229e-03
]
a2 = [
1.0000000e+00, -1.958421917e+00,
9.9373647e-01
]
assert_allclose(b, b2, rtol=1e-8)
assert_allclose(a, a2, rtol=1e-8)
def test_frequency_response(self):
        # Get filter coefficients
b, a = iirpeak(0.3, 30)
# Get frequency response
w, h = freqz(b, a, 1000)
        # Pick 5 points
p = [30, # w0 = 0.030
295, # w0 = 0.295
300, # w0 = 0.300
305, # w0 = 0.305
800] # w0 = 0.800
        # Get the frequency response corresponding to each of those points
hp = h[p]
        # Check that the frequency response fulfills the specifications:
# hp[0] and hp[4] correspond to frequencies distant from
# w0 = 0.3 and should be close to 0
assert_allclose(abs(hp[0]), 0, atol=1e-2)
assert_allclose(abs(hp[4]), 0, atol=1e-2)
        # hp[1] and hp[3] correspond to frequencies approximately
        # on the edges of the passband and should be close to -3 dB (1/sqrt(2))
assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
        # hp[2] corresponds to the frequency that should be retained;
        # the frequency response there should be very close to 1
assert_allclose(abs(hp[2]), 1, rtol=1e-10)
def test_errors(self):
        # Exception should be raised if w0 > 1 or w0 < 0
assert_raises(ValueError, iirpeak, w0=2, Q=30)
assert_raises(ValueError, iirpeak, w0=-1, Q=30)
        # Exception should be raised if any of the parameters
        # is not a float (or cannot be converted to one)
assert_raises(ValueError, iirpeak, w0="blabla", Q=30)
assert_raises(TypeError, iirpeak, w0=-1, Q=[1, 2, 3])
class TestIIRFilter(object):
def test_symmetry(self):
# All built-in IIR filters are real, so should have perfectly
        # symmetrical poles and zeros. Then the ba representation (using
# numpy.poly) will be purely real instead of having negligible
# imaginary parts.
for N in np.arange(1, 26):
for ftype in ('butter', 'bessel', 'cheby1', 'cheby2', 'ellip'):
z, p, k = iirfilter(N, 1.1, 1, 20, 'low', analog=True,
ftype=ftype, output='zpk')
assert_array_equal(sorted(z), sorted(z.conj()))
assert_array_equal(sorted(p), sorted(p.conj()))
assert_equal(k, np.real(k))
b, a = iirfilter(N, 1.1, 1, 20, 'low', analog=True,
ftype=ftype, output='ba')
assert_(issubclass(b.dtype.type, np.floating))
assert_(issubclass(a.dtype.type, np.floating))
def test_int_inputs(self):
        # Using integer frequency arguments and large N should not produce
        # np.ints that wrap around to negative numbers
k = iirfilter(24, 100, btype='low', analog=True, ftype='bessel',
output='zpk')[2]
k2 = 9.999999999999989e+47
assert_allclose(k, k2)
def test_invalid_wn_size(self):
# low and high have 1 Wn, band and stop have 2 Wn
assert_raises(ValueError, iirfilter, 1, [0.1, 0.9], btype='low')
assert_raises(ValueError, iirfilter, 1, [0.2, 0.5], btype='high')
assert_raises(ValueError, iirfilter, 1, 0.2, btype='bp')
assert_raises(ValueError, iirfilter, 1, 400, btype='bs', analog=True)
def test_invalid_wn_range(self):
# For digital filters, 0 <= Wn <= 1
assert_raises(ValueError, iirfilter, 1, 2, btype='low')
assert_raises(ValueError, iirfilter, 1, [0.5, 1], btype='band')
assert_raises(ValueError, iirfilter, 1, [0., 0.5], btype='band')
assert_raises(ValueError, iirfilter, 1, -1, btype='high')
assert_raises(ValueError, iirfilter, 1, [1, 2], btype='band')
assert_raises(ValueError, iirfilter, 1, [10, 20], btype='stop')
class TestGroupDelay(object):
def test_identity_filter(self):
w, gd = group_delay((1, 1))
assert_array_almost_equal(w, pi * np.arange(512) / 512)
assert_array_almost_equal(gd, np.zeros(512))
w, gd = group_delay((1, 1), whole=True)
assert_array_almost_equal(w, 2 * pi * np.arange(512) / 512)
assert_array_almost_equal(gd, np.zeros(512))
def test_fir(self):
        # Let's design a linear-phase FIR filter and check that the group
        # delay is constant.
N = 100
b = firwin(N + 1, 0.1)
w, gd = group_delay((b, 1))
assert_allclose(gd, 0.5 * N)
def test_iir(self):
        # Let's design a Butterworth filter and test the group delay at
# some points against MATLAB answer.
b, a = butter(4, 0.1)
w = np.linspace(0, pi, num=10, endpoint=False)
w, gd = group_delay((b, a), w=w)
matlab_gd = np.array([8.249313898506037, 11.958947880907104,
2.452325615326005, 1.048918665702008,
0.611382575635897, 0.418293269460578,
0.317932917836572, 0.261371844762525,
0.229038045801298, 0.212185774208521])
assert_array_almost_equal(gd, matlab_gd)
def test_singular(self):
# Let's create a filter with zeros and poles on the unit circle and
        # check that a warning is raised and the group delay is set to zero at
# these frequencies.
z1 = np.exp(1j * 0.1 * pi)
z2 = np.exp(1j * 0.25 * pi)
p1 = np.exp(1j * 0.5 * pi)
p2 = np.exp(1j * 0.8 * pi)
b = np.convolve([1, -z1], [1, -z2])
a = np.convolve([1, -p1], [1, -p2])
w = np.array([0.1 * pi, 0.25 * pi, -0.5 * pi, -0.8 * pi])
w, gd = assert_warns(UserWarning, group_delay, (b, a), w=w)
assert_allclose(gd, 0)
def test_backward_compat(self):
        # For backward compatibility, test that None acts as a wrapper for the default
w1, gd1 = group_delay((1, 1))
w2, gd2 = group_delay((1, 1), None)
assert_array_almost_equal(w1, w2)
assert_array_almost_equal(gd1, gd2)
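# --- Illustrative sketch (added for exposition; not part of the original
# test suite). test_fir above relies on a standard fact: a linear-phase
# (symmetric) FIR filter with N + 1 taps delays every frequency by exactly
# N / 2 samples, which is why its group delay is constant.
def _demo_linear_phase_delay():
    import numpy as np
    from scipy.signal import firwin, group_delay
    N = 20
    b = firwin(N + 1, 0.1)
    assert np.allclose(b, b[::-1])   # symmetric taps => linear phase
    _, gd = group_delay((b, 1))
    assert np.allclose(gd, 0.5 * N)  # constant delay of N/2 samples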
# ===== file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/tests/test_waveforms.py (repo: cba-pipeline-public) =====
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_, assert_allclose, assert_array_equal)
from pytest import raises as assert_raises
import scipy.signal.waveforms as waveforms
# These chirp_* functions are the instantaneous frequencies of the signals
# returned by chirp().
def chirp_linear(t, f0, f1, t1):
f = f0 + (f1 - f0) * t / t1
return f
def chirp_quadratic(t, f0, f1, t1, vertex_zero=True):
if vertex_zero:
f = f0 + (f1 - f0) * t**2 / t1**2
else:
f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2
return f
def chirp_geometric(t, f0, f1, t1):
f = f0 * (f1/f0)**(t/t1)
return f
def chirp_hyperbolic(t, f0, f1, t1):
f = f0*f1*t1 / ((f0 - f1)*t + f1*t1)
return f
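# --- Illustrative sketch (added for exposition; not part of the original
# test suite). Each chirp_* reference above should reproduce the endpoint
# frequencies: f(0) = f0 and f(t1) = f1. For the hyperbolic case,
# f(0) = f0*f1*t1 / (f1*t1) = f0 and f(t1) = f0*f1*t1 / (f0*t1) = f1.
def _demo_chirp_reference_endpoints():
    f0, f1, t1 = 10.0, 1.0, 2.0
    for ref in (chirp_linear, chirp_quadratic, chirp_geometric,
                chirp_hyperbolic):
        assert abs(ref(0.0, f0, f1, t1) - f0) < 1e-12
        assert abs(ref(t1, f0, f1, t1) - f1) < 1e-12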
def compute_frequency(t, theta):
"""
Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t).
"""
# Assume theta and t are 1D numpy arrays.
# Assume that t is uniformly spaced.
dt = t[1] - t[0]
f = np.diff(theta)/(2*np.pi) / dt
tf = 0.5*(t[1:] + t[:-1])
return tf, f
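# --- Illustrative sketch (added for exposition; not part of the original
# test suite). For a pure tone, theta(t) = 2*pi*f0*t, the finite-difference
# estimate above recovers the constant frequency f0 at the interval
# midpoints.
def _demo_compute_frequency_pure_tone():
    f0 = 3.0
    t = np.linspace(0, 1, 1000)
    tf, f = compute_frequency(t, 2 * np.pi * f0 * t)
    assert np.allclose(f, f0)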
class TestChirp(object):
def test_linear_at_zero(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear')
assert_almost_equal(w, 1.0)
def test_linear_freq_01(self):
method = 'linear'
f0 = 1.0
f1 = 2.0
t1 = 1.0
t = np.linspace(0, t1, 100)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_linear_freq_02(self):
method = 'linear'
f0 = 200.0
f1 = 100.0
t1 = 10.0
t = np.linspace(0, t1, 100)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_quadratic_at_zero(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic')
assert_almost_equal(w, 1.0)
def test_quadratic_at_zero2(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic',
vertex_zero=False)
assert_almost_equal(w, 1.0)
def test_quadratic_freq_01(self):
method = 'quadratic'
f0 = 1.0
f1 = 2.0
t1 = 1.0
t = np.linspace(0, t1, 2000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_quadratic_freq_02(self):
method = 'quadratic'
f0 = 20.0
f1 = 10.0
t1 = 10.0
t = np.linspace(0, t1, 2000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_logarithmic_at_zero(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic')
assert_almost_equal(w, 1.0)
def test_logarithmic_freq_01(self):
method = 'logarithmic'
f0 = 1.0
f1 = 2.0
t1 = 1.0
t = np.linspace(0, t1, 10000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_logarithmic_freq_02(self):
method = 'logarithmic'
f0 = 200.0
f1 = 100.0
t1 = 10.0
t = np.linspace(0, t1, 10000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_logarithmic_freq_03(self):
method = 'logarithmic'
f0 = 100.0
f1 = 100.0
t1 = 10.0
t = np.linspace(0, t1, 10000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_hyperbolic_at_zero(self):
w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic')
assert_almost_equal(w, 1.0)
def test_hyperbolic_freq_01(self):
method = 'hyperbolic'
t1 = 1.0
t = np.linspace(0, t1, 10000)
# f0 f1
cases = [[10.0, 1.0],
[1.0, 10.0],
[-10.0, -1.0],
[-1.0, -10.0]]
for f0, f1 in cases:
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
expected = chirp_hyperbolic(tf, f0, f1, t1)
assert_allclose(f, expected)
def test_hyperbolic_zero_freq(self):
# f0=0 or f1=0 must raise a ValueError.
method = 'hyperbolic'
t1 = 1.0
t = np.linspace(0, t1, 5)
assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method)
assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method)
def test_unknown_method(self):
method = "foo"
f0 = 10.0
f1 = 20.0
t1 = 1.0
t = np.linspace(0, t1, 10)
assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
def test_integer_t1(self):
f0 = 10.0
f1 = 20.0
t = np.linspace(-1, 1, 11)
t1 = 3.0
float_result = waveforms.chirp(t, f0, t1, f1)
t1 = 3
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 't1=3' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_f0(self):
f1 = 20.0
t1 = 3.0
t = np.linspace(-1, 1, 11)
f0 = 10.0
float_result = waveforms.chirp(t, f0, t1, f1)
f0 = 10
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 'f0=10' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_f1(self):
f0 = 10.0
t1 = 3.0
t = np.linspace(-1, 1, 11)
f1 = 20.0
float_result = waveforms.chirp(t, f0, t1, f1)
f1 = 20
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 'f1=20' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_all(self):
f0 = 10
t1 = 3
f1 = 20
t = np.linspace(-1, 1, 11)
float_result = waveforms.chirp(t, float(f0), float(t1), float(f1))
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
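# --- Illustrative sketch (added for exposition; not part of the original
# test suite). A linear chirp is the special case of sweep_poly with a
# degree-1 frequency polynomial f(t) = f0 + (f1 - f0) * t / t1, so the two
# generators should agree sample for sample (assuming the default phi=0 in
# both calls).
def _demo_linear_chirp_vs_sweep_poly():
    f0, f1, t1 = 1.0, 5.0, 2.0
    t = np.linspace(0, t1, 500)
    w_chirp = waveforms.chirp(t, f0, t1, f1, method='linear')
    w_poly = waveforms.sweep_poly(t, [(f1 - f0) / t1, f0])
    assert np.allclose(w_chirp, w_poly)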
class TestSweepPoly(object):
def test_sweep_poly_quad1(self):
p = np.poly1d([1.0, 0.0, 1.0])
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_const(self):
p = np.poly1d(2.0)
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_linear(self):
p = np.poly1d([-1.0, 10.0])
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_quad2(self):
p = np.poly1d([1.0, 0.0, -2.0])
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_cubic(self):
p = np.poly1d([2.0, 1.0, 0.0, -2.0])
t = np.linspace(0, 2.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_cubic2(self):
"""Use an array of coefficients instead of a poly1d."""
p = np.array([2.0, 1.0, 0.0, -2.0])
t = np.linspace(0, 2.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = np.poly1d(p)(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_cubic3(self):
"""Use a list of coefficients instead of a poly1d."""
p = [2.0, 1.0, 0.0, -2.0]
t = np.linspace(0, 2.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = np.poly1d(p)(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
class TestGaussPulse(object):
def test_integer_fc(self):
float_result = waveforms.gausspulse('cutoff', fc=1000.0)
int_result = waveforms.gausspulse('cutoff', fc=1000)
err_msg = "Integer input 'fc=1000' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_bw(self):
float_result = waveforms.gausspulse('cutoff', bw=1.0)
int_result = waveforms.gausspulse('cutoff', bw=1)
err_msg = "Integer input 'bw=1' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_bwr(self):
float_result = waveforms.gausspulse('cutoff', bwr=-6.0)
int_result = waveforms.gausspulse('cutoff', bwr=-6)
err_msg = "Integer input 'bwr=-6' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_tpr(self):
float_result = waveforms.gausspulse('cutoff', tpr=-60.0)
int_result = waveforms.gausspulse('cutoff', tpr=-60)
err_msg = "Integer input 'tpr=-60' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
class TestUnitImpulse(object):
def test_no_index(self):
assert_array_equal(waveforms.unit_impulse(7), [1, 0, 0, 0, 0, 0, 0])
assert_array_equal(waveforms.unit_impulse((3, 3)),
[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
def test_index(self):
assert_array_equal(waveforms.unit_impulse(10, 3),
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
assert_array_equal(waveforms.unit_impulse((3, 3), (1, 1)),
[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
# Broadcasting
imp = waveforms.unit_impulse((4, 4), 2)
assert_array_equal(imp, np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]]))
def test_mid(self):
assert_array_equal(waveforms.unit_impulse((3, 3), 'mid'),
[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_array_equal(waveforms.unit_impulse(9, 'mid'),
[0, 0, 0, 0, 1, 0, 0, 0, 0])
def test_dtype(self):
imp = waveforms.unit_impulse(7)
assert_(np.issubdtype(imp.dtype, np.floating))
imp = waveforms.unit_impulse(5, 3, dtype=int)
assert_(np.issubdtype(imp.dtype, np.integer))
imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex)
assert_(np.issubdtype(imp.dtype, np.complexfloating))
# ===== file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/windows/setup.py (repo: cba-pipeline-public) =====
from __future__ import division, print_function, absolute_import
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('windows', parent_package, top_path)
config.add_data_dir('tests')
return config
# ===== file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/windows/windows.py (repo: cba-pipeline-public) =====
"""The suite of window functions."""
from __future__ import division, print_function, absolute_import
import operator
import warnings
import numpy as np
from scipy import fftpack, linalg, special
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
           'hamming', 'kaiser', 'gaussian', 'general_cosine', 'general_gaussian',
'general_hamming', 'chebwin', 'slepian', 'cosine', 'hann',
'exponential', 'tukey', 'dpss', 'get_window']
def _len_guards(M):
"""Handle small or incorrect window lengths"""
if int(M) != M or M < 0:
raise ValueError('Window length M must be a non-negative integer')
return M <= 1
def _extend(M, sym):
"""Extend window by 1 sample if needed for DFT-even symmetry"""
if not sym:
return M + 1, True
else:
return M, False
def _truncate(w, needed):
"""Truncate window by 1 sample if needed for DFT-even symmetry"""
if needed:
return w[:-1]
else:
return w
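# --- Illustrative sketch (added for exposition; not part of the original
# module). The two helpers above implement the "DFT-even" construction: a
# periodic window of length M is generated as the symmetric window of
# length M + 1 with its final sample truncated.
def _demo_dft_even_construction():
    # `hann` is defined further down in this module; the name is resolved
    # at call time, so the forward reference is fine.
    assert np.allclose(hann(8, sym=False), hann(9, sym=True)[:-1])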
def general_cosine(M, a, sym=True):
r"""
Generic weighted sum of cosine terms window
Parameters
----------
M : int
Number of points in the output window
a : array_like
Sequence of weighting coefficients. This uses the convention of being
centered on the origin, so these will typically all be positive
numbers, not alternating sign.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
References
----------
.. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE
Transactions on Acoustics, Speech, and Signal Processing, vol. 29,
no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`.
.. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the
Discrete Fourier transform (DFT), including a comprehensive list of
window functions and some new flat-top windows", February 15, 2002
https://holometer.fnal.gov/GH_FFT.pdf
Examples
--------
Heinzel describes a flat-top window named "HFT90D" with formula: [2]_
.. math:: w_j = 1 - 1.942604 \cos(z) + 1.340318 \cos(2z)
- 0.440811 \cos(3z) + 0.043097 \cos(4z)
where
.. math:: z = \frac{2 \pi j}{N}, j = 0...N - 1
Since this uses the convention of starting at the origin, to reproduce the
window, we need to convert every other coefficient to a positive number:
>>> HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097]
The paper states that the highest sidelobe is at -90.2 dB. Reproduce
Figure 42 by plotting the window and its frequency response, and confirm
the sidelobe level in red:
>>> from scipy.signal.windows import general_cosine
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = general_cosine(1000, HFT90D, sym=False)
>>> plt.plot(window)
>>> plt.title("HFT90D window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 10000) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-50/1000, 50/1000, -140, 0])
>>> plt.title("Frequency response of the HFT90D window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.axhline(-90.2, color='red')
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
fac = np.linspace(-np.pi, np.pi, M)
w = np.zeros(M)
for k in range(len(a)):
w += a[k] * np.cos(k * fac)
return _truncate(w, needs_trunc)
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Also known as a rectangular window or Dirichlet window, this is equivalent
to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
w = np.ones(M, float)
return _truncate(w, needs_trunc)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
See Also
--------
bartlett : A triangular window that touches zero
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
return _truncate(w, needs_trunc)
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] E. Parzen, "Mathematical Considerations in the Estimation of
Spectra", Technometrics, Vol. 3, No. 2 (May, 1961), pp. 167-190
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
return _truncate(w, needs_trunc)
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
return _truncate(w, needs_trunc)
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
The "exact Blackman" window was designed to null out the third and fourth
sidelobes, but has discontinuities at the boundaries, resulting in a
6 dB/oct fall-off. This window is an approximation of the "exact" window,
which does not null the sidelobes as well, but is smooth at the edges,
improving the fall-off rate to 18 dB/oct. [3]_
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
.. [3] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
return general_cosine(M, [0.42, 0.50, 0.08], sym)
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
This variation is called "Nuttall4c" by Heinzel. [2]_
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE
Transactions on Acoustics, Speech, and Signal Processing, vol. 29,
no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`.
.. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the
Discrete Fourier transform (DFT), including a comprehensive list of
window functions and some new flat-top windows", February 15, 2002
https://holometer.fnal.gov/GH_FFT.pdf
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return general_cosine(M, [0.3635819, 0.4891775, 0.1365995, 0.0106411], sym)
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym)
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
Flat top windows are used for taking accurate measurements of signal
amplitude in the frequency domain, with minimal scalloping error from the
center of a frequency bin to its edges, compared to others. This is a
5th-order cosine window, with the 5 terms optimized to make the main lobe
maximally flat. [1]_
References
----------
.. [1] D'Antona, Gabriele, and A. Ferrero, "Digital Signal Processing for
Measurement Systems", Springer Media, 2006, p. 70
:doi:`10.1007/0-387-28666-7`.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
a = [0.21557895, 0.41663158, 0.277263158, 0.083578947, 0.006947368]
return general_cosine(M, a, sym)
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
See Also
--------
triang : A triangular window that does not touch zero at the ends
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich. [2]_
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
return _truncate(w, needs_trunc)
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
return general_hamming(M, 0.5, sym)
@np.deprecate(new_name='scipy.signal.windows.hann')
def hanning(*args, **kwargs):
return hann(*args, **kwargs)
def tukey(M, alpha=0.5, sym=True):
r"""Return a Tukey window, also known as a tapered cosine window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float, optional
Shape parameter of the Tukey window, representing the fraction of the
window inside the cosine tapered region.
If zero, the Tukey window is equivalent to a rectangular window.
If one, the Tukey window is equivalent to a Hann window.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`
.. [2] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function#Tukey_window
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.tukey(51)
>>> plt.plot(window)
>>> plt.title("Tukey window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.ylim([0, 1.1])
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Tukey window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
if alpha <= 0:
return np.ones(M, 'd')
elif alpha >= 1.0:
return hann(M, sym=sym)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
return _truncate(w, needs_trunc)
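# --- Illustrative sketch (added for exposition; not part of the original
# module). The limiting cases handled explicitly at the top of `tukey` make
# it interpolate between two classic windows: alpha <= 0 gives a
# rectangular window and alpha >= 1 a Hann window.
def _demo_tukey_limits():
    assert np.allclose(tukey(32, alpha=0), boxcar(32))
    assert np.allclose(tukey(32, alpha=1), hann(32))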
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
return _truncate(w, needs_trunc)
def general_hamming(M, alpha, sym=True):
r"""Return a generalized Hamming window.
The generalized Hamming window is constructed by multiplying a rectangular
window by one period of a cosine function [1]_.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float
The window coefficient, :math:`\alpha`
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Hamming window is defined as
.. math:: w(n) = \alpha - \left(1 - \alpha\right) \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
Both the common Hamming window and Hann window are special cases of the
generalized Hamming window with :math:`\alpha` = 0.54 and :math:`\alpha` =
0.5, respectively [2]_.
See Also
--------
hamming, hann
Examples
--------
The Sentinel-1A/B Instrument Processing Facility uses generalized Hamming
windows in the processing of spaceborne Synthetic Aperture Radar (SAR)
data [3]_. The facility uses various values for the :math:`\alpha` parameter
based on operating mode of the SAR instrument. Some common :math:`\alpha`
values include 0.75, 0.7 and 0.52 [4]_. As an example, we plot these different
windows.
>>> from scipy.signal.windows import general_hamming
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.title("Generalized Hamming Windows")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> spatial_plot = plt.axes()
>>> plt.figure()
>>> plt.title("Frequency Responses")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> freq_plot = plt.axes()
>>> for alpha in [0.75, 0.7, 0.52]:
... window = general_hamming(41, alpha)
... spatial_plot.plot(window, label="{:.2f}".format(alpha))
... A = fft(window, 2048) / (len(window)/2.0)
... freq = np.linspace(-0.5, 0.5, len(A))
... response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
... freq_plot.plot(freq, response, label="{:.2f}".format(alpha))
>>> freq_plot.legend(loc="upper right")
>>> spatial_plot.legend(loc="upper right")
References
----------
.. [1] DSPRelated, "Generalized Hamming Window Family",
https://www.dsprelated.com/freebooks/sasp/Generalized_Hamming_Window_Family.html
.. [2] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [3] Riccardo Piantanida ESA, "Sentinel-1 Level 1 Detailed Algorithm
Definition",
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Level-1-Detailed-Algorithm-Definition
.. [4] Matthieu Bourbigot ESA, "Sentinel-1 Product Definition",
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Definition
"""
return general_cosine(M, [alpha, 1. - alpha], sym)
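# --- Illustrative sketch (added for exposition; not part of the original
# module). As the notes above state, the classic Hamming and Hann windows
# are the alpha = 0.54 and alpha = 0.5 members of this family.
def _demo_general_hamming_special_cases():
    assert np.allclose(general_hamming(32, 0.54), hamming(32))
    assert np.allclose(general_hamming(32, 0.5), hann(32))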
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
return general_hamming(M, 0.54, sym)
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate other windows by varying the beta parameter.
(Some literature uses alpha = beta/pi.) [4]_
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
be returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transform," Proceedings of the IEEE, vol. 66,
no. 1, pp. 51-83, Jan. 1978. :doi:`10.1109/PROC.1978.10837`.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
return _truncate(w, needs_trunc)
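# --- Illustrative sketch (added for exposition; not part of the original
# module). With beta = 0 the Bessel ratio above is identically
# I0(0)/I0(0) = 1, so the Kaiser window reduces to the rectangular window,
# matching the first row of the table in the docstring.
def _demo_kaiser_beta_zero():
    assert np.allclose(kaiser(32, beta=0), np.ones(32))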
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
return _truncate(w, needs_trunc)
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian "
... "window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
return _truncate(w, needs_trunc)
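# --- Illustrative sketch (added for exposition; not part of the original
# module). Setting p = 1 recovers the ordinary Gaussian window defined
# above, since exp(-0.5*(n/sig)**2) equals exp(-n**2 / (2*std**2)) with
# sig = std.
def _demo_general_gaussian_p_one():
    assert np.allclose(general_gaussian(32, p=1, sig=7), gaussian(32, std=7))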
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
    The time domain window is then generated using the IFFT, so
    power-of-two values of `M` are the fastest to generate, and prime
    values of `M` are the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (2 * (M % 2) - 1) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fftpack.fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fftpack.fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
return _truncate(w, needs_trunc)
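# --- Illustrative sketch (not part of the original scipy source) ----------
# The equiripple sidelobes of a Dolph-Chebyshev window sit near the
# requested attenuation. This measures the peak sidelobe level from an
# oversampled FFT; the main-lobe exclusion heuristic and the 5 dB tolerance
# are assumptions made for this demonstration, not scipy guarantees.
def _check_chebwin_sidelobes(M=51, at=100.0):
    w = chebwin(M, at)
    A = np.abs(np.fft.fft(w, 8192))
    resp = 20 * np.log10(np.maximum(A / A.max(), 1e-12))[:4096]
    k = np.argmax(resp < -at)        # first bin past the main-lobe edge
    assert resp[k:].max() < -at + 5  # sidelobes stay near -`at` dB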
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
.. note:: Deprecated in SciPy 1.1.
`slepian` will be removed in a future version of SciPy, it is
replaced by `dpss`, which uses the standard definition of a
digital Slepian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
See Also
--------
dpss
References
----------
.. [1] D. Slepian & H. O. Pollak: "Prolate spheroidal wave functions,
Fourier analysis and uncertainty-I," Bell Syst. Tech. J., vol.40,
pp.43-63, 1961. https://archive.org/details/bstj40-1-43
.. [2] H. J. Landau & H. O. Pollak: "Prolate spheroidal wave functions,
Fourier analysis and uncertainty-II," Bell Syst. Tech. J. , vol.40,
pp.65-83, 1961. https://archive.org/details/bstj40-1-65
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
warnings.warn('slepian is deprecated and will be removed in a future '
'version, use dpss instead', DeprecationWarning)
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
# our width is the full bandwidth
width = width / 2
# to match the old version
width = width / 2
m = np.arange(M, dtype='d')
H = np.zeros((2, M))
H[0, 1:] = m[1:] * (M - m[1:]) / 2
H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
_, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
win = win.ravel() / win.max()
return _truncate(win, needs_trunc)
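# --- Migration sketch (not part of the original scipy source) -------------
# `slepian` is deprecated in favour of `dpss`. The two use different
# parameterizations; mapping `width` to the standardized half-bandwidth via
# NW = M * width / 4 is an assumption inferred from the two eigenproblem
# setups, for illustration only. The cosine-similarity test tolerates any
# difference in normalization or eigenvector sign.
def _slepian_vs_dpss(M=63, width=0.3):
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        w_old = slepian(M, width)
    w_new = dpss(M, NW=M * width / 4.0)
    cos = np.dot(w_old, w_new) / (np.linalg.norm(w_old) *
                                  np.linalg.norm(w_new))
    assert abs(abs(cos) - 1.0) < 1e-8  # proportional up to sign/scale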
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
return _truncate(w, needs_trunc)
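# --- Illustrative sketch (not part of the original scipy source) ----------
# The cosine window is one half-period of a sine: w(n) = sin(pi*(n+0.5)/M).
def _check_cosine_formula(M=51):
    n = np.arange(M)
    assert np.allclose(cosine(M), np.sin(np.pi * (n + 0.5) / M))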
def exponential(M, center=None, tau=1., sym=True):
r"""Return an exponential (or Poisson) window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
center : float, optional
Parameter defining the center location of the window function.
The default value if not given is ``center = (M-1) / 2``. This
parameter must take its default value for symmetric windows.
tau : float, optional
Parameter defining the decay. For ``center = 0`` use
``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
remaining at the end.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Exponential window is defined as
.. math:: w(n) = e^{-|n-center| / \tau}
References
----------
S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
Technical Review 3, Bruel & Kjaer, 1987.
Examples
--------
Plot the symmetric window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> M = 51
>>> tau = 3.0
>>> window = signal.exponential(M, tau=tau)
>>> plt.plot(window)
>>> plt.title("Exponential Window (tau=3.0)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -35, 0])
>>> plt.title("Frequency response of the Exponential window (tau=3.0)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
This function can also generate non-symmetric windows:
>>> tau2 = -(M-1) / np.log(0.01)
>>> window2 = signal.exponential(M, 0, tau2, False)
>>> plt.figure()
>>> plt.plot(window2)
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
if center is None:
center = (M-1) / 2
n = np.arange(0, M)
w = np.exp(-np.abs(n-center) / tau)
return _truncate(w, needs_trunc)
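# --- Illustrative sketch (not part of the original scipy source) ----------
# Per the `tau` description above: for a one-sided window (center=0),
# tau = -(M-1)/ln(x) leaves fraction x of the peak at the last sample.
def _check_exponential_tail(M=51, x=0.01):
    tau = -(M - 1) / np.log(x)
    w = exponential(M, center=0, tau=tau, sym=False)
    assert np.isclose(w[-1], x)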
def dpss(M, NW, Kmax=None, sym=True, norm=None, return_ratios=False):
"""
Compute the Discrete Prolate Spheroidal Sequences (DPSS).
    DPSS (or Slepian sequences) are often used in multitaper power spectral
density estimation (see [1]_). The first window in the sequence can be
used to maximize the energy concentration in the main lobe, and is also
called the Slepian window.
Parameters
----------
M : int
Window length.
NW : float
Standardized half bandwidth corresponding to ``2*NW = BW/f0 = BW*N*dt``
where ``dt`` is taken as 1.
Kmax : int | None, optional
Number of DPSS windows to return (orders ``0`` through ``Kmax-1``).
If None (default), return only a single window of shape ``(M,)``
instead of an array of windows of shape ``(Kmax, M)``.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
norm : {2, 'approximate', 'subsample'} | None, optional
If 'approximate' or 'subsample', then the windows are normalized by the
maximum, and a correction scale-factor for even-length windows
is applied either using ``M**2/(M**2+NW)`` ("approximate") or
a FFT-based subsample shift ("subsample"), see Notes for details.
If None, then "approximate" is used when ``Kmax=None`` and 2 otherwise
(which uses the l2 norm).
return_ratios : bool, optional
If True, also return the concentration ratios in addition to the
windows.
Returns
-------
    v : ndarray, shape (Kmax, M) or (M,)
The DPSS windows. Will be 1D if `Kmax` is None.
r : ndarray, shape (Kmax,) or float, optional
The concentration ratios for the windows. Only returned if
`return_ratios` evaluates to True. Will be 0D if `Kmax` is None.
Notes
-----
This computation uses the tridiagonal eigenvector formulation given
in [2]_.
    In window-generation mode (``Kmax=None``), simply normalizing by the
    l-infinity norm would create a window with two unity values for even `M`
    but only one for odd `M`, causing slight normalization differences
    between even and odd lengths. The approximate correction of
    ``M**2/float(M**2+NW)`` for even sample numbers is used to counteract
    this effect (see Examples below).
For very long signals (e.g., 1e6 elements), it can be useful to compute
windows orders of magnitude shorter and use interpolation (e.g.,
`scipy.interpolate.interp1d`) to obtain tapers of length `M`,
but this in general will not preserve orthogonality between the tapers.
.. versionadded:: 1.1
References
----------
.. [1] Percival DB, Walden WT. Spectral Analysis for Physical Applications:
Multitaper and Conventional Univariate Techniques.
Cambridge University Press; 1993.
.. [2] Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
           Volume 57 (1978), 1371-1430.
.. [3] Kaiser, JF, Schafer RW. On the Use of the I0-Sinh Window for
Spectrum Analysis. IEEE Transactions on Acoustics, Speech and
Signal Processing. ASSP-28 (1): 105-107; 1980.
Examples
--------
We can compare the window to `kaiser`, which was invented as an alternative
that was easier to calculate [3]_ (example adapted from
`here <https://ccrma.stanford.edu/~jos/sasp/Kaiser_DPSS_Windows_Compared.html>`_):
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import windows, freqz
>>> N = 51
>>> fig, axes = plt.subplots(3, 2, figsize=(5, 7))
>>> for ai, alpha in enumerate((1, 3, 5)):
... win_dpss = windows.dpss(N, alpha)
... beta = alpha*np.pi
... win_kaiser = windows.kaiser(N, beta)
... for win, c in ((win_dpss, 'k'), (win_kaiser, 'r')):
... win /= win.sum()
... axes[ai, 0].plot(win, color=c, lw=1.)
... axes[ai, 0].set(xlim=[0, N-1], title=r'$\\alpha$ = %s' % alpha,
... ylabel='Amplitude')
... w, h = freqz(win)
... axes[ai, 1].plot(w, 20 * np.log10(np.abs(h)), color=c, lw=1.)
... axes[ai, 1].set(xlim=[0, np.pi],
... title=r'$\\beta$ = %0.2f' % beta,
... ylabel='Magnitude (dB)')
>>> for ax in axes.ravel():
... ax.grid(True)
>>> axes[2, 1].legend(['DPSS', 'Kaiser'])
>>> fig.tight_layout()
>>> plt.show()
And here are examples of the first four windows, along with their
concentration ratios:
>>> M = 512
>>> NW = 2.5
>>> win, eigvals = windows.dpss(M, NW, 4, return_ratios=True)
>>> fig, ax = plt.subplots(1)
>>> ax.plot(win.T, linewidth=1.)
>>> ax.set(xlim=[0, M-1], ylim=[-0.1, 0.1], xlabel='Samples',
... title='DPSS, M=%d, NW=%0.1f' % (M, NW))
>>> ax.legend(['win[%d] (%0.4f)' % (ii, ratio)
... for ii, ratio in enumerate(eigvals)])
>>> fig.tight_layout()
>>> plt.show()
Using a standard :math:`l_{\\infty}` norm would produce two unity values
for even `M`, but only one unity value for odd `M`. This produces uneven
window power that can be counteracted by the approximate correction
``M**2/float(M**2+NW)``, which can be selected by using
``norm='approximate'`` (which is the same as ``norm=None`` when
``Kmax=None``, as is the case here). Alternatively, the slower
``norm='subsample'`` can be used, which uses subsample shifting in the
frequency domain (FFT) to compute the correction:
>>> Ms = np.arange(1, 41)
>>> factors = (50, 20, 10, 5, 2.0001)
>>> energy = np.empty((3, len(Ms), len(factors)))
>>> for mi, M in enumerate(Ms):
... for fi, factor in enumerate(factors):
... NW = M / float(factor)
... # Corrected using empirical approximation (default)
... win = windows.dpss(M, NW)
... energy[0, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
... # Corrected using subsample shifting
... win = windows.dpss(M, NW, norm='subsample')
... energy[1, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
... # Uncorrected (using l-infinity norm)
... win /= win.max()
... energy[2, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
>>> fig, ax = plt.subplots(1)
>>> hs = ax.plot(Ms, energy[2], '-o', markersize=4,
... markeredgecolor='none')
>>> leg = [hs[-1]]
>>> for hi, hh in enumerate(hs):
... h1 = ax.plot(Ms, energy[0, :, hi], '-o', markersize=4,
... color=hh.get_color(), markeredgecolor='none',
... alpha=0.66)
... h2 = ax.plot(Ms, energy[1, :, hi], '-o', markersize=4,
... color=hh.get_color(), markeredgecolor='none',
... alpha=0.33)
... if hi == len(hs) - 1:
... leg.insert(0, h1[0])
... leg.insert(0, h2[0])
>>> ax.set(xlabel='M (samples)', ylabel=r'Power / $\\sqrt{M}$')
>>> ax.legend(leg, ['Uncorrected', r'Corrected: $\\frac{M^2}{M^2+NW}$',
... 'Corrected (subsample)'])
>>> fig.tight_layout()
""" # noqa: E501
if _len_guards(M):
return np.ones(M)
if norm is None:
norm = 'approximate' if Kmax is None else 2
known_norms = (2, 'approximate', 'subsample')
if norm not in known_norms:
raise ValueError('norm must be one of %s, got %s'
% (known_norms, norm))
if Kmax is None:
singleton = True
Kmax = 1
else:
singleton = False
Kmax = operator.index(Kmax)
if not 0 < Kmax <= M:
            raise ValueError('Kmax must be greater than 0 and at most M')
if NW >= M/2.:
raise ValueError('NW must be less than M/2.')
if NW <= 0:
raise ValueError('NW must be positive')
M, needs_trunc = _extend(M, sym)
W = float(NW) / M
nidx = np.arange(M)
# Here we want to set up an optimization problem to find a sequence
# whose energy is maximally concentrated within band [-W,W].
# Thus, the measure lambda(T,W) is the ratio between the energy within
# that band, and the total energy. This leads to the eigen-system
# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
# eigenvalue is the sequence with maximally concentrated energy. The
# collection of eigenvectors of this system are called Slepian
# sequences, or discrete prolate spheroidal sequences (DPSS). Only the
# first K, K = 2NW/dt orders of DPSS will exhibit good spectral
# concentration
# [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
# Here we set up an alternative symmetric tri-diagonal eigenvalue
# problem such that
# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
# and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
# [see Percival and Walden, 1993]
d = ((M - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
e = nidx[1:] * (M - nidx[1:]) / 2.
# only calculate the highest Kmax eigenvalues
w, windows = linalg.eigh_tridiagonal(
d, e, select='i', select_range=(M - Kmax, M - 1))
w = w[::-1]
windows = windows[:, ::-1].T
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
fix_even = (windows[::2].sum(axis=1) < 0)
for i, f in enumerate(fix_even):
if f:
windows[2 * i] *= -1
# * antisymmetric tapers should begin with a positive lobe
# (this depends on the definition of "lobe", here we'll take the first
# point above the numerical noise, which should be good enough for
# sufficiently smooth functions, and more robust than relying on an
# algorithm that uses max(abs(w)), which is susceptible to numerical
# noise problems)
thresh = max(1e-7, 1. / M)
for i, w in enumerate(windows[1::2]):
if w[w * w > thresh][0] < 0:
windows[2 * i + 1] *= -1
# Now find the eigenvalues of the original spectral concentration problem
# Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
if return_ratios:
dpss_rxx = _fftautocorr(windows)
r = 4 * W * np.sinc(2 * W * nidx)
r[0] = 2 * W
ratios = np.dot(dpss_rxx, r)
if singleton:
ratios = ratios[0]
# Deal with sym and Kmax=None
if norm != 2:
windows /= windows.max()
if M % 2 == 0:
if norm == 'approximate':
correction = M**2 / float(M**2 + NW)
else:
s = np.fft.rfft(windows[0])
shift = -(1 - 1./M) * np.arange(1, M//2 + 1)
s[1:] *= 2 * np.exp(-1j * np.pi * shift)
correction = M / s.real.sum()
windows *= correction
# else we're already l2 normed, so do nothing
if needs_trunc:
windows = windows[:, :-1]
if singleton:
windows = windows[0]
return (windows, ratios) if return_ratios else windows
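# --- Illustrative sketch (not part of the original scipy source) ----------
# With the default l2 normalization (Kmax given), the DPSS tapers are
# orthonormal, and the concentration ratios decrease with taper order.
def _check_dpss_orthonormal(M=512, NW=2.5, K=4):
    tapers, ratios = dpss(M, NW, K, return_ratios=True)
    assert np.allclose(np.dot(tapers, tapers.T), np.eye(K), atol=1e-8)
    assert np.all(ratios[:-1] >= ratios[1:])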
def _fftautocorr(x):
"""Compute the autocorrelation of a real array and crop the result."""
N = x.shape[-1]
use_N = fftpack.next_fast_len(2*N-1)
x_fft = np.fft.rfft(x, use_N, axis=-1)
cxy = np.fft.irfft(x_fft * x_fft.conj(), n=use_N)[:, :N]
# Or equivalently (but in most cases slower):
# cxy = np.array([np.convolve(xx, yy[::-1], mode='full')
# for xx, yy in zip(x, x)])[:, N-1:2*N-1]
return cxy
_win_equiv_raw = {
('barthann', 'brthan', 'bth'): (barthann, False),
('bartlett', 'bart', 'brt'): (bartlett, False),
('blackman', 'black', 'blk'): (blackman, False),
('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
('bohman', 'bman', 'bmn'): (bohman, False),
('boxcar', 'box', 'ones',
'rect', 'rectangular'): (boxcar, False),
('chebwin', 'cheb'): (chebwin, True),
('cosine', 'halfcosine'): (cosine, False),
('exponential', 'poisson'): (exponential, True),
('flattop', 'flat', 'flt'): (flattop, False),
('gaussian', 'gauss', 'gss'): (gaussian, True),
('general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
('hamming', 'hamm', 'ham'): (hamming, False),
('hanning', 'hann', 'han'): (hann, False),
('kaiser', 'ksr'): (kaiser, True),
('nuttall', 'nutl', 'nut'): (nuttall, False),
('parzen', 'parz', 'par'): (parzen, False),
('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
('triangle', 'triang', 'tri'): (triang, False),
('tukey', 'tuk'): (tukey, True),
}
# Fill dict with all valid window name strings
_win_equiv = {}
for k, v in _win_equiv_raw.items():
for key in k:
_win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
_needs_param = set()
for k, v in _win_equiv_raw.items():
if v[1]:
_needs_param.update(k)
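# --- Illustrative sketch (not part of the original scipy source) ----------
# Every alias in the registry maps to its window function, and
# `_needs_param` records which names require extra arguments in get_window.
def _check_registry():
    assert _win_equiv['hamm'] is hamming
    assert _win_equiv['ones'] is boxcar
    assert 'kaiser' in _needs_param and 'hann' not in _needs_param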
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True (default), create a "periodic" window, ready to use with
`ifftshift` and be multiplied by the result of an FFT (see also
`fftpack.fftfreq`).
If False, create a "symmetric" window, for use in filter design.
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
`boxcar`, `triang`, `blackman`, `hamming`, `hann`, `bartlett`,
`flattop`, `parzen`, `bohman`, `blackmanharris`, `nuttall`,
`barthann`, `kaiser` (needs beta), `gaussian` (needs standard
deviation), `general_gaussian` (needs power, width), `slepian`
(needs width), `dpss` (needs normalized half-bandwidth),
`chebwin` (needs attenuation), `exponential` (needs decay scale),
`tukey` (needs taper fraction)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the `kaiser` window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.125, 0.375, 0.625, 0.875, 0.875, 0.625, 0.375])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093,
0.97885093, 0.82160913, 0.56437221, 0.29425961])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093,
0.97885093, 0.82160913, 0.56437221, 0.29425961])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, string_types):
if window in _needs_param:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
try:
winfunc = _win_equiv[winstr]
except KeyError:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
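# --- Illustrative sketch (not part of the original scipy source) ----------
# A string, a (name, arg) tuple, and a bare float (kaiser beta) are three
# spellings of the same dispatch; fftbins=True means sym=False.
def _check_get_window_dispatch(Nx=16):
    assert np.allclose(get_window('hann', Nx), hann(Nx, sym=False))
    assert np.allclose(get_window(('gaussian', 7), Nx),
                       gaussian(Nx, 7, sym=False))
    assert np.allclose(get_window(4.0, Nx), kaiser(Nx, 4.0, sym=False))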
# ===========================================================================
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/signal/windows/__init__.py
# (repo: cba-pipeline-public)
# ===========================================================================
"""
==============================================
Window functions (:mod:`scipy.signal.windows`)
==============================================
The suite of window functions for filtering and spectral estimation.
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
dpss -- Discrete prolate spheroidal sequences
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_cosine -- Generalized Cosine window
general_gaussian -- Generalized Gaussian window
general_hamming -- Generalized Hamming window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
tukey -- Tukey window
"""
from .windows import *
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'general_cosine',
'general_hamming', 'chebwin', 'slepian', 'cosine', 'hann',
'exponential', 'tukey', 'get_window', 'dpss']
# ===========================================================================
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/measurements.py
# (repo: cba-pipeline-public)
# ===========================================================================
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy
import numpy as np
from . import _ni_support
from . import _ni_label
from . import _nd_image
from . import morphology
__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',
'variance', 'standard_deviation', 'minimum', 'maximum', 'median',
'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',
'histogram', 'watershed_ift']
def label(input, structure=None, output=None):
"""
Label features in an array.
Parameters
----------
input : array_like
An array-like object to be labeled. Any non-zero values in `input` are
counted as features and zero values are considered the background.
structure : array_like, optional
A structuring element that defines feature connections.
`structure` must be symmetric. If no structuring element is provided,
one is automatically generated with a squared connectivity equal to
one. That is, for a 2-D `input` array, the default structuring element
is::
[[0,1,0],
[1,1,1],
[0,1,0]]
output : (None, data-type, array_like), optional
If `output` is a data type, it specifies the type of the resulting
labeled feature array
If `output` is an array-like object, then `output` will be updated
with the labeled features from this function. This function can
operate in-place, by passing output=input.
Note that the output must be able to store the largest label, or this
function will raise an Exception.
Returns
-------
label : ndarray or int
An integer ndarray where each unique feature in `input` has a unique
label in the returned array.
num_features : int
How many objects were found.
If `output` is None, this function returns a tuple of
(`labeled_array`, `num_features`).
If `output` is a ndarray, then it will be updated with values in
`labeled_array` and only `num_features` will be returned by this
function.
See Also
--------
find_objects : generate a list of slices for the labeled features (or
objects); useful for finding features' position or
dimensions
Examples
--------
Create an image with some features, then label it using the default
(cross-shaped) structuring element:
>>> from scipy.ndimage import label, generate_binary_structure
>>> a = np.array([[0,0,1,1,0,0],
... [0,0,0,1,0,0],
... [1,1,0,0,1,0],
... [0,0,0,1,0,0]])
>>> labeled_array, num_features = label(a)
Each of the 4 features are labeled with a different integer:
>>> num_features
4
>>> labeled_array
array([[0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[2, 2, 0, 0, 3, 0],
[0, 0, 0, 4, 0, 0]])
Generate a structuring element that will consider features connected even
if they touch diagonally:
>>> s = generate_binary_structure(2,2)
or,
>>> s = [[1,1,1],
... [1,1,1],
... [1,1,1]]
Label the image using the new structuring element:
>>> labeled_array, num_features = label(a, structure=s)
Show the 2 labeled features (note that features 1, 3, and 4 from above are
now considered a single feature):
>>> num_features
2
>>> labeled_array
array([[0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[2, 2, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0]])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if structure is None:
structure = morphology.generate_binary_structure(input.ndim, 1)
structure = numpy.asarray(structure, dtype=bool)
if structure.ndim != input.ndim:
raise RuntimeError('structure and input must have equal rank')
for ii in structure.shape:
if ii != 3:
raise ValueError('structure dimensions must be equal to 3')
# Use 32 bits if it's large enough for this image.
# _ni_label.label() needs two entries for background and
# foreground tracking
need_64bits = input.size >= (2**31 - 2)
if isinstance(output, numpy.ndarray):
if output.shape != input.shape:
raise ValueError("output shape not correct")
caller_provided_output = True
else:
caller_provided_output = False
if output is None:
output = np.empty(input.shape, np.intp if need_64bits else np.int32)
else:
output = np.empty(input.shape, output)
    # handle scalars and empty arrays
if input.ndim == 0 or input.size == 0:
if input.ndim == 0:
# scalar
maxlabel = 1 if (input != 0) else 0
output[...] = maxlabel
else:
            # empty array
maxlabel = 0
if caller_provided_output:
return maxlabel
else:
return output, maxlabel
try:
max_label = _ni_label._label(input, structure, output)
except _ni_label.NeedMoreBits:
# Make another attempt with enough bits, then try to cast to the
# new type.
tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32)
max_label = _ni_label._label(input, structure, tmp_output)
output[...] = tmp_output[...]
if not np.all(output == tmp_output):
# refuse to return bad results
raise RuntimeError("insufficient bit-depth in requested output type")
if caller_provided_output:
# result was written in-place
return max_label
else:
return output, max_label
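# --- Illustrative sketch (not part of the original scipy source) ----------
# When a preallocated `output` array is passed, `label` writes the labels
# in place and returns only the number of features found.
def _label_in_place_demo():
    a = numpy.array([[0, 1, 1],
                     [1, 0, 0],
                     [0, 0, 1]])
    out = numpy.empty(a.shape, numpy.int32)
    num = label(a, output=out)
    return out, num   # out now holds the labels; num == 3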
def find_objects(input, max_label=0):
"""
Find objects in a labeled array.
Parameters
----------
input : ndarray of ints
Array containing objects defined by different labels. Labels with
value 0 are ignored.
max_label : int, optional
Maximum label to be searched for in `input`. If max_label is not
given, the positions of all objects are returned.
Returns
-------
object_slices : list of tuples
A list of tuples, with each tuple containing N slices (with N the
dimension of the input array). Slices correspond to the minimal
parallelepiped that contains the object. If a number is missing,
None is returned instead of a slice.
See Also
--------
label, center_of_mass
Notes
-----
This function is very useful for isolating a volume of interest inside
a 3-D array, that cannot be "seen through".
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((6,6), dtype=int)
>>> a[2:4, 2:4] = 1
>>> a[4, 4] = 1
>>> a[:2, :3] = 2
>>> a[0, 5] = 3
>>> a
array([[2, 2, 2, 0, 0, 3],
[2, 2, 2, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0]])
>>> ndimage.find_objects(a)
[(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None)), (slice(0, 1, None), slice(5, 6, None))]
>>> ndimage.find_objects(a, max_label=2)
[(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))]
>>> ndimage.find_objects(a == 1, max_label=2)
[(slice(2, 5, None), slice(2, 5, None)), None]
>>> loc = ndimage.find_objects(a)[0]
>>> a[loc]
array([[1, 1, 0],
[1, 1, 0],
[0, 0, 1]])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if max_label < 1:
max_label = input.max()
return _nd_image.find_objects(input, max_label)
def labeled_comprehension(input, labels, index, func, out_dtype, default, pass_positions=False):
"""
Roughly equivalent to [func(input[labels == i]) for i in index].
Sequentially applies an arbitrary function (that works on array_like input)
to subsets of an n-D image array specified by `labels` and `index`.
The option exists to provide the function with positional parameters as the
second argument.
Parameters
----------
input : array_like
Data from which to select `labels` to process.
labels : array_like or None
Labels to objects in `input`.
If not None, array must be same shape as `input`.
If None, `func` is applied to raveled `input`.
index : int, sequence of ints or None
Subset of `labels` to which to apply `func`.
If a scalar, a single value is returned.
If None, `func` is applied to all non-zero values of `labels`.
func : callable
Python function to apply to `labels` from `input`.
out_dtype : dtype
Dtype to use for `result`.
default : int, float or None
        Default return value when an element of `index` does not exist
in `labels`.
pass_positions : bool, optional
If True, pass linear indices to `func` as a second argument.
Default is False.
Returns
-------
result : ndarray
        Result of applying `func` to each of the regions of `input`
        selected by `labels` and `index`.
Examples
--------
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> from scipy import ndimage
>>> lbl, nlbl = ndimage.label(a)
>>> lbls = np.arange(1, nlbl+1)
>>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0)
array([ 2.75, 5.5 , 6. ])
Falling back to `default`:
>>> lbls = np.arange(1, nlbl+2)
>>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1)
array([ 2.75, 5.5 , 6. , -1. ])
Passing positions:
>>> def fn(val, pos):
... print("fn says: %s : %s" % (val, pos))
... return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum())
...
>>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True)
fn says: [1 2 5 3] : [0 1 4 5]
fn says: [4 7] : [ 7 11]
fn says: [9 3] : [12 13]
array([ 11., 11., -12., 0.])
"""
as_scalar = numpy.isscalar(index)
input = numpy.asarray(input)
if pass_positions:
positions = numpy.arange(input.size).reshape(input.shape)
if labels is None:
if index is not None:
raise ValueError("index without defined labels")
if not pass_positions:
return func(input.ravel())
else:
return func(input.ravel(), positions.ravel())
try:
input, labels = numpy.broadcast_arrays(input, labels)
except ValueError:
raise ValueError("input and labels must have the same shape "
"(excepting dimensions with width 1)")
if index is None:
if not pass_positions:
return func(input[labels > 0])
else:
return func(input[labels > 0], positions[labels > 0])
index = numpy.atleast_1d(index)
if np.any(index.astype(labels.dtype).astype(index.dtype) != index):
raise ValueError("Cannot convert index values from <%s> to <%s> "
"(labels' type) without loss of precision" %
(index.dtype, labels.dtype))
index = index.astype(labels.dtype)
# optimization: find min/max in index, and select those parts of labels, input, and positions
lo = index.min()
hi = index.max()
mask = (labels >= lo) & (labels <= hi)
# this also ravels the arrays
labels = labels[mask]
input = input[mask]
if pass_positions:
positions = positions[mask]
# sort everything by labels
label_order = labels.argsort()
labels = labels[label_order]
input = input[label_order]
if pass_positions:
positions = positions[label_order]
index_order = index.argsort()
sorted_index = index[index_order]
def do_map(inputs, output):
"""labels must be sorted"""
nidx = sorted_index.size
# Find boundaries for each stretch of constant labels
# This could be faster, but we already paid N log N to sort labels.
lo = numpy.searchsorted(labels, sorted_index, side='left')
hi = numpy.searchsorted(labels, sorted_index, side='right')
for i, l, h in zip(range(nidx), lo, hi):
if l == h:
continue
output[i] = func(*[inp[l:h] for inp in inputs])
temp = numpy.empty(index.shape, out_dtype)
temp[:] = default
if not pass_positions:
do_map([input], temp)
else:
do_map([input, positions], temp)
output = numpy.zeros(index.shape, out_dtype)
output[index_order] = temp
if as_scalar:
output = output[0]
return output
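# --- Illustrative sketch (not part of the original scipy source) ----------
# For indices actually present in `labels`, labeled_comprehension agrees
# with the naive comprehension from its one-line summary above.
def _check_labeled_comprehension():
    a = numpy.array([[1, 2, 0, 0],
                     [5, 3, 0, 4],
                     [0, 0, 0, 7],
                     [9, 3, 0, 0]])
    lbl, nlbl = label(a)
    idx = numpy.arange(1, nlbl + 1)
    fast = labeled_comprehension(a, lbl, idx, numpy.mean, float, 0)
    naive = [numpy.mean(a[lbl == i]) for i in idx]
    assert numpy.allclose(fast, naive)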
def _safely_castable_to_int(dt):
"""Test whether the numpy data type `dt` can be safely cast to an int."""
int_size = np.dtype(int).itemsize
safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or
(np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size))
return safe
def _stats(input, labels=None, index=None, centered=False):
"""Count, sum, and optionally compute (sum - centre)^2 of input by label
Parameters
----------
input : array_like, n-dimensional
The input data to be analyzed.
labels : array_like (n-dimensional), optional
The labels of the data in `input`. This array must be broadcast
compatible with `input`; typically it is the same shape as `input`.
If `labels` is None, all nonzero values in `input` are treated as
the single labeled group.
index : label or sequence of labels, optional
These are the labels of the groups for which the stats are computed.
If `index` is None, the stats are computed for the single group where
`labels` is greater than 0.
centered : bool, optional
If True, the centered sum of squares for each labeled group is
also returned. Default is False.
Returns
-------
counts : int or ndarray of ints
The number of elements in each labeled group.
sums : scalar or ndarray of scalars
The sums of the values in each labeled group.
sums_c : scalar or ndarray of scalars, optional
The sums of mean-centered squares of the values in each labeled group.
This is only returned if `centered` is True.
"""
def single_group(vals):
if centered:
vals_c = vals - vals.mean()
return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum()
else:
return vals.size, vals.sum()
if labels is None:
return single_group(input)
# ensure input and labels match sizes
input, labels = numpy.broadcast_arrays(input, labels)
if index is None:
return single_group(input[labels > 0])
if numpy.isscalar(index):
return single_group(input[labels == index])
def _sum_centered(labels):
# `labels` is expected to be an ndarray with the same shape as `input`.
# It must contain the label indices (which are not necessarily the labels
# themselves).
means = sums / counts
centered_input = input - means[labels]
# bincount expects 1d inputs, so we ravel the arguments.
bc = numpy.bincount(labels.ravel(),
weights=(centered_input *
centered_input.conjugate()).ravel())
return bc
# Remap labels to unique integers if necessary, or if the largest
# label is larger than the number of values.
if (not _safely_castable_to_int(labels.dtype) or
labels.min() < 0 or labels.max() > labels.size):
# Use numpy.unique to generate the label indices. `new_labels` will
# be 1-d, but it should be interpreted as the flattened n-d array of
# label indices.
unique_labels, new_labels = numpy.unique(labels, return_inverse=True)
counts = numpy.bincount(new_labels)
sums = numpy.bincount(new_labels, weights=input.ravel())
if centered:
# Compute the sum of the mean-centered squares.
# We must reshape new_labels to the n-d shape of `input` before
# passing it _sum_centered.
sums_c = _sum_centered(new_labels.reshape(labels.shape))
idxs = numpy.searchsorted(unique_labels, index)
# make all of idxs valid
idxs[idxs >= unique_labels.size] = 0
found = (unique_labels[idxs] == index)
else:
# labels are an integer type allowed by bincount, and there aren't too
# many, so call bincount directly.
counts = numpy.bincount(labels.ravel())
sums = numpy.bincount(labels.ravel(), weights=input.ravel())
if centered:
sums_c = _sum_centered(labels)
# make sure all index values are valid
idxs = numpy.asanyarray(index, numpy.int).copy()
found = (idxs >= 0) & (idxs < counts.size)
idxs[~found] = 0
counts = counts[idxs]
counts[~found] = 0
sums = sums[idxs]
sums[~found] = 0
if not centered:
return (counts, sums)
else:
sums_c = sums_c[idxs]
sums_c[~found] = 0
return (counts, sums, sums_c)
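# --- Illustrative sketch (not part of the original scipy source) ----------
# `sum`, `mean` and `variance` below are thin wrappers over the
# (count, sum, centered-sum-of-squares) triple computed here.
def _stats_demo():
    vals = numpy.array([1., 2., 3., 4.])
    lbls = numpy.array([1, 1, 2, 2])
    counts, sums, sums_c = _stats(vals, lbls, index=[1, 2], centered=True)
    assert numpy.allclose(sums / counts, [1.5, 3.5])      # per-label means
    assert numpy.allclose(sums_c / counts, [0.25, 0.25])  # per-label variances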
def sum(input, labels=None, index=None):
"""
Calculate the sum of the values of the array.
Parameters
----------
input : array_like
Values of `input` inside the regions defined by `labels`
are summed together.
labels : array_like of ints, optional
Assign labels to the values of the array. Has to have the same shape as
`input`.
index : array_like, optional
A single label number or a sequence of label numbers of
the objects to be measured.
Returns
-------
sum : ndarray or scalar
An array of the sums of values of `input` inside the regions defined
        by `labels` with the same shape as `index`. If `index` is None or a
        scalar, a scalar is returned.
See also
--------
mean, median
Examples
--------
>>> from scipy import ndimage
>>> input = [0,1,2,3]
>>> labels = [1,1,2,2]
>>> ndimage.sum(input, labels, index=[1,2])
[1.0, 5.0]
>>> ndimage.sum(input, labels, index=1)
1
>>> ndimage.sum(input, labels)
6
"""
count, sum = _stats(input, labels, index)
return sum
def mean(input, labels=None, index=None):
"""
Calculate the mean of the values of an array at labels.
Parameters
----------
input : array_like
Array on which to compute the mean of elements over distinct
regions.
labels : array_like, optional
Array of labels of same shape, or broadcastable to the same shape as
`input`. All elements sharing the same label form one region over
which the mean of the elements is computed.
index : int or sequence of ints, optional
Labels of the objects over which the mean is to be computed.
Default is None, in which case the mean for all values where label is
greater than 0 is calculated.
Returns
-------
out : list
Sequence of same length as `index`, with the mean of the different
regions labeled by the labels in `index`.
See also
--------
ndimage.variance, ndimage.standard_deviation, ndimage.minimum,
ndimage.maximum, ndimage.sum
ndimage.label
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(25).reshape((5,5))
>>> labels = np.zeros_like(a)
>>> labels[3:5,3:5] = 1
>>> index = np.unique(labels)
>>> labels
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 0, 1, 1]])
>>> index
array([0, 1])
>>> ndimage.mean(a, labels=labels, index=index)
[10.285714285714286, 21.0]
"""
count, sum = _stats(input, labels, index)
return sum / numpy.asanyarray(count).astype(numpy.float)
def variance(input, labels=None, index=None):
"""
Calculate the variance of the values of an n-D image array, optionally at
specified sub-regions.
Parameters
----------
input : array_like
Nd-image data to process.
labels : array_like, optional
Labels defining sub-regions in `input`.
If not None, must be same shape as `input`.
index : int or sequence of ints, optional
`labels` to include in output. If None (default), all values where
`labels` is non-zero are used.
Returns
-------
variance : float or ndarray
Values of variance, for each sub-region if `labels` and `index` are
specified.
See Also
--------
label, standard_deviation, maximum, minimum, extrema
Examples
--------
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> from scipy import ndimage
>>> ndimage.variance(a)
7.609375
Features to process can be specified using `labels` and `index`:
>>> lbl, nlbl = ndimage.label(a)
>>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1))
array([ 2.1875, 2.25 , 9. ])
If no index is given, all non-zero `labels` are processed:
>>> ndimage.variance(a, lbl)
6.1875
"""
count, sum, sum_c_sq = _stats(input, labels, index, centered=True)
return sum_c_sq / np.asanyarray(count).astype(float)
def standard_deviation(input, labels=None, index=None):
"""
Calculate the standard deviation of the values of an n-D image array,
optionally at specified sub-regions.
Parameters
----------
input : array_like
Nd-image data to process.
labels : array_like, optional
Labels to identify sub-regions in `input`.
If not None, must be same shape as `input`.
index : int or sequence of ints, optional
`labels` to include in output. If None (default), all values where
`labels` is non-zero are used.
Returns
-------
standard_deviation : float or ndarray
Values of standard deviation, for each sub-region if `labels` and
`index` are specified.
See Also
--------
label, variance, maximum, minimum, extrema
Examples
--------
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> from scipy import ndimage
>>> ndimage.standard_deviation(a)
2.7585095613392387
Features to process can be specified using `labels` and `index`:
>>> lbl, nlbl = ndimage.label(a)
>>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1))
array([ 1.479, 1.5 , 3. ])
If no index is given, non-zero `labels` are processed:
>>> ndimage.standard_deviation(a, lbl)
2.4874685927665499
"""
return numpy.sqrt(variance(input, labels, index))
def _select(input, labels=None, index=None, find_min=False, find_max=False,
find_min_positions=False, find_max_positions=False,
find_median=False):
"""Returns min, max, or both, plus their positions (if requested), and
median."""
input = numpy.asanyarray(input)
find_positions = find_min_positions or find_max_positions
positions = None
if find_positions:
positions = numpy.arange(input.size).reshape(input.shape)
def single_group(vals, positions):
result = []
if find_min:
result += [vals.min()]
if find_min_positions:
result += [positions[vals == vals.min()][0]]
if find_max:
result += [vals.max()]
if find_max_positions:
result += [positions[vals == vals.max()][0]]
if find_median:
result += [numpy.median(vals)]
return result
if labels is None:
return single_group(input, positions)
# ensure input and labels match sizes
input, labels = numpy.broadcast_arrays(input, labels)
if index is None:
mask = (labels > 0)
masked_positions = None
if find_positions:
masked_positions = positions[mask]
return single_group(input[mask], masked_positions)
if numpy.isscalar(index):
mask = (labels == index)
masked_positions = None
if find_positions:
masked_positions = positions[mask]
return single_group(input[mask], masked_positions)
# remap labels to unique integers if necessary, or if the largest
# label is larger than the number of values.
if (not _safely_castable_to_int(labels.dtype) or
labels.min() < 0 or labels.max() > labels.size):
# remap labels, and indexes
unique_labels, labels = numpy.unique(labels, return_inverse=True)
idxs = numpy.searchsorted(unique_labels, index)
# make all of idxs valid
idxs[idxs >= unique_labels.size] = 0
found = (unique_labels[idxs] == index)
else:
# labels are an integer type, and there aren't too many.
idxs = numpy.asanyarray(index, numpy.int).copy()
found = (idxs >= 0) & (idxs <= labels.max())
idxs[~ found] = labels.max() + 1
if find_median:
order = numpy.lexsort((input.ravel(), labels.ravel()))
else:
order = input.ravel().argsort()
input = input.ravel()[order]
labels = labels.ravel()[order]
if find_positions:
positions = positions.ravel()[order]
result = []
if find_min:
mins = numpy.zeros(labels.max() + 2, input.dtype)
mins[labels[::-1]] = input[::-1]
result += [mins[idxs]]
if find_min_positions:
minpos = numpy.zeros(labels.max() + 2, int)
minpos[labels[::-1]] = positions[::-1]
result += [minpos[idxs]]
if find_max:
maxs = numpy.zeros(labels.max() + 2, input.dtype)
maxs[labels] = input
result += [maxs[idxs]]
if find_max_positions:
maxpos = numpy.zeros(labels.max() + 2, int)
maxpos[labels] = positions
result += [maxpos[idxs]]
if find_median:
locs = numpy.arange(len(labels))
lo = numpy.zeros(labels.max() + 2, numpy.int)
lo[labels[::-1]] = locs[::-1]
hi = numpy.zeros(labels.max() + 2, numpy.int)
hi[labels] = locs
lo = lo[idxs]
hi = hi[idxs]
# lo is an index to the lowest value in input for each label,
# hi is an index to the largest value.
# move them to be either the same ((hi - lo) % 2 == 0) or next
# to each other ((hi - lo) % 2 == 1), then average.
step = (hi - lo) // 2
lo += step
hi -= step
result += [(input[lo] + input[hi]) / 2.0]
return result
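# --- Illustrative sketch (not part of the original scipy source) ----------
# The min/max extraction above relies on "sorted scatter": after sorting by
# value, `maxs[labels] = input` leaves each slot holding the value from the
# last (largest) occurrence of that label; reversing both arrays gives the
# minimum instead.
def _sorted_scatter_demo():
    vals = numpy.array([3, 1, 2, 5, 4])
    lbls = numpy.array([0, 0, 1, 1, 1])
    order = vals.argsort()
    vals, lbls = vals[order], lbls[order]
    maxs = numpy.zeros(2, vals.dtype)
    maxs[lbls] = vals                    # last write wins -> per-label max
    mins = numpy.zeros(2, vals.dtype)
    mins[lbls[::-1]] = vals[::-1]        # reversed -> per-label min
    assert (maxs == [3, 5]).all() and (mins == [1, 2]).all()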
def minimum(input, labels=None, index=None):
"""
Calculate the minimum of the values of an array over labeled regions.
Parameters
----------
input : array_like
Array_like of values. For each region specified by `labels`, the
minimal values of `input` over the region is computed.
labels : array_like, optional
An array_like of integers marking different regions over which the
minimum value of `input` is to be computed. `labels` must have the
same shape as `input`. If `labels` is not specified, the minimum
over the whole array is returned.
index : array_like, optional
A list of region labels that are taken into account for computing the
minima. If index is None, the minimum over all elements where `labels`
is non-zero is returned.
Returns
-------
minimum : float or list of floats
List of minima of `input` over the regions determined by `labels` and
whose index is in `index`. If `index` or `labels` are not specified, a
float is returned: the minimal value of `input` if `labels` is None,
and the minimal value of elements where `labels` is greater than zero
if `index` is None.
See also
--------
label, maximum, median, minimum_position, extrema, sum, mean, variance,
standard_deviation
Notes
-----
The function returns a Python list and not a Numpy array, use
`np.array` to convert the list to an array.
Examples
--------
>>> from scipy import ndimage
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> labels, labels_nb = ndimage.label(a)
>>> labels
array([[1, 1, 0, 0],
[1, 1, 0, 2],
[0, 0, 0, 2],
[3, 3, 0, 0]])
>>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1))
[1.0, 4.0, 3.0]
>>> ndimage.minimum(a)
0.0
>>> ndimage.minimum(a, labels=labels)
1.0
"""
return _select(input, labels, index, find_min=True)[0]
def maximum(input, labels=None, index=None):
"""
Calculate the maximum of the values of an array over labeled regions.
Parameters
----------
input : array_like
Array_like of values. For each region specified by `labels`, the
maximal values of `input` over the region is computed.
labels : array_like, optional
An array of integers marking different regions over which the
maximum value of `input` is to be computed. `labels` must have the
same shape as `input`. If `labels` is not specified, the maximum
over the whole array is returned.
index : array_like, optional
A list of region labels that are taken into account for computing the
maxima. If index is None, the maximum over all elements where `labels`
is non-zero is returned.
Returns
-------
output : float or list of floats
List of maxima of `input` over the regions determined by `labels` and
whose index is in `index`. If `index` or `labels` are not specified, a
float is returned: the maximal value of `input` if `labels` is None,
and the maximal value of elements where `labels` is greater than zero
if `index` is None.
See also
--------
label, minimum, median, maximum_position, extrema, sum, mean, variance,
standard_deviation
Notes
-----
The function returns a Python list and not a Numpy array, use
`np.array` to convert the list to an array.
Examples
--------
>>> a = np.arange(16).reshape((4,4))
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> labels = np.zeros_like(a)
>>> labels[:2,:2] = 1
>>> labels[2:, 1:3] = 2
>>> labels
array([[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 2, 2, 0],
[0, 2, 2, 0]])
>>> from scipy import ndimage
>>> ndimage.maximum(a)
15.0
>>> ndimage.maximum(a, labels=labels, index=[1,2])
[5.0, 14.0]
>>> ndimage.maximum(a, labels=labels)
14.0
>>> b = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> labels, labels_nb = ndimage.label(b)
>>> labels
array([[1, 1, 0, 0],
[1, 1, 0, 2],
[0, 0, 0, 2],
[3, 3, 0, 0]])
>>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1))
[5.0, 7.0, 9.0]
"""
return _select(input, labels, index, find_max=True)[0]
def median(input, labels=None, index=None):
"""
Calculate the median of the values of an array over labeled regions.
Parameters
----------
input : array_like
Array_like of values. For each region specified by `labels`, the
median value of `input` over the region is computed.
labels : array_like, optional
An array_like of integers marking different regions over which the
median value of `input` is to be computed. `labels` must have the
same shape as `input`. If `labels` is not specified, the median
over the whole array is returned.
index : array_like, optional
A list of region labels that are taken into account for computing the
medians. If index is None, the median over all elements where `labels`
is non-zero is returned.
Returns
-------
median : float or list of floats
List of medians of `input` over the regions determined by `labels` and
whose index is in `index`. If `index` or `labels` are not specified, a
float is returned: the median value of `input` if `labels` is None,
and the median value of elements where `labels` is greater than zero
if `index` is None.
See also
--------
label, minimum, maximum, extrema, sum, mean, variance, standard_deviation
Notes
-----
The function returns a Python list and not a Numpy array, use
`np.array` to convert the list to an array.
Examples
--------
>>> from scipy import ndimage
>>> a = np.array([[1, 2, 0, 1],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> labels, labels_nb = ndimage.label(a)
>>> labels
array([[1, 1, 0, 2],
[1, 1, 0, 2],
[0, 0, 0, 2],
[3, 3, 0, 0]])
>>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1))
[2.5, 4.0, 6.0]
>>> ndimage.median(a)
1.0
>>> ndimage.median(a, labels=labels)
3.0
"""
return _select(input, labels, index, find_median=True)[0]
def minimum_position(input, labels=None, index=None):
"""
Find the positions of the minimums of the values of an array at labels.
Parameters
----------
input : array_like
Array_like of values.
labels : array_like, optional
An array of integers marking different regions over which the
position of the minimum value of `input` is to be computed.
`labels` must have the same shape as `input`. If `labels` is not
specified, the location of the first minimum over the whole
array is returned.
The `labels` argument only works when `index` is specified.
index : array_like, optional
A list of region labels that are taken into account for finding the
location of the minima. If `index` is None, the ``first`` minimum
over all elements where `labels` is non-zero is returned.
The `index` argument only works when `labels` is specified.
Returns
-------
output : list of tuples of ints
Tuple of ints or list of tuples of ints that specify the location
of minima of `input` over the regions determined by `labels` and
whose index is in `index`.
If `index` or `labels` are not specified, a tuple of ints is
returned specifying the location of the first minimal value of `input`.
See also
--------
label, minimum, median, maximum_position, extrema, sum, mean, variance,
standard_deviation
Examples
--------
>>> a = np.array([[10, 20, 30],
... [40, 80, 100],
... [1, 100, 200]])
>>> b = np.array([[1, 2, 0, 1],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> from scipy import ndimage
>>> ndimage.minimum_position(a)
(2, 0)
>>> ndimage.minimum_position(b)
(0, 2)
Features to process can be specified using `labels` and `index`:
>>> label, pos = ndimage.label(a)
>>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1))
[(2, 0)]
>>> label, pos = ndimage.label(b)
>>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1))
[(0, 0), (0, 3), (3, 1)]
"""
dims = numpy.array(numpy.asarray(input).shape)
# see numpy.unravel_index to understand this line.
dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
result = _select(input, labels, index, find_min_positions=True)[0]
if numpy.isscalar(result):
return tuple((result // dim_prod) % dims)
return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
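# Editor's sketch (not upstream code): the index arithmetic above reproduces
# numpy.unravel_index for C-ordered arrays. Assuming a 4x3 array and flat
# index 7:
#
# >>> import numpy as np
# >>> dims = np.array([4, 3])
# >>> dim_prod = np.cumprod([1] + list(dims[:0:-1]))[::-1]
# >>> dim_prod
# array([3, 1])
# >>> tuple((7 // dim_prod) % dims)
# (2, 1)
# >>> np.unravel_index(7, (4, 3))
# (2, 1)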
def maximum_position(input, labels=None, index=None):
"""
    Find the positions of the maxima of the values of an array at labels.
For each region specified by `labels`, the position of the maximum
value of `input` within the region is returned.
Parameters
----------
input : array_like
Array_like of values.
labels : array_like, optional
An array of integers marking different regions over which the
position of the maximum value of `input` is to be computed.
`labels` must have the same shape as `input`. If `labels` is not
specified, the location of the first maximum over the whole
array is returned.
The `labels` argument only works when `index` is specified.
index : array_like, optional
A list of region labels that are taken into account for finding the
location of the maxima. If `index` is None, the first maximum
over all elements where `labels` is non-zero is returned.
The `index` argument only works when `labels` is specified.
Returns
-------
output : list of tuples of ints
List of tuples of ints that specify the location of maxima of
`input` over the regions determined by `labels` and whose index
is in `index`.
If `index` or `labels` are not specified, a tuple of ints is
returned specifying the location of the ``first`` maximal value
of `input`.
See also
--------
    label, minimum, median, minimum_position, extrema, sum, mean, variance,
standard_deviation
"""
dims = numpy.array(numpy.asarray(input).shape)
# see numpy.unravel_index to understand this line.
dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
result = _select(input, labels, index, find_max_positions=True)[0]
if numpy.isscalar(result):
return tuple((result // dim_prod) % dims)
return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
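# Editor's sketch (not upstream code): `maximum_position` mirrors
# `minimum_position`; reusing the array `b` and imports from the
# `minimum_position` example above:
#
# >>> from scipy import ndimage
# >>> ndimage.maximum_position(b)
# (3, 0)
# >>> label, pos = ndimage.label(b)
# >>> ndimage.maximum_position(b, label, index=np.arange(1, pos + 1))
# [(1, 0), (2, 3), (3, 0)]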
def extrema(input, labels=None, index=None):
"""
    Calculate the minima and maxima of the values of an array
at labels, along with their positions.
Parameters
----------
input : ndarray
Nd-image data to process.
labels : ndarray, optional
Labels of features in input.
If not None, must be same shape as `input`.
index : int or sequence of ints, optional
Labels to include in output. If None (default), all values where
non-zero `labels` are used.
Returns
-------
minimums, maximums : int or ndarray
        Values of minima and maxima in each feature.
min_positions, max_positions : tuple or list of tuples
Each tuple gives the n-D coordinates of the corresponding minimum
or maximum.
See Also
--------
maximum, minimum, maximum_position, minimum_position, center_of_mass
Examples
--------
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> from scipy import ndimage
>>> ndimage.extrema(a)
(0, 9, (0, 2), (3, 0))
Features to process can be specified using `labels` and `index`:
>>> lbl, nlbl = ndimage.label(a)
>>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1))
(array([1, 4, 3]),
array([5, 7, 9]),
[(0, 0), (1, 3), (3, 1)],
[(1, 0), (2, 3), (3, 0)])
If no index is given, non-zero `labels` are processed:
>>> ndimage.extrema(a, lbl)
(1, 9, (0, 0), (3, 0))
"""
dims = numpy.array(numpy.asarray(input).shape)
# see numpy.unravel_index to understand this line.
dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
minimums, min_positions, maximums, max_positions = _select(input, labels,
index,
find_min=True,
find_max=True,
find_min_positions=True,
find_max_positions=True)
if numpy.isscalar(minimums):
return (minimums, maximums, tuple((min_positions // dim_prod) % dims),
tuple((max_positions // dim_prod) % dims))
min_positions = [tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims]
max_positions = [tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims]
return minimums, maximums, min_positions, max_positions
def center_of_mass(input, labels=None, index=None):
"""
Calculate the center of mass of the values of an array at labels.
Parameters
----------
input : ndarray
Data from which to calculate center-of-mass. The masses can either
be positive or negative.
labels : ndarray, optional
Labels for objects in `input`, as generated by `ndimage.label`.
Only used with `index`. Dimensions must be the same as `input`.
index : int or sequence of ints, optional
Labels for which to calculate centers-of-mass. If not specified,
all labels greater than zero are used. Only used with `labels`.
Returns
-------
center_of_mass : tuple, or list of tuples
Coordinates of centers-of-mass.
Examples
--------
>>> a = np.array(([0,0,0,0],
... [0,1,1,0],
... [0,1,1,0],
... [0,1,1,0]))
>>> from scipy import ndimage
>>> ndimage.measurements.center_of_mass(a)
(2.0, 1.5)
Calculation of multiple objects in an image
>>> b = np.array(([0,1,1,0],
... [0,1,0,0],
... [0,0,0,0],
... [0,0,1,1],
... [0,0,1,1]))
>>> lbl = ndimage.label(b)[0]
>>> ndimage.measurements.center_of_mass(b, lbl, [1,2])
[(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)]
Negative masses are also accepted, which can occur for example when
bias is removed from measured data due to random noise.
>>> c = np.array(([-1,0,0,0],
... [0,-1,-1,0],
... [0,1,-1,0],
... [0,1,1,0]))
>>> ndimage.measurements.center_of_mass(c)
(-4.0, 1.0)
If there are division by zero issues, the function does not raise an
error but rather issues a RuntimeWarning before returning inf and/or NaN.
>>> d = np.array([-1, 1])
>>> ndimage.measurements.center_of_mass(d)
(inf,)
"""
normalizer = sum(input, labels, index)
grids = numpy.ogrid[[slice(0, i) for i in input.shape]]
results = [sum(input * grids[dir].astype(float), labels, index) / normalizer
for dir in range(input.ndim)]
if numpy.isscalar(results[0]):
return tuple(results)
return [tuple(v) for v in numpy.array(results).T]
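# Editor's sketch (not upstream code): the result is the mass-weighted mean
# of the grid coordinates, sum(m_i * x_i) / sum(m_i), computed per axis:
#
# >>> import numpy as np
# >>> from scipy import ndimage
# >>> a = np.array([[0., 1.], [0., 3.]])
# >>> rows, cols = np.ogrid[0:2, 0:2]
# >>> ((a * rows).sum() / a.sum(), (a * cols).sum() / a.sum())
# (0.75, 1.0)
# >>> ndimage.center_of_mass(a)
# (0.75, 1.0)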
def histogram(input, min, max, bins, labels=None, index=None):
"""
Calculate the histogram of the values of an array, optionally at labels.
Histogram calculates the frequency of values in an array within bins
determined by `min`, `max`, and `bins`. The `labels` and `index`
keywords can limit the scope of the histogram to specified sub-regions
within the array.
Parameters
----------
input : array_like
Data for which to calculate histogram.
min, max : int
Minimum and maximum values of range of histogram bins.
bins : int
Number of bins.
labels : array_like, optional
Labels for objects in `input`.
If not None, must be same shape as `input`.
index : int or sequence of ints, optional
Label or labels for which to calculate histogram. If None, all values
        where label is greater than zero are used.
Returns
-------
hist : ndarray
Histogram counts.
Examples
--------
>>> a = np.array([[ 0. , 0.2146, 0.5962, 0. ],
... [ 0. , 0.7778, 0. , 0. ],
... [ 0. , 0. , 0. , 0. ],
... [ 0. , 0. , 0.7181, 0.2787],
... [ 0. , 0. , 0.6573, 0.3094]])
>>> from scipy import ndimage
>>> ndimage.measurements.histogram(a, 0, 1, 10)
array([13, 0, 2, 1, 0, 1, 1, 2, 0, 0])
With labels and no indices, non-zero elements are counted:
>>> lbl, nlbl = ndimage.label(a)
>>> ndimage.measurements.histogram(a, 0, 1, 10, lbl)
array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0])
Indices can be used to count only certain objects:
>>> ndimage.measurements.histogram(a, 0, 1, 10, lbl, 2)
array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])
"""
_bins = numpy.linspace(min, max, bins + 1)
def _hist(vals):
return numpy.histogram(vals, _bins)[0]
return labeled_comprehension(input, labels, index, _hist, object, None,
pass_positions=False)
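# Editor's sketch (not upstream code): with `labels` and `index` the counts
# match numpy.histogram applied to just the selected values; reusing `a` and
# `lbl` from the example above:
#
# >>> vals = a[lbl == 2]
# >>> np.histogram(vals, np.linspace(0, 1, 11))[0]
# array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])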
def watershed_ift(input, markers, structure=None, output=None):
"""
Apply watershed from markers using image foresting transform algorithm.
Parameters
----------
input : array_like
Input.
markers : array_like
Markers are points within each watershed that form the beginning
of the process. Negative markers are considered background markers
which are processed after the other markers.
structure : structure element, optional
A structuring element defining the connectivity of the object can be
provided. If None, an element is generated with a squared
connectivity equal to one.
output : ndarray, optional
        An output array can optionally be provided; it must have the same
        shape as `input`.
Returns
-------
watershed_ift : ndarray
Output. Same shape as `input`.
References
----------
.. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image
foresting transform: theory, algorithms, and applications",
Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004.
"""
input = numpy.asarray(input)
if input.dtype.type not in [numpy.uint8, numpy.uint16]:
        raise TypeError('only uint8 and uint16 inputs are supported')
if structure is None:
structure = morphology.generate_binary_structure(input.ndim, 1)
structure = numpy.asarray(structure, dtype=bool)
if structure.ndim != input.ndim:
raise RuntimeError('structure and input must have equal rank')
for ii in structure.shape:
if ii != 3:
raise RuntimeError('structure dimensions must be equal to 3')
if not structure.flags.contiguous:
structure = structure.copy()
markers = numpy.asarray(markers)
if input.shape != markers.shape:
raise RuntimeError('input and markers must have equal shape')
integral_types = [numpy.int0,
numpy.int8,
numpy.int16,
numpy.int32,
numpy.int_,
numpy.int64,
numpy.intc,
numpy.intp]
if markers.dtype.type not in integral_types:
raise RuntimeError('marker should be of integer type')
if isinstance(output, numpy.ndarray):
if output.dtype.type not in integral_types:
raise RuntimeError('output should be of integer type')
else:
output = markers.dtype
output = _ni_support._get_output(output, input)
_nd_image.watershed_ift(input, markers, structure, output)
return output
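# Editor's usage sketch (assumed inputs, not upstream code): two positive
# markers flood a uint8 image from opposite sides of a bright ridge; a
# negative marker would label background instead:
#
# import numpy as np
# from scipy import ndimage
# img = np.zeros((7, 7), dtype=np.uint8)
# img[:, 3] = 255                          # ridge separating two basins
# markers = np.zeros((7, 7), dtype=np.int8)
# markers[3, 0], markers[3, 6] = 1, 2      # one seed per basin
# labels = ndimage.watershed_ift(img, markers)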
| 49,497 | 32.717984 | 124 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/setup.py
|
from __future__ import division, print_function, absolute_import
import os
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from numpy import get_include
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='', top_path=None):
config = Configuration('ndimage', parent_package, top_path)
include_dirs = ['src',
get_include(),
os.path.join(os.path.dirname(__file__), '..', '_lib', 'src')]
config.add_extension("_nd_image",
sources=["src/nd_image.c",
"src/ni_filters.c",
"src/ni_fourier.c",
"src/ni_interpolation.c",
"src/ni_measure.c",
"src/ni_morphology.c",
"src/ni_splines.c",
"src/ni_support.c"],
include_dirs=include_dirs,
**numpy_nodepr_api)
# Cython wants the .c and .pyx to have the underscore.
config.add_extension("_ni_label",
sources=["src/_ni_label.c",],
include_dirs=['src']+[get_include()])
config.add_extension("_ctest",
sources=["src/_ctest.c"],
include_dirs=[get_include()],
**numpy_nodepr_api)
_define_macros = [("OLDAPI", 1)]
if 'define_macros' in numpy_nodepr_api:
_define_macros.extend(numpy_nodepr_api['define_macros'])
config.add_extension("_ctest_oldapi",
sources=["src/_ctest.c"],
include_dirs=[get_include()],
define_macros=_define_macros)
config.add_extension("_cytest",
sources=["src/_cytest.c"])
config.add_data_dir('tests')
return config
if __name__ == '__main__':
setup(**configuration(top_path='').todict())
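# Editor's note (sketch, not upstream code): as a subpackage setup script this
# is normally driven by the top-level scipy build; assuming numpy.distutils is
# available, an in-place build of just these extensions would be e.g.:
#
#     python setup.py build_ext --inplace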
| 1,891 | 30.533333 | 81 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/fourier.py
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy
from . import _ni_support
from . import _nd_image
__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
'fourier_shift']
def _get_output_fourier(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128,
numpy.float32]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.float64)
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128,
numpy.float32, numpy.float64]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
elif output.shape != input.shape:
raise RuntimeError("output shape not correct")
return output
def _get_output_fourier_complex(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.complex128)
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
elif output.shape != input.shape:
raise RuntimeError("output shape not correct")
return output
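# Editor's sketch (not upstream code): the helpers above implement the dtype
# dispatch described in the docstrings below -- float32 input keeps float32
# for the real-output filters, anything else is promoted to float64:
#
# >>> _get_output_fourier(None, numpy.zeros(3, numpy.float32)).dtype
# dtype('float32')
# >>> _get_output_fourier(None, numpy.zeros(3, numpy.int32)).dtype
# dtype('float64')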
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
"""
Multi-dimensional Gaussian fourier filter.
The array is multiplied with the fourier transform of a Gaussian
kernel.
Parameters
----------
input : array_like
The input array.
sigma : float or sequence
The sigma of the Gaussian kernel. If a float, `sigma` is the same for
all axes. If a sequence, `sigma` has to contain one value for each
axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_gaussian : ndarray
The filtered input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_gaussian(input_, sigma=4)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
sigmas = numpy.asarray(sigmas, dtype=numpy.float64)
if not sigmas.flags.contiguous:
sigmas = sigmas.copy()
_nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
return output
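# Editor's sketch (not upstream code): with a real FFT, pass the original
# length of the real-transform axis via `n` (assuming numpy's rfft2/irfft2
# and the `ascent` image from the example above):
#
# input_ = numpy.fft.rfft2(ascent)
# result = ndimage.fourier_gaussian(input_, sigma=4, n=ascent.shape[-1])
# result = numpy.fft.irfft2(result, s=ascent.shape)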
def fourier_uniform(input, size, n=-1, axis=-1, output=None):
"""
Multi-dimensional uniform fourier filter.
The array is multiplied with the fourier transform of a box of given
size.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_uniform : ndarray
The filtered input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_uniform(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype=numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 1)
return output
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
"""
Multi-dimensional ellipsoid fourier filter.
    The array is multiplied with the fourier transform of an ellipsoid of
given sizes.
Parameters
----------
input : array_like
The input array.
size : float or sequence
        The size of the ellipsoid used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_ellipsoid : ndarray
The filtered input.
Notes
-----
This function is implemented for arrays of rank 1, 2, or 3.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_ellipsoid(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype=numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 2)
return output
def fourier_shift(input, shift, n=-1, axis=-1, output=None):
"""
Multi-dimensional fourier shift filter.
The array is multiplied with the fourier transform of a shift operation.
Parameters
----------
input : array_like
The input array.
shift : float or sequence
        The shift to apply along the axes.
If a float, `shift` is the same for all axes. If a sequence, `shift`
has to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of shifting the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_shift : ndarray
The shifted input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> import numpy.fft
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_shift(input_, shift=200)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier_complex(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
shifts = _ni_support._normalize_sequence(shift, input.ndim)
shifts = numpy.asarray(shifts, dtype=numpy.float64)
if not shifts.flags.contiguous:
shifts = shifts.copy()
_nd_image.fourier_shift(input, shifts, n, axis, output)
return output
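# Editor's sketch (not upstream code): in 1-D, multiplying a spectrum by this
# filter shifts the signal; here an impulse moves from index 1 to index 3:
#
# >>> x = numpy.zeros(8)
# >>> x[1] = 1.0
# >>> y = numpy.fft.ifft(fourier_shift(numpy.fft.fft(x), 2)).real
# >>> int(y.argmax())
# 3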
| 11,266 | 35.700326 | 78 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/_ni_support.py
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy
from scipy._lib.six import string_types
def _extend_mode_to_code(mode):
"""Convert an extension mode to the corresponding integer code.
"""
if mode == 'nearest':
return 0
elif mode == 'wrap':
return 1
elif mode == 'reflect':
return 2
elif mode == 'mirror':
return 3
elif mode == 'constant':
return 4
else:
raise RuntimeError('boundary mode not supported')
def _normalize_sequence(input, rank):
"""If input is a scalar, create a sequence of length equal to the
rank by duplicating the input. If input is a sequence,
    check that its length is equal to the given rank.
"""
is_str = isinstance(input, string_types)
if hasattr(input, '__iter__') and not is_str:
normalized = list(input)
if len(normalized) != rank:
err = "sequence argument must have length equal to input rank"
raise RuntimeError(err)
else:
normalized = [input] * rank
return normalized
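# Editor's sketch (not upstream code):
#
# >>> _normalize_sequence(3, 2)
# [3, 3]
# >>> _normalize_sequence([1, 2], 2)
# [1, 2]
# >>> _normalize_sequence([1, 2, 3], 2)   # wrong length -> RuntimeError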
def _get_output(output, input, shape=None):
if shape is None:
shape = input.shape
if output is None:
output = numpy.zeros(shape, dtype=input.dtype.name)
elif type(output) in [type(type), type(numpy.zeros((4,)).dtype)]:
output = numpy.zeros(shape, dtype=output)
    elif isinstance(output, string_types):
output = numpy.typeDict[output]
output = numpy.zeros(shape, dtype=output)
elif output.shape != shape:
raise RuntimeError("output shape not correct")
return output
def _check_axis(axis, rank):
if axis < 0:
axis += rank
if axis < 0 or axis >= rank:
raise ValueError('invalid axis')
return axis
| 3,229 | 34.108696 | 74 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/interpolation.py
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
from . import _ni_docstrings
from functools import wraps
import warnings
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
@_ni_docstrings.docfiller
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64):
"""
Calculate a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
%(input)s
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is `numpy.float64`.
Returns
-------
spline_filter1d : ndarray
The filtered input.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return output
def spline_filter(input, order=3, output=numpy.float64):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output=output)
input = output
else:
output[...] = input[...]
return output
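# Editor's sketch (not upstream code): prefiltering once and interpolating
# with ``prefilter=False`` matches the default behaviour, which prefilters
# internally on every call (map_coordinates is defined later in this module):
#
# import numpy as np
# a = np.arange(12.).reshape(4, 3)
# coeffs = spline_filter(a, order=3)
# r1 = map_coordinates(coeffs, [[0.5], [1.5]], order=3, prefilter=False)
# r2 = map_coordinates(a, [[0.5], [1.5]], order=3)   # prefilter=True default
# # np.allclose(r1, r2) -> True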
@_ni_docstrings.docfiller
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
%(input)s
mapping : {callable, scipy.LowLevelCallable}
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
output : ndarray
The filtered input.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Notes
-----
This function also accepts low-level callback functions with one
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int mapping(npy_intp *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
int mapping(intptr_t *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
The calling function iterates over the elements of the output array,
calling the callback function at each element. The coordinates of the
current output element are passed through ``output_coordinates``. The
callback function must return the coordinates at which the input must
be interpolated in ``input_coordinates``. The rank of the input and
output arrays are given by ``input_rank`` and ``output_rank``
respectively. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
Examples
--------
>>> import numpy as np
>>> from scipy.ndimage import geometric_transform
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
>>> b = [1, 2, 3, 4, 5]
>>> def shift_func(output_coords):
... return (output_coords[0] - 3,)
...
>>> geometric_transform(b, shift_func, mode='constant')
array([0, 0, 0, 1, 2])
>>> geometric_transform(b, shift_func, mode='nearest')
array([1, 1, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='reflect')
array([3, 2, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='wrap')
array([2, 3, 4, 1, 2])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input, shape=output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None, output,
order, mode, cval, extra_arguments,
extra_keywords)
return output
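# Editor's sketch (assumptions: a hypothetical shared library ``mapping.so``
# exporting a ``mapping`` function with the C signature shown in the docstring,
# and that ``c_long`` matches ``npy_intp`` on the target platform; not upstream
# code). A low-level callback can be wrapped with scipy.LowLevelCallable:
#
# import ctypes
# from scipy import LowLevelCallable
# lib = ctypes.CDLL('./mapping.so')
# lib.mapping.restype = ctypes.c_int
# lib.mapping.argtypes = (ctypes.POINTER(ctypes.c_long),
#                         ctypes.POINTER(ctypes.c_double),
#                         ctypes.c_int, ctypes.c_int, ctypes.c_void_p)
# out = geometric_transform(input_array, LowLevelCallable(lib.mapping))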
@_ni_docstrings.docfiller
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
%(input)s
coordinates : array_like
The coordinates at which `input` is evaluated.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
map_coordinates : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
array([ 2., 7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return output
@_ni_docstrings.docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value
is determined from the input image at position
``np.dot(matrix, o) + offset``.
Parameters
----------
%(input)s
matrix : ndarray
The inverse coordinate transformation matrix, mapping output
coordinates to input coordinates. If ``ndim`` is the number of
dimensions of ``input``, the given matrix must have one of the
following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2D transformation matrix is
diagonal, with the diagonal specified by the given value. A more
efficient algorithm is then used that exploits the separability
of the problem.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates [1]_. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
and may be omitted.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
affine_transform : ndarray
The transformed input.
Notes
-----
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
.. versionchanged:: 0.18.0
Previously, the exact interpretation of the affine transformation
depended on whether the matrix was supplied as a one-dimensional or
two-dimensional array. If a one-dimensional array was supplied
to the matrix parameter, the output pixel value at index ``o``
was determined from the input image at position
``matrix * (o + offset)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype=numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
(matrix.shape[0] in [input.ndim, input.ndim + 1])):
if matrix.shape[0] == input.ndim + 1:
exptd = [0] * input.ndim + [1]
if not numpy.all(matrix[input.ndim] == exptd):
msg = ('Expected homogeneous transformation matrix with '
'shape %s for image shape %s, but bottom row was '
'not equal to %s' % (matrix.shape, input.shape, exptd))
raise ValueError(msg)
# assume input is homogeneous coordinate transformation matrix
offset = matrix[:input.ndim, input.ndim]
matrix = matrix[:input.ndim, :input.ndim]
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype=numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
warnings.warn(
"The behaviour of affine_transform with a one-dimensional "
"array supplied for the matrix parameter has changed in "
"scipy 0.18.0."
)
_nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return output
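# Editor's sketch (not upstream code): a homogeneous matrix carries its own
# offset, so the two calls below are equivalent; this one transposes a 2-D
# array:
#
# import numpy as np
# a = np.arange(25.).reshape(5, 5)
# hom = np.array([[0., 1., 0.],
#                 [1., 0., 0.],
#                 [0., 0., 1.]])            # (ndim + 1, ndim + 1) form
# out1 = affine_transform(a, hom)
# out2 = affine_transform(a, np.array([[0., 1.], [1., 0.]]), offset=0.0)
# # np.allclose(out1, out2) -> True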
@_ni_docstrings.docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
%(input)s
shift : float or sequence
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
shift : ndarray
The shifted input.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype=numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return output
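# Editor's sketch (not upstream code): with ``order=0`` and an integer shift
# the result is exact, and points shifted in from the border take ``cval``:
#
# >>> shift(numpy.array([1., 2., 3., 4.]), 1, order=0)
# array([ 0.,  1.,  2.,  3.])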
@_ni_docstrings.docfiller
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
%(input)s
zoom : float or sequence
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
zoom : ndarray
The zoomed input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.zoom(ascent, 3.0)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
>>> print(ascent.shape)
(512, 512)
>>> print(result.shape)
(1536, 1536)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple(
[int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
output_shape_old = tuple(
[int(ii * jj) for ii, jj in zip(input.shape, zoom)])
if output_shape != output_shape_old:
warnings.warn(
"From scipy 0.13.0, the output shape of zoom() is calculated "
"with round() instead of int() - for these inputs the size of "
"the returned array has changed.", UserWarning)
zoom_div = numpy.array(output_shape, float) - 1
# Zooming to infinite values is unpredictable, so just choose
# zoom factor 1 instead
zoom = numpy.divide(numpy.array(input.shape) - 1, zoom_div,
out=numpy.ones_like(input.shape, dtype=numpy.float64),
where=zoom_div != 0)
output = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return output
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
%(input)s
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
rotate : ndarray
The rotated input.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
    if axes[0] < 0 or axes[1] < 0 or axes[0] >= rank or axes[1] >= rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype=numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[m11, -m21],
[-m12, m22]], dtype=numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype=numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype=numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output = _ni_support._get_output(output, input,
shape=output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape, axis=0)
size //= input.shape[axes[0]]
size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return output
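# Editor's sketch (not upstream code): with ``reshape=True`` (the default) the
# output grows to contain the whole rotated input:
#
# import numpy as np
# a = np.arange(12.).reshape(3, 4)
# rotate(a, 90).shape                  # -> (4, 3)
# rotate(a, 90, reshape=False).shape   # -> (3, 4)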
| 26,362 | 35.412983 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/__init__.py
|
"""
=========================================================
Multi-dimensional image processing (:mod:`scipy.ndimage`)
=========================================================
.. currentmodule:: scipy.ndimage
This package contains various functions for multi-dimensional image
processing.
Filters
=======
.. autosummary::
:toctree: generated/
convolve - Multi-dimensional convolution
convolve1d - 1-D convolution along the given axis
correlate - Multi-dimensional correlation
correlate1d - 1-D correlation along the given axis
gaussian_filter
gaussian_filter1d
gaussian_gradient_magnitude
gaussian_laplace
generic_filter - Multi-dimensional filter using a given function
generic_filter1d - 1-D generic filter along the given axis
generic_gradient_magnitude
generic_laplace
laplace - n-D Laplace filter based on approximate second derivatives
maximum_filter
maximum_filter1d
median_filter - Calculates a multi-dimensional median filter
minimum_filter
minimum_filter1d
percentile_filter - Calculates a multi-dimensional percentile filter
prewitt
rank_filter - Calculates a multi-dimensional rank filter
sobel
uniform_filter - Multi-dimensional uniform filter
uniform_filter1d - 1-D uniform filter along the given axis
Fourier filters
===============
.. autosummary::
:toctree: generated/
fourier_ellipsoid
fourier_gaussian
fourier_shift
fourier_uniform
Interpolation
=============
.. autosummary::
:toctree: generated/
affine_transform - Apply an affine transformation
   geometric_transform - Apply an arbitrary geometric transform
map_coordinates - Map input array to new coordinates by interpolation
rotate - Rotate an array
shift - Shift an array
spline_filter
spline_filter1d
zoom - Zoom an array
Measurements
============
.. autosummary::
:toctree: generated/
center_of_mass - The center of mass of the values of an array at labels
extrema - Min's and max's of an array at labels, with their positions
find_objects - Find objects in a labeled array
histogram - Histogram of the values of an array, optionally at labels
label - Label features in an array
labeled_comprehension
maximum
maximum_position
mean - Mean of the values of an array at labels
median
minimum
minimum_position
standard_deviation - Standard deviation of an n-D image array
sum - Sum of the values of the array
variance - Variance of the values of an n-D image array
watershed_ift
Morphology
==========
.. autosummary::
:toctree: generated/
binary_closing
binary_dilation
binary_erosion
binary_fill_holes
binary_hit_or_miss
binary_opening
binary_propagation
black_tophat
distance_transform_bf
distance_transform_cdt
distance_transform_edt
generate_binary_structure
grey_closing
grey_dilation
grey_erosion
grey_opening
iterate_structure
morphological_gradient
morphological_laplace
white_tophat
Utility
=======
.. autosummary::
:toctree: generated/
imread - Load an image from a file
"""
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
from .filters import *
from .fourier import *
from .interpolation import *
from .measurements import *
from .morphology import *
from .io import *
__version__ = '2.0'
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 4,946 | 27.268571 | 74 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/morphology.py
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from . import _ni_support
from . import _nd_image
from . import filters
__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion',
'binary_dilation', 'binary_opening', 'binary_closing',
'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes',
'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing',
'morphological_gradient', 'morphological_laplace', 'white_tophat',
'black_tophat', 'distance_transform_bf', 'distance_transform_cdt',
'distance_transform_edt']
def _center_is_true(structure, origin):
structure = numpy.array(structure)
coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape,
origin)])
return bool(structure[coor])
def iterate_structure(structure, iterations, origin=None):
"""
Iterate a structure by dilating it with itself.
Parameters
----------
structure : array_like
        Structuring element (an array of bools, for example) to be dilated with
itself.
iterations : int
        Number of dilations performed on the structure with itself.
origin : optional
If origin is None, only the iterated structure is returned. If
not, a tuple of the iterated structure and the modified origin is
returned.
Returns
-------
iterate_structure : ndarray of bools
A new structuring element obtained by dilating `structure`
(`iterations` - 1) times with itself.
See also
--------
generate_binary_structure
Examples
--------
>>> from scipy import ndimage
>>> struct = ndimage.generate_binary_structure(2, 1)
>>> struct.astype(int)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
>>> ndimage.iterate_structure(struct, 2).astype(int)
array([[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
>>> ndimage.iterate_structure(struct, 3).astype(int)
array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0]])
"""
structure = numpy.asarray(structure)
if iterations < 2:
return structure.copy()
ni = iterations - 1
shape = [ii + ni * (ii - 1) for ii in structure.shape]
pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))]
slc = [slice(pos[ii], pos[ii] + structure.shape[ii], None)
for ii in range(len(shape))]
out = numpy.zeros(shape, bool)
out[slc] = structure != 0
out = binary_dilation(out, structure, iterations=ni)
if origin is None:
return out
else:
origin = _ni_support._normalize_sequence(origin, structure.ndim)
origin = [iterations * o for o in origin]
return out, origin
def generate_binary_structure(rank, connectivity):
"""
Generate a binary structure for binary morphological operations.
Parameters
----------
rank : int
Number of dimensions of the array to which the structuring element
will be applied, as returned by `np.ndim`.
connectivity : int
`connectivity` determines which elements of the output array belong
to the structure, i.e. are considered as neighbors of the central
element. Elements up to a squared distance of `connectivity` from
the center are considered neighbors. `connectivity` may range from 1
(no diagonal elements are neighbors) to `rank` (all elements are
neighbors).
Returns
-------
output : ndarray of bools
Structuring element which may be used for binary morphological
operations, with `rank` dimensions and all dimensions equal to 3.
See also
--------
iterate_structure, binary_dilation, binary_erosion
Notes
-----
`generate_binary_structure` can only create structuring elements with
dimensions equal to 3, i.e. minimal dimensions. For larger structuring
    elements that are useful e.g. for eroding large objects, one may either
    use `iterate_structure`, or directly create custom arrays with
numpy functions such as `numpy.ones`.
Examples
--------
>>> from scipy import ndimage
>>> struct = ndimage.generate_binary_structure(2, 1)
>>> struct
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> a = np.zeros((5,5))
>>> a[2, 2] = 1
>>> a
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype)
>>> b
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype)
array([[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 1., 1., 1., 1., 1.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.]])
>>> struct = ndimage.generate_binary_structure(2, 2)
>>> struct
array([[ True, True, True],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> struct = ndimage.generate_binary_structure(3, 1)
>>> struct # no diagonal elements
array([[[False, False, False],
[False, True, False],
[False, False, False]],
[[False, True, False],
[ True, True, True],
[False, True, False]],
[[False, False, False],
[False, True, False],
[False, False, False]]], dtype=bool)
"""
if connectivity < 1:
connectivity = 1
if rank < 1:
return numpy.array(True, dtype=bool)
output = numpy.fabs(numpy.indices([3] * rank) - 1)
output = numpy.add.reduce(output, 0)
return output <= connectivity
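# How the mask is built (a sketch of the code above): numpy.indices gives each
# cell's coordinates, subtracting 1 centers them on the middle cell, and
# add.reduce(fabs(...)) is then the taxicab distance from the center. Since
# every offset is -1, 0 or 1, this coincides with the squared Euclidean
# distance described in the docstring. For rank 2:
#
#     >>> numpy.add.reduce(numpy.fabs(numpy.indices([3, 3]) - 1), 0)
#     array([[ 2.,  1.,  2.],
#            [ 1.,  0.,  1.],
#            [ 2.,  1.,  2.]])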
def _binary_erosion(input, structure, iterations, mask, output,
border_value, origin, invert, brute_force):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if structure is None:
structure = generate_binary_structure(input.ndim, 1)
else:
structure = numpy.asarray(structure, dtype=bool)
if structure.ndim != input.ndim:
raise RuntimeError('structure and input must have same dimensionality')
if not structure.flags.contiguous:
structure = structure.copy()
    if structure.size < 1:
raise RuntimeError('structure must not be empty')
if mask is not None:
mask = numpy.asarray(mask)
if mask.shape != input.shape:
raise RuntimeError('mask and input must have equal sizes')
origin = _ni_support._normalize_sequence(origin, input.ndim)
cit = _center_is_true(structure, origin)
if isinstance(output, numpy.ndarray):
if numpy.iscomplexobj(output):
raise TypeError('Complex output type not supported')
else:
output = bool
output = _ni_support._get_output(output, input)
if iterations == 1:
_nd_image.binary_erosion(input, structure, mask, output,
border_value, origin, invert, cit, 0)
return output
elif cit and not brute_force:
changed, coordinate_list = _nd_image.binary_erosion(
input, structure, mask, output,
border_value, origin, invert, cit, 1)
structure = structure[tuple([slice(None, None, -1)] *
structure.ndim)]
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if not structure.shape[ii] & 1:
origin[ii] -= 1
if mask is not None:
mask = numpy.asarray(mask, dtype=numpy.int8)
if not structure.flags.contiguous:
structure = structure.copy()
_nd_image.binary_erosion2(output, structure, mask, iterations - 1,
origin, invert, coordinate_list)
return output
else:
tmp_in = numpy.empty_like(input, dtype=bool)
tmp_out = output
if iterations >= 1 and not iterations & 1:
tmp_in, tmp_out = tmp_out, tmp_in
changed = _nd_image.binary_erosion(
input, structure, mask, tmp_out,
border_value, origin, invert, cit, 0)
ii = 1
while ii < iterations or (iterations < 1 and changed):
tmp_in, tmp_out = tmp_out, tmp_in
changed = _nd_image.binary_erosion(
tmp_in, structure, mask, tmp_out,
border_value, origin, invert, cit, 0)
ii += 1
return output
def binary_erosion(input, structure=None, iterations=1, mask=None, output=None,
border_value=0, origin=0, brute_force=False):
"""
Multi-dimensional binary erosion with a given structuring element.
Binary erosion is a mathematical morphology operation used for image
processing.
Parameters
----------
input : array_like
Binary image to be eroded. Non-zero (True) elements form
the subset to be eroded.
structure : array_like, optional
Structuring element used for the erosion. Non-zero elements are
considered True. If no structuring element is provided, an element
is generated with a square connectivity equal to one.
iterations : {int, float}, optional
The erosion is repeated `iterations` times (one, by default).
If iterations is less than 1, the erosion is repeated until the
result does not change anymore.
mask : array_like, optional
If a mask is given, only those elements with a True value at
the corresponding mask element are modified at each iteration.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
brute_force : boolean, optional
Memory condition: if False, only the pixels whose value was changed in
the last iteration are tracked as candidates to be updated (eroded) in
the current iteration; if True all pixels are considered as candidates
for erosion, regardless of what happened in the previous iteration.
False by default.
Returns
-------
binary_erosion : ndarray of bools
Erosion of the input by the structuring element.
See also
--------
grey_erosion, binary_dilation, binary_closing, binary_opening,
generate_binary_structure
Notes
-----
Erosion [1]_ is a mathematical morphology operation [2]_ that uses a
structuring element for shrinking the shapes in an image. The binary
erosion of an image by a structuring element is the locus of the points
where a superimposition of the structuring element centered on the point
is entirely contained in the set of non-zero elements of the image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Erosion_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[1:6, 2:5] = 1
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_erosion(a).astype(a.dtype)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> #Erosion removes objects smaller than the structure
>>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
return _binary_erosion(input, structure, iterations, mask,
output, border_value, origin, 0, brute_force)
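# Usage sketch (illustrative): with iterations < 1 the erosion is repeated
# until the image stops changing, which (with the default zero border)
# empties any finite object:
#
#     >>> a = numpy.zeros((7, 7), dtype=int)
#     >>> a[1:6, 2:5] = 1
#     >>> binary_erosion(a, iterations=0).any()
#     False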
def binary_dilation(input, structure=None, iterations=1, mask=None,
output=None, border_value=0, origin=0,
brute_force=False):
"""
Multi-dimensional binary dilation with the given structuring element.
Parameters
----------
input : array_like
Binary array_like to be dilated. Non-zero (True) elements form
the subset to be dilated.
structure : array_like, optional
Structuring element used for the dilation. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one.
iterations : {int, float}, optional
The dilation is repeated `iterations` times (one, by default).
If iterations is less than 1, the dilation is repeated until the
result does not change anymore.
mask : array_like, optional
If a mask is given, only those elements with a True value at
the corresponding mask element are modified at each iteration.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
brute_force : boolean, optional
Memory condition: if False, only the pixels whose value was changed in
the last iteration are tracked as candidates to be updated (dilated)
in the current iteration; if True all pixels are considered as
candidates for dilation, regardless of what happened in the previous
iteration. False by default.
Returns
-------
binary_dilation : ndarray of bools
Dilation of the input by the structuring element.
See also
--------
grey_dilation, binary_erosion, binary_closing, binary_opening,
generate_binary_structure
Notes
-----
Dilation [1]_ is a mathematical morphology operation [2]_ that uses a
structuring element for expanding the shapes in an image. The binary
dilation of an image by a structuring element is the locus of the points
covered by the structuring element, when its center lies within the
non-zero points of the image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Dilation_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5, 5))
>>> a[2, 2] = 1
>>> a
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a)
array([[False, False, False, False, False],
[False, False, True, False, False],
[False, True, True, True, False],
[False, False, True, False, False],
[False, False, False, False, False]], dtype=bool)
>>> ndimage.binary_dilation(a).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> # 3x3 structuring element with connectivity 1, used by default
>>> struct1 = ndimage.generate_binary_structure(2, 1)
>>> struct1
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> # 3x3 structuring element with connectivity 2
>>> struct2 = ndimage.generate_binary_structure(2, 2)
>>> struct2
array([[ True, True, True],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a, structure=struct1,\\
... iterations=2).astype(a.dtype)
array([[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 1., 1., 1., 1., 1.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.]])
"""
input = numpy.asarray(input)
if structure is None:
structure = generate_binary_structure(input.ndim, 1)
origin = _ni_support._normalize_sequence(origin, input.ndim)
structure = numpy.asarray(structure)
structure = structure[tuple([slice(None, None, -1)] *
structure.ndim)]
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if not structure.shape[ii] & 1:
origin[ii] -= 1
return _binary_erosion(input, structure, iterations, mask,
output, border_value, origin, 1, brute_force)
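# Implementation note (sketch): dilation is computed as an erosion with the
# invert flag set, applied with the structuring element reflected about its
# center. For even-sized axes the reflected center shifts by one cell, hence
# the `origin[ii] -= 1` above; e.g. a (2, 2) structure with origin 0 is
# passed on with origin (-1, -1).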
def binary_opening(input, structure=None, iterations=1, output=None,
origin=0, mask=None, border_value=0, brute_force=False):
"""
Multi-dimensional binary opening with the given structuring element.
The *opening* of an input image by a structuring element is the
*dilation* of the *erosion* of the image by the structuring element.
Parameters
----------
input : array_like
Binary array_like to be opened. Non-zero (True) elements form
the subset to be opened.
structure : array_like, optional
Structuring element used for the opening. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one (i.e., only
nearest neighbors are connected to the center, diagonally-connected
elements are not considered neighbors).
iterations : {int, float}, optional
The erosion step of the opening, then the dilation step are each
repeated `iterations` times (one, by default). If `iterations` is
less than 1, each operation is repeated until the result does
not change anymore.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
mask : array_like, optional
If a mask is given, only those elements with a True value at
the corresponding mask element are modified at each iteration.
.. versionadded:: 1.1.0
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
.. versionadded:: 1.1.0
brute_force : boolean, optional
Memory condition: if False, only the pixels whose value was changed in
the last iteration are tracked as candidates to be updated in the
current iteration; if true all pixels are considered as candidates for
update, regardless of what happened in the previous iteration.
False by default.
.. versionadded:: 1.1.0
Returns
-------
binary_opening : ndarray of bools
Opening of the input by the structuring element.
See also
--------
grey_opening, binary_closing, binary_erosion, binary_dilation,
generate_binary_structure
Notes
-----
*Opening* [1]_ is a mathematical morphology operation [2]_ that
consists in the succession of an erosion and a dilation of the
input with the same structuring element. Opening therefore removes
objects smaller than the structuring element.
Together with *closing* (`binary_closing`), opening can be used for
noise removal.
References
----------
.. [1] http://en.wikipedia.org/wiki/Opening_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5,5), dtype=int)
>>> a[1:4, 1:4] = 1; a[4, 4] = 1
>>> a
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 1]])
>>> # Opening removes small objects
>>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Opening can also smooth corners
>>> ndimage.binary_opening(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]])
>>> # Opening is the dilation of the erosion of the input
>>> ndimage.binary_erosion(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
>>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int)
array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]])
"""
input = numpy.asarray(input)
if structure is None:
rank = input.ndim
structure = generate_binary_structure(rank, 1)
tmp = binary_erosion(input, structure, iterations, mask, None,
border_value, origin, brute_force)
return binary_dilation(tmp, structure, iterations, mask, output,
border_value, origin, brute_force)
def binary_closing(input, structure=None, iterations=1, output=None,
origin=0, mask=None, border_value=0, brute_force=False):
"""
Multi-dimensional binary closing with the given structuring element.
The *closing* of an input image by a structuring element is the
*erosion* of the *dilation* of the image by the structuring element.
Parameters
----------
input : array_like
Binary array_like to be closed. Non-zero (True) elements form
the subset to be closed.
structure : array_like, optional
Structuring element used for the closing. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one (i.e., only
nearest neighbors are connected to the center, diagonally-connected
elements are not considered neighbors).
iterations : {int, float}, optional
The dilation step of the closing, then the erosion step are each
repeated `iterations` times (one, by default). If iterations is
        less than 1, each operation is repeated until the result does
not change anymore.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
mask : array_like, optional
If a mask is given, only those elements with a True value at
the corresponding mask element are modified at each iteration.
.. versionadded:: 1.1.0
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
.. versionadded:: 1.1.0
brute_force : boolean, optional
Memory condition: if False, only the pixels whose value was changed in
the last iteration are tracked as candidates to be updated in the
        current iteration; if true all pixels are considered as candidates for
update, regardless of what happened in the previous iteration.
False by default.
.. versionadded:: 1.1.0
Returns
-------
binary_closing : ndarray of bools
Closing of the input by the structuring element.
See also
--------
grey_closing, binary_opening, binary_dilation, binary_erosion,
generate_binary_structure
Notes
-----
*Closing* [1]_ is a mathematical morphology operation [2]_ that
consists in the succession of a dilation and an erosion of the
input with the same structuring element. Closing therefore fills
holes smaller than the structuring element.
Together with *opening* (`binary_opening`), closing can be used for
noise removal.
References
----------
.. [1] http://en.wikipedia.org/wiki/Closing_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5,5), dtype=int)
>>> a[1:-1, 1:-1] = 1; a[2,2] = 0
>>> a
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Closing removes small holes
>>> ndimage.binary_closing(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Closing is the erosion of the dilation of the input
>>> ndimage.binary_dilation(a).astype(int)
array([[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0]])
>>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> a = np.zeros((7,7), dtype=int)
>>> a[1:6, 2:5] = 1; a[1:3,3] = 0
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> # In addition to removing holes, closing can also
>>> # coarsen boundaries with fine hollows.
>>> ndimage.binary_closing(a).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
input = numpy.asarray(input)
if structure is None:
rank = input.ndim
structure = generate_binary_structure(rank, 1)
tmp = binary_dilation(input, structure, iterations, mask, None,
border_value, origin, brute_force)
return binary_erosion(tmp, structure, iterations, mask, output,
border_value, origin, brute_force)
def binary_hit_or_miss(input, structure1=None, structure2=None,
output=None, origin1=0, origin2=None):
"""
Multi-dimensional binary hit-or-miss transform.
The hit-or-miss transform finds the locations of a given pattern
inside the input image.
Parameters
----------
input : array_like (cast to booleans)
Binary image where a pattern is to be detected.
structure1 : array_like (cast to booleans), optional
Part of the structuring element to be fitted to the foreground
(non-zero elements) of `input`. If no value is provided, a
structure of square connectivity 1 is chosen.
structure2 : array_like (cast to booleans), optional
        Second part of the structuring element that has to miss the
        foreground completely. If no value is provided, the complement of
        `structure1` is taken.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin1 : int or tuple of ints, optional
Placement of the first part of the structuring element `structure1`,
by default 0 for a centered structure.
origin2 : int or tuple of ints, optional
Placement of the second part of the structuring element `structure2`,
by default 0 for a centered structure. If a value is provided for
`origin1` and not for `origin2`, then `origin2` is set to `origin1`.
Returns
-------
binary_hit_or_miss : ndarray
Hit-or-miss transform of `input` with the given structuring
element (`structure1`, `structure2`).
See also
--------
ndimage.morphology, binary_erosion
References
----------
.. [1] http://en.wikipedia.org/wiki/Hit-or-miss_transform
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
>>> structure1
array([[1, 0, 0],
[0, 1, 1],
[0, 1, 1]])
>>> # Find the matches of structure1 in the array a
>>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> # Change the origin of the filter
>>> # origin1=1 is equivalent to origin1=(1,1) here
>>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\
... origin1=1).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
input = numpy.asarray(input)
if structure1 is None:
structure1 = generate_binary_structure(input.ndim, 1)
if structure2 is None:
structure2 = numpy.logical_not(structure1)
origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
if origin2 is None:
origin2 = origin1
else:
origin2 = _ni_support._normalize_sequence(origin2, input.ndim)
tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
0, False)
inplace = isinstance(output, numpy.ndarray)
result = _binary_erosion(input, structure2, 1, None, output, 0,
origin2, 1, False)
if inplace:
numpy.logical_not(output, output)
numpy.logical_and(tmp1, output, output)
else:
numpy.logical_not(result, result)
return numpy.logical_and(tmp1, result)
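# Equivalence sketch (illustrative; ignoring border effects and custom
# origins, with `a`, `structure1` and `structure2` as placeholder arrays):
# the transform is the intersection of two erosions -- `structure1` must fit
# the foreground and `structure2` must fit the background:
#
#     >>> hit = binary_erosion(a, structure1)
#     >>> miss = binary_erosion(numpy.logical_not(a), structure2)
#     >>> numpy.logical_and(hit, miss)  # ~ binary_hit_or_miss(a, structure1, structure2)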
def binary_propagation(input, structure=None, mask=None,
output=None, border_value=0, origin=0):
"""
Multi-dimensional binary propagation with the given structuring element.
Parameters
----------
input : array_like
Binary image to be propagated inside `mask`.
structure : array_like, optional
Structuring element used in the successive dilations. The output
may depend on the structuring element, especially if `mask` has
        several connected components. If no structuring element is
provided, an element is generated with a squared connectivity equal
to one.
mask : array_like, optional
Binary mask defining the region into which `input` is allowed to
propagate.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
Returns
-------
binary_propagation : ndarray
Binary propagation of `input` inside `mask`.
Notes
-----
This function is functionally equivalent to calling binary_dilation
with the number of iterations less than one: iterative dilation until
the result does not change anymore.
The succession of an erosion and propagation inside the original image
can be used instead of an *opening* for deleting small objects while
keeping the contours of larger objects untouched.
References
----------
.. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
.. [2] I.T. Young, J.J. Gerbrands, and L.J. van Vliet, "Fundamentals of
image processing", 1998
ftp://qiftp.tudelft.nl/DIPimage/docs/FIP2.3.pdf
Examples
--------
>>> from scipy import ndimage
>>> input = np.zeros((8, 8), dtype=int)
>>> input[2, 2] = 1
>>> mask = np.zeros((8, 8), dtype=int)
>>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1
>>> input
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
>>> mask
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
>>> ndimage.binary_propagation(input, mask=mask).astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_propagation(input, mask=mask,\\
... structure=np.ones((3,3))).astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
>>> # Comparison between opening and erosion+propagation
>>> a = np.zeros((6,6), dtype=int)
>>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1
>>> a
array([[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1]])
>>> ndimage.binary_opening(a).astype(int)
array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0]])
>>> b = ndimage.binary_erosion(a)
>>> b.astype(int)
array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_propagation(b, mask=a).astype(int)
array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0]])
"""
return binary_dilation(input, structure, -1, mask, output,
border_value, origin)
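# Sketch: binary propagation is morphological reconstruction by dilation; the
# wrapper above simply requests dilation until stability (iterations=-1)
# restricted to `mask` (here `seed` and `m` are placeholder arrays):
#
#     binary_propagation(seed, mask=m)
#     # same as binary_dilation(seed, iterations=-1, mask=m)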
def binary_fill_holes(input, structure=None, output=None, origin=0):
"""
Fill the holes in binary objects.
Parameters
----------
input : array_like
n-dimensional binary array with holes to be filled
structure : array_like, optional
Structuring element used in the computation; large-size elements
make computations faster but may miss holes separated from the
background by thin regions. The default element (with a square
connectivity equal to one) yields the intuitive result where all
holes in the input have been filled.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int, tuple of ints, optional
Position of the structuring element.
Returns
-------
out : ndarray
Transformation of the initial image `input` where holes have been
filled.
See also
--------
binary_dilation, binary_propagation, label
Notes
-----
    The algorithm used in this function consists of invading the complement
    of the shapes in `input` from the outer boundary of the image,
using binary dilations. Holes are not connected to the boundary and are
therefore not invaded. The result is the complementary subset of the
invaded region.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5, 5), dtype=int)
>>> a[1:4, 1:4] = 1
>>> a[2,2] = 0
>>> a
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> ndimage.binary_fill_holes(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Too big structuring element
>>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
"""
mask = numpy.logical_not(input)
tmp = numpy.zeros(mask.shape, bool)
inplace = isinstance(output, numpy.ndarray)
if inplace:
binary_dilation(tmp, structure, -1, mask, output, 1, origin)
numpy.logical_not(output, output)
else:
output = binary_dilation(tmp, structure, -1, mask, None, 1,
origin)
numpy.logical_not(output, output)
return output
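# Usage sketch (illustrative, with `a` a placeholder binary array): the
# pixels that were actually filled can be recovered by differencing the
# result with the input:
#
#     >>> filled = binary_fill_holes(a)
#     >>> holes = filled & ~numpy.asarray(a, dtype=bool)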
def grey_erosion(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Calculate a greyscale erosion, using either a structuring element,
or a footprint corresponding to a flat structuring element.
Grayscale erosion is a mathematical morphology operation. For the
simple case of a full and flat structuring element, it can be viewed
as a minimum filter over a sliding window.
Parameters
----------
input : array_like
Array over which the grayscale erosion is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
erosion. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale erosion. Non-zero values give the set of
neighbors of the center over which the minimum is chosen.
structure : array of ints, optional
Structuring element used for the grayscale erosion. `structure`
may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the erosion may be provided.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
output : ndarray
Grayscale erosion of `input`.
See also
--------
binary_erosion, grey_dilation, grey_opening, grey_closing
generate_binary_structure, ndimage.minimum_filter
Notes
-----
The grayscale erosion of an image input by a structuring element s defined
over a domain E is given by:
(input+s)(x) = min {input(y) - s(x-y), for y in E}
In particular, for structuring elements defined as
s(y) = 0 for y in E, the grayscale erosion computes the minimum of the
input image inside a sliding window defined by E.
Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/Erosion_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[1:6, 1:6] = 3
>>> a[4,4] = 2; a[2,3] = 1
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 1, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 2, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_erosion(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 3, 2, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> footprint = ndimage.generate_binary_structure(2, 1)
>>> footprint
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> # Diagonally-connected elements are not considered neighbors
>>> ndimage.grey_erosion(a, size=(3,3), footprint=footprint)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 3, 1, 2, 0, 0],
[0, 0, 3, 2, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
if size is None and footprint is None and structure is None:
raise ValueError("size, footprint or structure must be specified")
return filters._min_or_max_filter(input, size, footprint, structure,
output, mode, cval, origin, 1)
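# Equivalence sketch: with only `size` given (a flat, full structuring
# element), grey erosion reduces to a plain minimum filter over the same
# window, since both paths call the same underlying filter:
#
#     grey_erosion(a, size=(3, 3))
#     # same values as filters.minimum_filter(a, size=(3, 3))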
def grey_dilation(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Calculate a greyscale dilation, using either a structuring element,
or a footprint corresponding to a flat structuring element.
Grayscale dilation is a mathematical morphology operation. For the
simple case of a full and flat structuring element, it can be viewed
as a maximum filter over a sliding window.
Parameters
----------
input : array_like
Array over which the grayscale dilation is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
dilation. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale dilation. Non-zero values give the set of
neighbors of the center over which the maximum is chosen.
structure : array of ints, optional
Structuring element used for the grayscale dilation. `structure`
may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the dilation may be provided.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
grey_dilation : ndarray
Grayscale dilation of `input`.
See also
--------
binary_dilation, grey_erosion, grey_closing, grey_opening
generate_binary_structure, ndimage.maximum_filter
Notes
-----
The grayscale dilation of an image input by a structuring element s defined
over a domain E is given by:
(input+s)(x) = max {input(y) + s(x-y), for y in E}
In particular, for structuring elements defined as
s(y) = 0 for y in E, the grayscale dilation computes the maximum of the
input image inside a sliding window defined by E.
Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/Dilation_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[2:5, 2:5] = 1
>>> a[4,4] = 2; a[2,3] = 3
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 3, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_dilation(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_dilation(a, footprint=np.ones((3,3)))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> s = ndimage.generate_binary_structure(2,1)
>>> s
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> ndimage.grey_dilation(a, footprint=s)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 3, 1, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 1, 3, 2, 1, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 1, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3)))
array([[1, 1, 1, 1, 1, 1, 1],
[1, 2, 4, 4, 4, 2, 1],
[1, 2, 4, 4, 4, 2, 1],
[1, 2, 4, 4, 4, 3, 1],
[1, 2, 2, 3, 3, 3, 1],
[1, 2, 2, 3, 3, 3, 1],
[1, 1, 1, 1, 1, 1, 1]])
"""
if size is None and footprint is None and structure is None:
raise ValueError("size, footprint or structure must be specified")
if structure is not None:
structure = numpy.asarray(structure)
structure = structure[tuple([slice(None, None, -1)] *
structure.ndim)]
if footprint is not None:
footprint = numpy.asarray(footprint)
footprint = footprint[tuple([slice(None, None, -1)] *
footprint.ndim)]
input = numpy.asarray(input)
origin = _ni_support._normalize_sequence(origin, input.ndim)
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if footprint is not None:
sz = footprint.shape[ii]
elif structure is not None:
sz = structure.shape[ii]
elif numpy.isscalar(size):
sz = size
else:
sz = size[ii]
if not sz & 1:
origin[ii] -= 1
return filters._min_or_max_filter(input, size, footprint, structure,
output, mode, cval, origin, 0)
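# Equivalence sketch: with only `size` given (a flat, full structuring
# element of odd size), grey dilation matches a maximum filter; the
# reflection and origin adjustments above only matter for even-sized or
# asymmetric elements:
#
#     grey_dilation(a, size=(3, 3))
#     # same values as filters.maximum_filter(a, size=(3, 3))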
def grey_opening(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional greyscale opening.
    A greyscale opening consists of a greyscale erosion followed by a
    greyscale dilation.
Parameters
----------
input : array_like
Array over which the grayscale opening is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
opening. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale opening.
structure : array of ints, optional
Structuring element used for the grayscale opening. `structure`
may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the opening may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
grey_opening : ndarray
Result of the grayscale opening of `input` with `structure`.
See also
--------
binary_opening, grey_dilation, grey_erosion, grey_closing
generate_binary_structure
Notes
-----
    The action of a grayscale opening with a flat structuring element amounts
    to smoothing high local maxima, whereas binary opening erases small objects.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(36).reshape((6,6))
>>> a[3, 3] = 50
>>> a
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 50, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]])
>>> ndimage.grey_opening(a, size=(3,3))
array([[ 0, 1, 2, 3, 4, 4],
[ 6, 7, 8, 9, 10, 10],
[12, 13, 14, 15, 16, 16],
[18, 19, 20, 22, 22, 22],
[24, 25, 26, 27, 28, 28],
[24, 25, 26, 27, 28, 28]])
>>> # Note that the local maximum a[3,3] has disappeared
"""
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
tmp = grey_erosion(input, size, footprint, structure, None, mode,
cval, origin)
return grey_dilation(tmp, size, footprint, structure, output, mode,
cval, origin)
def grey_closing(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional greyscale closing.
    A greyscale closing consists of a greyscale dilation followed by a
    greyscale erosion.
Parameters
----------
input : array_like
Array over which the grayscale closing is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
closing. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale closing.
structure : array of ints, optional
Structuring element used for the grayscale closing. `structure`
may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the closing may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
grey_closing : ndarray
Result of the grayscale closing of `input` with `structure`.
See also
--------
binary_closing, grey_dilation, grey_erosion, grey_opening,
generate_binary_structure
Notes
-----
    The action of a grayscale closing with a flat structuring element amounts
    to smoothing deep local minima, whereas binary closing fills small holes.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(36).reshape((6,6))
>>> a[3,3] = 0
>>> a
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 0, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]])
>>> ndimage.grey_closing(a, size=(3,3))
array([[ 7, 7, 8, 9, 10, 11],
[ 7, 7, 8, 9, 10, 11],
[13, 13, 14, 15, 16, 17],
[19, 19, 20, 20, 22, 23],
[25, 25, 26, 27, 28, 29],
[31, 31, 32, 33, 34, 35]])
>>> # Note that the local minimum a[3,3] has disappeared
"""
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
tmp = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
return grey_erosion(tmp, size, footprint, structure, output, mode,
cval, origin)
def morphological_gradient(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional morphological gradient.
The morphological gradient is calculated as the difference between a
dilation and an erosion of the input with a given structuring element.
Parameters
----------
input : array_like
        Array over which to compute the morphological gradient.
size : tuple of ints
Shape of a flat and full structuring element used for the mathematical
morphology operations. Optional if `footprint` or `structure` is
provided. A larger `size` yields a more blurred gradient.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the morphology operations. Larger footprints
give a more blurred morphological gradient.
structure : array of ints, optional
Structuring element used for the morphology operations.
`structure` may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the morphological gradient
may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
morphological_gradient : ndarray
Morphological gradient of `input`.
See also
--------
grey_dilation, grey_erosion, ndimage.gaussian_gradient_magnitude
Notes
-----
For a flat structuring element, the morphological gradient
computed at a given point corresponds to the maximal difference
between elements of the input among the elements covered by the
structuring element centered on the point.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[2:5, 2:5] = 1
>>> ndimage.morphological_gradient(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> # The morphological gradient is computed as the difference
>>> # between a dilation and an erosion
>>> ndimage.grey_dilation(a, size=(3,3)) -\\
... ndimage.grey_erosion(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> a = np.zeros((7,7), dtype=int)
>>> a[2:5, 2:5] = 1
>>> a[4,4] = 2; a[2,3] = 3
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 3, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.morphological_gradient(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 2, 3, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
tmp = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
if isinstance(output, numpy.ndarray):
grey_erosion(input, size, footprint, structure, output, mode,
cval, origin)
return numpy.subtract(tmp, output, output)
else:
return (tmp - grey_erosion(input, size, footprint, structure,
None, mode, cval, origin))
def morphological_laplace(input, size=None, footprint=None,
structure=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional morphological laplace.
Parameters
----------
input : array_like
Input.
size : int or sequence of ints, optional
See `structure`.
footprint : bool or ndarray, optional
See `structure`.
    structure : array_like, optional
Either `size`, `footprint`, or the `structure` must be provided.
output : ndarray, optional
An output array can optionally be provided.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled.
For 'constant' mode, values beyond borders are set to be `cval`.
Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if mode is 'constant'.
Default is 0.0
    origin : scalar, optional
The origin parameter controls the placement of the filter.
Returns
-------
morphological_laplace : ndarray
Output
"""
tmp1 = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
if isinstance(output, numpy.ndarray):
grey_erosion(input, size, footprint, structure, output, mode,
cval, origin)
numpy.add(tmp1, output, output)
numpy.subtract(output, input, output)
return numpy.subtract(output, input, output)
else:
tmp2 = grey_erosion(input, size, footprint, structure, None, mode,
cval, origin)
numpy.add(tmp1, tmp2, tmp2)
numpy.subtract(tmp2, input, tmp2)
numpy.subtract(tmp2, input, tmp2)
return tmp2
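# Identity sketch: the value computed above is
#
#     morphological_laplace(a, ...)
#     # == grey_dilation(a, ...) + grey_erosion(a, ...) - 2 * a
#
# i.e. a discrete second-derivative analogue built from flat morphology.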
def white_tophat(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional white tophat filter.
Parameters
----------
input : array_like
Input.
size : tuple of ints
Shape of a flat and full structuring element used for the filter.
Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of elements of a flat structuring element
used for the white tophat filter.
structure : array of ints, optional
Structuring element used for the filter. `structure`
may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the filter may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'.
Default is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default is 0.
Returns
-------
output : ndarray
Result of the filter of `input` with `structure`.
See also
--------
black_tophat
"""
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
tmp = grey_erosion(input, size, footprint, structure, None, mode,
cval, origin)
tmp = grey_dilation(tmp, size, footprint, structure, output, mode,
cval, origin)
if tmp is None:
tmp = output
if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_:
numpy.bitwise_xor(input, tmp, out=tmp)
else:
numpy.subtract(input, tmp, out=tmp)
return tmp
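# Identity sketch: for non-boolean input the white tophat equals
# input - grey_opening(input, ...), since the erosion-then-dilation pair
# above is exactly a grey opening:
#
#     white_tophat(a, size=(3, 3))
#     # same as a - grey_opening(a, size=(3, 3))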
def black_tophat(input, size=None, footprint=None,
structure=None, output=None, mode="reflect",
cval=0.0, origin=0):
"""
Multi-dimensional black tophat filter.
Parameters
----------
input : array_like
Input.
size : tuple of ints, optional
Shape of a flat and full structuring element used for the filter.
Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the black tophat filter.
structure : array of ints, optional
Structuring element used for the filter. `structure`
may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the filter may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
black_tophat : ndarray
Result of the filter of `input` with `structure`.
See also
--------
white_tophat, grey_opening, grey_closing
"""
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
tmp = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
tmp = grey_erosion(tmp, size, footprint, structure, output, mode,
cval, origin)
if tmp is None:
tmp = output
if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_:
numpy.bitwise_xor(tmp, input, out=tmp)
else:
numpy.subtract(tmp, input, out=tmp)
return tmp
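# Identity sketch: for non-boolean input the black tophat equals
# grey_closing(input, ...) - input:
#
#     black_tophat(a, size=(3, 3))
#     # same as grey_closing(a, size=(3, 3)) - a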
def distance_transform_bf(input, metric="euclidean", sampling=None,
return_distances=True, return_indices=False,
distances=None, indices=None):
"""
Distance transform function by a brute force algorithm.
This function calculates the distance transform of the `input`, by
replacing each foreground (non-zero) element, with its
shortest distance to the background (any zero-valued element).
In addition to the distance transform, the feature transform can
be calculated. In this case the index of the closest background
element is returned along the first axis of the result.
Parameters
----------
input : array_like
Input
metric : str, optional
Three types of distance metric are supported: 'euclidean', 'taxicab'
and 'chessboard'.
sampling : {int, sequence of ints}, optional
This parameter is only used in the case of the euclidean `metric`
distance transform.
The sampling along each axis can be given by the `sampling` parameter
which should be a sequence of length equal to the input rank, or a
single number in which the `sampling` is assumed to be equal along all
axes.
return_distances : bool, optional
The `return_distances` flag can be used to indicate if the distance
transform is returned.
The default is True.
return_indices : bool, optional
The `return_indices` flags can be used to indicate if the feature
transform is returned.
The default is False.
distances : float64 ndarray, optional
Optional output array to hold distances (if `return_distances` is
True).
indices : int64 ndarray, optional
Optional output array to hold indices (if `return_indices` is True).
Returns
-------
distances : ndarray
Distance array if `return_distances` is True.
indices : ndarray
Indices array if `return_indices` is True.
Notes
-----
This function employs a slow brute force algorithm, see also the
function distance_transform_cdt for more efficient taxicab and
chessboard algorithms.
"""
if (not return_distances) and (not return_indices):
msg = 'at least one of distances/indices must be specified'
raise RuntimeError(msg)
tmp1 = numpy.asarray(input) != 0
struct = generate_binary_structure(tmp1.ndim, tmp1.ndim)
tmp2 = binary_dilation(tmp1, struct)
tmp2 = numpy.logical_xor(tmp1, tmp2)
tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8)
metric = metric.lower()
if metric == 'euclidean':
metric = 1
elif metric in ['taxicab', 'cityblock', 'manhattan']:
metric = 2
elif metric == 'chessboard':
metric = 3
else:
raise RuntimeError('distance metric not supported')
if sampling is not None:
sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim)
sampling = numpy.asarray(sampling, dtype=numpy.float64)
if not sampling.flags.contiguous:
sampling = sampling.copy()
if return_indices:
ft = numpy.zeros(tmp1.shape, dtype=numpy.int32)
else:
ft = None
if return_distances:
if distances is None:
if metric == 1:
dt = numpy.zeros(tmp1.shape, dtype=numpy.float64)
else:
dt = numpy.zeros(tmp1.shape, dtype=numpy.uint32)
else:
if distances.shape != tmp1.shape:
raise RuntimeError('distances array has wrong shape')
if metric == 1:
if distances.dtype.type != numpy.float64:
raise RuntimeError('distances array must be float64')
else:
if distances.dtype.type != numpy.uint32:
raise RuntimeError('distances array must be uint32')
dt = distances
else:
dt = None
_nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft)
if return_indices:
if isinstance(indices, numpy.ndarray):
if indices.dtype.type != numpy.int32:
                raise RuntimeError('indices must be of int32 type')
if indices.shape != (tmp1.ndim,) + tmp1.shape:
raise RuntimeError('indices has wrong shape')
tmp2 = indices
else:
tmp2 = numpy.indices(tmp1.shape, dtype=numpy.int32)
ft = numpy.ravel(ft)
for ii in range(tmp2.shape[0]):
rtmp = numpy.ravel(tmp2[ii, ...])[ft]
rtmp.shape = tmp1.shape
tmp2[ii, ...] = rtmp
ft = tmp2
# construct and return the result
result = []
if return_distances and not isinstance(distances, numpy.ndarray):
result.append(dt)
if return_indices and not isinstance(indices, numpy.ndarray):
result.append(ft)
if len(result) == 2:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return None
def distance_transform_cdt(input, metric='chessboard', return_distances=True,
return_indices=False, distances=None, indices=None):
"""
Distance transform for chamfer type of transforms.
Parameters
----------
input : array_like
Input
metric : {'chessboard', 'taxicab'}, optional
The `metric` determines the type of chamfering that is done. If the
`metric` is equal to 'taxicab' a structure is generated using
generate_binary_structure with a squared distance equal to 1. If
        the `metric` is equal to 'chessboard', a structure is generated
using generate_binary_structure with a squared distance equal to
the dimensionality of the array. These choices correspond to the
common interpretations of the 'taxicab' and the 'chessboard'
distance metrics in two dimensions.
The default for `metric` is 'chessboard'.
return_distances, return_indices : bool, optional
The `return_distances`, and `return_indices` flags can be used to
indicate if the distance transform, the feature transform, or both
must be returned.
If the feature transform is returned (``return_indices=True``),
the index of the closest background element is returned along
the first axis of the result.
The `return_distances` default is True, and the
`return_indices` default is False.
distances, indices : ndarrays of int32, optional
The `distances` and `indices` arguments can be used to give optional
output arrays that must be the same shape as `input`.
"""
if (not return_distances) and (not return_indices):
msg = 'at least one of distances/indices must be specified'
raise RuntimeError(msg)
ft_inplace = isinstance(indices, numpy.ndarray)
dt_inplace = isinstance(distances, numpy.ndarray)
input = numpy.asarray(input)
if metric in ['taxicab', 'cityblock', 'manhattan']:
rank = input.ndim
metric = generate_binary_structure(rank, 1)
elif metric == 'chessboard':
rank = input.ndim
metric = generate_binary_structure(rank, rank)
else:
try:
metric = numpy.asarray(metric)
        except Exception:
raise RuntimeError('invalid metric provided')
for s in metric.shape:
if s != 3:
raise RuntimeError('metric sizes must be equal to 3')
if not metric.flags.contiguous:
metric = metric.copy()
if dt_inplace:
if distances.dtype.type != numpy.int32:
raise RuntimeError('distances must be of int32 type')
if distances.shape != input.shape:
raise RuntimeError('distances has wrong shape')
dt = distances
dt[...] = numpy.where(input, -1, 0).astype(numpy.int32)
else:
dt = numpy.where(input, -1, 0).astype(numpy.int32)
rank = dt.ndim
if return_indices:
sz = numpy.product(dt.shape, axis=0)
ft = numpy.arange(sz, dtype=numpy.int32)
ft.shape = dt.shape
else:
ft = None
_nd_image.distance_transform_op(metric, dt, ft)
dt = dt[tuple([slice(None, None, -1)] * rank)]
if return_indices:
ft = ft[tuple([slice(None, None, -1)] * rank)]
_nd_image.distance_transform_op(metric, dt, ft)
dt = dt[tuple([slice(None, None, -1)] * rank)]
if return_indices:
ft = ft[tuple([slice(None, None, -1)] * rank)]
ft = numpy.ravel(ft)
if ft_inplace:
if indices.dtype.type != numpy.int32:
            raise RuntimeError('indices must be of int32 type')
if indices.shape != (dt.ndim,) + dt.shape:
raise RuntimeError('indices has wrong shape')
tmp = indices
else:
tmp = numpy.indices(dt.shape, dtype=numpy.int32)
for ii in range(tmp.shape[0]):
rtmp = numpy.ravel(tmp[ii, ...])[ft]
rtmp.shape = dt.shape
tmp[ii, ...] = rtmp
ft = tmp
# construct and return the result
result = []
if return_distances and not dt_inplace:
result.append(dt)
if return_indices and not ft_inplace:
result.append(ft)
if len(result) == 2:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return None
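# Illustrative usage sketch (added for exposition, not part of the original
# scipy source); the values assume the default 'chessboard' metric described
# in the docstring above:
#
# >>> import numpy as np
# >>> from scipy import ndimage
# >>> a = np.array([[0, 1, 1], [0, 1, 1], [0, 0, 1]])
# >>> ndimage.distance_transform_cdt(a)
# array([[0, 1, 2],
#        [0, 1, 1],
#        [0, 0, 1]], dtype=int32)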
def distance_transform_edt(input, sampling=None, return_distances=True,
return_indices=False, distances=None, indices=None):
"""
Exact euclidean distance transform.
In addition to the distance transform, the feature transform can
be calculated. In this case the index of the closest background
element is returned along the first axis of the result.
Parameters
----------
input : array_like
Input data to transform. Can be any type but will be converted
into binary: 1 wherever input equates to True, 0 elsewhere.
sampling : float or int, or sequence of same, optional
Spacing of elements along each dimension. If a sequence, must be of
length equal to the input rank; if a single number, this is used for
all axes. If not specified, a grid spacing of unity is implied.
return_distances : bool, optional
Whether to return distance matrix. At least one of
return_distances/return_indices must be True. Default is True.
return_indices : bool, optional
Whether to return indices matrix. Default is False.
distances : ndarray, optional
Used for output of distance array, must be of type float64.
indices : ndarray, optional
Used for output of indices, must be of type int32.
Returns
-------
distance_transform_edt : ndarray or list of ndarrays
Either distance matrix, index matrix, or a list of the two,
depending on `return_x` flags and `distance` and `indices`
input parameters.
Notes
-----
The euclidean distance transform gives values of the euclidean
distance::
                   n
        y_i = sqrt(sum (x[i]-b[i])**2)
                   i
where b[i] is the background point (value 0) with the smallest
Euclidean distance to input points x[i], and n is the
number of dimensions.
Examples
--------
>>> from scipy import ndimage
>>> a = np.array(([0,1,1,1,1],
... [0,0,1,1,1],
... [0,1,1,1,1],
... [0,1,1,1,0],
... [0,1,1,0,0]))
>>> ndimage.distance_transform_edt(a)
array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
[ 0. , 0. , 1. , 2. , 2. ],
[ 0. , 1. , 1.4142, 1.4142, 1. ],
[ 0. , 1. , 1.4142, 1. , 0. ],
[ 0. , 1. , 1. , 0. , 0. ]])
With a sampling of 2 units along x, 1 along y:
>>> ndimage.distance_transform_edt(a, sampling=[2,1])
array([[ 0. , 1. , 2. , 2.8284, 3.6056],
[ 0. , 0. , 1. , 2. , 3. ],
[ 0. , 1. , 2. , 2.2361, 2. ],
[ 0. , 1. , 2. , 1. , 0. ],
[ 0. , 1. , 1. , 0. , 0. ]])
Asking for indices as well:
>>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True)
>>> inds
array([[[0, 0, 1, 1, 3],
[1, 1, 1, 1, 3],
[2, 2, 1, 3, 3],
[3, 3, 4, 4, 3],
[4, 4, 4, 4, 4]],
[[0, 0, 1, 1, 4],
[0, 1, 1, 1, 4],
[0, 0, 1, 4, 4],
[0, 0, 3, 3, 4],
[0, 0, 3, 3, 4]]])
With arrays provided for inplace outputs:
>>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32)
>>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices)
array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
[ 0. , 0. , 1. , 2. , 2. ],
[ 0. , 1. , 1.4142, 1.4142, 1. ],
[ 0. , 1. , 1.4142, 1. , 0. ],
[ 0. , 1. , 1. , 0. , 0. ]])
>>> indices
array([[[0, 0, 1, 1, 3],
[1, 1, 1, 1, 3],
[2, 2, 1, 3, 3],
[3, 3, 4, 4, 3],
[4, 4, 4, 4, 4]],
[[0, 0, 1, 1, 4],
[0, 1, 1, 1, 4],
[0, 0, 1, 4, 4],
[0, 0, 3, 3, 4],
[0, 0, 3, 3, 4]]])
"""
if (not return_distances) and (not return_indices):
        msg = 'at least one of return_distances/return_indices must be True'
raise RuntimeError(msg)
ft_inplace = isinstance(indices, numpy.ndarray)
dt_inplace = isinstance(distances, numpy.ndarray)
# calculate the feature transform
input = numpy.atleast_1d(numpy.where(input, 1, 0).astype(numpy.int8))
if sampling is not None:
sampling = _ni_support._normalize_sequence(sampling, input.ndim)
sampling = numpy.asarray(sampling, dtype=numpy.float64)
if not sampling.flags.contiguous:
sampling = sampling.copy()
if ft_inplace:
ft = indices
if ft.shape != (input.ndim,) + input.shape:
raise RuntimeError('indices has wrong shape')
if ft.dtype.type != numpy.int32:
raise RuntimeError('indices must be of int32 type')
else:
ft = numpy.zeros((input.ndim,) + input.shape, dtype=numpy.int32)
_nd_image.euclidean_feature_transform(input, sampling, ft)
# if requested, calculate the distance transform
if return_distances:
dt = ft - numpy.indices(input.shape, dtype=ft.dtype)
dt = dt.astype(numpy.float64)
if sampling is not None:
for ii in range(len(sampling)):
dt[ii, ...] *= sampling[ii]
numpy.multiply(dt, dt, dt)
if dt_inplace:
dt = numpy.add.reduce(dt, axis=0)
            if distances.shape != dt.shape:
                raise RuntimeError('distances has wrong shape')
            if distances.dtype.type != numpy.float64:
                raise RuntimeError('distances must be of float64 type')
numpy.sqrt(dt, distances)
else:
dt = numpy.add.reduce(dt, axis=0)
dt = numpy.sqrt(dt)
# construct and return the result
result = []
if return_distances and not dt_inplace:
result.append(dt)
if return_indices and not ft_inplace:
result.append(ft)
if len(result) == 2:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return None
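# Illustrative cross-check (added for exposition, not part of the original
# source): the distance step above can be reproduced from the feature
# transform, since squared per-axis offsets are summed before the final
# square root.
#
# >>> import numpy as np
# >>> from scipy import ndimage
# >>> a = np.array([[0, 1, 1], [0, 0, 1]])
# >>> dt, ft = ndimage.distance_transform_edt(a, return_indices=True)
# >>> manual = np.sqrt(((ft - np.indices(a.shape)) ** 2).sum(axis=0))
# >>> np.allclose(dt, manual)
# True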
| 82,640 | 36.158723 | 90 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/_ni_docstrings.py
|
"""Docstring components common to several ndimage functions."""
from __future__ import division, print_function, absolute_import
from scipy.misc import doccer
__all__ = ['docfiller']
_input_doc = (
"""input : array_like
The input array.""")
_axis_doc = (
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1.""")
_output_doc = (
"""output : array or dtype, optional
The array in which to place the output, or the dtype of the
returned array. By default an array of the same dtype as input
will be created.""")
_size_foot_doc = (
"""size : scalar or tuple, optional
See footprint, below. Ignored if footprint is given.
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2). When `footprint` is given, `size` is ignored.""")
_mode_doc = (
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the input array is extended
when the filter overlaps a border. Default is 'reflect'. Behavior
for each valid value is as follows:
'reflect' (`d c b a | a b c d | d c b a`)
The input is extended by reflecting about the edge of the last
pixel.
'constant' (`k k k k | a b c d | k k k k`)
The input is extended by filling all values beyond the edge with
the same constant value, defined by the `cval` parameter.
'nearest' (`a a a a | a b c d | d d d d`)
The input is extended by replicating the last pixel.
'mirror' (`d c b | a b c d | c b a`)
The input is extended by reflecting about the center of the last
pixel.
'wrap' (`a b c d | a b c d | a b c d`)
The input is extended by wrapping around to the opposite edge.""")
_mode_multiple_doc = (
"""mode : str or sequence, optional
The `mode` parameter determines how the input array is extended
when the filter overlaps a border. By passing a sequence of modes
with length equal to the number of dimensions of the input array,
different modes can be specified along each axis. Default value is
'reflect'. The valid values and their behavior is as follows:
'reflect' (`d c b a | a b c d | d c b a`)
The input is extended by reflecting about the edge of the last
pixel.
'constant' (`k k k k | a b c d | k k k k`)
The input is extended by filling all values beyond the edge with
the same constant value, defined by the `cval` parameter.
'nearest' (`a a a a | a b c d | d d d d`)
The input is extended by replicating the last pixel.
'mirror' (`d c b | a b c d | c b a`)
The input is extended by reflecting about the center of the last
pixel.
'wrap' (`a b c d | a b c d | a b c d`)
The input is extended by wrapping around to the opposite edge.""")
_cval_doc = (
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.""")
_origin_doc = (
"""origin : int, optional
Controls the placement of the filter on the input array's pixels.
A value of 0 (the default) centers the filter over the pixel, with
positive values shifting the filter to the left, and negative ones
to the right.""")
_origin_multiple_doc = (
"""origin : int or sequence, optional
Controls the placement of the filter on the input array's pixels.
A value of 0 (the default) centers the filter over the pixel, with
positive values shifting the filter to the left, and negative ones
to the right. By passing a sequence of origins with length equal to
the number of dimensions of the input array, different shifts can
be specified along each axis.""")
_extra_arguments_doc = (
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function.""")
_extra_keywords_doc = (
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function.""")
_prefilter_doc = (
"""prefilter : bool, optional
Determines if the input array is prefiltered with `spline_filter`
before interpolation. The default is True, which will create a
temporary `float64` array of filtered values if `order > 1`. If
setting this to False, the output will be slightly blurred if
`order > 1`, unless the input is prefiltered, i.e. it is the result
of calling `spline_filter` on the original input.""")
docdict = {
'input': _input_doc,
'axis': _axis_doc,
'output': _output_doc,
'size_foot': _size_foot_doc,
'mode': _mode_doc,
'mode_multiple': _mode_multiple_doc,
'cval': _cval_doc,
'origin': _origin_doc,
'origin_multiple': _origin_multiple_doc,
'extra_arguments': _extra_arguments_doc,
'extra_keywords': _extra_keywords_doc,
'prefilter': _prefilter_doc
}
docfiller = doccer.filldoc(docdict)
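# Illustrative usage sketch (added for exposition; the function `f` below is
# hypothetical): `docfiller` substitutes the ``%(key)s`` placeholders in a
# decorated function's docstring with the shared fragments defined above.
#
# >>> @docfiller
# ... def f(input, axis=-1):
# ...     """Example.
# ...     Parameters
# ...     ----------
# ...     %(input)s
# ...     %(axis)s
# ...     """
# >>> 'The input array.' in f.__doc__
# True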
| 5,409 | 40.29771 | 74 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/filters.py
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from . import _ni_support
from . import _nd_image
from . import _ni_docstrings
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
@_ni_docstrings.docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import correlate1d
>>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([ 8, 26, 8, 12, 7, 28, 36, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
origin > len(weights)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return output
@_ni_docstrings.docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
Examples
--------
>>> from scipy.ndimage import convolve1d
>>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([14, 24, 4, 13, 12, 36, 27, 0])
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
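# Illustrative cross-check (added for exposition): convolution is correlation
# with the reversed kernel, with the origin shifted for even-length kernels
# exactly as implemented above.
#
# >>> import numpy as np
# >>> x = [2, 8, 0, 4, 1, 9, 9, 0]
# >>> np.array_equal(convolve1d(x, [1, 3]),
# ...                correlate1d(x, [3, 1], origin=-1))
# True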
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
p = numpy.polynomial.Polynomial([0, 0, -0.5 / (sigma * sigma)])
x = numpy.arange(-radius, radius + 1)
phi_x = numpy.exp(p(x), dtype=numpy.double)
phi_x /= phi_x.sum()
if order > 0:
q = numpy.polynomial.Polynomial([1])
p_deriv = p.deriv()
for _ in range(order):
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
q = q.deriv() + q * p_deriv
phi_x *= q(x)
return phi_x
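# Illustrative checks (added for exposition): by construction the order-0
# kernel is normalized to unit sum, and the order-1 kernel is antisymmetric,
# since q(x) = p'(x) is odd while phi(x) is even.
#
# >>> import numpy as np
# >>> k0 = _gaussian_kernel1d(2.0, 0, 8)
# >>> abs(k0.sum() - 1.0) < 1e-12
# True
# >>> k1 = _gaussian_kernel1d(2.0, 1, 8)
# >>> np.allclose(k1, -k1[::-1])
# True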
@_ni_docstrings.docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : int, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. A positive order corresponds to convolution with
that derivative of a Gaussian.
%(output)s
%(mode)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
Examples
--------
>>> from scipy.ndimage import gaussian_filter1d
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
>>> import matplotlib.pyplot as plt
>>> np.random.seed(280490)
>>> x = np.random.randn(101).cumsum()
>>> y3 = gaussian_filter1d(x, 3)
>>> y6 = gaussian_filter1d(x, 6)
>>> plt.plot(x, 'k', label='original data')
>>> plt.plot(y3, '--', label='filtered, sigma=3')
>>> plt.plot(y6, ':', label='filtered, sigma=6')
>>> plt.legend()
>>> plt.grid()
>>> plt.show()
"""
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
    # Since we are calling correlate, not convolve, reverse the kernel
weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
return correlate1d(input, weights, axis, output, mode, cval, 0)
@_ni_docstrings.docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : int or sequence of ints, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. A positive order
corresponds to convolution with that derivative of a Gaussian.
%(output)s
%(mode_multiple)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> from scipy.ndimage import gaussian_filter
>>> a = np.arange(50, step=2).reshape((5,5))
>>> a
array([[ 0, 2, 4, 6, 8],
[10, 12, 14, 16, 18],
[20, 22, 24, 26, 28],
[30, 32, 34, 36, 38],
[40, 42, 44, 46, 48]])
>>> gaussian_filter(a, sigma=1)
array([[ 4, 6, 8, 9, 11],
[10, 12, 14, 15, 17],
[20, 22, 24, 25, 27],
[29, 31, 33, 34, 36],
[35, 37, 39, 40, 42]])
>>> from scipy import misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = gaussian_filter(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order, mode in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return output
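# Illustrative cross-check (added for exposition): the separability noted in
# the docstring means the N-D filter equals successive 1-D filters applied
# along each axis.
#
# >>> import numpy as np
# >>> a = np.random.rand(6, 6)
# >>> seq = gaussian_filter1d(gaussian_filter1d(a, 2.0, axis=0), 2.0, axis=1)
# >>> np.allclose(gaussian_filter(a, sigma=2.0), seq)
# True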
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.prewitt(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
return output
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
return output
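# Illustrative cross-check (added for exposition): the Sobel filter above is
# separable -- a [-1, 0, 1] derivative along `axis` and [1, 2, 1] smoothing
# along the remaining axes -- so in 2-D it matches one full correlation with
# the outer-product kernel.
#
# >>> import numpy as np
# >>> a = np.random.rand(5, 5)
# >>> k = np.outer([1, 2, 1], [-1, 0, 1])  # smooth rows, differentiate cols
# >>> np.allclose(sobel(a, axis=1), correlate(a, k))
# True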
@_ni_docstrings.docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords=None):
"""
N-dimensional Laplace filter using a provided second derivative function.
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative2(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-dimensional Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.laplace(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
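# Illustrative check (added for exposition): away from the borders, the
# [1, -2, 1] stencil used above is the discrete second difference, so a
# quadratic sequence has a constant interior response.
#
# >>> import numpy as np
# >>> x = np.arange(5, dtype=np.float64) ** 2
# >>> laplace(x)[1:-1]
# array([ 2.,  2.,  2.])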
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> result = ndimage.gaussian_laplace(ascent, sigma=1)
>>> ax1.imshow(result)
>>> result = ndimage.gaussian_laplace(ascent, sigma=3)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
extra_arguments=(), extra_keywords=None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
numpy.sqrt(output, output, casting='unsafe')
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Returns
-------
gaussian_gradient_magnitude : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return output
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
%(input)s
weights : ndarray
array of weights, same number of dimensions as input
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
See Also
--------
convolve : Convolve an image with a kernel.
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
%(input)s
weights : array_like
Array of weights, same number of dimensions as input
%(output)s
%(mode_multiple)s
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
%(origin_multiple)s
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e. where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
    With ``mode='nearest'``, the single nearest value at an edge of
`input` is repeated as many times as needed to match the overlapping
`weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
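# Illustrative cross-check (added for exposition): per the formula in the
# Notes, N-D convolution is correlation with the kernel flipped along every
# axis (the origin is unchanged for odd-sized kernels).
#
# >>> import numpy as np
# >>> a = np.random.rand(4, 4)
# >>> k = np.random.rand(3, 3)
# >>> np.allclose(convolve(a, k), correlate(a, k[::-1, ::-1]))
# True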
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import uniform_filter1d
>>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([4, 3, 4, 1, 4, 6, 6, 3])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return output
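# Illustrative check (added for exposition): for floating-point input the
# filter is a plain sliding mean over `size` samples, with 'reflect'
# handling at the borders.
#
# >>> import numpy as np
# >>> x = np.array([2., 8., 0., 4.])
# >>> np.allclose(uniform_filter1d(x, size=3), [4., 10 / 3., 4., 8 / 3.])
# True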
@_ni_docstrings.docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
uniform_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.uniform_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin, mode in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import minimum_filter1d
>>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([2, 0, 0, 0, 1, 1, 0, 0])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return output
@_ni_docstrings.docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
    maximum1d : ndarray
        Maximum-filtered array with same shape as input.
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import maximum_filter1d
>>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([8, 8, 8, 4, 9, 9, 9, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return output
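# Illustrative check (added for exposition): the maximum filter is the dual
# of the minimum filter under negation.
#
# >>> import numpy as np
# >>> x = np.array([2, 8, 0, 4, 1, 9, 9, 0])
# >>> np.array_equal(maximum_filter1d(x, 3), -minimum_filter1d(-x, 3))
# True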
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint, dtype=bool)
if not footprint.any():
raise ValueError("All-zero footprint is not supported.")
if footprint.all():
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin, mode in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return output
@_ni_docstrings.docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
minimum_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.minimum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@_ni_docstrings.docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
maximum_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.maximum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@_ni_docstrings.docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return output
@_ni_docstrings.docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : int
        The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
rank_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.rank_filter(ascent, rank=42, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@_ni_docstrings.docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculate a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
median_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.median_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
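# Illustrative checks (added for exposition): per the dispatch in
# `_rank_filter` above, rank 0 reduces to the minimum filter, and for a full
# 3x3 footprint (filter size 9) the median is rank 9 // 2 == 4.
#
# >>> import numpy as np
# >>> a = np.random.randint(0, 100, (6, 6))
# >>> np.array_equal(rank_filter(a, 0, size=3), minimum_filter(a, size=3))
# True
# >>> np.array_equal(rank_filter(a, 4, size=3), median_filter(a, size=3))
# True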
@_ni_docstrings.docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
percentile_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
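# Illustrative check (added for exposition): a negative percentile wraps
# around by 100, so -20 and 80 select the same rank.
#
# >>> import numpy as np
# >>> a = np.random.rand(5, 5)
# >>> np.array_equal(percentile_filter(a, -20, size=3),
# ...                percentile_filter(a, 80, size=3))
# True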
@_ni_docstrings.docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a one-dimensional filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
given function at each line. The arguments of the line are the
input line, and the output line. The input and output lines are 1D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int function(double *input_line, npy_intp input_length,
double *output_line, npy_intp output_length,
void *user_data)
int function(double *input_line, intptr_t input_length,
double *output_line, intptr_t output_length,
void *user_data)
The calling function iterates over the lines of the input and output
arrays, calling the callback function at each line. The current line
is extended according to the border conditions set by the calling
function, and the result is copied into the array that is passed
through ``input_line``. The length of the input line (after extension)
is passed through ``input_length``. The callback function should apply
the filter and store the result in the array passed through
``output_line``. The length of the output line is passed through
``output_length``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments,
extra_keywords)
return output
@_ni_docstrings.docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int callback(double *buffer, npy_intp filter_size,
double *return_value, void *user_data)
int callback(double *buffer, intptr_t filter_size,
double *return_value, void *user_data)
The calling function iterates over the elements of the input and
output arrays, calling the callback function at each element. The
elements within the footprint of the filter at the current element are
passed through the ``buffer`` parameter, and the number of elements
within the footprint through ``filter_size``. The calculated value is
returned in ``return_value``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
"""
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return output
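# Illustrative usage sketch (added for exposition): a pure-Python callable
# receives the footprint values as a 1-D array of doubles and returns a
# scalar; here a local range (max - min) filter.
#
# >>> import numpy as np
# >>> a = np.arange(9.).reshape(3, 3)
# >>> generic_filter(a, lambda v: v.max() - v.min(), size=2)
# array([[ 0.,  1.,  1.],
#        [ 3.,  4.,  4.],
#        [ 3.,  4.,  4.]])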
| 49,136 | 33.313547 | 90 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/io.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
_have_pil = True
try:
from scipy.misc.pilutil import imread as _imread
except ImportError:
_have_pil = False
__all__ = ['imread']
# Use the implementation of `imread` in `scipy.misc.pilutil.imread`.
# If it weren't for the different names of the first arguments of
# ndimage.io.imread and misc.pilutil.imread, we could simplify this file
# by writing
# from scipy.misc.pilutil import imread
# Unfortunately, because the argument names are different, that
# introduces a backwards incompatibility.
@np.deprecate(message="`imread` is deprecated in SciPy 1.0.0.\n"
"Use ``matplotlib.pyplot.imread`` instead.")
def imread(fname, flatten=False, mode=None):
if _have_pil:
return _imread(fname, flatten, mode)
raise ImportError("Could not import the Python Imaging Library (PIL)"
" required to load image files. Please refer to"
" http://pillow.readthedocs.org/en/latest/installation.html"
" for installation instructions.")
if _have_pil and _imread.__doc__ is not None:
imread.__doc__ = _imread.__doc__.replace('name : str', 'fname : str')
| 1,249 | 32.783784 | 82 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/ndimage/tests/test_io.py
|
from __future__ import division, print_function, absolute_import
import pytest
from numpy.testing import assert_array_equal
from scipy._lib._numpy_compat import suppress_warnings
import scipy.ndimage as ndi
import os
try:
from PIL import Image
pil_missing = False
except ImportError:
pil_missing = True
@pytest.mark.skipif(pil_missing, reason="The Python Image Library could not be found.")
def test_imread():
lp = os.path.join(os.path.dirname(__file__), 'dots.png')
with suppress_warnings() as sup:
# PIL causes a Py3k ResourceWarning
sup.filter(message="unclosed file")
sup.filter(DeprecationWarning)
img = ndi.imread(lp, mode="RGB")
assert_array_equal(img.shape, (300, 420, 3))
with suppress_warnings() as sup:
# PIL causes a Py3k ResourceWarning
sup.filter(message="unclosed file")
sup.filter(DeprecationWarning)
img = ndi.imread(lp, flatten=True)
assert_array_equal(img.shape, (300, 420))
with open(lp, 'rb') as fobj:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
img = ndi.imread(fobj, mode="RGB")
assert_array_equal(img.shape, (300, 420, 3))
| 1,214 | 30.153846 | 87 |
py
|