id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
23,065 |
def stack(seq, axis=0, allow_unknown_chunksizes=False):
"""
Stack arrays along a new axis
Given a sequence of dask arrays, form a new dask array by stacking them
along a new dimension (axis=0 by default)
Parameters
----------
seq: list of dask.arrays
axis: int
Dimension along which to align all of the arrays
allow_unknown_chunksizes: bool
Allow unknown chunksizes, such as come from converting from dask
dataframes. Dask.array is unable to verify that chunks line up. If
data comes from differently aligned sources then this can cause
unexpected results.
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also
--------
concatenate
"""
from . import wrap
seq = [asarray(a) for a in seq]
if not seq:
raise ValueError("Need array(s) to stack")
if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):
idx = [x for x in filter(lambda x: x[1].shape != seq[0].shape, enumerate(seq))]
raise ValueError(
"Stacked arrays must have the same shape. "
"The first array had shape {0}, while array "
"{1} has shape {2}".format(seq[0].shape, idx[0][0], idx[0][1].shape)
)
meta = np.stack([meta_from_array(a) for a in seq], axis=axis)
seq = [x.astype(meta.dtype) for x in seq]
ndim = meta.ndim - 1
if axis < 0:
axis = ndim + axis + 1
shape = tuple(
len(seq)
if i == axis
else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])
for i in range(meta.ndim)
)
seq2 = [a for a in seq if a.size]
if not seq2:
seq2 = seq
n = len(seq2)
if n == 0:
try:
return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)
except TypeError:
return wrap.empty(shape, chunks=shape, dtype=meta.dtype)
ind = list(range(ndim))
uc_args = list(concat((x, ind) for x in seq2))
_, seq2 = unify_chunks(*uc_args)
assert len(set(a.chunks for a in seq2)) == 1 # same chunks
chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]
names = [a.name for a in seq2]
name = "stack-" + tokenize(names, axis)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
inputs = [
(names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys
]
values = [
(
getitem,
inp,
(slice(None, None, None),) * axis
+ (None,)
+ (slice(None, None, None),) * (ndim - axis),
)
for inp in inputs
]
layer = dict(zip(keys, values))
graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)
return Array(graph, name, chunks, meta=meta)
|
def stack(seq, axis=0, allow_unknown_chunksizes=False):
"""
Stack arrays along a new axis
Given a sequence of dask arrays, form a new dask array by stacking them
along a new dimension (axis=0 by default)
Parameters
----------
seq: list of dask.arrays
axis: int
Dimension along which to align all of the arrays
allow_unknown_chunksizes: bool
Allow unknown chunksizes, such as come from converting from dask
dataframes. Dask.array is unable to verify that chunks line up. If
data comes from differently aligned sources then this can cause
unexpected results.
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also
--------
concatenate
"""
from . import wrap
seq = [asarray(a) for a in seq]
if not seq:
raise ValueError("Need array(s) to stack")
if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):
idx = first(i for i in enumerate(seq) if x.shape != seq[0].shape)
raise ValueError(
"Stacked arrays must have the same shape. "
"The first array had shape {0}, while array "
"{1} has shape {2}".format(seq[0].shape, idx[0][0], idx[0][1].shape)
)
meta = np.stack([meta_from_array(a) for a in seq], axis=axis)
seq = [x.astype(meta.dtype) for x in seq]
ndim = meta.ndim - 1
if axis < 0:
axis = ndim + axis + 1
shape = tuple(
len(seq)
if i == axis
else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])
for i in range(meta.ndim)
)
seq2 = [a for a in seq if a.size]
if not seq2:
seq2 = seq
n = len(seq2)
if n == 0:
try:
return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)
except TypeError:
return wrap.empty(shape, chunks=shape, dtype=meta.dtype)
ind = list(range(ndim))
uc_args = list(concat((x, ind) for x in seq2))
_, seq2 = unify_chunks(*uc_args)
assert len(set(a.chunks for a in seq2)) == 1 # same chunks
chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]
names = [a.name for a in seq2]
name = "stack-" + tokenize(names, axis)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
inputs = [
(names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys
]
values = [
(
getitem,
inp,
(slice(None, None, None),) * axis
+ (None,)
+ (slice(None, None, None),) * (ndim - axis),
)
for inp in inputs
]
layer = dict(zip(keys, values))
graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)
return Array(graph, name, chunks, meta=meta)
|
35,542 |
def create_buttons(packer, bus, idx, button):
values = {
"ACCButtons": button,
# "RollingCounter": idx,
}
return packer.make_can_msg("ASCMSteeringButton", bus, values)
|
def create_buttons(packer, bus, idx, button):
values = {
"ACCButtons": button,
"RollingCounter": idx,
}
return packer.make_can_msg("ASCMSteeringButton", bus, values)
|
47,682 |
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]):
"""
Extracts the commit hash from a resolved filename toward a cache file.
"""
if resolved_file is None or commit_hash is not None:
return commit_hash
if isinstance(resolved_file, str) and sys.platform == "win32":
resolved_file = re.sub("\\\\", "/", resolved_file)
search = re.search(r"snapshots/([^/]+)/", resolved_file)
if search is None:
return None
commit_hash = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
|
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]):
"""
Extracts the commit hash from a resolved filename toward a cache file.
"""
if resolved_file is None or commit_hash is not None:
return commit_hash
resolved_file = str(Path(resolved_file.as_posix()))
search = re.search(r"snapshots/([^/]+)/", resolved_file)
if search is None:
return None
commit_hash = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
|
28,284 |
def guids_from_list_str(s: str) -> Optional[Tuple[str, ...]]:
"""
Get tuple of guids from a python/json string representation of a list.
Extracts the guids from a string representation of a list, tuple,
or set of guids or a single guid.
Args:
s: input string
Returns:
Extracted guids as a tuple of strings.
If a provided string does not match the format, `None` will be returned.
For an empty list/tuple/set or empty string an empty set is returned.
Examples:
>>> guids_from_str(
"['07fd7195-c51e-44d6-a085-fa8274cf00d6', \
'070d7195-c51e-44d6-a085-fa8274cf00d6']")
will return
('07fd7195-c51e-44d6-a085-fa8274cf00d6',
'070d7195-c51e-44d6-a085-fa8274cf00d6')
"""
parsed = (ast.parse(s, mode='eval')).body
if not isinstance(parsed, (ast.List, ast.Tuple, ast.Set)):
return None
if not all([isinstance(e, ast.Constant) for e in parsed.elts]):
return None
return tuple (v.value for v in parsed.elts)
|
def guids_from_list_str(s: str) -> Optional[Tuple[str, ...]]:
"""
Get tuple of guids from a python/json string representation of a list.
Extracts the guids from a string representation of a list, tuple,
or set of guids or a single guid.
Args:
s: input string
Returns:
Extracted guids as a tuple of strings.
If a provided string does not match the format, `None` will be returned.
For an empty list/tuple/set or empty string an empty set is returned.
Examples:
>>> guids_from_str(
"['07fd7195-c51e-44d6-a085-fa8274cf00d6', \
'070d7195-c51e-44d6-a085-fa8274cf00d6']")
will return
('07fd7195-c51e-44d6-a085-fa8274cf00d6',
'070d7195-c51e-44d6-a085-fa8274cf00d6')
"""
parsed = (ast.parse(s, mode='eval')).body
if not isinstance(parsed, (ast.List, ast.Tuple, ast.Set)):
return None
if not all(isinstance(e, ast.Constant) for e in parsed.elts):
return None
return tuple (v.value for v in parsed.elts)
|
31,160 |
def fetch_incidents(last_run):
"""
Fetch incidents [IdentityIQ Alerts]
:type last_run: ``[Dict[str, str]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
"""
now = dt.datetime.now().replace(microsecond=0).isoformat()
last_processed = last_run.get('last_fetch', None)
# Compute the time frame for which the alerts will be requested.
if last_processed is None:
# Never processed, hence time filter window is MAX_ALERT_WINDOW (72 hrs) past from now
last_processed_past = (dt.datetime.strptime(now, DATE_FORMAT) + dt.timedelta(hours=-MAX_ALERT_WINDOW,
minutes=0)).isoformat()
last_processed = now
else:
now_formatted = dt.datetime.strptime(now, DATE_FORMAT)
last_processed_formatted = dt.datetime.strptime(last_processed, DATE_FORMAT)
diff = (now_formatted - last_processed_formatted).total_seconds() / 3600
if diff > MAX_ALERT_WINDOW:
# If the difference between the last run and this run is more than MAX_ALERT_WINDOW (72 hrs),
# then make it only run for past MAX_ALERT_WINDOW (72 hrs)
last_processed_past = (dt.datetime.strptime(now, DATE_FORMAT) + dt.timedelta(hours=-MAX_ALERT_WINDOW,
minutes=0)).isoformat()
last_processed = now
else:
# Else, run for only the delta time (1 min in case of normal execution)
last_processed_past = last_processed
last_processed = now
incidents = []
url = ''.join(
(IIQ_SCIM_ALERTS_URL, '?filter=(lastProcessed gt "', last_processed_past, '" and lastProcessed le "',
last_processed, '")'))
response = send_request(url, "GET", None)
if response is not None and 200 <= response.status_code < 300:
alerts = transform_object_list('IdentityIQ.Alert', response.json()['Resources'])
for alert in alerts:
if 'displayName' in alert:
incident_name = alert['displayName']
else:
incident_name = alert['name']
incident = {
'name': incident_name,
'details': alert['name'],
'occurred': alert['meta']['created'],
'rawJSON': json.dumps(alert)
}
incidents.append(incident)
next_run = {'last_fetch': now}
return next_run, incidents
|
def fetch_incidents(last_run):
"""
Fetch incidents [IdentityIQ Alerts]
:type last_run: ``[Dict[str, str]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
"""
now = dt.datetime.now().replace(microsecond=0).isoformat()
last_processed = last_run.get('last_fetch', None)
# Compute the time frame for which the alerts will be requested.
if last_processed is None:
# Never processed, hence time filter window is MAX_ALERT_WINDOW (72 hrs) past from now
last_processed_past = (dt.datetime.strptime(now, DATE_FORMAT) + dt.timedelta(hours=-MAX_ALERT_WINDOW,
minutes=0)).isoformat()
last_processed = now
else:
now_formatted = dt.datetime.strptime(now, DATE_FORMAT)
last_processed_formatted = dt.datetime.strptime(last_processed, DATE_FORMAT)
diff = (now_formatted - last_processed_formatted).total_seconds() / 3600
if diff > MAX_ALERT_WINDOW:
# If the difference between the last run and this run is more than MAX_ALERT_WINDOW (72 hrs),
# then make it only run for past MAX_ALERT_WINDOW (72 hrs)
last_processed_past = (dt.datetime.strptime(now, DATE_FORMAT) + dt.timedelta(hours=-MAX_ALERT_WINDOW,
minutes=0)).isoformat()
last_processed = now
else:
# Else, run for only the delta time (1 min in case of normal execution)
last_processed_past = last_processed
last_processed = now
incidents = []
url = ''.join(
(IIQ_SCIM_ALERTS_URL, '?filter=(lastProcessed gt "', last_processed_past, '" and lastProcessed le "',
last_processed, '")'))
response = send_request(url, "GET", None)
if response is not None and 200 <= response.status_code < 300:
alerts = transform_object_list('IdentityIQ.Alert', response.json().get('Resources'))
for alert in alerts:
if 'displayName' in alert:
incident_name = alert['displayName']
else:
incident_name = alert['name']
incident = {
'name': incident_name,
'details': alert['name'],
'occurred': alert['meta']['created'],
'rawJSON': json.dumps(alert)
}
incidents.append(incident)
next_run = {'last_fetch': now}
return next_run, incidents
|
24,892 |
def _emit_no_member(
node,
owner,
owner_name,
mixin_class_rgx: Pattern[str],
ignored_mixins=True,
ignored_none=True,
):
"""Try to see if no-member should be emitted for the given owner.
The following cases are ignored:
* the owner is a function, and it has decorators.
* the owner is an instance, and it has __getattr__, __getattribute__ implemented
* the module is explicitly ignored from no-member checks
* the owner is a class and the name can be found in its metaclass.
* The access node is protected by an except handler, which handles
AttributeError, Exception or bare except.
* The node is guarded behind and `IF` or `IFExp` node
"""
# pylint: disable=too-many-return-statements
if node_ignores_exception(node, AttributeError):
return False
if ignored_none and isinstance(owner, nodes.Const) and owner.value is None:
return False
if is_super(owner) or getattr(owner, "type", None) == "metaclass":
return False
if owner_name and ignored_mixins and mixin_class_rgx.match(owner_name):
return False
if isinstance(owner, nodes.FunctionDef) and (
owner.decorators or owner.is_abstract()
):
return False
if isinstance(owner, (astroid.Instance, nodes.ClassDef)):
if owner.has_dynamic_getattr():
# Issue #2565: Don't ignore enums, as they have a `__getattr__` but it's not
# invoked at this point.
try:
metaclass = owner.metaclass()
except astroid.MroError:
return False
if metaclass:
# Renamed in Python 3.10 to `EnumType`
return metaclass.qname() in {"enum.EnumMeta", "enum.EnumType"}
return False
if not has_known_bases(owner):
return False
# Exclude typed annotations, since these might actually exist
# at some point during the runtime of the program.
if utils.is_attribute_typed_annotation(owner, node.attrname):
return False
if isinstance(owner, astroid.objects.Super):
# Verify if we are dealing with an invalid Super object.
# If it is invalid, then there's no point in checking that
# it has the required attribute. Also, don't fail if the
# MRO is invalid.
try:
owner.super_mro()
except (astroid.MroError, astroid.SuperError):
return False
if not all(has_known_bases(base) for base in owner.type.mro()):
return False
if isinstance(owner, nodes.Module):
try:
owner.getattr("__getattr__")
return False
except astroid.NotFoundError:
pass
if owner_name and node.attrname.startswith("_" + owner_name):
# Test if an attribute has been mangled ('private' attribute)
unmangled_name = node.attrname.split("_" + owner_name)[-1]
try:
if owner.getattr(unmangled_name, context=None) is not None:
return False
except astroid.NotFoundError:
return True
if (
owner.parent
and isinstance(owner.parent, nodes.ClassDef)
and owner.parent.name == "EnumMeta"
and owner_name == "__members__"
and node.attrname in {"items", "values", "keys"}
):
# Avoid false positive on Enum.__members__.{items(), values, keys}
# See https://github.com/PyCQA/pylint/issues/4123
return False
# Don't emit no-member if guarded behind `IF` or `IFExp`
# * Walk up recursively until if statement is found.
# * Check if condition can be inferred as `Const`,
# would evaluate as `False`,
# and wheater the node is part of the `body`.
# * Continue checking until scope of node is reached.
scope: nodes.NodeNG = node.scope()
node_origin: nodes.NodeNG = node
parent: nodes.NodeNG = node.parent
while parent != scope:
if isinstance(parent, (nodes.If, nodes.IfExp)):
inferred = safe_infer(parent.test)
if ( # pylint: disable=too-many-boolean-expressions
isinstance(inferred, nodes.Const)
and inferred.bool_value() is False
and (
isinstance(parent, nodes.If)
and node_origin in parent.body
or isinstance(parent, nodes.IfExp)
and node_origin == parent.body
)
):
return False
node_origin, parent = parent, parent.parent
return True
|
def _emit_no_member(
node,
owner,
owner_name,
mixin_class_rgx: Pattern[str],
ignored_mixins=True,
ignored_none=True,
):
"""Try to see if no-member should be emitted for the given owner.
The following cases are ignored:
* the owner is a function, and it has decorators.
* the owner is an instance and it has __getattr__, __getattribute__ implemented
* the module is explicitly ignored from no-member checks
* the owner is a class and the name can be found in its metaclass.
* The access node is protected by an except handler, which handles
AttributeError, Exception or bare except.
* The node is guarded behind and `IF` or `IFExp` node
"""
# pylint: disable=too-many-return-statements
if node_ignores_exception(node, AttributeError):
return False
if ignored_none and isinstance(owner, nodes.Const) and owner.value is None:
return False
if is_super(owner) or getattr(owner, "type", None) == "metaclass":
return False
if owner_name and ignored_mixins and mixin_class_rgx.match(owner_name):
return False
if isinstance(owner, nodes.FunctionDef) and (
owner.decorators or owner.is_abstract()
):
return False
if isinstance(owner, (astroid.Instance, nodes.ClassDef)):
if owner.has_dynamic_getattr():
# Issue #2565: Don't ignore enums, as they have a `__getattr__` but it's not
# invoked at this point.
try:
metaclass = owner.metaclass()
except astroid.MroError:
return False
if metaclass:
# Renamed in Python 3.10 to `EnumType`
return metaclass.qname() in {"enum.EnumMeta", "enum.EnumType"}
return False
if not has_known_bases(owner):
return False
# Exclude typed annotations, since these might actually exist
# at some point during the runtime of the program.
if utils.is_attribute_typed_annotation(owner, node.attrname):
return False
if isinstance(owner, astroid.objects.Super):
# Verify if we are dealing with an invalid Super object.
# If it is invalid, then there's no point in checking that
# it has the required attribute. Also, don't fail if the
# MRO is invalid.
try:
owner.super_mro()
except (astroid.MroError, astroid.SuperError):
return False
if not all(has_known_bases(base) for base in owner.type.mro()):
return False
if isinstance(owner, nodes.Module):
try:
owner.getattr("__getattr__")
return False
except astroid.NotFoundError:
pass
if owner_name and node.attrname.startswith("_" + owner_name):
# Test if an attribute has been mangled ('private' attribute)
unmangled_name = node.attrname.split("_" + owner_name)[-1]
try:
if owner.getattr(unmangled_name, context=None) is not None:
return False
except astroid.NotFoundError:
return True
if (
owner.parent
and isinstance(owner.parent, nodes.ClassDef)
and owner.parent.name == "EnumMeta"
and owner_name == "__members__"
and node.attrname in {"items", "values", "keys"}
):
# Avoid false positive on Enum.__members__.{items(), values, keys}
# See https://github.com/PyCQA/pylint/issues/4123
return False
# Don't emit no-member if guarded behind `IF` or `IFExp`
# * Walk up recursively until if statement is found.
# * Check if condition can be inferred as `Const`,
# would evaluate as `False`,
# and wheater the node is part of the `body`.
# * Continue checking until scope of node is reached.
scope: nodes.NodeNG = node.scope()
node_origin: nodes.NodeNG = node
parent: nodes.NodeNG = node.parent
while parent != scope:
if isinstance(parent, (nodes.If, nodes.IfExp)):
inferred = safe_infer(parent.test)
if ( # pylint: disable=too-many-boolean-expressions
isinstance(inferred, nodes.Const)
and inferred.bool_value() is False
and (
isinstance(parent, nodes.If)
and node_origin in parent.body
or isinstance(parent, nodes.IfExp)
and node_origin == parent.body
)
):
return False
node_origin, parent = parent, parent.parent
return True
|
12,720 |
def assert_logged(caplog, expect_logged: list[tuple[int, str]] | None = None):
if expect_logged:
assert len(caplog.records) == len(expect_logged)
for idx, (lvl, msg) in enumerate(expect_logged):
log_record = caplog.records[idx]
assert msg in log_record.message
assert lvl == log_record.levelno
else:
assert not caplog.records
|
def assert_logged(caplog, expect_logged: list[tuple[int, str]] | None = None) -> None:
if expect_logged:
assert len(caplog.records) == len(expect_logged)
for idx, (lvl, msg) in enumerate(expect_logged):
log_record = caplog.records[idx]
assert msg in log_record.message
assert lvl == log_record.levelno
else:
assert not caplog.records
|
44,299 |
def taper_excitations(generators, paulixops, paulix_sector, singles, doubles):
r"""Transform excitations with a Clifford operator and taper qubits.
The qubit operators for single and double excitations are first generated using the generators of
:func:`~.SingleExcitation` and :func:`~.DoubleExcitation` operations. Each of these operators that commutes
with all :math:`\mathbb{Z}_2` symmetries of the molecular Hamiltonian are then tranformed using the
Clifford operators :math:`U` and then tapered, while rest of the other non-commuting operators are discarded.
These new tapered excitation operators can be exponentiated using :func:`~.PauliRot` for building a
tapered UCCSD-like circuit ansatze.
Args:
generators (list[Hamiltonian]): list of generators of symmetries, taus, for the Hamiltonian
paulixops (list[Operation]): list of single-qubit Pauli-X operators
paulix_sector (list[int]): list of eigenvalues of Pauli-X operators
singles (list(list(int))): list with the indices `r`, `p` of the two qubits representing the single excitation :math:`\vert r, p \rangle = \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}\rangle`
doubles (list(list(int))): list with the indices `s`, `r`, `q`, `p` of the four qubits representing the double excitation :math:`\vert s, r, q, p \rangle = \hat{c}_p^\dagger \hat{c}_q^\dagger \hat{c}_r \hat{c}_s \vert \mathrm{HF}\rangle`
Returns:
tuple(list, list): tapered single and double excitation operators
**Example**
>>> symbols = ['He', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.4588684632]])
>>> mol = qml.qchem.Molecule(symbols, geometry, charge=1)
>>> H, n_qubits = qml.qchem.molecular_hamiltonian(symbols, geometry)
>>> n_elec = mol.n_electrons
>>> generators = qml.qchem.symmetry_generators(H)
>>> paulixops = qml.qchem.paulix_ops(generators, 4)
>>> paulix_sector = qml.qchem.optimal_sector(H, generators, n_elec)
>>> singles, doubles = qml.qchem.excitations(n_elec, n_qubits)
>>> singles_tap, doubles_tap = taper_excitations(generators, paulixops,
paulix_sector, singles, doubles)
>>> print(singles_tap[0], doubles_tap[0])
((0.5+0j)) [Y0]
((-0.25+0j)) [X0 Y1] + ((-0.25+0j)) [Y0 X1]
"""
singles_tapered, doubles_tapered = [], []
for excitation in singles:
hamil_gen = qml.SingleExcitation(1, wires=excitation).generator()
if np.all([_is_commuting_obs(generator, hamil_gen) for generator in generators]):
excitation_tapered_op = qml.taper(hamil_gen, generators, paulixops, paulix_sector)
qml.simplify(excitation_tapered_op)
singles_tapered.append(excitation_tapered_op)
for excitation in doubles:
hamil_gen = qml.DoubleExcitation(1, wires=excitation).generator()
if np.all([_is_commuting_obs(generator, hamil_gen) for generator in generators]):
excitation_tapered_op = qml.taper(hamil_gen, generators, paulixops, paulix_sector)
qml.simplify(excitation_tapered_op)
doubles_tapered.append(excitation_tapered_op)
return singles_tapered, doubles_tapered
|
def taper_excitations(generators, paulixops, paulix_sector, singles, doubles):
r"""Transform excitations with a Clifford operator and taper qubits.
The qubit operators for single and double excitations are first generated using the generators of
:func:`~.SingleExcitation` and :func:`~.DoubleExcitation` operations. Each of these operators that commutes
with all :math:`\mathbb{Z}_2` symmetries of the molecular Hamiltonian are then tranformed using the
Clifford operators :math:`U` and then tapered, while rest of the other non-commuting operators are discarded.
These new tapered excitation operators can be exponentiated using :func:`~.PauliRot` for building a
tapered UCCSD-like circuit ansatze.
Args:
generators (list[Hamiltonian]): list of generators of symmetries, taus, for the Hamiltonian
paulixops (list[Operation]): list of single-qubit Pauli-X operators
paulix_sector (list[int]): list of eigenvalues of Pauli-X operators
singles (list(list(int))): list with the indices `r`, `p` of the two qubits representing the single excitation
doubles (list(list(int))): list with the indices `s`, `r`, `q`, `p` of the four qubits representing the double excitation :math:`\vert s, r, q, p \rangle = \hat{c}_p^\dagger \hat{c}_q^\dagger \hat{c}_r \hat{c}_s \vert \mathrm{HF}\rangle`
Returns:
tuple(list, list): tapered single and double excitation operators
**Example**
>>> symbols = ['He', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.4588684632]])
>>> mol = qml.qchem.Molecule(symbols, geometry, charge=1)
>>> H, n_qubits = qml.qchem.molecular_hamiltonian(symbols, geometry)
>>> n_elec = mol.n_electrons
>>> generators = qml.qchem.symmetry_generators(H)
>>> paulixops = qml.qchem.paulix_ops(generators, 4)
>>> paulix_sector = qml.qchem.optimal_sector(H, generators, n_elec)
>>> singles, doubles = qml.qchem.excitations(n_elec, n_qubits)
>>> singles_tap, doubles_tap = taper_excitations(generators, paulixops,
paulix_sector, singles, doubles)
>>> print(singles_tap[0], doubles_tap[0])
((0.5+0j)) [Y0]
((-0.25+0j)) [X0 Y1] + ((-0.25+0j)) [Y0 X1]
"""
singles_tapered, doubles_tapered = [], []
for excitation in singles:
hamil_gen = qml.SingleExcitation(1, wires=excitation).generator()
if np.all([_is_commuting_obs(generator, hamil_gen) for generator in generators]):
excitation_tapered_op = qml.taper(hamil_gen, generators, paulixops, paulix_sector)
qml.simplify(excitation_tapered_op)
singles_tapered.append(excitation_tapered_op)
for excitation in doubles:
hamil_gen = qml.DoubleExcitation(1, wires=excitation).generator()
if np.all([_is_commuting_obs(generator, hamil_gen) for generator in generators]):
excitation_tapered_op = qml.taper(hamil_gen, generators, paulixops, paulix_sector)
qml.simplify(excitation_tapered_op)
doubles_tapered.append(excitation_tapered_op)
return singles_tapered, doubles_tapered
|
34,558 |
def get_component_class(component_name: Text) -> Type["Component"]:
"""Resolve component name to a registered components class."""
if component_name == "DucklingHTTPExtractor":
rasa.shared.utils.io.raise_deprecation_warning(
"The component 'DucklingHTTPExtractor' has been renamed to "
"'DucklingEntityExtractor'. Update your pipeline to use "
"'DucklingEntityExtractor'.",
docs=DOCS_URL_COMPONENTS,
)
component_name = "DucklingEntityExtractor"
if component_name not in registered_components:
try:
return rasa.shared.utils.common.class_from_module_path(component_name)
except (ImportError, AttributeError) as e:
# when component_name is a path to a class but that path is invalid or
# when component_name is a class name and not part of old_style_names
is_path = "." in component_name
if is_path:
module_name, _, class_name = component_name.rpartition(".")
if isinstance(e, ImportError):
exception_message = f"Failed to find module '{module_name}'."
else:
# when component_name is a path to a class but the path does
# not contain that class
exception_message = (
f"The class '{class_name}' could not be "
f"found in module '{module_name}'."
)
else:
exception_message = (
f"Cannot find class '{component_name}' in global namespace. "
f"Please check that there is no typo in the class "
f"name and that you have imported the class into the global "
f"namespace."
)
raise ComponentNotFoundException(
f"Failed to load the component "
f"'{component_name}'. "
f"{exception_message} Either your "
f"pipeline configuration contain an error "
f"or the module you are trying to import "
f"is broken (e.g. the module is trying "
f"to import a package that is not "
f"installed). {traceback.format_exc()}"
)
return registered_components[component_name]
|
def get_component_class(component_name: Text) -> Type["Component"]:
"""Resolve component name to a registered components class."""
if component_name == "DucklingHTTPExtractor":
rasa.shared.utils.io.raise_deprecation_warning(
"The component 'DucklingHTTPExtractor' has been renamed to "
"'DucklingEntityExtractor'. Update your pipeline to use "
"'DucklingEntityExtractor'.",
docs=DOCS_URL_COMPONENTS,
)
component_name = "DucklingEntityExtractor"
if component_name not in registered_components:
try:
return rasa.shared.utils.common.class_from_module_path(component_name)
except (ImportError, AttributeError) as e:
# when component_name is a path to a class but that path is invalid or
# when component_name is a class name and not part of old_style_names
is_path = "." in component_name
if is_path:
module_name, _, class_name = component_name.rpartition(".")
if isinstance(e, ImportError):
exception_message = f"Failed to find module '{module_name}'."
else:
# when component_name is a path to a class but the path does
# not contain that class
exception_message = (
f"The class '{class_name}' could not be "
f"found in module '{module_name}'."
)
else:
exception_message = (
f"Cannot find class '{component_name}' in global namespace. "
f"Please check that there is no typo in the class "
f"name and that you have imported the class into the global "
f"namespace."
)
raise ComponentNotFoundException(
f"Failed to load the component "
f"'{component_name}'. "
f"{exception_message} Either your "
f"pipeline configuration contains an error "
f"or the module you are trying to import "
f"is broken (e.g. the module is trying "
f"to import a package that is not "
f"installed). {traceback.format_exc()}"
)
return registered_components[component_name]
|
28,611 |
def plot_violin(
data,
var_names=None,
combine_dims=None,
filter_vars=None,
transform=None,
quartiles=True,
rug=False,
side="both",
hdi_prob=None,
shade=0.35,
bw="default",
circular=False,
sharex=True,
sharey=True,
grid=None,
figsize=None,
textsize=None,
labeller=None,
ax=None,
shade_kwargs=None,
rug_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""Plot posterior of traces as violin plot.
Notes
-----
If multiple chains are provided for a variable they will be combined
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
var_names: list of variable names, optional
Variables to be plotted, if None all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
combine_dims : set_like of str, optional
List of dimensions to reduce. Defaults to reducing only the "chain" and "draw" dimensions.
See the :ref:`this section <common_combine_dims>` for usage examples.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e. the identity function).
quartiles: bool, optional
Flag for plotting the interquartile range, in addition to the ``hdi_prob`` * 100%
intervals. Defaults to ``True``.
rug: bool
If ``True`` adds a jittered rugplot. Defaults to ``False``.
side: string
If ``both``, both sides of the violin plot are rendered. If ``left`` or ``right``, only
the respective side is rendered. By separately plotting left and right halfs with
different data, split violin plots can be achieved. Defaults to ``both``.
hdi_prob: float, optional
Plots highest posterior density interval for chosen percentage of density.
Defaults to 0.94.
shade: float
Alpha blending value for the shaded area under the curve, between 0
(no shade) and 1 (opaque). Defaults to 0.
bw: float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
one of "scott", "silverman", "isj" or "experimental" when ``circular`` is ``False``
and "taylor" (for now) when ``circular`` is ``True``.
Defaults to "default" which means "experimental" when variable is not circular
and "taylor" when it is.
circular: bool, optional.
If ``True``, it interprets `values` is a circular variable measured in radians
and a circular KDE is used. Defaults to ``False``.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: int
Text size of the point_estimates, axis ticks, and highest density interval. If None it will
be autoscaled based on ``figsize``.
labeller : labeller instance, optional
Class providing the method ``make_label_vert`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
sharex: bool
Defaults to ``True``, violinplots share a common x-axis scale.
sharey: bool
Defaults to ``True``, violinplots share a common y-axis scale.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
shade_kwargs: dicts, optional
Additional keywords passed to :meth:`matplotlib.axes.Axes.fill_between`, or
:meth:`matplotlib.axes.Axes.barh` to control the shade.
rug_kwargs: dict
Keywords passed to the rug plot. If true only the right half side of the violin will be
plotted.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_forest: Forest plot to compare HDI intervals from a number of distributions.
Examples
--------
Show a default violin plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_violin(data)
"""
if labeller is None:
labeller = BaseLabeller()
data = convert_to_dataset(data, group="posterior")
if transform is not None:
data = transform(data)
var_names = _var_names(var_names, data, filter_vars)
plotters = filter_plotters_list(
list(xarray_var_iter(data, var_names=var_names, combined=True, skip_dims=combine_dims)),
"plot_violin",
)
rows, cols = default_grid(len(plotters), grid=grid)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
violinplot_kwargs = dict(
ax=ax,
plotters=plotters,
figsize=figsize,
rows=rows,
cols=cols,
sharex=sharex,
sharey=sharey,
shade_kwargs=shade_kwargs,
shade=shade,
rug=rug,
rug_kwargs=rug_kwargs,
side=side,
bw=bw,
textsize=textsize,
labeller=labeller,
circular=circular,
hdi_prob=hdi_prob,
quartiles=quartiles,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_violin", "violinplot", backend)
ax = plot(**violinplot_kwargs)
return ax
|
def plot_violin(
data,
var_names=None,
combine_dims=None,
filter_vars=None,
transform=None,
quartiles=True,
rug=False,
side="both",
hdi_prob=None,
shade=0.35,
bw="default",
circular=False,
sharex=True,
sharey=True,
grid=None,
figsize=None,
textsize=None,
labeller=None,
ax=None,
shade_kwargs=None,
rug_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""Plot posterior of traces as violin plot.
Notes
-----
If multiple chains are provided for a variable they will be combined
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
var_names: list of variable names, optional
Variables to be plotted, if None all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
combine_dims : set_like of str, optional
List of dimensions to reduce. Defaults to reducing only the "chain" and "draw" dimensions.
See the :ref:`this section <common_combine_dims>` for usage examples.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e. the identity function).
quartiles: bool, optional
Flag for plotting the interquartile range, in addition to the ``hdi_prob`` * 100%
intervals. Defaults to ``True``.
rug: bool
If ``True`` adds a jittered rugplot. Defaults to ``False``.
side : str, default "both"
If ``both``, both sides of the violin plot are rendered. If ``left`` or ``right``, only
the respective side is rendered. By separately plotting left and right halfs with
different data, split violin plots can be achieved. Defaults to ``both``.
hdi_prob: float, optional
Plots highest posterior density interval for chosen percentage of density.
Defaults to 0.94.
shade: float
Alpha blending value for the shaded area under the curve, between 0
(no shade) and 1 (opaque). Defaults to 0.
bw: float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
one of "scott", "silverman", "isj" or "experimental" when ``circular`` is ``False``
and "taylor" (for now) when ``circular`` is ``True``.
Defaults to "default" which means "experimental" when variable is not circular
and "taylor" when it is.
circular: bool, optional.
If ``True``, it interprets `values` is a circular variable measured in radians
and a circular KDE is used. Defaults to ``False``.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: int
Text size of the point_estimates, axis ticks, and highest density interval. If None it will
be autoscaled based on ``figsize``.
labeller : labeller instance, optional
Class providing the method ``make_label_vert`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
sharex: bool
Defaults to ``True``, violinplots share a common x-axis scale.
sharey: bool
Defaults to ``True``, violinplots share a common y-axis scale.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
shade_kwargs: dicts, optional
Additional keywords passed to :meth:`matplotlib.axes.Axes.fill_between`, or
:meth:`matplotlib.axes.Axes.barh` to control the shade.
rug_kwargs: dict
Keywords passed to the rug plot. If true only the right half side of the violin will be
plotted.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_forest: Forest plot to compare HDI intervals from a number of distributions.
Examples
--------
Show a default violin plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_violin(data)
"""
if labeller is None:
labeller = BaseLabeller()
data = convert_to_dataset(data, group="posterior")
if transform is not None:
data = transform(data)
var_names = _var_names(var_names, data, filter_vars)
plotters = filter_plotters_list(
list(xarray_var_iter(data, var_names=var_names, combined=True, skip_dims=combine_dims)),
"plot_violin",
)
rows, cols = default_grid(len(plotters), grid=grid)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
violinplot_kwargs = dict(
ax=ax,
plotters=plotters,
figsize=figsize,
rows=rows,
cols=cols,
sharex=sharex,
sharey=sharey,
shade_kwargs=shade_kwargs,
shade=shade,
rug=rug,
rug_kwargs=rug_kwargs,
side=side,
bw=bw,
textsize=textsize,
labeller=labeller,
circular=circular,
hdi_prob=hdi_prob,
quartiles=quartiles,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_violin", "violinplot", backend)
ax = plot(**violinplot_kwargs)
return ax
|
15,746 |
def register_discovery_flow(
domain: str,
title: str,
discovery_function: DiscoveryFunctionType,
connection_class: str | UndefinedType = UNDEFINED,
) -> None:
"""Register flow for discovered integrations that not require auth."""
if connection_class is not UNDEFINED:
_LOGGER.warning(
(
"The %s (%s) integration is setting a connection_class"
" when calling the 'register_discovery_flow()' method in its"
" config flow. The connection class has been deprecated and will"
" be removed in a future release of Home Assistant."
" If '%s' is a custom integration, please contact the author"
" of that integration about this warning.",
),
title,
domain,
title,
)
class DiscoveryFlow(DiscoveryFlowHandler):
"""Discovery flow handler."""
def __init__(self) -> None:
super().__init__(domain, title, discovery_function)
config_entries.HANDLERS.register(domain)(DiscoveryFlow)
|
def register_discovery_flow(
domain: str,
title: str,
discovery_function: DiscoveryFunctionType,
connection_class: str | UndefinedType = UNDEFINED,
) -> None:
"""Register flow for discovered integrations that not require auth."""
if connection_class is not UNDEFINED:
_LOGGER.warning(
(
"The %s (%s) integration is setting a connection_class"
" when calling the 'register_discovery_flow()' method in its"
" config flow. The connection class has been deprecated and will"
" be removed in a future release of Home Assistant."
" If '%s' is a custom integration, please contact the author"
" of that integration about this warning.",
),
title,
domain,
domain,
)
class DiscoveryFlow(DiscoveryFlowHandler):
"""Discovery flow handler."""
def __init__(self) -> None:
super().__init__(domain, title, discovery_function)
config_entries.HANDLERS.register(domain)(DiscoveryFlow)
|
48,175 |
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
show targets in |targets| and their depedendents."""
with open("dump.json") as file:
edges = json.load(file)
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
|
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
show targets in |targets| and their depedendents."""
with open("dump.json") as in_file:
edges = json.load(in_file)
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
|
10,889 |
def install_pip():
print('')
print('Install pip')
print('')
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tmp = tempfile.mkdtemp()
try:
get_pip = os.path.join(tmp, 'get-pip.py')
with open(get_pip, 'wb') as f:
f.write(urlopen('https://bootstrap.pypa.io/get-pip.py').read())
if subprocess.call([sys.executable, get_pip]):
raise RuntimeError("pip failed.")
finally:
shutil.rmtree(tmp)
return_code = subprocess.call(
[sys.executable] + sys.argv + ['--no-clean']
)
sys.exit(return_code)
|
def install_pip():
print('')
print('Install pip')
print('')
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tmp = tempfile.mkdtemp(prefix='buildout-dev-')
try:
get_pip = os.path.join(tmp, 'get-pip.py')
with open(get_pip, 'wb') as f:
f.write(urlopen('https://bootstrap.pypa.io/get-pip.py').read())
if subprocess.call([sys.executable, get_pip]):
raise RuntimeError("pip failed.")
finally:
shutil.rmtree(tmp)
return_code = subprocess.call(
[sys.executable] + sys.argv + ['--no-clean']
)
sys.exit(return_code)
|
23,092 |
def _block_histogramdd(sample, bins, range=None, weights=None):
"""Call numpy.histogramdd for a blocked/chunked calculation.
Slurps the result into an additional outer axis via [np.newaxis].
This new axis will be used to stack chunked calls of the numpy
function and add them together later.
Returns
-------
:py:object:`np.ndarray`
NumPy array with an additional outer dimension.
"""
return np.histogramdd(sample, bins, range=range, weights=weights)[0][np.newaxis]
|
def _block_histogramdd(sample, bins, range=None, weights=None):
"""Call numpy.histogramdd for a blocked/chunked calculation.
Slurps the result into an additional outer axis via [np.newaxis].
This new axis will be used to stack chunked calls of the numpy
function and add them together later.
Returns
-------
:py:object:`np.ndarray`
NumPy array with an additional outer dimension.
"""
return np.histogramdd(sample, bins, range=range, weights=weights)[0:1]
|
42,517 |
def get_classifier_bb_nn(defences=None):
"""
Standard BlackBox Neural Network classifier for unit testing
:return: BlackBoxClassifierNeuralNetwork
"""
from art.estimators.classification.blackbox import BlackBoxClassifierNeuralNetwork
from art.utils import to_categorical
# define black-box classifier
def predict(x):
with open(
os.path.join(os.path.dirname(os.path.dirname(__file__)), "utils/data/mnist", "api_output.txt")
) as json_file:
predictions = json.load(json_file)
return to_categorical(predictions["values"][: len(x)], nb_classes=10)
bbc = BlackBoxClassifierNeuralNetwork(
predict, (28, 28, 1), 10, clip_values=(0, 255), preprocessing_defences=defences
)
return bbc
|
def get_classifier_bb_nn(defences=None):
"""
Standard BlackBox Neural Network classifier for unit testing.
:return: BlackBoxClassifierNeuralNetwork
"""
from art.estimators.classification.blackbox import BlackBoxClassifierNeuralNetwork
from art.utils import to_categorical
# define black-box classifier
def predict(x):
with open(
os.path.join(os.path.dirname(os.path.dirname(__file__)), "utils/data/mnist", "api_output.txt")
) as json_file:
predictions = json.load(json_file)
return to_categorical(predictions["values"][: len(x)], nb_classes=10)
bbc = BlackBoxClassifierNeuralNetwork(
predict, (28, 28, 1), 10, clip_values=(0, 255), preprocessing_defences=defences
)
return bbc
|
29,815 |
def add_subparser(subparsers):
validate_parser = subparsers.add_parser(
"validate",
description="Execute 'paasta validate' from service repo root",
help="Validate that all paasta config files in pwd are correct",
)
validate_parser.add_argument(
"-s",
"--service",
required=False,
help="Service that you want to validate. Like 'example_service'.",
).completer = lazy_choices_completer(list_services)
validate_parser.add_argument(
"-v",
"--verbose",
action="store_true",
required=False,
help="Display verbose output. This shows the next few cron runs scheduled.",
)
validate_parser.add_argument(
"-y",
"--yelpsoa-config-root",
dest="yelpsoa_config_root",
default=os.getcwd(),
required=False,
help="Path to root of yelpsoa-configs checkout",
)
validate_parser.set_defaults(command=paasta_validate)
|
def add_subparser(subparsers):
validate_parser = subparsers.add_parser(
"validate",
description="Execute 'paasta validate' from service repo root",
help="Validate that all paasta config files in pwd are correct",
)
validate_parser.add_argument(
"-s",
"--service",
required=False,
help="Service that you want to validate. Like 'example_service'.",
).completer = lazy_choices_completer(list_services)
validate_parser.add_argument(
"-v",
"--verbose",
action="store_true",
required=False,
help="Toggle to display additional validation messages for humans.",
)
validate_parser.add_argument(
"-y",
"--yelpsoa-config-root",
dest="yelpsoa_config_root",
default=os.getcwd(),
required=False,
help="Path to root of yelpsoa-configs checkout",
)
validate_parser.set_defaults(command=paasta_validate)
|
28,571 |
def _plot_atomic_elpd(
ax_,
xdata,
ydata,
model1,
model2,
threshold,
coord_labels,
xlabels,
xlabels_shown,
ylabels_shown,
plot_kwargs,
):
marker = _validate_bokeh_marker(plot_kwargs.get("marker"))
marker_func = getattr(mk, marker)
sizes = np.ones(len(xdata)) * plot_kwargs.get("s")
glyph = Scatter(x="xdata", y="ydata", size="sizes", line_color=plot_kwargs.get("color", "black"), marker='cross')
source = ColumnDataSource(dict(xdata=xdata, ydata=ydata, sizes=sizes))
ax_.add_glyph(source, glyph)
if threshold is not None:
diff_abs = np.abs(ydata - ydata.mean())
bool_ary = diff_abs > threshold * ydata.std()
if coord_labels is None:
coord_labels = xdata.astype(str)
outliers = np.argwhere(bool_ary).squeeze()
for outlier in outliers:
label = coord_labels[outlier]
ax_.text(
x=np.asarray(outlier),
y=np.asarray(ydata[outlier]),
text=label,
text_color="black",
)
if ylabels_shown:
ax_.yaxis.axis_label = "ELPD difference"
else:
ax_.yaxis.minor_tick_line_color = None
ax_.yaxis.major_label_text_font_size = "0pt"
if xlabels_shown:
if xlabels:
ax_.xaxis.ticker = np.arange(0, len(coord_labels))
ax_.xaxis.major_label_overrides = {
str(key): str(value)
for key, value in zip(np.arange(0, len(coord_labels)), list(coord_labels))
}
else:
ax_.xaxis.minor_tick_line_color = None
ax_.xaxis.major_label_text_font_size = "0pt"
title = Title()
title.text = "{} - {}".format(model1, model2)
ax_.title = title
|
def _plot_atomic_elpd(
ax_,
xdata,
ydata,
model1,
model2,
threshold,
coord_labels,
xlabels,
xlabels_shown,
ylabels_shown,
plot_kwargs,
):
marker = _validate_bokeh_marker(plot_kwargs.get("marker"))
marker_func = getattr(mk, marker)
sizes = np.ones(len(xdata)) * plot_kwargs.get("s")
glyph = Scatter(x="xdata", y="ydata", size="sizes", line_color=plot_kwargs.get("color", "black"), marker=marker)
source = ColumnDataSource(dict(xdata=xdata, ydata=ydata, sizes=sizes))
ax_.add_glyph(source, glyph)
if threshold is not None:
diff_abs = np.abs(ydata - ydata.mean())
bool_ary = diff_abs > threshold * ydata.std()
if coord_labels is None:
coord_labels = xdata.astype(str)
outliers = np.argwhere(bool_ary).squeeze()
for outlier in outliers:
label = coord_labels[outlier]
ax_.text(
x=np.asarray(outlier),
y=np.asarray(ydata[outlier]),
text=label,
text_color="black",
)
if ylabels_shown:
ax_.yaxis.axis_label = "ELPD difference"
else:
ax_.yaxis.minor_tick_line_color = None
ax_.yaxis.major_label_text_font_size = "0pt"
if xlabels_shown:
if xlabels:
ax_.xaxis.ticker = np.arange(0, len(coord_labels))
ax_.xaxis.major_label_overrides = {
str(key): str(value)
for key, value in zip(np.arange(0, len(coord_labels)), list(coord_labels))
}
else:
ax_.xaxis.minor_tick_line_color = None
ax_.xaxis.major_label_text_font_size = "0pt"
title = Title()
title.text = "{} - {}".format(model1, model2)
ax_.title = title
|
7,129 |
def test_guess_spatial_dimensions():
im1 = np.zeros((5, 5))
im2 = np.zeros((5, 5, 5))
im3 = np.zeros((5, 5, 3))
im4 = np.zeros((5, 5, 5, 3))
im5 = np.zeros((5,))
testing.assert_equal(guess_spatial_dimensions(im1), 2)
testing.assert_equal(guess_spatial_dimensions(im2), 3)
testing.assert_equal(guess_spatial_dimensions(im3), None)
testing.assert_equal(guess_spatial_dimensions(im4), 3)
with testing.raises(ValueError):
guess_spatial_dimensions(im5)
|
def test_guess_spatial_dimensions():
im1 = np.zeros((5, 5))
im2 = np.zeros((5, 5, 5))
im3 = np.zeros((5, 5, 3))
im4 = np.zeros((5, 5, 5, 3))
im5 = np.zeros((5,))
testing.assert_equal(_guess_spatial_dimensions(im1), 2)
testing.assert_equal(guess_spatial_dimensions(im2), 3)
testing.assert_equal(guess_spatial_dimensions(im3), None)
testing.assert_equal(guess_spatial_dimensions(im4), 3)
with testing.raises(ValueError):
guess_spatial_dimensions(im5)
|
50,704 |
def entity_to_json(config, entity, state):
"""Convert an entity to its Hue bridge JSON representation."""
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if (entity_features & SUPPORT_BRIGHTNESS) or entity.domain in [climate.DOMAIN, fan.DOMAIN, media_player.DOMAIN, cover.DOMAIN]:
return {
"state": {
HUE_API_STATE_ON: state[STATE_ON],
HUE_API_STATE_BRI: state[STATE_BRIGHTNESS],
HUE_API_STATE_HUE: state[STATE_HUE],
HUE_API_STATE_SAT: state[STATE_SATURATION],
"reachable": True,
},
"type": "Dimmable light",
"name": config.get_entity_name(entity),
"modelid": "HASS123",
"uniqueid": entity.entity_id,
"swversion": "123",
}
return {
"state": {HUE_API_STATE_ON: state[STATE_ON], "reachable": True},
"type": "On/off light",
"name": config.get_entity_name(entity),
"modelid": "HASS321",
"uniqueid": entity.entity_id,
"swversion": "123",
}
|
def entity_to_json(config, entity, state):
"""Convert an entity to its Hue bridge JSON representation."""
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if (entity_features & SUPPORT_BRIGHTNESS) or entity.domain in [
climate.DOMAIN,
fan.DOMAIN,
media_player.DOMAIN,
cover.DOMAIN,
]:
return {
"state": {
HUE_API_STATE_ON: state[STATE_ON],
HUE_API_STATE_BRI: state[STATE_BRIGHTNESS],
HUE_API_STATE_HUE: state[STATE_HUE],
HUE_API_STATE_SAT: state[STATE_SATURATION],
"reachable": True,
},
"type": "Dimmable light",
"name": config.get_entity_name(entity),
"modelid": "HASS123",
"uniqueid": entity.entity_id,
"swversion": "123",
}
return {
"state": {HUE_API_STATE_ON: state[STATE_ON], "reachable": True},
"type": "On/off light",
"name": config.get_entity_name(entity),
"modelid": "HASS321",
"uniqueid": entity.entity_id,
"swversion": "123",
}
|
22,333 |
def drop_index(index, table, column_name=None, metadata=None):
"""
:param index: Index to drop
:type index: :class:`Index` or str
:param table: Table to drop the index from
:type table: :class:`Table` or str
:param metadata: Needed only if ``table`` is a table name
:type metadata: :class:`Metadata`
"""
try:
if not isinstance(index, Index):
if not isinstance(table, Table):
assert metadata is not None
table = Table(table, metadata, autoload=True)
index = truncate_index_name(index, table.metadata.bind)
if index in [ix.name for ix in table.indexes]:
index = Index(index, table.c[column_name])
else:
log.debug("Index '%s' in table '%s' does not exist.", index, table)
return
index.drop()
except Exception:
log.exception("Dropping index '%s' from table '%s' failed", index, table)
|
def drop_index(index, table, column_name=None, metadata=None):
"""
:param index: Index to drop
:type index: :class:`Index` or str
:param table: Table to drop the index from
:type table: :class:`Table` or str
:param metadata: Needed only if ``table`` is a table name
:type metadata: :class:`Metadata`
"""
try:
if not isinstance(index, Index):
if not isinstance(table, Table):
assert metadata is not None
table = Table(table, metadata, autoload=True)
index_name = truncate_index_name(index, table.metadata.bind)
if index in [ix.name for ix in table.indexes]:
index = Index(index, table.c[column_name])
else:
log.debug("Index '%s' in table '%s' does not exist.", index, table)
return
index.drop()
except Exception:
log.exception("Dropping index '%s' from table '%s' failed", index, table)
|
38,295 |
def _read_fluid_file_descriptor(fname):
"""
Read a file descriptor and returns the array of the fields found.
"""
# Mapping
mapping = [
('density', 'Density'),
('velocity_x', 'x-velocity'),
('velocity_y', 'y-velocity'),
('velocity_z', 'z-velocity'),
('pressure', 'Pressure'),
('metallicity', 'Metallicity'),
]
#Magnetic field file descriptors
magnetic=np.array([['B_{0}_{1}'.format(dim,side) for side in ['left','right']] for dim in ['x','y','z']]).ravel()
# Convert in dictionary
mapping = {k: v for k, v in mapping}
with open(fname, 'r') as f:
line = f.readline()
tmp = VERSION_RE.match(line)
mylog.debug('Reading fluid file descriptor %s.' % fname)
if not tmp:
return []
version = int(tmp.group(1))
if version == 1:
# Skip one line (containing the headers)
line = f.readline()
fields = []
for i, line in enumerate(f.readlines()):
tmp = VAR_DESC_RE.match(line)
if not tmp:
raise YTFileNotParseable(fname, i+1)
# ivar = tmp.group(1)
varname = tmp.group(2)
dtype = tmp.group(3)
if varname in mapping:
varname = mapping[varname]
elif varname in magnetic:
varname = varname
else:
varname = 'hydro_%s' % varname
fields.append((varname, dtype))
else:
mylog.error('Version %s', version)
raise YTParticleOutputFormatNotImplemented()
return fields
|
def _read_fluid_file_descriptor(fname):
"""
Read a file descriptor and returns the array of the fields found.
"""
# Mapping
mapping = [
('density', 'Density'),
('velocity_x', 'x-velocity'),
('velocity_y', 'y-velocity'),
('velocity_z', 'z-velocity'),
('pressure', 'Pressure'),
('metallicity', 'Metallicity'),
]
#Magnetic field file descriptors
magnetic=np.array([['B_{0}_{1}'.format(dim,side) for side in ['left','right']] for dim in ['x','y','z']]).ravel()
# Convert to dictionary
mapping = {k: v for k, v in mapping}
with open(fname, 'r') as f:
line = f.readline()
tmp = VERSION_RE.match(line)
mylog.debug('Reading fluid file descriptor %s.' % fname)
if not tmp:
return []
version = int(tmp.group(1))
if version == 1:
# Skip one line (containing the headers)
line = f.readline()
fields = []
for i, line in enumerate(f.readlines()):
tmp = VAR_DESC_RE.match(line)
if not tmp:
raise YTFileNotParseable(fname, i+1)
# ivar = tmp.group(1)
varname = tmp.group(2)
dtype = tmp.group(3)
if varname in mapping:
varname = mapping[varname]
elif varname in magnetic:
varname = varname
else:
varname = 'hydro_%s' % varname
fields.append((varname, dtype))
else:
mylog.error('Version %s', version)
raise YTParticleOutputFormatNotImplemented()
return fields
|
41,555 |
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=True, type=str,
help="""Input log directory. If using --multiple, this parameter indicates
the suffix path of all log directories of interest. To compare
trainings or set of trainings (using ``--multiple``) with subplots,
please list the paths by separating them with commas, eg
path_log_dir1,path_logdir2.""",
metavar=Metavar.str)
parser.add_argument("--multiple", required=False, dest="multiple", action='store_true',
help="Multiple log directories are considered: all available folders with -i as "
"prefix. The plot represents the mean value (hard line) surrounded by the standard "
"deviation envelope.")
parser.add_argument("-y", "--ylim_loss", required=False, type=str,
help="""Indicates the limits on the y-axis for the loss plots, otherwise
these limits are automatically defined. Please separate the lower
and the upper limit by a comma, eg -1,0. Note: for the validation
metrics: the y-limits are always 0.0 and 1.0.""",
metavar=Metavar.str)
parser.add_argument("-o", "--output", required=True, type=str,
help="Output folder.", metavar=Metavar.file)
return parser
|
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=True, type=str,
help="""Input log directory. If using --multiple, this parameter indicates
the suffix path of all log directories of interest. To compare
trainings or set of trainings (using ``--multiple``) with subplots,
please list the paths by separating them with commas, eg
path_log_dir1,path_logdir2.""",
metavar=Metavar.str)
parser.add_argument("--multiple", required=False, dest="multiple", action='store_true',
help="Multiple log directories are considered: all available folders with -i as "
"prefix. The plot represents the mean value (hard line) surrounded by the standard "
"deviation envelope.")
parser.add_argument("-y", "--ylim_loss", required=False, type=str,
help="""Indicates the limits on the y-axis for the loss plots, otherwise
these limits are automatically defined. Please separate the lower
and the upper limit by a comma, eg -1,0. Note: for the validation
metrics: the y-limits are always 0.0 and 1.0.""",
metavar=Metavar.float)
parser.add_argument("-o", "--output", required=True, type=str,
help="Output folder.", metavar=Metavar.file)
return parser
|
31,661 |
def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abnormal-security-manage-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
29,607 |
def SSHCluster(
hosts: List[str] = None,
connect_options: Union[List[dict], dict] = {},
worker_options: dict = {},
scheduler_options: dict = {},
worker_module: str = "distributed.cli.dask_worker",
remote_python: str = None,
**kwargs
):
""" Deploy a Dask cluster using SSH
The SSHCluster function deploys a Dask Scheduler and Workers for you on a
set of machine addresses that you provide. The first address will be used
for the scheduler while the rest will be used for the workers (feel free to
repeat the first hostname if you want to have the scheduler and worker
co-habitate one machine.)
You may configure the scheduler and workers by passing
``scheduler_options`` and ``worker_options`` dictionary keywords. See the
``dask.distributed.Scheduler`` and ``dask.distributed.Worker`` classes for
details on the available options, but the defaults should work in most
situations.
You may configure your use of SSH itself using the ``connect_options``
keyword, which passes values to the ``asyncssh.connect`` function. For
more information on these see the documentation for the ``asyncssh``
library https://asyncssh.readthedocs.io .
Parameters
----------
hosts: List[str]
List of hostnames or addresses on which to launch our cluster.
The first will be used for the scheduler and the rest for workers.
connect_options: dict or list of dict, optional
Keywords to pass through to :func:asyncssh.connect`.
This could include things such as ``port``, ``username``, ``password``
or ``known_hosts``. See docs for :func:`asyncssh.connect` and
:class:`asyncssh.SSHClientConnectionOptions` for full information.
If a list it must have the same length as ``hosts``.
worker_options: dict, optional
Keywords to pass on to workers.
scheduler_options: dict, optional
Keywords to pass on to scheduler.
worker_module: str, optional
Python module to call to start the worker.
remote_python: str, optional
Path to Python on remote nodes.
Examples
--------
The most relevant example is with a remote cluser you have SSH
access to as user ``foo``. Best practice is to generate a key-pair
following the `SSH keygen tutorial`_:
.. code:: bash
$ # Generate a key pair
$ ssh-keygen -t rsa -b 4096 -f ~/.ssh/dask-ssh -P ""
$ # Copy to remote machine
$ ssh-copy-id -i ~/.ssh/dask-ssh user@machine
Now it's possible to login to ``machine`` without entering a
password via ``ssh -i ~/.ssh-dask-ssh user@machine``. Let's
create an ``SSHCluster``:
>>> from dask.distributed import Client, SSHCluster
>>> cluster = SSHCluster(
... ["machine1", "machine1"],
... scheduler_options={"port": 0, "dashboard_address": ":8797"},
... connect_options={...})
>>> client = Client(cluster)
An example using a different worker module, in particular the
``dask-cuda-worker`` command from the ``dask-cuda`` project.
>>> from dask.distributed import Client, SSHCluster
>>> cluster = SSHCluster(
... ["localhost", "hostwithgpus", "anothergpuhost"],
... connect_options={"known_hosts": None},
... scheduler_options={"port": 0, "dashboard_address": ":8797"},
... worker_module='dask_cuda.dask_cuda_worker')
>>> client = Client(cluster)
See Also
--------
dask.distributed.Scheduler
dask.distributed.Worker
asyncssh.connect
.. _SSH keygen tutorial: https://www.ssh.com/ssh/keygen/
"""
if set(kwargs) & old_cluster_kwargs:
from .old_ssh import SSHCluster as OldSSHCluster
warnings.warn(
"Note that the SSHCluster API has been replaced. "
"We're routing you to the older implementation. "
"This will be removed in the future"
)
kwargs.setdefault("worker_addrs", hosts)
return OldSSHCluster(**kwargs)
if isinstance(connect_options, list) and len(connect_options) != len(hosts):
raise RuntimeError(
"When specifying a list of connect_options you must provide a "
"dictionary for each address."
)
scheduler = {
"cls": Scheduler,
"options": {
"address": hosts[0],
"connect_options": connect_options
if isinstance(connect_options, dict)
else connect_options[0],
"kwargs": scheduler_options,
"remote_python": remote_python,
},
}
workers = {
i: {
"cls": Worker,
"options": {
"address": host,
"connect_options": connect_options
if isinstance(connect_options, dict)
else connect_options[i + 1],
"kwargs": worker_options,
"worker_module": worker_module,
"remote_python": remote_python,
},
}
for i, host in enumerate(hosts[1:])
}
return SpecCluster(workers, scheduler, name="SSHCluster", **kwargs)
|
def SSHCluster(
hosts: List[str] = None,
connect_options: Union[List[dict], dict] = {},
worker_options: dict = {},
scheduler_options: dict = {},
worker_module: str = "distributed.cli.dask_worker",
remote_python: str = None,
**kwargs
):
""" Deploy a Dask cluster using SSH
The SSHCluster function deploys a Dask Scheduler and Workers for you on a
set of machine addresses that you provide. The first address will be used
for the scheduler while the rest will be used for the workers (feel free to
repeat the first hostname if you want to have the scheduler and worker
co-habitate one machine.)
You may configure the scheduler and workers by passing
``scheduler_options`` and ``worker_options`` dictionary keywords. See the
``dask.distributed.Scheduler`` and ``dask.distributed.Worker`` classes for
details on the available options, but the defaults should work in most
situations.
You may configure your use of SSH itself using the ``connect_options``
keyword, which passes values to the ``asyncssh.connect`` function. For
more information on these see the documentation for the ``asyncssh``
library https://asyncssh.readthedocs.io .
Parameters
----------
hosts: List[str]
List of hostnames or addresses on which to launch our cluster.
The first will be used for the scheduler and the rest for workers.
connect_options: dict or list of dict, optional
Keywords to pass through to :func:asyncssh.connect`.
This could include things such as ``port``, ``username``, ``password``
or ``known_hosts``. See docs for :func:`asyncssh.connect` and
:class:`asyncssh.SSHClientConnectionOptions` for full information.
If a list it must have the same length as ``hosts``.
worker_options: dict, optional
Keywords to pass on to workers.
scheduler_options: dict, optional
Keywords to pass on to scheduler.
worker_module: str, optional
Python module to call to start the worker.
remote_python: str, optional
Path to Python on remote nodes.
Examples
--------
The most relevant example is with a remote cluser you have SSH
access to as user ``foo``. Best practice is to generate a key-pair
following the `SSH keygen tutorial`_:
.. code:: bash
$ # Generate a key pair
$ ssh-keygen -t rsa -b 4096 -f ~/.ssh/dask-ssh -P ""
$ # Copy to remote machine
$ ssh-copy-id -i ~/.ssh/dask-ssh user@machine
Now it's possible to login to ``machine`` without entering a
password via ``ssh -i ~/.ssh-dask-ssh user@machine``. Let's
create an ``SSHCluster``:
>>> from dask.distributed import Client, SSHCluster
>>> cluster = SSHCluster(
... ["machine1", "machine1"],
... scheduler_options={"port": 0, "dashboard_address": ":8797"},
... connect_options={"username": "user", "client_keys": "~/.ssh/dask-ssh"})
>>> client = Client(cluster)
An example using a different worker module, in particular the
``dask-cuda-worker`` command from the ``dask-cuda`` project.
>>> from dask.distributed import Client, SSHCluster
>>> cluster = SSHCluster(
... ["localhost", "hostwithgpus", "anothergpuhost"],
... connect_options={"known_hosts": None},
... scheduler_options={"port": 0, "dashboard_address": ":8797"},
... worker_module='dask_cuda.dask_cuda_worker')
>>> client = Client(cluster)
See Also
--------
dask.distributed.Scheduler
dask.distributed.Worker
asyncssh.connect
.. _SSH keygen tutorial: https://www.ssh.com/ssh/keygen/
"""
if set(kwargs) & old_cluster_kwargs:
from .old_ssh import SSHCluster as OldSSHCluster
warnings.warn(
"Note that the SSHCluster API has been replaced. "
"We're routing you to the older implementation. "
"This will be removed in the future"
)
kwargs.setdefault("worker_addrs", hosts)
return OldSSHCluster(**kwargs)
if isinstance(connect_options, list) and len(connect_options) != len(hosts):
raise RuntimeError(
"When specifying a list of connect_options you must provide a "
"dictionary for each address."
)
scheduler = {
"cls": Scheduler,
"options": {
"address": hosts[0],
"connect_options": connect_options
if isinstance(connect_options, dict)
else connect_options[0],
"kwargs": scheduler_options,
"remote_python": remote_python,
},
}
workers = {
i: {
"cls": Worker,
"options": {
"address": host,
"connect_options": connect_options
if isinstance(connect_options, dict)
else connect_options[i + 1],
"kwargs": worker_options,
"worker_module": worker_module,
"remote_python": remote_python,
},
}
for i, host in enumerate(hosts[1:])
}
return SpecCluster(workers, scheduler, name="SSHCluster", **kwargs)
|
2,059 |
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterable to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, ndarray, sparse} or None
Object to be converted to an indexable iterable.
"""
if sp.issparse(iterable):
return iterable.tocsr()
elif hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
elif iterable is None:
return iterable
return np.array(iterable)
|
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterable to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, ndarray, sparse matrix} or None
Object to be converted to an indexable iterable.
"""
if sp.issparse(iterable):
return iterable.tocsr()
elif hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
elif iterable is None:
return iterable
return np.array(iterable)
|
29,305 |
def run_beam_job(
job_name: Optional[str] = None,
job_class: Optional[Type[base_jobs.JobBase]] = None
) -> beam_job_domain.BeamJobRun:
"""Starts a new Apache Beam job and returns metadata about its execution.
Args:
job_name: str. The name of the job to run. If not provided, then
job_class must not be None.
job_class: type(JobBase). A subclass of JobBase to begin running. This
value takes precedence over job_name.
Returns:
BeamJobRun. Metadata about the run's execution.
Raises:
ValueError. If both job_name and job_class is not specified.
"""
if job_class is None and job_name is None:
raise ValueError('Must specify the job class or name to run')
if job_class is None:
# MyPy is wrong. We know job_name is not None in this branch because if
# it were, the ValueError above would have been raised.
job_class = jobs_registry.get_job_class_by_name(job_name) # type: ignore[arg-type]
run_synchronously = constants.EMULATOR_MODE
run_model = jobs_manager.run_job(job_class, run_synchronously)
return get_beam_job_run_from_model(run_model)
|
def run_beam_job(
job_name: Optional[str] = None,
job_class: Optional[Type[base_jobs.JobBase]] = None
) -> beam_job_domain.BeamJobRun:
"""Starts a new Apache Beam job and returns metadata about its execution.
Args:
job_name: str. The name of the job to run. If not provided, then
job_class must not be None.
job_class: type(JobBase). A subclass of JobBase to begin running. This
value takes precedence over job_name.
Returns:
BeamJobRun. Metadata about the run's execution.
Raises:
ValueError. If both job_name and job_class are not specified.
"""
if job_class is None and job_name is None:
raise ValueError('Must specify the job class or name to run')
if job_class is None:
# MyPy is wrong. We know job_name is not None in this branch because if
# it were, the ValueError above would have been raised.
job_class = jobs_registry.get_job_class_by_name(job_name) # type: ignore[arg-type]
run_synchronously = constants.EMULATOR_MODE
run_model = jobs_manager.run_job(job_class, run_synchronously)
return get_beam_job_run_from_model(run_model)
|
18,874 |
def ci_rebuild(args):
"""Check a single spec against the remote mirror, and rebuild it from
source if the mirror does not contain the full hash match of the spec
as computed locally. """
env = ev.get_env(args, 'ci rebuild', required=True)
# Make sure the environment is "gitlab-enabled", or else there's nothing
# to do.
yaml_root = ev.config_dict(env.yaml)
gitlab_ci = None
if 'gitlab-ci' in yaml_root:
gitlab_ci = yaml_root['gitlab-ci']
if not gitlab_ci:
tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')
# Grab the environment variables we need. These either come from the
# pipeline generation step ("spack ci generate"), where they were written
# out as variables, or else provided by GitLab itself.
pipeline_artifacts_dir = get_env_var('SPACK_ARTIFACTS_ROOT')
job_log_dir = get_env_var('SPACK_JOB_LOG_DIR')
repro_dir = get_env_var('SPACK_JOB_REPRO_DIR')
local_mirror_dir = get_env_var('SPACK_LOCAL_MIRROR_DIR')
concrete_env_dir = get_env_var('SPACK_CONCRETE_ENV_DIR')
ci_pipeline_id = get_env_var('CI_PIPELINE_ID')
ci_job_name = get_env_var('CI_JOB_NAME')
signing_key = get_env_var('SPACK_SIGNING_KEY')
root_spec = get_env_var('SPACK_ROOT_SPEC')
job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
compiler_action = get_env_var('SPACK_COMPILER_ACTION')
cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
pr_env_var = get_env_var('SPACK_IS_PR_PIPELINE')
dev_env_var = get_env_var('SPACK_IS_DEVELOP_PIPELINE')
pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')
remote_mirror_url = get_env_var('SPACK_REMOTE_MIRROR_URL')
# Debug print some of the key environment variables we should have received
tty.debug('pipeline_artifacts_dir = {0}'.format(pipeline_artifacts_dir))
tty.debug('root_spec = {0}'.format(root_spec))
tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
tty.debug('compiler_action = {0}'.format(compiler_action))
# Query the environment manifest to find out whether we're reporting to a
# CDash instance, and if so, gather some information from the manifest to
# support that task.
enable_cdash = False
if 'cdash' in yaml_root:
enable_cdash = True
ci_cdash = yaml_root['cdash']
job_spec_buildgroup = ci_cdash['build-group']
cdash_base_url = ci_cdash['url']
cdash_project = ci_cdash['project']
proj_enc = urlencode({'project': cdash_project})
eq_idx = proj_enc.find('=') + 1
cdash_project_enc = proj_enc[eq_idx:]
cdash_site = ci_cdash['site']
cdash_id_path = os.path.join(repro_dir, 'cdash_id.txt')
tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
tty.debug('cdash_project = {0}'.format(cdash_project))
tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
tty.debug('cdash_site = {0}'.format(cdash_site))
tty.debug('related_builds = {0}'.format(related_builds))
tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))
# Is this a pipeline run on a spack PR or a merge to develop? It might
# be neither, e.g. a pipeline run on some environment repository.
spack_is_pr_pipeline = True if pr_env_var == 'True' else False
spack_is_develop_pipeline = True if dev_env_var == 'True' else False
# Figure out what is our temporary storage mirror: Is it artifacts
# buildcache? Or temporary-storage-url-prefix? In some cases we need to
# force something or pipelines might not have a way to propagate build
# artifacts from upstream to downstream jobs.
pipeline_mirror_url = None
temp_storage_url_prefix = None
if 'temporary-storage-url-prefix' in gitlab_ci:
temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']
pipeline_mirror_url = url_util.join(
temp_storage_url_prefix, ci_pipeline_id)
enable_artifacts_mirror = False
if 'enable-artifacts-buildcache' in gitlab_ci:
enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
if (enable_artifacts_mirror or (spack_is_pr_pipeline and
not enable_artifacts_mirror and not temp_storage_url_prefix)):
# If you explicitly enabled the artifacts buildcache feature, or
# if this is a PR pipeline but you did not enable either of the
# per-pipeline temporary storage features, we force the use of
# artifacts buildcache. Otherwise jobs will not have binary
# dependencies from previous stages available since we do not
# allow pushing binaries to the remote mirror during PR pipelines.
enable_artifacts_mirror = True
pipeline_mirror_url = 'file://' + local_mirror_dir
mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
pipeline_mirror_url)
tty.debug(mirror_msg)
# Whatever form of root_spec we got, use it to get a map giving us concrete
# specs for this job and all of its dependencies.
spec_map = spack_ci.get_concrete_specs(
env, root_spec, job_spec_pkg_name, related_builds, compiler_action)
job_spec = spec_map[job_spec_pkg_name]
job_spec_yaml_file = '{0}.yaml'.format(job_spec_pkg_name)
job_spec_yaml_path = os.path.join(repro_dir, job_spec_yaml_file)
# To provide logs, cdash reports, etc for developer download/perusal,
# these things have to be put into artifacts. This means downstream
# jobs that "need" this job will get those artifacts too. So here we
# need to clean out the artifacts we may have got from upstream jobs.
cdash_report_dir = os.path.join(pipeline_artifacts_dir, 'cdash_report')
if os.path.exists(cdash_report_dir):
shutil.rmtree(cdash_report_dir)
if os.path.exists(job_log_dir):
shutil.rmtree(job_log_dir)
if os.path.exists(repro_dir):
shutil.rmtree(repro_dir)
# Now that we removed them if they existed, create the directories we
# need for storing artifacts. The cdash_report directory will be
# created internally if needed.
os.makedirs(job_log_dir)
os.makedirs(repro_dir)
# Copy the concrete environment files to the repro directory so we can
# expose them as artifacts and not conflict with the concrete environment
# files we got as artifacts from the upstream pipeline generation job.
# Try to cast a slightly wider net too, and hopefully get the generated
# pipeline yaml. If we miss it, the user will still be able to go to the
# pipeline generation job and get it from there.
target_dirs = [
concrete_env_dir,
pipeline_artifacts_dir
]
for dir_to_list in target_dirs:
for file_name in os.listdir(dir_to_list):
src_file = os.path.join(dir_to_list, file_name)
if os.path.isfile(src_file):
dst_file = os.path.join(repro_dir, file_name)
shutil.copyfile(src_file, dst_file)
# If signing key was provided via "SPACK_SIGNING_KEY", then try to
# import it.
if signing_key:
spack_ci.import_signing_key(signing_key)
# Depending on the specifics of this job, we might need to turn on the
# "config:install_missing compilers" option (to build this job spec
# with a bootstrapped compiler), or possibly run "spack compiler find"
# (to build a bootstrap compiler or one of its deps in a
# compiler-agnostic way), or maybe do nothing at all (to build a spec
# using a compiler already installed on the target system).
spack_ci.configure_compilers(compiler_action)
# Write this job's spec yaml into the reproduction directory, and it will
# also be used in the generated "spack install" command to install the spec
tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))
with open(job_spec_yaml_path, 'w') as fd:
fd.write(job_spec.to_yaml(hash=ht.build_hash))
# Write the concrete root spec yaml into the reproduction directory
root_spec_yaml_path = os.path.join(repro_dir, 'root.yaml')
with open(root_spec_yaml_path, 'w') as fd:
fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))
# Write some other details to aid in reproduction into an artifact
repro_file = os.path.join(repro_dir, 'repro.json')
repro_details = {
'job_name': ci_job_name,
'job_spec_yaml': job_spec_yaml_file,
'root_spec_yaml': 'root.yaml'
}
with open(repro_file, 'w') as fd:
fd.write(json.dumps(repro_details))
# Write information about spack into an artifact in the repro dir
spack_info = spack_ci.get_spack_info()
spack_info_file = os.path.join(repro_dir, 'spack_info.txt')
with open(spack_info_file, 'w') as fd:
fd.write('\n{0}\n'.format(spack_info))
# If we decided there should be a temporary storage mechanism, add that
# mirror now so it's used when we check for a full hash match already
# built for this spec.
if pipeline_mirror_url:
spack_ci.add_mirror(
spack_ci.TEMP_STORAGE_MIRROR_NAME, pipeline_mirror_url)
cdash_build_id = None
cdash_build_stamp = None
# Check configured mirrors for a built spec with a matching full hash
matches = bindist.get_mirrors_for_spec(
job_spec, full_hash_match=True, index_only=False)
if matches:
# Got at full hash match on at least one configured mirror. All
# matches represent the fully up-to-date spec, so should all be
# equivalent. If artifacts mirror is enabled, we just pick one
# of the matches and download the buildcache files from there to
# the artifacts, so they're available to be used by dependent
# jobs in subsequent stages.
tty.msg('No need to rebuild {0}, found full hash match at: '.format(
job_spec_pkg_name))
for match in matches:
tty.msg(' {0}'.format(match['mirror_url']))
if enable_artifacts_mirror:
matching_mirror = matches[0]['mirror_url']
build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')
tty.debug('Getting {0} buildcache from {1}'.format(
job_spec_pkg_name, matching_mirror))
tty.debug('Downloading to {0}'.format(build_cache_dir))
buildcache.download_buildcache_files(
job_spec, build_cache_dir, True, matching_mirror)
# Now we are done and successful
sys.exit(0)
# No full hash match anywhere means we need to rebuild spec
# Start with spack arguments
install_args = [base_arg for base_arg in CI_REBUILD_INSTALL_BASE_ARGS]
install_args.extend([
'install',
'--keep-stage',
'--require-full-hash-match',
])
can_verify = spack_ci.can_verify_binaries()
verify_binaries = can_verify and spack_is_pr_pipeline is False
if not verify_binaries:
install_args.append('--no-check-signature')
# If CDash reporting is enabled, we first register this build with
# the specified CDash instance, then relate the build to those of
# its dependencies.
if enable_cdash:
tty.debug('CDash: Registering build')
(cdash_build_id,
cdash_build_stamp) = spack_ci.register_cdash_build(
cdash_build_name, cdash_base_url, cdash_project,
cdash_site, job_spec_buildgroup)
cdash_upload_url = '{0}/submit.php?project={1}'.format(
cdash_base_url, cdash_project_enc)
install_args.extend([
'--cdash-upload-url', cdash_upload_url,
'--cdash-build', cdash_build_name,
'--cdash-site', cdash_site,
'--cdash-buildstamp', cdash_build_stamp,
])
tty.debug('CDash: Relating build with dependency builds')
spack_ci.relate_cdash_builds(
spec_map, cdash_base_url, cdash_build_id, cdash_project,
[pipeline_mirror_url, pr_mirror_url, remote_mirror_url])
# store the cdash build id on disk for later
with open(cdash_id_path, 'w') as fd:
fd.write(cdash_build_id)
# A compiler action of 'FIND_ANY' means we are building a bootstrap
# compiler or one of its deps.
# TODO: when compilers are dependencies, we should include --no-add
if compiler_action != 'FIND_ANY':
install_args.append('--no-add')
# TODO: once we have the concrete spec registry, use the DAG hash
# to identify the spec to install, rather than the concrete spec
# yaml file.
install_args.extend(['-f', job_spec_yaml_path])
tty.debug('Installing {0} from source'.format(job_spec.name))
tty.debug('spack install arguments: {0}'.format(
install_args))
# Write the install command to a shell script
with open('install.sh', 'w') as fd:
fd.write('#!/bin/bash\n\n')
fd.write('\n# spack install command\n')
fd.write('spack ')
fd.write(' '.join(['"{0}"'.format(i) for i in install_args]))
fd.write('\n')
st = os.stat('install.sh')
os.chmod('install.sh', st.st_mode | stat.S_IEXEC)
install_copy_path = os.path.join(repro_dir, 'install.sh')
shutil.copyfile('install.sh', install_copy_path)
# Run the generated install.sh shell script as if it were being run in
# a login shell.
try:
install_process = subprocess.Popen(['bash', '-l', './install.sh'])
install_process.wait()
install_exit_code = install_process.returncode
except (ValueError, subprocess.CalledProcessError, OSError) as inst:
tty.error('Encountered error running install script')
tty.error(inst)
# Now do the post-install tasks
tty.debug('spack install exited {0}'.format(install_exit_code))
# If a spec fails to build in a spack develop pipeline, we add it to a
# list of known broken full hashes. This allows spack PR pipelines to
# avoid wasting compute cycles attempting to build those hashes.
if install_exit_code != 0 and spack_is_develop_pipeline:
if 'broken-specs-url' in gitlab_ci:
broken_specs_url = gitlab_ci['broken-specs-url']
dev_fail_hash = job_spec.full_hash()
broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
tmpdir = tempfile.mkdtemp()
empty_file_path = os.path.join(tmpdir, 'empty.txt')
try:
with open(empty_file_path, 'w') as efd:
efd.write('')
web_util.push_to_url(
empty_file_path,
broken_spec_path,
keep_original=False,
extra_args={'ContentType': 'text/plain'})
except Exception as err:
# If we got some kind of S3 (access denied or other connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = 'Error writing to broken specs list {0}: {1}'.format(
broken_spec_path, err)
tty.warn(msg)
finally:
shutil.rmtree(tmpdir)
# We generated the "spack install ..." command to "--keep-stage", copy
# any logs from the staging directory to artifacts now
spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)
# Create buildcache on remote mirror, either on pr-specific mirror or
# on the main mirror defined in the gitlab-enabled spack environment
if spack_is_pr_pipeline:
buildcache_mirror_url = pr_mirror_url
else:
buildcache_mirror_url = remote_mirror_url
# If the install succeeded, create a buildcache entry for this job spec
# and push it to one or more mirrors. If the install did not succeed,
# print out some instructions on how to reproduce this build failure
# outside of the pipeline environment.
if install_exit_code == 0:
can_sign = spack_ci.can_sign_binaries()
sign_binaries = can_sign and spack_is_pr_pipeline is False
# Create buildcache in either the main remote mirror, or in the
# per-PR mirror, if this is a PR pipeline
spack_ci.push_mirror_contents(
env, job_spec, job_spec_yaml_path, buildcache_mirror_url,
cdash_build_id, sign_binaries)
# Create another copy of that buildcache in the per-pipeline
# temporary storage mirror (this is only done if either
# artifacts buildcache is enabled or a temporary storage url
# prefix is set)
spack_ci.push_mirror_contents(
env, job_spec, job_spec_yaml_path, pipeline_mirror_url,
cdash_build_id, sign_binaries)
else:
tty.debug('spack install exited non-zero, will not create buildcache')
api_root_url = get_env_var('CI_API_V4_URL')
ci_project_id = get_env_var('CI_PROJECT_ID')
ci_job_id = get_env_var('CI_JOB_ID')
repro_job_url = '{0}/projects/{1}/jobs/{2}/artifacts'.format(
api_root_url, ci_project_id, ci_job_id)
reproduce_msg = """
To reproduce this build locally, run:
spack ci reproduce-build {0} [--working-dir <dir>]
If this project does not have public pipelines, you will need to first:
export GITLAB_PRIVATE_TOKEN=<generated_token>
... then follow the printed instructions.
""".format(repro_job_url)
print(reproduce_msg)
# Tie job success/failure to the success/failure of building the spec
sys.exit(install_exit_code)
|
def ci_rebuild(args):
"""Check a single spec against the remote mirror, and rebuild it from
source if the mirror does not contain the full hash match of the spec
as computed locally. """
env = ev.get_env(args, 'ci rebuild', required=True)
# Make sure the environment is "gitlab-enabled", or else there's nothing
# to do.
yaml_root = ev.config_dict(env.yaml)
gitlab_ci = None
if 'gitlab-ci' in yaml_root:
gitlab_ci = yaml_root['gitlab-ci']
if not gitlab_ci:
tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')
# Grab the environment variables we need. These either come from the
# pipeline generation step ("spack ci generate"), where they were written
# out as variables, or else provided by GitLab itself.
pipeline_artifacts_dir = get_env_var('SPACK_ARTIFACTS_ROOT')
job_log_dir = get_env_var('SPACK_JOB_LOG_DIR')
repro_dir = get_env_var('SPACK_JOB_REPRO_DIR')
local_mirror_dir = get_env_var('SPACK_LOCAL_MIRROR_DIR')
concrete_env_dir = get_env_var('SPACK_CONCRETE_ENV_DIR')
ci_pipeline_id = get_env_var('CI_PIPELINE_ID')
ci_job_name = get_env_var('CI_JOB_NAME')
signing_key = get_env_var('SPACK_SIGNING_KEY')
root_spec = get_env_var('SPACK_ROOT_SPEC')
job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
compiler_action = get_env_var('SPACK_COMPILER_ACTION')
cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
pr_env_var = get_env_var('SPACK_IS_PR_PIPELINE')
dev_env_var = get_env_var('SPACK_IS_DEVELOP_PIPELINE')
pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')
remote_mirror_url = get_env_var('SPACK_REMOTE_MIRROR_URL')
# Debug print some of the key environment variables we should have received
tty.debug('pipeline_artifacts_dir = {0}'.format(pipeline_artifacts_dir))
tty.debug('root_spec = {0}'.format(root_spec))
tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
tty.debug('compiler_action = {0}'.format(compiler_action))
# Query the environment manifest to find out whether we're reporting to a
# CDash instance, and if so, gather some information from the manifest to
# support that task.
enable_cdash = False
if 'cdash' in yaml_root:
enable_cdash = True
ci_cdash = yaml_root['cdash']
job_spec_buildgroup = ci_cdash['build-group']
cdash_base_url = ci_cdash['url']
cdash_project = ci_cdash['project']
proj_enc = urlencode({'project': cdash_project})
eq_idx = proj_enc.find('=') + 1
cdash_project_enc = proj_enc[eq_idx:]
cdash_site = ci_cdash['site']
cdash_id_path = os.path.join(repro_dir, 'cdash_id.txt')
tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
tty.debug('cdash_project = {0}'.format(cdash_project))
tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
tty.debug('cdash_site = {0}'.format(cdash_site))
tty.debug('related_builds = {0}'.format(related_builds))
tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))
# Is this a pipeline run on a spack PR or a merge to develop? It might
# be neither, e.g. a pipeline run on some environment repository.
spack_is_pr_pipeline = True if pr_env_var == 'True' else False
spack_is_develop_pipeline = True if dev_env_var == 'True' else False
# Figure out what is our temporary storage mirror: Is it artifacts
# buildcache? Or temporary-storage-url-prefix? In some cases we need to
# force something or pipelines might not have a way to propagate build
# artifacts from upstream to downstream jobs.
pipeline_mirror_url = None
temp_storage_url_prefix = None
if 'temporary-storage-url-prefix' in gitlab_ci:
temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']
pipeline_mirror_url = url_util.join(
temp_storage_url_prefix, ci_pipeline_id)
enable_artifacts_mirror = False
if 'enable-artifacts-buildcache' in gitlab_ci:
enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
if (enable_artifacts_mirror or (spack_is_pr_pipeline and
not enable_artifacts_mirror and not temp_storage_url_prefix)):
# If you explicitly enabled the artifacts buildcache feature, or
# if this is a PR pipeline but you did not enable either of the
# per-pipeline temporary storage features, we force the use of
# artifacts buildcache. Otherwise jobs will not have binary
# dependencies from previous stages available since we do not
# allow pushing binaries to the remote mirror during PR pipelines.
enable_artifacts_mirror = True
pipeline_mirror_url = 'file://' + local_mirror_dir
mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
pipeline_mirror_url)
tty.debug(mirror_msg)
# Whatever form of root_spec we got, use it to get a map giving us concrete
# specs for this job and all of its dependencies.
spec_map = spack_ci.get_concrete_specs(
env, root_spec, job_spec_pkg_name, related_builds, compiler_action)
job_spec = spec_map[job_spec_pkg_name]
job_spec_yaml_file = '{0}.yaml'.format(job_spec_pkg_name)
job_spec_yaml_path = os.path.join(repro_dir, job_spec_yaml_file)
# To provide logs, cdash reports, etc for developer download/perusal,
# these things have to be put into artifacts. This means downstream
# jobs that "need" this job will get those artifacts too. So here we
# need to clean out the artifacts we may have got from upstream jobs.
cdash_report_dir = os.path.join(pipeline_artifacts_dir, 'cdash_report')
if os.path.exists(cdash_report_dir):
shutil.rmtree(cdash_report_dir)
if os.path.exists(job_log_dir):
shutil.rmtree(job_log_dir)
if os.path.exists(repro_dir):
shutil.rmtree(repro_dir)
# Now that we removed them if they existed, create the directories we
# need for storing artifacts. The cdash_report directory will be
# created internally if needed.
os.makedirs(job_log_dir)
os.makedirs(repro_dir)
# Copy the concrete environment files to the repro directory so we can
# expose them as artifacts and not conflict with the concrete environment
# files we got as artifacts from the upstream pipeline generation job.
# Try to cast a slightly wider net too, and hopefully get the generated
# pipeline yaml. If we miss it, the user will still be able to go to the
# pipeline generation job and get it from there.
target_dirs = [
concrete_env_dir,
pipeline_artifacts_dir
]
for dir_to_list in target_dirs:
for file_name in os.listdir(dir_to_list):
src_file = os.path.join(dir_to_list, file_name)
if os.path.isfile(src_file):
dst_file = os.path.join(repro_dir, file_name)
shutil.copyfile(src_file, dst_file)
# If signing key was provided via "SPACK_SIGNING_KEY", then try to
# import it.
if signing_key:
spack_ci.import_signing_key(signing_key)
# Depending on the specifics of this job, we might need to turn on the
# "config:install_missing compilers" option (to build this job spec
# with a bootstrapped compiler), or possibly run "spack compiler find"
# (to build a bootstrap compiler or one of its deps in a
# compiler-agnostic way), or maybe do nothing at all (to build a spec
# using a compiler already installed on the target system).
spack_ci.configure_compilers(compiler_action)
# Write this job's spec yaml into the reproduction directory, and it will
# also be used in the generated "spack install" command to install the spec
tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))
with open(job_spec_yaml_path, 'w') as fd:
fd.write(job_spec.to_yaml(hash=ht.build_hash))
# Write the concrete root spec yaml into the reproduction directory
root_spec_yaml_path = os.path.join(repro_dir, 'root.yaml')
with open(root_spec_yaml_path, 'w') as fd:
fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))
# Write some other details to aid in reproduction into an artifact
repro_file = os.path.join(repro_dir, 'repro.json')
repro_details = {
'job_name': ci_job_name,
'job_spec_yaml': job_spec_yaml_file,
'root_spec_yaml': 'root.yaml'
}
with open(repro_file, 'w') as fd:
fd.write(json.dumps(repro_details))
# Write information about spack into an artifact in the repro dir
spack_info = spack_ci.get_spack_info()
spack_info_file = os.path.join(repro_dir, 'spack_info.txt')
with open(spack_info_file, 'w') as fd:
fd.write('\n{0}\n'.format(spack_info))
# If we decided there should be a temporary storage mechanism, add that
# mirror now so it's used when we check for a full hash match already
# built for this spec.
if pipeline_mirror_url:
spack_ci.add_mirror(
spack_ci.TEMP_STORAGE_MIRROR_NAME, pipeline_mirror_url)
cdash_build_id = None
cdash_build_stamp = None
# Check configured mirrors for a built spec with a matching full hash
matches = bindist.get_mirrors_for_spec(
job_spec, full_hash_match=True, index_only=False)
if matches:
# Got a full hash match on at least one configured mirror. All
# matches represent the fully up-to-date spec, so should all be
# equivalent. If artifacts mirror is enabled, we just pick one
# of the matches and download the buildcache files from there to
# the artifacts, so they're available to be used by dependent
# jobs in subsequent stages.
tty.msg('No need to rebuild {0}, found full hash match at: '.format(
job_spec_pkg_name))
for match in matches:
tty.msg(' {0}'.format(match['mirror_url']))
if enable_artifacts_mirror:
matching_mirror = matches[0]['mirror_url']
build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')
tty.debug('Getting {0} buildcache from {1}'.format(
job_spec_pkg_name, matching_mirror))
tty.debug('Downloading to {0}'.format(build_cache_dir))
buildcache.download_buildcache_files(
job_spec, build_cache_dir, True, matching_mirror)
# Now we are done and successful
sys.exit(0)
# No full hash match anywhere means we need to rebuild spec
# Start with spack arguments
install_args = [base_arg for base_arg in CI_REBUILD_INSTALL_BASE_ARGS]
install_args.extend([
'install',
'--keep-stage',
'--require-full-hash-match',
])
can_verify = spack_ci.can_verify_binaries()
verify_binaries = can_verify and spack_is_pr_pipeline is False
if not verify_binaries:
install_args.append('--no-check-signature')
# If CDash reporting is enabled, we first register this build with
# the specified CDash instance, then relate the build to those of
# its dependencies.
if enable_cdash:
tty.debug('CDash: Registering build')
(cdash_build_id,
cdash_build_stamp) = spack_ci.register_cdash_build(
cdash_build_name, cdash_base_url, cdash_project,
cdash_site, job_spec_buildgroup)
cdash_upload_url = '{0}/submit.php?project={1}'.format(
cdash_base_url, cdash_project_enc)
install_args.extend([
'--cdash-upload-url', cdash_upload_url,
'--cdash-build', cdash_build_name,
'--cdash-site', cdash_site,
'--cdash-buildstamp', cdash_build_stamp,
])
tty.debug('CDash: Relating build with dependency builds')
spack_ci.relate_cdash_builds(
spec_map, cdash_base_url, cdash_build_id, cdash_project,
[pipeline_mirror_url, pr_mirror_url, remote_mirror_url])
# store the cdash build id on disk for later
with open(cdash_id_path, 'w') as fd:
fd.write(cdash_build_id)
# A compiler action of 'FIND_ANY' means we are building a bootstrap
# compiler or one of its deps.
# TODO: when compilers are dependencies, we should include --no-add
if compiler_action != 'FIND_ANY':
install_args.append('--no-add')
# TODO: once we have the concrete spec registry, use the DAG hash
# to identify the spec to install, rather than the concrete spec
# yaml file.
install_args.extend(['-f', job_spec_yaml_path])
tty.debug('Installing {0} from source'.format(job_spec.name))
tty.debug('spack install arguments: {0}'.format(
install_args))
# Write the install command to a shell script
with open('install.sh', 'w') as fd:
fd.write('#!/bin/bash\n\n')
fd.write('\n# spack install command\n')
fd.write('spack ')
fd.write(' '.join(['"{0}"'.format(i) for i in install_args]))
fd.write('\n')
st = os.stat('install.sh')
os.chmod('install.sh', st.st_mode | stat.S_IEXEC)
install_copy_path = os.path.join(repro_dir, 'install.sh')
shutil.copyfile('install.sh', install_copy_path)
# Run the generated install.sh shell script as if it were being run in
# a login shell.
try:
install_process = subprocess.Popen(['bash', '-l', './install.sh'])
install_process.wait()
install_exit_code = install_process.returncode
except (ValueError, subprocess.CalledProcessError, OSError) as inst:
tty.error('Encountered error running install script')
tty.error(inst)
# Now do the post-install tasks
tty.debug('spack install exited {0}'.format(install_exit_code))
# If a spec fails to build in a spack develop pipeline, we add it to a
# list of known broken full hashes. This allows spack PR pipelines to
# avoid wasting compute cycles attempting to build those hashes.
if install_exit_code != 0 and spack_is_develop_pipeline:
if 'broken-specs-url' in gitlab_ci:
broken_specs_url = gitlab_ci['broken-specs-url']
dev_fail_hash = job_spec.full_hash()
broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
tmpdir = tempfile.mkdtemp()
empty_file_path = os.path.join(tmpdir, 'empty.txt')
try:
with open(empty_file_path, 'w') as efd:
efd.write('')
web_util.push_to_url(
empty_file_path,
broken_spec_path,
keep_original=False,
extra_args={'ContentType': 'text/plain'})
except Exception as err:
# If we got some kind of S3 (access denied or other connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = 'Error writing to broken specs list {0}: {1}'.format(
broken_spec_path, err)
tty.warn(msg)
finally:
shutil.rmtree(tmpdir)
# We generated the "spack install ..." command to "--keep-stage", copy
# any logs from the staging directory to artifacts now
spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)
# Create buildcache on remote mirror, either on pr-specific mirror or
# on the main mirror defined in the gitlab-enabled spack environment
if spack_is_pr_pipeline:
buildcache_mirror_url = pr_mirror_url
else:
buildcache_mirror_url = remote_mirror_url
# If the install succeeded, create a buildcache entry for this job spec
# and push it to one or more mirrors. If the install did not succeed,
# print out some instructions on how to reproduce this build failure
# outside of the pipeline environment.
if install_exit_code == 0:
can_sign = spack_ci.can_sign_binaries()
sign_binaries = can_sign and spack_is_pr_pipeline is False
# Create buildcache in either the main remote mirror, or in the
# per-PR mirror, if this is a PR pipeline
spack_ci.push_mirror_contents(
env, job_spec, job_spec_yaml_path, buildcache_mirror_url,
cdash_build_id, sign_binaries)
# Create another copy of that buildcache in the per-pipeline
# temporary storage mirror (this is only done if either
# artifacts buildcache is enabled or a temporary storage url
# prefix is set)
spack_ci.push_mirror_contents(
env, job_spec, job_spec_yaml_path, pipeline_mirror_url,
cdash_build_id, sign_binaries)
else:
tty.debug('spack install exited non-zero, will not create buildcache')
api_root_url = get_env_var('CI_API_V4_URL')
ci_project_id = get_env_var('CI_PROJECT_ID')
ci_job_id = get_env_var('CI_JOB_ID')
repro_job_url = '{0}/projects/{1}/jobs/{2}/artifacts'.format(
api_root_url, ci_project_id, ci_job_id)
reproduce_msg = """
To reproduce this build locally, run:
spack ci reproduce-build {0} [--working-dir <dir>]
If this project does not have public pipelines, you will need to first:
export GITLAB_PRIVATE_TOKEN=<generated_token>
... then follow the printed instructions.
""".format(repro_job_url)
print(reproduce_msg)
# Tie job success/failure to the success/failure of building the spec
sys.exit(install_exit_code)
|
27,139 |
def make_sure_remote_apache_exists_and_fetch(git_update: bool, verbose: bool):
"""
Make sure that apache remote exist in git. We need to take a log from the apache
repository - not locally.
Also, the local repo might be shallow, so we need to un-shallow it.
This will:
* check if the remote exists and add if it does not
* check if the local repo is shallow, mark it to un-shallow in this case
* fetch from the remote including all tags and overriding local tags in case they are set differently
:param git_update: If the git remote already exists, should we try to update it
:param verbose: print verbose messages while fetching
"""
try:
check_remote_command = ["git", "remote", "get-url", HTTPS_REMOTE]
if verbose:
console.print(f"Running command: '{' '.join(check_remote_command)}'")
subprocess.check_call(
check_remote_command,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
# Remote already exists, don't update it again!
if not git_update:
return
except subprocess.CalledProcessError as ex:
if ex.returncode == 128 or ex.returncode == 2:
remote_add_command = [
"git",
"remote",
"add",
HTTPS_REMOTE,
"https://github.com/apache/airflow.git",
]
if verbose:
console.print(f"Running command: '{' '.join(remote_add_command)}'")
try:
subprocess.check_output(
remote_add_command,
stderr=subprocess.STDOUT,
text=True,
)
except subprocess.CalledProcessError as ex:
console.print("[red]Error: when adding remote:[/]", ex)
sys.exit(128)
else:
raise
if verbose:
console.print("Fetching full history and tags from remote. ")
console.print("This might override your local tags!")
is_shallow_repo = (
subprocess.check_output(["git", "rev-parse", "--is-shallow-repository"], stderr=subprocess.DEVNULL)
== 'true'
)
fetch_command = ["git", "fetch", "--tags", "--force", HTTPS_REMOTE]
if is_shallow_repo:
if verbose:
console.print(
"This will also un-shallow the repository, "
"making all history available and increasing storage!"
)
fetch_command.append("--unshallow")
if verbose:
console.print(f"Running command: '{' '.join(fetch_command)}'")
try:
subprocess.check_call(
fetch_command,
)
except subprocess.CalledProcessError as e:
console.print(
'[yellow]Error when fetching tags from remote. Your tags might not be refreshed. '
f'please refresh the tags manually via {" ".join(fetch_command)}\n'
)
console.print(f'[yellow]The error was: {e}')
|
def make_sure_remote_apache_exists_and_fetch(git_update: bool, verbose: bool):
"""
Make sure that apache remote exist in git. We need to take a log from the apache
repository - not locally.
Also, the local repo might be shallow, so we need to un-shallow it.
This will:
* check if the remote exists and add if it does not
* check if the local repo is shallow, mark it to un-shallow in this case
* fetch from the remote including all tags and overriding local tags in case they are set differently
:param git_update: If the git remote already exists, should we try to update it
:param verbose: print verbose messages while fetching
"""
try:
check_remote_command = ["git", "remote", "get-url", HTTPS_REMOTE]
if verbose:
console.print(f"Running command: '{' '.join(check_remote_command)}'")
subprocess.check_call(
check_remote_command,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
# Remote already exists, don't update it again!
if not git_update:
return
except subprocess.CalledProcessError as ex:
if ex.returncode == 128 or ex.returncode == 2:
remote_add_command = [
"git",
"remote",
"add",
HTTPS_REMOTE,
"https://github.com/apache/airflow.git",
]
if verbose:
console.print(f"Running command: '{' '.join(remote_add_command)}'")
try:
subprocess.check_output(
remote_add_command,
stderr=subprocess.STDOUT,
text=True,
)
except subprocess.CalledProcessError as ex:
console.print("[red]Error: when adding remote:[/]", ex)
sys.exit(128)
else:
raise
if verbose:
console.print("Fetching full history and tags from remote. ")
console.print("This might override your local tags!")
is_shallow_repo = (
subprocess.check_output(["git", "rev-parse", "--is-shallow-repository"], stderr=subprocess.DEVNULL)
== 'true'
)
fetch_command = ["git", "fetch", "--tags", "--force", HTTPS_REMOTE]
if is_shallow_repo:
if verbose:
console.print(
"This will also un-shallow the repository, "
"making all history available and increasing storage!"
)
fetch_command.append("--unshallow")
if verbose:
console.print(f"Running command: '{' '.join(fetch_command)}'")
try:
subprocess.check_call(
fetch_command,
)
except subprocess.CalledProcessError as e:
console.print(
'[yellow]Error when fetching tags from remote. Your tags might not be refreshed. '
f'Please refresh the tags manually via {" ".join(fetch_command)}\n'
)
console.print(f'[yellow]The error was: {e}')
|
29,014 |
def _receive_socket_data(host: str, sock: socket.socket) -> Optional[bytes]:
server_address = (host, SQL_BROWSER_DEFAULT_PORT)
# The message is a CLNT_UCAST_EX packet to get all instances
# https://msdn.microsoft.com/en-us/library/cc219745.aspx
message = "\x03"
# Encode the message as a bytes array
message = message.encode()
# send data and receive response
try:
logger.info(f"Sending message to requested host: {host}, {message}")
sock.sendto(message, server_address)
data, _ = sock.recvfrom(_BUFFER_SIZE)
return data
except socket.timeout as err:
logger.debug(
f"Socket timeout reached, maybe browser service on host: {host} doesnt " "exist"
)
sock.close()
raise err
except socket.error as err:
if err.errno == errno.ECONNRESET:
error_message = (
f"Connection was forcibly closed by the remote host. The host: {host} is "
"rejecting the packet."
)
else:
error_message = (
"An unknown socket error occurred while trying the mssql fingerprint, "
"closing socket."
)
sock.close()
raise err(error_message)
|
def _receive_socket_data(host: str, sock: socket.socket) -> Optional[bytes]:
server_address = (host, SQL_BROWSER_DEFAULT_PORT)
# The message is a CLNT_UCAST_EX packet to get all instances
# https://msdn.microsoft.com/en-us/library/cc219745.aspx
message = "\x03"
# Encode the message as a bytes array
message = message.encode()
# send data and receive response
try:
logger.info(f"Sending message to requested host: {host}, {message}")
sock.sendto(message, server_address)
data, _ = sock.recvfrom(_BUFFER_SIZE)
return data
except socket.timeout as err:
logger.debug(
f"Socket timeout reached, maybe browser service on host: {host} doesnt " "exist"
)
sock.close()
raise err
except socket.error as err:
if err.errno == errno.ECONNRESET:
error_message = (
f"Connection was forcibly closed by the remote host. The host: {host} is "
"rejecting the packet."
)
else:
error_message = (
"An unknown socket error occurred while trying the mssql fingerprint, "
"closing socket."
)
sock.close()
raise Exception(error_message) from err
|
8,203 |
def system_info():
"""
Prints ones' system info in an "attractive" fashion.
"""
base_reqs = get_distribution("sunpy").requires()
base_reqs = {base_req.name.lower() for base_req in base_reqs}
extra_reqs = get_distribution("sunpy").requires(extras=["all"])
extra_reqs = sorted({extra_req.name.lower() for extra_req in extra_reqs}.difference(base_reqs))
missing_packages, installed_packages = find_dependencies(package="sunpy", extras=["all"])
extra_prop = {'System': platform.system(),
'Arch': f"{platform.architecture()[0]}, ({platform.processor()})",
"Python": platform.python_version(),
"SunPy": get_distribution("sunpy").version}
sys_prop = {**installed_packages, **missing_packages, **extra_prop}
print("==============================")
print("SunPy Installation Information")
print("==============================")
print()
print("General")
print("#######")
if sys_prop['System'] == "Linux":
distro = " ".join(linux_distribution())
print(f"OS: {distro} (Linux {platform.release()})")
elif sys_prop['System'] == "Darwin":
print(f"OS: Mac OS {platform.mac_ver()[0]}")
elif sys_prop['System'] == "Windows":
print(f"OS: Windows {platform.release()} {platform.version()}")
else:
print("Unknown OS")
for sys_info in ['Arch', 'SunPy']:
print('{} : {}'.format(sys_info, sys_prop[sys_info]))
print()
print("Required Dependices")
print("###################")
for req in base_reqs:
print('{}: {}'.format(req, sys_prop[req]))
print()
print("Optional Dependices")
print("###################")
for extra_req in extra_reqs:
print(f'{extra_req}: {sys_prop[extra_req]}')
|
def system_info():
"""
Prints ones' system info in an "attractive" fashion.
"""
base_reqs = get_distribution("sunpy").requires()
base_reqs = {base_req.name.lower() for base_req in base_reqs}
extra_reqs = get_distribution("sunpy").requires(extras=["all"])
extra_reqs = sorted({extra_req.name.lower() for extra_req in extra_reqs}.difference(base_reqs))
missing_packages, installed_packages = find_dependencies(package="sunpy", extras=["all"])
extra_prop = {'System': platform.system(),
"Arch": f"{platform.architecture()[0]}, ({platform.processor()})",
"Python": platform.python_version(),
"SunPy": get_distribution("sunpy").version}
sys_prop = {**installed_packages, **missing_packages, **extra_prop}
print("==============================")
print("SunPy Installation Information")
print("==============================")
print()
print("General")
print("#######")
if sys_prop['System'] == "Linux":
distro = " ".join(linux_distribution())
print(f"OS: {distro} (Linux {platform.release()})")
elif sys_prop['System'] == "Darwin":
print(f"OS: Mac OS {platform.mac_ver()[0]}")
elif sys_prop['System'] == "Windows":
print(f"OS: Windows {platform.release()} {platform.version()}")
else:
print("Unknown OS")
for sys_info in ['Arch', 'SunPy']:
print('{} : {}'.format(sys_info, sys_prop[sys_info]))
print()
print("Required Dependices")
print("###################")
for req in base_reqs:
print('{}: {}'.format(req, sys_prop[req]))
print()
print("Optional Dependices")
print("###################")
for extra_req in extra_reqs:
print(f'{extra_req}: {sys_prop[extra_req]}')
|
33,020 |
def parse_poi_query(north, south, east, west, tags=None, timeout=180, maxsize=''):
"""
Construct an Overpass QL query to load POIs with certain tags.
By default, queries all features with an amenity tag.
Parameters
----------
north : float
Northernmost coordinate from bounding box of the search area.
south : float
Southernmost coordinate from bounding box of the search area.
east : float
Easternmost coordinate from bounding box of the search area.
west : float
Westernmost coordinate of the bounding box of the search area.
tags : dict
Dictionary of tag keys and values that will be used for finding POIs in the selected area.
Keys may be strings or lists of strings.
Values make be string, lists of strings, or None, if all values should be returned for a given key.
By default, all POIs with an 'amenity' key of any value will be be returned.
timeout : int
Timeout for the API request.
"""
# build default tags
if not tags:
tags = {'amenity':True}
# define templates for objects and extents
object_template = '({object_type}[{{keys}}{{op}}"{{values}}"]{{extent}});'
# object_template = '({object_type}[~"^({{keys}})$"{{op}}"{{values}}"]{{extent}});'
re_keys_template = '~"^({keys})$"'
single_key_template = '"{key}"'
extent_template = '({south:.6f},{west:.6f},{north:.6f},{east:.6f});(._;>;);'
extent = extent_template.format(south=south, west=west, north=north, east=east)
# initate query string
query_template = "[out:json][timeout:{timeout}]{maxsize};("
query_str = query_template.format(timeout=timeout, maxsize=maxsize)
# add statements for each object type
# templates = [object_template.format(object_type=x) for x in ['node','way','relation']]
templates = [object_template.format(object_type=x) for x in ['nwr']]
for template in templates:
# add statements for each key
for keys, values in tags.items():
# ensure keys is a list
keys = [keys] if not isinstance(keys, list) else keys
if values == True:
# get features with any value for these keys
# add positive statement with multiple keys and no specific values
query_str += template.format(keys=re_keys_template.format(keys='|'.join(keys)), values='.*', extent=extent, op='~')
elif values == False:
# get features wihout these keys, not matter their values
for key in keys:
# add negative statement with multiple keys and no specific values
# can only be added one at a time withough key regex
query_str += template.format(keys=single_key_template.format(key=key), values='.*', extent=extent, op='!~')
else:
# get features with specified values for these keys
# ensure values is a list
values = [values] if not isinstance(values, list) else values
# add positive statement with multiple keys in specific values
query_str += template.format(keys='{}'.format('|'.join(keys)), values='|'.join(values), extent=extent, op='~')
# terminate query string
query_str += ");out;"
return query_str
|
def parse_poi_query(north, south, east, west, tags=None, timeout=180, maxsize=''):
"""
Construct an Overpass QL query to load POIs with certain tags.
By default, queries all features with an amenity tag.
Parameters
----------
north : float
Northernmost coordinate from bounding box of the search area.
south : float
Southernmost coordinate from bounding box of the search area.
east : float
Easternmost coordinate from bounding box of the search area.
west : float
Westernmost coordinate of the bounding box of the search area.
tags : dict
Dictionary of tag keys and values that will be used for finding POIs in the selected area.
Keys may be strings or lists of strings.
Values make be string, lists of strings, or None, if all values should be returned for a given key.
By default, all POIs with an 'amenity' key of any value will be be returned.
timeout : int
Timeout for the API request.
"""
# build default tags
if not tags:
tags = {'amenity':True}
# define templates for objects and extents
object_template = '({object_type}[{{keys}}{{op}}"{{values}}"]{{extent}});'
# object_template = '({object_type}[~"^({{keys}})$"{{op}}"{{values}}"]{{extent}});'
re_keys_template = '~"^({keys})$"'
single_key_template = '"{key}"'
extent_template = '({south:.6f},{west:.6f},{north:.6f},{east:.6f});(._;>;);'
extent = extent_template.format(south=south, west=west, north=north, east=east)
# initiate query string
query_template = "[out:json][timeout:{timeout}]{maxsize};("
query_str = query_template.format(timeout=timeout, maxsize=maxsize)
# add statements for each object type
# templates = [object_template.format(object_type=x) for x in ['node','way','relation']]
templates = [object_template.format(object_type=x) for x in ['nwr']]
for template in templates:
# add statements for each key
for keys, values in tags.items():
# ensure keys is a list
keys = [keys] if not isinstance(keys, list) else keys
if values == True:
# get features with any value for these keys
# add positive statement with multiple keys and no specific values
query_str += template.format(keys=re_keys_template.format(keys='|'.join(keys)), values='.*', extent=extent, op='~')
elif values == False:
# get features wihout these keys, not matter their values
for key in keys:
# add negative statement with multiple keys and no specific values
# can only be added one at a time withough key regex
query_str += template.format(keys=single_key_template.format(key=key), values='.*', extent=extent, op='!~')
else:
# get features with specified values for these keys
# ensure values is a list
values = [values] if not isinstance(values, list) else values
# add positive statement with multiple keys in specific values
query_str += template.format(keys='{}'.format('|'.join(keys)), values='|'.join(values), extent=extent, op='~')
# terminate query string
query_str += ");out;"
return query_str
|
24,328 |
def _varbind_value_to_float(s):
# type: (Any) -> float
"""
Sanitize varbind values
"""
if not isinstance(s, OctetString):
return s
s = s.asOctets()
s = to_native_string(s)
found = s.find('\x00')
if found >= 0:
s = s[:found]
return float(s.strip())
|
def _varbind_value_to_float(s):
# type: (Any) -> float
"""
Sanitize varbind values
"""
if not isinstance(s, OctetString):
return float(s)
s = s.asOctets()
s = to_native_string(s)
found = s.find('\x00')
if found >= 0:
s = s[:found]
return float(s.strip())
|
11,008 |
def action_method(*, permissions=None, description=None):
"""
Conveniently add attributes to an action method::
@admin.action_method(
permissions=['publish'],
description='Mark selected stories as published',
)
def make_published(self, request, queryset):
queryset.update(status='p')
This is equivalent to setting the attributes on the method directly::
def make_published(self, request, queryset):
queryset.update(status='p')
make_published.allowed_permissions = ['publish']
make_published.short_description = 'Mark selected stories as published'
"""
def decorator(f):
if permissions is not None:
f.allowed_permissions = permissions
if description is not None:
f.short_description = description
return f
return decorator
|
def action_method(*, permissions=None, description=None):
"""
Conveniently add attributes to an action method::
@admin.action_method(
permissions=['publish'],
description='Mark selected stories as published',
)
def make_published(self, request, queryset):
queryset.update(status='p')
This is equivalent to setting some attributes (with the original, longer names) on the function directly::
def make_published(self, request, queryset):
queryset.update(status='p')
make_published.allowed_permissions = ['publish']
make_published.short_description = 'Mark selected stories as published'
"""
def decorator(f):
if permissions is not None:
f.allowed_permissions = permissions
if description is not None:
f.short_description = description
return f
return decorator
|
31,488 |
def delete_file_command(credentials: Dict, sensor_id: int, source_path: str):
api = CBCloudAPI(**credentials)
session = api.select(endpoint_standard.Device, sensor_id).lr_session()
session.delete_file(filename=source_path)
return f'The file: {source_path} was deleted'
|
def delete_file_command(credentials: Dict, sensor_id: int, source_path: str):
api = CBCloudAPI(**credentials)
session = api.select(endpoint_standard.Device, sensor_id).lr_session()
session.delete_file(filename=source_path)
return f'The file: {source_path} was deleted successfully.'
|
32,321 |
def commit(topology: Topology, device_filter_string: str = None) -> List[CommitStatus]:
"""
Commit the configuration for the entire topology. Note this only commits the configuration - it does
not push the configuration in the case of Panorama.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
"""
return UniversalCommand.commit(topology, device_filter_string)
|
def commit(topology: Topology, device_filter_string: Optional[str] = None) -> List[CommitStatus]:
"""
Commit the configuration for the entire topology. Note this only commits the configuration - it does
not push the configuration in the case of Panorama.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
"""
return UniversalCommand.commit(topology, device_filter_string)
|
31,749 |
def test_custom_indicator(client: Client) -> CommandResults:
# Command using a custom indicator example
result = client.baseintegration_dummy("test")
score = Common.DBotScore.GOOD
indicator_value = 'custom_value'
# Create a DBotScore object
# Give it an indicator_type of DBotScoreType.CUSTOM
dbot_score = Common.DBotScore(
indicator=indicator_value,
indicator_type=DBotScoreType.CUSTOM,
integration_name='DummyIntegration',
score=score
)
# Create a data dictionary, which is the data of the indicator
data = {
'param1': 'value1',
'param2': 'value2',
}
# Create the CustomIndicator
custom_indicator = Common.CustomIndicator(
indicator_type='MyCustomIndicator',
dbot_score=dbot_score,
value=indicator_value,
data=data,
context_prefix='custom',
)
# Return a CommandResults object containing the CustomIndicator object created
return CommandResults(
readable_output='custom_value',
outputs=result,
outputs_prefix='Demo.Result',
outputs_key_field='test_key_field',
indicator=custom_indicator
)
|
def test_custom_indicator(client: Client) -> CommandResults:
# Command using a custom indicator example
result = client.baseintegration_dummy("test")
score = Common.DBotScore.GOOD
indicator_value = 'custom_value'
# Create a DBotScore object
# Give it an indicator_type of DBotScoreType.CUSTOM
dbot_score = Common.DBotScore(
indicator=indicator_value,
indicator_type=DBotScoreType.CUSTOM,
integration_name='DummyIntegration',
score=score,
)
# Create a data dictionary, which is the data of the indicator
data = {
'param1': 'value1',
'param2': 'value2',
}
# Create the CustomIndicator
custom_indicator = Common.CustomIndicator(
indicator_type='MyCustomIndicator',
dbot_score=dbot_score,
value=indicator_value,
data=data,
context_prefix='custom',
)
# Return a CommandResults object containing the CustomIndicator object created
return CommandResults(
readable_output='custom_value',
outputs=result,
outputs_prefix='Demo.Result',
outputs_key_field='test_key_field',
indicator=custom_indicator
)
|
10,439 |
def check_command(module, command):
"""Tests against a command line to be valid otherwise raise errors
Error on uneven single quote which breaks ansible waiting for further input. Ansible will handle
even single quote failures correctly.
:param command: the command line from current or new config
:type command: string
:raises ValueError:
* if contains odd number of single quotes
:return: command string unchanged
:rtype: string
"""
if command.count("'") % 2 != 0:
module.fail_json(msg="Ansible does not support single (') quotes in values: " + command)
return command
|
def check_command(module, command):
"""Tests against a command line to be valid otherwise raise errors
Error on uneven single quote which breaks ansible waiting for further input. Ansible will handle
even single quote failures correctly.
:param command: the command line from current or new config
:type command: string
:raises ValueError:
* if contains odd number of single quotes
:return: command string unchanged
:rtype: string
"""
if command.count("'") % 2 != 0:
module.fail_json(msg="Unmatched single (') quote found in command: " + command)
return command
|
7,569 |
def _populate_ep():
# TODO: Exclusively use select when Python minversion is 3.10
ep = entry_points()
if hasattr(ep, 'select'):
populate_entry_points(ep.select('astropy.modeling', []))
else:
populate_entry_points(ep.get('astropy.modeling', []))
|
def _populate_ep():
# TODO: Exclusively use select when Python minversion is 3.10
ep = entry_points()
if hasattr(ep, 'select'):
populate_entry_points(ep.select(group='astropy.modeling'))
else:
populate_entry_points(ep.get('astropy.modeling', []))
|
17,114 |
def _media_mime_type(media_item: dict[str, Any]) -> str:
"""Return the mime type of a media item."""
if ITEM_KEY_MEDIA_SOURCES not in media_item:
raise BrowseError("Unable to determine mime type for item without media source")
media_source = media_item[ITEM_KEY_MEDIA_SOURCES][0]
if MEDIA_SOURCE_KEY_PATH not in media_source:
raise BrowseError("Unable to determine mime type for media source without path")
path = media_source[MEDIA_SOURCE_KEY_PATH]
mime_type, _ = mimetypes.guess_type(path)
if mime_type is None:
raise BrowseError(f"Unable to determine mime type for path {path}")
return mime_type
|
def _media_mime_type(media_item: dict[str, Any]) -> str:
"""Return the mime type of a media item."""
if not media_item.get(ITEM_KEY_MEDIA_SOURCES):
raise BrowseError("Unable to determine mime type for item without media source")
media_source = media_item[ITEM_KEY_MEDIA_SOURCES][0]
if MEDIA_SOURCE_KEY_PATH not in media_source:
raise BrowseError("Unable to determine mime type for media source without path")
path = media_source[MEDIA_SOURCE_KEY_PATH]
mime_type, _ = mimetypes.guess_type(path)
if mime_type is None:
raise BrowseError(f"Unable to determine mime type for path {path}")
return mime_type
|
8,443 |
def extract_region(spectrum, region):
"""
Extract a region from the input `~specutils.Spectrum1D`
defined by the lower and upper bounds defined by the ``region``
instance. The extracted region will be returned as a new
`~specutils.Spectrum1D`.
Parameters
----------
spectrum: `~specutils.spectra.spectrum1d.Spectrum1D`
The spectrum object from which the region will be extracted.
Returns
-------
spectrum: `~specutils.spectra.spectrum1d.Spectrum1D`
Excised spectrum.
Notes
-----
The region extracted is a discrete subset of the input spectrum. No interpolation is done
on the left and right side of the spectrum.
The region is assumed to be a closed interval (as opposed to Python which is open
on the upper end). For example:
Given:
A ``spectrum`` with spectral_axis of ``[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]*u.um``.
A ``region`` defined as ``SpectralRegion(0.2*u.um, 0.5*u.um)``
And we calculate ``sub_spectrum = extract_region(spectrum, region)``, then the ``sub_spectrum``
spectral axis will be ``[0.2, 0.3, 0.4, 0.5] * u.um``.
If the ``region`` does not overlap with the ``spectrum`` then an empty Spectrum1D object
will be returned.
"""
extracted_spectrum = []
for subregion in region._subregions:
left_index, right_index = _to_edge_pixel(subregion, spectrum)
# If both indices are out of bounds then return None
if left_index is None and right_index is None:
empty_spectrum = Spectrum1D(spectral_axis=[]*spectrum.spectral_axis.unit,
flux=[]*spectrum.flux.unit)
extracted_spectrum.append(empty_spectrum)
else:
# If only one index is out of bounds then set it to
# the lower or upper extent
if left_index is None:
left_index = 0
if right_index is None:
right_index = len(spectrum.spectral_axis)
if left_index > right_index:
left_index, right_index = right_index, left_index
if len(spectrum.flux.shape)==1:
extracted_spectrum.append(spectrum[left_index:right_index])
else:
extracted_spectrum.append(spectrum[..., left_index:right_index])
# If there is only one subregion in the region then we will
# just return a spectrum.
if len(region) == 1:
extracted_spectrum = extracted_spectrum[0]
return extracted_spectrum
|
def extract_region(spectrum, region):
"""
Extract a region from the input `~specutils.Spectrum1D`
defined by the lower and upper bounds defined by the ``region``
instance. The extracted region will be returned as a new
`~specutils.Spectrum1D`.
Parameters
----------
spectrum: `~specutils.spectra.spectrum1d.Spectrum1D`
The spectrum object from which the region will be extracted.
Returns
-------
spectrum: `~specutils.spectra.spectrum1d.Spectrum1D`
Excised spectrum.
Notes
-----
The region extracted is a discrete subset of the input spectrum. No interpolation is done
on the left and right side of the spectrum.
The region is assumed to be a closed interval (as opposed to Python which is open
on the upper end). For example:
Given:
A ``spectrum`` with spectral_axis of ``[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]*u.um``.
A ``region`` defined as ``SpectralRegion(0.2*u.um, 0.5*u.um)``
And we calculate ``sub_spectrum = extract_region(spectrum, region)``, then the ``sub_spectrum``
spectral axis will be ``[0.2, 0.3, 0.4, 0.5] * u.um``.
If the ``region`` does not overlap with the ``spectrum`` then an empty Spectrum1D object
will be returned.
"""
extracted_spectrum = []
for subregion in region._subregions:
left_index, right_index = _to_edge_pixel(subregion, spectrum)
# If both indices are out of bounds then return None
if left_index is None and right_index is None:
empty_spectrum = Spectrum1D(spectral_axis=[]*spectrum.spectral_axis.unit,
flux=[]*spectrum.flux.unit)
extracted_spectrum.append(empty_spectrum)
else:
# If only one index is out of bounds then set it to
# the lower or upper extent
if left_index is None:
left_index = 0
if right_index is None:
right_index = len(spectrum.spectral_axis)
if left_index > right_index:
left_index, right_index = right_index, left_index
extracted_spectrum.append(spectrum[..., left_index:right_index])
# If there is only one subregion in the region then we will
# just return a spectrum.
if len(region) == 1:
extracted_spectrum = extracted_spectrum[0]
return extracted_spectrum
|
20,432 |
def app_upgrade(app=[], url=None, file=None, force=False, no_safety_backup=False):
"""
Upgrade app
Keyword argument:
file -- Folder or tarball for upgrade
app -- App(s) to upgrade (default all)
url -- Git url to fetch for upgrade
no_safety_backup -- Disable the safety backup during upgrade
"""
from packaging import version
from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback
from yunohost.permission import permission_sync_to_user
from yunohost.regenconf import manually_modified_files
apps = app
# Check if disk space available
if free_space_in_directory("/") <= 512 * 1000 * 1000:
raise YunohostValidationError("disk_space_not_sufficient_update")
# If no app is specified, upgrade all apps
if not apps:
# FIXME : not sure what's supposed to happen if there is a url and a file but no apps...
if not url and not file:
apps = _installed_apps()
elif not isinstance(app, list):
apps = [app]
# Remove possible duplicates
apps = [app_ for i, app_ in enumerate(apps) if app_ not in apps[:i]]
# Abort if any of those app is in fact not installed..
for app in [app_ for app_ in apps if not _is_installed(app_)]:
raise YunohostValidationError(
"app_not_installed", app=app, all_apps=_get_all_installed_apps_id()
)
if len(apps) == 0:
raise YunohostValidationError("apps_already_up_to_date")
if len(apps) > 1:
logger.info(m18n.n("app_upgrade_several_apps", apps=", ".join(apps)))
for number, app_instance_name in enumerate(apps):
logger.info(m18n.n("app_upgrade_app_name", app=app_instance_name))
app_dict = app_info(app_instance_name, full=True)
if file and isinstance(file, dict):
# We use this dirty hack to test chained upgrades in unit/functional tests
manifest, extracted_app_folder = _extract_app_from_file(
file[app_instance_name]
)
elif file:
manifest, extracted_app_folder = _extract_app_from_file(file)
elif url:
manifest, extracted_app_folder = _fetch_app_from_git(url)
elif app_dict["upgradable"] == "url_required":
logger.warning(m18n.n("custom_app_url_required", app=app_instance_name))
continue
elif app_dict["upgradable"] == "yes" or force:
manifest, extracted_app_folder = _fetch_app_from_git(app_instance_name)
else:
logger.success(m18n.n("app_already_up_to_date", app=app_instance_name))
continue
# Manage upgrade type and avoid any upgrade if there is nothing to do
upgrade_type = "UNKNOWN"
# Get current_version and new version
app_new_version = version.parse(manifest.get("version", "?"))
app_current_version = version.parse(app_dict.get("version", "?"))
if "~ynh" in str(app_current_version) and "~ynh" in str(app_new_version):
if app_current_version >= app_new_version and not force:
# In case of upgrade from file or custom repository
# No new version available
logger.success(m18n.n("app_already_up_to_date", app=app_instance_name))
# Save update time
now = int(time.time())
app_setting(app_instance_name, "update_time", now)
app_setting(
app_instance_name,
"current_revision",
manifest.get("remote", {}).get("revision", "?"),
)
continue
elif app_current_version > app_new_version:
upgrade_type = "DOWNGRADE_FORCED"
elif app_current_version == app_new_version:
upgrade_type = "UPGRADE_FORCED"
else:
app_current_version_upstream, app_current_version_pkg = str(
app_current_version
).split("~ynh")
app_new_version_upstream, app_new_version_pkg = str(
app_new_version
).split("~ynh")
if app_current_version_upstream == app_new_version_upstream:
upgrade_type = "UPGRADE_PACKAGE"
elif app_current_version_pkg == app_new_version_pkg:
upgrade_type = "UPGRADE_APP"
else:
upgrade_type = "UPGRADE_FULL"
# Check requirements
_check_manifest_requirements(manifest, app_instance_name=app_instance_name)
_assert_system_is_sane_for_app(manifest, "pre")
app_setting_path = os.path.join(APPS_SETTING_PATH, app_instance_name)
# Retrieve arguments list for upgrade script
# TODO: Allow to specify arguments
args_odict = _parse_args_from_manifest(manifest, "upgrade")
# Prepare env. var. to pass to script
env_dict = _make_environment_for_app_script(app_instance_name, args=args_odict)
env_dict["YNH_APP_UPGRADE_TYPE"] = upgrade_type
env_dict["YNH_APP_MANIFEST_VERSION"] = str(app_new_version)
env_dict["YNH_APP_CURRENT_VERSION"] = str(app_current_version)
env_dict["YNH_APP_NO_BACKUP_UPGRADE"] = no_safety_backup
# We'll check that the app didn't brutally edit some system configuration
manually_modified_files_before_install = manually_modified_files()
# Attempt to patch legacy helpers ...
_patch_legacy_helpers(extracted_app_folder)
# Apply dirty patch to make php5 apps compatible with php7
_patch_legacy_php_versions(extracted_app_folder)
# Start register change on system
related_to = [("app", app_instance_name)]
operation_logger = OperationLogger("app_upgrade", related_to, env=env_dict)
operation_logger.start()
# Execute the app upgrade script
upgrade_failed = True
try:
upgrade_retcode = hook_exec(
extracted_app_folder + "/scripts/upgrade", env=env_dict
)[0]
upgrade_failed = True if upgrade_retcode != 0 else False
if upgrade_failed:
error = m18n.n("app_upgrade_script_failed")
logger.error(
m18n.n("app_upgrade_failed", app=app_instance_name, error=error)
)
failure_message_with_debug_instructions = operation_logger.error(error)
if msettings.get("interface") != "api":
dump_app_log_extract_for_debugging(operation_logger)
# Script got manually interrupted ... N.B. : KeyboardInterrupt does not inherit from Exception
except (KeyboardInterrupt, EOFError):
upgrade_retcode = -1
error = m18n.n("operation_interrupted")
logger.error(
m18n.n("app_upgrade_failed", app=app_instance_name, error=error)
)
failure_message_with_debug_instructions = operation_logger.error(error)
# Something wrong happened in Yunohost's code (most probably hook_exec)
except Exception:
import traceback
error = m18n.n("unexpected_error", error="\n" + traceback.format_exc())
logger.error(
m18n.n("app_install_failed", app=app_instance_name, error=error)
)
failure_message_with_debug_instructions = operation_logger.error(error)
finally:
# Whatever happened (install success or failure) we check if it broke the system
# and warn the user about it
try:
broke_the_system = False
_assert_system_is_sane_for_app(manifest, "post")
except Exception as e:
broke_the_system = True
logger.error(
m18n.n("app_upgrade_failed", app=app_instance_name, error=str(e))
)
failure_message_with_debug_instructions = operation_logger.error(str(e))
# We'll check that the app didn't brutally edit some system configuration
manually_modified_files_after_install = manually_modified_files()
manually_modified_files_by_app = set(
manually_modified_files_after_install
) - set(manually_modified_files_before_install)
if manually_modified_files_by_app:
logger.error(
"Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - "
+ "\n -".join(manually_modified_files_by_app)
)
# If upgrade failed or broke the system,
# raise an error and interrupt all other pending upgrades
if upgrade_failed or broke_the_system:
# display this if there are remaining apps
if apps[number + 1 :]:
not_upgraded_apps = apps[number:]
logger.error(
m18n.n(
"app_not_upgraded",
failed_app=app_instance_name,
apps=", ".join(not_upgraded_apps),
)
)
raise YunohostError(
failure_message_with_debug_instructions, raw_msg=True
)
# Otherwise we're good and keep going !
now = int(time.time())
app_setting(app_instance_name, "update_time", now)
app_setting(
app_instance_name,
"current_revision",
manifest.get("remote", {}).get("revision", "?"),
)
# Clean hooks and add new ones
hook_remove(app_instance_name)
if "hooks" in os.listdir(extracted_app_folder):
for hook in os.listdir(extracted_app_folder + "/hooks"):
hook_add(app_instance_name, extracted_app_folder + "/hooks/" + hook)
# Replace scripts and manifest and conf (if exists)
os.system(
'rm -rf "%s/scripts" "%s/manifest.toml %s/manifest.json %s/conf"'
% (
app_setting_path,
app_setting_path,
app_setting_path,
app_setting_path,
)
)
if os.path.exists(os.path.join(extracted_app_folder, "manifest.json")):
os.system(
'mv "%s/manifest.json" "%s/scripts" %s'
% (extracted_app_folder, extracted_app_folder, app_setting_path)
)
if os.path.exists(os.path.join(extracted_app_folder, "manifest.toml")):
os.system(
'mv "%s/manifest.toml" "%s/scripts" %s'
% (extracted_app_folder, extracted_app_folder, app_setting_path)
)
for file_to_copy in [
"actions.json",
"actions.toml",
"config_panel.json",
"config_panel.toml",
"conf",
]:
if os.path.exists(os.path.join(extracted_app_folder, file_to_copy)):
os.system(
"cp -R %s/%s %s"
% (extracted_app_folder, file_to_copy, app_setting_path)
)
# Clean and set permissions
shutil.rmtree(extracted_app_folder)
os.system("chmod 600 %s" % app_setting_path)
os.system("chmod 400 %s/settings.yml" % app_setting_path)
os.system("chown -R root: %s" % app_setting_path)
# So much win
logger.success(m18n.n("app_upgraded", app=app_instance_name))
hook_callback("post_app_upgrade", env=env_dict)
operation_logger.success()
permission_sync_to_user()
logger.success(m18n.n("upgrade_complete"))
|
def app_upgrade(app=[], url=None, file=None, force=False, no_safety_backup=False):
"""
Upgrade app
Keyword argument:
file -- Folder or tarball for upgrade
app -- App(s) to upgrade (default all)
url -- Git url to fetch for upgrade
no_safety_backup -- Disable the safety backup during upgrade
"""
from packaging import version
from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback
from yunohost.permission import permission_sync_to_user
from yunohost.regenconf import manually_modified_files
apps = app
# Check if disk space available
if free_space_in_directory("/") <= 512 * 1000 * 1000:
raise YunohostValidationError("disk_space_not_sufficient_update")
# If no app is specified, upgrade all apps
if not apps:
# FIXME : not sure what's supposed to happen if there is a url and a file but no apps...
if not url and not file:
apps = _installed_apps()
elif not isinstance(app, list):
apps = [app]
# Remove possible duplicates
apps = [app_ for i, app_ in enumerate(apps) if app_ not in apps[:i]]
# Abort if any of those app is in fact not installed..
for app in [app_ for app_ in apps if not _is_installed(app_)]:
raise YunohostValidationError(
"app_not_installed", app=app, all_apps=_get_all_installed_apps_id()
)
if len(apps) == 0:
raise YunohostValidationError("apps_already_up_to_date")
if len(apps) > 1:
logger.info(m18n.n("app_upgrade_several_apps", apps=", ".join(apps)))
for number, app_instance_name in enumerate(apps):
logger.info(m18n.n("app_upgrade_app_name", app=app_instance_name))
app_dict = app_info(app_instance_name, full=True)
if file and isinstance(file, dict):
# We use this dirty hack to test chained upgrades in unit/functional tests
manifest, extracted_app_folder = _extract_app_from_file(
file[app_instance_name]
)
elif file:
manifest, extracted_app_folder = _extract_app_from_file(file)
elif url:
manifest, extracted_app_folder = _fetch_app_from_git(url)
elif app_dict["upgradable"] == "url_required":
logger.warning(m18n.n("custom_app_url_required", app=app_instance_name))
continue
elif app_dict["upgradable"] == "yes" or force:
manifest, extracted_app_folder = _fetch_app_from_git(app_instance_name)
else:
logger.success(m18n.n("app_already_up_to_date", app=app_instance_name))
continue
# Manage upgrade type and avoid any upgrade if there is nothing to do
upgrade_type = "UNKNOWN"
# Get current_version and new version
app_new_version = version.parse(manifest.get("version", "?"))
app_current_version = version.parse(app_dict.get("version", "?"))
if "~ynh" in str(app_current_version) and "~ynh" in str(app_new_version):
if app_current_version >= app_new_version and not force:
# In case of upgrade from file or custom repository
# No new version available
logger.success(m18n.n("app_already_up_to_date", app=app_instance_name))
# Save update time
now = int(time.time())
app_setting(app_instance_name, "update_time", now)
app_setting(
app_instance_name,
"current_revision",
manifest.get("remote", {}).get("revision", "?"),
)
continue
elif app_current_version > app_new_version:
upgrade_type = "DOWNGRADE_FORCED"
elif app_current_version == app_new_version:
upgrade_type = "UPGRADE_FORCED"
else:
app_current_version_upstream, app_current_version_pkg = str(
app_current_version
).split("~ynh")
app_new_version_upstream, app_new_version_pkg = str(
app_new_version
).split("~ynh")
if app_current_version_upstream == app_new_version_upstream:
upgrade_type = "UPGRADE_PACKAGE"
elif app_current_version_pkg == app_new_version_pkg:
upgrade_type = "UPGRADE_APP"
else:
upgrade_type = "UPGRADE_FULL"
# Check requirements
_check_manifest_requirements(manifest, app_instance_name=app_instance_name)
_assert_system_is_sane_for_app(manifest, "pre")
app_setting_path = os.path.join(APPS_SETTING_PATH, app_instance_name)
# Retrieve arguments list for upgrade script
# TODO: Allow to specify arguments
args_odict = _parse_args_from_manifest(manifest, "upgrade")
# Prepare env. var. to pass to script
env_dict = _make_environment_for_app_script(app_instance_name, args=args_odict)
env_dict["YNH_APP_UPGRADE_TYPE"] = upgrade_type
env_dict["YNH_APP_MANIFEST_VERSION"] = str(app_new_version)
env_dict["YNH_APP_CURRENT_VERSION"] = str(app_current_version)
env_dict["NO_BACKUP_UPGRADE"] = no_safety_backup
# We'll check that the app didn't brutally edit some system configuration
manually_modified_files_before_install = manually_modified_files()
# Attempt to patch legacy helpers ...
_patch_legacy_helpers(extracted_app_folder)
# Apply dirty patch to make php5 apps compatible with php7
_patch_legacy_php_versions(extracted_app_folder)
# Start register change on system
related_to = [("app", app_instance_name)]
operation_logger = OperationLogger("app_upgrade", related_to, env=env_dict)
operation_logger.start()
# Execute the app upgrade script
upgrade_failed = True
try:
upgrade_retcode = hook_exec(
extracted_app_folder + "/scripts/upgrade", env=env_dict
)[0]
upgrade_failed = True if upgrade_retcode != 0 else False
if upgrade_failed:
error = m18n.n("app_upgrade_script_failed")
logger.error(
m18n.n("app_upgrade_failed", app=app_instance_name, error=error)
)
failure_message_with_debug_instructions = operation_logger.error(error)
if msettings.get("interface") != "api":
dump_app_log_extract_for_debugging(operation_logger)
# Script got manually interrupted ... N.B. : KeyboardInterrupt does not inherit from Exception
except (KeyboardInterrupt, EOFError):
upgrade_retcode = -1
error = m18n.n("operation_interrupted")
logger.error(
m18n.n("app_upgrade_failed", app=app_instance_name, error=error)
)
failure_message_with_debug_instructions = operation_logger.error(error)
# Something wrong happened in Yunohost's code (most probably hook_exec)
except Exception:
import traceback
error = m18n.n("unexpected_error", error="\n" + traceback.format_exc())
logger.error(
m18n.n("app_install_failed", app=app_instance_name, error=error)
)
failure_message_with_debug_instructions = operation_logger.error(error)
finally:
# Whatever happened (install success or failure) we check if it broke the system
# and warn the user about it
try:
broke_the_system = False
_assert_system_is_sane_for_app(manifest, "post")
except Exception as e:
broke_the_system = True
logger.error(
m18n.n("app_upgrade_failed", app=app_instance_name, error=str(e))
)
failure_message_with_debug_instructions = operation_logger.error(str(e))
# We'll check that the app didn't brutally edit some system configuration
manually_modified_files_after_install = manually_modified_files()
manually_modified_files_by_app = set(
manually_modified_files_after_install
) - set(manually_modified_files_before_install)
if manually_modified_files_by_app:
logger.error(
"Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - "
+ "\n -".join(manually_modified_files_by_app)
)
# If upgrade failed or broke the system,
# raise an error and interrupt all other pending upgrades
if upgrade_failed or broke_the_system:
# display this if there are remaining apps
if apps[number + 1 :]:
not_upgraded_apps = apps[number:]
logger.error(
m18n.n(
"app_not_upgraded",
failed_app=app_instance_name,
apps=", ".join(not_upgraded_apps),
)
)
raise YunohostError(
failure_message_with_debug_instructions, raw_msg=True
)
# Otherwise we're good and keep going !
now = int(time.time())
app_setting(app_instance_name, "update_time", now)
app_setting(
app_instance_name,
"current_revision",
manifest.get("remote", {}).get("revision", "?"),
)
# Clean hooks and add new ones
hook_remove(app_instance_name)
if "hooks" in os.listdir(extracted_app_folder):
for hook in os.listdir(extracted_app_folder + "/hooks"):
hook_add(app_instance_name, extracted_app_folder + "/hooks/" + hook)
# Replace scripts and manifest and conf (if exists)
os.system(
'rm -rf "%s/scripts" "%s/manifest.toml %s/manifest.json %s/conf"'
% (
app_setting_path,
app_setting_path,
app_setting_path,
app_setting_path,
)
)
if os.path.exists(os.path.join(extracted_app_folder, "manifest.json")):
os.system(
'mv "%s/manifest.json" "%s/scripts" %s'
% (extracted_app_folder, extracted_app_folder, app_setting_path)
)
if os.path.exists(os.path.join(extracted_app_folder, "manifest.toml")):
os.system(
'mv "%s/manifest.toml" "%s/scripts" %s'
% (extracted_app_folder, extracted_app_folder, app_setting_path)
)
for file_to_copy in [
"actions.json",
"actions.toml",
"config_panel.json",
"config_panel.toml",
"conf",
]:
if os.path.exists(os.path.join(extracted_app_folder, file_to_copy)):
os.system(
"cp -R %s/%s %s"
% (extracted_app_folder, file_to_copy, app_setting_path)
)
# Clean and set permissions
shutil.rmtree(extracted_app_folder)
os.system("chmod 600 %s" % app_setting_path)
os.system("chmod 400 %s/settings.yml" % app_setting_path)
os.system("chown -R root: %s" % app_setting_path)
# So much win
logger.success(m18n.n("app_upgraded", app=app_instance_name))
hook_callback("post_app_upgrade", env=env_dict)
operation_logger.success()
permission_sync_to_user()
logger.success(m18n.n("upgrade_complete"))
|
21,219 |
def cli():
global from_command_line
from_command_line = True
command = " ".join(sys.argv)
change_working_directory()
logger = setup_logging()
logger.info(command)
if len(sys.argv) > 1 and sys.argv[1] not in ("src", ):
check_uid()
change_uid()
change_dir()
if is_dist_editable(bench.PROJECT_NAME) and len(sys.argv) > 1 and sys.argv[1] != "src" and not get_config(".").get("developer_mode"):
log("bench is installed in editable mode!\n\nThis is not the recommended mode of installation for production. Instead, install the package from PyPI with: `pip install frappe-bench`\n", level=3)
if not is_bench_directory() and not cmd_requires_root() and len(sys.argv) > 1 and sys.argv[1] not in ("init", "find", "src"):
log("Command not being executed in bench directory", level=3)
if len(sys.argv) > 2 and sys.argv[1] == "frappe":
return old_frappe_cli()
elif len(sys.argv) > 1:
if sys.argv[1] in get_frappe_commands() + ["--site", "--verbose", "--force", "--profile"]:
return frappe_cmd()
elif sys.argv[1] == "--help":
print(click.Context(bench_command).get_help())
print(get_frappe_help())
return
elif sys.argv[1] in get_apps():
return app_cmd()
if not (len(sys.argv) > 1 and sys.argv[1] == "src"):
atexit.register(check_latest_version)
try:
bench_command()
except BaseException as e:
traceback = bench.get_traceback()
return_code = e or (1 if traceback else 0)
if traceback and return_code in (0, 1):
log(e, level=2)
if any([x for x in ["--verbose", "-v"] if x in sys.argv]):
print(traceback)
if return_code:
logger.error("{0} executed with exit code {1}: {2}".format(command, return_code, traceback))
sys.exit(return_code)
|
def cli():
global from_command_line
from_command_line = True
command = " ".join(sys.argv)
change_working_directory()
logger = setup_logging()
logger.info(command)
if len(sys.argv) > 1 and sys.argv[1] not in ("src", ):
check_uid()
change_uid()
change_dir()
if is_dist_editable(bench.PROJECT_NAME) and len(sys.argv) > 1 and sys.argv[1] != "src" and not get_config(".").get("developer_mode"):
log("bench is installed in editable mode!\n\nThis is not the recommended mode of installation for production. Instead, install the package from PyPI with: `pip install frappe-bench`\n", level=3)
if not is_bench_directory() and not cmd_requires_root() and len(sys.argv) > 1 and sys.argv[1] not in ("init", "find", "src"):
log("Command not being executed in bench directory", level=3)
if len(sys.argv) > 2 and sys.argv[1] == "frappe":
return old_frappe_cli()
elif len(sys.argv) > 1:
if sys.argv[1] in get_frappe_commands() + ["--site", "--verbose", "--force", "--profile"]:
return frappe_cmd()
elif sys.argv[1] == "--help":
print(click.Context(bench_command).get_help())
print(get_frappe_help())
return
elif sys.argv[1] in get_apps():
return app_cmd()
if not (len(sys.argv) > 1 and sys.argv[1] == "src"):
atexit.register(check_latest_version)
try:
bench_command()
except BaseException as e:
traceback = bench.get_traceback()
return_code = e or (1 if traceback else 0)
if traceback and return_code in (0, 1):
log(e, level=2)
if any(x in sys.argv for x in ["--verbose", "-v"]):
print(traceback)
if return_code:
logger.error("{0} executed with exit code {1}: {2}".format(command, return_code, traceback))
sys.exit(return_code)
|
40,514 |
def create_target_image(location, transient_resource_group_name, source_type, source_object_name,
source_os_disk_snapshot_name, source_os_disk_snapshot_url, source_os_type,
target_resource_group_name, azure_pool_frequency, tags, target_name, target_subscription,
time_out):
random_string = get_random_string(STORAGE_ACCOUNT_NAME_LENGTH - len(location))
# create the target storage account. storage account name must be lowercase.
logger.warn(
"%s - Creating target storage account (can be slow sometimes)", location)
target_storage_account_name = location.lower() + random_string
cli_cmd = prepare_cli_command(['storage', 'account', 'create',
'--name', target_storage_account_name,
'--resource-group', transient_resource_group_name,
'--location', location,
'--sku', 'Standard_LRS'],
subscription=target_subscription)
json_output = run_cli_command(cli_cmd, return_as_json=True)
target_blob_endpoint = json_output['primaryEndpoints']['blob']
# Setup the target storage account
cli_cmd = prepare_cli_command(['storage', 'account', 'keys', 'list',
'--account-name', target_storage_account_name,
'--resource-group', transient_resource_group_name],
subscription=target_subscription)
json_output = run_cli_command(cli_cmd, return_as_json=True)
target_storage_account_key = json_output[0]['value']
logger.debug("storage account key: %s", target_storage_account_key)
expiry_format = "%Y-%m-%dT%H:%MZ"
expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=time_out)
logger.warn("create target storage sas using timeout seconds: %d", time_out)
cli_cmd = prepare_cli_command(['storage', 'account', 'generate-sas',
'--account-name', target_storage_account_name,
'--account-key', target_storage_account_key,
'--expiry', expiry.strftime(expiry_format),
'--permissions', 'aclrpuw', '--resource-types',
'sco', '--services', 'b', '--https-only'],
output_as_json=False,
subscription=target_subscription)
sas_token = run_cli_command(cli_cmd)
sas_token = sas_token.rstrip("\n\r") # STRANGE
logger.debug("sas token: %s", sas_token)
# create a container in the target blob storage account
logger.warn(
"%s - Creating container in the target storage account", location)
target_container_name = 'snapshots'
cli_cmd = prepare_cli_command(['storage', 'container', 'create',
'--name', target_container_name,
'--account-name', target_storage_account_name],
subscription=target_subscription)
run_cli_command(cli_cmd)
# Copy the snapshot to the target region using the SAS URL
blob_name = source_os_disk_snapshot_name + '.vhd'
logger.warn(
"%s - Copying blob to target storage account", location)
cli_cmd = prepare_cli_command(['storage', 'blob', 'copy', 'start',
'--source-uri', source_os_disk_snapshot_url,
'--destination-blob', blob_name,
'--destination-container', target_container_name,
'--account-name', target_storage_account_name,
'--sas-token', sas_token],
subscription=target_subscription)
run_cli_command(cli_cmd)
# Wait for the copy to complete
start_datetime = datetime.datetime.now()
wait_for_blob_copy_operation(blob_name, target_container_name, target_storage_account_name,
azure_pool_frequency, location, target_subscription)
msg = "{0} - Copy time: {1}".format(
location, datetime.datetime.now() - start_datetime)
logger.warn(msg)
# Create the snapshot in the target region from the copied blob
logger.warn(
"%s - Creating snapshot in target region from the copied blob", location)
target_blob_path = target_blob_endpoint + \
target_container_name + '/' + blob_name
target_snapshot_name = source_os_disk_snapshot_name + '-' + location
cli_cmd = prepare_cli_command(['snapshot', 'create',
'--resource-group', transient_resource_group_name,
'--name', target_snapshot_name,
'--location', location,
'--source', target_blob_path],
subscription=target_subscription)
json_output = run_cli_command(cli_cmd, return_as_json=True)
target_snapshot_id = json_output['id']
# Create the final image
logger.warn("%s - Creating final image", location)
if target_name is None:
target_image_name = source_object_name
if source_type != 'image':
target_image_name += '-image'
target_image_name += '-' + location
else:
target_image_name = target_name
cli_cmd = prepare_cli_command(['image', 'create',
'--resource-group', target_resource_group_name,
'--name', target_image_name,
'--location', location,
'--source', target_blob_path,
'--os-type', source_os_type,
'--source', target_snapshot_id],
tags=tags,
subscription=target_subscription)
run_cli_command(cli_cmd)
|
def create_target_image(location, transient_resource_group_name, source_type, source_object_name,
source_os_disk_snapshot_name, source_os_disk_snapshot_url, source_os_type,
target_resource_group_name, azure_pool_frequency, tags, target_name, target_subscription,
time_out):
random_string = get_random_string(STORAGE_ACCOUNT_NAME_LENGTH - len(location))
# create the target storage account. storage account name must be lowercase.
logger.warn(
"%s - Creating target storage account (can be slow sometimes)", location)
target_storage_account_name = location.lower() + random_string
cli_cmd = prepare_cli_command(['storage', 'account', 'create',
'--name', target_storage_account_name,
'--resource-group', transient_resource_group_name,
'--location', location,
'--sku', 'Standard_LRS'],
subscription=target_subscription)
json_output = run_cli_command(cli_cmd, return_as_json=True)
target_blob_endpoint = json_output['primaryEndpoints']['blob']
# Setup the target storage account
cli_cmd = prepare_cli_command(['storage', 'account', 'keys', 'list',
'--account-name', target_storage_account_name,
'--resource-group', transient_resource_group_name],
subscription=target_subscription)
json_output = run_cli_command(cli_cmd, return_as_json=True)
target_storage_account_key = json_output[0]['value']
logger.debug("storage account key: %s", target_storage_account_key)
expiry_format = "%Y-%m-%dT%H:%MZ"
expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=time_out)
logger.debug("create target storage sas using timeout seconds: %d", time_out)
cli_cmd = prepare_cli_command(['storage', 'account', 'generate-sas',
'--account-name', target_storage_account_name,
'--account-key', target_storage_account_key,
'--expiry', expiry.strftime(expiry_format),
'--permissions', 'aclrpuw', '--resource-types',
'sco', '--services', 'b', '--https-only'],
output_as_json=False,
subscription=target_subscription)
sas_token = run_cli_command(cli_cmd)
sas_token = sas_token.rstrip("\n\r") # STRANGE
logger.debug("sas token: %s", sas_token)
# create a container in the target blob storage account
logger.warn(
"%s - Creating container in the target storage account", location)
target_container_name = 'snapshots'
cli_cmd = prepare_cli_command(['storage', 'container', 'create',
'--name', target_container_name,
'--account-name', target_storage_account_name],
subscription=target_subscription)
run_cli_command(cli_cmd)
# Copy the snapshot to the target region using the SAS URL
blob_name = source_os_disk_snapshot_name + '.vhd'
logger.warn(
"%s - Copying blob to target storage account", location)
cli_cmd = prepare_cli_command(['storage', 'blob', 'copy', 'start',
'--source-uri', source_os_disk_snapshot_url,
'--destination-blob', blob_name,
'--destination-container', target_container_name,
'--account-name', target_storage_account_name,
'--sas-token', sas_token],
subscription=target_subscription)
run_cli_command(cli_cmd)
# Wait for the copy to complete
start_datetime = datetime.datetime.now()
wait_for_blob_copy_operation(blob_name, target_container_name, target_storage_account_name,
azure_pool_frequency, location, target_subscription)
msg = "{0} - Copy time: {1}".format(
location, datetime.datetime.now() - start_datetime)
logger.warn(msg)
# Create the snapshot in the target region from the copied blob
logger.warn(
"%s - Creating snapshot in target region from the copied blob", location)
target_blob_path = target_blob_endpoint + \
target_container_name + '/' + blob_name
target_snapshot_name = source_os_disk_snapshot_name + '-' + location
cli_cmd = prepare_cli_command(['snapshot', 'create',
'--resource-group', transient_resource_group_name,
'--name', target_snapshot_name,
'--location', location,
'--source', target_blob_path],
subscription=target_subscription)
json_output = run_cli_command(cli_cmd, return_as_json=True)
target_snapshot_id = json_output['id']
# Create the final image
logger.warn("%s - Creating final image", location)
if target_name is None:
target_image_name = source_object_name
if source_type != 'image':
target_image_name += '-image'
target_image_name += '-' + location
else:
target_image_name = target_name
cli_cmd = prepare_cli_command(['image', 'create',
'--resource-group', target_resource_group_name,
'--name', target_image_name,
'--location', location,
'--source', target_blob_path,
'--os-type', source_os_type,
'--source', target_snapshot_id],
tags=tags,
subscription=target_subscription)
run_cli_command(cli_cmd)
|
40,110 |
def load_information(file_path):
dir_checksec = Path(__file__).parent.parent
print(str(dir_checksec))
shell_skript = dir_checksec/'shell_skript/checksec'
print(str(shell_skript))
install_shell_skript = dir_checksec/'install.sh'
print(str(install_shell_skript))
if not shell_skript.exists():
execute_shell_command([str(install_shell_skript)])
json_file_information = execute_shell_command(str(shell_skript) + ' --file=' + str(file_path) + ' --format=json --extended')
dict_file_information = json.loads(json_file_information)
return dict_file_information
|
def load_information(file_path):
dir_checksec = Path(__file__).parent.parent
print(str(dir_checksec))
shell_skript = dir_checksec/'shell_skript/checksec'
print(str(shell_skript))
install_shell_skript = dir_checksec/'install.sh'
print(str(install_shell_skript))
if not shell_skript.exists():
execute_shell_command([str(install_shell_skript)])
json_file_information = execute_shell_command(str(shell_skript) + ' --file=' + str(file_path) + ' --format=json --extended')
dict_file_information = json.loads(json_file_information)
return dict_file_information[str(file_path)]
|
9,564 |
def main():
argument_spec = dict(
network_name=dict(type='str', required=True, aliases=['name']),
connected=dict(type='list', default=[], aliases=['containers'], elements='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
driver=dict(type='str', default='bridge'),
driver_options=dict(type='dict', default={}),
force=dict(type='bool', default=False),
appends=dict(type='bool', default=False, aliases=['incremental']),
ipam_driver=dict(type='str'),
ipam_options=dict(type='dict', default={}, removed_in_version='2.12', options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
)),
ipam_config=dict(type='list', elements='dict', options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
)),
enable_ipv6=dict(type='bool'),
internal=dict(type='bool'),
labels=dict(type='dict', default={}),
debug=dict(type='bool', default=False),
scope=dict(type='str', choices=['local', 'global', 'swarm']),
attachable=dict(type='bool'),
)
mutually_exclusive = [
('ipam_config', 'ipam_options')
]
option_minimal_versions = dict(
scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
labels=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
min_docker_version='1.10.0',
min_docker_api_version='1.22',
# "The docker server >= 1.10.0"
option_minimal_versions=option_minimal_versions,
)
cm = DockerNetworkManager(client)
client.module.exit_json(**cm.results)
|
def main():
argument_spec = dict(
network_name=dict(type='str', required=True, aliases=['name']),
connected=dict(type='list', default=[], aliases=['containers'], elements='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
driver=dict(type='str', default='bridge'),
driver_options=dict(type='dict', default={}),
force=dict(type='bool', default=False),
appends=dict(type='bool', default=False, aliases=['incremental']),
ipam_driver=dict(type='str'),
ipam_options=dict(type='dict', default={}, removed_in_version='2.12', options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
)),
ipam_config=dict(type='list', elements='dict', options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
)),
enable_ipv6=dict(type='bool'),
internal=dict(type='bool'),
labels=dict(type='dict', default={}),
debug=dict(type='bool', default=False),
scope=dict(type='str', choices=['local', 'global', 'swarm']),
attachable=dict(type='bool'),
)
mutually_exclusive = [
('ipam_config', 'ipam_options')
]
option_minimal_versions = dict(
scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
labels=dict(docker_api_version='1.23'),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
min_docker_version='1.10.0',
min_docker_api_version='1.22',
# "The docker server >= 1.10.0"
option_minimal_versions=option_minimal_versions,
)
cm = DockerNetworkManager(client)
client.module.exit_json(**cm.results)
|
30,669 |
def safe_get(dict_object, *keys, key_return_value = None):
""" Recursive safe get query, If keys found return value othewisw return None
Args:
key_return_value: Value to return when no key availble
dict_object: dictionary to query.
*keys: keys for recursive get.
Returns:
:return: Value from recursive get.
:rtype: ``obj``
"""
for key in keys:
try:
dict_object = dict_object[key]
except KeyError:
return key_return_value
return dict_object
|
def safe_get(dict_object, *keys, key_return_value = None):
""" Recursive safe get a query, If keys found return value otherwise return None
Args:
key_return_value: Value to return when no key availble
dict_object: dictionary to query.
*keys: keys for recursive get.
Returns:
:return: Value from recursive get.
:rtype: ``obj``
"""
for key in keys:
try:
dict_object = dict_object[key]
except KeyError:
return key_return_value
return dict_object
|
48,930 |
def _find_memtables(expr):
is_in_memory_table = isinstance(op := expr.op(), ops.InMemoryTable)
return lin.proceed, op if is_in_memory_table else None
|
def _find_memtables(expr):
op = expr.op()
return lin.proceed, op if isinstance(op, ops.InMemoryTable) else None
|
32,293 |
def get_conforming_url_filtering_profiles(
topology: Topology,
device_filter_string: str = None
) -> List[PanosObjectReference]:
"""
Returns a list of existing PANOS URL filtering objects that conform to best practices.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
"""
return HygieneLookups.get_all_conforming_url_filtering_profiles(
topology,
device_filter_str=device_filter_string,
)
|
def get_conforming_url_filtering_profiles(
topology: Topology,
device_filter_string: Optional[str] = None
) -> List[PanosObjectReference]:
"""
Returns a list of existing PANOS URL filtering objects that conform to best practices.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
"""
return HygieneLookups.get_all_conforming_url_filtering_profiles(
topology,
device_filter_str=device_filter_string,
)
|
44,924 |
def get_boto_client(
resource: str, credentials: dict = None, use_session: bool = False, **kwargs: Any
) -> "boto3.client":
"""
Utility function for loading boto3 client objects from a given set of credentials.
Args:
- resource (str): the name of the resource to retrieve a client for
- credentials (dict, optional): a dictionary of Google credentials used to initialize the Client; if
not provided, will attempt to load the Client using ambient environment settings
- use_session (bool, optional): a boolean specifying whether to load this client using a session or not;
defaults to `False`
- **kwargs (Any, optional): additional keyword arguments to pass to boto3
Returns:
- Client: an initialized and authenticated Google Client
"""
aws_access_key = None
aws_secret_access_key = None
if credentials:
aws_access_key = credentials["ACCESS_KEY"]
aws_secret_access_key = credentials["SECRET_ACCESS_KEY"]
else:
ctx_credentials = prefect.context.get("secrets", {}).get("AWS_CREDENTIALS", {})
aws_access_key = ctx_credentials.get("ACCESS_KEY")
aws_secret_access_key = ctx_credentials.get("SECRET_ACCESS_KEY")
if use_session:
# see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?#multithreading-multiprocessing
session = boto3.session.Session()
return session.client(
resource,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
else:
return boto3.client(
resource,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
|
def get_boto_client(
resource: str, credentials: dict = None, use_session: bool = False, **kwargs: Any
) -> "boto3.client":
"""
Utility function for loading boto3 client objects from a given set of credentials.
Args:
- resource (str): the name of the resource to retrieve a client for
- credentials (dict, optional): a dictionary of AWS credentials used to initialize the Client; if
not provided, will attempt to load the Client using ambient environment settings
- use_session (bool, optional): a boolean specifying whether to load this client using a session or not;
defaults to `False`
- **kwargs (Any, optional): additional keyword arguments to pass to boto3
Returns:
- Client: an initialized and authenticated Google Client
"""
aws_access_key = None
aws_secret_access_key = None
if credentials:
aws_access_key = credentials["ACCESS_KEY"]
aws_secret_access_key = credentials["SECRET_ACCESS_KEY"]
else:
ctx_credentials = prefect.context.get("secrets", {}).get("AWS_CREDENTIALS", {})
aws_access_key = ctx_credentials.get("ACCESS_KEY")
aws_secret_access_key = ctx_credentials.get("SECRET_ACCESS_KEY")
if use_session:
# see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?#multithreading-multiprocessing
session = boto3.session.Session()
return session.client(
resource,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
else:
return boto3.client(
resource,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
|
40,339 |
def homophily(edge_index: Adj, y: Tensor, batch: OptTensor = None,
method: str = 'edge') -> Union[float, Tensor]:
r"""The homophily of a graph characterizes how likely nodes with the same
label are near each other in a graph.
There are many measures of homophily that fits this definition.
In particular:
- In the `"Beyond Homophily in Graph Neural Networks: Current Limitations
and Effective Designs" <https://arxiv.org/abs/2006.11468>`_ paper, the
homophily is the fraction of edges in a graph which connects nodes
that have the same class label:
.. math::
\text{homophily} = \frac{| \{ (v,w) : (v,w) \in \mathcal{E} \wedge
y_v = y_w \} | } {|\mathcal{E}|}
That measure is called the *edge homophily ratio*.
- In the `"Geom-GCN: Geometric Graph Convolutional Networks"
<https://arxiv.org/abs/2002.05287>`_ paper, edge homophily is normalized
across neighborhoods:
.. math::
\text{homophily} = \frac{1}{|\mathcal{V}|} \sum_{v \in \mathcal{V}}
\frac{ | \{ (w,v) : w \in \mathcal{N}(v) \wedge y_v = y_w \} | }
{ |\mathcal{N}(v)| }
That measure is called the *node homophily ratio*.
- In the "Large-scale learning on non-homophilous graphs: \
New benchmarks and strong simple methods" paper, the class insensitive
homophily metric better captures the presence or absence of homophily:
.. math::
\text{homophily} = \frac{1}{C-1}\sum_{k=0}^{C-1}\begin{bmatrix}
{h_k - \frac{\lvert C_k \rvert}{n}}\end{bmatrix}_+,
.. math::
h_k = \frac{\sum_{u \in C_k}d_u^{(k_u)}}{\sum_{u \in C_k}d_u}
That measure is called the *class insensitive edge homophily ratio*
Args:
edge_index (Tensor or SparseTensor): The graph connectivity.
y (Tensor): The labels.
batch (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots,B-1\}}^N`, which assigns
each node to a specific example. (default: :obj:`None`)
method (str, optional): The method used to calculate the homophily,
either :obj:`"edge"` (first formula), :obj:`"node"`
(second formula) or `"edge-insensitive"`. (default: :obj:`"edge"`)
"""
assert method in ['edge', 'node', 'edge-insensitive']
y = y.squeeze(-1) if y.dim() > 1 else y
if isinstance(edge_index, SparseTensor):
col, row, _ = edge_index.coo()
else:
row, col = edge_index
if method == 'edge':
out = torch.zeros(row.size(0), device=row.device)
out[y[row] == y[col]] = 1.
if batch is None:
return float(out.mean())
else:
return scatter_mean(out, batch[col], dim=0)
elif method == 'node':
out = torch.zeros(row.size(0), device=row.device)
out[y[row] == y[col]] = 1.
out = scatter_mean(out, col, 0, dim_size=y.size(0))
if batch is None:
return float(out.mean())
else:
return scatter_mean(out, batch, dim=0)
else:
c = y.squeeze().max() + 1
nonzero_labels = y[y >= 0]
counts = nonzero_labels.unique(return_counts=True)[1]
proportions = counts.float() / nonzero_labels.shape[0]
h = homophily(edge_index, y, batch=y, method='edge')
out = 0
for k in range(c):
class_add = torch.clamp(h[k] - proportions[k], min=0)
if not torch.isnan(class_add):
out += class_add
out /= c - 1
return out
|
def homophily(edge_index: Adj, y: Tensor, batch: OptTensor = None,
method: str = 'edge') -> Union[float, Tensor]:
r"""The homophily of a graph characterizes how likely nodes with the same
label are near each other in a graph.
There are many measures of homophily that fits this definition.
In particular:
- In the `"Beyond Homophily in Graph Neural Networks: Current Limitations
and Effective Designs" <https://arxiv.org/abs/2006.11468>`_ paper, the
homophily is the fraction of edges in a graph which connects nodes
that have the same class label:
.. math::
\text{homophily} = \frac{| \{ (v,w) : (v,w) \in \mathcal{E} \wedge
y_v = y_w \} | } {|\mathcal{E}|}
That measure is called the *edge homophily ratio*.
- In the `"Geom-GCN: Geometric Graph Convolutional Networks"
<https://arxiv.org/abs/2002.05287>`_ paper, edge homophily is normalized
across neighborhoods:
.. math::
\text{homophily} = \frac{1}{|\mathcal{V}|} \sum_{v \in \mathcal{V}}
\frac{ | \{ (w,v) : w \in \mathcal{N}(v) \wedge y_v = y_w \} | }
{ |\mathcal{N}(v)| }
That measure is called the *node homophily ratio*.
- In the "Large-scale learning on non-homophilous graphs: \
New benchmarks and strong simple methods" paper, the class insensitive
homophily metric better captures the presence or absence of homophily:
.. math::
\text{homophily} = \frac{1}{C-1}\sum_{k=0}^{C-1}\begin{bmatrix}
{h_k - \frac{\lvert C_k \rvert}{n}}\end{bmatrix}_+,
.. math::
h_k = \frac{\sum_{u \in C_k}d_u^{(k_u)}}{\sum_{u \in C_k}d_u}
That measure is called the *class insensitive edge homophily ratio*.
Args:
edge_index (Tensor or SparseTensor): The graph connectivity.
y (Tensor): The labels.
batch (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots,B-1\}}^N`, which assigns
each node to a specific example. (default: :obj:`None`)
method (str, optional): The method used to calculate the homophily,
either :obj:`"edge"` (first formula), :obj:`"node"`
(second formula) or `"edge-insensitive"`. (default: :obj:`"edge"`)
"""
assert method in ['edge', 'node', 'edge-insensitive']
y = y.squeeze(-1) if y.dim() > 1 else y
if isinstance(edge_index, SparseTensor):
col, row, _ = edge_index.coo()
else:
row, col = edge_index
if method == 'edge':
out = torch.zeros(row.size(0), device=row.device)
out[y[row] == y[col]] = 1.
if batch is None:
return float(out.mean())
else:
return scatter_mean(out, batch[col], dim=0)
elif method == 'node':
out = torch.zeros(row.size(0), device=row.device)
out[y[row] == y[col]] = 1.
out = scatter_mean(out, col, 0, dim_size=y.size(0))
if batch is None:
return float(out.mean())
else:
return scatter_mean(out, batch, dim=0)
else:
c = y.squeeze().max() + 1
nonzero_labels = y[y >= 0]
counts = nonzero_labels.unique(return_counts=True)[1]
proportions = counts.float() / nonzero_labels.shape[0]
h = homophily(edge_index, y, batch=y, method='edge')
out = 0
for k in range(c):
class_add = torch.clamp(h[k] - proportions[k], min=0)
if not torch.isnan(class_add):
out += class_add
out /= c - 1
return out
|
3,184 |
def build_query_params_from_request(request, projects, environments):
query_kwargs = {
'projects': projects,
'sort_by': request.GET.get('sort', DEFAULT_SORT_OPTION),
}
limit = request.GET.get('limit')
if limit:
try:
query_kwargs['limit'] = int(limit)
except ValueError:
raise ValidationError('invalid limit')
# TODO: proper pagination support
cursor = request.GET.get('cursor')
if cursor:
query_kwargs['cursor'] = Cursor.from_string(cursor)
query = request.GET.get('query', 'is:unresolved').strip()
if query:
try:
query_kwargs.update(parse_query(projects, query, request.user, environments))
except InvalidQuery as e:
raise ValidationError(
u'Your search query could not be parsed: {}'.format(
e.message)
)
try:
search_filters = convert_query_values(
parse_search_query(query),
projects,
request.user,
environments,
)
except InvalidSearchQuery as e:
raise ValidationError('Your search query could not be parsed: {}'.format(e.message))
query_kwargs['search_filters'] = search_filters
return query_kwargs
|
def build_query_params_from_request(request, projects, environments):
query_kwargs = {
'projects': projects,
'sort_by': request.GET.get('sort', DEFAULT_SORT_OPTION),
}
limit = request.GET.get('limit')
if limit:
try:
query_kwargs['limit'] = int(limit)
except ValueError:
raise ValidationError('invalid limit')
# TODO: proper pagination support
cursor = request.GET.get('cursor')
if cursor:
query_kwargs['cursor'] = Cursor.from_string(cursor)
query = request.GET.get('query', 'is:unresolved').strip()
if query:
try:
query_kwargs.update(parse_query(projects, query, request.user, environments))
except InvalidQuery as e:
raise ValidationError(
u'Your search query could not be parsed: {}'.format(
e.message)
)
try:
search_filters = convert_query_values(
parse_search_query(query),
projects,
request.user,
environments,
)
except InvalidSearchQuery as e:
raise ValidationError(u'Your search query could not be parsed: {}'.format(e.message))
query_kwargs['search_filters'] = search_filters
return query_kwargs
|
32,659 |
def _put_get_user_stage_s3_regional_url(
tmpdir,
conn_cnx,
db_parameters,
number_of_files=1,
number_of_lines=1,
from_path=True,
):
try:
with conn_cnx(
user=db_parameters["user"],
account=db_parameters["account"],
password=db_parameters["password"],
role="accountadmin",
) as cnx:
cnx.cursor().execute(
"alter account set ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1 = true;"
)
_put_get_user_stage(
tmpdir, conn_cnx, db_parameters, number_of_files, number_of_lines, from_path
)
finally:
with conn_cnx(
user=db_parameters["user"],
account=db_parameters["account"],
password=db_parameters["password"],
role="accountadmin",
) as cnx:
cnx.cursor().execute(
"alter account set ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1 = false;"
)
|
def _put_get_user_stage_s3_regional_url(
tmpdir,
conn_cnx,
db_parameters,
number_of_files=1,
number_of_lines=1,
from_path=True,
):
try:
with conn_cnx(
user=db_parameters["user"],
account=db_parameters["account"],
password=db_parameters["password"],
role="accountadmin",
) as cnx:
cnx.cursor().execute(
"alter account set ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1 = true;"
)
_put_get_user_stage(
tmpdir, conn_cnx, db_parameters, number_of_files, number_of_lines, from_path
)
finally:
with conn_cnx(
role="accountadmin",
) as cnx:
cnx.cursor().execute(
"alter account set ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1 = false;"
)
|
26,972 |
def get_dag_by_file_location(dag_id: str):
"""Returns DAG of a given dag_id by looking up file location"""
from airflow.models import DagBag, DagModel
# Benefit is that logging from other dags in dagbag will not appear
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise AirflowException(
f"Dag '{dag_id}' could not be found; either it does not exist or it failed to parse."
)
dagbag = DagBag(dag_folder=dag_model.fileloc)
return dagbag.dags[dag_id]
|
def get_dag_by_file_location(dag_id: str):
"""Returns DAG of a given dag_id by looking up file location"""
from airflow.models import DagBag, DagModel
# Benefit is that logging from other dags in dagbag will not appear
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise AirflowException(
f"Dag {dag_id!r} could not be found; either it does not exist or it failed to parse."
)
dagbag = DagBag(dag_folder=dag_model.fileloc)
return dagbag.dags[dag_id]
|
11,566 |
def create_build_system(working_dir, buildsys_type=None, package=None, opts=None,
write_build_scripts=False, verbose=False,
build_args=[], child_build_args=[]):
"""Return a new build system that can build the source in working_dir."""
from rez.plugin_managers import plugin_manager
# detect build system if necessary
if not buildsys_type:
clss = get_valid_build_systems(working_dir, package=package)
if not clss:
# Special case - bez. This is an old deprecated build system,
# which expects a rezbuild.py file. Include info in error showing
# how to port to a custom build command.
#
if os.path.exists(os.path.join(working_dir, "rezbuild.py")):
msg = (
"No build system is associated with the path %s.\n"
"\n"
"There is a rezbuild.py file present, suggesting you were "
"using the deprecated bez build system. You need to use a "
"custom build command instead. You port your existing "
"rezbuild.py like so:\n"
"\n"
"Add this line to package.py:\n"
"\n"
" build_command = 'python {root}/rezbuild.py {install}'\n"
"\n"
"Add these lines to rezbuild.py:\n"
"\n"
" if __name__ == '__main__':\n"
" import os, sys\n"
" build(\n"
" source_path=os.environ['REZ_BUILD_SOURCE_PATH'],\n"
" build_path=os.environ['REZ_BUILD_PATH'],\n"
" install_path=os.environ['REZ_BUILD_INSTALL_PATH'],\n"
" targets=sys.argv[1:]\n"
" )"
)
raise BuildSystemError(msg % working_dir)
raise BuildSystemError(
"No build system is associated with the path %s" % working_dir)
if len(clss) != 1:
s = ', '.join(x.name() for x in clss)
raise BuildSystemError(("Source could be built with one of: %s; "
"Please specify a build system") % s)
buildsys_type = next(iter(clss)).name()
# create instance of build system
cls_ = plugin_manager.get_plugin_class('build_system', buildsys_type)
return cls_(working_dir,
opts=opts,
package=package,
write_build_scripts=write_build_scripts,
verbose=verbose,
build_args=build_args,
child_build_args=child_build_args)
|
def create_build_system(working_dir, buildsys_type=None, package=None, opts=None,
write_build_scripts=False, verbose=False,
build_args=[], child_build_args=[]):
"""Return a new build system that can build the source in working_dir."""
from rez.plugin_managers import plugin_manager
# detect build system if necessary
if not buildsys_type:
clss = get_valid_build_systems(working_dir, package=package)
if not clss:
# Special case - bez. This is an old deprecated build system,
# which expects a rezbuild.py file. Include info in error showing
# how to port to a custom build command.
#
if os.path.exists(os.path.join(working_dir, "rezbuild.py")):
msg = (
"No build system is associated with the path %s.\n"
"\n"
"There is a rezbuild.py file present, suggesting you were "
"using the deprecated bez build system. You need to use a "
"custom build command instead. You can port your existing "
"rezbuild.py like so:\n"
"\n"
"Add this line to package.py:\n"
"\n"
" build_command = 'python {root}/rezbuild.py {install}'\n"
"\n"
"Add these lines to rezbuild.py:\n"
"\n"
" if __name__ == '__main__':\n"
" import os, sys\n"
" build(\n"
" source_path=os.environ['REZ_BUILD_SOURCE_PATH'],\n"
" build_path=os.environ['REZ_BUILD_PATH'],\n"
" install_path=os.environ['REZ_BUILD_INSTALL_PATH'],\n"
" targets=sys.argv[1:]\n"
" )"
)
raise BuildSystemError(msg % working_dir)
raise BuildSystemError(
"No build system is associated with the path %s" % working_dir)
if len(clss) != 1:
s = ', '.join(x.name() for x in clss)
raise BuildSystemError(("Source could be built with one of: %s; "
"Please specify a build system") % s)
buildsys_type = next(iter(clss)).name()
# create instance of build system
cls_ = plugin_manager.get_plugin_class('build_system', buildsys_type)
return cls_(working_dir,
opts=opts,
package=package,
write_build_scripts=write_build_scripts,
verbose=verbose,
build_args=build_args,
child_build_args=child_build_args)
|
32,001 |
def get_indicator_list(client: MandiantClient, limit: int, first_fetch: str, indicator_type: str) -> List[Dict]:
"""
Get list of indicators from given type.
Args:
client (MandiantClient): client
limit (int): number of indicators to return.
first_fetch (str): Get indicators newer than first_fetch.
indicator_type (str): indicator type
Returns:
List[Dict]: list of indicators
"""
last_run_dict = demisto.getLastRun()
indicators_list = last_run_dict.get(indicator_type + 'List', [])
if len(indicators_list) < limit:
last_run = last_run_dict.get(indicator_type + 'Last', first_fetch)
new_indicators_list = get_new_indicators(client, last_run, indicator_type, limit)
indicators_list += new_indicators_list
if indicators_list:
new_indicators_list = indicators_list[:limit]
last_run_dict[indicator_type + 'List'] = indicators_list[limit:]
date_key = 'last_seen' if indicator_type == 'Indicators' else 'last_updated'
last_run_dict[indicator_type + 'Last'] = new_indicators_list[-1][date_key]
demisto.setLastRun(last_run_dict)
indicators_list = new_indicators_list
return indicators_list
|
def get_indicator_list(client: MandiantClient, limit: int, first_fetch: str, indicator_type: str) -> List[Dict]:
"""
Get list of indicators from given type.
Args:
client (MandiantClient): client
limit (int): number of indicators to return.
first_fetch (str): Get indicators newer than first_fetch.
indicator_type (str): indicator type
Returns:
List[Dict]: list of indicators
"""
last_run_dict = demisto.getLastRun()
indicators_list = last_run_dict.get(indicator_type + 'List', [])
if len(indicators_list) < limit:
last_run = last_run_dict.get(indicator_type + 'Last', first_fetch)
new_indicators_list = get_new_indicators(client, last_run, indicator_type, limit)
indicators_list += new_indicators_list
if indicators_list:
new_indicators_list = indicators_list[:limit]
last_run_dict[indicator_type + 'List'] = indicators_list[limit:]
date_key = 'last_seen' if indicator_type == 'Indicators' else 'last_updated'
last_run_dict[indicator_type + 'LastFetch'] = new_indicators_list[-1][date_key]
demisto.setLastRun(last_run_dict)
indicators_list = new_indicators_list
return indicators_list
|
38,394 |
def _setup_polar_coordinates(registry, axis_id):
f1, f2 = _get_coord_fields(axis_id["r"])
registry.add_field(
("index", "dr"),
sampling_type="cell",
function=f1,
display_field=False,
units="code_length",
)
registry.add_field(
("index", "r"),
sampling_type="cell",
function=f2,
display_field=False,
units="code_length",
)
f1, f2 = _get_coord_fields(axis_id["theta"], "")
registry.add_field(
("index", "dtheta"),
sampling_type="cell",
function=f1,
display_field=False,
units="",
)
registry.add_field(
("index", "theta"),
sampling_type="cell",
function=f2,
display_field=False,
units="",
)
def _path_r(field, data):
return data["index", "dr"]
registry.add_field(
("index", "path_element_r"),
sampling_type="cell",
function=_path_r,
units="code_length",
)
def _path_theta(field, data):
# Note: this already assumes cell-centered
return data["index", "r"] * data["index", "dtheta"]
registry.add_field(
("index", "path_element_theta"),
sampling_type="cell",
function=_path_theta,
units="code_length",
)
|
def _setup_polar_coordinates(registry, axis_id):
f1, f2 = _get_coord_fields(axis_id["r"])
registry.add_field(
("index", "dr"),
sampling_type="cell",
function=f1,
display_field=False,
units="code_length",
)
registry.add_field(
("index", "r"),
sampling_type="cell",
function=f2,
display_field=False,
units="code_length",
)
f1, f2 = _get_coord_fields(axis_id["theta"], "")
registry.add_field(
("index", "dtheta"),
sampling_type="cell",
function=f1,
display_field=False,
units="dimensionless",
)
registry.add_field(
("index", "theta"),
sampling_type="cell",
function=f2,
display_field=False,
units="",
)
def _path_r(field, data):
return data["index", "dr"]
registry.add_field(
("index", "path_element_r"),
sampling_type="cell",
function=_path_r,
units="code_length",
)
def _path_theta(field, data):
# Note: this already assumes cell-centered
return data["index", "r"] * data["index", "dtheta"]
registry.add_field(
("index", "path_element_theta"),
sampling_type="cell",
function=_path_theta,
units="code_length",
)
|
48,486 |
def _extract_collection_from_git(repo_url, coll_ver, b_path):
name, version, git_url, fragment = parse_scm(repo_url, coll_ver)
b_checkout_path = mkdtemp(
dir=b_path,
prefix=to_bytes(name, errors='surrogate_or_strict'),
) # type: bytes
try:
git_executable = get_bin_path('git')
except ValueError as err:
raise AnsibleError(
"Could not find git executable to extract the collection from the Git repository `{repo_url!s}`.".
format(repo_url=to_native(git_url))
) from err
# Perform a shallow clone if simply cloning HEAD
if version == 'HEAD':
git_clone_cmd = git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path)
else:
git_clone_cmd = git_executable, 'clone', git_url, to_text(b_checkout_path)
# FIXME: '--branch', version
try:
subprocess.check_call(git_clone_cmd)
except subprocess.CalledProcessError as proc_err:
raise_from(
AnsibleError( # should probably be LookupError
'Failed to clone a Git repository from `{repo_url!s}`.'.
format(repo_url=to_native(git_url)),
),
proc_err,
)
git_switch_cmd = git_executable, 'checkout', to_text(version)
try:
subprocess.check_call(git_switch_cmd, cwd=b_checkout_path)
except subprocess.CalledProcessError as proc_err:
raise_from(
AnsibleError( # should probably be LookupError
'Failed to switch a cloned Git repo `{repo_url!s}` '
'to the requested revision `{commitish!s}`.'.
format(
commitish=to_native(version),
repo_url=to_native(git_url),
),
),
proc_err,
)
if pathlib.Path(str(b_checkout_path) + "/.gitmodules").is_file():
git_submodule_cmd = git_executable, 'submodule', 'update', '--init', '--recursive'
try:
subprocess.check_call(git_submodule_cmd, cwd=b_checkout_path)
except subprocess.CalledProcessError as proc_err:
raise_from(
AnsibleError(
'Failed to download submodules'
),
proc_err,
)
return (
os.path.join(b_checkout_path, to_bytes(fragment))
if fragment else b_checkout_path
)
|
def _extract_collection_from_git(repo_url, coll_ver, b_path):
name, version, git_url, fragment = parse_scm(repo_url, coll_ver)
b_checkout_path = mkdtemp(
dir=b_path,
prefix=to_bytes(name, errors='surrogate_or_strict'),
) # type: bytes
try:
git_executable = get_bin_path('git')
except ValueError as err:
raise AnsibleError(
"Could not find git executable to extract the collection from the Git repository `{repo_url!s}`.".
format(repo_url=to_native(git_url))
) from err
# Perform a shallow clone if simply cloning HEAD
if version == 'HEAD':
git_clone_cmd = git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path)
else:
git_clone_cmd = git_executable, 'clone', git_url, to_text(b_checkout_path)
# FIXME: '--branch', version
try:
subprocess.check_call(git_clone_cmd)
except subprocess.CalledProcessError as proc_err:
raise_from(
AnsibleError( # should probably be LookupError
'Failed to clone a Git repository from `{repo_url!s}`.'.
format(repo_url=to_native(git_url)),
),
proc_err,
)
git_switch_cmd = git_executable, 'checkout', to_text(version)
try:
subprocess.check_call(git_switch_cmd, cwd=b_checkout_path)
except subprocess.CalledProcessError as proc_err:
raise_from(
AnsibleError( # should probably be LookupError
'Failed to switch a cloned Git repo `{repo_url!s}` '
'to the requested revision `{commitish!s}`.'.
format(
commitish=to_native(version),
repo_url=to_native(git_url),
),
),
proc_err,
)
if pathlib.Path(str(b_checkout_path) + "/.gitmodules").is_file():
git_submodule_cmd = git_executable, 'submodule', 'update', '--init', '--recursive'
try:
subprocess.check_call(git_submodule_cmd, cwd=b_checkout_path)
except subprocess.CalledProcessError as proc_err:
raise AnsibleError('Failed to download submodules') from proc_err
return (
os.path.join(b_checkout_path, to_bytes(fragment))
if fragment else b_checkout_path
)
|
7,919 |
def test_cell_rotation(pincell_model_w_univ):
# Cell 1 is filled with a material so we cannot rotate it, but we can get
# its rotation matrix (which will be the identity matrix)
cell = openmc.lib.cells[1]
assert cell.get_rotation() is None
with pytest.raises(exc.GeometryError, match='not filled with'):
cell.set_rotation(np.array([180., 0., 0.]))
# Now repeat with Cell 2 and we will be allowed to do it
cell = openmc.lib.cells[2]
assert cell.get_rotation() is None
cell.set_rotation(np.array([180., 0., 0.]))
assert cell.get_rotation() == pytest.approx([180., 0., 0.])
|
def test_cell_rotation(pincell_model_w_univ):
# Cell 1 is filled with a material so we cannot rotate it, but we can get
# its rotation matrix (which will be the identity matrix)
cell = openmc.lib.cells[1]
assert cell.get_rotation() is None
with pytest.raises(exc.GeometryError, match='not filled with'):
cell.set_rotation((180., 0., 0.))
# Now repeat with Cell 2 and we will be allowed to do it
cell = openmc.lib.cells[2]
assert cell.get_rotation() is None
cell.set_rotation(np.array([180., 0., 0.]))
assert cell.get_rotation() == pytest.approx([180., 0., 0.])
|
6,099 |
def strToIntDict(inDict):
""" Because JSON will transform dict with int keys to str keys,
this utility method is just to cast it back.
This shows useful in cases when sending dict indexed on
jobID or requestID for example
:param inDict: dictionnary with strings as keys e.g. {'1': 1, '2': 2}
:returns: dictionnary with int as keys e.g. {1: 1, 2: 2}
"""
return dict((int(key), value) for key, value in inDict.iteritems())
|
def strToIntDict(inDict):
""" Because JSON will transform dict with int keys to str keys,
this utility method is just to cast it back.
This shows useful in cases when sending dict indexed on
jobID or requestID for example
:param inDict: dictionnary with strings as keys e.g. {'1': 1, '2': 2}
:returns: dictionnary with int as keys e.g. {1: 1, 2: 2}
"""
return {int(key): value for key, value in inDict.items()}
|
55,404 |
def spark_udf(spark, model_uri, result_type="double", env_manager="local"):
"""
A Spark UDF that can be used to invoke the Python function formatted model.
Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names
are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to
wrap the input in a struct. In that case, the data will be passed as a DataFrame with column
names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model
will get the data as a pandas DataFrame with 2 columns 'x' and 'y').
If a model contains a signature, the UDF can be called without specifying column name
arguments. In this case, the UDF will be called with column names from signature, so the
evaluation dataframe's column names must match the model signature's column names.
The predictions are filtered to contain only the columns that can be represented as the
``result_type``. If the ``result_type`` is string or array of strings, all predictions are
converted to string. If the result type is not an array type, the left most column with
matching type is returned.
NOTE: Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of
Spark (2.4 and below).
.. code-block:: python
:caption: Example
from pyspark.sql.functions import struct
predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
df.withColumn("prediction", predict(struct("name", "age"))).show()
:param spark: A SparkSession object.
:param model_uri: The location, in URI format, of the MLflow model with the
:py:mod:`mlflow.pyfunc` flavor. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param result_type: the return type of the user-defined function. The value can be either a
``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
The following classes of result type are supported:
- "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
``int32`` or an exception if there is none.
- "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
``int64`` or an exception if there is none.
- ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
size.
- "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
``float32`` or an exception if there is none.
- "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
``double`` or an exception if there is none.
- ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
an exception if there are no numeric columns.
- "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.
- ``ArrayType(StringType)``: All columns converted to ``string``.
:param env_manager: The environment manager to use in order to create the
software environment for model inference. Default value is ``local``,
The following values are supported:
- ``conda``: (Recommended) Use Conda to restore the software environment
that was used to train the model. Note that environment is only restored
in the context of the PySpark UDF; the software environment outside of
the UDF is unaffected.
- ``local``: Use the current Python environment for model inference, which
may differ from the environment used to train the model and may lead to
errors or invalid predictions.
:return: Spark UDF that applies the model's ``predict`` method to the data and returns a
type specified by ``result_type``, which by default is a double.
"""
# Scope Spark import to this method so users don't need pyspark to use non-Spark-related
# functionality.
import functools
from mlflow.pyfunc.spark_model_cache import SparkModelCache
from mlflow.utils._spark_utils import _SparkDirectoryDistributor
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import _parse_datatype_string
from pyspark.sql.types import (
ArrayType,
DataType as SparkDataType,
StructType as SparkStructType,
)
from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
from mlflow.models.cli import _get_flavor_backend
if env_manager not in ["local", "conda"]:
raise MlflowException(
f"Illegal env_manager value '{env_manager}'.", error_code=INVALID_PARAMETER_VALUE
)
if env_manager != "local" and os.name == "nt":
# TODO:
# support windows system.
# 1. Address the issue killing mlflow server process when pyspark UDF task process died
# (spark job canceled)
# 2. Addresss potential pyenv race condition issues on windows.
raise MlflowException(
f"`mlflow.pyfunc.spark_udf` env_manager value '{env_manager}' dose not support "
"windows system."
)
# Check whether spark is in local or local-cluster mode
# this case all executors and driver share the same filesystem
is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")
nfs_root_dir = get_nfs_cache_root_dir()
should_use_nfs = nfs_root_dir is not None
should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)
conda_env_root_dir = _get_or_create_conda_env_root_dir(nfs_root_dir)
if not isinstance(result_type, SparkDataType):
result_type = _parse_datatype_string(result_type)
elem_type = result_type
if isinstance(elem_type, ArrayType):
elem_type = elem_type.elementType
supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
if not any([isinstance(elem_type, x) for x in supported_types]):
raise MlflowException(
message="Invalid result_type '{}'. Result type can only be one of or an array of one "
"of the following types: {}".format(str(elem_type), str(supported_types)),
error_code=INVALID_PARAMETER_VALUE,
)
local_model_path = _download_artifact_from_uri(
artifact_uri=model_uri, output_path=_get_or_create_model_cache_dir()
)
if env_manager == "local":
# Assume spark executor python environment is the same with spark driver side.
_warn_dependency_requirement_mismatches(local_model_path)
_logger.warning(
'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
"environment that was used during training, which may lead to errors or inaccurate "
'predictions. We recommend specifying `env_manager="conda"`, which automatically '
"recreates the environment that was used to train the model and performs inference "
"in the recreated environment."
)
else:
_logger.info(
"This UDF will use Conda to recreate the model's software environment for inference. "
"This may take extra time during execution."
)
if not sys.platform.startswith("linux"):
# TODO: support killing mlflow server launched in UDF task when spark job canceled
# for non-linux system.
# https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
_logger.warning(
"In order to run inference code in restored python environment, PySpark UDF "
"processes spawn MLflow Model servers as child processes. Due to system "
"limitations with handling SIGKILL signals, these MLflow Model server child "
"processes cannot be cleaned up if the Spark Job is canceled."
)
if not should_use_spark_to_broadcast_file:
# Prepare restored environment in driver side if possible.
if env_manager == "conda":
_get_flavor_backend(
local_model_path, no_conda=False, install_mlflow=False,
conda_env_root_dir=conda_env_root_dir
).prepare_env(
model_uri=local_model_path, capture_output=True
)
# Broadcast local model directory to remote worker if needed.
if should_use_spark_to_broadcast_file:
archive_path = SparkModelCache.add_local_model(spark, local_model_path)
model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))
def _predict_row_batch(predict_fn, args):
input_schema = model_metadata.get_input_schema()
pdf = None
for x in args:
if type(x) == pandas.DataFrame:
if len(args) != 1:
raise Exception(
"If passing a StructType column, there should be only one "
"input column, but got %d" % len(args)
)
pdf = x
if pdf is None:
args = list(args)
if input_schema is None:
names = [str(i) for i in range(len(args))]
else:
names = input_schema.input_names()
if len(args) > len(names):
args = args[: len(names)]
if len(args) < len(names):
raise MlflowException(
"Model input is missing columns. Expected {0} input columns {1},"
" but the model received only {2} unnamed input columns"
" (Since the columns were passed unnamed they are expected to be in"
" the order specified by the schema).".format(len(names), names, len(args))
)
pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)
result = predict_fn(pdf)
if not isinstance(result, pandas.DataFrame):
result = pandas.DataFrame(data=result)
elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type
if type(elem_type) == IntegerType:
result = result.select_dtypes(
[np.byte, np.ubyte, np.short, np.ushort, np.int32]
).astype(np.int32)
elif type(elem_type) == LongType:
result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])
elif type(elem_type) == FloatType:
result = result.select_dtypes(include=(np.number,)).astype(np.float32)
elif type(elem_type) == DoubleType:
result = result.select_dtypes(include=(np.number,)).astype(np.float64)
if len(result.columns) == 0:
raise MlflowException(
message="The the model did not produce any values compatible with the requested "
"type '{}'. Consider requesting udf with StringType or "
"Arraytype(StringType).".format(str(elem_type)),
error_code=INVALID_PARAMETER_VALUE,
)
if type(elem_type) == StringType:
result = result.applymap(str)
if type(result_type) == ArrayType:
return pandas.Series(result.to_numpy().tolist())
else:
return result[result.columns[0]]
result_type_hint = (
pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
)
@pandas_udf(result_type)
def udf(
iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]
) -> Iterator[result_type_hint]:
# importing here to prevent circular import
from mlflow.pyfunc.scoring_server.client import ScoringServerClient
# Note: this is a pandas udf function in iteration style, which takes an iterator of
# tuple of pandas.Series and outputs an iterator of pandas.Series.
scoring_server_proc = None
# TODO: Support virtual env.
if env_manager == "conda":
if should_use_spark_to_broadcast_file:
local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
archive_path
)
# Create individual conda_env_root_dir for each spark UDF task process.
conda_env_root_dir_on_executor = _get_or_create_conda_env_root_dir(nfs_root_dir)
else:
local_model_path_on_executor = local_model_path
conda_env_root_dir_on_executor = conda_env_root_dir
pyfunc_backend = _get_flavor_backend(
local_model_path_on_executor, no_conda=False, workers=1, install_mlflow=False,
conda_env_root_dir=conda_env_root_dir_on_executor
)
if should_use_spark_to_broadcast_file:
# Call "prepare_env" in advance in order to reduce scoring server launch time.
# So that we can use a shorter timeout when call `client.wait_server_ready`,
# otherwise we have to set a long timeout for `client.wait_server_ready` time,
# this prevents spark UDF task failing fast if other exception raised when scoring
# server launching.
# Set "capture_output" so that if "conda env create" command failed, the command
# stdout/stderr output will be attached to the exception message and included in
# driver side exception.
pyfunc_backend.prepare_env(model_uri=local_model_path_on_executor, capture_output=True)
server_port = find_free_port()
# launch scoring server
# TODO: adjust timeout for server requests handler.
scoring_server_proc = pyfunc_backend.serve(
model_uri=local_model_path_on_executor,
port=server_port,
host="127.0.0.1",
enable_mlserver=False,
synchronous=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
server_tail_logs = collections.deque(maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP)
def server_redirect_log_thread_func(child_stdout):
for line in child_stdout:
if isinstance(line, bytes):
decoded = line.decode()
else:
decoded = line
server_tail_logs.append(decoded)
sys.stdout.write("[model server] " + decoded)
server_redirect_log_thread = threading.Thread(
target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)
)
server_redirect_log_thread.setDaemon(True)
server_redirect_log_thread.start()
client = ScoringServerClient("127.0.0.1", server_port)
try:
client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
except Exception:
err_msg = "During spark UDF task execution, mlflow model server failed to launch. "
if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
err_msg += (
f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
"lines of MLflow model server output:\n"
)
else:
err_msg += "MLflow model server output:\n"
err_msg += "".join(server_tail_logs)
raise MlflowException(err_msg)
def batch_predict_fn(pdf):
return client.invoke(pdf)
elif env_manager == "local":
if should_use_spark_to_broadcast_file:
loaded_model, _ = SparkModelCache.get_or_load(archive_path)
else:
loaded_model = mlflow.pyfunc.load_model(local_model_path)
def batch_predict_fn(pdf):
return loaded_model.predict(pdf)
try:
for input_batch in iterator:
# If the UDF is called with only multiple arguments,
# the `input_batch` is a tuple which composes of several pd.Series/pd.DataFrame
# objects.
# If the UDF is called with only one argument,
# the `input_batch` instance will be an instance of `pd.Series`/`pd.DataFrame`,
if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
# UDF is called with only one argument
row_batch_args = (input_batch,)
else:
row_batch_args = input_batch
yield _predict_row_batch(batch_predict_fn, row_batch_args)
finally:
if scoring_server_proc is not None:
os.kill(scoring_server_proc.pid, signal.SIGTERM)
udf.metadata = model_metadata
@functools.wraps(udf)
def udf_with_default_cols(*args):
if len(args) == 0:
input_schema = model_metadata.get_input_schema()
if input_schema and len(input_schema.inputs) > 0:
if input_schema.has_input_names():
input_names = input_schema.input_names()
return udf(*input_names)
else:
raise MlflowException(
message="Cannot apply udf because no column names specified. The udf "
"expects {} columns with types: {}. Input column names could not be "
"inferred from the model signature (column names not found).".format(
len(input_schema.inputs),
input_schema.inputs,
),
error_code=INVALID_PARAMETER_VALUE,
)
else:
raise MlflowException(
"Attempting to apply udf on zero columns because no column names were "
"specified as arguments or inferred from the model signature.",
error_code=INVALID_PARAMETER_VALUE,
)
else:
return udf(*args)
return udf_with_default_cols
|
def spark_udf(spark, model_uri, result_type="double", env_manager="local"):
"""
A Spark UDF that can be used to invoke the Python function formatted model.
Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names
are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to
wrap the input in a struct. In that case, the data will be passed as a DataFrame with column
names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model
will get the data as a pandas DataFrame with 2 columns 'x' and 'y').
If a model contains a signature, the UDF can be called without specifying column name
arguments. In this case, the UDF will be called with column names from signature, so the
evaluation dataframe's column names must match the model signature's column names.
The predictions are filtered to contain only the columns that can be represented as the
``result_type``. If the ``result_type`` is string or array of strings, all predictions are
converted to string. If the result type is not an array type, the left most column with
matching type is returned.
NOTE: Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of
Spark (2.4 and below).
.. code-block:: python
:caption: Example
from pyspark.sql.functions import struct
predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
df.withColumn("prediction", predict(struct("name", "age"))).show()
:param spark: A SparkSession object.
:param model_uri: The location, in URI format, of the MLflow model with the
:py:mod:`mlflow.pyfunc` flavor. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param result_type: the return type of the user-defined function. The value can be either a
``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
The following classes of result type are supported:
- "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
``int32`` or an exception if there is none.
- "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
``int64`` or an exception if there is none.
- ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
size.
- "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
``float32`` or an exception if there is none.
- "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
``double`` or an exception if there is none.
- ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
an exception if there are no numeric columns.
- "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.
- ``ArrayType(StringType)``: All columns converted to ``string``.
:param env_manager: The environment manager to use in order to create the
software environment for model inference. Default value is ``local``,
The following values are supported:
- ``conda``: (Recommended) Use Conda to restore the software environment
that was used to train the model. Note that environment is only restored
in the context of the PySpark UDF; the software environment outside of
the UDF is unaffected.
- ``local``: Use the current Python environment for model inference, which
may differ from the environment used to train the model and may lead to
errors or invalid predictions.
:return: Spark UDF that applies the model's ``predict`` method to the data and returns a
type specified by ``result_type``, which by default is a double.
"""
# Scope Spark import to this method so users don't need pyspark to use non-Spark-related
# functionality.
import functools
from mlflow.pyfunc.spark_model_cache import SparkModelCache
from mlflow.utils._spark_utils import _SparkDirectoryDistributor
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import _parse_datatype_string
from pyspark.sql.types import (
ArrayType,
DataType as SparkDataType,
StructType as SparkStructType,
)
from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
from mlflow.models.cli import _get_flavor_backend
if env_manager not in ["local", "conda"]:
raise MlflowException(
f"Illegal env_manager value '{env_manager}'.", error_code=INVALID_PARAMETER_VALUE
)
if env_manager != "local" and os.name == "nt":
# TODO:
# support windows system.
# 1. Address the issue killing mlflow server process when pyspark UDF task process died
# (spark job canceled)
# 2. Addresss potential pyenv race condition issues on windows.
raise MlflowException(
f"`mlflow.pyfunc.spark_udf` env_manager value '{env_manager}' does not support "
"Windows systems."
)
# Check whether spark is in local or local-cluster mode
# this case all executors and driver share the same filesystem
is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")
nfs_root_dir = get_nfs_cache_root_dir()
should_use_nfs = nfs_root_dir is not None
should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)
conda_env_root_dir = _get_or_create_conda_env_root_dir(nfs_root_dir)
if not isinstance(result_type, SparkDataType):
result_type = _parse_datatype_string(result_type)
elem_type = result_type
if isinstance(elem_type, ArrayType):
elem_type = elem_type.elementType
supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
if not any([isinstance(elem_type, x) for x in supported_types]):
raise MlflowException(
message="Invalid result_type '{}'. Result type can only be one of or an array of one "
"of the following types: {}".format(str(elem_type), str(supported_types)),
error_code=INVALID_PARAMETER_VALUE,
)
local_model_path = _download_artifact_from_uri(
artifact_uri=model_uri, output_path=_get_or_create_model_cache_dir()
)
if env_manager == "local":
# Assume spark executor python environment is the same with spark driver side.
_warn_dependency_requirement_mismatches(local_model_path)
_logger.warning(
'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
"environment that was used during training, which may lead to errors or inaccurate "
'predictions. We recommend specifying `env_manager="conda"`, which automatically '
"recreates the environment that was used to train the model and performs inference "
"in the recreated environment."
)
else:
_logger.info(
"This UDF will use Conda to recreate the model's software environment for inference. "
"This may take extra time during execution."
)
if not sys.platform.startswith("linux"):
# TODO: support killing mlflow server launched in UDF task when spark job canceled
# for non-linux system.
# https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
_logger.warning(
"In order to run inference code in restored python environment, PySpark UDF "
"processes spawn MLflow Model servers as child processes. Due to system "
"limitations with handling SIGKILL signals, these MLflow Model server child "
"processes cannot be cleaned up if the Spark Job is canceled."
)
if not should_use_spark_to_broadcast_file:
# Prepare restored environment in driver side if possible.
if env_manager == "conda":
_get_flavor_backend(
local_model_path, no_conda=False, install_mlflow=False,
conda_env_root_dir=conda_env_root_dir
).prepare_env(
model_uri=local_model_path, capture_output=True
)
# Broadcast local model directory to remote worker if needed.
if should_use_spark_to_broadcast_file:
archive_path = SparkModelCache.add_local_model(spark, local_model_path)
model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))
def _predict_row_batch(predict_fn, args):
input_schema = model_metadata.get_input_schema()
pdf = None
for x in args:
if type(x) == pandas.DataFrame:
if len(args) != 1:
raise Exception(
"If passing a StructType column, there should be only one "
"input column, but got %d" % len(args)
)
pdf = x
if pdf is None:
args = list(args)
if input_schema is None:
names = [str(i) for i in range(len(args))]
else:
names = input_schema.input_names()
if len(args) > len(names):
args = args[: len(names)]
if len(args) < len(names):
raise MlflowException(
"Model input is missing columns. Expected {0} input columns {1},"
" but the model received only {2} unnamed input columns"
" (Since the columns were passed unnamed they are expected to be in"
" the order specified by the schema).".format(len(names), names, len(args))
)
pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)
result = predict_fn(pdf)
if not isinstance(result, pandas.DataFrame):
result = pandas.DataFrame(data=result)
elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type
if type(elem_type) == IntegerType:
result = result.select_dtypes(
[np.byte, np.ubyte, np.short, np.ushort, np.int32]
).astype(np.int32)
elif type(elem_type) == LongType:
result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])
elif type(elem_type) == FloatType:
result = result.select_dtypes(include=(np.number,)).astype(np.float32)
elif type(elem_type) == DoubleType:
result = result.select_dtypes(include=(np.number,)).astype(np.float64)
if len(result.columns) == 0:
raise MlflowException(
message="The the model did not produce any values compatible with the requested "
"type '{}'. Consider requesting udf with StringType or "
"Arraytype(StringType).".format(str(elem_type)),
error_code=INVALID_PARAMETER_VALUE,
)
if type(elem_type) == StringType:
result = result.applymap(str)
if type(result_type) == ArrayType:
return pandas.Series(result.to_numpy().tolist())
else:
return result[result.columns[0]]
result_type_hint = (
pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
)
@pandas_udf(result_type)
def udf(
iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]
) -> Iterator[result_type_hint]:
# importing here to prevent circular import
from mlflow.pyfunc.scoring_server.client import ScoringServerClient
# Note: this is a pandas udf function in iteration style, which takes an iterator of
# tuple of pandas.Series and outputs an iterator of pandas.Series.
scoring_server_proc = None
# TODO: Support virtual env.
if env_manager == "conda":
if should_use_spark_to_broadcast_file:
local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
archive_path
)
# Create individual conda_env_root_dir for each spark UDF task process.
conda_env_root_dir_on_executor = _get_or_create_conda_env_root_dir(nfs_root_dir)
else:
local_model_path_on_executor = local_model_path
conda_env_root_dir_on_executor = conda_env_root_dir
pyfunc_backend = _get_flavor_backend(
local_model_path_on_executor, no_conda=False, workers=1, install_mlflow=False,
conda_env_root_dir=conda_env_root_dir_on_executor
)
if should_use_spark_to_broadcast_file:
# Call "prepare_env" in advance in order to reduce scoring server launch time.
# So that we can use a shorter timeout when call `client.wait_server_ready`,
# otherwise we have to set a long timeout for `client.wait_server_ready` time,
# this prevents spark UDF task failing fast if other exception raised when scoring
# server launching.
# Set "capture_output" so that if "conda env create" command failed, the command
# stdout/stderr output will be attached to the exception message and included in
# driver side exception.
pyfunc_backend.prepare_env(model_uri=local_model_path_on_executor, capture_output=True)
server_port = find_free_port()
# launch scoring server
# TODO: adjust timeout for server requests handler.
scoring_server_proc = pyfunc_backend.serve(
model_uri=local_model_path_on_executor,
port=server_port,
host="127.0.0.1",
enable_mlserver=False,
synchronous=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
server_tail_logs = collections.deque(maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP)
def server_redirect_log_thread_func(child_stdout):
for line in child_stdout:
if isinstance(line, bytes):
decoded = line.decode()
else:
decoded = line
server_tail_logs.append(decoded)
sys.stdout.write("[model server] " + decoded)
server_redirect_log_thread = threading.Thread(
target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)
)
server_redirect_log_thread.setDaemon(True)
server_redirect_log_thread.start()
client = ScoringServerClient("127.0.0.1", server_port)
try:
client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
except Exception:
err_msg = "During spark UDF task execution, mlflow model server failed to launch. "
if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
err_msg += (
f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
"lines of MLflow model server output:\n"
)
else:
err_msg += "MLflow model server output:\n"
err_msg += "".join(server_tail_logs)
raise MlflowException(err_msg)
def batch_predict_fn(pdf):
return client.invoke(pdf)
elif env_manager == "local":
if should_use_spark_to_broadcast_file:
loaded_model, _ = SparkModelCache.get_or_load(archive_path)
else:
loaded_model = mlflow.pyfunc.load_model(local_model_path)
def batch_predict_fn(pdf):
return loaded_model.predict(pdf)
try:
for input_batch in iterator:
# If the UDF is called with only multiple arguments,
# the `input_batch` is a tuple which composes of several pd.Series/pd.DataFrame
# objects.
# If the UDF is called with only one argument,
# the `input_batch` instance will be an instance of `pd.Series`/`pd.DataFrame`,
if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
# UDF is called with only one argument
row_batch_args = (input_batch,)
else:
row_batch_args = input_batch
yield _predict_row_batch(batch_predict_fn, row_batch_args)
finally:
if scoring_server_proc is not None:
os.kill(scoring_server_proc.pid, signal.SIGTERM)
udf.metadata = model_metadata
@functools.wraps(udf)
def udf_with_default_cols(*args):
if len(args) == 0:
input_schema = model_metadata.get_input_schema()
if input_schema and len(input_schema.inputs) > 0:
if input_schema.has_input_names():
input_names = input_schema.input_names()
return udf(*input_names)
else:
raise MlflowException(
message="Cannot apply udf because no column names specified. The udf "
"expects {} columns with types: {}. Input column names could not be "
"inferred from the model signature (column names not found).".format(
len(input_schema.inputs),
input_schema.inputs,
),
error_code=INVALID_PARAMETER_VALUE,
)
else:
raise MlflowException(
"Attempting to apply udf on zero columns because no column names were "
"specified as arguments or inferred from the model signature.",
error_code=INVALID_PARAMETER_VALUE,
)
else:
return udf(*args)
return udf_with_default_cols
|
31,365 |
def is_there_private_packs_to_upload(public_index_json, private_index_path):
""" Checks if there are private packs that should be uploaded.
The check compares the private index with the public one to verify if Content commit hash of each private pack in
those files (private and public index files) are equal. If there is one private pack that has a different
content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT
be skipped.
Args:
public_index_json (dict) : The public index file.
private_index_path : Path to where the private index is located.
Returns:
(bool) True is there is at least one private pack that should be upload.
False otherwise (i.e there are no private packs that should upload)
"""
logging.debug("Checking if there are private packs to upload")
with open(os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")) as private_index_file:
private_index_json = json.load(private_index_file)
if was_private_pack_updated(private_index_json, public_index_json):
logging.debug(f"There is at least one private pack that was updated, upload should not be skipped")
return True
return False
|
def is_there_private_packs_to_upload(public_index_json, private_index_path):
""" Checks if there are private packs that should be uploaded.
The check compares the private index with the public one to verify if Content commit hash of each private pack in
those files (private and public index files) are equal. If there is one private pack that has a different
content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT
be skipped.
Args:
public_index_json (dict) : The public index.json file.
private_index_path : Path to where the private index is located.
Returns:
(bool) True is there is at least one private pack that should be upload.
False otherwise (i.e there are no private packs that should upload)
"""
logging.debug("Checking if there are private packs to upload")
with open(os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")) as private_index_file:
private_index_json = json.load(private_index_file)
if was_private_pack_updated(private_index_json, public_index_json):
logging.debug(f"There is at least one private pack that was updated, upload should not be skipped")
return True
return False
|
54,371 |
def test_authenticator_azure_feed_guid_credentials(
config: Config,
mock_remote: None,
http: type[httpretty.httpretty],
with_simple_keyring: None,
dummy_keyring: DummyBackend,
):
config.merge(
{
"repositories": {
"feed": {"url": "https://foo.bar/org/_packaging/feed/pypi/simple/"},
}
}
)
dummy_keyring.set_password(
"https://foo.bar/org/_packaging/feed/pypi/simple/",
None,
SimpleCredential("foo", "bar"),
)
dummy_keyring.set_password(
"https://foo.bar/other-org/_packaging/feed/pypi/simple/",
None,
SimpleCredential("baz", "qux"),
)
authenticator = Authenticator(config, NullIO())
authenticator.request(
"get", "https://foo.bar/org/_packaging/GUID/pypi/simple/a/1.0.0/a-1.0.0.whl"
)
request = http.last_request()
basic_auth = base64.b64encode(b"foo:bar").decode()
assert request.headers["Authorization"] == f"Basic {basic_auth}"
|
def test_authenticator_azure_feed_guid_credentials(
config: Config,
mock_remote: None,
http: type[httpretty.httpretty],
with_simple_keyring: None,
dummy_keyring: DummyBackend,
):
config.merge(
{
"repositories": {
"alpha": {
"url": "https://foo.bar/org-alpha/_packaging/feed/pypi/simple/"
},
"beta": {
"url": "https://foo.bar/org-beta/_packaging/feed/pypi/simple/"
},
},
"http-basic": {
"alpha": {"username": "foo", "password": "bar"},
"beta": {"username": "baz", "password": "qux"},
},
}
)
authenticator = Authenticator(config, NullIO())
authenticator.request(
"get",
"https://foo.bar/org-alpha/_packaging/GUID/pypi/simple/a/1.0.0/a-1.0.0.whl",
)
request = http.last_request()
basic_auth = base64.b64encode(b"foo:bar").decode()
assert request.headers["Authorization"] == f"Basic {basic_auth}"
authenticator.request(
"get",
"https://foo.bar/org-beta/_packaging/GUID/pypi/simple/b/1.0.0/a-1.0.0.whl",
)
request = http.last_request()
basic_auth = base64.b64encode(b"baz:qux").decode()
assert request.headers["Authorization"] == f"Basic {basic_auth}"
assert request.headers["Authorization"] == f"Basic {basic_auth}"
|
29,722 |
def get_device_index_and_uuid(device):
"""Get both device index and UUID from device index or UUID
Parameters
----------
device: ``int``, ``bytes`` or``str``
An ``int`` with the index of a GPU, or ``bytes`` or ``str`` with the UUID
of a CUDA (either GPU or MIG) device.
Returns
-------
out: ``dict``
Dictionary containing ``"device-index"`` and ``"uuid"`` keys.
Examples
--------
>>> get_device_index_and_uuid(0)
{'device-index': 0, 'uuid': b'GPU-e1006a74-5836-264f-5c26-53d19d212dfe'}
>>> get_device_index_and_uuid('GPU-e1006a74-5836-264f-5c26-53d19d212dfe')
{'device-index': 0, 'uuid': b'GPU-e1006a74-5836-264f-5c26-53d19d212dfe'}
>>> get_device_index_and_uuid('MIG-7feb6df5-eccf-5faa-ab00-9a441867e237')
{'device-index': 0, 'uuid': b'MIG-7feb6df5-eccf-5faa-ab00-9a441867e237'}
"""
init_once()
try:
device_index = int(device)
device_handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
uuid = pynvml.nvmlDeviceGetUUID(device_handle)
except ValueError:
uuid = device if isinstance(device, bytes) else bytes(device, "utf-8")
# Validate UUID, get index and UUID as seen with `nvidia-smi -L`
uuid_handle = pynvml.nvmlDeviceGetHandleByUUID(uuid)
device_index = pynvml.nvmlDeviceGetIndex(uuid_handle)
uuid = pynvml.nvmlDeviceGetUUID(uuid_handle)
return {"device-index": device_index, "uuid": uuid}
|
def get_device_index_and_uuid(device):
"""Get both device index and UUID from device index or UUID
Parameters
----------
device : int, bytes, or str
An ``int`` with the index of a GPU, or ``bytes`` or ``str`` with the UUID
of a CUDA (either GPU or MIG) device.
Returns
-------
out: ``dict``
Dictionary containing ``"device-index"`` and ``"uuid"`` keys.
Examples
--------
>>> get_device_index_and_uuid(0)
{'device-index': 0, 'uuid': b'GPU-e1006a74-5836-264f-5c26-53d19d212dfe'}
>>> get_device_index_and_uuid('GPU-e1006a74-5836-264f-5c26-53d19d212dfe')
{'device-index': 0, 'uuid': b'GPU-e1006a74-5836-264f-5c26-53d19d212dfe'}
>>> get_device_index_and_uuid('MIG-7feb6df5-eccf-5faa-ab00-9a441867e237')
{'device-index': 0, 'uuid': b'MIG-7feb6df5-eccf-5faa-ab00-9a441867e237'}
"""
init_once()
try:
device_index = int(device)
device_handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
uuid = pynvml.nvmlDeviceGetUUID(device_handle)
except ValueError:
uuid = device if isinstance(device, bytes) else bytes(device, "utf-8")
# Validate UUID, get index and UUID as seen with `nvidia-smi -L`
uuid_handle = pynvml.nvmlDeviceGetHandleByUUID(uuid)
device_index = pynvml.nvmlDeviceGetIndex(uuid_handle)
uuid = pynvml.nvmlDeviceGetUUID(uuid_handle)
return {"device-index": device_index, "uuid": uuid}
|
10,810 |
def deprecate_moved_module(old_module, new_module, stacklevel=2):
"""Warn about a module level location move of some part of Numba's
internals. stacklevel is 3 by default as most warning locations are
from `numba.XYZ` shims.
"""
if new_module is None:
msg = _moved_no_replacement
else:
msg = _moved_msg1.format(old_module, new_module)
warnings.warn(msg, category=NumbaDeprecationWarning, stacklevel=stacklevel+1)
|
def deprecate_moved_module(old_module, new_module, stacklevel=2):
"""Warn about a module level location move of some part of Numba's
internals. stacklevel is 3 by default as most warning locations are
from `numba.XYZ` shims.
"""
if new_module is None:
msg = _moved_no_replacement
else:
msg = _moved_msg1.format(old_module, new_module)
warnings.warn(msg, category=NumbaDeprecationWarning, stacklevel=stacklevel + 1)
|
35,983 |
def create_option(name, spec):
"""Create a click option from a name and partial specs as used in transport auth_options."""
from copy import deepcopy
spec = deepcopy(spec)
name_dashed = name.replace('_', '-')
option_name = '--{}'.format(name_dashed)
existing_option = spec.pop('option', None)
if spec.pop('switch', False):
option_name = '--{name}/--no-{name}'.format(name=name_dashed)
kwargs = {}
if 'default' not in spec:
kwargs['contextual_default'] = interactive_default(
'ssh', name, also_noninteractive=spec.pop('non_interactive_default', False)
)
if 'default' in spec:
kwargs['show_default'] = True
kwargs['cls'] = InteractiveOption
kwargs.update(spec)
if existing_option:
return existing_option(**kwargs)
return click.option(option_name, **kwargs)
|
def create_option(name, spec):
"""Create a click option from a name and partial specs as used in transport auth_options."""
from copy import deepcopy
spec = deepcopy(spec)
name_dashed = name.replace('_', '-')
option_name = '--{}'.format(name_dashed)
existing_option = spec.pop('option', None)
if spec.pop('switch', False):
option_name = '--{name}/--no-{name}'.format(name=name_dashed)
kwargs = {}
if 'default' not in spec:
kwargs['contextual_default'] = interactive_default(
'ssh', name, also_noninteractive=spec.pop('non_interactive_default', False)
)
else:
kwargs['show_default'] = True
kwargs['cls'] = InteractiveOption
kwargs.update(spec)
if existing_option:
return existing_option(**kwargs)
return click.option(option_name, **kwargs)
|
39,583 |
def ptrref_from_ptrcls(
*,
schema: s_schema.Schema,
ptrcls: s_pointers.PointerLike,
direction: s_pointers.PointerDirection = (
s_pointers.PointerDirection.Outbound),
cache: Optional[Dict[PtrRefCacheKey, irast.BasePointerRef]] = None,
typeref_cache: Optional[Dict[TypeRefCacheKey, irast.TypeRef]] = None,
include_descendants: bool = False,
) -> irast.BasePointerRef:
"""Return an IR pointer descriptor for a given schema pointer.
An IR PointerRef is an object that fully describes a schema pointer for
the purposes of query compilation.
Args:
schema:
A schema instance, in which the type *t* is defined.
ptrcls:
A :class:`schema.pointers.Pointer` instance for which to
return the PointerRef.
direction:
The direction of the pointer in the path expression.
Returns:
An instance of a subclass of :class:`ir.ast.BasePointerRef`
corresponding to the given schema pointer.
"""
if cache is not None:
cached = cache.get((ptrcls, direction, include_descendants))
if cached is not None:
return cached
kwargs: Dict[str, Any] = {}
ircls: Type[irast.BasePointerRef]
source_ref: Optional[irast.TypeRef]
target_ref: Optional[irast.TypeRef]
out_source: Optional[irast.TypeRef]
if isinstance(ptrcls, irast.TupleIndirectionLink):
ircls = irast.TupleIndirectionPointerRef
elif isinstance(ptrcls, irast.TypeIntersectionLink):
ircls = irast.TypeIntersectionPointerRef
kwargs['optional'] = ptrcls.is_optional()
kwargs['is_empty'] = ptrcls.is_empty()
kwargs['is_subtype'] = ptrcls.is_subtype()
kwargs['rptr_specialization'] = ptrcls.get_rptr_specialization()
elif isinstance(ptrcls, s_pointers.Pointer):
ircls = irast.PointerRef
kwargs['id'] = ptrcls.id
name = ptrcls.get_name(schema)
kwargs['module_id'] = schema.get_global(
s_mod.Module, name.module).id
else:
raise AssertionError(f'unexpected pointer class: {ptrcls}')
target = ptrcls.get_far_endpoint(schema, direction)
if target is not None and not isinstance(target, irast.TypeRef):
assert isinstance(target, s_types.Type)
target_ref = type_to_typeref(schema, target, cache=typeref_cache)
else:
target_ref = target
source = ptrcls.get_near_endpoint(schema, direction)
source_ptr: Optional[irast.BasePointerRef]
if (isinstance(ptrcls, s_props.Property)
and isinstance(source, s_links.Link)):
source_ptr = ptrref_from_ptrcls(
ptrcls=source,
direction=direction,
schema=schema,
cache=cache,
typeref_cache=typeref_cache,
)
source_ref = None
else:
if source is not None and not isinstance(source, irast.TypeRef):
assert isinstance(source, s_types.Type)
source_ref = type_to_typeref(schema,
source,
cache=typeref_cache)
else:
source_ref = source
source_ptr = None
if direction is s_pointers.PointerDirection.Inbound:
out_source = target_ref
out_target = source_ref
else:
out_source = source_ref
out_target = target_ref
out_cardinality, dir_cardinality = cardinality_from_ptrcls(
schema, ptrcls, direction=direction)
material_ptrcls = ptrcls.material_type(schema)
material_ptr: Optional[irast.BasePointerRef]
if material_ptrcls is not None and material_ptrcls is not ptrcls:
material_ptr = ptrref_from_ptrcls(
ptrcls=material_ptrcls,
direction=direction,
schema=schema,
cache=cache,
typeref_cache=typeref_cache,
include_descendants=include_descendants,
)
else:
material_ptr = None
union_components: Set[irast.BasePointerRef] = set()
union_of = ptrcls.get_union_of(schema)
union_is_concrete = False
if union_of:
union_ptrs = set()
for component in union_of.objects(schema):
assert isinstance(component, s_pointers.Pointer)
material_comp = component.material_type(schema)
union_ptrs.add(material_comp)
non_overlapping, union_is_concrete = s_utils.get_non_overlapping_union(
schema,
union_ptrs,
)
union_components = {
ptrref_from_ptrcls(
ptrcls=p,
direction=direction,
schema=schema,
cache=cache,
typeref_cache=typeref_cache,
) for p in non_overlapping
}
std_parent_name = None
for ancestor in ptrcls.get_ancestors(schema).objects(schema):
ancestor_name = ancestor.get_name(schema)
if ancestor_name.module == 'std' and ancestor.generic(schema):
std_parent_name = ancestor_name
break
is_derived = ptrcls.get_is_derived(schema)
base_ptr: Optional[irast.BasePointerRef]
if is_derived:
base_ptrcls = ptrcls.get_bases(schema).first(schema)
top = type(base_ptrcls).get_default_base_name()
if base_ptrcls.get_name(schema) != top:
base_ptr = ptrref_from_ptrcls(
ptrcls=base_ptrcls,
direction=direction,
schema=schema,
cache=cache,
typeref_cache=typeref_cache,
)
else:
base_ptr = None
else:
base_ptr = None
if (
material_ptr is None
and include_descendants
and isinstance(ptrcls, s_pointers.Pointer)
):
descendants = frozenset(
ptrref_from_ptrcls(
ptrcls=child,
direction=direction,
schema=schema,
cache=cache,
typeref_cache=typeref_cache,
)
for child in ptrcls.children(schema)
if not child.get_is_derived(schema)
)
else:
descendants = frozenset()
kwargs.update(dict(
out_source=out_source,
out_target=out_target,
name=ptrcls.get_name(schema),
shortname=ptrcls.get_shortname(schema),
path_id_name=ptrcls.get_path_id_name(schema),
std_parent_name=std_parent_name,
direction=direction,
source_ptr=source_ptr,
base_ptr=base_ptr,
material_ptr=material_ptr,
descendants=descendants,
is_derived=ptrcls.get_is_derived(schema),
is_computable=ptrcls.get_computable(schema),
union_components=union_components,
union_is_concrete=union_is_concrete,
has_properties=ptrcls.has_user_defined_properties(schema),
dir_cardinality=dir_cardinality,
out_cardinality=out_cardinality,
))
ptrref = ircls(**kwargs)
if cache is not None:
cache[ptrcls, direction, include_descendants] = ptrref
return ptrref
|
def ptrref_from_ptrcls(
*,
schema: s_schema.Schema,
ptrcls: s_pointers.PointerLike,
direction: s_pointers.PointerDirection = (
s_pointers.PointerDirection.Outbound),
cache: Optional[Dict[PtrRefCacheKey, irast.BasePointerRef]] = None,
typeref_cache: Optional[Dict[TypeRefCacheKey, irast.TypeRef]] = None,
include_descendants: bool = False,
) -> irast.BasePointerRef:
"""Return an IR pointer descriptor for a given schema pointer.
An IR PointerRef is an object that fully describes a schema pointer for
the purposes of query compilation.
Args:
schema:
A schema instance, in which the type *t* is defined.
ptrcls:
A :class:`schema.pointers.Pointer` instance for which to
return the PointerRef.
direction:
The direction of the pointer in the path expression.
Returns:
An instance of a subclass of :class:`ir.ast.BasePointerRef`
corresponding to the given schema pointer.
"""
if cache is not None:
cached = cache.get((ptrcls, direction, include_descendants))
if cached is not None:
return cached
kwargs: Dict[str, Any] = {}
ircls: Type[irast.BasePointerRef]
source_ref: Optional[irast.TypeRef]
target_ref: Optional[irast.TypeRef]
out_source: Optional[irast.TypeRef]
if isinstance(ptrcls, irast.TupleIndirectionLink):
ircls = irast.TupleIndirectionPointerRef
elif isinstance(ptrcls, irast.TypeIntersectionLink):
ircls = irast.TypeIntersectionPointerRef
kwargs['optional'] = ptrcls.is_optional()
kwargs['is_empty'] = ptrcls.is_empty()
kwargs['is_subtype'] = ptrcls.is_subtype()
kwargs['rptr_specialization'] = ptrcls.get_rptr_specialization()
elif isinstance(ptrcls, s_pointers.Pointer):
ircls = irast.PointerRef
kwargs['id'] = ptrcls.id
name = ptrcls.get_name(schema)
kwargs['module_id'] = schema.get_global(
s_mod.Module, name.module).id
else:
raise AssertionError(f'unexpected pointer class: {ptrcls}')
target = ptrcls.get_far_endpoint(schema, direction)
if target is not None and not isinstance(target, irast.TypeRef):
assert isinstance(target, s_types.Type)
target_ref = type_to_typeref(schema, target, cache=typeref_cache)
else:
target_ref = target
source = ptrcls.get_near_endpoint(schema, direction)
source_ptr: Optional[irast.BasePointerRef]
if (isinstance(ptrcls, s_props.Property)
and isinstance(source, s_links.Link)):
source_ptr = ptrref_from_ptrcls(
ptrcls=source,
direction=direction,
schema=schema,
cache=cache,
typeref_cache=typeref_cache,
)
source_ref = None
else:
if source is not None and not isinstance(source, irast.TypeRef):
assert isinstance(source, s_types.Type)
source_ref = type_to_typeref(schema,
source,
cache=typeref_cache)
else:
source_ref = source
source_ptr = None
if direction is s_pointers.PointerDirection.Inbound:
out_source = target_ref
out_target = source_ref
else:
out_source = source_ref
out_target = target_ref
out_cardinality, dir_cardinality = cardinality_from_ptrcls(
schema, ptrcls, direction=direction)
material_ptrcls = ptrcls.material_type(schema)
material_ptr: Optional[irast.BasePointerRef]
if material_ptrcls is not None and material_ptrcls is not ptrcls:
material_ptr = ptrref_from_ptrcls(
ptrcls=material_ptrcls,
direction=direction,
schema=schema,
cache=cache,
typeref_cache=typeref_cache,
include_descendants=include_descendants,
)
else:
material_ptr = None
union_components: Set[irast.BasePointerRef] = set()
union_of = ptrcls.get_union_of(schema)
union_is_concrete = False
if union_of:
union_ptrs = set()
for component in union_of.objects(schema):
assert isinstance(component, s_pointers.Pointer)
material_comp = component.material_type(schema)
union_ptrs.add(material_comp)
non_overlapping, union_is_concrete = s_utils.get_non_overlapping_union(
schema,
union_ptrs,
)
union_components = {
ptrref_from_ptrcls(
ptrcls=p,
direction=direction,
schema=schema,
cache=cache,
typeref_cache=typeref_cache,
) for p in non_overlapping
}
std_parent_name = None
for ancestor in ptrcls.get_ancestors(schema).objects(schema):
ancestor_name = ancestor.get_name(schema)
if ancestor_name.module == 'std' and ancestor.generic(schema):
std_parent_name = ancestor_name
break
is_derived = ptrcls.get_is_derived(schema)
base_ptr: Optional[irast.BasePointerRef]
if is_derived:
base_ptrcls = ptrcls.get_bases(schema).first(schema)
top_name = type(base_ptrcls).get_default_base_name()
if base_ptrcls.get_name(schema) != top:
base_ptr = ptrref_from_ptrcls(
ptrcls=base_ptrcls,
direction=direction,
schema=schema,
cache=cache,
typeref_cache=typeref_cache,
)
else:
base_ptr = None
else:
base_ptr = None
if (
material_ptr is None
and include_descendants
and isinstance(ptrcls, s_pointers.Pointer)
):
descendants = frozenset(
ptrref_from_ptrcls(
ptrcls=child,
direction=direction,
schema=schema,
cache=cache,
typeref_cache=typeref_cache,
)
for child in ptrcls.children(schema)
if not child.get_is_derived(schema)
)
else:
descendants = frozenset()
kwargs.update(dict(
out_source=out_source,
out_target=out_target,
name=ptrcls.get_name(schema),
shortname=ptrcls.get_shortname(schema),
path_id_name=ptrcls.get_path_id_name(schema),
std_parent_name=std_parent_name,
direction=direction,
source_ptr=source_ptr,
base_ptr=base_ptr,
material_ptr=material_ptr,
descendants=descendants,
is_derived=ptrcls.get_is_derived(schema),
is_computable=ptrcls.get_computable(schema),
union_components=union_components,
union_is_concrete=union_is_concrete,
has_properties=ptrcls.has_user_defined_properties(schema),
dir_cardinality=dir_cardinality,
out_cardinality=out_cardinality,
))
ptrref = ircls(**kwargs)
if cache is not None:
cache[ptrcls, direction, include_descendants] = ptrref
return ptrref
|
56,831 |
def parse_mobile_users(domain, user_filters, task=None, total_count=None):
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
fields_definition = CustomDataFieldsDefinition.get_or_create(
domain,
UserFieldsView.field_type
)
unrecognized_user_data_keys = set()
user_groups_length = 0
max_location_length = 0
phone_numbers_length = 0
user_dicts = []
(is_cross_domain, domains_list) = get_domains_from_user_filters(domain, user_filters)
current_user_downloaded_count = 0
for current_domain in domains_list:
location_cache = LocationIdToSiteCodeCache(current_domain)
if EnterpriseMobileWorkerSettings.is_domain_using_custom_deactivation(domain):
deactivation_triggers = dict(
(f.user_id, f.deactivate_after.strftime('%m-%Y'))
for f in DeactivateMobileWorkerTrigger.objects.filter(domain=domain)
)
else:
deactivation_triggers = {}
for n, user in enumerate(get_mobile_users_by_filters(current_domain, user_filters)):
group_memoizer = load_memoizer(current_domain)
group_names = sorted([
group_memoizer.get(id).name for id in Group.by_user_id(user.user_id, wrap=False)
], key=alphanumeric_sort_key)
user_dict = make_mobile_user_dict(
user,
group_names,
location_cache,
current_domain,
fields_definition,
deactivation_triggers,
)
user_dicts.append(user_dict)
unrecognized_user_data_keys.update(user_dict['uncategorized_data'])
user_groups_length = max(user_groups_length, len(group_names))
max_location_length = max(max_location_length, len(user_dict["location_code"]))
user_phone_numbers = [k for k in user_dict.keys() if 'phone-number' in k]
phone_numbers_length = max(phone_numbers_length, len(user_phone_numbers))
current_user_downloaded_count += 1
DownloadBase.set_progress(task, current_user_downloaded_count, total_count)
user_headers = [
'username', 'password', 'name', 'email', 'language', 'role',
'user_id', 'is_active', 'User IMEIs (read only)', 'registered_on (read only)',
'last_submission (read only)', 'last_sync (read only)',
]
user_headers.extend(json_to_headers(
{'phone-number': list(range(1, phone_numbers_length + 1))}
))
if domain_has_privilege(domain, privileges.APP_USER_PROFILES):
user_headers += ['user_profile']
if EnterpriseMobileWorkerSettings.is_domain_using_custom_deactivation(domain):
user_headers += ['deactivate_after']
user_data_fields = [f.slug for f in fields_definition.get_fields(include_system=False)]
user_headers.extend(build_data_headers(user_data_fields))
user_headers.extend(build_data_headers(
unrecognized_user_data_keys,
header_prefix='uncategorized_data'
))
user_headers.extend(json_to_headers(
{'group': list(range(1, user_groups_length + 1))}
))
if domain_has_privilege(domain, privileges.LOCATIONS):
user_headers.extend(json_to_headers(
{'location_code': list(range(1, max_location_length + 1))}
))
if is_cross_domain:
user_headers += ['domain']
return user_headers, get_user_rows(user_dicts, user_headers)
|
def parse_mobile_users(domain, user_filters, task=None, total_count=None):
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
fields_definition = CustomDataFieldsDefinition.get_or_create(
domain,
UserFieldsView.field_type
)
unrecognized_user_data_keys = set()
user_groups_length = 0
max_location_length = 0
phone_numbers_length = 0
user_dicts = []
(is_cross_domain, domains_list) = get_domains_from_user_filters(domain, user_filters)
current_user_downloaded_count = 0
for current_domain in domains_list:
location_cache = LocationIdToSiteCodeCache(current_domain)
if EnterpriseMobileWorkerSettings.is_domain_using_custom_deactivation(domain):
deactivation_triggers = {
f.user_id: f.deactivate_after.strftime('%m-%Y')
for f in DeactivateMobileWorkerTrigger.objects.filter(domain=domain)
}
else:
deactivation_triggers = {}
for n, user in enumerate(get_mobile_users_by_filters(current_domain, user_filters)):
group_memoizer = load_memoizer(current_domain)
group_names = sorted([
group_memoizer.get(id).name for id in Group.by_user_id(user.user_id, wrap=False)
], key=alphanumeric_sort_key)
user_dict = make_mobile_user_dict(
user,
group_names,
location_cache,
current_domain,
fields_definition,
deactivation_triggers,
)
user_dicts.append(user_dict)
unrecognized_user_data_keys.update(user_dict['uncategorized_data'])
user_groups_length = max(user_groups_length, len(group_names))
max_location_length = max(max_location_length, len(user_dict["location_code"]))
user_phone_numbers = [k for k in user_dict.keys() if 'phone-number' in k]
phone_numbers_length = max(phone_numbers_length, len(user_phone_numbers))
current_user_downloaded_count += 1
DownloadBase.set_progress(task, current_user_downloaded_count, total_count)
user_headers = [
'username', 'password', 'name', 'email', 'language', 'role',
'user_id', 'is_active', 'User IMEIs (read only)', 'registered_on (read only)',
'last_submission (read only)', 'last_sync (read only)',
]
user_headers.extend(json_to_headers(
{'phone-number': list(range(1, phone_numbers_length + 1))}
))
if domain_has_privilege(domain, privileges.APP_USER_PROFILES):
user_headers += ['user_profile']
if EnterpriseMobileWorkerSettings.is_domain_using_custom_deactivation(domain):
user_headers += ['deactivate_after']
user_data_fields = [f.slug for f in fields_definition.get_fields(include_system=False)]
user_headers.extend(build_data_headers(user_data_fields))
user_headers.extend(build_data_headers(
unrecognized_user_data_keys,
header_prefix='uncategorized_data'
))
user_headers.extend(json_to_headers(
{'group': list(range(1, user_groups_length + 1))}
))
if domain_has_privilege(domain, privileges.LOCATIONS):
user_headers.extend(json_to_headers(
{'location_code': list(range(1, max_location_length + 1))}
))
if is_cross_domain:
user_headers += ['domain']
return user_headers, get_user_rows(user_dicts, user_headers)
|
119 |
def main(ol_config: str, filename: str, batch_size=5000, dry_run=False):
if not dry_run:
load_config(ol_config)
date = datetime.date.today()
batch_name = "%s-%04d%02d" % ('pressbooks', date.year, date.month)
batch = Batch.find(batch_name) or Batch.new(batch_name)
with open(filename, 'rb') as f:
book_items = []
books = json.load(f)
for line_num, record in enumerate(books):
# try:
b = convert_pressbooks_to_ol(record)
book_items.append({'ia_id': b['source_records'][0], 'data': b})
# except (AssertionError, IndexError) as e:
# logger.info(f"Error: {e} from {line}")
if dry_run:
print(json.dumps(b))
# If we have enough items, submit a batch
elif not ((line_num + 1) % batch_size):
batch.add_items(book_items)
book_items = [] # clear added items
# Add any remaining book_items to batch
if not dry_run and book_items:
batch.add_items(book_items)
|
def main(ol_config: str, filename: str, batch_size=5000, dry_run=False):
if not dry_run:
load_config(ol_config)
date = datetime.date.today()
batch_name = f"pressbooks-{date:%Y}{date:%m}"
batch = Batch.find(batch_name) or Batch.new(batch_name)
with open(filename, 'rb') as f:
book_items = []
books = json.load(f)
for line_num, record in enumerate(books):
# try:
b = convert_pressbooks_to_ol(record)
book_items.append({'ia_id': b['source_records'][0], 'data': b})
# except (AssertionError, IndexError) as e:
# logger.info(f"Error: {e} from {line}")
if dry_run:
print(json.dumps(b))
# If we have enough items, submit a batch
elif not ((line_num + 1) % batch_size):
batch.add_items(book_items)
book_items = [] # clear added items
# Add any remaining book_items to batch
if not dry_run and book_items:
batch.add_items(book_items)
|
1,507 |
def plot_roc_curve(estimator, X, y, sample_weight=None,
drop_intermediate=True, response_method="auto",
name=None, ax=None, **kwargs):
"""Plot Receiver operating characteristic (ROC) curve.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <visualizations>`.
Parameters
----------
estimator : estimator instance
Trained classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : boolean, default=True
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name of ROC Curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
Returns
-------
display : :class:`~sklearn.metrics.RocCurveDisplay`
Object that stores computed values.
Examples
--------
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> from sklearn import datasets, metrics, model_selection, svm
>>> X, y = datasets.make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = model_selection.train_test_split(\
X, y, random_state=0)
>>> clf = svm.SVC(random_state=0)
>>> clf.fit(X_train, y_train)
SVC(random_state=0)
>>> metrics.plot_roc_curve(clf, X_test, y_test) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
check_matplotlib_support('plot_roc_curve')
check_is_fitted(estimator)
if response_method not in ("predict_proba", "decision_function", "auto"):
raise ValueError("response_method must be 'predict_proba', "
"'decision_function' or 'auto'")
classificaiton_error = ("{} should be a binary classifer".format(
estimator.__class__.__name__))
if is_classifier(estimator):
if len(estimator.classes_) != 2:
raise ValueError(classificaiton_error)
pos_label = estimator.classes_[1]
else:
raise ValueError(classificaiton_error)
if response_method != "auto":
prediction_method = getattr(estimator, response_method, None)
if prediction_method is None:
raise ValueError(
"response method {} is not defined".format(response_method))
else:
predict_proba = getattr(estimator, 'predict_proba', None)
decision_function = getattr(estimator, 'decision_function', None)
prediction_method = predict_proba or decision_function
if prediction_method is None:
raise ValueError('response methods not defined')
y_pred = prediction_method(X)
if y_pred.ndim != 1:
y_pred = y_pred[:, 1]
fpr, tpr, _ = roc_curve(y, y_pred, pos_label=pos_label,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate)
roc_auc = auc(fpr, tpr)
viz = RocCurveDisplay(fpr, tpr, roc_auc, estimator.__class__.__name__)
return viz.plot(ax=ax, name=name, **kwargs)
|
def plot_roc_curve(estimator, X, y, sample_weight=None,
drop_intermediate=True, response_method="auto",
name=None, ax=None, **kwargs):
"""Plot Receiver operating characteristic (ROC) curve.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <visualizations>`.
Parameters
----------
estimator : estimator instance
Trained classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : boolean, default=True
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name of ROC Curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
Returns
-------
display : :class:`~sklearn.metrics.RocCurveDisplay`
Object that stores computed values.
Examples
--------
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> from sklearn import datasets, metrics, model_selection, svm
>>> X, y = datasets.make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = model_selection.train_test_split(\
X, y, random_state=0)
>>> clf = svm.SVC(random_state=0)
>>> clf.fit(X_train, y_train)
SVC(random_state=0)
>>> metrics.plot_roc_curve(clf, X_test, y_test) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
check_matplotlib_support('plot_roc_curve')
check_is_fitted(estimator)
if response_method not in ("predict_proba", "decision_function", "auto"):
raise ValueError("response_method must be 'predict_proba', "
"'decision_function' or 'auto'")
classification_error = ("{} should be a binary classifer".format(
estimator.__class__.__name__))
if is_classifier(estimator):
if len(estimator.classes_) != 2:
raise ValueError(classificaiton_error)
pos_label = estimator.classes_[1]
else:
raise ValueError(classificaiton_error)
if response_method != "auto":
prediction_method = getattr(estimator, response_method, None)
if prediction_method is None:
raise ValueError(
"response method {} is not defined".format(response_method))
else:
predict_proba = getattr(estimator, 'predict_proba', None)
decision_function = getattr(estimator, 'decision_function', None)
prediction_method = predict_proba or decision_function
if prediction_method is None:
raise ValueError('response methods not defined')
y_pred = prediction_method(X)
if y_pred.ndim != 1:
y_pred = y_pred[:, 1]
fpr, tpr, _ = roc_curve(y, y_pred, pos_label=pos_label,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate)
roc_auc = auc(fpr, tpr)
viz = RocCurveDisplay(fpr, tpr, roc_auc, estimator.__class__.__name__)
return viz.plot(ax=ax, name=name, **kwargs)
|
17,344 |
def ones_like(other, dtype: DTypeLike = None):
"""Return a new object of ones with the same shape and
type as a given dataarray or dataset.
Parameters
----------
other : DataArray, Dataset, or Variable
The reference object in input
dtype : dtype, optional
dtype of the new array. If omitted, it defaults to other.dtype.
Returns
-------
out : same as object
New object of ones with the same shape and type as other.
Examples
--------
>>> import numpy as np
>>> import xarray as xr
>>> x = xr.DataArray(np.arange(6).reshape(2, 3),
... dims=['lat', 'lon'],
... coords={'lat': [1, 2], 'lon': [0, 1, 2]})
>>> x
<xarray.DataArray (lat: 2, lon: 3)>
array([[0, 1, 2],
[3, 4, 5]])
Coordinates:
* lat (lat) int64 1 2
* lon (lon) int64 0 1 2
>>> >>> xr.ones_like(x)
<xarray.DataArray (lat: 2, lon: 3)>
array([[1, 1, 1],
[1, 1, 1]])
Coordinates:
* lat (lat) int64 1 2
* lon (lon) int64 0 1 2
See also
--------
zeros_like
full_like
"""
return full_like(other, 1, dtype)
|
def ones_like(other, dtype: DTypeLike = None):
"""Return a new object of ones with the same shape and
type as a given dataarray or dataset.
Parameters
----------
other : DataArray, Dataset, or Variable
The reference object. The output will have the same dimensions and coordinates as this object.
dtype : dtype, optional
dtype of the new array. If omitted, it defaults to other.dtype.
Returns
-------
out : same as object
New object of ones with the same shape and type as other.
Examples
--------
>>> import numpy as np
>>> import xarray as xr
>>> x = xr.DataArray(np.arange(6).reshape(2, 3),
... dims=['lat', 'lon'],
... coords={'lat': [1, 2], 'lon': [0, 1, 2]})
>>> x
<xarray.DataArray (lat: 2, lon: 3)>
array([[0, 1, 2],
[3, 4, 5]])
Coordinates:
* lat (lat) int64 1 2
* lon (lon) int64 0 1 2
>>> >>> xr.ones_like(x)
<xarray.DataArray (lat: 2, lon: 3)>
array([[1, 1, 1],
[1, 1, 1]])
Coordinates:
* lat (lat) int64 1 2
* lon (lon) int64 0 1 2
See also
--------
zeros_like
full_like
"""
return full_like(other, 1, dtype)
|
26,951 |
def task_instance_link(attr):
"""Generates a URL to the Graph view for a TaskInstance."""
dag_id = attr.get('dag_id')
task_id = attr.get('task_id')
execution_date = attr.get('dag_run.execution_date') or attr.get('execution_date') or timezone.utcnow()
url = url_for('Airflow.task', dag_id=dag_id, task_id=task_id, execution_date=execution_date)
url_root = url_for(
'Airflow.graph', dag_id=dag_id, root=task_id, execution_date=execution_date.isoformat()
)
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="material-icons" style="margin-left:0;"
aria-hidden="true">filter_alt</span>
</a>
</span>
"""
).format(url=url, task_id=task_id, url_root=url_root)
|
def task_instance_link(attr):
"""Generates a URL to the Graph view for a TaskInstance."""
dag_id = attr.get('dag_id')
task_id = attr.get('task_id')
execution_date = attr.get('dag_run.execution_date') or attr.get('execution_date') or timezone.utcnow()
url = url_for('Airflow.task', dag_id=dag_id, task_id=task_id, execution_date=execution_date.isoformat())
url_root = url_for(
'Airflow.graph', dag_id=dag_id, root=task_id, execution_date=execution_date.isoformat()
)
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="material-icons" style="margin-left:0;"
aria-hidden="true">filter_alt</span>
</a>
</span>
"""
).format(url=url, task_id=task_id, url_root=url_root)
|
34,839 |
def test_resources_fingerprints_remain_after_being_cached(
tmp_path: Path,
temp_cache: LocalTrainingCache,
train_with_schema: Callable,
caplog: LogCaptureFixture,
):
train_schema = GraphSchema(
{
"train": SchemaNode(
needs={},
uses=PersistableTestComponent,
fn="train",
constructor_name="create",
config={"test_value": "4"},
is_target=True,
),
"process": SchemaNode(
needs={"resource": "train"},
uses=PersistableTestComponent,
fn="run_inference",
constructor_name="load",
config={},
is_target=True,
),
}
)
# Train and cache.
train_with_schema(train_schema, temp_cache)
# We can determine if a cached `Resource` has a static fingerprint by comparing two
# subsequent cache entries of a child node.
import sqlalchemy as sa
with temp_cache._sessionmaker.begin() as session:
# This will get the cache entry for the "process" node.
query_for_most_recently_used_entry = sa.select(temp_cache.CacheEntry).order_by(
temp_cache.CacheEntry.last_used.desc()
)
entry = session.execute(query_for_most_recently_used_entry).scalars().first()
# The fingerprint key will incorporate the fingerprint of the `Resource`
# provided by the "train" node. We save this key to compare after the next run.
fingerprint_key = entry.fingerprint_key
# Deleting the entry will force it to be recreated next train.
delete_query = sa.delete(temp_cache.CacheEntry).where(
temp_cache.CacheEntry.fingerprint_key == fingerprint_key
)
session.execute(delete_query)
# In this second train, the Resource output of "train" will be retrieved from the
# cache.
train_with_schema(train_schema, temp_cache)
with temp_cache._sessionmaker.begin() as session:
# This will get the new cache entry for the "process" node.
query_for_most_recently_used_entry = sa.select(temp_cache.CacheEntry).order_by(
temp_cache.CacheEntry.last_used.desc()
)
entry = session.execute(query_for_most_recently_used_entry).scalars().first()
# Assert the fingerptint key of the new entry is the same. This confirms that
# the Resource from the cache has the same fingerprint.
assert entry.fingerprint_key == fingerprint_key
|
def test_resources_fingerprints_remain_after_being_cached(
tmp_path: Path,
temp_cache: LocalTrainingCache,
train_with_schema: Callable,
caplog: LogCaptureFixture,
):
train_schema = GraphSchema(
{
"train": SchemaNode(
needs={},
uses=PersistableTestComponent,
fn="train",
constructor_name="create",
config={"test_value": "4"},
is_target=True,
),
"process": SchemaNode(
needs={"resource": "train"},
uses=PersistableTestComponent,
fn="run_inference",
constructor_name="load",
config={},
is_target=True,
),
}
)
# Train and cache.
train_with_schema(train_schema, temp_cache)
# We can determine if a cached `Resource` has a static fingerprint by comparing two
# subsequent cache entries of a child node.
import sqlalchemy as sa
with temp_cache._sessionmaker.begin() as session:
# This will get the cache entry for the "process" node.
query_for_most_recently_used_entry = sa.select(temp_cache.CacheEntry).order_by(
temp_cache.CacheEntry.last_used.desc()
)
entry = session.execute(query_for_most_recently_used_entry).scalars().first()
# The fingerprint key will incorporate the fingerprint of the `Resource`
# provided by the "train" node. We save this key to compare after the next run.
fingerprint_key = entry.fingerprint_key
# Deleting the entry will force it to be recreated next train.
delete_query = sa.delete(temp_cache.CacheEntry).where(
temp_cache.CacheEntry.fingerprint_key == fingerprint_key
)
session.execute(delete_query)
# In this second train, the Resource output of "train" will be retrieved from the
# cache.
train_with_schema(train_schema, temp_cache)
with temp_cache._sessionmaker.begin() as session:
# This will get the new cache entry for the "process" node.
query_for_most_recently_used_entry = sa.select(temp_cache.CacheEntry).order_by(
temp_cache.CacheEntry.last_used.desc()
)
entry = session.execute(query_for_most_recently_used_entry).scalars().first()
# Assert the fingerprint key of the new entry is the same. This confirms that
# the Resource from the cache has the same fingerprint.
assert entry.fingerprint_key == fingerprint_key
|
32,023 |
def main():
params: dict = demisto.params()
url = params.get('host', '').rstrip('/') + '/v1.0/'
tenant = params.get('tenant_id')
auth_and_token_url = params.get('auth_id', '')
enc_key = params.get('enc_key')
verify = not params.get('insecure', False)
self_deployed: bool = params.get('self_deployed', False)
redirect_uri = params.get('redirect_uri', '')
auth_code = params.get('auth_code', '')
proxy = params.get('proxy', False)
commands = {
'msgraph-user-test': test_function,
'test-module': test_function,
'msgraph-user-unblock': unblock_user_command,
'msgraph-user-terminate-session': disable_user_account_command,
'msgraph-user-account-disable': disable_user_account_command,
'msgraph-user-update': update_user_command,
'msgraph-user-change-password': change_password_user_command,
'msgraph-user-delete': delete_user_command,
'msgraph-user-create': create_user_command,
'msgraph-user-get-delta': get_delta_command,
'msgraph-user-get': get_user_command,
'msgraph-user-list': list_users_command,
'msgraph-direct-reports': get_direct_reports_command,
'msgraph-user-get-manager': get_manager_command,
'msgraph-user-assign-manager': assign_manager_command,
'msgraph-user-session-revoke': revoke_user_session_command
}
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client: MsGraphClient = MsGraphClient(tenant_id=tenant, auth_id=auth_and_token_url, enc_key=enc_key,
app_name=APP_NAME, base_url=url, verify=verify, proxy=proxy,
self_deployed=self_deployed, redirect_uri=redirect_uri,
auth_code=auth_code)
human_readable, entry_context, raw_response = commands[command](client, demisto.args()) # type: ignore
return_outputs(readable_output=human_readable, outputs=entry_context, raw_response=raw_response)
except Exception as err:
return_error(str(err))
|
def main():
params: dict = demisto.params()
url = params.get('host', '').rstrip('/') + '/v1.0/'
tenant = params.get('tenant_id')
auth_and_token_url = params.get('auth_id', '')
enc_key = params.get('enc_key')
verify = not params.get('insecure', False)
self_deployed: bool = params.get('self_deployed', False)
redirect_uri = params.get('redirect_uri', '')
auth_code = params.get('auth_code', '')
proxy = params.get('proxy', False)
commands = {
'msgraph-user-test': test_function,
'test-module': test_function,
'msgraph-user-unblock': unblock_user_command,
'msgraph-user-terminate-session': disable_user_account_command,
'msgraph-user-account-disable': disable_user_account_command,
'msgraph-user-update': update_user_command,
'msgraph-user-change-password': change_password_user_command,
'msgraph-user-delete': delete_user_command,
'msgraph-user-create': create_user_command,
'msgraph-user-get-delta': get_delta_command,
'msgraph-user-get': get_user_command,
'msgraph-user-list': list_users_command,
'msgraph-direct-reports': get_direct_reports_command,
'msgraph-user-get-manager': get_manager_command,
'msgraph-user-assign-manager': assign_manager_command,
'msgraph-user-session-revoke': revoke_user_session_command,
}
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client: MsGraphClient = MsGraphClient(tenant_id=tenant, auth_id=auth_and_token_url, enc_key=enc_key,
app_name=APP_NAME, base_url=url, verify=verify, proxy=proxy,
self_deployed=self_deployed, redirect_uri=redirect_uri,
auth_code=auth_code)
human_readable, entry_context, raw_response = commands[command](client, demisto.args()) # type: ignore
return_outputs(readable_output=human_readable, outputs=entry_context, raw_response=raw_response)
except Exception as err:
return_error(str(err))
|
171 |
def get_template(update, use_template='fedora_errata_template'):
"""
Build the update notice for a given update.
Args:
update (bodhi.server.models.Update): The update to generate a template about.
use_template (basestring): The name of the variable in bodhi.server.mail that references the
template to generate this notice with.
Returns:
list: A list of templates for the given update.
"""
from bodhi.server.models import UpdateStatus, UpdateType
use_template = read_template(use_template)
line = six.text_type('-' * 80) + '\n'
templates = []
for build in update.builds:
h = get_rpm_header(build.nvr)
info = {}
info['date'] = str(update.date_pushed)
info['name'] = h['name']
info['summary'] = h['summary']
info['version'] = h['version']
info['release'] = h['release']
info['url'] = h['url']
if update.status is UpdateStatus.testing:
info['testing'] = ' Test'
info['yum_repository'] = ' --enablerepo=updates-testing'
else:
info['testing'] = ''
info['yum_repository'] = ''
info['subject'] = u"%s%s%s Update: %s" % (
update.type is UpdateType.security and '[SECURITY] ' or '',
update.release.long_name, info['testing'], build.nvr)
info['updateid'] = update.alias
info['description'] = h['description']
info['product'] = update.release.long_name
info['notes'] = ""
if update.notes and len(update.notes):
info['notes'] = u"Update Information:\n\n%s\n" % \
'\n'.join(wrap(update.notes, width=80))
info['notes'] += line
# Add this updates referenced Bugzillas and CVEs
i = 1
info['references'] = ""
if len(update.bugs) or len(update.cves):
info['references'] = u"References:\n\n"
parent = True in [bug.parent for bug in update.bugs]
for bug in update.bugs:
# Don't show any tracker bugs for security updates
if update.type is UpdateType.security:
# If there is a parent bug, don't show trackers
if parent and not bug.parent:
log.debug("Skipping tracker bug %s" % bug)
continue
title = (bug.title != 'Unable to fetch title'
and bug.title != 'Invalid bug number'
and bug.title != 'Private bug') \
and ' - %s' % bug.title or ''
info['references'] += u" [ %d ] Bug #%d%s\n %s\n" % \
(i, bug.bug_id, title, bug.url)
i += 1
for cve in update.cves:
info['references'] += u" [ %d ] %s\n %s\n" % \
(i, cve.cve_id, cve.url)
i += 1
info['references'] += line
# Find the most recent update for this package, other than this one
try:
lastpkg = build.get_latest()
except AttributeError:
# Not all build types have the get_latest() method, such as ModuleBuilds.
lastpkg = None
# Grab the RPM header of the previous update, and generate a ChangeLog
info['changelog'] = u""
if lastpkg:
oldh = get_rpm_header(lastpkg)
oldtime = oldh['changelogtime']
text = oldh['changelogtext']
del oldh
if not text:
oldtime = 0
elif len(text) != 1:
oldtime = oldtime[0]
info['changelog'] = u"ChangeLog:\n\n%s%s" % \
(to_unicode(build.get_changelog(oldtime)), line)
try:
templates.append((info['subject'], use_template % info))
except UnicodeDecodeError:
# We can't trust the strings we get from RPM
log.debug("UnicodeDecodeError! Will try again after decoding")
for (key, value) in info.items():
if value:
info[key] = to_unicode(value)
templates.append((info['subject'], use_template % info))
return templates
|
def get_template(update, use_template='fedora_errata_template'):
"""
Build the update notice for a given update.
Args:
update (bodhi.server.models.Update): The update to generate a template about.
use_template (basestring): The name of the variable in bodhi.server.mail that references the
template to generate this notice with.
Returns:
list: A list of templates for the given update.
"""
from bodhi.server.models import UpdateStatus, UpdateType
use_template = read_template(use_template)
line = six.text_type('-' * 80) + '\n'
templates = []
for build in update.builds:
h = get_rpm_header(build.nvr)
info = {}
info['date'] = str(update.date_pushed)
info['name'] = h['name']
info['summary'] = h['summary']
info['version'] = h['version']
info['release'] = h['release']
info['url'] = h['url']
if update.status is UpdateStatus.testing:
info['testing'] = ' Test'
info['yum_repository'] = ' --enablerepo=updates-testing'
else:
info['testing'] = ''
info['yum_repository'] = ''
info['subject'] = u"%s%s%s Update: %s" % (
update.type is UpdateType.security and '[SECURITY] ' or '',
update.release.long_name, info['testing'], build.nvr)
info['updateid'] = update.alias
info['description'] = h['description']
info['product'] = update.release.long_name
info['notes'] = ""
if update.notes and len(update.notes):
info['notes'] = u"Update Information:\n\n%s\n" % \
'\n'.join(wrap(update.notes, width=80))
info['notes'] += line
# Add this updates referenced Bugzillas and CVEs
i = 1
info['references'] = ""
if len(update.bugs) or len(update.cves):
info['references'] = u"References:\n\n"
parent = True in [bug.parent for bug in update.bugs]
for bug in update.bugs:
# Don't show any tracker bugs for security updates
if update.type is UpdateType.security:
# If there is a parent bug, don't show trackers
if parent and not bug.parent:
log.debug("Skipping tracker bug %s" % bug)
continue
title = (bug.title != 'Unable to fetch title'
and bug.title != 'Invalid bug number'
and not bug.private) \
and ' - %s' % bug.title or ''
info['references'] += u" [ %d ] Bug #%d%s\n %s\n" % \
(i, bug.bug_id, title, bug.url)
i += 1
for cve in update.cves:
info['references'] += u" [ %d ] %s\n %s\n" % \
(i, cve.cve_id, cve.url)
i += 1
info['references'] += line
# Find the most recent update for this package, other than this one
try:
lastpkg = build.get_latest()
except AttributeError:
# Not all build types have the get_latest() method, such as ModuleBuilds.
lastpkg = None
# Grab the RPM header of the previous update, and generate a ChangeLog
info['changelog'] = u""
if lastpkg:
oldh = get_rpm_header(lastpkg)
oldtime = oldh['changelogtime']
text = oldh['changelogtext']
del oldh
if not text:
oldtime = 0
elif len(text) != 1:
oldtime = oldtime[0]
info['changelog'] = u"ChangeLog:\n\n%s%s" % \
(to_unicode(build.get_changelog(oldtime)), line)
try:
templates.append((info['subject'], use_template % info))
except UnicodeDecodeError:
# We can't trust the strings we get from RPM
log.debug("UnicodeDecodeError! Will try again after decoding")
for (key, value) in info.items():
if value:
info[key] = to_unicode(value)
templates.append((info['subject'], use_template % info))
return templates
|
8,382 |
def air_to_vac(wavelength, scheme='inversion', method='Griesen2006', co2=None,
precision=1e-12, maxiter=30):
"""
Converts air to vacuum wavelengths using different methods.
Parameters
----------
wavelength : `Quantity` object (number or sequence)
Air wavelengths with an astropy.unit.
scheme : str, optional
How the to convert from vacuum to air wavelengths. Options are:
'inversion' (default) - result is simply the inversion (1 / n) of the
refraction index of air. Griesen et al. (2006) report that the error
in naively inverting is less than 10^-9.
'Piskunov' - uses an analytical solution used derived by Nikolai Piskunov
and used by the Vienna Atomic Line Database (VALD).
'iteration' - uses an iterative scheme to invert the index of refraction.
method : str, optional
Only used if scheme is 'inversion' or 'iteration'. One of the methods
in refraction_index().
co2 : number, optional
Atmospheric CO2 concentration in ppm. Only used of scheme='inversion' and
method='Ciddor1996'. If not given, a default concentration of 450 ppm is used.
precision : float
Maximum fractional in refraction conversion beyond which iteration will
be stopped. Only used if scheme='iteration'.
maxiter : integer
Maximum number of iterations to run. Only used if scheme='iteration'.
Returns
-------
vac_wavelength : `Quantity` object (number or sequence)
Vacuum wavelengths with the same unit as wavelength.
"""
VALID_SCHEMES = ['inversion', 'iteration', 'piskunov']
assert isinstance(scheme, str), 'scheme must be a string'
scheme = scheme.lower()
if scheme == 'inversion':
refr = refraction_index(wavelength, method=method, co2=co2)
#return wavelength * refr
elif scheme == 'piskunov':
wlum = wavelength.to(u.angstrom).value
sigma2 = (1e4 / wlum)**2
refr = (8.336624212083e-5 + 2.408926869968e-2 / (130.1065924522 - sigma2) +
1.599740894897e-4 / (38.92568793293 - sigma2)) + 1
#return wavelength * refr
elif scheme == 'iteration':
# Refraction index is a function of vacuum wavelengths.
# Iterate to get index of refraction that gives air wavelength that
# is consistent with the reverse transformation.
counter = 0
result = wavelength.copy()
refr = refraction_index(wavelength, method=method, co2=co2)
while True:
counter += 1
diff = wavelength * refr - result
if abs(diff.max().value) < precision:
break
#return wavelength * conv
if counter > maxiter:
raise RuntimeError("Reached maximum number of iterations "
"without reaching desired precision level.")
result += diff
refr = refraction_index(result, method=method, co2=co2)
else:
raise ValueError("Method must be one of " + ", ".join(VALID_SCHEMES))
return wavelength * refr
|
def air_to_vac(wavelength, scheme='inversion', method='Griesen2006', co2=None,
precision=1e-12, maxiter=30):
"""
Converts air to vacuum wavelengths using different methods.
Parameters
----------
wavelength : `Quantity` object (number or sequence)
Air wavelengths with an astropy.unit.
scheme : str, optional
How the to convert from vacuum to air wavelengths. Options are:
'inversion' (default) - result is simply the inversion (1 / n) of the
refraction index of air. Griesen et al. (2006) report that the error
in naively inverting is less than 10^-9.
'Piskunov' - uses an analytical solution used derived by Nikolai Piskunov
and used by the Vienna Atomic Line Database (VALD).
'iteration' - uses an iterative scheme to invert the index of refraction.
method : str, optional
Only used if scheme is 'inversion' or 'iteration'. One of the methods
in refraction_index().
co2 : number, optional
Atmospheric CO2 concentration in ppm. Only used if scheme='inversion' and
method='Ciddor1996'. If not given, a default concentration of 450 ppm is used.
precision : float
Maximum fractional in refraction conversion beyond which iteration will
be stopped. Only used if scheme='iteration'.
maxiter : integer
Maximum number of iterations to run. Only used if scheme='iteration'.
Returns
-------
vac_wavelength : `Quantity` object (number or sequence)
Vacuum wavelengths with the same unit as wavelength.
"""
VALID_SCHEMES = ['inversion', 'iteration', 'piskunov']
assert isinstance(scheme, str), 'scheme must be a string'
scheme = scheme.lower()
if scheme == 'inversion':
refr = refraction_index(wavelength, method=method, co2=co2)
#return wavelength * refr
elif scheme == 'piskunov':
wlum = wavelength.to(u.angstrom).value
sigma2 = (1e4 / wlum)**2
refr = (8.336624212083e-5 + 2.408926869968e-2 / (130.1065924522 - sigma2) +
1.599740894897e-4 / (38.92568793293 - sigma2)) + 1
#return wavelength * refr
elif scheme == 'iteration':
# Refraction index is a function of vacuum wavelengths.
# Iterate to get index of refraction that gives air wavelength that
# is consistent with the reverse transformation.
counter = 0
result = wavelength.copy()
refr = refraction_index(wavelength, method=method, co2=co2)
while True:
counter += 1
diff = wavelength * refr - result
if abs(diff.max().value) < precision:
break
#return wavelength * conv
if counter > maxiter:
raise RuntimeError("Reached maximum number of iterations "
"without reaching desired precision level.")
result += diff
refr = refraction_index(result, method=method, co2=co2)
else:
raise ValueError("Method must be one of " + ", ".join(VALID_SCHEMES))
return wavelength * refr
|
34,884 |
def rewrite_annotated_ops(expr, fallback_device):
"""Rewrite the annotated program where annotation operators, e.g.
`on_deivce`, are used to denote where an expression should be scheduled to.
This pass helps heterogeneous execution where different operators may need
to be allocated on various devices.
Parameters
----------
expr : tvm.relay.Expr
The input expression.
fallback_device : int
The fallback device type. It is also used as the default device for
operators with no annotated device.
Returns
-------
transformed_expr : tvm.relay.Expr
Transformed expression with crossing device data copy operators.
"""
return _ir_pass.RewriteDeviceAnnotation(expr, fallback_device)
|
def rewrite_annotated_ops(expr, fallback_device):
"""Rewrite the annotated program where annotation operators, e.g.
`on_deivce`, are used to denote where an expression should be scheduled to.
This pass helps heterogeneous execution where different operators may need
to be allocated on various devices.
Parameters
----------
expr : tvm.relay.Expr
The input expression.
fallback_device : int
The fallback device type. It is also used as the default device for
operators with no annotated device.
Returns
-------
transformed_expr : tvm.relay.Expr
Transformed expression with cross device data copy operators.
"""
return _ir_pass.RewriteDeviceAnnotation(expr, fallback_device)
|
25,271 |
def multi_attribute(*attribute_paths):
"""Creates a projection that extracts the values of
one or more attribute paths.
Args:
attribute_paths (str): Extracts values from these paths, if given.
Returns:
Projection[any]: A projection that extracts the values of the given
attribute paths.
"""
return _MultiAttributeProjection(*attribute_paths)
|
def multi_attribute(*attribute_paths):
"""Creates a projection that extracts the values of
one or more attribute paths.
Args:
attribute_paths (str): Extracts values from these paths, if given.
Returns:
Projection[list]: A projection that extracts the values of the given
attribute paths.
"""
return _MultiAttributeProjection(*attribute_paths)
|
43,079 |
def to_program(prog):
"""Convert a Blackbird or an XIR program to a Strawberry Fields program.
Args:
prog (blackbird.BlackbirdProgram, xir.Program): the input program object
Returns:
Program: corresponding Strawberry Fields program
"""
if isinstance(prog, blackbird.BlackbirdProgram):
if not prog.modes:
# we can't return an empty program, since we don't know how many modes
# to initialize the Program object with.
raise ValueError("Blackbird program contains no quantum operations!")
if prog.programtype["name"] == "tdm":
return from_blackbird_to_tdm(prog)
return from_blackbird(prog)
if isinstance(prog, xir.Program):
if prog.options.get("type") == "tdm":
return from_xir_to_tdm(prog)
return from_xir(prog)
raise TypeError(f"Cannot convert '{prog.__class__}' to Strawberry Fields Program")
|
def to_program(prog):
"""Convert a Blackbird or an XIR program to a Strawberry Fields program.
Args:
prog (blackbird.BlackbirdProgram, xir.Program): the input program object
Returns:
Program: corresponding Strawberry Fields program
"""
if isinstance(prog, blackbird.BlackbirdProgram):
if not prog.modes:
# we can't return an empty program, since we don't know how many modes
# to initialize the Program object with.
raise ValueError("Blackbird program does not have any modes!")
if prog.programtype["name"] == "tdm":
return from_blackbird_to_tdm(prog)
return from_blackbird(prog)
if isinstance(prog, xir.Program):
if prog.options.get("type") == "tdm":
return from_xir_to_tdm(prog)
return from_xir(prog)
raise TypeError(f"Cannot convert '{prog.__class__}' to Strawberry Fields Program")
|
11,799 |
def lighter(image1, image2):
"""
Compares the two images, pixel by pixel, and returns a new image containing
the lighter values. Note that at least one of the images must have mode "1".
.. code-block:: python
out = max(image1, image2)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_lighter(image2.im))
|
def lighter(image1, image2):
"""
Compares the two images, pixel by pixel, and returns a new image containing
the lighter values. At least one of the images must have mode "1".
.. code-block:: python
out = max(image1, image2)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_lighter(image2.im))
|
40,279 |
def test_data():
torch_geometric.set_debug(True)
x = torch.tensor([[1, 3, 5], [2, 4, 6]], dtype=torch.float).t()
edge_index = torch.tensor([[0, 0, 1, 1, 2], [1, 1, 0, 2, 1]])
data = Data(x=x, edge_index=edge_index).to(torch.device('cpu'))
N = data.num_nodes
assert N == 3
assert data.x.tolist() == x.tolist()
assert data['x'].tolist() == x.tolist()
assert sorted(data.keys) == ['edge_index', 'x']
assert len(data) == 2
assert 'x' in data and 'edge_index' in data and 'pos' not in data
D = data.to_dict()
assert len(D) == 2
assert 'x' in D and 'edge_index' in D
D = data.to_namedtuple()
assert len(D) == 2
assert D.x is not None and D.edge_index is not None
assert data.__cat_dim__('x', data.x) == 0
assert data.__cat_dim__('edge_index', data.edge_index) == -1
assert data.__inc__('x', data.x) == 0
assert data.__inc__('edge_index', data.edge_index) == data.num_nodes
assert not data.x.is_contiguous()
data.contiguous()
assert data.x.is_contiguous()
assert not data.is_coalesced()
data = data.coalesce()
assert data.is_coalesced()
clone = data.clone()
assert clone != data
assert len(clone) == len(data)
assert clone.x.tolist() == data.x.tolist()
assert clone.edge_index.tolist() == data.edge_index.tolist()
# test to_heterogenous
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data.node_stores[0]['x'])
assert torch.allclose(data.edge_index,
hetero_data.edge_stores[0]['edge_index'])
data.edge_type = torch.tensor([0, 0, 1, 0])
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data.node_stores[0]['x'])
assert [3, 1] == [i.edge_index.size(1) for i in hetero_data.edge_stores]
data.edge_type = None
data['x'] = x + 1
assert data.x.tolist() == (x + 1).tolist()
assert str(data) == 'Data(x=[3, 2], edge_index=[2, 4])'
dictionary = {'x': data.x, 'edge_index': data.edge_index}
data = Data.from_dict(dictionary)
assert sorted(data.keys) == ['edge_index', 'x']
assert not data.has_isolated_nodes()
assert not data.has_self_loops()
assert data.is_undirected()
assert not data.is_directed()
assert data.num_nodes == 3
assert data.num_edges == 4
assert data.num_faces is None
assert data.num_node_features == 2
assert data.num_features == 2
data.edge_attr = torch.randn(data.num_edges, 2)
assert data.num_edge_features == 2
data.edge_attr = None
data.x = None
assert data.num_nodes == 3
data.edge_index = None
assert data.num_nodes is None
assert data.num_edges == 0
data.num_nodes = 4
assert data.num_nodes == 4
data = Data(x=x, attribute=x)
assert len(data) == 2
assert data.x.tolist() == x.tolist()
assert data.attribute.tolist() == x.tolist()
face = torch.tensor([[0, 1], [1, 2], [2, 3]])
data = Data(num_nodes=4, face=face)
assert data.num_faces == 2
assert data.num_nodes == 4
data = Data(title='test')
assert str(data) == "Data(title='test')"
assert data.num_node_features == 0
assert data.num_edge_features == 0
torch_geometric.set_debug(False)
|
def test_data():
torch_geometric.set_debug(True)
x = torch.tensor([[1, 3, 5], [2, 4, 6]], dtype=torch.float).t()
edge_index = torch.tensor([[0, 0, 1, 1, 2], [1, 1, 0, 2, 1]])
data = Data(x=x, edge_index=edge_index).to(torch.device('cpu'))
N = data.num_nodes
assert N == 3
assert data.x.tolist() == x.tolist()
assert data['x'].tolist() == x.tolist()
assert sorted(data.keys) == ['edge_index', 'x']
assert len(data) == 2
assert 'x' in data and 'edge_index' in data and 'pos' not in data
D = data.to_dict()
assert len(D) == 2
assert 'x' in D and 'edge_index' in D
D = data.to_namedtuple()
assert len(D) == 2
assert D.x is not None and D.edge_index is not None
assert data.__cat_dim__('x', data.x) == 0
assert data.__cat_dim__('edge_index', data.edge_index) == -1
assert data.__inc__('x', data.x) == 0
assert data.__inc__('edge_index', data.edge_index) == data.num_nodes
assert not data.x.is_contiguous()
data.contiguous()
assert data.x.is_contiguous()
assert not data.is_coalesced()
data = data.coalesce()
assert data.is_coalesced()
clone = data.clone()
assert clone != data
assert len(clone) == len(data)
assert clone.x.tolist() == data.x.tolist()
assert clone.edge_index.tolist() == data.edge_index.tolist()
# test to_heterogenous
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data.node_stores[0]['x'])
assert torch.allclose(data.edge_index,
hetero_data.edge_stores[0]['edge_index'])
data.edge_type = torch.tensor([0, 0, 1, 0])
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data['0'].x)
assert [3, 1] == [i.edge_index.size(1) for i in hetero_data.edge_stores]
data.edge_type = None
data['x'] = x + 1
assert data.x.tolist() == (x + 1).tolist()
assert str(data) == 'Data(x=[3, 2], edge_index=[2, 4])'
dictionary = {'x': data.x, 'edge_index': data.edge_index}
data = Data.from_dict(dictionary)
assert sorted(data.keys) == ['edge_index', 'x']
assert not data.has_isolated_nodes()
assert not data.has_self_loops()
assert data.is_undirected()
assert not data.is_directed()
assert data.num_nodes == 3
assert data.num_edges == 4
assert data.num_faces is None
assert data.num_node_features == 2
assert data.num_features == 2
data.edge_attr = torch.randn(data.num_edges, 2)
assert data.num_edge_features == 2
data.edge_attr = None
data.x = None
assert data.num_nodes == 3
data.edge_index = None
assert data.num_nodes is None
assert data.num_edges == 0
data.num_nodes = 4
assert data.num_nodes == 4
data = Data(x=x, attribute=x)
assert len(data) == 2
assert data.x.tolist() == x.tolist()
assert data.attribute.tolist() == x.tolist()
face = torch.tensor([[0, 1], [1, 2], [2, 3]])
data = Data(num_nodes=4, face=face)
assert data.num_faces == 2
assert data.num_nodes == 4
data = Data(title='test')
assert str(data) == "Data(title='test')"
assert data.num_node_features == 0
assert data.num_edge_features == 0
torch_geometric.set_debug(False)
|
34,843 |
def migrate_domain_format(
domain_path: Union[Text, Path], out_path: Optional[Union[Text, Path]],
) -> None:
"""Converts 2.0 domain to 3.0 format."""
domain_path = Path(domain_path)
out_path = Path(out_path) if out_path else None
domain_parent_dir = domain_path.parent
migrate_file_only = domain_path.is_file()
# Ensure the backup location does not exist yet
# Note: We demand that file as well as folder with this name gets deleted before
# the command is run to avoid confusion afterwards.
suffix = "original_domain"
suffix = f"{suffix}.yml" if migrate_file_only else suffix
backup_location = domain_parent_dir / suffix
if backup_location.exists():
backup_location_str = "directory" if backup_location.isdir() else "file"
raise RasaException(
f"The domain from '{domain_path}' could not be migrated since the "
f"a {backup_location_str} {backup_location} already exists."
f"Please remove that there is no file or folder at {backup_location}."
)
# Choose a default output location if nothing was specified
if out_path is None:
suffix = DEFAULT_DOMAIN_PATH if migrate_file_only else "new_domain"
out_path = domain_parent_dir / suffix
# Ensure the output location is not already in-use
if not migrate_file_only:
if out_path.is_dir() and any(out_path.iterdir()):
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because that folder is not empty."
"Please remove the folder and try again."
)
else:
if out_path.is_file():
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because a file already exists."
"Please remove the file and try again."
)
# Sanity Check: Assert the files to be migrated aren't in 3.0 format already
# Note: we do not enforce that the version tag is 2.0 everywhere + validate that
# migrate-able domain files are among these files later
original_files = (
[file for file in domain_path.iterdir() if Domain.is_domain_file(file)]
if domain_path.is_dir()
else [domain_path]
)
migrated_files = [
file
for file in original_files
if rasa.shared.utils.io.read_yaml_file(file).get("version") == "3.0"
]
if migrated_files:
raise RasaException(
f"Some of the given files ({[file for file in migrated_files]}) "
f"have already been migrated to Rasa 3.0 format. Please remove these "
f"migrated files (or replace them with files in 2.0 format) and try again."
)
# Validate given domain file(s) and migrate them
try:
created_out_dir = False
if not migrate_file_only:
if not out_path.is_dir():
rasa.shared.utils.io.raise_warning(
f"The out path provided did not exist yet. Created directory "
f"{out_path}."
)
out_path.mkdir(parents=True)
created_out_dir = True
backup_location.mkdir()
original_domain = _migrate_domain_files(
domain_path, backup_location, out_path
)
else:
if not Domain.is_domain_file(domain_path):
raise RasaException(
f"The file '{domain_path}' could not be validated as a "
f"domain file. Only domain yaml files can be migrated. "
)
original_domain = _create_back_up(domain_path, backup_location)
new_forms, updated_slots = _migrate_form_slots(original_domain)
new_slots = _migrate_auto_fill_and_custom_slots(original_domain, updated_slots)
_write_final_domain(domain_path, new_forms, new_slots, out_path)
rasa.shared.utils.cli.print_success(
f"Your domain file '{str(domain_path)}' was successfully migrated! "
f"The migrated version is now '{str(out_path)}'. "
f"The original domain file is backed-up at '{str(backup_location)}'."
)
except Exception as e:
# Remove the backups if migration couldn't be completed
if backup_location.is_dir():
shutil.rmtree(backup_location)
if out_path.is_dir():
if created_out_dir:
shutil.rmtree(out_path)
else: # just remove contained files so we do not mess with access rights
for f in out_path.glob("*"):
f.unlink()
if backup_location.is_file():
backup_location.unlink()
raise e
|
def migrate_domain_format(
domain_path: Union[Text, Path], out_path: Optional[Union[Text, Path]],
) -> None:
"""Converts 2.0 domain to 3.0 format."""
domain_path = Path(domain_path)
out_path = Path(out_path) if out_path else None
domain_parent_dir = domain_path.parent
migrate_file_only = domain_path.is_file()
# Ensure the backup location does not exist yet
# Note: We demand that file as well as folder with this name gets deleted before
# the command is run to avoid confusion afterwards.
suffix = "original_domain"
suffix = f"{suffix}.yml" if migrate_file_only else suffix
backup_location = domain_parent_dir / suffix
if backup_location.exists():
backup_location_str = "directory" if backup_location.isdir() else "file"
raise RasaException(
f"The domain from '{domain_path}' could not be migrated since the "
f"{backup_location_str} '{backup_location}' already exists."
f"Please remove that there is no file or folder at {backup_location}."
)
# Choose a default output location if nothing was specified
if out_path is None:
suffix = DEFAULT_DOMAIN_PATH if migrate_file_only else "new_domain"
out_path = domain_parent_dir / suffix
# Ensure the output location is not already in-use
if not migrate_file_only:
if out_path.is_dir() and any(out_path.iterdir()):
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because that folder is not empty."
"Please remove the folder and try again."
)
else:
if out_path.is_file():
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because a file already exists."
"Please remove the file and try again."
)
# Sanity Check: Assert the files to be migrated aren't in 3.0 format already
# Note: we do not enforce that the version tag is 2.0 everywhere + validate that
# migrate-able domain files are among these files later
original_files = (
[file for file in domain_path.iterdir() if Domain.is_domain_file(file)]
if domain_path.is_dir()
else [domain_path]
)
migrated_files = [
file
for file in original_files
if rasa.shared.utils.io.read_yaml_file(file).get("version") == "3.0"
]
if migrated_files:
raise RasaException(
f"Some of the given files ({[file for file in migrated_files]}) "
f"have already been migrated to Rasa 3.0 format. Please remove these "
f"migrated files (or replace them with files in 2.0 format) and try again."
)
# Validate given domain file(s) and migrate them
try:
created_out_dir = False
if not migrate_file_only:
if not out_path.is_dir():
rasa.shared.utils.io.raise_warning(
f"The out path provided did not exist yet. Created directory "
f"{out_path}."
)
out_path.mkdir(parents=True)
created_out_dir = True
backup_location.mkdir()
original_domain = _migrate_domain_files(
domain_path, backup_location, out_path
)
else:
if not Domain.is_domain_file(domain_path):
raise RasaException(
f"The file '{domain_path}' could not be validated as a "
f"domain file. Only domain yaml files can be migrated. "
)
original_domain = _create_back_up(domain_path, backup_location)
new_forms, updated_slots = _migrate_form_slots(original_domain)
new_slots = _migrate_auto_fill_and_custom_slots(original_domain, updated_slots)
_write_final_domain(domain_path, new_forms, new_slots, out_path)
rasa.shared.utils.cli.print_success(
f"Your domain file '{str(domain_path)}' was successfully migrated! "
f"The migrated version is now '{str(out_path)}'. "
f"The original domain file is backed-up at '{str(backup_location)}'."
)
except Exception as e:
# Remove the backups if migration couldn't be completed
if backup_location.is_dir():
shutil.rmtree(backup_location)
if out_path.is_dir():
if created_out_dir:
shutil.rmtree(out_path)
else: # just remove contained files so we do not mess with access rights
for f in out_path.glob("*"):
f.unlink()
if backup_location.is_file():
backup_location.unlink()
raise e
|
58,756 |
def create(target):
"""Get a target given target string.
Parameters
----------
target : str or dict
The target string or configuration dictionary.
When using a dictionary to configure target, the
possible values are:
{
kind : str (required)
Which codegen path to use, for example 'llvm' or 'cuda'.
keys : List of str (optional)
A set of strategies that can be dispatched to. When using
"kind=opencl" for example, one could set keys to ["mali", "opencl", "gpu"].
device : str (optional)
A single key that corresponds to the actual device being run on.
This will be effectively appended to the keys.
libs : List of str (optional)
The set of external libraries to use. For example ['cblas', 'mkl'].
system-lib : bool (optional)
If True, build a module that contains self registered functions.
Useful for environments where dynamic loading like dlopen is banned.
mcpu : str (optional)
The specific cpu being run on. Serves only as an annotation.
model : str (optional)
An annotation indicating what model a workload came from.
runtime : str (optional)
An annotation indicating which runtime to use with a workload.
mtriple : str (optional)
The llvm triplet describing the target, for example "arm64-linux-android".
mattr : List of str (optional)
The llvm features to compile with, for example ["+avx512f", "+mmx"].
mfloat-abi : str (optional)
An llvm setting that is one of 'hard' or 'soft' indicating whether to use
hardware or software floating-point operations.
}
Returns
-------
target : Target
The target object
Note
----
See the note on :py:mod:`tvm.target` on target string format.
"""
if isinstance(target, Target):
return target_str
if isinstance(target, dict):
return _ffi_api.TargetFromConfig(target)
if isinstance(target, str):
return _ffi_api.TargetFromString(target)
raise ValueError("target has to be a string or dictionary.")
|
def create(target):
"""Get a target given target string.
Parameters
----------
target : str or dict
The target string or configuration dictionary.
When using a dictionary to configure target, the
possible values are:
{
kind : str (required)
Which codegen path to use, for example 'llvm' or 'cuda'.
keys : List of str (optional)
A set of strategies that can be dispatched to. When using
"kind=opencl" for example, one could set keys to ["mali", "opencl", "gpu"].
device : str (optional)
A single key that corresponds to the actual device being run on.
This will be effectively appended to the keys.
libs : List of str (optional)
The set of external libraries to use. For example ['cblas', 'mkl'].
system-lib : bool (optional)
If True, build a module that contains self registered functions.
Useful for environments where dynamic loading like dlopen is banned.
mcpu : str (optional)
The specific cpu being run on. Serves only as an annotation.
model : str (optional)
An annotation indicating what model a workload came from.
runtime : str (optional)
An annotation indicating which runtime to use with a workload.
mtriple : str (optional)
The llvm triplet describing the target, for example "arm64-linux-android".
mattr : List of str (optional)
The llvm features to compile with, for example ["+avx512f", "+mmx"].
mfloat-abi : str (optional)
An llvm setting that is one of 'hard' or 'soft' indicating whether to use
hardware or software floating-point operations.
}
Returns
-------
target : Target
The target object
Note
----
See the note on :py:mod:`tvm.target` on target string format.
"""
if isinstance(target, Target):
return target_str
if isinstance(target, dict):
return _ffi_api.TargetFromConfig(target)
if isinstance(target, str):
if target.startswith("{"):
return _ffi_api.TargetFromConfig(json.loads(target))
else:
return _ffi_api.TargetFromString(target)
raise ValueError("target has to be a string or dictionary.")
|
31,716 |
def circleci_trigger_workflow_command(client: Client, args: Dict[str, Any]) -> CommandResults:
vc_type, organization, project, _ = get_common_arguments(client, args)
parameters_json: str = args.get('parameters', '')
try:
parameters = json.loads(parameters_json)
except ValueError:
raise DemistoException("Failed to parse parameters argument")
response_json = client.trigger_workflow(vc_type, organization, project, parameters)
response = json.loads(response_json)
return CommandResults(
outputs_prefix='CircleCI.WorkflowTrigger',
outputs_key_field='id',
readable_output=f"CircleCI Workflow created successfully, ID={response.get('number')}",
outputs=response
)
|
def circleci_trigger_workflow_command(client: Client, args: Dict[str, Any]) -> CommandResults:
vc_type, organization, project, _ = get_common_arguments(client, args)
parameters_json: str = args.get('parameters', '')
try:
parameters = json.loads(parameters_json)
except ValueError:
raise DemistoException("Failed to parse the 'parameters' argument.")
response_json = client.trigger_workflow(vc_type, organization, project, parameters)
response = json.loads(response_json)
return CommandResults(
outputs_prefix='CircleCI.WorkflowTrigger',
outputs_key_field='id',
readable_output=f"CircleCI Workflow created successfully, ID={response.get('number')}",
outputs=response
)
|
19,629 |
def record_prefix_files(m, files_with_prefix):
filtered = []
if not files_with_prefix:
return filtered
# Copies are made to ease debugging. Sorry.
binary_has_prefix_files = m.binary_has_prefix_files()[:]
text_has_prefix_files = m.has_prefix_files()[:]
# We need to cache these as otherwise the fact we remove from this in a for loop later
# that also checks it has elements.
len_binary_has_prefix_files = len(binary_has_prefix_files)
len_text_has_prefix_files = len(text_has_prefix_files)
if files_with_prefix:
if utils.on_win:
# Paths on Windows can contain spaces, so we need to quote the
# paths. Fortunately they can't contain quotes, so we don't have
# to worry about nested quotes.
fmt_str = '"%s" %s "%s"\n'
else:
# Don't do it everywhere because paths on Unix can contain quotes,
# and we don't have a good method of escaping, and because older
# versions of conda don't support quotes in has_prefix
fmt_str = '%s %s %s\n'
print("Files containing CONDA_PREFIX")
print("-----------------------------")
detect_binary_files_with_prefix = m.get_value('build/detect_binary_files_with_prefix',
not len_binary_has_prefix_files and not utils.on_win)
with open(join(m.config.info_dir, 'has_prefix'), 'w') as fo:
for pfix, mode, fn in files_with_prefix:
ignored_because = None
if (fn in binary_has_prefix_files or ((not len_binary_has_prefix_files or
detect_binary_files_with_prefix) and mode == 'binary')):
if fn in binary_has_prefix_files:
if mode != 'binary':
mode = 'binary'
elif fn in binary_has_prefix_files and detect_binary_files_with_prefix:
print("File {} force-identified as 'binary', "
"But it is 'binary' anyway, suggest removing it from "
"`build/binary_has_prefix_files`".format(fn))
if fn in binary_has_prefix_files:
binary_has_prefix_files.remove(fn)
elif (fn in text_has_prefix_files or (not len_text_has_prefix_files and mode == 'text') or
os.path.dirname(fn) == 'python-scripts'):
if mode != 'text':
mode = 'text'
elif fn in text_has_prefix_files and not len_text_has_prefix_files:
print("File {} force-identified as 'text', "
"But it is 'text' anyway, suggest removing it from "
"`build/has_prefix_files`".format(fn))
if fn in text_has_prefix_files:
text_has_prefix_files.remove(fn)
else:
ignored_because = " (not in build/%s_has_prefix_files)" % (mode)
print("{fn} ({mode}): {action}{reason}".format(fn=fn, mode=mode,
action="Ignoring" if ignored_because else "Patching",
reason=ignored_because if ignored_because else ""))
if ignored_because is None:
fo.write(fmt_str % (pfix, mode, fn))
filtered.append((pfix, mode, fn))
# make sure we found all of the files expected
errstr = ""
for f in text_has_prefix_files:
errstr += "Did not detect hard-coded path in %s from has_prefix_files\n" % f
for f in binary_has_prefix_files:
errstr += "Did not detect hard-coded path in %s from binary_has_prefix_files\n" % f
if errstr:
raise RuntimeError(errstr)
return filtered
|
def record_prefix_files(m, files_with_prefix):
filtered = []
if not files_with_prefix:
return filtered
# Copies are made to ease debugging. Sorry.
binary_has_prefix_files = m.binary_has_prefix_files()[:]
text_has_prefix_files = m.has_prefix_files()[:]
# We need to cache these as otherwise the fact we remove from this in a for loop later
# that also checks it has elements.
len_binary_has_prefix_files = len(binary_has_prefix_files)
len_text_has_prefix_files = len(text_has_prefix_files)
if files_with_prefix:
if utils.on_win:
# Paths on Windows can contain spaces, so we need to quote the
# paths. Fortunately they can't contain quotes, so we don't have
# to worry about nested quotes.
fmt_str = '"%s" %s "%s"\n'
else:
# Don't do it everywhere because paths on Unix can contain quotes,
# and we don't have a good method of escaping, and because older
# versions of conda don't support quotes in has_prefix
fmt_str = '%s %s %s\n'
print("Files containing CONDA_PREFIX")
print("-----------------------------")
detect_binary_files_with_prefix = m.get_value('build/detect_binary_files_with_prefix',
len_binary_has_prefix_files and not utils.on_win)
with open(join(m.config.info_dir, 'has_prefix'), 'w') as fo:
for pfix, mode, fn in files_with_prefix:
ignored_because = None
if (fn in binary_has_prefix_files or ((not len_binary_has_prefix_files or
detect_binary_files_with_prefix) and mode == 'binary')):
if fn in binary_has_prefix_files:
if mode != 'binary':
mode = 'binary'
elif fn in binary_has_prefix_files and detect_binary_files_with_prefix:
print("File {} force-identified as 'binary', "
"But it is 'binary' anyway, suggest removing it from "
"`build/binary_has_prefix_files`".format(fn))
if fn in binary_has_prefix_files:
binary_has_prefix_files.remove(fn)
elif (fn in text_has_prefix_files or (not len_text_has_prefix_files and mode == 'text') or
os.path.dirname(fn) == 'python-scripts'):
if mode != 'text':
mode = 'text'
elif fn in text_has_prefix_files and not len_text_has_prefix_files:
print("File {} force-identified as 'text', "
"But it is 'text' anyway, suggest removing it from "
"`build/has_prefix_files`".format(fn))
if fn in text_has_prefix_files:
text_has_prefix_files.remove(fn)
else:
ignored_because = " (not in build/%s_has_prefix_files)" % (mode)
print("{fn} ({mode}): {action}{reason}".format(fn=fn, mode=mode,
action="Ignoring" if ignored_because else "Patching",
reason=ignored_because if ignored_because else ""))
if ignored_because is None:
fo.write(fmt_str % (pfix, mode, fn))
filtered.append((pfix, mode, fn))
# make sure we found all of the files expected
errstr = ""
for f in text_has_prefix_files:
errstr += "Did not detect hard-coded path in %s from has_prefix_files\n" % f
for f in binary_has_prefix_files:
errstr += "Did not detect hard-coded path in %s from binary_has_prefix_files\n" % f
if errstr:
raise RuntimeError(errstr)
return filtered
|
27,445 |
def test_observer_long_lat_el():
"""Test that astropy.EarthLocation conversion to longtitude,
lattitude, and elevation is working correctly in Observer,
and that Observer.location is of type EarthLocation.
"""
obs = Observer.at_site('Subaru')
assert isinstance(obs.location, EarthLocation)
lon, lat, el = obs.location.to_geodetic()[:3]
assert obs.longitude == lon
assert obs.lattitude == lat
assert obs.elevation == el
|
def test_observer_long_lat_el():
"""Test that astropy.EarthLocation conversion to longtitude,
lattitude, and elevation is working correctly in Observer,
and that Observer.location is of type EarthLocation.
"""
obs = Observer.at_site('Subaru')
assert isinstance(obs.location, EarthLocation)
lon, lat, el = obs.location.to_geodetic()[:3]
assert obs.longitude == lon
assert obs.latitude == lat
assert obs.elevation == el
|
49,131 |
def compute_edge_weights(edge_ids, edge_probabilities, beta, threshold):
"""
Convert edge probabilities to energies for the multicut problem.
edge_ids:
The list of edges in the graph. shape=(N, 2)
edge_probabilities:
1-D, float (1.0 means edge is CUT, disconnecting the two SPs)
beta:
scalar (float)
threshold:
scalar (float), moves the 0 of the edge weights (default threshold = 0.5)
Special behavior:
If any node has ID 0, all of it's edges will be given an
artificially low energy, to prevent it from merging with its
neighbors, regardless of what the edge_probabilities say.
"""
def rescale(probabilities, threshold):
"""
Given a threshold in the range (0,1), rescales the probabilities below and above
the threshold to the ranges (0,0.5], and (0.5,1) respectively. This is needed
to implement an effective 'moving' of the 0 weight, since the multicut algorithm
implicitly calculates that weights change sign at p=0.5.
:param probabilities: 1d array (float). Probability data within range (0,1)
:param threshold: scalar (float). The new threshold for the algorithm.
:return: Rescaled data to be used in algorithm.
"""
out = np.zeros_like(probabilities)
data_lower = probabilities[probabilities <= threshold]
data_upper = probabilities[probabilities > threshold]
data_lower = (data_lower / threshold) * 0.5
data_upper = (((data_upper - threshold) / (1 - threshold)) * 0.5) + 0.5
out[probabilities <= threshold] = data_lower
out[probabilities > threshold] = data_upper
return out
p1 = edge_probabilities # P(Edge=CUT)
p1 = np.clip(p1, 0.001, 0.999)
p1 = rescale(p1, threshold)
p0 = 1.0 - p1 # P(Edge=NOT CUT)
edge_weights = np.log(p0 / p1) + np.log((1 - beta) / beta)
# See note special behavior, above
edges_touching_zero = edge_ids[:, 0] == 0
if edges_touching_zero.any():
logger.warning("Volume contains label 0, which will be excluded from the segmentation.")
MINIMUM_ENERGY = -1000.0
edge_weights[edges_touching_zero] = MINIMUM_ENERGY
return edge_weights
|
def compute_edge_weights(edge_ids, edge_probabilities, beta, threshold):
"""
Convert edge probabilities to energies for the multicut problem.
edge_ids:
The list of edges in the graph. shape=(N, 2)
edge_probabilities:
1-D, float (1.0 means edge is CUT, disconnecting the two SPs)
beta:
scalar (float)
threshold:
scalar (float), moves the 0 of the edge weights (default threshold = 0.5)
Special behavior:
If any node has ID 0, all of it's edges will be given an
artificially low energy, to prevent it from merging with its
neighbors, regardless of what the edge_probabilities say.
"""
def rescale(probabilities, threshold):
"""
Given a threshold in the range (0,1), rescales the probabilities below and above
the threshold to the ranges (0,0.5], and (0.5,1) respectively. This is needed
to implement an effective 'moving' of the 0 weight, since the multicut algorithm
implicitly calculates that weights change sign at p=0.5.
:param probabilities: 1d array (float). Probability data within range (0,1)
:param threshold: scalar (float). The new threshold for the algorithm.
:return: Rescaled data to be used in algorithm.
"""
out = np.zeros_like(probabilities)
data_lower = probabilities[probabilities <= threshold]
data_upper = probabilities[probabilities > threshold]
data_lower = (data_lower / threshold) * 0.5
data_upper = (((data_upper - threshold) / (1 - threshold)) * 0.5) + 0.5
out[probabilities <= threshold] = data_lower
out[probabilities > threshold] = data_upper
return out
# P(Edge=CUT), clipped to avoid log(0).
p1 = np.clip(edge_probabilities, 0.001, 0.999)
p1 = rescale(p1, threshold)
p0 = 1.0 - p1 # P(Edge=NOT CUT)
edge_weights = np.log(p0 / p1) + np.log((1 - beta) / beta)
# See note special behavior, above
edges_touching_zero = edge_ids[:, 0] == 0
if edges_touching_zero.any():
logger.warning("Volume contains label 0, which will be excluded from the segmentation.")
MINIMUM_ENERGY = -1000.0
edge_weights[edges_touching_zero] = MINIMUM_ENERGY
return edge_weights
|
48,580 |
def test_distance(camera):
focal_point = np.random.random(3)
position = np.random.random(3)
camera.position = position
camera.focal_point = focal_point
assert np.isclose(camera.distance, np.linalg.norm(focal_point - position, ord=2), rtol=1e-8)
distance = np.random.random()
camera.distance = distance
assert np.isclose(camera.distance, distance, atol=1e-4)
|
def test_distance(camera):
focal_point = np.random.random(3)
position = np.random.random(3)
camera.position = position
camera.focal_point = focal_point
assert np.isclose(camera.distance, np.linalg.norm(focal_point - position, ord=2), rtol=1e-8)
distance = np.random.random()
camera.distance = distance
assert np.isclose(camera.distance, distance, atol=0.002)
# large absolute tolerance because of
|
32,726 |
def test_cursor_attribute():
fake_conn = FakeConnection()
cursor = SnowflakeCursor(fake_conn)
assert not cursor.lastrowid
|
def test_cursor_attribute():
fake_conn = FakeConnection()
cursor = SnowflakeCursor(fake_conn)
assert cursor.lastrowid is None
|
29,685 |
def benchmark_memory(
sizes=["2 kiB", "10 kiB", "100 kiB", "1 MiB", "10 MiB"],
duration=0.2,
) -> dict:
duration = parse_timedelta(duration)
out = {}
for size_str in sizes:
size = parse_bytes(size_str)
data = os.urandom(size)
start = time()
total = 0
while time() < start + duration:
data[:-1]
total += size
out[size_str] = total / (time() - start)
return out
|
def benchmark_memory(
sizes=["2 kiB", "10 kiB", "100 kiB", "1 MiB", "10 MiB"],
duration=0.2,
) -> dict:
duration = parse_timedelta(duration)
out = {}
for size_str in sizes:
size = parse_bytes(size_str)
data = os.urandom(size)
start = time()
total = 0
while time() < start + duration:
data[:]
total += size
out[size_str] = total / (time() - start)
return out
|
58,705 |
def evaluate_intents(
intent_results: List[IntentEvaluationResult],
output_directory: Optional[Text],
successes: bool,
errors: bool,
disable_plotting: bool,
) -> Dict: # pragma: no cover
"""Creates summary statistics for intents.
Only considers those examples with a set intent. Others are filtered out.
Returns a dictionary of containing the evaluation result.
Args:
intent_results: intent evaluation results
output_directory: directory to store files to
successes: if True correct predictions are written to disk
errors: if True incorrect predictions are written to disk
disable_plotting: if True no plots are created
Returns: dictionary with evaluation results
"""
intent_report = create_intent_report(
intent_results=intent_results,
add_confused_labels_to_report=output_directory is not None,
metrics_as_dict=output_directory is not None,
)
if output_directory:
report_filename = os.path.join(output_directory, "intent_report.json")
rasa.shared.utils.io.dump_obj_as_json_to_file(
report_filename, intent_report.report
)
logger.info(f"Classification report saved to {report_filename}.")
else:
if isinstance(intent_report.report, str):
log_evaluation_table(
intent_report.report,
intent_report.precision,
intent_report.f1,
intent_report.accuracy,
)
if successes and output_directory:
successes_filename = os.path.join(output_directory, "intent_successes.json")
# save classified samples to file for debugging
write_intent_successes(intent_results, successes_filename)
if errors and output_directory:
errors_filename = os.path.join(output_directory, "intent_errors.json")
# log and save misclassified samples to file for debugging
write_intent_errors(intent_results, errors_filename)
if not disable_plotting:
confusion_matrix_filename = "intent_confusion_matrix.png"
if output_directory:
confusion_matrix_filename = os.path.join(
output_directory, confusion_matrix_filename
)
plot_utils.plot_confusion_matrix(
intent_report.confusion_matrix,
classes=intent_report.labels,
title="Intent Confusion matrix",
output_file=confusion_matrix_filename,
)
histogram_filename = "intent_histogram.png"
if output_directory:
histogram_filename = os.path.join(output_directory, histogram_filename)
plot_attribute_confidences(
intent_results,
histogram_filename,
"intent_target",
"intent_prediction",
title="Intent Prediction Confidence Distribution",
)
predictions = [
{
"text": res.message,
"intent": res.intent_target,
"predicted": res.intent_prediction,
"confidence": res.confidence,
}
for res in intent_results
]
return {
"predictions": predictions,
"report": intent_report.report,
"precision": intent_report.precision,
"f1_score": intent_report.f1,
"accuracy": intent_report.accuracy,
}
|
def evaluate_intents(
intent_results: List[IntentEvaluationResult],
output_directory: Optional[Text],
successes: bool,
errors: bool,
disable_plotting: bool,
) -> Dict: # pragma: no cover
"""Creates summary statistics for intents.
Only considers those examples with a set intent. Others are filtered out.
Returns a dictionary of containing the evaluation result.
Args:
intent_results: Intent evaluation results.
output_directory: Directory to store files to.
successes: If `True`, correct predictions are written to disk.
errors: If `True`, incorrect predictions are written to disk.
disable_plotting: If `True`, no plots are created.
Returns: dictionary with evaluation results
"""
intent_report = create_intent_report(
intent_results=intent_results,
add_confused_labels_to_report=output_directory is not None,
metrics_as_dict=output_directory is not None,
)
if output_directory:
report_filename = os.path.join(output_directory, "intent_report.json")
rasa.shared.utils.io.dump_obj_as_json_to_file(
report_filename, intent_report.report
)
logger.info(f"Classification report saved to {report_filename}.")
else:
if isinstance(intent_report.report, str):
log_evaluation_table(
intent_report.report,
intent_report.precision,
intent_report.f1,
intent_report.accuracy,
)
if successes and output_directory:
successes_filename = os.path.join(output_directory, "intent_successes.json")
# save classified samples to file for debugging
write_intent_successes(intent_results, successes_filename)
if errors and output_directory:
errors_filename = os.path.join(output_directory, "intent_errors.json")
# log and save misclassified samples to file for debugging
write_intent_errors(intent_results, errors_filename)
if not disable_plotting:
confusion_matrix_filename = "intent_confusion_matrix.png"
if output_directory:
confusion_matrix_filename = os.path.join(
output_directory, confusion_matrix_filename
)
plot_utils.plot_confusion_matrix(
intent_report.confusion_matrix,
classes=intent_report.labels,
title="Intent Confusion matrix",
output_file=confusion_matrix_filename,
)
histogram_filename = "intent_histogram.png"
if output_directory:
histogram_filename = os.path.join(output_directory, histogram_filename)
plot_attribute_confidences(
intent_results,
histogram_filename,
"intent_target",
"intent_prediction",
title="Intent Prediction Confidence Distribution",
)
predictions = [
{
"text": res.message,
"intent": res.intent_target,
"predicted": res.intent_prediction,
"confidence": res.confidence,
}
for res in intent_results
]
return {
"predictions": predictions,
"report": intent_report.report,
"precision": intent_report.precision,
"f1_score": intent_report.f1,
"accuracy": intent_report.accuracy,
}
|
55,592 |
def compute_all_minutes(
opens_in_ns, break_starts_in_ns, break_ends_in_ns, closes_in_ns,
):
"""
Given arrays of opens and closes (in nanoseconds) and optionally
break_starts and break ends, return an array of each minute between the
opens and closes.
NOTE: An extra minute is added to ending pbountaries (break_end and close)
so we include the last bar.
"""
pieces = [] # todo preallocat?
for open_time, break_start_time, break_end_time, close_time in zip(
opens_in_ns, break_starts_in_ns, break_ends_in_ns, closes_in_ns
):
if break_start_time != NP_NAT and break_end_time != NP_NAT:
pieces.append(
np.arange(
open_time,
break_start_time + NANOSECONDS_PER_MINUTE,
NANOSECONDS_PER_MINUTE,
)
)
pieces.append(
np.arange(
break_end_time,
close_time + NANOSECONDS_PER_MINUTE,
NANOSECONDS_PER_MINUTE,
)
)
else:
pieces.append(
np.arange(
open_time,
close_time + NANOSECONDS_PER_MINUTE,
NANOSECONDS_PER_MINUTE,
)
)
out = np.concatenate(pieces).view("datetime64[ns]")
return out
|
def compute_all_minutes(
opens_in_ns, break_starts_in_ns, break_ends_in_ns, closes_in_ns,
):
"""
Given arrays of opens and closes (in nanoseconds) and optionally
break_starts and break ends, return an array of each minute between the
opens and closes.
NOTE: Add an extra minute to ending boundaries (break_start and close)
so we include the last bar (arange doesn't include its stop).
"""
pieces = [] # todo preallocat?
for open_time, break_start_time, break_end_time, close_time in zip(
opens_in_ns, break_starts_in_ns, break_ends_in_ns, closes_in_ns
):
if break_start_time != NP_NAT and break_end_time != NP_NAT:
pieces.append(
np.arange(
open_time,
break_start_time + NANOSECONDS_PER_MINUTE,
NANOSECONDS_PER_MINUTE,
)
)
pieces.append(
np.arange(
break_end_time,
close_time + NANOSECONDS_PER_MINUTE,
NANOSECONDS_PER_MINUTE,
)
)
else:
pieces.append(
np.arange(
open_time,
close_time + NANOSECONDS_PER_MINUTE,
NANOSECONDS_PER_MINUTE,
)
)
out = np.concatenate(pieces).view("datetime64[ns]")
return out
|
38,977 |
def field_schema(
field: ModelField,
*,
by_alias: bool = True,
model_name_map: Dict[TypeModelOrEnum, str],
ref_prefix: Optional[str] = None,
known_models: TypeModelSet = None,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
Process a Pydantic field and return a tuple with a JSON Schema for it as the first item.
Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field
is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they
will be included in the definitions and referenced in the schema instead of included recursively.
:param field: a Pydantic ``ModelField``
:param by_alias: use the defined alias (if any) in the returned schema
:param model_name_map: used to generate the JSON Schema references to other models included in the definitions
:param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of
#/definitions/ will be used
:param known_models: used to solve circular references
:return: tuple of the schema for this field and additional definitions
"""
ref_prefix = ref_prefix or default_prefix
schema_overrides = False
s = dict(title=field.field_info.title or field.alias.title().replace('_', ' '))
if field.field_info.title:
schema_overrides = True
if field.field_info.description:
s['description'] = field.field_info.description
schema_overrides = True
if not field.required and not field.field_info.const and field.default is not None:
s['default'] = encode_default(field.default)
schema_overrides = True
validation_schema = get_field_schema_validations(field)
if validation_schema:
s.update(validation_schema)
schema_overrides = True
f_schema, f_definitions, f_nested_models = field_type_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
known_models=known_models or set(),
)
# $ref will only be returned when there are no schema_overrides
if '$ref' in f_schema or (field.allow_none and [x for x in f_schema.get('anyOf', []) if '$ref' in x]):
return f_schema, f_definitions, f_nested_models
else:
s.update(f_schema)
return s, f_definitions, f_nested_models
|
def field_schema(
field: ModelField,
*,
by_alias: bool = True,
model_name_map: Dict[TypeModelOrEnum, str],
ref_prefix: Optional[str] = None,
known_models: TypeModelSet = None,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
Process a Pydantic field and return a tuple with a JSON Schema for it as the first item.
Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field
is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they
will be included in the definitions and referenced in the schema instead of included recursively.
:param field: a Pydantic ``ModelField``
:param by_alias: use the defined alias (if any) in the returned schema
:param model_name_map: used to generate the JSON Schema references to other models included in the definitions
:param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of
#/definitions/ will be used
:param known_models: used to solve circular references
:return: tuple of the schema for this field and additional definitions
"""
ref_prefix = ref_prefix or default_prefix
schema_overrides = False
s = dict(title=field.field_info.title or field.alias.title().replace('_', ' '))
if field.field_info.title:
schema_overrides = True
if field.field_info.description:
s['description'] = field.field_info.description
schema_overrides = True
if not field.required and not field.field_info.const and field.default is not None:
s['default'] = encode_default(field.default)
schema_overrides = True
validation_schema = get_field_schema_validations(field)
if validation_schema:
s.update(validation_schema)
schema_overrides = True
f_schema, f_definitions, f_nested_models = field_type_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
known_models=known_models or set(),
)
# $ref will only be returned when there are no schema_overrides
if '$ref' in f_schema or (field.allow_none and any('$ref' in x for x in f_schema.get('anyOf', []))):
return f_schema, f_definitions, f_nested_models
else:
s.update(f_schema)
return s, f_definitions, f_nested_models
|
58,372 |
def validate_derivative_path(path, **kwargs):
# Collect all paths that contain a dataset_description.json
dd = path / 'dataset_description.json'
with dd.open('r', encoding='utf-8') as ddfd:
description = json.load(ddfd)
pipeline_names = [pipeline["Name"]
for pipeline in description.get("GeneratedBy", [])
if "Name" in pipeline]
if pipeline_names:
pipeline_name = pipeline_names[0]
elif "PipelineDescription" in description:
warnings.warn("The PipelineDescription field was superseded "
"by GeneratedBy in BIDS 1.4.0. You can use "
"``pybids upgrade`` to update your derivative "
"dataset.")
pipeline_name = description["PipelineDescription"].get("Name")
else:
pipeline_name = None
if pipeline_name is None:
raise BIDSDerivativesValidationError(
"Every valid BIDS-derivatives dataset must "
"have a GeneratedBy.Name field set "
"inside 'dataset_description.json'. "
"\nExample: %s" %
MANDATORY_DERIVATIVES_FIELDS['GeneratedBy'])
return pipeline_name
|
def validate_derivative_path(path, **kwargs):
# Collect all paths that contain a dataset_description.json
dd = path / 'dataset_description.json'
with dd.open('r', encoding='utf-8') as ddfd:
description = json.load(ddfd)
pipeline_names = [pipeline["Name"]
for pipeline in description.get("GeneratedBy", [])
if "Name" in pipeline]
if pipeline_names:
pipeline_name = pipeline_names[0]
elif "PipelineDescription" in description:
warnings.warn("The PipelineDescription field was superseded "
"by GeneratedBy in BIDS 1.4.0. You can use "
"``pybids upgrade`` to update your derivative "
"dataset.")
pipeline_name = description["PipelineDescription"].get("Name")
else:
pipeline_name = None
if pipeline_name is None:
raise BIDSDerivativesValidationError(
"Every valid BIDS-derivatives dataset must "
"have a GeneratedBy.Name field set "
"inside 'dataset_description.json'. "
"\nExample: %s" %
MANDATORY_DERIVATIVES_FIELDS['GeneratedBy'])
return pipeline_name
|
32,058 |
def wait_and_complete_task_command(args: Dict[str, Any]) -> CommandResults:
"""
Args:
args: Script arguments
Returns:
CompletedTask - Tasks that was completed by script
FoundTasks - Tasks that was found by script, and already completed, not by this script
"""
task_states = argToList(args.get('task_states'))
if not all(state in POSSIBLE_STATES for state in task_states):
raise Exception(f'task_states are bad. Possible values: {POSSIBLE_STATES}')
complete_option = args.get('complete_option')
incident_id = args.get('incident_id')
if not incident_id:
incident = demisto.incidents()[0]
incident_id = incident.get('id')
task_name = args.get('task_name')
complete_task = argToBoolean(args.get('complete_task', 'true'))
max_timeout = arg_to_number(args.get('max_timeout', 60))
interval_between_tries = arg_to_number(args.get('interval_between_tries', 3))
completed_tasks = []
found_tasks = []
start_time = time.time()
while True:
tasks_by_states = get_incident_tasks_by_state(incident_id, task_states)
requested_task = None
# find task to complete if was given task name
if task_name:
for task in tasks_by_states:
if task['name'] == task_name:
requested_task = task
break
if requested_task and complete_task:
# complete the requested task
complete_task_by_id(
requested_task.get('id'),
requested_task.get('parentPlaybookID'),
incident_id,
complete_option
)
completed_tasks.append(requested_task.get('name'))
break
elif requested_task:
# just validate that task was found and not complete it
found_tasks.append(requested_task.get('name'))
break
elif not task_name and tasks_by_states and complete_task:
# complete all tasks, which state is task_states
for task in tasks_by_states:
complete_res = complete_task_by_id(
task.get('id'),
task.get('parentPlaybookID'),
incident_id,
complete_option
)
if 'Task is completed already' in complete_res:
found_tasks.append(task.get('name'))
else:
completed_tasks.append(task.get('name'))
break
elif not task_name and tasks_by_states:
# just validate that task was found and not complete it
found_tasks.extend(task.get('name') for task in tasks_by_states)
break
if time.time() - start_time > max_timeout: # type: ignore[operator]
break
sleep(float(interval_between_tries)) # type: ignore[arg-type]
if not completed_tasks and not found_tasks:
if task_name and task_states:
raise Exception(f'The task "{task_name}" did not reach the {" or ".join(task_states)} state.')
elif task_name:
raise Exception(f'The task "{task_name}" was not found by script.')
elif task_states:
raise Exception(f'None of the tasks reached the {" or ".join(task_states)} state.')
else:
raise Exception('No tasks were found.')
return CommandResults(
outputs_prefix='WaitAndCompleteTask',
outputs_key_field='',
outputs={'CompletedTask': completed_tasks,
'FoundTasks': found_tasks},
)
|
def wait_and_complete_task_command(args: Dict[str, Any]) -> CommandResults:
"""
Args:
args: Script arguments
Returns:
CompletedTask - Tasks that was completed by script
FoundTasks - Tasks that was found by script, and already completed, not by this script
"""
task_states = argToList(args.get('task_states'))
if not all(state in POSSIBLE_STATES for state in task_states):
raise Exception(f'task_states are bad. Possible values: {POSSIBLE_STATES}')
complete_option = args.get('complete_option')
incident_id = args.get('incident_id')
if not incident_id:
incident = demisto.incidents()[0]
incident_id = incident.get('id')
task_name = args.get('task_name')
complete_task = argToBoolean(args.get('complete_task', 'false'))
max_timeout = arg_to_number(args.get('max_timeout', 60))
interval_between_tries = arg_to_number(args.get('interval_between_tries', 3))
completed_tasks = []
found_tasks = []
start_time = time.time()
while True:
tasks_by_states = get_incident_tasks_by_state(incident_id, task_states)
requested_task = None
# find task to complete if was given task name
if task_name:
for task in tasks_by_states:
if task['name'] == task_name:
requested_task = task
break
if requested_task and complete_task:
# complete the requested task
complete_task_by_id(
requested_task.get('id'),
requested_task.get('parentPlaybookID'),
incident_id,
complete_option
)
completed_tasks.append(requested_task.get('name'))
break
elif requested_task:
# just validate that task was found and not complete it
found_tasks.append(requested_task.get('name'))
break
elif not task_name and tasks_by_states and complete_task:
# complete all tasks, which state is task_states
for task in tasks_by_states:
complete_res = complete_task_by_id(
task.get('id'),
task.get('parentPlaybookID'),
incident_id,
complete_option
)
if 'Task is completed already' in complete_res:
found_tasks.append(task.get('name'))
else:
completed_tasks.append(task.get('name'))
break
elif not task_name and tasks_by_states:
# just validate that task was found and not complete it
found_tasks.extend(task.get('name') for task in tasks_by_states)
break
if time.time() - start_time > max_timeout: # type: ignore[operator]
break
sleep(float(interval_between_tries)) # type: ignore[arg-type]
if not completed_tasks and not found_tasks:
if task_name and task_states:
raise Exception(f'The task "{task_name}" did not reach the {" or ".join(task_states)} state.')
elif task_name:
raise Exception(f'The task "{task_name}" was not found by script.')
elif task_states:
raise Exception(f'None of the tasks reached the {" or ".join(task_states)} state.')
else:
raise Exception('No tasks were found.')
return CommandResults(
outputs_prefix='WaitAndCompleteTask',
outputs_key_field='',
outputs={'CompletedTask': completed_tasks,
'FoundTasks': found_tasks},
)
|
5,812 |
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba', fs=None):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g., rad/s).
When Wn is a length-2 sequence, Wn[0] must be less than Wn[1].
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Filter form of the output:
- second-order sections (recommended): 'sos'
- numerator/denominator (default) : 'ba'
- pole-zero : 'zpk'
In general the second-order sections ('sos') form is
recommended because inferring the coefficients for the
numerator/denominator form ('ba') suffers from numerical
instabilities. For reasons of backward compatibility the default
form is the numerator/denominator form ('ba'), where the 'b'
and the 'a' in 'ba' refer to the commonly used names of the
coefficients used.
Note: Using the second-order sections form ('sos') is sometimes
associated with additional computational costs: for
data-intense use cases it is therefore recommended to also
investigate the numerator/denominator form ('ba').
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to
200 Hz and plot the frequency response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60,
... btype='band', analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [Hz]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
Create a digital filter with the same properties, in a system with
sampling rate of 2000 Hz, and plot the frequency response. (Second-order
sections implementation is required to ensure stability of a filter of
this order):
>>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=False, ftype='cheby2', fs=2000,
... output='sos')
>>> w, h = signal.sosfreqz(sos, 2000, fs=2000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [Hz]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
if fs is not None:
if analog:
raise ValueError("fs cannot be specified for an analog filter")
Wn = 2*Wn/fs
if numpy.any(Wn <= 0):
raise ValueError("filter critical frequencies must be greater than 0")
if Wn.size > 1 and not Wn[0] < Wn[1]:
raise ValueError("Wn[0] must be less than Wn[1]")
try:
btype = band_dict[btype]
except KeyError as e:
raise ValueError("'%s' is an invalid bandtype for filter." % btype) from e
try:
typefunc = filter_dict[ftype][0]
except KeyError as e:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype) from e
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn <= 0) or numpy.any(Wn >= 1):
if fs is not None:
raise ValueError("Digital filter critical frequencies "
"must be 0 < Wn < fs/2 (fs={} -> fs/2={})".format(fs, fs/2))
raise ValueError("Digital filter critical frequencies "
"must be 0 < Wn < 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn for lowpass or highpass filter')
if btype == 'lowpass':
z, p, k = lp2lp_zpk(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = lp2hp_zpk(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError as e:
raise ValueError('Wn must specify start and stop frequencies for bandpass or bandstop '
'filter') from e
if btype == 'bandpass':
z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = bilinear_zpk(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k, analog=analog)
|
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba', fs=None):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g., rad/s).
When Wn is a length-2 sequence, ``Wn[0]`` must be less than ``Wn[1]``.
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Filter form of the output:
- second-order sections (recommended): 'sos'
- numerator/denominator (default) : 'ba'
- pole-zero : 'zpk'
In general the second-order sections ('sos') form is
recommended because inferring the coefficients for the
numerator/denominator form ('ba') suffers from numerical
instabilities. For reasons of backward compatibility the default
form is the numerator/denominator form ('ba'), where the 'b'
and the 'a' in 'ba' refer to the commonly used names of the
coefficients used.
Note: Using the second-order sections form ('sos') is sometimes
associated with additional computational costs: for
data-intense use cases it is therefore recommended to also
investigate the numerator/denominator form ('ba').
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to
200 Hz and plot the frequency response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60,
... btype='band', analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [Hz]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
Create a digital filter with the same properties, in a system with
sampling rate of 2000 Hz, and plot the frequency response. (Second-order
sections implementation is required to ensure stability of a filter of
this order):
>>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=False, ftype='cheby2', fs=2000,
... output='sos')
>>> w, h = signal.sosfreqz(sos, 2000, fs=2000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [Hz]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
if fs is not None:
if analog:
raise ValueError("fs cannot be specified for an analog filter")
Wn = 2*Wn/fs
if numpy.any(Wn <= 0):
raise ValueError("filter critical frequencies must be greater than 0")
if Wn.size > 1 and not Wn[0] < Wn[1]:
raise ValueError("Wn[0] must be less than Wn[1]")
try:
btype = band_dict[btype]
except KeyError as e:
raise ValueError("'%s' is an invalid bandtype for filter." % btype) from e
try:
typefunc = filter_dict[ftype][0]
except KeyError as e:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype) from e
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn <= 0) or numpy.any(Wn >= 1):
if fs is not None:
raise ValueError("Digital filter critical frequencies "
"must be 0 < Wn < fs/2 (fs={} -> fs/2={})".format(fs, fs/2))
raise ValueError("Digital filter critical frequencies "
"must be 0 < Wn < 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn for lowpass or highpass filter')
if btype == 'lowpass':
z, p, k = lp2lp_zpk(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = lp2hp_zpk(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError as e:
raise ValueError('Wn must specify start and stop frequencies for bandpass or bandstop '
'filter') from e
if btype == 'bandpass':
z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = bilinear_zpk(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k, analog=analog)
|
1,654 |
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# variance computed by default shape (n_components, )
assert (spca_lars.explained_variance_.shape == (3,))
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
# variance computed by default shape (n_components, )
assert (spca_lasso.explained_variance_.shape == (3, ))
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# variance computed by default shape (n_components, )
assert spca_lars.explained_variance_.shape == (3,)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
# variance computed by default shape (n_components, )
assert (spca_lasso.explained_variance_.shape == (3, ))
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
54,504 |
def _convert_positional_args(n_positional_args: int = 0) -> Any:
def decorator(func: Any) -> Any:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
assert len(args) <= n_positional_args, "Too many positional arguments."
if len(args) >= 1:
warnings.warn(
f"{func.__name__}: Positional arguments are deprecated."
" Please give all values as keyword arguments."
)
for val, arg_name in zip(args, list(signature(func).parameters)):
assert arg_name not in kwargs
kwargs[arg_name] = val
return func(**kwargs)
return wrapper
return decorator
|
def _convert_positional_args(n_positional_args: int = 0) -> Callable:
def decorator(func: Callable) -> Callable:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
assert len(args) <= n_positional_args, "Too many positional arguments."
if len(args) >= 1:
warnings.warn(
f"{func.__name__}: Positional arguments are deprecated."
" Please give all values as keyword arguments."
)
for val, arg_name in zip(args, list(signature(func).parameters)):
assert arg_name not in kwargs
kwargs[arg_name] = val
return func(**kwargs)
return wrapper
return decorator
|
35,285 |
def symmetric_power_iteration(tensor, n_repeat=10, n_iteration=10, verbose=False):
"""A single Robust Symmetric Tensor Power Iteration
Parameters
----------
tensor : tl.tensor
input tensor to decompose, must be symmetric of shape (size, )*order
n_repeat : int, default is 10
number of initializations to be tried
n_iterations : int, default is 10
number of power iterations
verbose : bool
level of verbosity
Returns
-------
(eigenval, best_factor, deflated)
eigenval : float
the obtained eigenvalue
best_factor : tl.tensor
the best estimated eigenvector
deflated : tl.tensor of same shape as `tensor`
the deflated tensor (i.e. without the estimated component)
"""
order = tl.ndim(tensor)
size = tl.shape(tensor)[0]
if not tl.shape(tensor) == (size, )*order:
raise ValueError('The input tensor does not have the same size along each mode.')
# A list of candidates for each mode
best_score = -np.inf
scores = []
modes = list(range(1, order))
for _ in range(n_repeat):
factor = tl.tensor(np.random.random_sample(size), **tl.context(tensor))
for _ in range(n_iteration):
for _ in range(order):
factor = tl.tenalg.multi_mode_dot(tensor, [factor]*(order-1), modes=modes)
factor = factor / tl.norm(factor, 2)
score = tl.tenalg.multi_mode_dot(tensor, [factor]*order)
scores.append(score) #round(score, 2))
if score > best_score:
best_score = score
best_factor = factor
if verbose:
print(f'Best score of {n_repeat}: {best_score}')
# Refine the init
for _ in range(n_iteration):
for _ in range(order):
best_factor = tl.tenalg.multi_mode_dot(tensor, [best_factor]*(order-1), modes=modes)
best_factor = best_factor / tl.norm(best_factor, 2)
eigenval = tl.tenalg.multi_mode_dot(tensor, [best_factor]*order)
deflated = tensor - outer([best_factor]*order)*eigenval
if verbose:
explained = tl.norm(deflated)/tl.norm(tensor)
print(f'Eigenvalue: {eigenval}, explained: {explained}')
return eigenval, best_factor, deflated
|
def symmetric_power_iteration(tensor, n_repeat=10, n_iteration=10, verbose=False):
"""A single Robust Symmetric Tensor Power Iteration
Parameters
----------
tensor : tl.tensor
input tensor to decompose, must be symmetric of shape (size, )*order
n_repeat : int, default is 10
number of initializations to be tried
n_iterations : int, default is 10
number of power iterations
verbose : bool
level of verbosity
Returns
-------
(eigenval, best_factor, deflated)
eigenval : float
the obtained eigenvalue
best_factor : tl.tensor
the best estimated eigenvector
deflated : tl.tensor of same shape as `tensor`
the deflated tensor (i.e. without the estimated component)
"""
order = tl.ndim(tensor)
size = tl.shape(tensor)[0]
if not tl.shape(tensor) == (size, )*order:
raise ValueError('The input tensor does not have the same size along each mode.')
# A list of candidates for each mode
best_score = -np.inf
scores = []
modes = list(range(1, order))
for _ in range(n_repeat):
factor = tl.tensor(np.random.random_sample(size), **tl.context(tensor))
for _ in range(n_iteration):
for _ in range(order):
factor = tl.tenalg.multi_mode_dot(tensor, [factor]*(order-1), modes=modes)
factor = factor / tl.norm(factor, 2)
score = tl.tenalg.multi_mode_dot(tensor, [factor]*order)
scores.append(score) #round(score, 2))
if (i == 0) or (score > best_score):
best_score = score
best_factor = factor
if verbose:
print(f'Best score of {n_repeat}: {best_score}')
# Refine the init
for _ in range(n_iteration):
for _ in range(order):
best_factor = tl.tenalg.multi_mode_dot(tensor, [best_factor]*(order-1), modes=modes)
best_factor = best_factor / tl.norm(best_factor, 2)
eigenval = tl.tenalg.multi_mode_dot(tensor, [best_factor]*order)
deflated = tensor - outer([best_factor]*order)*eigenval
if verbose:
explained = tl.norm(deflated)/tl.norm(tensor)
print(f'Eigenvalue: {eigenval}, explained: {explained}')
return eigenval, best_factor, deflated
|
54,735 |
def is_connected(url: str) -> bool:
"""
This is a helper function to check if the client
is connected to the internet.
Example:
print(is_connected("www.google.com"))
console >> True
:param url: We take a test url to check if we are
able to create a valid connection.
:return: We return a boolean that signifies our
connection to the internet
"""
try:
sock = socket.create_connection((url, 80))
if sock is not None:
sock.close()
return True
except OSError:
pass
return False
|
def is_connected(url: str) -> bool:
"""
This is a helper function to check if the client
is connected to the internet.
Example:
print(is_connected("www.google.com"))
console >> True
:param url: We take a test url to check if we are
able to create a valid connection.
:return: We return a boolean that signifies our
connection to the internet
"""
try:
sock = socket.create_connection((url, 80))
if sock is not None:
sock.close()
return True
except OSError as e:
import warnings
warnings.warn("There was an issue connecting to the internet. Please see original error below.")
raise e
return False
|
23,113 |
def test_extra_file(tmpdir, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({"a": range(100), "b": ["dog", "cat"] * 50})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(tmpdir, engine=engine)
open(os.path.join(tmpdir, "_SUCCESS"), "w").close()
open(os.path.join(tmpdir, "part.0.parquet.crc"), "w").close()
os.remove(os.path.join(tmpdir, "_metadata"))
out = dd.read_parquet(tmpdir)
assert_eq(out, df)
|
def test_extra_file(tmpdir, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({"a": range(100), "b": ["dog", "cat"] * 50})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(tmpdir, engine=engine)
open(os.path.join(tmpdir, "_SUCCESS"), "w").close()
open(os.path.join(tmpdir, "part.0.parquet.crc"), "w").close()
os.remove(os.path.join(tmpdir, "_metadata"))
out = dd.read_parquet(tmpdir, engine=engine)
assert_eq(out, df)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.