id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
31,500 |
def get_ip_objects_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Gets a list of IP objects by the requested list type (denylist or allowlist).
If the object_id argument is given, only those IP objects will be displayed. Otherwise, all IP objects that match
the given list_type will be displayed.
API docs: https://portal.f5silverline.com/docs/api/v1/ip_objects.md (GET section)
"""
list_type = args['list_type']
object_ids = argToList(args.get('object_id'))
page_number = args.get('page_number')
page_size = args.get('page_size')
url_suffix = f'{list_type}/ip_objects'
params = {}
is_paging = False
if page_number and page_size:
params = paging_args_to_params(page_size, page_number)
is_paging = True
if not object_ids:
# in case the user wants to get all the IP objects and not specific ones
response = client.request_ip_objects(body={}, method='GET', url_suffix=url_suffix, params=params)
outputs = response.get('data')
human_results = parse_get_ip_object_list_results(response)
else:
human_results, outputs = get_ip_objects_by_ids(client, object_ids, list_type, params) # type: ignore
human_readable = tableToMarkdown('F5 Silverline IP Objects', human_results, TABLE_HEADERS_GET_OBJECTS,
removeNull=True)
if not human_results and is_paging:
human_readable = "No results were found. Please try to run the command without page_number and page_size to " \
"get all the IP objects that exist."
return CommandResults(
readable_output=human_readable,
outputs_prefix='F5Silverline.IPObjectList',
outputs_key_field='id',
outputs=outputs
)
|
def get_ip_objects_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Gets a list of IP objects by the requested list type (denylist or allowlist).
If the object_id argument is given, only those IP objects will be displayed. Otherwise, all IP objects that match
the given list_type will be displayed.
API docs: https://portal.f5silverline.com/docs/api/v1/ip_objects.md (GET section)
"""
list_type = args['list_type']
object_ids = argToList(args.get('object_id'))
page_number = args.get('page_number')
page_size = args.get('page_size')
url_suffix = f'{list_type}/ip_objects'
params = {}
is_paging = False
if page_number and page_size:
params = paging_args_to_params(page_size, page_number)
is_paging = True
if not object_ids:
# in case the user wants to get all the IP objects and not specific ones
response = client.request_ip_objects(body={}, method='GET', url_suffix=url_suffix, params=params)
outputs = response.get('data')
human_results = parse_get_ip_object_list_results(response)
else:
human_results, outputs = get_ip_objects_by_ids(client, object_ids, list_type, params) # type: ignore
human_readable = tableToMarkdown('F5 Silverline IP Objects', human_results, TABLE_HEADERS_GET_OBJECTS,
removeNull=True)
if not human_results and is_paging:
human_readable = "No results were found. Please try to run the command without page_number and page_size to " \
"get all existing IP objects."
return CommandResults(
readable_output=human_readable,
outputs_prefix='F5Silverline.IPObjectList',
outputs_key_field='id',
outputs=outputs
)
|
3,851 |
def arbitrary_element(iterable):
"""Returns an arbitrary element of `iterable` without removing it.
This is most useful for "peeking" at an arbitrary element of a set,
but can be used for any list, dictionary, etc., as well.
Parameters
----------
iterable: `abc.collections.Iterable` instance
Any object that implements ``__iter__``, e.g. set, dict, list, tuple,
etc.
Returns
-------
The object that results from ``next(iter(iterable))``
Raises
------
ValueError
If `iterable` is an iterator (because the current implementation of
this function would consume an element from the iterator).
Examples
--------
Arbitrary elements from common Iterable objects:
>>> arbitrary_element([1, 2, 3]) # list
1
>>> arbitrary_element((1, 2, 3)) # tuple
1
>>> arbitrary_element({1, 2, 3}) # set
1
>>> d = {k: v for k, v in zip([1, 2, 3], [3, 2, 1])}
>>> arbitrary_element(d) # dict_keys
1
>>> arbitrary_element(d.values()) # dict values
3
`str` is also an Iterable:
>>> arbitrary_element("hello")
'h'
:exc:`ValueError` is raised if `iterable` is an iterator:
>>> iterator = iter([1, 2, 3]) # Iterator, *not* Iterable
>>> arbitrary_element(iterator)
Traceback (most recent call last):
...
ValueError: cannot return an arbitrary item from an iterator
Notes
-----
This function does not return a *random* element. If `iterable` is
ordered, sequential calls will return the same value::
>>> l = [1, 2, 3]
>>> arbitrary_element(l)
1
>>> arbitrary_element(l)
1
"""
if isinstance(iterable, Iterator):
raise ValueError("cannot return an arbitrary item from an iterator")
# Another possible implementation is ``for x in iterable: return x``.
return next(iter(iterable))
|
def arbitrary_element(iterable):
"""Returns an arbitrary element of `iterable` without removing it.
This is most useful for "peeking" at an arbitrary element of a set,
but can be used for any list, dictionary, etc., as well.
Parameters
----------
iterable : `abc.collections.Iterable` instance
Any object that implements ``__iter__``, e.g. set, dict, list, tuple,
etc.
Returns
-------
The object that results from ``next(iter(iterable))``
Raises
------
ValueError
If `iterable` is an iterator (because the current implementation of
this function would consume an element from the iterator).
Examples
--------
Arbitrary elements from common Iterable objects:
>>> arbitrary_element([1, 2, 3]) # list
1
>>> arbitrary_element((1, 2, 3)) # tuple
1
>>> arbitrary_element({1, 2, 3}) # set
1
>>> d = {k: v for k, v in zip([1, 2, 3], [3, 2, 1])}
>>> arbitrary_element(d) # dict_keys
1
>>> arbitrary_element(d.values()) # dict values
3
`str` is also an Iterable:
>>> arbitrary_element("hello")
'h'
:exc:`ValueError` is raised if `iterable` is an iterator:
>>> iterator = iter([1, 2, 3]) # Iterator, *not* Iterable
>>> arbitrary_element(iterator)
Traceback (most recent call last):
...
ValueError: cannot return an arbitrary item from an iterator
Notes
-----
This function does not return a *random* element. If `iterable` is
ordered, sequential calls will return the same value::
>>> l = [1, 2, 3]
>>> arbitrary_element(l)
1
>>> arbitrary_element(l)
1
"""
if isinstance(iterable, Iterator):
raise ValueError("cannot return an arbitrary item from an iterator")
# Another possible implementation is ``for x in iterable: return x``.
return next(iter(iterable))
|
5,821 |
def setup_build(args, env):
"""
Setting up meson-build
"""
cmd = ["meson", "setup", args.build_dir, "--prefix", PATH_INSTALLED]
build_dir = Path(args.build_dir)
run_dir = os.getcwd()
if build_dir.exists() and not (build_dir / 'meson-info').exists():
if list(build_dir.iterdir()):
raise RuntimeError(
"You're using Meson to build in the `build` directory, "
"but it looks like that directory is not empty and "
"was not originally created by Meson. "
f"Please remove '{build_dir.absolute()}' and try again."
)
if os.path.exists(build_dir):
build_options_file = (build_dir / "meson-info"
/ "intro-buildoptions.json")
if build_options_file.exists():
with open(build_options_file) as f:
build_options = json.load(f)
installdir = None
for option in build_options:
if option["name"] == "prefix":
installdir = option["value"]
break
if installdir != PATH_INSTALLED:
run_dir = os.path.join(run_dir, build_dir)
cmd = ["meson", "--reconfigure", "--prefix", PATH_INSTALLED]
else:
return
else:
run_dir = os.path.join(run_dir, build_dir)
cmd = ["meson", "--reconfigure", "--prefix", PATH_INSTALLED]
if args.werror:
cmd += ["--werror"]
if args.gcov:
cmd += ['-Db_coverage=true']
# Setting up meson build
ret = subprocess.call(cmd, env=env, cwd=run_dir)
if ret == 0:
print("Meson build setup OK")
else:
print("Meson build setup failed! ({0} elapsed)")
sys.exit(1)
return
|
def setup_build(args, env):
"""
Setting up meson-build
"""
cmd = ["meson", "setup", args.build_dir, "--prefix", PATH_INSTALLED]
build_dir = Path(args.build_dir)
run_dir = os.getcwd()
if build_dir.exists() and not (build_dir / 'meson-info').exists():
if list(build_dir.iterdir()):
raise RuntimeError(
f"You're using Meson to build in the `{build_dir.absolute()}` directory, "
"but it looks like that directory is not empty and "
"was not originally created by Meson. "
f"Please remove '{build_dir.absolute()}' and try again."
)
if os.path.exists(build_dir):
build_options_file = (build_dir / "meson-info"
/ "intro-buildoptions.json")
if build_options_file.exists():
with open(build_options_file) as f:
build_options = json.load(f)
installdir = None
for option in build_options:
if option["name"] == "prefix":
installdir = option["value"]
break
if installdir != PATH_INSTALLED:
run_dir = os.path.join(run_dir, build_dir)
cmd = ["meson", "--reconfigure", "--prefix", PATH_INSTALLED]
else:
return
else:
run_dir = os.path.join(run_dir, build_dir)
cmd = ["meson", "--reconfigure", "--prefix", PATH_INSTALLED]
if args.werror:
cmd += ["--werror"]
if args.gcov:
cmd += ['-Db_coverage=true']
# Setting up meson build
ret = subprocess.call(cmd, env=env, cwd=run_dir)
if ret == 0:
print("Meson build setup OK")
else:
print("Meson build setup failed! ({0} elapsed)")
sys.exit(1)
return
|
58,158 |
def create_indicator_relationships(indicator, indicator_type, indicator_value):
relationships_list: List[EntityRelationship] = []
entities_b = indicator.get('feedrelatedindicators', [])
for entity_b in entities_b:
entity_b_value = entity_b.get('summary') or entity_b.get('name')
entity_b_type = entity_b.get('type')
relationships_list.extend(create_relationships(indicator_value, indicator_type, entity_b_value, entity_b_type))
return relationships_list
|
def create_indicator_relationships(indicator, indicator_type, indicator_value):
relationships_list: List[EntityRelationship] = []
b_entities = indicator.get('feedrelatedindicators', [])
for entity_b in b_entities:
entity_b_value = entity_b.get('summary') or entity_b.get('name')
entity_b_type = entity_b.get('type')
relationships_list.extend(create_relationships(indicator_value, indicator_type, entity_b_value, entity_b_type))
return relationships_list
|
25,071 |
def downsample_2d(src, w, h, method=DS_MEAN, fill_value=None, mode_rank=1, out=None):
"""
Downsample a 2-D grid to a lower resolution by aggregating original grid cells.
Parameters
----------
src : numpy.ndarray or dask.array.Array
The source array to resample
w : int
New grid width
h : int
New grid height
ds_method : str (optional)
Grid cell aggregation method for a possible downsampling
(one of the *DS_* constants).
fill_value : scalar (optional)
If ``None``, it is taken from **src** if it is a masked array,
otherwise from *out* if it is a masked array,
otherwise numpy's default value is used.
mode_rank : scalar (optional)
The rank of the frequency determined by the *ds_method*
``DS_MODE``. One (the default) means most frequent value, zwo
means second most frequent value, and so forth.
out : numpy.ndarray (optional)
Alternate output array in which to place the result. The
default is *None*; if provided, it must have the same shape as
the expected output.
Returns
-------
downsampled : numpy.ndarray or dask.array.Array
An downsampled version of the *src* array.
"""
if method == DS_MODE and mode_rank < 1:
raise ValueError('mode_rank must be >= 1')
out = _get_out(out, src, (h, w))
if out is None:
return src
mask, use_mask = _get_mask(src)
fill_value = _get_fill_value(fill_value, src, out)
if method not in DOWNSAMPLING_METHODS:
raise ValueError('invalid downsampling method')
downsampling_method = DOWNSAMPLING_METHODS[method]
downsampled = downsampling_method(
src, mask, use_mask, method, fill_value, mode_rank, (0, 0),
(0, 0), out)
return _mask_or_not(downsampled, src, fill_value)
|
def downsample_2d(src, w, h, method=DS_MEAN, fill_value=None, mode_rank=1, out=None):
"""
Downsample a 2-D grid to a lower resolution by aggregating original grid cells.
Parameters
----------
src : numpy.ndarray or dask.array.Array
The source array to resample
w : int
New grid width
h : int
New grid height
ds_method : str (optional)
Grid cell aggregation method for a possible downsampling
(one of the *DS_* constants).
fill_value : scalar (optional)
If ``None``, it is taken from **src** if it is a masked array,
otherwise from *out* if it is a masked array,
otherwise numpy's default value is used.
mode_rank : scalar (optional)
The rank of the frequency determined by the *ds_method*
``DS_MODE``. One (the default) means most frequent value, two
means second most frequent value, and so forth.
out : numpy.ndarray (optional)
Alternate output array in which to place the result. The
default is *None*; if provided, it must have the same shape as
the expected output.
Returns
-------
downsampled : numpy.ndarray or dask.array.Array
An downsampled version of the *src* array.
"""
if method == DS_MODE and mode_rank < 1:
raise ValueError('mode_rank must be >= 1')
out = _get_out(out, src, (h, w))
if out is None:
return src
mask, use_mask = _get_mask(src)
fill_value = _get_fill_value(fill_value, src, out)
if method not in DOWNSAMPLING_METHODS:
raise ValueError('invalid downsampling method')
downsampling_method = DOWNSAMPLING_METHODS[method]
downsampled = downsampling_method(
src, mask, use_mask, method, fill_value, mode_rank, (0, 0),
(0, 0), out)
return _mask_or_not(downsampled, src, fill_value)
|
31,194 |
def close_incidents_command(client, args):
incident_ids = args.get('incident_ids')
result = client.close_incidents(incident_ids)
if not result.get('success'):
raise DemistoException(result['message'])
msg = result.get('message')
markdown = "### " + msg
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Incidents.close',
outputs_key_field='',
outputs=msg
)
|
def close_incidents_command(client, args):
incident_ids = argToList(args.get('incident_ids'))
result = client.close_incidents(incident_ids)
if not result.get('success'):
raise DemistoException(result['message'])
msg = result.get('message')
markdown = "### " + msg
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Incidents.close',
outputs_key_field='',
outputs=msg
)
|
32,191 |
def get_global_counters(topology: Topology, device_filter_string: str = None) -> ShowCounterGlobalCommmandResult:
"""
Gets global counter information from all the PAN-OS firewalls in the topology
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only show specific hostnames or serial numbers.
"""
result: ShowCounterGlobalCommmandResult = FirewallCommand.get_counter_global(topology, device_filter_string)
return result
|
def get_global_counters(topology: Topology, device_filter_string: Optional[str] = None) -> ShowCounterGlobalCommmandResult:
"""
Gets global counter information from all the PAN-OS firewalls in the topology
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only show specific hostnames or serial numbers.
"""
result: ShowCounterGlobalCommmandResult = FirewallCommand.get_counter_global(topology, device_filter_string)
return result
|
51,088 |
def average(*args: Any) -> float:
"""
Filter and function to calculate the mean of a set.
The parameters may be passed as an iterable or as separate arguments.
"""
def avg(*items: Any) -> float:
return sum(float(item) for item in items) / len(items)
if len(args) == 0:
raise TypeError("average expected at least 1 argument, got 0")
if len(args) == 1:
if isinstance(args[0], (list, tuple)):
return avg(*args[0])
raise TypeError(f"'{type(args[0]).__name__}' object is not iterable")
return avg(*args)
|
def average(*args: Any) -> float:
"""
Filter and function to calculate the mean of a set.
The parameters may be passed as an iterable or as separate arguments.
"""
def avg(*items: Any) -> float:
return sum(float(item) for item in items) / len(items)
if len(args) == 0:
raise TypeError("average expected at least 1 argument, got 0")
if not isinstance(args[0], (list, tuple)):
raise TypeError(f"'{type(args[0]).__name__}' object is not iterable")
return avg(*args)
|
29,755 |
def get_user_recordings(user_id: int, stats_range: str) -> Optional[UserRecordingStat]:
"""Get top recordings in a tine range for user with given ID.
Args:
user_id: the row ID of the user in the DB
stats_range: the time range to fetch the stats for
"""
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT user_id, recording->:range AS {range}, last_updated
FROM statistics.user
WHERE user_id = :user_id
""".format(range=stats_range)), {
'range': stats_range,
'user_id': user_id
})
row = result.fetchone()
return UserRecordingStat(**dict(row)) if row else None
|
def get_user_recordings(user_id: int, stats_range: str) -> Optional[UserRecordingStat]:
"""Get top recordings in a time range for user with given ID.
Args:
user_id: the row ID of the user in the DB
stats_range: the time range to fetch the stats for
"""
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT user_id, recording->:range AS {range}, last_updated
FROM statistics.user
WHERE user_id = :user_id
""".format(range=stats_range)), {
'range': stats_range,
'user_id': user_id
})
row = result.fetchone()
return UserRecordingStat(**dict(row)) if row else None
|
49 |
def get_contact_id_by_username(username):
"""TODO: Use CiviCRM Explorer to replace with call to get contact_id by username"""
data = {
'entity': 'Contact',
'action': 'get',
'api_key': lending.config_ia_civicrm_api.get('api_key', ''),
'key': lending.config_ia_civicrm_api.get('site_key', ''),
'json': {
"sequential": 1,
CIVI_USERNAME: username
}
}
data['json'] = json.dumps(data['json']) # flatten the json field as a string
r = requests.get(
lending.config_ia_civicrm_api.get('url', ''),
params=data,
headers={
'Authorization': 'Basic %s' % lending.config_ia_civicrm_api.get('auth', '')
})
contacts = r.status_code == 200 and r.json().get('values', None)
return contacts and contacts[0].get('contact_id')
|
def get_contact_id_by_username(username):
"""TODO: Use CiviCRM Explorer to replace with call to get contact_id by username"""
data = {
'entity': 'Contact',
'action': 'get',
'api_key': lending.config_ia_civicrm_api.get('api_key', ''),
'key': lending.config_ia_civicrm_api.get('site_key', ''),
'json': {
"sequential": 1,
CIVI_USERNAME: username
}
}
data['json'] = json.dumps(data['json']) # flatten the json field as a string
r = requests.get(
lending.config_ia_civicrm_api.get('url', ''),
params=data,
headers={
'Authorization': 'Basic %s' % lending.config_ia_civicrm_api.get('auth', '')
})
contacts = r.status_code == 200 and r.json().get('values', None)
return contacts and contacts[0].get('contact_id')
|
50,091 |
def brterm(H, a_op, spectra, sec_cutoff=0.1,
fock_basis=False, sparse_eigensolver=False, br_dtype='sparse'):
"""
Calculates the contribution of one coupling operator to the Bloch-Redfield
tensor.
Parameters
----------
H : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`
System Hamiltonian.
a_op : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`
The operator coupling to the environment. Must be hermitian.
spectra : :class:`Coefficient`, func, str
The corresponding bath spectra.
Can be a `Coefficient` using an 'w' args, a function of the
frequency or a string. The `SpectraCoefficient` can be used for
array based coefficient.
The spectra function can depend on ``t`` if the corresponding
``a_op`` is a :class:`QobjEvo`.
Example:
coefficient('w>0', args={"w": 0})
SpectraCoefficient(coefficient(array, tlist=...))
sec_cutoff : float {0.1}
Cutoff for secular approximation. Use ``-1`` if secular approximation
is not used when evaluating bath-coupling terms.
fock_basis : bool {False}
Whether to return the tensor in the input basis or the diagonalized
basis.
sparse_eigensolver : bool {False}
Whether to use the sparse eigensolver on the Hamiltonian.
br_dtype : ['sparse', 'dense', 'data']
Which data type to use when computing the brtensor.
With a cutoff 'sparse' is usually the most efficient.
Returns
-------
R, [evecs]: :class:`~Qobj`, :class:`~QobjEvo` or tuple
If ``fock_basis``, return the Bloch Redfield tensor in the outside
basis. Otherwise return the Bloch Redfield tensor in the diagonalized
Hamiltonian basis and the eigenvectors of the Hamiltonian as hstacked
column. The tensors and, if given, evecs, will be :obj:`~QobjEvo` if
the ``H`` and ``a_op`` is time dependent, :obj:`Qobj` otherwise.
"""
if isinstance(H, _EigenBasisTransform):
Hdiag = H
else:
Hdiag = _EigenBasisTransform(QobjEvo(H), sparse=sparse_eigensolver)
# convert spectra to Coefficient
if isinstance(spectra, str):
spectra = coefficient(spectra, args={'w': 0})
elif isinstance(spectra, InterCoefficient):
spectra = SpectraCoefficient(spectra)
elif isinstance(spectra, Coefficient):
pass
elif callable(spectra):
sig = inspect.signature(spectra)
if tuple(sig.parameters.keys()) == ("w",):
spectra = SpectraCoefficient(coefficient(spectra))
else:
spectra = coefficient(spectra, args={'w': 0})
else:
raise TypeError("a_ops's spectra not known")
sec_cutoff = sec_cutoff if sec_cutoff >= 0 else np.inf
R = QobjEvo(_BlochRedfieldElement(Hdiag, QobjEvo(a_op), spectra,
sec_cutoff, not fock_basis, dtype=br_dtype))
if (
((isinstance(H, _EigenBasisTransform) and H.isconstant)
or isinstance(H, Qobj))
and isinstance(a_op, Qobj)
):
R = R(0)
return R if fock_basis else (R, Hdiag.as_Qobj())
|
def brterm(H, a_op, spectra, sec_cutoff=0.1,
fock_basis=False, sparse_eigensolver=False, br_dtype='sparse'):
"""
Calculates the contribution of one coupling operator to the Bloch-Redfield
tensor.
Parameters
----------
H : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`
System Hamiltonian.
a_op : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`
The operator coupling to the environment. Must be hermitian.
spectra : :class:`Coefficient`, func, str
The corresponding bath spectra.
Can be a :class:`~Coefficient` using an 'w' args, a function of the
frequency or a string. The :class:`SpectraCoefficient` can be used for
array based coefficient.
The spectra can depend on ``t`` if the corresponding
``a_op`` is a :class:`QobjEvo`.
Example:
coefficient('w>0', args={"w": 0})
SpectraCoefficient(coefficient(array, tlist=...))
sec_cutoff : float {0.1}
Cutoff for secular approximation. Use ``-1`` if secular approximation
is not used when evaluating bath-coupling terms.
fock_basis : bool {False}
Whether to return the tensor in the input basis or the diagonalized
basis.
sparse_eigensolver : bool {False}
Whether to use the sparse eigensolver on the Hamiltonian.
br_dtype : ['sparse', 'dense', 'data']
Which data type to use when computing the brtensor.
With a cutoff 'sparse' is usually the most efficient.
Returns
-------
R, [evecs]: :class:`~Qobj`, :class:`~QobjEvo` or tuple
If ``fock_basis``, return the Bloch Redfield tensor in the outside
basis. Otherwise return the Bloch Redfield tensor in the diagonalized
Hamiltonian basis and the eigenvectors of the Hamiltonian as hstacked
column. The tensors and, if given, evecs, will be :obj:`~QobjEvo` if
the ``H`` and ``a_op`` is time dependent, :obj:`Qobj` otherwise.
"""
if isinstance(H, _EigenBasisTransform):
Hdiag = H
else:
Hdiag = _EigenBasisTransform(QobjEvo(H), sparse=sparse_eigensolver)
# convert spectra to Coefficient
if isinstance(spectra, str):
spectra = coefficient(spectra, args={'w': 0})
elif isinstance(spectra, InterCoefficient):
spectra = SpectraCoefficient(spectra)
elif isinstance(spectra, Coefficient):
pass
elif callable(spectra):
sig = inspect.signature(spectra)
if tuple(sig.parameters.keys()) == ("w",):
spectra = SpectraCoefficient(coefficient(spectra))
else:
spectra = coefficient(spectra, args={'w': 0})
else:
raise TypeError("a_ops's spectra not known")
sec_cutoff = sec_cutoff if sec_cutoff >= 0 else np.inf
R = QobjEvo(_BlochRedfieldElement(Hdiag, QobjEvo(a_op), spectra,
sec_cutoff, not fock_basis, dtype=br_dtype))
if (
((isinstance(H, _EigenBasisTransform) and H.isconstant)
or isinstance(H, Qobj))
and isinstance(a_op, Qobj)
):
R = R(0)
return R if fock_basis else (R, Hdiag.as_Qobj())
|
48,071 |
def from_frictionless_schema(schema):
"""Create a :class:`~pandera.schemas.DataFrameSchema` from a frictionless
json/yaml schema file on disk, or a frictionless schema already loaded
into memory.
Each field from the frictionless schema will be converted to a pandera
column specification using :class:`~pandera.io.FrictionlessFieldParser`
to map field characteristics to pandera column specifications.
:param frictionless_schema: the frictionless schema object (or a
string/Path to the location on disk of a schema specification) to
parse.
:returns: dataframe schema with frictionless field specs converted to
pandera column checks and constraints for use as normal.
:example:
>>> from pandera.io import from_frictionless_schema
>>>
>>> FRICTIONLESS_SCHEMA = {
... "fields": [
... {
... "name": "column_1",
... "type": "integer",
... "constraints": {"minimum": 10, "maximum": 99}
... }
... ],
... "primaryKey": "column_1"
... }
>>> schema = from_frictionless_schema(FRICTIONLESS_SCHEMA)
>>> schema.columns["column_1"].checks
[<Check in_range: in_range(10, 99)>]
>>> schema.columns["column_1"].required
True
>>> schema.columns["column_1"].allow_duplicates
False
"""
if not isinstance(schema, FrictionlessSchema):
schema = FrictionlessSchema(schema)
assembled_schema = {
"columns": {
field.name: FrictionlessFieldParser(
field, schema.primary_key
).to_pandera_column()
for field in schema.fields
},
"index": None,
"checks": None,
"coerce": True,
"strict": True,
}
return _deserialize_schema(assembled_schema)
|
def from_frictionless_schema(schema: FrictionlessSchema):
"""Create a :class:`~pandera.schemas.DataFrameSchema` from a frictionless
json/yaml schema file on disk, or a frictionless schema already loaded
into memory.
Each field from the frictionless schema will be converted to a pandera
column specification using :class:`~pandera.io.FrictionlessFieldParser`
to map field characteristics to pandera column specifications.
:param frictionless_schema: the frictionless schema object (or a
string/Path to the location on disk of a schema specification) to
parse.
:returns: dataframe schema with frictionless field specs converted to
pandera column checks and constraints for use as normal.
:example:
>>> from pandera.io import from_frictionless_schema
>>>
>>> FRICTIONLESS_SCHEMA = {
... "fields": [
... {
... "name": "column_1",
... "type": "integer",
... "constraints": {"minimum": 10, "maximum": 99}
... }
... ],
... "primaryKey": "column_1"
... }
>>> schema = from_frictionless_schema(FRICTIONLESS_SCHEMA)
>>> schema.columns["column_1"].checks
[<Check in_range: in_range(10, 99)>]
>>> schema.columns["column_1"].required
True
>>> schema.columns["column_1"].allow_duplicates
False
"""
if not isinstance(schema, FrictionlessSchema):
schema = FrictionlessSchema(schema)
assembled_schema = {
"columns": {
field.name: FrictionlessFieldParser(
field, schema.primary_key
).to_pandera_column()
for field in schema.fields
},
"index": None,
"checks": None,
"coerce": True,
"strict": True,
}
return _deserialize_schema(assembled_schema)
|
26,473 |
def match(command):
return ("Migrations are pending. To resolve this issue, run:" in command.output)
|
def match(command):
return "Migrations are pending. To resolve this issue, run:" in command.output
|
51,417 |
def map_blocks(func, darray):
"""
A version of dask's map_blocks for DataArrays.
Parameters
----------
func: callable
User-provided function that should accept DataArrays corresponding to one chunk.
darray: DataArray
Chunks of this array will be provided to 'func'. The function must not change
shape of the provided DataArray.
Returns
-------
DataArray
See Also
--------
dask.array.map_blocks
"""
def _wrapper(darray):
result = func(darray)
if not isinstance(result, type(darray)):
raise ValueError("Result is not the same type as input.")
if result.shape != darray.shape:
raise ValueError("Result does not have the same shape as input.")
return result
meta_array = DataArray(darray.data._meta, dims=darray.dims)
result_meta = func(meta_array)
name = "%s-%s" % (darray.name or func.__name__, dask.base.tokenize(darray))
slicers = get_chunk_slices(darray._to_temp_dataset())
dask_keys = list(dask.core.flatten(darray.__dask_keys__()))
graph = {
(name,)
+ (*key[1:],): (
_wrapper,
(
DataArray,
key,
{
dim_name: darray[dim_name][slicers[dim_name][index]]
for dim_name, index in zip(darray.dims, key[1:])
},
darray.dims,
),
)
for key in dask_keys
}
graph = HighLevelGraph.from_collections(name, graph, dependencies=[darray])
return DataArray(
dask.array.Array(graph, name, chunks=darray.chunks, meta=result_meta),
dims=darray.dims,
coords=darray.coords,
)
|
def map_blocks(func, darray):
"""
Apply a function across each chunk of the DataArray in parallel
This is similar to Dask Array's map_blocks function, but your function
receives a DataArray rather than a Numpy Array.
Parameters
----------
func: callable
User-provided function that should accept DataArrays corresponding to one chunk.
darray: DataArray
Chunks of this array will be provided to 'func'. The function must not change
shape of the provided DataArray.
Returns
-------
DataArray
See Also
--------
dask.array.map_blocks
"""
def _wrapper(darray):
result = func(darray)
if not isinstance(result, type(darray)):
raise ValueError("Result is not the same type as input.")
if result.shape != darray.shape:
raise ValueError("Result does not have the same shape as input.")
return result
meta_array = DataArray(darray.data._meta, dims=darray.dims)
result_meta = func(meta_array)
name = "%s-%s" % (darray.name or func.__name__, dask.base.tokenize(darray))
slicers = get_chunk_slices(darray._to_temp_dataset())
dask_keys = list(dask.core.flatten(darray.__dask_keys__()))
graph = {
(name,)
+ (*key[1:],): (
_wrapper,
(
DataArray,
key,
{
dim_name: darray[dim_name][slicers[dim_name][index]]
for dim_name, index in zip(darray.dims, key[1:])
},
darray.dims,
),
)
for key in dask_keys
}
graph = HighLevelGraph.from_collections(name, graph, dependencies=[darray])
return DataArray(
dask.array.Array(graph, name, chunks=darray.chunks, meta=result_meta),
dims=darray.dims,
coords=darray.coords,
)
|
39,419 |
def download_mount_damavand(load=True): # pragma: no cover
"""Download the Mount Damavand dataset.
Visualize 3D models of Damavand Volcano, Alborz, Iran. This is a 2D map
with the altitude embedded as ``'z'`` cell data within the
:class:`pyvista.Polydata`.
Originally posted at `banesullivan/damavand-volcano
<https://github.com/banesullivan/damavand-volcano>`_
Parameters
----------
load : bool, optional
Load the dataset after downloading it when ``True``. Set this
to ``False`` and only the filename will be returned.
Returns
-------
pyvista.PolyData or str
DataSet or filename depending on ``load``.
Examples
--------
Download the Damavand dataset and plot it after warping it by its altitude.
>>> from pyvista import examples
>>> dataset = download_mount_damavand()
>>> dataset = dataset.cell_data_to_point_data()
>>> dataset = dataset.warp_by_scalar('z', factor=2)
>>> dataset.plot(cmap='gist_earth', show_scalar_bar=False)
"""
return _download_and_read('AOI.Damavand.32639.vtp', load=load)
|
def download_mount_damavand(load=True): # pragma: no cover
"""Download the Mount Damavand dataset.
Visualize 3D models of Damavand Volcano, Alborz, Iran. This is a 2D map
with the altitude embedded as ``'z'`` cell data within the
:class:`pyvista.Polydata`.
Originally posted at `banesullivan/damavand-volcano
<https://github.com/banesullivan/damavand-volcano>`_
Parameters
----------
load : bool, optional
Load the dataset after downloading it when ``True``. Set this
to ``False`` and only the filename will be returned.
Returns
-------
pyvista.PolyData or str
DataSet or filename depending on ``load``.
Examples
--------
Download the Damavand dataset and plot it after warping it by its altitude.
>>> from pyvista import examples
>>> dataset = examples.download_mount_damavand()
>>> dataset = dataset.cell_data_to_point_data()
>>> dataset = dataset.warp_by_scalar('z', factor=2)
>>> dataset.plot(cmap='gist_earth', show_scalar_bar=False)
"""
return _download_and_read('AOI.Damavand.32639.vtp', load=load)
|
56,935 |
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
state: str = MISSING
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
.. versionchanged:: 2.0
``permissions``, ``guild``, ``redirect_uri``, ``scopes`` and ``state`` parameters
are now keyword-only.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot', 'applications.commands')``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
state: :class:`str`
The state to return after the authorization.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += '&scope=' + '+'.join(scopes or ('bot', 'applications.commands'))
if permissions is not MISSING:
url += f'&permissions={permissions.value}'
if guild is not MISSING:
url += f'&guild_id={guild.id}'
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += '&response_type=code&' + urlencode({'redirect_uri': redirect_uri})
if disable_guild_select:
url += '&disable_guild_select=true'
if state is not MISSING:
url += f'&state={state}'
return url
|
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
state: str = MISSING
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
.. versionchanged:: 2.0
``permissions``, ``guild``, ``redirect_uri``, ``scopes`` and ``state`` parameters
are now keyword-only.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot', 'applications.commands')``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
state: :class:`str`
The state to return after the authorization.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += '&scope=' + '+'.join(scopes or ('bot', 'applications.commands'))
if permissions is not MISSING:
url += f'&permissions={permissions.value}'
if guild is not MISSING:
url += f'&guild_id={guild.id}'
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += '&response_type=code&' + urlencode({'redirect_uri': redirect_uri})
if disable_guild_select:
url += '&disable_guild_select=true'
if state is not MISSING:
encoded = urlencode({'state': state})
url += f'&{encoded}'
return url
|
8,653 |
def get_configuration(options):
"""Get an instance of configuration from options.
This may raise a ``sopel.config.ConfigurationError`` of the file is an
invalid configuration file.
"""
config_name = options.config or 'default'
config_path = find_config(DEFAULT_HOMEDIR, config_name)
if not os.path.isfile(config_path):
print(
"Welcome to Sopel!\n"
"I can't seem to find the configuration file, "
"so let's generate it!\n")
if not config_path.endswith('.cfg'):
config_path = config_path + '.cfg'
config_path = _create_config(config_path)
bot_config = Config(config_path)
bot_config._is_daemonized = options.daemonize
return bot_config
|
def get_configuration(options):
"""Get an instance of configuration from options.
This may raise a ``sopel.config.ConfigurationError`` if the file is an
invalid configuration file.
"""
config_name = options.config or 'default'
config_path = find_config(DEFAULT_HOMEDIR, config_name)
if not os.path.isfile(config_path):
print(
"Welcome to Sopel!\n"
"I can't seem to find the configuration file, "
"so let's generate it!\n")
if not config_path.endswith('.cfg'):
config_path = config_path + '.cfg'
config_path = _create_config(config_path)
bot_config = Config(config_path)
bot_config._is_daemonized = options.daemonize
return bot_config
|
4,190 |
def make_resolution_matrix(fwd, invop, method, lambda2):
"""Compute resolution matrix for linear inverse operator.
Parameters
----------
fwd: forward solution
Used to get leadfield matrix.
invop: inverse operator
Inverse operator to get inverse matrix.
pick_ori='normal' will be selected.
method: string
Inverse method to use (MNE, dSPM, sLORETA).
lambda2: float
The regularisation parameter.
Returns
-------
resmat: 2D numpy array.
Resolution matrix (inverse matrix times leadfield).
"""
# make sure forward and inverse operator match
fwd = _convert_forward_match_inv(fwd, invop)
# don't include bad channels
# only use good channels from inverse operator
bads_inv = invop['info']['bads']
# good channels
ch_names = [c for c in invop['info']['ch_names'] if (c not in bads_inv)]
# get leadfield matrix from forward solution
leadfield = _pick_leadfield(fwd['sol']['data'], fwd, ch_names)
invmat = _get_matrix_from_inverse_operator(invop, fwd,
method=method, lambda2=lambda2)
resmat = invmat.dot(leadfield)
dims = resmat.shape
print('Dimensions of resolution matrix: %d by %d.' % (dims[0], dims[1]))
return resmat
|
def make_resolution_matrix(fwd, invop, method, lambda2):
"""Compute resolution matrix for linear inverse operator.
Parameters
----------
fwd: forward solution
Used to get leadfield matrix.
invop: inverse operator
Inverse operator to get inverse matrix.
pick_ori='normal' will be selected.
method: str
Inverse method to use (MNE, dSPM, sLORETA).
lambda2: float
The regularisation parameter.
Returns
-------
resmat: 2D numpy array.
Resolution matrix (inverse matrix times leadfield).
"""
# make sure forward and inverse operator match
fwd = _convert_forward_match_inv(fwd, invop)
# don't include bad channels
# only use good channels from inverse operator
bads_inv = invop['info']['bads']
# good channels
ch_names = [c for c in invop['info']['ch_names'] if (c not in bads_inv)]
# get leadfield matrix from forward solution
leadfield = _pick_leadfield(fwd['sol']['data'], fwd, ch_names)
invmat = _get_matrix_from_inverse_operator(invop, fwd,
method=method, lambda2=lambda2)
resmat = invmat.dot(leadfield)
dims = resmat.shape
print('Dimensions of resolution matrix: %d by %d.' % (dims[0], dims[1]))
return resmat
|
10,988 |
def check_site_id_type(**kwargs):
errors = []
if hasattr(settings, 'SITE_ID'):
if not isinstance(settings.SITE_ID, int):
errors.append(
Error(
"``SITE_ID`` must be of type int, got %s" % type(settings.SITE_ID),
obj="sites.E003"
)
)
return errors
|
def check_site_id_type(**kwargs):
errors = []
if hasattr(settings, 'SITE_ID'):
if not isinstance(settings.SITE_ID, int):
errors.append(
Error(
'SITE_ID must be an integer',
obj="sites.E003"
)
)
return errors
|
38,876 |
def evaluate_from_args(args: argparse.Namespace) -> Dict[str, Any]:
common_logging.FILE_FRIENDLY_LOGGING = args.file_friendly_logging
# Disable some of the more verbose logging statements
logging.getLogger("allennlp.common.params").disabled = True
logging.getLogger("allennlp.nn.initializers").disabled = True
logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)
# Load from archive
archive = load_archive(
args.archive_file,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides,
)
config = deepcopy(archive.config)
prepare_environment(config)
model = archive.model
model.eval()
# Load the evaluation data
dataset_reader = archive.validation_dataset_reader
# split files
evaluation_data_path_list = args.input_file.split(":")
if args.output_file is not None:
output_file_list = args.output_file.split(":")
assert len(output_file_list) == len(
evaluation_data_path_list
), "number of output path must be equal number of dataset "
if args.predictions_output_file is not None:
predictions_output_file_list = args.predictions_output_file.split(":")
assert len(predictions_output_file_list) == len(
evaluation_data_path_list
), "number of predictions_output_file path must be equal number of dataset "
# output file
output_file_path = None
predictions_output_file_path = None
for index in range(len(evaluation_data_path_list)):
config = deepcopy(archive.config)
evaluation_data_path = evaluation_data_path_list[index]
if args.output_file is not None:
output_file_path = output_file_list[index]
if args.predictions_output_file is not None:
predictions_output_file_path = predictions_output_file_list[index]
logger.info("Reading evaluation data from %s", evaluation_data_path)
data_loader_params = config.get("validation_data_loader", None)
if data_loader_params is None:
data_loader_params = config.get("data_loader")
if args.batch_size:
data_loader_params["batch_size"] = args.batch_size
data_loader = DataLoader.from_params(
params=data_loader_params, reader=dataset_reader, data_path=evaluation_data_path
)
embedding_sources = (
json.loads(args.embedding_sources_mapping) if args.embedding_sources_mapping else {}
)
if args.extend_vocab:
logger.info("Vocabulary is being extended with test instances.")
model.vocab.extend_from_instances(instances=data_loader.iter_instances())
model.extend_embedder_vocab(embedding_sources)
data_loader.index_with(model.vocab)
metrics = evaluate(
model,
data_loader,
args.cuda_device,
args.batch_weight_key,
output_file=output_file_path,
predictions_output_file=predictions_output_file_path,
)
logger.info("Finished evaluating.")
return metrics
|
def evaluate_from_args(args: argparse.Namespace) -> Dict[str, Any]:
common_logging.FILE_FRIENDLY_LOGGING = args.file_friendly_logging
# Disable some of the more verbose logging statements
logging.getLogger("allennlp.common.params").disabled = True
logging.getLogger("allennlp.nn.initializers").disabled = True
logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)
# Load from archive
archive = load_archive(
args.archive_file,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides,
)
config = deepcopy(archive.config)
prepare_environment(config)
model = archive.model
model.eval()
# Load the evaluation data
dataset_reader = archive.validation_dataset_reader
# split files
evaluation_data_path_list = args.input_file.split(":")
if args.output_file is not None:
output_file_list = args.output_file.split(":")
assert len(output_file_list) == len(
evaluation_data_path_list
), "The number of `output_file` paths must be equal to the number of datasets being evaluated. "
if args.predictions_output_file is not None:
predictions_output_file_list = args.predictions_output_file.split(":")
assert len(predictions_output_file_list) == len(
evaluation_data_path_list
), "number of predictions_output_file path must be equal number of dataset "
# output file
output_file_path = None
predictions_output_file_path = None
for index in range(len(evaluation_data_path_list)):
config = deepcopy(archive.config)
evaluation_data_path = evaluation_data_path_list[index]
if args.output_file is not None:
output_file_path = output_file_list[index]
if args.predictions_output_file is not None:
predictions_output_file_path = predictions_output_file_list[index]
logger.info("Reading evaluation data from %s", evaluation_data_path)
data_loader_params = config.get("validation_data_loader", None)
if data_loader_params is None:
data_loader_params = config.get("data_loader")
if args.batch_size:
data_loader_params["batch_size"] = args.batch_size
data_loader = DataLoader.from_params(
params=data_loader_params, reader=dataset_reader, data_path=evaluation_data_path
)
embedding_sources = (
json.loads(args.embedding_sources_mapping) if args.embedding_sources_mapping else {}
)
if args.extend_vocab:
logger.info("Vocabulary is being extended with test instances.")
model.vocab.extend_from_instances(instances=data_loader.iter_instances())
model.extend_embedder_vocab(embedding_sources)
data_loader.index_with(model.vocab)
metrics = evaluate(
model,
data_loader,
args.cuda_device,
args.batch_weight_key,
output_file=output_file_path,
predictions_output_file=predictions_output_file_path,
)
logger.info("Finished evaluating.")
return metrics
|
6,966 |
def get_versions(doc):
number_of_versions = frappe.db.get_single_value("System Settings", "number_of_versions")
if number_of_versions == 0:
number_of_versions = 10
return frappe.get_all('Version', filters=dict(ref_doctype=doc.doctype, docname=doc.name),
fields=['name', 'owner', 'creation', 'data'], limit=number_of_versions, order_by='creation desc')
|
def get_versions(doc):
number_of_versions = frappe.db.get_single_value("System Settings", "number_of_versions") or 10
return frappe.get_all('Version', filters=dict(ref_doctype=doc.doctype, docname=doc.name),
fields=['name', 'owner', 'creation', 'data'], limit=number_of_versions, order_by='creation desc')
|
13,474 |
def scrub_status(pool):
"""
Returns the raw statistics per-device (-R option) of the ongoing or last
known btrfs scrub. Works by parsing the output of the following command:
btrfs scrub status -R <mount-point>
:param pool: pool object
:return: dictionary indexed via 'status' and if a finished or halted, or
cancelld scrub is indicated then the duration of that scrub is added as
value to added index 'duration'. In all 'status' cases bar 'unknown',
data_bytes_scrubbed is passed as value to index 'kb_scrubbed' and all
other -R invoked details are returned as key value pairs.
"""
stats = {'status': 'unknown', }
mnt_pt = mount_root(pool)
out3, err3, rc3 = run_command([BTRFS, 'version'])
btrfsProgsVers = out3[0].strip().split()[1]
# Based on version of btrfs progs, set the offset to parse properly
if parse_version(btrfsProgsVers) < parse_version("v5.1.2"):
statOffset = 1
durOffset = 1
fieldOffset = 2
else:
statOffset = 2
durOffset = 3
fieldOffset = 4
out, err, rc = run_command([BTRFS, 'scrub', 'status', '-R', mnt_pt])
if err != [''] and len(err) > 0:
if err[0] == "WARNING: failed to read status: Connection reset by " \
"peer":
stats['status'] = 'conn-reset'
return stats
if len(out) > 1:
if re.search('interrupted', out[statOffset]) is not None:
stats['status'] = 'halted'
# extract the duration from towards the end of the first line eg:
# "... 2017, interrupted after 00:00:09, not running"
dfields = out[durOffset].split()[-1].strip(',').split(':')
stats['duration'] = ((int(dfields[0]) * 60 * 60) +
(int(dfields[1]) * 60) + int(dfields[2]))
elif re.search('running', out[statOffset]) is not None:
stats['status'] = 'running'
elif re.search('finished', out[statOffset]) is not None:
stats['status'] = 'finished'
# extract the duration from the end of the first line eg:
# "... 2017 and finished after 00:00:16"
dfields = out[durOffset].split()[-1].split(':')
stats['duration'] = ((int(dfields[0]) * 60 * 60) +
(int(dfields[1]) * 60) + int(dfields[2]))
elif re.search('aborted', out[statOffset]) is not None:
stats['status'] = 'cancelled'
# extract the duration from the end of the first line eg:
# "... 2017 and was aborted after 00:04:56"
# TODO: we have code duplication here re finished clause above.
dfields = out[durOffset].split()[-1].split(':')
stats['duration'] = ((int(dfields[0]) * 60 * 60) +
(int(dfields[1]) * 60) + int(dfields[2]))
else:
return stats
else: # we have an unknown status as out is 0 or 1 lines long.
return stats
for l in out[fieldOffset:-1]:
fields = l.strip().split(': ')
if fields[0] == 'data_bytes_scrubbed':
stats['kb_scrubbed'] = int(fields[1]) / 1024
else:
stats[fields[0]] = int(fields[1])
# If we are on the newer version of btrfs-progs, pull additional stats
if parse_version(btrfsProgsVers) >= parse_version("v5.1.2"):
out2, err2, rc2 = run_command([BTRFS, 'scrub', 'status', mnt_pt])
if re.search('running', out2[2]) is not None:
# time_left
fields2 = out2[4].split()[-1].split(':')
stats['time_left'] = ((int(fields2[0]) * 60 * 60) + (int(fields2[1]) * 60) + int(fields2[2]))
# eta
fields3 = out2[5].strip().split(': ')
dateFormat = "%a %b %d %H:%M:%S %Y"
stats['eta'] = datetime.strptime(fields3[1].strip(), dateFormat)
# rate
fields4 = out2[8].strip().split(': ')
stats['rate'] = fields4[1].strip()
else:
fields5 = out2[5].strip().split(': ')
stats['rate'] = fields5[1].strip()
return stats
|
def scrub_status(pool):
"""
Returns the raw statistics per-device (-R option) of the ongoing or last
known btrfs scrub. Works by parsing the output of the following command:
btrfs scrub status -R <mount-point>
:param pool: pool object
:return: dictionary indexed via 'status' and if a finished or halted, or
cancelld scrub is indicated then the duration of that scrub is added as
value to added index 'duration'. In all 'status' cases bar 'unknown',
data_bytes_scrubbed is passed as value to index 'kb_scrubbed' and all
other -R invoked details are returned as key value pairs.
"""
stats = {'status': 'unknown', }
mnt_pt = mount_root(pool)
out3, err3, rc3 = run_command([BTRFS, 'version'])
btrfsProgsVers = out3[0].strip().split()[1]
# Based on version of btrfs progs, set the offset to parse properly
if parse_version(btrfsProgsVers) < parse_version("v5.1.2"):
statOffset = 1
durOffset = 1
fieldOffset = 2
else:
statOffset = 2
durOffset = 3
fieldOffset = 4
out, err, rc = run_command([BTRFS, 'scrub', 'status', '-R', mnt_pt])
if err != [''] and len(err) > 0:
if err[0] == "WARNING: failed to read status: Connection reset by " \
"peer":
stats['status'] = 'conn-reset'
return stats
if len(out) > 1:
if re.search('interrupted', out[statOffset]) is not None:
stats['status'] = 'halted'
# extract the duration from towards the end of the first line eg:
# "... 2017, interrupted after 00:00:09, not running"
dfields = out[durOffset].split()[-haltedDurOffset].strip(',').split(':')
stats['duration'] = ((int(dfields[0]) * 60 * 60) +
(int(dfields[1]) * 60) + int(dfields[2]))
elif re.search('running', out[statOffset]) is not None:
stats['status'] = 'running'
elif re.search('finished', out[statOffset]) is not None:
stats['status'] = 'finished'
# extract the duration from the end of the first line eg:
# "... 2017 and finished after 00:00:16"
dfields = out[durOffset].split()[-1].split(':')
stats['duration'] = ((int(dfields[0]) * 60 * 60) +
(int(dfields[1]) * 60) + int(dfields[2]))
elif re.search('aborted', out[statOffset]) is not None:
stats['status'] = 'cancelled'
# extract the duration from the end of the first line eg:
# "... 2017 and was aborted after 00:04:56"
# TODO: we have code duplication here re finished clause above.
dfields = out[durOffset].split()[-1].split(':')
stats['duration'] = ((int(dfields[0]) * 60 * 60) +
(int(dfields[1]) * 60) + int(dfields[2]))
else:
return stats
else: # we have an unknown status as out is 0 or 1 lines long.
return stats
for l in out[fieldOffset:-1]:
fields = l.strip().split(': ')
if fields[0] == 'data_bytes_scrubbed':
stats['kb_scrubbed'] = int(fields[1]) / 1024
else:
stats[fields[0]] = int(fields[1])
# If we are on the newer version of btrfs-progs, pull additional stats
if parse_version(btrfsProgsVers) >= parse_version("v5.1.2"):
out2, err2, rc2 = run_command([BTRFS, 'scrub', 'status', mnt_pt])
if re.search('running', out2[2]) is not None:
# time_left
fields2 = out2[4].split()[-1].split(':')
stats['time_left'] = ((int(fields2[0]) * 60 * 60) + (int(fields2[1]) * 60) + int(fields2[2]))
# eta
fields3 = out2[5].strip().split(': ')
dateFormat = "%a %b %d %H:%M:%S %Y"
stats['eta'] = datetime.strptime(fields3[1].strip(), dateFormat)
# rate
fields4 = out2[8].strip().split(': ')
stats['rate'] = fields4[1].strip()
else:
fields5 = out2[5].strip().split(': ')
stats['rate'] = fields5[1].strip()
return stats
|
28,605 |
def plot_ts(
idata,
y,
x=None,
y_hat=None,
y_holdout=None,
y_forecasts=None,
x_holdout=None,
plot_dim=None,
holdout_dim=None,
num_samples=100,
backend=None,
backend_kwargs=None,
y_kwargs=None,
y_hat_plot_kwargs=None,
y_mean_plot_kwargs=None,
vline_kwargs=None,
textsize=None,
figsize=None,
legend=True,
axes=None,
show=None,
):
"""Plot timeseries data.
Parameters
----------
idata : InferenceData
InferenceData object.
y : str
Variable name from ``observed_data``.
Values to be plotted on y-axis before holdout.
x : str, Optional
Values to be plotted on x-axis before holdout.
If None, coordinates of ``y`` dims is chosen.
y_hat : str, optional
Variable name from ``posterior_predictive``.
Assumed to be of shape ``(chain, draw, *y_dims)``.
y_holdout : str, optional
Variable name from ``observed_data``.
It represents the observed data after the holdout period.
Useful while testing the model, when you want to compare
observed test data with predictions/forecasts.
y_forecasts : str, optional
Variable name from ``posterior_predictive``.
It represents forecasts (posterior predictive) values after holdout period.
Useful to compare observed vs predictions/forecasts.
Assumed shape ``(chain, draw, *shape)``.
x_holdout : str, Defaults to coordinates of ``y``.
Variable name from ``constant_data``.
If None, coordinates of ``y_holdout`` or
coordinates of ``y_forecast`` (either of the two available) is chosen.
plot_dim: str, Optional
Should be present in ``y.dims``.
Necessary for selection of ``x`` if ``x`` is None and ``y`` is multidimensional.
holdout_dim: str, Optional
Should be present in ``y_holdout.dims`` or ``y_forecats.dims``.
Necessary to choose ``x_holdout`` if ``x`` is None and
if ``y_holdout`` or ``y_forecasts`` is multidimensional.
num_samples : int, default 100
Number of posterior predictive samples drawn from ``y_hat`` and ``y_forecasts``.
backend : {"matplotlib", "bokeh"}, default "matplotlib"
Select plotting backend.
y_kwargs : dict, optional
Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib.
y_hat_plot_kwargs : dict, optional
Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib.
y_mean_plot_kwargs : dict, optional
Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib.
vline_kwargs : dict, optional
Passed to :meth:`mpl:matplotlib.axes.Axes.axvline` in matplotlib.
backend_kwargs : dict, optional
These are kwargs specific to the backend being used. Passed to
:func: `mpl:matplotlib.pyplot.subplots`.
figsize : tuple, optional
Figure size. If None, it will be defined automatically.
textsize : float, optional
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
Returns
-------
axes: matplotlib axes or bokeh figures.
See Also
--------
plot_lm : Posterior predictive and mean plots for regression-like data.
plot_ppc : Plot for posterior/prior predictive checks.
Examples
--------
Plot timeseries default plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> nchains, ndraws = (4, 500)
>>> obs_data = {
... "y": 2 * np.arange(1, 9) + 3,
... "z": 2 * np.arange(8, 12) + 3,
... }
>>> posterior_predictive = {
... "y": np.random.normal(
... (obs_data["y"] * 1.2) - 3, size=(nchains, ndraws, len(obs_data["y"]))
... ),
... "z": np.random.normal(
... (obs_data["z"] * 1.2) - 3, size=(nchains, ndraws, len(obs_data["z"]))
... ),
... }
>>> idata = az.from_dict(
... observed_data=obs_data,
... posterior_predictive=posterior_predictive,
... coords={"obs_dim": np.arange(1, 9), "pred_dim": np.arange(8, 12)},
... dims={"y": ["obs_dim"], "z": ["pred_dim"]},
... )
>>> ax = az.plot_ts(idata=idata, y="y", y_holdout="z")
Plot timeseries multidim plot
.. plot::
:context: close-figs
>>> ndim1, ndim2 = (5, 7)
>>> data = {
... "y": np.random.normal(size=(ndim1, ndim2)),
... "z": np.random.normal(size=(ndim1, ndim2)),
... }
>>> posterior_predictive = {
... "y": np.random.randn(nchains, ndraws, ndim1, ndim2),
... "z": np.random.randn(nchains, ndraws, ndim1, ndim2),
... }
>>> const_data = {"x": np.arange(1, 6), "x_pred": np.arange(5, 10)}
>>> idata = az.from_dict(
... observed_data=data,
... posterior_predictive=posterior_predictive,
... constant_data=const_data,
... dims={
... "y": ["dim1", "dim2"],
... "z": ["holdout_dim1", "holdout_dim2"],
... },
... coords={
... "dim1": range(ndim1),
... "dim2": range(ndim2),
... "holdout_dim1": range(ndim1 - 1, ndim1 + 4),
... "holdout_dim2": range(ndim2 - 1, ndim2 + 6),
... },
... )
>>> az.plot_ts(
... idata=idata,
... y="y",
... plot_dim="dim1",
... y_holdout="z",
... holdout_dim="holdout_dim1",
... )
"""
# Assign default values if none is provided
y_hat = y if y_hat is None and isinstance(y, str) else y_hat
y_forecasts = y_holdout if y_forecasts is None and isinstance(y_holdout, str) else y_forecasts
# holdout_dim = plot_dim if holdout_dim is None and plot_dim is not None else holdout_dim
if isinstance(y, str):
y = idata.observed_data[y]
if isinstance(y_holdout, str):
y_holdout = idata.observed_data[y_holdout]
if len(y.dims) > 1 and plot_dim is None:
raise ValueError("Argument plot_dim is needed in case of multidimensional data")
if y_holdout is not None and len(y_holdout.dims) > 1 and holdout_dim is None:
raise ValueError("Argument holdout_dim is needed in case of multidimensional data")
# Assigning values to x
x_var_names = None
if isinstance(x, str):
x = idata.constant_data[x]
elif isinstance(x, tuple):
x_var_names = x
x = idata.constant_data
elif x is None:
if plot_dim is None:
x = y.coords[y.dims[0]]
else:
x = y.coords[plot_dim]
# If posterior_predictive is present in idata and y_hat is there, get its values
if isinstance(y_hat, str):
if "posterior_predictive" not in idata.groups():
warnings.warn("posterior_predictive not found in idata", UserWarning)
y_hat = None
elif hasattr(idata.posterior_predictive, y_hat):
y_hat = idata.posterior_predictive[y_hat]
else:
warnings.warn("y_hat not found in posterior_predictive", UserWarning)
y_hat = None
# If posterior_predictive is present in idata and y_forecasts is there, get its values
x_holdout_var_names = None
if isinstance(y_forecasts, str):
if "posterior_predictive" not in idata.groups():
warnings.warn("posterior_predictive not found in idata", UserWarning)
y_forecasts = None
elif hasattr(idata.posterior_predictive, y_forecasts):
y_forecasts = idata.posterior_predictive[y_forecasts]
else:
warnings.warn("y_hat not found in posterior_predictive", UserWarning)
y_forecasts = None
# Assign values to y_holdout
if isinstance(y_holdout, str):
y_holdout = idata.observed_data[y_holdout]
# Assign values to x_holdout.
if y_holdout is not None or y_forecasts is not None:
if x_holdout is None:
if holdout_dim is None:
if y_holdout is None:
x_holdout = y_forecasts.coords[y_forecasts.dims[-1]]
else:
x_holdout = y_holdout.coords[y_holdout.dims[-1]]
else:
if y_holdout is None:
x_holdout = y_forecasts.coords[holdout_dim]
else:
x_holdout = y_holdout.coords[holdout_dim]
elif isinstance(x_holdout, str):
x_holdout = idata.constant_data[x_holdout]
elif isinstance(x_holdout, tuple):
x_holdout_var_names = x_holdout
x_holdout = idata.constant_data
# Choose dims to generate y plotters
if plot_dim is None:
skip_dims = list(y.dims)
elif isinstance(plot_dim, str):
skip_dims = [plot_dim]
elif isinstance(plot_dim, tuple):
skip_dims = list(plot_dim)
# Choose dims to generate y_holdout plotters
if holdout_dim is None:
if y_holdout is not None:
skip_holdout_dims = list(y_holdout.dims)
elif y_forecasts is not None:
skip_holdout_dims = list(y_forecasts.dims)
elif isinstance(holdout_dim, str):
skip_holdout_dims = [holdout_dim]
elif isinstance(holdout_dim, tuple):
skip_holdout_dims = list(holdout_dim)
# Compulsory plotters
y_plotters = list(
xarray_var_iter(
y,
skip_dims=set(skip_dims),
combined=True,
)
)
# Compulsory plotters
x_plotters = list(
xarray_var_iter(
x,
var_names=x_var_names,
skip_dims=set(x.dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
len_y = len(y_plotters)
len_x = len(x_plotters)
length_plotters = len_x * len_y
y_plotters = np.tile(y_plotters, (len_x, 1))
x_plotters = np.tile(x_plotters, (len_y, 1))
# Generate plotters for all the available data
y_mean_plotters = None
y_hat_plotters = None
if y_hat is not None:
total_samples = y_hat.sizes["chain"] * y_hat.sizes["draw"]
pp_sample_ix = np.random.choice(total_samples, size=num_samples, replace=False)
y_hat_satcked = y_hat.stack(__sample__=("chain", "draw"))[..., pp_sample_ix]
y_hat_plotters = list(
xarray_var_iter(
y_hat_satcked,
skip_dims=set(skip_dims + ["__sample__"]),
combined=True,
)
)
y_mean = y_hat.mean(("chain", "draw"))
y_mean_plotters = list(
xarray_var_iter(
y_mean,
skip_dims=set(skip_dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
y_hat_plotters = np.tile(y_hat_plotters, (len_x, 1))
y_mean_plotters = np.tile(y_mean_plotters, (len_x, 1))
y_holdout_plotters = None
x_holdout_plotters = None
if y_holdout is not None:
y_holdout_plotters = list(
xarray_var_iter(
y_holdout,
skip_dims=set(skip_holdout_dims),
combined=True,
)
)
x_holdout_plotters = list(
xarray_var_iter(
x_holdout,
var_names=x_holdout_var_names,
skip_dims=set(x_holdout.dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
y_holdout_plotters = np.tile(y_holdout_plotters, (len_x, 1))
x_holdout_plotters = np.tile(x_holdout_plotters, (len_y, 1))
y_forecasts_plotters = None
y_forecasts_mean_plotters = None
if y_forecasts is not None:
total_samples = y_forecasts.sizes["chain"] * y_forecasts.sizes["draw"]
pp_sample_ix = np.random.choice(total_samples, size=num_samples, replace=False)
y_forecasts_satcked = y_forecasts.stack(__sample__=("chain", "draw"))[..., pp_sample_ix]
y_forecasts_plotters = list(
xarray_var_iter(
y_forecasts_satcked,
skip_dims=set(skip_holdout_dims + ["__sample__"]),
combined=True,
)
)
y_forecasts_mean = y_forecasts.mean(("chain", "draw"))
y_forecasts_mean_plotters = list(
xarray_var_iter(
y_forecasts_mean,
skip_dims=set(skip_holdout_dims),
combined=True,
)
)
x_holdout_plotters = list(
xarray_var_iter(
x_holdout,
var_names=x_holdout_var_names,
skip_dims=set(x_holdout.dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
y_forecasts_mean_plotters = np.tile(y_forecasts_mean_plotters, (len_x, 1))
y_forecasts_plotters = np.tile(y_forecasts_plotters, (len_x, 1))
x_holdout_plotters = np.tile(x_holdout_plotters, (len_y, 1))
rows, cols = default_grid(length_plotters)
tsplot_kwargs = dict(
x_plotters=x_plotters,
y_plotters=y_plotters,
y_mean_plotters=y_mean_plotters,
y_hat_plotters=y_hat_plotters,
y_holdout_plotters=y_holdout_plotters,
x_holdout_plotters=x_holdout_plotters,
y_forecasts_plotters=y_forecasts_plotters,
y_forecasts_mean_plotters=y_forecasts_mean_plotters,
num_samples=num_samples,
length_plotters=length_plotters,
rows=rows,
cols=cols,
backend_kwargs=backend_kwargs,
y_kwargs=y_kwargs,
y_hat_plot_kwargs=y_hat_plot_kwargs,
y_mean_plot_kwargs=y_mean_plot_kwargs,
vline_kwargs=vline_kwargs,
textsize=textsize,
figsize=figsize,
legend=legend,
axes=axes,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
plot = get_plotting_function("plot_ts", "tsplot", backend)
ax = plot(**tsplot_kwargs)
return ax
|
def plot_ts(
idata,
y,
x=None,
y_hat=None,
y_holdout=None,
y_forecasts=None,
x_holdout=None,
plot_dim=None,
holdout_dim=None,
num_samples=100,
backend=None,
backend_kwargs=None,
y_kwargs=None,
y_hat_plot_kwargs=None,
y_mean_plot_kwargs=None,
vline_kwargs=None,
textsize=None,
figsize=None,
legend=True,
axes=None,
show=None,
):
"""Plot timeseries data.
Parameters
----------
idata : InferenceData
InferenceData object.
y : str
Variable name from ``observed_data``.
Values to be plotted on y-axis before holdout.
x : str, Optional
Values to be plotted on x-axis before holdout.
If None, coordinates of ``y`` dims is chosen.
y_hat : str, optional
Variable name from ``posterior_predictive``.
Assumed to be of shape ``(chain, draw, *y_dims)``.
y_holdout : str, optional
Variable name from ``observed_data``.
It represents the observed data after the holdout period.
Useful while testing the model, when you want to compare
observed test data with predictions/forecasts.
y_forecasts : str, optional
Variable name from ``posterior_predictive``.
It represents forecasts (posterior predictive) values after holdout period.
Useful to compare observed vs predictions/forecasts.
Assumed shape ``(chain, draw, *shape)``.
x_holdout : str, Defaults to coordinates of ``y``.
Variable name from ``constant_data``.
If None, coordinates of ``y_holdout`` or
coordinates of ``y_forecast`` (either of the two available) is chosen.
plot_dim: str, Optional
Should be present in ``y.dims``.
Necessary for selection of ``x`` if ``x`` is None and ``y`` is multidimensional.
holdout_dim: str, Optional
Should be present in ``y_holdout.dims`` or ``y_forecats.dims``.
Necessary to choose ``x_holdout`` if ``x`` is None and
if ``y_holdout`` or ``y_forecasts`` is multidimensional.
num_samples : int, default 100
Number of posterior predictive samples drawn from ``y_hat`` and ``y_forecasts``.
backend : {"matplotlib", "bokeh"}, default "matplotlib"
Select plotting backend.
y_kwargs : dict, optional
Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib.
y_hat_plot_kwargs : dict, optional
Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib.
y_mean_plot_kwargs : dict, optional
Passed to :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib.
vline_kwargs : dict, optional
Passed to :meth:`mpl:matplotlib.axes.Axes.axvline` in matplotlib.
backend_kwargs : dict, optional
These are kwargs specific to the backend being used. Passed to
:func:`matplotlib.pyplot.subplots`.
figsize : tuple, optional
Figure size. If None, it will be defined automatically.
textsize : float, optional
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
Returns
-------
axes: matplotlib axes or bokeh figures.
See Also
--------
plot_lm : Posterior predictive and mean plots for regression-like data.
plot_ppc : Plot for posterior/prior predictive checks.
Examples
--------
Plot timeseries default plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> nchains, ndraws = (4, 500)
>>> obs_data = {
... "y": 2 * np.arange(1, 9) + 3,
... "z": 2 * np.arange(8, 12) + 3,
... }
>>> posterior_predictive = {
... "y": np.random.normal(
... (obs_data["y"] * 1.2) - 3, size=(nchains, ndraws, len(obs_data["y"]))
... ),
... "z": np.random.normal(
... (obs_data["z"] * 1.2) - 3, size=(nchains, ndraws, len(obs_data["z"]))
... ),
... }
>>> idata = az.from_dict(
... observed_data=obs_data,
... posterior_predictive=posterior_predictive,
... coords={"obs_dim": np.arange(1, 9), "pred_dim": np.arange(8, 12)},
... dims={"y": ["obs_dim"], "z": ["pred_dim"]},
... )
>>> ax = az.plot_ts(idata=idata, y="y", y_holdout="z")
Plot timeseries multidim plot
.. plot::
:context: close-figs
>>> ndim1, ndim2 = (5, 7)
>>> data = {
... "y": np.random.normal(size=(ndim1, ndim2)),
... "z": np.random.normal(size=(ndim1, ndim2)),
... }
>>> posterior_predictive = {
... "y": np.random.randn(nchains, ndraws, ndim1, ndim2),
... "z": np.random.randn(nchains, ndraws, ndim1, ndim2),
... }
>>> const_data = {"x": np.arange(1, 6), "x_pred": np.arange(5, 10)}
>>> idata = az.from_dict(
... observed_data=data,
... posterior_predictive=posterior_predictive,
... constant_data=const_data,
... dims={
... "y": ["dim1", "dim2"],
... "z": ["holdout_dim1", "holdout_dim2"],
... },
... coords={
... "dim1": range(ndim1),
... "dim2": range(ndim2),
... "holdout_dim1": range(ndim1 - 1, ndim1 + 4),
... "holdout_dim2": range(ndim2 - 1, ndim2 + 6),
... },
... )
>>> az.plot_ts(
... idata=idata,
... y="y",
... plot_dim="dim1",
... y_holdout="z",
... holdout_dim="holdout_dim1",
... )
"""
# Assign default values if none is provided
y_hat = y if y_hat is None and isinstance(y, str) else y_hat
y_forecasts = y_holdout if y_forecasts is None and isinstance(y_holdout, str) else y_forecasts
# holdout_dim = plot_dim if holdout_dim is None and plot_dim is not None else holdout_dim
if isinstance(y, str):
y = idata.observed_data[y]
if isinstance(y_holdout, str):
y_holdout = idata.observed_data[y_holdout]
if len(y.dims) > 1 and plot_dim is None:
raise ValueError("Argument plot_dim is needed in case of multidimensional data")
if y_holdout is not None and len(y_holdout.dims) > 1 and holdout_dim is None:
raise ValueError("Argument holdout_dim is needed in case of multidimensional data")
# Assigning values to x
x_var_names = None
if isinstance(x, str):
x = idata.constant_data[x]
elif isinstance(x, tuple):
x_var_names = x
x = idata.constant_data
elif x is None:
if plot_dim is None:
x = y.coords[y.dims[0]]
else:
x = y.coords[plot_dim]
# If posterior_predictive is present in idata and y_hat is there, get its values
if isinstance(y_hat, str):
if "posterior_predictive" not in idata.groups():
warnings.warn("posterior_predictive not found in idata", UserWarning)
y_hat = None
elif hasattr(idata.posterior_predictive, y_hat):
y_hat = idata.posterior_predictive[y_hat]
else:
warnings.warn("y_hat not found in posterior_predictive", UserWarning)
y_hat = None
# If posterior_predictive is present in idata and y_forecasts is there, get its values
x_holdout_var_names = None
if isinstance(y_forecasts, str):
if "posterior_predictive" not in idata.groups():
warnings.warn("posterior_predictive not found in idata", UserWarning)
y_forecasts = None
elif hasattr(idata.posterior_predictive, y_forecasts):
y_forecasts = idata.posterior_predictive[y_forecasts]
else:
warnings.warn("y_hat not found in posterior_predictive", UserWarning)
y_forecasts = None
# Assign values to y_holdout
if isinstance(y_holdout, str):
y_holdout = idata.observed_data[y_holdout]
# Assign values to x_holdout.
if y_holdout is not None or y_forecasts is not None:
if x_holdout is None:
if holdout_dim is None:
if y_holdout is None:
x_holdout = y_forecasts.coords[y_forecasts.dims[-1]]
else:
x_holdout = y_holdout.coords[y_holdout.dims[-1]]
else:
if y_holdout is None:
x_holdout = y_forecasts.coords[holdout_dim]
else:
x_holdout = y_holdout.coords[holdout_dim]
elif isinstance(x_holdout, str):
x_holdout = idata.constant_data[x_holdout]
elif isinstance(x_holdout, tuple):
x_holdout_var_names = x_holdout
x_holdout = idata.constant_data
# Choose dims to generate y plotters
if plot_dim is None:
skip_dims = list(y.dims)
elif isinstance(plot_dim, str):
skip_dims = [plot_dim]
elif isinstance(plot_dim, tuple):
skip_dims = list(plot_dim)
# Choose dims to generate y_holdout plotters
if holdout_dim is None:
if y_holdout is not None:
skip_holdout_dims = list(y_holdout.dims)
elif y_forecasts is not None:
skip_holdout_dims = list(y_forecasts.dims)
elif isinstance(holdout_dim, str):
skip_holdout_dims = [holdout_dim]
elif isinstance(holdout_dim, tuple):
skip_holdout_dims = list(holdout_dim)
# Compulsory plotters
y_plotters = list(
xarray_var_iter(
y,
skip_dims=set(skip_dims),
combined=True,
)
)
# Compulsory plotters
x_plotters = list(
xarray_var_iter(
x,
var_names=x_var_names,
skip_dims=set(x.dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
len_y = len(y_plotters)
len_x = len(x_plotters)
length_plotters = len_x * len_y
y_plotters = np.tile(y_plotters, (len_x, 1))
x_plotters = np.tile(x_plotters, (len_y, 1))
# Generate plotters for all the available data
y_mean_plotters = None
y_hat_plotters = None
if y_hat is not None:
total_samples = y_hat.sizes["chain"] * y_hat.sizes["draw"]
pp_sample_ix = np.random.choice(total_samples, size=num_samples, replace=False)
y_hat_satcked = y_hat.stack(__sample__=("chain", "draw"))[..., pp_sample_ix]
y_hat_plotters = list(
xarray_var_iter(
y_hat_satcked,
skip_dims=set(skip_dims + ["__sample__"]),
combined=True,
)
)
y_mean = y_hat.mean(("chain", "draw"))
y_mean_plotters = list(
xarray_var_iter(
y_mean,
skip_dims=set(skip_dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
y_hat_plotters = np.tile(y_hat_plotters, (len_x, 1))
y_mean_plotters = np.tile(y_mean_plotters, (len_x, 1))
y_holdout_plotters = None
x_holdout_plotters = None
if y_holdout is not None:
y_holdout_plotters = list(
xarray_var_iter(
y_holdout,
skip_dims=set(skip_holdout_dims),
combined=True,
)
)
x_holdout_plotters = list(
xarray_var_iter(
x_holdout,
var_names=x_holdout_var_names,
skip_dims=set(x_holdout.dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
y_holdout_plotters = np.tile(y_holdout_plotters, (len_x, 1))
x_holdout_plotters = np.tile(x_holdout_plotters, (len_y, 1))
y_forecasts_plotters = None
y_forecasts_mean_plotters = None
if y_forecasts is not None:
total_samples = y_forecasts.sizes["chain"] * y_forecasts.sizes["draw"]
pp_sample_ix = np.random.choice(total_samples, size=num_samples, replace=False)
y_forecasts_satcked = y_forecasts.stack(__sample__=("chain", "draw"))[..., pp_sample_ix]
y_forecasts_plotters = list(
xarray_var_iter(
y_forecasts_satcked,
skip_dims=set(skip_holdout_dims + ["__sample__"]),
combined=True,
)
)
y_forecasts_mean = y_forecasts.mean(("chain", "draw"))
y_forecasts_mean_plotters = list(
xarray_var_iter(
y_forecasts_mean,
skip_dims=set(skip_holdout_dims),
combined=True,
)
)
x_holdout_plotters = list(
xarray_var_iter(
x_holdout,
var_names=x_holdout_var_names,
skip_dims=set(x_holdout.dims),
combined=True,
)
)
# Necessary when multidim y
# If there are multiple x and multidimensional y, we need total of len(x)*len(y) graphs
y_forecasts_mean_plotters = np.tile(y_forecasts_mean_plotters, (len_x, 1))
y_forecasts_plotters = np.tile(y_forecasts_plotters, (len_x, 1))
x_holdout_plotters = np.tile(x_holdout_plotters, (len_y, 1))
rows, cols = default_grid(length_plotters)
tsplot_kwargs = dict(
x_plotters=x_plotters,
y_plotters=y_plotters,
y_mean_plotters=y_mean_plotters,
y_hat_plotters=y_hat_plotters,
y_holdout_plotters=y_holdout_plotters,
x_holdout_plotters=x_holdout_plotters,
y_forecasts_plotters=y_forecasts_plotters,
y_forecasts_mean_plotters=y_forecasts_mean_plotters,
num_samples=num_samples,
length_plotters=length_plotters,
rows=rows,
cols=cols,
backend_kwargs=backend_kwargs,
y_kwargs=y_kwargs,
y_hat_plot_kwargs=y_hat_plot_kwargs,
y_mean_plot_kwargs=y_mean_plot_kwargs,
vline_kwargs=vline_kwargs,
textsize=textsize,
figsize=figsize,
legend=legend,
axes=axes,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
plot = get_plotting_function("plot_ts", "tsplot", backend)
ax = plot(**tsplot_kwargs)
return ax
|
24,854 |
def my_func(self, doc_type): # [missing-return-doc, missing-return-type-doc]
"""This is a docstring.
Arguments
---------
doc_type : str
Numpy
"""
return False
|
def my_func(self, doc_type): # [missing-return-doc, missing-return-type-doc]
"""warn_missing_numpy_returns
Arguments
---------
doc_type : str
Numpy
"""
return False
|
8,207 |
def read(filename, debug=False, **kwargs):
"""
Loads an ANA file and returns the data and a header in a list of (data,
header) tuples.
Parameters
----------
filename : `str`
Name of file to be read.
debug : `bool`, optional
Prints verbose debug information.
Returns
-------
out : `list`
A list of (data, header) tuples
Examples
--------
>>> data = sunpy.io.ana.read(filename) # doctest: +SKIP
"""
if not os.path.isfile(filename):
raise OSError("File does not exist!")
if _pyana is None:
raise ImportError("C extension for ANA is missing, please rebuild.")
# NOTE: This can be removed after adding support for file-obj in `sunpy.io._pyana`.
if isinstance(filename, io.IOBase):
filename = filename.name # Extracting path from the file-obj
data = _pyana.fzread(filename, debug)
return [HDPair(data['data'], FileHeader(data['header']))]
|
def read(filename, debug=False, **kwargs):
"""
Loads an ANA file and returns the data and a header in a list of (data,
header) tuples.
Parameters
----------
filename : `str`
Name of file to be read.
debug : `bool`, optional
Prints verbose debug information.
Returns
-------
out : `list`
A list of (data, header) tuples
Examples
--------
>>> data = sunpy.io.ana.read(filename) # doctest: +SKIP
"""
if not os.path.isfile(filename):
raise OSError("File does not exist!")
if _pyana is None:
raise ImportError("C extension for ANA is missing, please rebuild.")
# NOTE: This can be removed after adding support for file-obj in `sunpy.io._pyana`.
if isinstance(filename, io.IOBase):
filename = filename.name
data = _pyana.fzread(filename, debug)
return [HDPair(data['data'], FileHeader(data['header']))]
|
27,819 |
def is_return_code_zero(args):
"""Return true iff the given command's return code is zero.
All the messages to stdout or stderr are suppressed.
Args:
args (list of str): Command to execute.
"""
with open(os.devnull, 'wb') as FNULL:
try:
subprocess.check_call(args, stdout=FNULL, stderr=FNULL)
except subprocess.CalledProcessError:
# The given command returned an error
return False
except OSError:
# The given command was not found
return False
return True
|
def is_return_code_zero(args):
"""Returns `True` if the return code of the given command is zero.
All the messages to stdout or stderr are suppressed.
Args:
args (list of str): Command to execute.
"""
with open(os.devnull, 'wb') as FNULL:
try:
subprocess.check_call(args, stdout=FNULL, stderr=FNULL)
except subprocess.CalledProcessError:
# The given command returned an error
return False
except OSError:
# The given command was not found
return False
return True
|
27,698 |
def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None):
ids = [
_idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item)
for valindex, parameterset in enumerate(parametersets)
]
# All ids must be unique!
unique_ids = set(ids)
if len(unique_ids) != len(ids):
# Record the number of occurances of each test id
testid_counts = Counter(ids)
# Map the test id to its next suffix.
testid_suffixes = Counter(unique_ids)
for testid in testid_suffixes.keys():
testid_suffixes[testid] -= 1 # start each suffix at 0
# Suffix non-unique ids to make them unique:
for index, testid in enumerate(ids):
if testid_counts[testid] > 1:
ids[index] = f"{testid}{testid_suffixes[testid]}"
testid_suffixes[testid] += 1
return ids
|
def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None):
ids = [
_idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item)
for valindex, parameterset in enumerate(parametersets)
]
# All ids must be unique!
unique_ids = set(ids)
if len(unique_ids) != len(ids):
# Record the number of occurances of each test id
test_id_counts = Counter(ids)
# Map the test id to its next suffix.
testid_suffixes = Counter(unique_ids)
for testid in testid_suffixes.keys():
testid_suffixes[testid] -= 1 # start each suffix at 0
# Suffix non-unique ids to make them unique:
for index, testid in enumerate(ids):
if testid_counts[testid] > 1:
ids[index] = f"{testid}{testid_suffixes[testid]}"
testid_suffixes[testid] += 1
return ids
|
30,287 |
def handle_analyze_response(response):
response = handle_response(response, ACCEPTABLE_HTTP_CODES)
result_url = response['result_url']
analysis_id = result_url.rsplit('/', 1)[-1]
context_json = {'Intezer.Analysis': {'ID': analysis_id, 'Status': 'Created', 'type':'File'}}
return_outputs('Analysis created successfully', context_json, response)
|
def handle_analyze_response(response):
response = handle_response(response, ACCEPTABLE_HTTP_CODES)
result_url = response['result_url']
analysis_id = result_url.rsplit('/', 1)[-1]
context_json = {'Intezer.Analysis(obj.ID === val.ID)': {'ID': analysis_id, 'Status': 'Created', 'type':' File'}}
return_outputs('Analysis created successfully', context_json, response)
|
25,958 |
def load_arguments(self, _):
batch_name_type = CLIArgumentType(
help='Name of the Batch account.',
options_list=('--account-name',),
completer=get_resource_name_completion_list('Microsoft.Batch/batchAccounts'),
id_part=None)
with self.argument_context('batch') as c:
c.argument('resource_group_name', resource_group_name_type, help='Name of the resource group', completer=None, validator=None, required=True)
with self.argument_context('batch account') as c:
c.argument('account_name', batch_name_type, options_list=('--name', '-n'))
with self.argument_context('batch account show') as c:
c.argument('resource_group_name', resource_group_name_type, help='Name of the resource group. If not specified will display currently set account.', required=False)
c.argument('account_name', batch_name_type, options_list=('--name', '-n'), help='Name of the batch account to show. If not specified will display currently set account.', required=False)
with self.argument_context('batch account list') as c:
c.argument('resource_group_name', resource_group_name_type, help='Name of the resource group', required=False)
with self.argument_context('batch account create') as c:
c.argument('location', get_location_type(self.cli_ctx), help='The region in which to create the account.')
c.argument('tags', tags_type, help="Space-separated tags in 'key[=value]' format.")
c.argument('storage_account', help='The storage account name or resource ID to be used for auto storage.', validator=storage_account_id)
c.argument('keyvault', help='The KeyVault name or resource ID to be used for an account with a pool allocation mode of \'User Subscription\'.', validator=keyvault_id)
c.argument('public_network_access', help="The network access type for accessing Azure Batch account. Values can either be enabled or disabled.", arg_type=get_enum_type(PublicNetworkAccessType))
c.argument('encryption_key_source', help='Part of the encryption configuration for the Batch account. Type of the key source. Can be either Microsoft.Batch or Microsoft.KeyVault', arg_type=get_enum_type(KeySource))
c.argument('encryption_key_identifier', help='Part of the encryption configuration for the Batch account. '
'Full path to the versioned secret. Example https://mykeyvault.vault.azure.net/keys/testkey/6e34a81fef704045975661e297a4c053.')
c.argument('identity_type', help="The type of identity used for the Batch account. Possible values include: 'SystemAssigned', 'None'.", arg_type=get_enum_type(ResourceIdentityType))
c.ignore('keyvault_url')
with self.argument_context('batch account set') as c:
c.argument('tags', tags_type)
c.argument('storage_account', help='The storage account name or resource ID to be used for auto storage.', validator=storage_account_id)
c.argument('encryption_key_source', help='Part of the encryption configuration for the Batch account. Type of the key source. Can be either Microsoft.Batch or Microsoft.KeyVault')
c.argument('encryption_key_identifier', help='Part of the encryption configuration for the Batch account. Full path to the versioned secret. Example https://mykeyvault.vault.azure.net/keys/testkey/6e34a81fef704045975661e297a4c053.')
c.argument('identity_type', help="The type of identity used for the Batch account. Possible values include: 'SystemAssigned', 'None'.", arg_type=get_enum_type(ResourceIdentityType))
with self.argument_context('batch account keys renew') as c:
c.argument('resource_group_name', resource_group_name_type,
help='Name of the resource group. If not specified will display currently set account.',
required=False)
c.argument('account_name', batch_name_type, options_list=('--name', '-n'),
help='Name of the batch account to show. If not specified will display currently set account.',
required=False)
c.argument('key_name', arg_type=get_enum_type(AccountKeyType), help='Name of the batch account key.')
with self.argument_context('batch account login') as c:
c.argument('shared_key_auth', action='store_true', help='Using Shared Key authentication, if not specified, it will use Azure Active Directory authentication.')
c.argument('show', action='store_true', help='Display the credential information for the Batch account.')
with self.argument_context('batch application set') as c:
c.argument('application_name', options_list=('--application-name',), help="The name of the application.")
c.argument('allow_updates', options_list=('--allow-updates',), help="Specify to indicate whether packages within the application may be overwritten using the same version string. Specify either 'true' or 'false' to update the property.")
with self.argument_context('batch application create') as c:
c.argument('allow_updates', options_list=('--allow-updates',), action="store_true", help="Specify to indicate whether packages within the application may be overwritten using the same version string. True if flag present.")
for command in ['create', 'activate']:
with self.argument_context('batch application package {}'.format(command)) as c:
c.argument('package_file', type=file_type, help='The path of the application package in zip format', completer=FilesCompleter())
c.argument('application_name', options_list=('--application-name',), help="The name of the application.")
c.argument('version_name', options_list=('--version-name',), help="The version name of the application.")
c.argument('f_ormat', options_list=('--format',), help="The format of the application package binary file.")
with self.argument_context('batch location quotas show') as c:
c.argument('location_name', get_location_type(self.cli_ctx), help='The region from which to display the Batch service quotas.')
for command in ['list', 'show', 'create', 'set', 'delete', 'package']:
with self.argument_context('batch application {}'.format(command)) as c:
c.argument('account_name', batch_name_type, options_list=('--name', '-n'), validator=application_enabled)
# TODO: Refactor so the help text can be extracted automatically
with self.argument_context('batch pool resize') as c:
c.argument('if_modified_since', help='The operation will be performed only if the resource has been modified since the specified timestamp.', type=datetime_format, arg_group='Pre-condition and Query')
c.argument('if_unmodified_since', help='The operation will not be performed only if the resource has been modified since the specified timestamp.', type=datetime_format, arg_group='Pre-condition and Query')
c.argument('if_match', help='The operation will be performed only if the resource\'s current ETag exactly matches the specified value.', arg_group='Pre-condition and Query')
c.argument('if_none_match', help='The operation will not be performed only if the resource\'s current ETag exactly matches the specified value.', arg_group='Pre-condition and Query')
c.argument('pool_id', help='The ID of the pool.')
c.argument('abort', action='store_true', help='Stop the pool resize operation.', validator=validate_pool_resize_parameters)
c.argument('node_deallocation_option', options_list=('--node-deallocation-option',), help='When nodes may be removed from the pool, if the pool size is decreasing.', arg_type=get_enum_type(ComputeNodeDeallocationOption))
# TODO: Refactor so the help text can be extracted automatically
with self.argument_context('batch pool reset') as c:
c.argument('json_file', type=file_type, help='The file containing pool update properties parameter specification in JSON(formatted to match REST API request body). If this parameter is specified, all \'Pool Update Properties Parameter Arguments\' are ignored.', validator=validate_json_file, completer=FilesCompleter())
c.argument('pool_id', help='The ID of the pool to update.')
c.argument('application_package_references', nargs='+', type=application_package_reference_format, arg_group='Pool')
c.argument('certificate_references', nargs='+', type=certificate_reference_format, arg_group='Pool')
c.argument('metadata', nargs='+', type=metadata_item_format, arg_group='Pool')
c.argument('start_task_command_line', arg_group='Pool: Start Task',
help='The command line of the start task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux.')
c.argument('start_task_wait_for_success', action='store_true', arg_group='Pool: Start Task',
help='Whether the Batch service should wait for the start task to complete successfully (that is, to exit with exit code 0) before scheduling any tasks on the compute node. True if flag present, otherwise defaults to False.')
c.argument('start_task_max_task_retry_count', arg_group='Pool: Start Task',
help='The maximum number of times the task may be retried.')
c.argument('start_task_environment_settings', nargs='+', type=environment_setting_format, arg_group='Pool: Start Task',
help='A list of environment variable settings for the start task. Space-separated values in \'key=value\' format.')
with self.argument_context('batch job list') as c:
c.argument('filter', help=' An OData $filter clause.', arg_group='Pre-condition and Query')
c.argument('select', help=' An OData $select clause.', arg_group='Pre-condition and Query')
c.argument('expand', help=' An OData $expand clause.', arg_group='Pre-condition and Query')
c.argument('job_schedule_id', help='The ID of the job schedule from which you want to get a list of jobs. If omitted, lists all jobs in the account.')
for command in ['job create', 'job set', 'job reset', 'job-schedule create', 'job-schedule set', 'job-schedule reset']:
with self.argument_context('batch {}'.format(command)) as c:
c.argument('pool_id', options_list=('--pool-id',), help='The id of an existing pool. All the tasks of the job will run on the specified pool.')
with self.argument_context('batch pool create') as c:
c.argument('os_family', arg_type=get_enum_type(['2', '3', '4', '5', '6']))
c.argument('auto_scale_formula', help='A formula for the desired number of compute nodes in the pool. The formula is checked for validity before the pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/.')
c.extra('disk_encryption_targets',
arg_group="Pool: Virtual Machine Configuration",
help='A space separated list of DiskEncryptionTargets. current possible values include OsDisk and TemporaryDisk.', type=disk_encryption_configuration_format)
c.extra('image', completer=load_supported_images, arg_group="Pool: Virtual Machine Configuration",
help="OS image reference. This can be either 'publisher:offer:sku[:version]' format, or a fully qualified ARM image id of the form '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}'. If 'publisher:offer:sku[:version]' format, version is optional and if omitted latest will be used. Valid values can be retrieved via 'az batch pool supported-images list'. For example: 'MicrosoftWindowsServer:WindowsServer:2012-R2-Datacenter:latest'")
with self.argument_context('batch certificate') as c:
c.argument('thumbprint', help='The certificate thumbprint.')
c.argument('password', help='The password to access the certificate\'s private key.')
c.argument('certificate_file', type=file_type, help='The certificate file: cer file or pfx file.', validator=validate_cert_file, completer=FilesCompleter())
c.argument('abort', action='store_true', help='Cancel the failed certificate deletion operation.')
with self.argument_context('batch certificate show') as c:
c.argument('thumbprint', help='The certificate thumbprint.', validator=validate_cert_settings)
with self.argument_context('batch task create') as c:
c.argument('json_file', type=file_type, help='The file containing the task(s) to create in JSON(formatted to match REST API request body). When submitting multiple tasks, accepts either an array of tasks or a TaskAddCollectionParamater. If this parameter is specified, all other parameters are ignored.', validator=validate_json_file, completer=FilesCompleter())
c.argument('application_package_references', nargs='+', help='The space-separated list of IDs specifying the application packages to be installed. Space-separated application IDs with optional version in \'id[#version]\' format.', type=application_package_reference_format)
c.argument('job_id', help='The ID of the job containing the task.')
c.argument('task_id', help='The ID of the task.')
c.argument('command_line', help='The command line of the task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux.')
c.argument('environment_settings', nargs='+', help='A list of environment variable settings for the task. Space-separated values in \'key=value\' format.', type=environment_setting_format)
c.argument('resource_files', nargs='+', help='A list of files that the Batch service will download to the compute node before running the command line. Space-separated resource references in filename=httpurl format, with httpurl being any HTTP url with public access or a SAS url with read access.', type=resource_file_format)
for item in ['batch certificate delete', 'batch certificate create', 'batch pool resize', 'batch pool reset', 'batch job list', 'batch task create']:
with self.argument_context(item) as c:
c.extra('account_name', arg_group='Batch Account', validator=validate_client_parameters,
help='The Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT')
c.extra('account_key', arg_group='Batch Account',
help='The Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY')
c.extra('account_endpoint', arg_group='Batch Account',
help='Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT')
|
def load_arguments(self, _):
batch_name_type = CLIArgumentType(
help='Name of the Batch account.',
options_list=('--account-name',),
completer=get_resource_name_completion_list('Microsoft.Batch/batchAccounts'),
id_part=None)
with self.argument_context('batch') as c:
c.argument('resource_group_name', resource_group_name_type, help='Name of the resource group', completer=None, validator=None, required=True)
with self.argument_context('batch account') as c:
c.argument('account_name', batch_name_type, options_list=('--name', '-n'))
with self.argument_context('batch account show') as c:
c.argument('resource_group_name', resource_group_name_type, help='Name of the resource group. If not specified will display currently set account.', required=False)
c.argument('account_name', batch_name_type, options_list=('--name', '-n'), help='Name of the batch account to show. If not specified will display currently set account.', required=False)
with self.argument_context('batch account list') as c:
c.argument('resource_group_name', resource_group_name_type, help='Name of the resource group', required=False)
with self.argument_context('batch account create') as c:
c.argument('location', get_location_type(self.cli_ctx), help='The region in which to create the account.')
c.argument('tags', tags_type, help="Space-separated tags in 'key[=value]' format.")
c.argument('storage_account', help='The storage account name or resource ID to be used for auto storage.', validator=storage_account_id)
c.argument('keyvault', help='The KeyVault name or resource ID to be used for an account with a pool allocation mode of \'User Subscription\'.', validator=keyvault_id)
c.argument('public_network_access', help="The network access type for accessing Azure Batch account. Values can either be enabled or disabled.", arg_type=get_enum_type(PublicNetworkAccessType))
c.argument('encryption_key_source', help='Part of the encryption configuration for the Batch account. Type of the key source. Can be either Microsoft.Batch or Microsoft.KeyVault', arg_type=get_enum_type(KeySource))
c.argument('encryption_key_identifier', help='Part of the encryption configuration for the Batch account. '
'Full path to the versioned secret. Example https://mykeyvault.vault.azure.net/keys/testkey/6e34a81fef704045975661e297a4c053.')
c.argument('identity_type', help="The type of identity used for the Batch account. Possible values include: 'SystemAssigned', 'None'.", arg_type=get_enum_type(ResourceIdentityType))
c.ignore('keyvault_url')
with self.argument_context('batch account set') as c:
c.argument('tags', tags_type)
c.argument('storage_account', help='The storage account name or resource ID to be used for auto storage.', validator=storage_account_id)
c.argument('encryption_key_source', help='Part of the encryption configuration for the Batch account. Type of the key source. Can be either Microsoft.Batch or Microsoft.KeyVault')
c.argument('encryption_key_identifier', help='Part of the encryption configuration for the Batch account. Full path to the versioned secret. Example https://mykeyvault.vault.azure.net/keys/testkey/6e34a81fef704045975661e297a4c053.')
c.argument('identity_type', help="The type of identity used for the Batch account. Possible values include: 'SystemAssigned', 'None'.", arg_type=get_enum_type(ResourceIdentityType))
with self.argument_context('batch account keys renew') as c:
c.argument('resource_group_name', resource_group_name_type,
help='Name of the resource group. If not specified will display currently set account.',
required=False)
c.argument('account_name', batch_name_type, options_list=('--name', '-n'),
help='Name of the batch account to show. If not specified will display currently set account.',
required=False)
c.argument('key_name', arg_type=get_enum_type(AccountKeyType), help='Name of the batch account key.')
with self.argument_context('batch account login') as c:
c.argument('shared_key_auth', action='store_true', help='Using Shared Key authentication, if not specified, it will use Azure Active Directory authentication.')
c.argument('show', action='store_true', help='Display the credential information for the Batch account.')
with self.argument_context('batch application set') as c:
c.argument('application_name', options_list=('--application-name',), help="The name of the application.")
c.argument('allow_updates', options_list=('--allow-updates',), help="Specify to indicate whether packages within the application may be overwritten using the same version string. Specify either 'true' or 'false' to update the property.")
with self.argument_context('batch application create') as c:
c.argument('allow_updates', options_list=('--allow-updates',), action="store_true", help="Specify to indicate whether packages within the application may be overwritten using the same version string. True if flag present.")
for command in ['create', 'activate']:
with self.argument_context('batch application package {}'.format(command)) as c:
c.argument('package_file', type=file_type, help='The path of the application package in zip format', completer=FilesCompleter())
c.argument('application_name', options_list=('--application-name',), help="The name of the application.")
c.argument('version_name', options_list=('--version-name',), help="The version name of the application.")
c.argument('format', options_list=('--format',), help="The format of the application package binary file.")
with self.argument_context('batch location quotas show') as c:
c.argument('location_name', get_location_type(self.cli_ctx), help='The region from which to display the Batch service quotas.')
for command in ['list', 'show', 'create', 'set', 'delete', 'package']:
with self.argument_context('batch application {}'.format(command)) as c:
c.argument('account_name', batch_name_type, options_list=('--name', '-n'), validator=application_enabled)
# TODO: Refactor so the help text can be extracted automatically
with self.argument_context('batch pool resize') as c:
c.argument('if_modified_since', help='The operation will be performed only if the resource has been modified since the specified timestamp.', type=datetime_format, arg_group='Pre-condition and Query')
c.argument('if_unmodified_since', help='The operation will not be performed only if the resource has been modified since the specified timestamp.', type=datetime_format, arg_group='Pre-condition and Query')
c.argument('if_match', help='The operation will be performed only if the resource\'s current ETag exactly matches the specified value.', arg_group='Pre-condition and Query')
c.argument('if_none_match', help='The operation will not be performed only if the resource\'s current ETag exactly matches the specified value.', arg_group='Pre-condition and Query')
c.argument('pool_id', help='The ID of the pool.')
c.argument('abort', action='store_true', help='Stop the pool resize operation.', validator=validate_pool_resize_parameters)
c.argument('node_deallocation_option', options_list=('--node-deallocation-option',), help='When nodes may be removed from the pool, if the pool size is decreasing.', arg_type=get_enum_type(ComputeNodeDeallocationOption))
# TODO: Refactor so the help text can be extracted automatically
with self.argument_context('batch pool reset') as c:
c.argument('json_file', type=file_type, help='The file containing pool update properties parameter specification in JSON(formatted to match REST API request body). If this parameter is specified, all \'Pool Update Properties Parameter Arguments\' are ignored.', validator=validate_json_file, completer=FilesCompleter())
c.argument('pool_id', help='The ID of the pool to update.')
c.argument('application_package_references', nargs='+', type=application_package_reference_format, arg_group='Pool')
c.argument('certificate_references', nargs='+', type=certificate_reference_format, arg_group='Pool')
c.argument('metadata', nargs='+', type=metadata_item_format, arg_group='Pool')
c.argument('start_task_command_line', arg_group='Pool: Start Task',
help='The command line of the start task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux.')
c.argument('start_task_wait_for_success', action='store_true', arg_group='Pool: Start Task',
help='Whether the Batch service should wait for the start task to complete successfully (that is, to exit with exit code 0) before scheduling any tasks on the compute node. True if flag present, otherwise defaults to False.')
c.argument('start_task_max_task_retry_count', arg_group='Pool: Start Task',
help='The maximum number of times the task may be retried.')
c.argument('start_task_environment_settings', nargs='+', type=environment_setting_format, arg_group='Pool: Start Task',
help='A list of environment variable settings for the start task. Space-separated values in \'key=value\' format.')
with self.argument_context('batch job list') as c:
c.argument('filter', help=' An OData $filter clause.', arg_group='Pre-condition and Query')
c.argument('select', help=' An OData $select clause.', arg_group='Pre-condition and Query')
c.argument('expand', help=' An OData $expand clause.', arg_group='Pre-condition and Query')
c.argument('job_schedule_id', help='The ID of the job schedule from which you want to get a list of jobs. If omitted, lists all jobs in the account.')
for command in ['job create', 'job set', 'job reset', 'job-schedule create', 'job-schedule set', 'job-schedule reset']:
with self.argument_context('batch {}'.format(command)) as c:
c.argument('pool_id', options_list=('--pool-id',), help='The id of an existing pool. All the tasks of the job will run on the specified pool.')
with self.argument_context('batch pool create') as c:
c.argument('os_family', arg_type=get_enum_type(['2', '3', '4', '5', '6']))
c.argument('auto_scale_formula', help='A formula for the desired number of compute nodes in the pool. The formula is checked for validity before the pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/.')
c.extra('disk_encryption_targets',
arg_group="Pool: Virtual Machine Configuration",
help='A space separated list of DiskEncryptionTargets. current possible values include OsDisk and TemporaryDisk.', type=disk_encryption_configuration_format)
c.extra('image', completer=load_supported_images, arg_group="Pool: Virtual Machine Configuration",
help="OS image reference. This can be either 'publisher:offer:sku[:version]' format, or a fully qualified ARM image id of the form '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}'. If 'publisher:offer:sku[:version]' format, version is optional and if omitted latest will be used. Valid values can be retrieved via 'az batch pool supported-images list'. For example: 'MicrosoftWindowsServer:WindowsServer:2012-R2-Datacenter:latest'")
with self.argument_context('batch certificate') as c:
c.argument('thumbprint', help='The certificate thumbprint.')
c.argument('password', help='The password to access the certificate\'s private key.')
c.argument('certificate_file', type=file_type, help='The certificate file: cer file or pfx file.', validator=validate_cert_file, completer=FilesCompleter())
c.argument('abort', action='store_true', help='Cancel the failed certificate deletion operation.')
with self.argument_context('batch certificate show') as c:
c.argument('thumbprint', help='The certificate thumbprint.', validator=validate_cert_settings)
with self.argument_context('batch task create') as c:
c.argument('json_file', type=file_type, help='The file containing the task(s) to create in JSON(formatted to match REST API request body). When submitting multiple tasks, accepts either an array of tasks or a TaskAddCollectionParamater. If this parameter is specified, all other parameters are ignored.', validator=validate_json_file, completer=FilesCompleter())
c.argument('application_package_references', nargs='+', help='The space-separated list of IDs specifying the application packages to be installed. Space-separated application IDs with optional version in \'id[#version]\' format.', type=application_package_reference_format)
c.argument('job_id', help='The ID of the job containing the task.')
c.argument('task_id', help='The ID of the task.')
c.argument('command_line', help='The command line of the task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux.')
c.argument('environment_settings', nargs='+', help='A list of environment variable settings for the task. Space-separated values in \'key=value\' format.', type=environment_setting_format)
c.argument('resource_files', nargs='+', help='A list of files that the Batch service will download to the compute node before running the command line. Space-separated resource references in filename=httpurl format, with httpurl being any HTTP url with public access or a SAS url with read access.', type=resource_file_format)
for item in ['batch certificate delete', 'batch certificate create', 'batch pool resize', 'batch pool reset', 'batch job list', 'batch task create']:
with self.argument_context(item) as c:
c.extra('account_name', arg_group='Batch Account', validator=validate_client_parameters,
help='The Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT')
c.extra('account_key', arg_group='Batch Account',
help='The Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY')
c.extra('account_endpoint', arg_group='Batch Account',
help='Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT')
|
55,075 |
def sample(op=None, wires=None):
r"""Sample from the supplied observable, with the number of shots
determined from the ``dev.shots`` attribute of the corresponding device.
If no observable is provided then sample from the device specific raw samples.
The samples are drawn from the eigenvalues :math:`\{\lambda_i\}` of the observable.
The probability of drawing eigenvalue :math:`\lambda_i` is given by
:math:`p(\lambda_i) = |\langle \xi_i | \psi \rangle|^2`, where :math:`| \xi_i \rangle`
is the corresponding basis state from the observable's eigenbasis.
If no observable was provided then the raw samples obtained from device are returned
(eg. for a qubit device, samples from the computational device are returned). In this
case, `wires` can be specified so that sample results only include measurement results
of the qubits of interest.
**Example 1:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample(qml.PauliY(0))
Executing this QNode:
>>> circuit(0.5)
array([ 1., 1., 1., -1.])
**Example 2:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample()
Executing this QNode:
>>> circuit(0.5)
array([[0, 1],
[0, 0],
[1, 1],
[0, 0]])
Args:
op (Observable or None): a quantum observable object
wires (Sequence[int] or int or None): the wires we wish to sample from
Raises:
QuantumFunctionError: `op` is not an instance of :class:`~.Observable`
ValueError: Cannot set wires if an observable is provided
"""
if not isinstance(op, Observable) and op is not None: # None type is also allowed for op
raise qml.QuantumFunctionError(
"{} is not an observable: cannot be used with sample".format(op.name)
)
if wires is not None:
if op is not None:
raise ValueError("Cannot set the wires if an observable is provided.")
return MeasurementProcess(Sample, obs=op, wires=qml.wires.Wires(wires))
return MeasurementProcess(Sample, obs=op)
|
def sample(op=None, wires=None):
r"""Sample from the supplied observable, with the number of shots
determined from the ``dev.shots`` attribute of the corresponding device.
If no observable is provided then sample from the device specific raw samples.
The samples are drawn from the eigenvalues :math:`\{\lambda_i\}` of the observable.
The probability of drawing eigenvalue :math:`\lambda_i` is given by
:math:`p(\lambda_i) = |\langle \xi_i | \psi \rangle|^2`, where :math:`| \xi_i \rangle`
is the corresponding basis state from the observable's eigenbasis.
If no observable was provided then the raw samples obtained from device are returned
(eg. for a qubit device, samples from the computational device are returned). In this
case, `wires` can be specified so that sample results only include measurement results
of the qubits of interest.
**Example 1:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample(qml.PauliY(0))
Executing this QNode:
>>> circuit(0.5)
array([ 1., 1., 1., -1.])
If no observable is provided, then the raw basis state samples obtained
from device are returned (e.g., for a qubit device, samples from the
computational device are returned). In this case, ``wires`` can be specified
so that sample results only include measurement results of the qubits of interest.
.. code-block:: python3
dev = qml.device("default.qubit", wires=2, shots=4)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.sample()
Executing this QNode:
>>> circuit(0.5)
array([[0, 1],
[0, 0],
[1, 1],
[0, 0]])
Args:
op (Observable or None): a quantum observable object
wires (Sequence[int] or int or None): the wires we wish to sample from
Raises:
QuantumFunctionError: `op` is not an instance of :class:`~.Observable`
ValueError: Cannot set wires if an observable is provided
"""
if not isinstance(op, Observable) and op is not None: # None type is also allowed for op
raise qml.QuantumFunctionError(
"{} is not an observable: cannot be used with sample".format(op.name)
)
if wires is not None:
if op is not None:
raise ValueError("Cannot set the wires if an observable is provided.")
return MeasurementProcess(Sample, obs=op, wires=qml.wires.Wires(wires))
return MeasurementProcess(Sample, obs=op)
|
50,129 |
def handle_args(name, args):
# Note that if an exception happens between now and when logging is
# setup, we'll only see it in the journal
hotplug_reporter = events.ReportEventStack(
name, __doc__, reporting_enabled=True
)
hotplug_init = Init(ds_deps=[], reporter=hotplug_reporter)
hotplug_init.read_cfg()
log.setupLogging(hotplug_init.cfg)
if 'reporting' in hotplug_init.cfg:
reporting.update_configuration(hotplug_init.cfg.get('reporting'))
# Logging isn't going to be setup until now
LOG.debug(
'%s called with the following arguments: {'
'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}',
name,
args.hotplug_action,
args.subsystem,
args.udevaction if 'udevaction' in args else None,
args.devpath if 'devpath' in args else None,
)
with hotplug_reporter:
try:
if args.hotplug_action == 'query':
hotplug_init.fetch(existing="trust")
enabled = is_enabled(hotplug_init, args.subsystem)
print('enabled' if enabled else 'disabled')
else:
handle_hotplug(
hotplug_init=hotplug_init,
devpath=args.devpath,
subsystem=args.subsystem,
udevaction=args.udevaction,
)
except Exception:
LOG.exception('Received fatal exception handling hotplug!')
raise
LOG.debug('Exiting hotplug handler')
reporting.flush_events()
|
def handle_args(name, args):
# Note that if an exception happens between now and when logging is
# setup, we'll only see it in the journal
hotplug_reporter = events.ReportEventStack(
name, __doc__, reporting_enabled=True
)
hotplug_init = Init(ds_deps=[], reporter=hotplug_reporter)
hotplug_init.read_cfg()
log.setupLogging(hotplug_init.cfg)
if 'reporting' in hotplug_init.cfg:
reporting.update_configuration(hotplug_init.cfg.get('reporting'))
# Logging isn't going to be setup until now
LOG.debug(
'%s called with the following arguments: {'
'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}',
name,
args.hotplug_action,
args.subsystem,
args.udevaction if 'udevaction' in args else None,
args.devpath if 'devpath' in args else None,
)
with hotplug_reporter:
try:
if args.hotplug_action == 'query':
try:
hotplug_init.fetch(existing="trust")
except cloudinit.sources.DataSourceNotFoundException:
print("Unable to determine hotplug state. No datasource detected")
sys.exit(1)
enabled = is_enabled(hotplug_init, args.subsystem)
print('enabled' if enabled else 'disabled')
else:
handle_hotplug(
hotplug_init=hotplug_init,
devpath=args.devpath,
subsystem=args.subsystem,
udevaction=args.udevaction,
)
except Exception:
LOG.exception('Received fatal exception handling hotplug!')
raise
LOG.debug('Exiting hotplug handler')
reporting.flush_events()
|
5,426 |
def test_get_repos():
"""
Test aptpkg.get_repos
"""
test_repo, comps = get_current_repo()
if not test_repo:
pytest.skip("Did not detect an ubuntu repo")
exp_ret = test_repo.split()
ret = aptpkg.get_repo(repo=test_repo)
assert ret["type"] == exp_ret[0]
assert ret["uri"] == exp_ret[1]
assert ret["dist"] == exp_ret[2]
assert ret["comps"] == exp_ret[3:]
|
def test_get_repos():
"""
Test aptpkg.get_repos
"""
test_repo, comps = get_current_repo()
if not test_repo:
pytest.skip("Did not detect an apt repo")
exp_ret = test_repo.split()
ret = aptpkg.get_repo(repo=test_repo)
assert ret["type"] == exp_ret[0]
assert ret["uri"] == exp_ret[1]
assert ret["dist"] == exp_ret[2]
assert ret["comps"] == exp_ret[3:]
|
13,171 |
def extend_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
subparsers = parser.add_subparsers(
title="GitLab resource",
dest="gitlab_resource",
help="The GitLab resource to manipulate.",
)
subparsers.required = True
# populate argparse for all Gitlab Object
classes = set()
for cls in gitlab.v4.objects.__dict__.values():
if not isinstance(cls, type):
continue
if issubclass(cls, gitlab.base.RESTManager):
if cls._obj_cls is not None:
classes.add(cls._obj_cls)
for cls in sorted(classes, key=operator.attrgetter("__name__")):
arg_name = cli.cls_to_gitlab_resource(cls)
object_group = subparsers.add_parser(arg_name)
object_subparsers = object_group.add_subparsers(
title="action",
dest="resource_action",
help="Action to execute on the GitLab resource.",
)
_populate_sub_parser_by_class(cls, object_subparsers)
object_subparsers.required = True
return parser
|
def extend_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
subparsers = parser.add_subparsers(
title="resource",
dest="gitlab_resource",
help="The GitLab resource to manipulate.",
)
subparsers.required = True
# populate argparse for all Gitlab Object
classes = set()
for cls in gitlab.v4.objects.__dict__.values():
if not isinstance(cls, type):
continue
if issubclass(cls, gitlab.base.RESTManager):
if cls._obj_cls is not None:
classes.add(cls._obj_cls)
for cls in sorted(classes, key=operator.attrgetter("__name__")):
arg_name = cli.cls_to_gitlab_resource(cls)
object_group = subparsers.add_parser(arg_name)
object_subparsers = object_group.add_subparsers(
title="action",
dest="resource_action",
help="Action to execute on the GitLab resource.",
)
_populate_sub_parser_by_class(cls, object_subparsers)
object_subparsers.required = True
return parser
|
40,644 |
def load_bot_data(bot_data_dir, max_train_data):
data = SimpleDSTC2DatasetReader().read(bot_data_dir)
if max_train_data is not None:
src = os.path.join(bot_data_dir, 'simple-dstc2-trn.json')
dst = os.path.join(bot_data_dir, 'simple-dstc2-trn_full.json')
with open(src, 'rt') as fin:
old_data = json.load(fin)
with open(dst, 'wt') as fout:
json.dump(old_data, fout, indent=2)
with open(src, 'wt') as fout:
json.dump(old_data[:max_train_data], fout, indent=2)
log.info(f"Train set is reduced to {max_train_data} dialogues (out of {len(data)}).")
# load slot values
download_decompress(
url='http://files.deeppavlov.ai/deeppavlov_data/dstc_slot_vals.tar.gz',
download_path=bot_data_dir
)
load_database(bot_data_dir, data)
|
def load_bot_data(bot_data_dir, max_train_data):
data = SimpleDSTC2DatasetReader().read(bot_data_dir)
if max_train_data is not None:
src = os.path.join(bot_data_dir, 'simple-dstc2-trn.json')
dst = os.path.join(bot_data_dir, 'simple-dstc2-trn_full.json')
with open(src, 'rt', encoding='utf8') as fin:
old_data = json.load(fin)
with open(dst, 'wt') as fout:
json.dump(old_data, fout, indent=2)
with open(src, 'wt') as fout:
json.dump(old_data[:max_train_data], fout, indent=2)
log.info(f"Train set is reduced to {max_train_data} dialogues (out of {len(data)}).")
# load slot values
download_decompress(
url='http://files.deeppavlov.ai/deeppavlov_data/dstc_slot_vals.tar.gz',
download_path=bot_data_dir
)
load_database(bot_data_dir, data)
|
38,567 |
def unique_columns_tol(
mat: Union[
np.ndarray[Any, np.dtype[np.float64]], np.ndarray[Any, np.dtype[np.int64]]
],
tol: float = 1e-8,
) -> Tuple[
np.ndarray[Any, np.dtype[np.float64]],
np.ndarray[Any, np.dtype[np.int64]],
np.ndarray[Any, np.dtype[np.int64]],
]:
"""
For an array, remove columns that are closer than a given tolerance.
To uniquify a point set, consider using the function uniquify_point_set
instead.
Resembles Matlab's uniquetol function, as applied to columns. To rather
work at rows, use a transpose.
Parameters:
mat (np.ndarray, nd x n_pts): Columns to be uniquified.
tol (double, optional): Tolerance for when columns are considered equal.
Should be seen in connection with distance between the points in
the points (due to rounding errors). Defaults to 1e-8.
Returns:
np.ndarray: Unique columns.
new_2_old: Index of which points that are preserved
old_2_new: Index of the representation of old points in the reduced
list.
Example:
>>> p_un, n2o, o2n = unique_columns(np.array([[1, 0, 1], [1, 0, 1]]))
>>> p_un
array([[1, 0], [1, 0]])
>>> n2o
array([0, 1])
>>> o2n
array([0, 1, 0])
"""
# Treat 1d array as 2d
mat = np.atleast_2d(mat)
# Some special cases
if mat.shape[1] == 0:
# Empty arrays gets empty return
return mat, np.array([], dtype=int), np.array([], dtype=int)
elif mat.shape[1] == 1:
# Array with a single column needs no processing
return mat, np.array([0]), np.array([0])
# If the matrix is integers, and the tolerance less than 1/2, we can use
# numpy's unique function
if issubclass(mat.dtype.type, np.int_) and tol < 0.5:
un_ar, new_2_old, old_2_new = np.unique(
mat, return_index=True, return_inverse=True, axis=1
)
return un_ar, new_2_old, old_2_new
@numba.jit("Tuple((b1[:],i8[:],i8[:]))(f8[:, :],f8)", nopython=True, cache=True)
def _numba_distance(mat, tol):
"""Helper function for numba acceleration of unique_columns_tol.
IMPLEMENTATION NOTE: Calling this function many times (it is unclear
what this really means, but likely >=100s of thousands of times) may
lead to enhanced memory consumption and significant reductions in
performance. This could be related to this GH issue
https://github.com/numba/numba/issues/1361
However, it is not clear this is really the error. No solution is known
at the time of writing, the only viable options seem to be algorithmic
modifications that reduce the number of calls to this function.
"""
num_cols = mat.shape[0]
keep = np.zeros(num_cols, dtype=numba.types.bool_)
keep[0] = True
keep_counter = 1
# Map from old points to the unique subspace. Defaults to map to itself.
old_2_new = np.arange(num_cols)
# Loop over all points, check if it is already represented in the kept list
for i in range(1, num_cols):
d = np.sum((mat[i] - mat[keep]) ** 2, axis=1)
condition = d < tol**2
if np.any(condition):
# We will not keep this point
old_2_new[i] = np.argmin(d)
else:
# We have found a new point
keep[i] = True
old_2_new[i] = keep_counter
keep_counter += 1
# Finally find which elements we kept
new_2_old = np.nonzero(keep)[0]
return keep, new_2_old, old_2_new
mat_t = np.atleast_2d(mat.T).astype(float)
# IMPLEMENTATION NOTE: It could pay off to make a pure Python implementation
# to be used for small arrays, however, attempts on making this work in
# practice failed.
keep, new_2_old, old_2_new = _numba_distance(mat_t, tol)
return mat[:, keep], new_2_old, old_2_new
|
def unique_columns_tol(
mat: Union[
np.ndarray[Any, np.dtype[np.float64]], np.ndarray[Any, np.dtype[np.int64]]
],
tol: float = 1e-8,
) -> Tuple[
np.ndarray[Any, np.dtype[np.float64]],
np.ndarray[Any, np.dtype[np.int64]],
np.ndarray[Any, np.dtype[np.int64]],
]:
"""
For an array, remove columns that are closer than a given tolerance.
To uniquify a point set, consider using the function uniquify_point_set
instead.
Resembles Matlab's uniquetol function, as applied to columns. To rather
work on rows, use a transpose.
Parameters:
mat (np.ndarray, nd x n_pts): Columns to be uniquified.
tol (double, optional): Tolerance for when columns are considered equal.
Should be seen in connection with distance between the points in
the points (due to rounding errors). Defaults to 1e-8.
Returns:
np.ndarray: Unique columns.
new_2_old: Index of which points that are preserved
old_2_new: Index of the representation of old points in the reduced
list.
Example:
>>> p_un, n2o, o2n = unique_columns(np.array([[1, 0, 1], [1, 0, 1]]))
>>> p_un
array([[1, 0], [1, 0]])
>>> n2o
array([0, 1])
>>> o2n
array([0, 1, 0])
"""
# Treat 1d array as 2d
mat = np.atleast_2d(mat)
# Some special cases
if mat.shape[1] == 0:
# Empty arrays gets empty return
return mat, np.array([], dtype=int), np.array([], dtype=int)
elif mat.shape[1] == 1:
# Array with a single column needs no processing
return mat, np.array([0]), np.array([0])
# If the matrix is integers, and the tolerance less than 1/2, we can use
# numpy's unique function
if issubclass(mat.dtype.type, np.int_) and tol < 0.5:
un_ar, new_2_old, old_2_new = np.unique(
mat, return_index=True, return_inverse=True, axis=1
)
return un_ar, new_2_old, old_2_new
@numba.jit("Tuple((b1[:],i8[:],i8[:]))(f8[:, :],f8)", nopython=True, cache=True)
def _numba_distance(mat, tol):
"""Helper function for numba acceleration of unique_columns_tol.
IMPLEMENTATION NOTE: Calling this function many times (it is unclear
what this really means, but likely >=100s of thousands of times) may
lead to enhanced memory consumption and significant reductions in
performance. This could be related to this GH issue
https://github.com/numba/numba/issues/1361
However, it is not clear this is really the error. No solution is known
at the time of writing, the only viable options seem to be algorithmic
modifications that reduce the number of calls to this function.
"""
num_cols = mat.shape[0]
keep = np.zeros(num_cols, dtype=numba.types.bool_)
keep[0] = True
keep_counter = 1
# Map from old points to the unique subspace. Defaults to map to itself.
old_2_new = np.arange(num_cols)
# Loop over all points, check if it is already represented in the kept list
for i in range(1, num_cols):
d = np.sum((mat[i] - mat[keep]) ** 2, axis=1)
condition = d < tol**2
if np.any(condition):
# We will not keep this point
old_2_new[i] = np.argmin(d)
else:
# We have found a new point
keep[i] = True
old_2_new[i] = keep_counter
keep_counter += 1
# Finally find which elements we kept
new_2_old = np.nonzero(keep)[0]
return keep, new_2_old, old_2_new
mat_t = np.atleast_2d(mat.T).astype(float)
# IMPLEMENTATION NOTE: It could pay off to make a pure Python implementation
# to be used for small arrays, however, attempts on making this work in
# practice failed.
keep, new_2_old, old_2_new = _numba_distance(mat_t, tol)
return mat[:, keep], new_2_old, old_2_new
|
39,914 |
def confirm_staged_stake(staker_address, value, lock_periods) -> None:
click.confirm(f"""
* Ursula Node Operator Notice *
-------------------------------
By agreeing to stake {str(value)} ({str(value.to_nunits())} NuNits):
- Staked tokens will be locked for the stake duration.
- You are obligated to maintain a networked and available Ursula-Worker node
bonded to the staker address {staker_address} for the duration
of the stake(s) ({lock_periods} periods).
- Agree to allow NuCypher network users to carry out uninterrupted re-encryption
work orders at-will without interference.
Failure to keep your node online, or violation of re-encryption work orders
will result in the loss of staked tokens as described in the NuCypher slashing protocol.
Keeping your Ursula node online during the staking period and successfully
producing correct re-encryption work orders will result in fees
paid out in ethers retro-actively and on-demand.
Accept ursula node operator obligation?""", abort=True)
|
def confirm_staged_stake(staker_address, value, lock_periods) -> None:
click.confirm(f"""
* Ursula Node Operator Notice *
-------------------------------
By agreeing to stake {str(value)} ({str(value.to_nunits())} NuNits):
- Staked tokens will be locked for the stake duration.
- You are obligated to maintain a networked and available Ursula-Worker node
bonded to the staker address {staker_address} for the duration
of the stake(s) ({lock_periods} periods).
- Agree to allow NuCypher network users to carry out uninterrupted re-encryption
work orders at-will without interference.
Failure to keep your node online, or violation of re-encryption work orders
will result in the loss of staked tokens as described in the NuCypher slashing protocol.
Keeping your Ursula node online during the staking period and successfully
producing correct re-encryption work orders will earn fees
paid out in ethers retro-actively and on-demand.
Accept ursula node operator obligation?""", abort=True)
|
9,122 |
def openURL(url_base, data=None, method='Get', cookies=None, username=None, password=None, timeout=30, headers=None,
verify=True, cert=None, auth=None):
"""
Function to open URLs.
Uses requests library but with additional checks for OGC service exceptions and url formatting.
Also handles cookies and simple user password authentication.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param verify: (optional) whether the SSL cert will be verified. A CA_BUNDLE path can also be provided.
Defaults to ``True``.
:param cert: (optional) A file with a client side certificate for SSL authentication
to send with the :class:`Request`.
:param auth: Instance of owslib.util.Authentication
"""
headers = headers if headers is not None else {}
rkwargs = {}
rkwargs['timeout'] = timeout
if auth:
if username:
auth.username = username
if password:
auth.password = password
if cert:
auth.cert = cert
if not verify or not auth.verify:
verify = False
else:
auth = Authentication(username, password, cert, verify)
if auth.username and auth.password:
rkwargs['auth'] = (auth.username, auth.password)
rkwargs['cert'] = auth.cert
rkwargs['verify'] = verify
# FIXUP for WFS in particular, remove xml style namespace
# @TODO does this belong here?
method = method.split("}")[-1]
if method.lower() == 'post':
try:
etree.fromstring(data)
headers['Content-Type'] = 'text/xml'
except (ParseError, UnicodeEncodeError):
pass
rkwargs['data'] = data
elif method.lower() == 'get':
rkwargs['params'] = data
else:
raise ValueError("Unknown method ('%s'), expected 'get' or 'post'" % method)
if cookies is not None:
rkwargs['cookies'] = cookies
req = requests.request(method.upper(), url_base, headers=headers, **rkwargs)
if req.status_code in [400, 401]:
raise ServiceException(req.text)
if req.status_code in [404, 500, 502, 503, 504]: # add more if needed
req.raise_for_status()
# check for service exceptions without the http header set
if 'Content-Type' in req.headers and \
req.headers['Content-Type'] in ['text/xml', 'application/xml', 'application/vnd.ogc.se_xml']:
# just in case 400 headers were not set, going to have to read the xml to see if it's an exception report.
se_tree = etree.fromstring(req.content)
# to handle the variety of namespaces and terms across services
# and versions, especially for "legacy" responses like WMS 1.3.0
possible_errors = [
'{http://www.opengis.net/ows}Exception',
'{http://www.opengis.net/ows/1.1}Exception',
'{http://www.opengis.net/ogc}ServiceException',
'ServiceException'
]
for possible_error in possible_errors:
serviceException = se_tree.find(possible_error)
if serviceException is not None:
# and we need to deal with some message nesting
raise ServiceException('\n'.join([t.strip() for t in serviceException.itertext() if t.strip()]))
return ResponseWrapper(req)
|
def openURL(url_base, data=None, method='Get', cookies=None, username=None, password=None, timeout=30, headers=None,
verify=True, cert=None, auth=None):
"""
Function to open URLs.
Uses requests library but with additional checks for OGC service exceptions and url formatting.
Also handles cookies and simple user password authentication.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param verify: (optional) whether the SSL cert will be verified. A CA_BUNDLE path can also be provided.
Defaults to ``True``.
:param cert: (optional) A file with a client side certificate for SSL authentication
to send with the :class:`Request`.
:param auth: Instance of owslib.util.Authentication
"""
headers = headers if headers is not None else {}
rkwargs = {}
rkwargs['timeout'] = timeout
if auth:
if username:
auth.username = username
if password:
auth.password = password
if cert:
auth.cert = cert
verify = verify and auth.verify
else:
auth = Authentication(username, password, cert, verify)
if auth.username and auth.password:
rkwargs['auth'] = (auth.username, auth.password)
rkwargs['cert'] = auth.cert
rkwargs['verify'] = verify
# FIXUP for WFS in particular, remove xml style namespace
# @TODO does this belong here?
method = method.split("}")[-1]
if method.lower() == 'post':
try:
etree.fromstring(data)
headers['Content-Type'] = 'text/xml'
except (ParseError, UnicodeEncodeError):
pass
rkwargs['data'] = data
elif method.lower() == 'get':
rkwargs['params'] = data
else:
raise ValueError("Unknown method ('%s'), expected 'get' or 'post'" % method)
if cookies is not None:
rkwargs['cookies'] = cookies
req = requests.request(method.upper(), url_base, headers=headers, **rkwargs)
if req.status_code in [400, 401]:
raise ServiceException(req.text)
if req.status_code in [404, 500, 502, 503, 504]: # add more if needed
req.raise_for_status()
# check for service exceptions without the http header set
if 'Content-Type' in req.headers and \
req.headers['Content-Type'] in ['text/xml', 'application/xml', 'application/vnd.ogc.se_xml']:
# just in case 400 headers were not set, going to have to read the xml to see if it's an exception report.
se_tree = etree.fromstring(req.content)
# to handle the variety of namespaces and terms across services
# and versions, especially for "legacy" responses like WMS 1.3.0
possible_errors = [
'{http://www.opengis.net/ows}Exception',
'{http://www.opengis.net/ows/1.1}Exception',
'{http://www.opengis.net/ogc}ServiceException',
'ServiceException'
]
for possible_error in possible_errors:
serviceException = se_tree.find(possible_error)
if serviceException is not None:
# and we need to deal with some message nesting
raise ServiceException('\n'.join([t.strip() for t in serviceException.itertext() if t.strip()]))
return ResponseWrapper(req)
|
29,031 |
def _get_username(credentials: StolenCredentials) -> str:
if credentials.identities:
return credentials.identities[0]["username"]
else:
return ""
|
def _get_username(credentials: StolenCredentials) -> str:
return credentials.identities if credentials.identities[0]["username"] else ""
|
45,814 |
def load_depth(file_name):
"""Loads the depth using the syntel SDK and converts to torch.Tensor"""
if not os.path.isfile(file_name):
raise AssertionError("Invalid file {}".format(file_name))
import sintel_io
depth = sintel_io.depth_read(file_name)
return torch.from_numpy(depth).view(1, 1, *depth.shape).float()
|
def load_depth(file_name):
"""Loads the depth using the syntel SDK and converts to torch.Tensor"""
if not os.path.isfile(file_name):
raise FileExistsError("Invalid file {}".format(file_name))
import sintel_io
depth = sintel_io.depth_read(file_name)
return torch.from_numpy(depth).view(1, 1, *depth.shape).float()
|
5,500 |
def sendinblue_check(app_configs, **kwargs):
if not settings.SENDINBLUE_API_KEY:
return []
response = sendinblue.request("GET", "contacts/attributes")
if not response.ok:
return [
Error(
f"Error when creating sendinblue attribute: {response.json()['message']}",
id=SENDINBLUE_ERROR,
)
]
if not any(
attribute["name"] == "IS_PAYING" for attribute in response.json()["attributes"]
):
response = sendinblue.request(
"POST", "contacts/attributes/normal/IS_PAYING", json={"type": "boolean"},
)
if not response.ok:
return [
Error(
f"Error when creating sendinblue attribute: {response.json()['message']}",
id=SENDINBLUE_ERROR,
)
]
return []
|
def sendinblue_check(app_configs, **kwargs):
if not settings.SENDINBLUE_API_KEY:
return []
response = sendinblue.request("GET", "contacts/attributes")
if not response.ok:
return [
Error(
f"Error getting sendinblue attributes: {response.status_code}",
id=SENDINBLUE_ERROR,
)
]
if not any(
attribute["name"] == "IS_PAYING" for attribute in response.json()["attributes"]
):
response = sendinblue.request(
"POST", "contacts/attributes/normal/IS_PAYING", json={"type": "boolean"},
)
if not response.ok:
return [
Error(
f"Error when creating sendinblue attribute: {response.json()['message']}",
id=SENDINBLUE_ERROR,
)
]
return []
|
43,147 |
def gspmm(g, op, reduce_op, lhs_data, rhs_data):
r""" Generalized Sparse Matrix Multiplication interface.
It fuses two steps into one kernel.
1. Computes messages by :attr:`op` source node and edge features.
2. Aggregate the messages by :attr:`reduce_op` as the features on destination nodes.
.. math::
x_v = \psi_{(u, v, e)\in \mathcal{G}}(\rho(x_u, x_e))
where :math:`x_v` is the returned feature on destination nodes, and :math:`x_u`,
:math:`x_e` refers to :attr:`u`, :attr:`e` respectively. :math:`\rho` means binary
operator :attr:`op` and :math:`\psi` means reduce operator :attr:`reduce_op`,
:math:`\mathcal{G}` is the graph we apply gspmm on: :attr:`g`.
Note that this function does not handle gradients.
Parameters
----------
g : DGLGraph
The input graph.
op : str
The binary op's name, could be ``add``, ``sub``, ``mul``, ``div``,
``copy_lhs``, ``copy_rhs``.
reduce_op : str
Reduce operator, could be ``sum``, ``max``, ``min``, ``mean``.
lhs_data : tensor or None
The left operand, could be None if it's not required by the op.
rhs_data : tensor or None
The right operand, could be None if it's not required by the op.
Returns
-------
tensor
The result tensor.
"""
if op not in ['copy_lhs', 'copy_rhs']:
# Expand dims so that there will be no broadcasting issues with different
# number of dimensions. For example, given two shapes (N, 3, 1), (E, 5, 3, 4)
# that are valid broadcastable shapes, change them to (N, 1, 3, 1) and
# (E, 5, 3, 4)
lhs_shape = F.shape(lhs_data)
rhs_shape = F.shape(rhs_data)
if len(lhs_shape) != len(rhs_shape):
max_ndims = max(len(lhs_shape), len(rhs_shape))
lhs_pad_ndims = max_ndims - len(lhs_shape)
rhs_pad_ndims = max_ndims - len(rhs_shape)
new_lhs_shape = (lhs_shape[0],) + (1,) * lhs_pad_ndims + lhs_shape[1:]
new_rhs_shape = (rhs_shape[0],) + (1,) * rhs_pad_ndims + rhs_shape[1:]
lhs_data = F.reshape(lhs_data, new_lhs_shape)
rhs_data = F.reshape(rhs_data, new_rhs_shape)
# With max and min reducers infinity will be returned for zero degree nodes
ret = gspmm_internal(g._graph, op,
'sum' if reduce_op == 'mean' else reduce_op,
lhs_data, rhs_data)
# Replace infinity with zero for isolated nodes when reducer is min/max
if reduce_op in ['min', 'max']:
ret = F.replace_inf_with_zero(ret)
# divide in degrees for mean reducer.
if reduce_op == 'mean':
ret_shape = F.shape(ret)
deg = g.in_degrees()
deg = F.astype(F.clamp(deg, 1, max(g.number_of_edges(),1)), F.dtype(ret))
deg_shape = (ret_shape[0],) + (1,) * (len(ret_shape) - 1)
return ret / F.reshape(deg, deg_shape)
else:
return ret
|
def gspmm(g, op, reduce_op, lhs_data, rhs_data):
r""" Generalized Sparse Matrix Multiplication interface.
It fuses two steps into one kernel.
1. Computes messages by :attr:`op` source node and edge features.
2. Aggregate the messages by :attr:`reduce_op` as the features on destination nodes.
.. math::
x_v = \psi_{(u, v, e)\in \mathcal{G}}(\rho(x_u, x_e))
where :math:`x_v` is the returned feature on destination nodes, and :math:`x_u`,
:math:`x_e` refers to :attr:`u`, :attr:`e` respectively. :math:`\rho` means binary
operator :attr:`op` and :math:`\psi` means reduce operator :attr:`reduce_op`,
:math:`\mathcal{G}` is the graph we apply gspmm on: :attr:`g`.
Note that this function does not handle gradients.
Parameters
----------
g : DGLGraph
The input graph.
op : str
The binary op's name, could be ``add``, ``sub``, ``mul``, ``div``,
``copy_lhs``, ``copy_rhs``.
reduce_op : str
Reduce operator, could be ``sum``, ``max``, ``min``, ``mean``.
lhs_data : tensor or None
The left operand, could be None if it's not required by the op.
rhs_data : tensor or None
The right operand, could be None if it's not required by the op.
Returns
-------
tensor
The result tensor.
"""
if op not in ['copy_lhs', 'copy_rhs']:
# Expand dims so that there will be no broadcasting issues with different
# number of dimensions. For example, given two shapes (N, 3, 1), (E, 5, 3, 4)
# that are valid broadcastable shapes, change them to (N, 1, 3, 1) and
# (E, 5, 3, 4)
lhs_shape = F.shape(lhs_data)
rhs_shape = F.shape(rhs_data)
if len(lhs_shape) != len(rhs_shape):
max_ndims = max(len(lhs_shape), len(rhs_shape))
lhs_pad_ndims = max_ndims - len(lhs_shape)
rhs_pad_ndims = max_ndims - len(rhs_shape)
new_lhs_shape = (lhs_shape[0],) + (1,) * lhs_pad_ndims + lhs_shape[1:]
new_rhs_shape = (rhs_shape[0],) + (1,) * rhs_pad_ndims + rhs_shape[1:]
lhs_data = F.reshape(lhs_data, new_lhs_shape)
rhs_data = F.reshape(rhs_data, new_rhs_shape)
# With max and min reducers infinity will be returned for zero degree nodes
ret = gspmm_internal(g._graph, op,
'sum' if reduce_op == 'mean' else reduce_op,
lhs_data, rhs_data)
# Replace infinity with zero for isolated nodes when reducer is min/max
if reduce_op in ['min', 'max']:
ret = F.replace_inf_with_zero(ret)
# divide in degrees for mean reducer.
if reduce_op == 'mean':
ret_shape = F.shape(ret)
deg = g.in_degrees()
deg = F.astype(F.clamp(deg, 1, max(g.number_of_edges(), 1)), F.dtype(ret))
deg_shape = (ret_shape[0],) + (1,) * (len(ret_shape) - 1)
return ret / F.reshape(deg, deg_shape)
else:
return ret
|
14,123 |
def _hilbert_distance(gdf, total_bounds, p):
"""
Calculate hilbert distance for a GeoDataFrame
int coordinates
Parameters
----------
gdf : GeoDataFrame
total_bounds : Total bounds of geometries - array
p : The number of iterations used in constructing the Hilbert curve
Returns
---------
Pandas Series containing hilbert distances
"""
# Calculate bounds as numpy array
bounds = gdf.bounds.to_numpy()
# Calculate discrete coords based on total bounds and bounds
x, y = _continuous_to_discrete_coords(total_bounds, bounds, p)
# Calculate distance from morton curve
distances = _encode(p, x, y)
return pd.Series(distances, index=gdf.index, name="hilbert_distance")
|
def _hilbert_distance(gdf, total_bounds, p):
"""
Calculate distance along a Hilbert curve a GeoDataFrame
int coordinates
Parameters
----------
gdf : GeoDataFrame
total_bounds : Total bounds of geometries - array
p : The number of iterations used in constructing the Hilbert curve
Returns
---------
Pandas Series containing hilbert distances
"""
# Calculate bounds as numpy array
bounds = gdf.bounds.to_numpy()
# Calculate discrete coords based on total bounds and bounds
x, y = _continuous_to_discrete_coords(total_bounds, bounds, p)
# Calculate distance from morton curve
distances = _encode(p, x, y)
return pd.Series(distances, index=gdf.index, name="hilbert_distance")
|
30,502 |
def check_ref_set_exist(ref_set_name):
"""
The function checks if reference set is exist
Args:
ref_set_name (str): Reference set name
Returns:
Dict. If found - Reference set object, else - Error
"""
try:
return get_reference_by_name(ref_set_name)
except Exception as e:
if '1002' in str(e):
return None
raise e
|
def check_ref_set_exist(ref_set_name):
"""
The function checks if reference set is exist
Args:
ref_set_name (str): Reference set name
Returns:
Dict. If found - Reference set object, else - Error
"""
try:
return True if get_reference_by_name(ref_set_name) else False
except Exception as e:
if '1002' in str(e):
return None
raise e
|
1,242 |
def _unpack_var(var):
"""
Parses key : value pair from `var`
Parameters
----------
var : str
Entry from HEAD file
Returns
-------
name : str
Name of attribute
value : object
Value of attribute
Examples
--------
>>> var = "type = integer-attribute\\nname = BRICK_TYPES\\ncount = 1\\n1\\n"
>>> name, attr = _unpack_var(var)
>>> print(name, attr)
BRICK_TYPES 1
>>> var = "type = string-attribute\\nname = TEMPLATE_SPACE\\ncount = 5\\n'ORIG~"
>>> name, attr = _unpack_var(var)
>>> print(name, attr)
TEMPLATE_SPACE ORIG
"""
err_msg = (f'Please check HEAD file to ensure it is AFNI compliant. '
f'Offending attribute:\n{var}')
atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var)
if len(atype) != 1:
raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}')
if len(aname) != 1:
raise AFNIHeaderError(f'Invalid attribute name entry in HEAD file. {err_msg}')
atype = _attr_dic.get(atype[0], str)
attr = ' '.join(var.strip().splitlines()[3:])
if atype is not str:
try:
attr = [atype(f) for f in attr.split()]
except ValueError:
raise AFNIHeaderError(f'Failed to read variable from HEAD file '
f'due to improper type casting. {err_msg}')
else:
# AFNI string attributes will always start with open single quote and
# end with a tilde (NUL). These attributes CANNOT contain tildes (so
# stripping is safe), but can contain single quotes (so we replace)
attr = attr.replace('\'', '', 1).rstrip('~')
return aname[0], attr[0] if len(attr) == 1 else attr
|
def _unpack_var(var):
"""
Parses key : value pair from `var`
Parameters
----------
var : str
Entry from HEAD file
Returns
-------
name : str
Name of attribute
value : object
Value of attribute
Examples
--------
>>> var = "type = integer-attribute\\nname = BRICK_TYPES\\ncount = 1\\n1\\n"
>>> name, attr = _unpack_var(var)
>>> print(name, attr)
BRICK_TYPES 1
>>> var = "type = string-attribute\\nname = TEMPLATE_SPACE\\ncount = 5\\n'ORIG~"
>>> name, attr = _unpack_var(var)
>>> print(name, attr)
TEMPLATE_SPACE ORIG
"""
err_msg = (f'Please check HEAD file to ensure it is AFNI compliant. '
f'Offending attribute:\n{var}')
atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var)
if len(atype) != 1:
raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}')
if len(aname) != 1:
raise AFNIHeaderError(f'Invalid attribute name entry in HEAD file. {err_msg}')
atype = _attr_dic.get(atype[0], str)
attr = ' '.join(var.strip().splitlines()[3:])
if atype is not str:
try:
attr = [atype(f) for f in attr.split()]
except ValueError:
raise AFNIHeaderError('Failed to read variable from HEAD file '
f'due to improper type casting. {err_msg}')
else:
# AFNI string attributes will always start with open single quote and
# end with a tilde (NUL). These attributes CANNOT contain tildes (so
# stripping is safe), but can contain single quotes (so we replace)
attr = attr.replace('\'', '', 1).rstrip('~')
return aname[0], attr[0] if len(attr) == 1 else attr
|
56,595 |
def Calculate_ICS(
dataset_dict,
scale: Optional[ScaleKeyword] = None,
ic: Optional[ICKeyword] = None
):
"""
Helper Function used to execute the loo and waic information criteria
LOO is leave-one-out (PSIS-LOO `loo`) cross-validation and
WAIC is the widely applicable information criterion.
Read more theory here - in a paper by some of the leading authorities
on model selection dx.doi.org/10.1111/1467-9868.00353
Parameters
----------
dataset_dict : dataset_dict: dict[str] -> InferenceData or ELPDData
A dictionary of model names and InferenceData or ELPDData objects
scale: str, optional
Output scale for IC. Available options are:
- `log` : (default) log-score (after Vehtari et al. (2017))
- `negative_log` : -1 * (log-score)
- `deviance` : -2 * (log-score)
A higher log-score (or a lower deviance) indicates a model with better predictive
accuracy.
ic: str, optional
Information Criterion (PSIS-LOO `loo` or WAIC `waic`) used to compare models. Defaults to
``rcParams["stats.information_criterion"]``.
Returns
-------
[WIP]
"""
names = list(dataset_dict.keys())
if scale is not None:
scale = cast(ScaleKeyword, scale.lower())
else:
scale = cast(ScaleKeyword, rcParams["stats.ic_scale"])
allowable = ["log", "negative_log", "deviance"] if NO_GET_ARGS else get_args(ScaleKeyword)
if scale not in allowable:
raise ValueError(f"{scale} is not a valid value for scale: must be in {allowable}")
if scale == "log":
scale_value = 1
ascending = False
else:
if scale == "negative_log":
scale_value = -1
else:
scale_value = -2
ascending = True
if ic is None:
ic = cast(ICKeyword, rcParams["stats.information_criterion"])
else:
ic = cast(ICKeyword, ic.lower())
allowable = ["loo", "waic"] if NO_GET_ARGS else get_args(ICKeyword)
if ic not in allowable:
raise ValueError(f"{ic} is not a valid value for ic: must be in {allowable}")
# I have to return loo or waic in order for compare to create the df_comp and scale col
if ic == "loo":
ic_func: Callable = loo
elif ic == "waic":
ic_func = waic
else:
raise NotImplementedError(f"The information criterion {ic} is not supported.")
names = []
dataset_dict = deepcopy(dataset_dict)
for name, dataset in dataset_dict.items():
names.append(name)
if not isinstance(dataset, ELPDData):
try:
dataset_dict[name] = ic_func(convert_to_inference_data(dataset), pointwise=True, scale=scale)
except Exception as e:
raise e.__class__(f"Encountered error trying to compute {ic} from model {name}.") from e
ics = [elpd_data.index[0] for elpd_data in dataset_dict.values()]
if not all(x == ics[0] for x in ics):
raise SyntaxError(
"All Information Criteria must be of the same kind, but both loo and waic data present"
)
return(dataset_dict, scale, ic, ics, name)
|
def Calculate_ICS(
dataset_dict,
scale: Optional[ScaleKeyword] = None,
ic: Optional[ICKeyword] = None
):
"""
Helper Function used to execute the loo and waic information criteria
LOO is leave-one-out (PSIS-LOO `loo`) cross-validation and
WAIC is the widely applicable information criterion.
Read more theory here - in a paper by some of the leading authorities
on model selection dx.doi.org/10.1111/1467-9868.00353
Parameters
----------
dataset_dict : dict of {str: InferenceData or ELPDData}
A dictionary of model names and InferenceData or ELPDData objects
scale: str, optional
Output scale for IC. Available options are:
- `log` : (default) log-score (after Vehtari et al. (2017))
- `negative_log` : -1 * (log-score)
- `deviance` : -2 * (log-score)
A higher log-score (or a lower deviance) indicates a model with better predictive
accuracy.
ic: str, optional
Information Criterion (PSIS-LOO `loo` or WAIC `waic`) used to compare models. Defaults to
``rcParams["stats.information_criterion"]``.
Returns
-------
[WIP]
"""
names = list(dataset_dict.keys())
if scale is not None:
scale = cast(ScaleKeyword, scale.lower())
else:
scale = cast(ScaleKeyword, rcParams["stats.ic_scale"])
allowable = ["log", "negative_log", "deviance"] if NO_GET_ARGS else get_args(ScaleKeyword)
if scale not in allowable:
raise ValueError(f"{scale} is not a valid value for scale: must be in {allowable}")
if scale == "log":
scale_value = 1
ascending = False
else:
if scale == "negative_log":
scale_value = -1
else:
scale_value = -2
ascending = True
if ic is None:
ic = cast(ICKeyword, rcParams["stats.information_criterion"])
else:
ic = cast(ICKeyword, ic.lower())
allowable = ["loo", "waic"] if NO_GET_ARGS else get_args(ICKeyword)
if ic not in allowable:
raise ValueError(f"{ic} is not a valid value for ic: must be in {allowable}")
# I have to return loo or waic in order for compare to create the df_comp and scale col
if ic == "loo":
ic_func: Callable = loo
elif ic == "waic":
ic_func = waic
else:
raise NotImplementedError(f"The information criterion {ic} is not supported.")
names = []
dataset_dict = deepcopy(dataset_dict)
for name, dataset in dataset_dict.items():
names.append(name)
if not isinstance(dataset, ELPDData):
try:
dataset_dict[name] = ic_func(convert_to_inference_data(dataset), pointwise=True, scale=scale)
except Exception as e:
raise e.__class__(f"Encountered error trying to compute {ic} from model {name}.") from e
ics = [elpd_data.index[0] for elpd_data in dataset_dict.values()]
if not all(x == ics[0] for x in ics):
raise SyntaxError(
"All Information Criteria must be of the same kind, but both loo and waic data present"
)
return(dataset_dict, scale, ic, ics, name)
|
32,510 |
def main() -> None:
logging.getLogger("argus_cli").setLevel("WARNING")
first_fetch_period = parse_first_fetch(
demisto.params().get("first_fetch", "-1 day")
)
set_argus_settings(
demisto.params().get("api_key"),
demisto.params().get("api_url"),
handle_proxy(),
demisto.params().get("insecure"),
)
demisto.debug(f"Command being called is {demisto.command()}")
try:
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
return_results(test_module_command())
elif demisto.command() == "fetch-incidents":
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
last_run=demisto.getLastRun(),
first_fetch_period=first_fetch_period,
limit=demisto.params().get("max_fetch", 25),
min_severity=demisto.params().get("min_severity", "low").lower(),
integration_instance=demisto.integrationInstance(),
mirror_direction=demisto.params().get("mirror_direction", "None"),
mirror_tags=demisto.params().get("mirror_tag"),
exclude_tag=demisto.params().get("exclude_tag"),
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == "get-remote-data":
return_results(
get_remote_data_command(
demisto.args(),
integration_instance=demisto.integrationInstance(),
mirror_direction=demisto.params().get("mirror_direction", "None"),
mirror_tags=demisto.params().get("mirror_tag"),
)
)
if demisto.command() == "get-modified-remote-data":
# Hotfix for mirroring issues.
raise NotImplementedError
elif demisto.command() == "argus-add-attachment":
return_results(add_attachment_command(demisto.args()))
elif demisto.command() == "update-remote-system":
return_results(update_remote_system_command(demisto.args()))
elif demisto.command() == "argus-add-case-tag":
return_results(add_case_tag_command(demisto.args()))
elif demisto.command() == "argus-add-comment":
return_results(add_comment_command(demisto.args()))
elif demisto.command() == "argus-advanced-case-search":
return_results(advanced_case_search_command(demisto.args()))
elif demisto.command() == "argus-close-case":
return_results(close_case_command(demisto.args()))
elif demisto.command() == "argus-create-case":
return_results(create_case_command(demisto.args()))
elif demisto.command() == "argus-delete-case":
return_results(delete_case_command(demisto.args()))
elif demisto.command() == "argus-delete-comment":
return_results(delete_comment_command(demisto.args()))
elif demisto.command() == "argus-download-attachment":
return_results(download_attachment_command(demisto.args()))
elif demisto.command() == "argus-download-attachment-by-filename":
return_results(download_attachment_by_filename_command(demisto.args()))
elif demisto.command() == "argus-edit-comment":
return_results(edit_comment_command(demisto.args()))
elif demisto.command() == "argus-get-attachment":
return_results(get_attachment_command(demisto.args()))
elif demisto.command() == "argus-get-case-metadata-by-id":
return_results(get_case_metadata_by_id_command(demisto.args()))
elif demisto.command() == "argus-list-case-attachments":
return_results(list_case_attachments_command(demisto.args()))
elif demisto.command() == "argus-list-case-tags":
return_results(list_case_tags_command(demisto.args()))
elif demisto.command() == "argus-list-case-comments":
return_results(list_case_comments_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-id":
return_results(remove_case_tag_by_id_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-key-value":
return_results(remove_case_tag_by_key_value_command(demisto.args()))
elif demisto.command() == "argus-update-case":
return_results(update_case_command(demisto.args()))
elif demisto.command() == "argus-get-event":
return_results(get_event_command(demisto.args()))
elif demisto.command() == "argus-get-events-for-case":
return_results(get_events_for_case_command(demisto.args()))
elif demisto.command() == "argus-find-aggregated-events":
return_results(find_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-list-aggregated-events":
return_results(list_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-get-payload":
return_results(get_payload_command(demisto.args()))
elif demisto.command() == "argus-get-pcap":
return_results(get_pcap_command(demisto.args()))
elif demisto.command() == "argus-find-nids-events":
return_results(find_nids_events_command(demisto.args()))
elif demisto.command() == "argus-list-nids-events":
return_results(list_nids_events_command(demisto.args()))
elif demisto.command() == "argus-pdns-search-records":
return_results(search_records_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-domain":
return_results(fetch_observations_for_domain_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-ip":
return_results(fetch_observations_for_i_p_command(demisto.args()))
elif demisto.command() == "argus-print-case-comments":
return_results(print_case_comments_command(demisto.args()))
elif demisto.command() == "argus-print-case-metadata-by-id":
return_results(print_case_metadata_by_id_command(demisto.args()))
elif demisto.command() == "argus-download-case-attachments":
return_results(download_case_attachments_command(demisto.args()))
# Log exceptions and return errors
except AccessDeniedException as denied:
demisto.info(denied.message)
return_warning(denied.message)
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}"
)
|
def main() -> None:
logging.getLogger("argus_cli").setLevel("WARNING")
first_fetch_period = parse_first_fetch(
demisto.params().get("first_fetch", "-1 day")
)
set_argus_settings(
demisto.params().get("api_key"),
demisto.params().get("api_url"),
handle_proxy(),
demisto.params().get("insecure"),
)
demisto.debug(f"Command being called is {demisto.command()}")
try:
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
return_results(test_module_command())
elif demisto.command() == "fetch-incidents":
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
last_run=demisto.getLastRun(),
first_fetch_period=first_fetch_period,
limit=demisto.params().get("max_fetch", 25),
min_severity=demisto.params().get("min_severity", "low").lower(),
integration_instance=demisto.integrationInstance(),
mirror_direction=demisto.params().get("mirror_direction", "None"),
mirror_tags=demisto.params().get("mirror_tag"),
exclude_tag=demisto.params().get("exclude_tag"),
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == "get-remote-data":
return_results(
get_remote_data_command(
demisto.args(),
integration_instance=demisto.integrationInstance(),
mirror_direction=demisto.params().get("mirror_direction", "None"),
mirror_tags=demisto.params().get("mirror_tag"),
)
)
if demisto.command() == "get-modified-remote-data":
# Hotfix for mirroring issues.
raise NotImplementedError(f'The "get-modified-remote-data" command is not implemented')
elif demisto.command() == "argus-add-attachment":
return_results(add_attachment_command(demisto.args()))
elif demisto.command() == "update-remote-system":
return_results(update_remote_system_command(demisto.args()))
elif demisto.command() == "argus-add-case-tag":
return_results(add_case_tag_command(demisto.args()))
elif demisto.command() == "argus-add-comment":
return_results(add_comment_command(demisto.args()))
elif demisto.command() == "argus-advanced-case-search":
return_results(advanced_case_search_command(demisto.args()))
elif demisto.command() == "argus-close-case":
return_results(close_case_command(demisto.args()))
elif demisto.command() == "argus-create-case":
return_results(create_case_command(demisto.args()))
elif demisto.command() == "argus-delete-case":
return_results(delete_case_command(demisto.args()))
elif demisto.command() == "argus-delete-comment":
return_results(delete_comment_command(demisto.args()))
elif demisto.command() == "argus-download-attachment":
return_results(download_attachment_command(demisto.args()))
elif demisto.command() == "argus-download-attachment-by-filename":
return_results(download_attachment_by_filename_command(demisto.args()))
elif demisto.command() == "argus-edit-comment":
return_results(edit_comment_command(demisto.args()))
elif demisto.command() == "argus-get-attachment":
return_results(get_attachment_command(demisto.args()))
elif demisto.command() == "argus-get-case-metadata-by-id":
return_results(get_case_metadata_by_id_command(demisto.args()))
elif demisto.command() == "argus-list-case-attachments":
return_results(list_case_attachments_command(demisto.args()))
elif demisto.command() == "argus-list-case-tags":
return_results(list_case_tags_command(demisto.args()))
elif demisto.command() == "argus-list-case-comments":
return_results(list_case_comments_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-id":
return_results(remove_case_tag_by_id_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-key-value":
return_results(remove_case_tag_by_key_value_command(demisto.args()))
elif demisto.command() == "argus-update-case":
return_results(update_case_command(demisto.args()))
elif demisto.command() == "argus-get-event":
return_results(get_event_command(demisto.args()))
elif demisto.command() == "argus-get-events-for-case":
return_results(get_events_for_case_command(demisto.args()))
elif demisto.command() == "argus-find-aggregated-events":
return_results(find_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-list-aggregated-events":
return_results(list_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-get-payload":
return_results(get_payload_command(demisto.args()))
elif demisto.command() == "argus-get-pcap":
return_results(get_pcap_command(demisto.args()))
elif demisto.command() == "argus-find-nids-events":
return_results(find_nids_events_command(demisto.args()))
elif demisto.command() == "argus-list-nids-events":
return_results(list_nids_events_command(demisto.args()))
elif demisto.command() == "argus-pdns-search-records":
return_results(search_records_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-domain":
return_results(fetch_observations_for_domain_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-ip":
return_results(fetch_observations_for_i_p_command(demisto.args()))
elif demisto.command() == "argus-print-case-comments":
return_results(print_case_comments_command(demisto.args()))
elif demisto.command() == "argus-print-case-metadata-by-id":
return_results(print_case_metadata_by_id_command(demisto.args()))
elif demisto.command() == "argus-download-case-attachments":
return_results(download_case_attachments_command(demisto.args()))
# Log exceptions and return errors
except AccessDeniedException as denied:
demisto.info(denied.message)
return_warning(denied.message)
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}"
)
|
49,588 |
def single_partition_join(left, right, **kwargs):
# if the merge is performed on_index, divisions can be kept, otherwise the
# new index will not necessarily correspond with the current divisions
meta = left._meta_nonempty.merge(right._meta_nonempty, **kwargs)
use_left = kwargs.get("right_index") or right._contains_index_name(
kwargs.get("right_on")
)
use_right = kwargs.get("left_index") or left._contains_index_name(
kwargs.get("left_on")
)
if len(meta) == 0:
if use_left:
meta.index = meta.index.astype(left.index.dtype)
elif use_right:
meta.index = meta.index.astype(right.index.dtype)
else:
meta.index = meta.index.astype("int64")
kwargs["empty_index_dtype"] = meta.index.dtype
kwargs["categorical_columns"] = meta.select_dtypes(include="category").columns
if right.npartitions == 1 and kwargs["how"] in allowed_left:
joined = left.map_partitions(
merge_chunk,
right,
meta=meta,
enforce_metadata=False,
transform_divisions=False,
align_dataframes=False,
token="merge", # NOTE: misleadingly, this is actually the name
**kwargs,
)
if use_left:
joined.divisions = left.divisions
elif use_right and len(right.divisions) == len(left.divisions):
joined.divisions = right.divisions
else:
joined.divisions = [None for _ in left.divisions]
elif left.npartitions == 1 and kwargs["how"] in allowed_right:
joined = right.map_partitions(
lambda right, left, **kwargs: merge_chunk(left, right, **kwargs),
left,
meta=meta,
enforce_metadata=False,
transform_divisions=False,
align_dataframes=False,
token="merge", # NOTE: misleadingly, this is actually the name
**kwargs,
)
if use_right:
joined.divisions = right.divisions
elif use_left and len(left.divisions) == len(right.divisions):
joined.divisions = left.divisions
else:
joined.divisions = [None for _ in right.divisions]
else:
raise NotImplementedError(
"single_partition_join has no fallback for invalid calls"
)
return joined
|
def single_partition_join(left, right, **kwargs):
# if the merge is performed on_index, divisions can be kept, otherwise the
# new index will not necessarily correspond with the current divisions
meta = left._meta_nonempty.merge(right._meta_nonempty, **kwargs)
use_left = kwargs.get("right_index") or right._contains_index_name(
kwargs.get("right_on")
)
use_right = kwargs.get("left_index") or left._contains_index_name(
kwargs.get("left_on")
)
if len(meta) == 0:
if use_left:
meta.index = meta.index.astype(left.index.dtype)
elif use_right:
meta.index = meta.index.astype(right.index.dtype)
else:
meta.index = meta.index.astype("int64")
kwargs["empty_index_dtype"] = meta.index.dtype
kwargs["categorical_columns"] = meta.select_dtypes(include="category").columns
if right.npartitions == 1 and kwargs["how"] in allowed_left:
joined = left.map_partitions(
merge_chunk,
right,
meta=meta,
enforce_metadata=False,
transform_divisions=False,
align_dataframes=False,
token="merge", # NOTE: misleadingly, this is actually the name
**kwargs,
)
if use_left:
joined.divisions = left.divisions
elif use_right and len(right.divisions) == len(left.divisions):
joined.divisions = right.divisions
else:
joined.divisions = [None for _ in left.divisions]
elif left.npartitions == 1 and kwargs["how"] in allowed_right:
joined = map_partitions(
merge_chunk,
left,
right,
meta=meta,
enforce_metadata=False,
transform_divisions=False,
align_dataframes=False,
token="merge", # NOTE: misleadingly, this is actually the name
**kwargs,
)
if use_right:
joined.divisions = right.divisions
elif use_left and len(left.divisions) == len(right.divisions):
joined.divisions = left.divisions
else:
joined.divisions = [None for _ in right.divisions]
else:
raise NotImplementedError(
"single_partition_join has no fallback for invalid calls"
)
return joined
|
45,899 |
def filter2d(
input: torch.Tensor, kernel: torch.Tensor, border_type: str = 'reflect', normalized: bool = False,
padding: str = 'same'
) -> torch.Tensor:
r"""Convolve a tensor with a 2d kernel.
The function applies a given kernel to a tensor. The kernel is applied
independently at each depth channel of the tensor. Before applying the
kernel, the function applies padding according to the specified mode so
that the output remains in the same shape.
Args:
input: the input tensor with shape of
:math:`(B, C, H, W)`.
kernel: the kernel to be convolved with the input
tensor. The kernel shape must be :math:`(1, kH, kW)` or :math:`(B, kH, kW)`.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``.
normalized: If True, kernel will be L1 normalized.
padding: This defines the type of padding.
2 modes available ``'same'`` or ``'valid'``
Return:
torch.Tensor: the convolved tensor of same size and numbers of channels
as the input with shape :math:`(B, C, H, W)`.
Example:
>>> input = torch.tensor([[[
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 5., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],]]])
>>> kernel = torch.ones(1, 3, 3)
>>> filter2d(input, kernel, padding='same')
tensor([[[[0., 0., 0., 0., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 0., 0., 0., 0.]]]])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input border_type is not torch.Tensor. Got {type(input)}")
if not isinstance(kernel, torch.Tensor):
raise TypeError(f"Input border_type is not torch.Tensor. Got {type(kernel)}")
if not isinstance(border_type, str):
raise TypeError(f"Input border_type is not string. Got {type(kernel)}")
if not isinstance(padding, str):
raise TypeError(f"Input padding is not string. Got {type(padding)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
if not len(kernel.shape) == 3 and kernel.shape[0] != 1:
raise ValueError(f"Invalid kernel shape, we expect 1xHxW. Got: {kernel.shape}")
# prepare kernel
b, c, h, w = input.shape
tmp_kernel: torch.Tensor = kernel.unsqueeze(1).to(input)
if normalized:
tmp_kernel = normalize_kernel2d(tmp_kernel)
tmp_kernel = tmp_kernel.expand(-1, c, -1, -1)
height, width = tmp_kernel.shape[-2:]
# pad the input tensor
if padding == 'same':
padding_shape: List[int] = _compute_padding([height, width])
input = F.pad(input, padding_shape, mode=border_type)
# kernel and input tensor reshape to align element-wise or batch-wise params
tmp_kernel = tmp_kernel.reshape(-1, 1, height, width)
input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1))
# convolve the tensor with the kernel.
output = F.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1)
if padding == 'same':
return output.view(b, c, h, w)
else:
return output.view(b, c, h - height + 1, w - width + 1)
|
def filter2d(
input: torch.Tensor, kernel: torch.Tensor, border_type: str = 'reflect', normalized: bool = False,
padding: str = 'same'
) -> torch.Tensor:
r"""Convolve a tensor with a 2d kernel.
The function applies a given kernel to a tensor. The kernel is applied
independently at each depth channel of the tensor. Before applying the
kernel, the function applies padding according to the specified mode so
that the output remains in the same shape.
Args:
input: the input tensor with shape of
:math:`(B, C, H, W)`.
kernel: the kernel to be convolved with the input
tensor. The kernel shape must be :math:`(1, kH, kW)` or :math:`(B, kH, kW)`.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``.
normalized: If True, kernel will be L1 normalized.
padding: This defines the type of padding.
2 modes available ``'same'`` or ``'valid'``
Return:
torch.Tensor: the convolved tensor of same size and numbers of channels
as the input with shape :math:`(B, C, H, W)`.
Example:
>>> input = torch.tensor([[[
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 5., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],]]])
>>> kernel = torch.ones(1, 3, 3)
>>> filter2d(input, kernel, padding='same')
tensor([[[[0., 0., 0., 0., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 0., 0., 0., 0.]]]])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input border_type is not torch.Tensor. Got {type(input)}")
if not isinstance(kernel, torch.Tensor):
raise TypeError(f"Input border_type is not torch.Tensor. Got {type(kernel)}")
if not isinstance(border_type, str):
raise TypeError(f"Input border_type is not string. Got {type(kernel)}")
if not isinstance(padding, str):
raise TypeError(f"Input padding is not string. Got {type(padding)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
if not len(kernel.shape) == 3 and kernel.shape[0] != 1:
raise ValueError(f"Invalid kernel shape, we expect 1xHxW. Got: {kernel.shape}")
# prepare kernel
b, c, h, w = input.shape
tmp_kernel: torch.Tensor = kernel.unsqueeze(1).to(input)
if normalized:
tmp_kernel = normalize_kernel2d(tmp_kernel)
tmp_kernel = tmp_kernel.expand(-1, c, -1, -1)
height, width = tmp_kernel.shape[-2:]
# pad the input tensor
if padding == 'same':
padding_shape: List[int] = _compute_padding([height, width])
input = F.pad(input, padding_shape, mode=border_type)
# kernel and input tensor reshape to align element-wise or batch-wise params
tmp_kernel = tmp_kernel.reshape(-1, 1, height, width)
input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1))
# convolve the tensor with the kernel.
output = F.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1)
if padding == 'same':
out = output.view(b, c, h, w)
else:
return output.view(b, c, h - height + 1, w - width + 1)
|
42,824 |
def backup_dotfiles(backup_dest_path, dry_run=False, home_path=os.path.expanduser("~"), skip=False):
"""
Create `dotfiles` dir and makes copies of dotfiles and dotfolders.
Assumes that dotfiles are stored in the home directory.
:param skip: Boolean flag to skip prompting for overwrite. Used for scripting.
:param backup_dest_path: Destination path for dotfiles. Like, ~/shallow-backup/dotfiles. Used in tests.
:param home_path: Path where dotfiles will be found. $HOME by default.
:param dry_run: Flag for determining if debug info should be shown, or copying should occur.
"""
print_section_header("DOTFILES", Fore.BLUE)
if not dry_run:
overwrite_dir_prompt_if_needed(backup_dest_path, skip)
# get dotfolders and dotfiles
config = get_config()["dotfiles"]
# Aggregate pairs of [(Installed dotfile path, backup dest path)] in a list to be sorted into
# dotfiles and dotfolders later
dot_path_pairs = []
for dotfile_path_from_config, options in config.items():
# Evaluate condition, if specified. Skip if the command doesn't return true.
condition_success = evaluate_condition(condition=options["backup_condition"],
backup_or_reinstall="backup",
dotfile_path=dotfile_path_from_config)
if not condition_success:
continue
# If a file path in the config starts with /, it's a full path like /etc/ssh/
if dotfile_path_from_config.startswith("/"):
installed_dotfile_path = dotfile_path_from_config
installed_dotfile_path = quote(':' + installed_dotfile_path[1:])
backup_dotfile_path = quote(os.path.join(backup_dest_path, installed_dotfile_path))
dot_path_pairs.append((dotfile_path_from_config, backup_dotfile_path))
else: # Dotfile living in $HOME
installed_dotfile_path = quote(os.path.join(home_path, dotfile_path_from_config))
backup_dotfile_path = quote(os.path.join(backup_dest_path, dotfile_path_from_config))
dot_path_pairs.append((installed_dotfile_path, backup_dotfile_path))
# Separate dotfiles and dotfolders
dotfolders_mp_in = []
dotfiles_mp_in = []
for path_pair in dot_path_pairs:
installed_path = path_pair[0]
if os.path.isdir(installed_path):
dotfolders_mp_in.append(path_pair)
else:
dotfiles_mp_in.append(path_pair)
# Print source -> dest and skip the copying step
if dry_run:
print_yellow_bold("Dotfiles:")
for source, dest in dotfiles_mp_in:
print_dry_run_copy_info(source, dest)
print_yellow_bold("\nDotfolders:")
for source, dest in dotfolders_mp_in:
print_dry_run_copy_info(source, dest)
return
# Fix https://github.com/alichtman/shallow-backup/issues/230
for dest_path in [path_pair[1] for path_pair in dotfiles_mp_in + dotfolders_mp_in]:
print(f"Creating: {os.path.split(dest_path)[0]}")
safe_mkdir(os.path.split(dest_path)[0])
with mp.Pool(mp.cpu_count()):
print_blue_bold("Backing up dotfolders...")
for x in dotfolders_mp_in:
p = mp.Process(target=copy_dir_if_valid, args=(x[0], x[1],))
p.start()
p.join()
print_blue_bold("Backing up dotfiles...")
for x in dotfiles_mp_in:
p = mp.Process(target=copyfile, args=(x[0], x[1],))
p.start()
p.join()
|
def backup_dotfiles(backup_dest_path, dry_run=False, home_path=os.path.expanduser("~"), skip=False):
"""
Create `dotfiles` dir and makes copies of dotfiles and dotfolders.
Assumes that dotfiles are stored in the home directory.
:param skip: Boolean flag to skip prompting for overwrite. Used for scripting.
:param backup_dest_path: Destination path for dotfiles. Like, ~/shallow-backup/dotfiles. Used in tests.
:param home_path: Path where dotfiles will be found. $HOME by default.
:param dry_run: Flag for determining if debug info should be shown or copying should occur.
"""
print_section_header("DOTFILES", Fore.BLUE)
if not dry_run:
overwrite_dir_prompt_if_needed(backup_dest_path, skip)
# get dotfolders and dotfiles
config = get_config()["dotfiles"]
# Aggregate pairs of [(Installed dotfile path, backup dest path)] in a list to be sorted into
# dotfiles and dotfolders later
dot_path_pairs = []
for dotfile_path_from_config, options in config.items():
# Evaluate condition, if specified. Skip if the command doesn't return true.
condition_success = evaluate_condition(condition=options["backup_condition"],
backup_or_reinstall="backup",
dotfile_path=dotfile_path_from_config)
if not condition_success:
continue
# If a file path in the config starts with /, it's a full path like /etc/ssh/
if dotfile_path_from_config.startswith("/"):
installed_dotfile_path = dotfile_path_from_config
installed_dotfile_path = quote(':' + installed_dotfile_path[1:])
backup_dotfile_path = quote(os.path.join(backup_dest_path, installed_dotfile_path))
dot_path_pairs.append((dotfile_path_from_config, backup_dotfile_path))
else: # Dotfile living in $HOME
installed_dotfile_path = quote(os.path.join(home_path, dotfile_path_from_config))
backup_dotfile_path = quote(os.path.join(backup_dest_path, dotfile_path_from_config))
dot_path_pairs.append((installed_dotfile_path, backup_dotfile_path))
# Separate dotfiles and dotfolders
dotfolders_mp_in = []
dotfiles_mp_in = []
for path_pair in dot_path_pairs:
installed_path = path_pair[0]
if os.path.isdir(installed_path):
dotfolders_mp_in.append(path_pair)
else:
dotfiles_mp_in.append(path_pair)
# Print source -> dest and skip the copying step
if dry_run:
print_yellow_bold("Dotfiles:")
for source, dest in dotfiles_mp_in:
print_dry_run_copy_info(source, dest)
print_yellow_bold("\nDotfolders:")
for source, dest in dotfolders_mp_in:
print_dry_run_copy_info(source, dest)
return
# Fix https://github.com/alichtman/shallow-backup/issues/230
for dest_path in [path_pair[1] for path_pair in dotfiles_mp_in + dotfolders_mp_in]:
print(f"Creating: {os.path.split(dest_path)[0]}")
safe_mkdir(os.path.split(dest_path)[0])
with mp.Pool(mp.cpu_count()):
print_blue_bold("Backing up dotfolders...")
for x in dotfolders_mp_in:
p = mp.Process(target=copy_dir_if_valid, args=(x[0], x[1],))
p.start()
p.join()
print_blue_bold("Backing up dotfiles...")
for x in dotfiles_mp_in:
p = mp.Process(target=copyfile, args=(x[0], x[1],))
p.start()
p.join()
|
17,529 |
def construct_env(tools_to_activate):
env_string = ''
newpath, added_path = adjusted_path(tools_to_activate)
# Don't bother setting the path if there are no changes.
if os.environ['PATH'] != newpath:
if POWERSHELL:
env_string += '$env:PATH="' + newpath + '"\n'
elif CMD:
env_string += 'SET PATH=' + newpath + '\n'
elif CSH:
env_string += 'setenv PATH "' + newpath + '";\n'
elif BASH:
env_string += 'export PATH="' + newpath + '";\n'
else:
assert False
if added_path:
errlog('Adding directories to PATH:')
for item in added_path:
errlog('PATH += ' + item)
errlog('')
# A core variable EMSDK points to the root of Emscripten SDK directory.
env_vars = [('EMSDK', to_unix_path(emsdk_path()))]
em_config_path = os.path.normpath(dot_emscripten_path())
if to_unix_path(os.environ.get('EM_CONFIG', '')) != to_unix_path(em_config_path):
env_vars += [('EM_CONFIG', em_config_path)]
for tool in tools_to_activate:
config = tool.activated_config()
if 'EMSCRIPTEN_ROOT' in config:
# For older emscripten versions that don't use this default we export
# EM_CACHE.
em_cache_dir = os.path.join(config['EMSCRIPTEN_ROOT'], 'cache')
env_vars += [('EM_CACHE', em_cache_dir)]
envs = tool.activated_environment()
for env in envs:
key, value = parse_key_value(env)
value = to_native_path(tool.expand_vars(value))
env_vars += [(key, value)]
if env_vars:
errlog('Setting environment variables:')
for key, value in env_vars:
if POWERSHELL:
env_string += '$env:' + key + '="' + value + '"\n'
elif CMD:
env_string += 'SET ' + key + '=' + value + '\n'
elif CSH:
env_string += 'setenv ' + key + ' "' + value + '";\n'
elif BASH:
env_string += 'export ' + key + '="' + value + '";\n'
else:
assert False
if 'EMSDK_PYTHON' in env_vars:
# When using our bundled python we never want the users
# PYTHONHOME or PYTHONPATH
# See https://github.com/emscripten-core/emsdk/issues/598
if POWERSHELL:
env_string += 'Remove-Item env:PYTHONHOME\n'
env_string += 'Remove-Item env:PYTHONPATH\n'
elif CMD:
env_string += 'set PYTHONHOME=\n'
env_string += 'set PYTHONPATH=\n'
elif CSH:
env_string += 'unsetenv PYTHONHOME\n'
env_string += 'unsetenv PYTHONPATH\n'
elif BASH:
env_string += 'unset PYTHONHOME\n'
env_string += 'unset PYTHONPATH\n'
else:
assert False
errlog(key + ' = ' + value)
return env_string
|
def construct_env(tools_to_activate):
env_string = ''
newpath, added_path = adjusted_path(tools_to_activate)
# Don't bother setting the path if there are no changes.
if os.environ['PATH'] != newpath:
if POWERSHELL:
env_string += '$env:PATH="' + newpath + '"\n'
elif CMD:
env_string += 'SET PATH=' + newpath + '\n'
elif CSH:
env_string += 'setenv PATH "' + newpath + '";\n'
elif BASH:
env_string += 'export PATH="' + newpath + '";\n'
else:
assert False
if added_path:
errlog('Adding directories to PATH:')
for item in added_path:
errlog('PATH += ' + item)
errlog('')
# A core variable EMSDK points to the root of Emscripten SDK directory.
env_vars = [('EMSDK', to_unix_path(emsdk_path()))]
em_config_path = os.path.normpath(dot_emscripten_path())
if to_unix_path(os.environ.get('EM_CONFIG', '')) != to_unix_path(em_config_path):
env_vars += [('EM_CONFIG', em_config_path)]
for tool in tools_to_activate:
config = tool.activated_config()
if 'EMSCRIPTEN_ROOT' in config:
# For older emscripten versions that don't use this default we export
# EM_CACHE.
em_cache_dir = os.path.join(config['EMSCRIPTEN_ROOT'], 'cache')
env_vars += [('EM_CACHE', em_cache_dir)]
envs = tool.activated_environment()
for env in envs:
key, value = parse_key_value(env)
value = to_native_path(tool.expand_vars(value))
env_vars += [(key, value)]
if env_vars:
errlog('Setting environment variables:')
for key, value in env_vars:
if POWERSHELL:
env_string += '$env:' + key + '="' + value + '"\n'
elif CMD:
env_string += 'SET ' + key + '=' + value + '\n'
elif CSH:
env_string += 'setenv ' + key + ' "' + value + '";\n'
elif BASH:
env_string += 'export ' + key + '="' + value + '";\n'
else:
assert False
if 'EMSDK_PYTHON' in env_vars:
# When using our bundled python we never want the user's
# PYTHONHOME or PYTHONPATH
# See https://github.com/emscripten-core/emsdk/issues/598
if POWERSHELL:
env_string += 'Remove-Item env:PYTHONHOME\n'
env_string += 'Remove-Item env:PYTHONPATH\n'
elif CMD:
env_string += 'set PYTHONHOME=\n'
env_string += 'set PYTHONPATH=\n'
elif CSH:
env_string += 'unsetenv PYTHONHOME\n'
env_string += 'unsetenv PYTHONPATH\n'
elif BASH:
env_string += 'unset PYTHONHOME\n'
env_string += 'unset PYTHONPATH\n'
else:
assert False
errlog(key + ' = ' + value)
return env_string
|
45,896 |
def rgb_to_yuv420(image: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
r"""Convert an RGB image to YUV 420 (subsampled).
The image data is assumed to be in the range of (0, 1). Input need to be padded to be evenly divisible by 2
horizontal and vertical. This function will output chroma siting (0.5,0.5)
Args:
image: RGB Image to be converted to YUV with shape :math:`(*, 3, H, W)`.
Returns:
Tensor containing the Y plane with shape :math:`(*, 1, H, W)
Tensor containing the UV planes with shape :math:`(*, 2, H/2, W/2)
Example:
>>> input = torch.rand(2, 3, 4, 6)
>>> output = rgb_to_yuv420(input) # (2x1x4x6, 2x2x2x3)
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}")
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")
if len(image.shape) < 2 or image.shape[-2] % 2 == 1 or image.shape[-1] % 2 == 1:
raise ValueError(f"Input H&W must be evenly disible by 2. Got {image.shape}")
yuvimage = rgb_to_yuv(image)
yuvchunks = torch.chunk(yuvimage, 3, dim=-3)
return (yuvchunks[0], torch.nn.functional.avg_pool2d(torch.cat(yuvchunks[1:3], dim=-3),(2,2)))
|
def rgb_to_yuv420(image: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Convert an RGB image to YUV 420 (subsampled).
The image data is assumed to be in the range of (0, 1). Input need to be padded to be evenly divisible by 2
horizontal and vertical. This function will output chroma siting (0.5,0.5)
Args:
image: RGB Image to be converted to YUV with shape :math:`(*, 3, H, W)`.
Returns:
Tensor containing the Y plane with shape :math:`(*, 1, H, W)
Tensor containing the UV planes with shape :math:`(*, 2, H/2, W/2)
Example:
>>> input = torch.rand(2, 3, 4, 6)
>>> output = rgb_to_yuv420(input) # (2x1x4x6, 2x2x2x3)
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}")
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")
if len(image.shape) < 2 or image.shape[-2] % 2 == 1 or image.shape[-1] % 2 == 1:
raise ValueError(f"Input H&W must be evenly disible by 2. Got {image.shape}")
yuvimage = rgb_to_yuv(image)
yuvchunks = torch.chunk(yuvimage, 3, dim=-3)
return (yuvchunks[0], torch.nn.functional.avg_pool2d(torch.cat(yuvchunks[1:3], dim=-3),(2,2)))
|
25,783 |
def busmap_by_greedy_modularity(network, n_clusters, buses_i=None):
"""
Create a busmap according to Clauset-Newman-Moore greedy modularity
maximization [1].
Parameters
----------
network : pypsa.Network
n_clusters : int
Final number of clusters desired.
buses_i: None | pandas.Index, default=None
Subset of buses to cluster. If None, all buses are considered.
Returns
-------
busmap : pandas.Series
Mapping of network.buses to clusters (indexed by
non-negative integers).
References
----------
.. [1] Clauset, A., Newman, M. E., & Moore, C.
"Finding community structure in very large networks."
Physical Review E 70(6), 2004.
"""
if buses_i is None:
buses_i = network.buses.index
network.calculate_dependent_values()
lines = network.lines.query("bus0 in @buses_i and bus1 in @buses_i")
lines = (
lines.loc[:, ["bus0", "bus1"]]
.assign(weight=network.lines.s_nom / abs(lines.r + 1j * lines.x))
.set_index(["bus0", "bus1"])
)
G = nx.Graph()
G.add_nodes_from(buses_i)
G.add_edges_from((u, v, dict(weight=w)) for (u, v), w in lines.itertuples())
communities = nx.community.greedy_modularity_communities(
G, best_n=n_clusters, cutoff=n_clusters, weight="weight"
)
busmap = pd.Series(buses_i, buses_i)
for c in np.arange(len(communities)):
busmap.loc[communities[c]] = str(c)
busmap.index = busmap.index.astype(str)
return busmap
|
def busmap_by_greedy_modularity(network, n_clusters, buses_i=None):
"""
Create a busmap according to Clauset-Newman-Moore greedy modularity
maximization [1].
Parameters
----------
network : pypsa.Network
n_clusters : int
Final number of clusters desired.
buses_i: None | pandas.Index, default=None
Subset of buses to cluster. If None, all buses are considered.
Returns
-------
busmap : pandas.Series
Mapping of network.buses to clusters (indexed by
non-negative integers).
References
----------
.. [1] Clauset, A., Newman, M. E., & Moore, C.
"Finding community structure in very large networks."
Physical Review E 70(6), 2004.
"""
if buses_i is None:
buses_i = network.buses.index
network.calculate_dependent_values()
lines = network.lines.query("bus0 in @buses_i and bus1 in @buses_i")
lines = (
lines[["bus0", "bus1"]]
.assign(weight=network.lines.s_nom / abs(lines.r + 1j * lines.x))
.set_index(["bus0", "bus1"])
)
G = nx.Graph()
G.add_nodes_from(buses_i)
G.add_edges_from((u, v, dict(weight=w)) for (u, v), w in lines.itertuples())
communities = nx.community.greedy_modularity_communities(
G, best_n=n_clusters, cutoff=n_clusters, weight="weight"
)
busmap = pd.Series(buses_i, buses_i)
for c in np.arange(len(communities)):
busmap.loc[communities[c]] = str(c)
busmap.index = busmap.index.astype(str)
return busmap
|
40,114 |
def read_list_from_config(config_file: ConfigParser, section: str, key: str, default=None):
'''
Parses a comma seperated list in secton section with key key.
:param section: The section to read from
:param key: The key holding the list
:return: A list holding the values defined in the config
'''
if default is None:
default = []
if not config_file:
return default
try:
config_entry = config_file.get(section, key)
except (NoOptionError, NoSectionError):
return default
if not config_entry:
return default
return [item.strip() for item in config_entry.split(',') if item]
|
def read_list_from_config(config_file: ConfigParser, section: str, key: str, default=None):
'''
Parses a comma separated list in section `section` with key `key`.
:param section: The section to read from
:param key: The key holding the list
:return: A list holding the values defined in the config
'''
if default is None:
default = []
if not config_file:
return default
try:
config_entry = config_file.get(section, key)
except (NoOptionError, NoSectionError):
return default
if not config_entry:
return default
return [item.strip() for item in config_entry.split(',') if item]
|
47,243 |
def main():
args = parse_args()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets for token classification task available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'tokens' or the first column if no column called
# 'tokens' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
# Loading the dataset from local csv or json file.
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = (args.train_file if args.train_file is not None else args.valid_file).split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
if raw_datasets["train"] is not None:
column_names = raw_datasets["train"].column_names
features = raw_datasets["train"].features
else:
column_names = raw_datasets["validation"].column_names
features = raw_datasets["validation"].features
if args.text_column_name is not None:
text_column_name = args.text_column_name
elif "tokens" in column_names:
text_column_name = "tokens"
else:
text_column_name = column_names[0]
if args.label_column_name is not None:
label_column_name = args.label_column_name
elif f"{args.task_name}_tags" in column_names:
label_column_name = f"{args.task_name}_tags"
else:
label_column_name = column_names[1]
# In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
# unique labels.
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = unique_labels | set(label)
label_list = list(unique_labels)
label_list.sort()
return label_list
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
# No need to convert the labels since they are already ints.
label_to_id = {i: i for i in range(len(label_list))}
else:
label_list = get_label_list(raw_datasets["train"][label_column_name])
label_to_id = {l: i for i, l in enumerate(label_list)}
num_labels = len(label_list)
# Load pretrained model and tokenizer
config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels)
if config.model_type in {"gpt2", "roberta"}:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True, add_prefix_space=True)
else:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True)
model = FlaxAutoModelForTokenClassification.from_pretrained(args.model_name_or_path, config=config)
# Preprocessing the datasets
# Tokenize all texts and align the labels with them.
def tokenize_and_align_labels(examples):
tokenized_inputs = tokenizer(
examples[text_column_name],
max_length=args.max_length,
padding="max_length",
truncation=True,
# We use this argument because the texts in our dataset are lists of words (with a label for each word).
is_split_into_words=True,
)
labels = []
for i, label in enumerate(examples[label_column_name]):
word_ids = tokenized_inputs.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label to -100 so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(-100)
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_ids.append(label_to_id[label[word_idx]])
# For the other tokens in a word, we set the label to either the current label or -100, depending on
# the label_all_tokens flag.
else:
label_ids.append(label_to_id[label[word_idx]] if args.label_all_tokens else -100)
previous_word_idx = word_idx
labels.append(label_ids)
tokenized_inputs["labels"] = labels
return tokenized_inputs
processed_raw_datasets = raw_datasets.map(
tokenize_and_align_labels,
batched=True,
remove_columns=raw_datasets["train"].column_names,
desc="Running tokenizer on dataset",
)
train_dataset = processed_raw_datasets["train"]
eval_dataset = processed_raw_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Define a summary writer
summary_writer = tensorboard.SummaryWriter(args.output_dir)
summary_writer.hparams(vars(args))
def write_metric(train_metrics, eval_metrics, train_time, step):
summary_writer.scalar("train_time", train_time, step)
train_metrics = get_metrics(train_metrics)
for key, vals in train_metrics.items():
tag = f"train_{key}"
for i, val in enumerate(vals):
summary_writer.scalar(tag, val, step - len(vals) + i + 1)
for metric_name, value in eval_metrics.items():
summary_writer.scalar(f"eval_{metric_name}", value, step)
num_epochs = int(args.num_train_epochs)
rng = jax.random.PRNGKey(args.seed)
dropout_rngs = jax.random.split(rng, jax.local_device_count())
train_batch_size = args.per_device_train_batch_size * jax.local_device_count()
eval_batch_size = args.per_device_eval_batch_size * jax.local_device_count()
learning_rate_fn = create_learning_rate_fn(
len(train_dataset), train_batch_size, args.num_train_epochs, args.num_warmup_steps, args.learning_rate
)
state = create_train_state(model, learning_rate_fn, num_labels=num_labels, weight_decay=args.weight_decay)
# define step functions
def train_step(
state: train_state.TrainState, batch: Dict[str, Array], dropout_rng: PRNGKey
) -> Tuple[train_state.TrainState, float]:
"""Trains model with an optimizer (both in `state`) on `batch`, returning a pair `(new_state, loss)`."""
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
targets = batch.pop("labels")
def loss_fn(params):
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
loss = state.loss_fn(logits, targets)
return loss
grad_fn = jax.value_and_grad(loss_fn)
loss, grad = grad_fn(state.params)
grad = jax.lax.pmean(grad, "batch")
new_state = state.apply_gradients(grads=grad)
metrics = jax.lax.pmean({"loss": loss, "learning_rate": learning_rate_fn(state.step)}, axis_name="batch")
return new_state, metrics, new_dropout_rng
p_train_step = jax.pmap(train_step, axis_name="batch", donate_argnums=(0,))
def eval_step(state, batch):
logits = state.apply_fn(**batch, params=state.params, train=False)[0]
return state.logits_fn(logits)
p_eval_step = jax.pmap(eval_step, axis_name="batch")
metric = load_metric("seqeval")
def get_labels(y_pred, y_true):
# Transform predictions and references tensos to numpy arrays
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(pred, gold_label) if l != -100]
for pred, gold_label in zip(y_pred, y_true)
]
true_labels = [
[label_list[l] for (p, l) in zip(pred, gold_label) if l != -100]
for pred, gold_label in zip(y_pred, y_true)
]
return true_predictions, true_labels
def compute_metrics():
results = metric.compute()
if args.return_entity_level_metrics:
# Unpack nested dictionaries
final_results = {}
for key, value in results.items():
if isinstance(value, dict):
for n, v in value.items():
final_results[f"{key}_{n}"] = v
else:
final_results[key] = value
return final_results
else:
return {
"precision": results["overall_precision"],
"recall": results["overall_recall"],
"f1": results["overall_f1"],
"accuracy": results["overall_accuracy"],
}
logger.info(f"===== Starting training ({num_epochs} epochs) =====")
train_time = 0
# make sure weights are replicated on each device
state = replicate(state)
for epoch in range(1, num_epochs + 1):
logger.info(f"Epoch {epoch}")
logger.info(" Training...")
train_start = time.time()
train_metrics = []
rng, input_rng = jax.random.split(rng)
# train
for batch in train_data_collator(input_rng, train_dataset, train_batch_size):
state, metrics, dropout_rngs = p_train_step(state, batch, dropout_rngs)
train_metrics.append(metrics)
train_time += time.time() - train_start
logger.info(f" Done! Training metrics: {unreplicate(metrics)}")
logger.info(" Evaluating...")
# evaluate
for batch in eval_data_collator(eval_dataset, eval_batch_size):
labels = batch.pop("labels")
predictions = p_eval_step(state, batch)
predictions = jnp.array([pred for pred in chain(*predictions)])
labels = np.array([label for label in chain(*labels)])
labels[np.array(chain(*batch["attention_mask"])) == 0] = -100
preds, refs = get_labels(predictions, labels)
metric.add_batch(
predictions=preds,
references=refs,
)
# evaluate also on leftover examples (not divisible by batch_size)
num_leftover_samples = len(eval_dataset) % eval_batch_size
# make sure leftover batch is evaluated on one device
if num_leftover_samples > 0 and jax.process_index() == 0:
# take leftover samples
batch = eval_dataset[-num_leftover_samples:]
batch = {k: jnp.array(v) for k, v in batch.items()}
labels = batch.pop("labels")
predictions = eval_step(unreplicate(state), batch)
labels = np.array(labels)
labels[np.array(batch["attention_mask"]) == 0] = -100
preds, refs = get_labels(predictions, labels)
metric.add_batch(
predictions=preds,
references=refs,
)
eval_metric = compute_metrics()
logger.info(f" Done! Eval metrics: {eval_metric}")
for key, val in eval_metric.items():
logger.info(f"{key}: {val:.4f}")
cur_step = epoch * (len(train_dataset) // train_batch_size)
write_metric(train_metrics, eval_metric, train_time, cur_step)
# save checkpoint after each epoch and push checkpoint to the hub
if jax.process_index() == 0:
params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
model.save_pretrained(
args.output_dir,
params=params,
push_to_hub=args.push_to_hub,
commit_message=f"Saving weights and logs of epoch {epoch}",
)
|
def main():
args = parse_args()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets for token classification task available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'tokens' or the first column if no column called
# 'tokens' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
# Loading the dataset from local csv or json file.
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = (args.train_file if args.train_file is not None else args.valid_file).split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
if raw_datasets["train"] is not None:
column_names = raw_datasets["train"].column_names
features = raw_datasets["train"].features
else:
column_names = raw_datasets["validation"].column_names
features = raw_datasets["validation"].features
if args.text_column_name is not None:
text_column_name = args.text_column_name
elif "tokens" in column_names:
text_column_name = "tokens"
else:
text_column_name = column_names[0]
if args.label_column_name is not None:
label_column_name = args.label_column_name
elif f"{args.task_name}_tags" in column_names:
label_column_name = f"{args.task_name}_tags"
else:
label_column_name = column_names[1]
# In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
# unique labels.
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = unique_labels | set(label)
label_list = list(unique_labels)
label_list.sort()
return label_list
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
# No need to convert the labels since they are already ints.
label_to_id = {i: i for i in range(len(label_list))}
else:
label_list = get_label_list(raw_datasets["train"][label_column_name])
label_to_id = {l: i for i, l in enumerate(label_list)}
num_labels = len(label_list)
# Load pretrained model and tokenizer
config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels)
if config.model_type in {"gpt2", "roberta"}:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True, add_prefix_space=True)
else:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True)
model = FlaxAutoModelForTokenClassification.from_pretrained(args.model_name_or_path, config=config)
# Preprocessing the datasets
# Tokenize all texts and align the labels with them.
def tokenize_and_align_labels(examples):
tokenized_inputs = tokenizer(
examples[text_column_name],
max_length=args.max_length,
padding="max_length",
truncation=True,
# We use this argument because the texts in our dataset are lists of words (with a label for each word).
is_split_into_words=True,
)
labels = []
for i, label in enumerate(examples[label_column_name]):
word_ids = tokenized_inputs.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label to -100 so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(-100)
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_ids.append(label_to_id[label[word_idx]])
# For the other tokens in a word, we set the label to either the current label or -100, depending on
# the label_all_tokens flag.
else:
label_ids.append(label_to_id[label[word_idx]] if args.label_all_tokens else -100)
previous_word_idx = word_idx
labels.append(label_ids)
tokenized_inputs["labels"] = labels
return tokenized_inputs
processed_raw_datasets = raw_datasets.map(
tokenize_and_align_labels,
batched=True,
remove_columns=raw_datasets["train"].column_names,
desc="Running tokenizer on dataset",
)
train_dataset = processed_raw_datasets["train"]
eval_dataset = processed_raw_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Define a summary writer
summary_writer = tensorboard.SummaryWriter(args.output_dir)
summary_writer.hparams(vars(args))
def write_metric(train_metrics, eval_metrics, train_time, step):
summary_writer.scalar("train_time", train_time, step)
train_metrics = get_metrics(train_metrics)
for key, vals in train_metrics.items():
tag = f"train_{key}"
for i, val in enumerate(vals):
summary_writer.scalar(tag, val, step - len(vals) + i + 1)
for metric_name, value in eval_metrics.items():
summary_writer.scalar(f"eval_{metric_name}", value, step)
num_epochs = int(args.num_train_epochs)
rng = jax.random.PRNGKey(args.seed)
dropout_rngs = jax.random.split(rng, jax.local_device_count())
train_batch_size = args.per_device_train_batch_size * jax.local_device_count()
eval_batch_size = args.per_device_eval_batch_size * jax.local_device_count()
learning_rate_fn = create_learning_rate_fn(
len(train_dataset), train_batch_size, args.num_train_epochs, args.num_warmup_steps, args.learning_rate
)
state = create_train_state(model, learning_rate_fn, num_labels=num_labels, weight_decay=args.weight_decay)
# define step functions
def train_step(
state: train_state.TrainState, batch: Dict[str, Array], dropout_rng: PRNGKey
) -> Tuple[train_state.TrainState, float]:
"""Trains model with an optimizer (both in `state`) on `batch`, returning a pair `(new_state, loss)`."""
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
targets = batch.pop("labels")
def loss_fn(params):
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
loss = state.loss_fn(logits, targets)
return loss
grad_fn = jax.value_and_grad(loss_fn)
loss, grad = grad_fn(state.params)
grad = jax.lax.pmean(grad, "batch")
new_state = state.apply_gradients(grads=grad)
metrics = jax.lax.pmean({"loss": loss, "learning_rate": learning_rate_fn(state.step)}, axis_name="batch")
return new_state, metrics, new_dropout_rng
p_train_step = jax.pmap(train_step, axis_name="batch", donate_argnums=(0,))
def eval_step(state, batch):
logits = state.apply_fn(**batch, params=state.params, train=False)[0]
return state.logits_fn(logits)
p_eval_step = jax.pmap(eval_step, axis_name="batch")
metric = load_metric("seqeval")
def get_labels(y_pred, y_true):
# Transform predictions and references tensos to numpy arrays
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(pred, gold_label) if l != -100]
for pred, gold_label in zip(y_pred, y_true)
]
true_labels = [
[label_list[l] for (p, l) in zip(pred, gold_label) if l != -100]
for pred, gold_label in zip(y_pred, y_true)
]
return true_predictions, true_labels
def compute_metrics():
results = metric.compute()
if args.return_entity_level_metrics:
# Unpack nested dictionaries
final_results = {}
for key, value in results.items():
if isinstance(value, dict):
for n, v in value.items():
final_results[f"{key}_{n}"] = v
else:
final_results[key] = value
return final_results
else:
return {
"precision": results["overall_precision"],
"recall": results["overall_recall"],
"f1": results["overall_f1"],
"accuracy": results["overall_accuracy"],
}
logger.info(f"===== Starting training ({num_epochs} epochs) =====")
train_time = 0
# make sure weights are replicated on each device
state = replicate(state)
for epoch in range(1, num_epochs + 1):
logger.info(f"Epoch {epoch}")
logger.info(" Training...")
train_start = time.time()
train_metrics = []
rng, input_rng = jax.random.split(rng)
# train
for batch in train_data_collator(input_rng, train_dataset, train_batch_size):
state, metrics, dropout_rngs = p_train_step(state, batch, dropout_rngs)
train_metrics.append(metrics)
train_time += time.time() - train_start
logger.info(f" Done! Training metrics: {unreplicate(metrics)}")
logger.info(" Evaluating...")
# evaluate
for batch in eval_data_collator(eval_dataset, eval_batch_size):
labels = batch.pop("labels")
predictions = p_eval_step(state, batch)
predictions = jnp.array([pred for pred in chain(*predictions)])
labels = np.array([label for label in chain(*labels)])
labels[np.array(chain(*batch["attention_mask"])) == 0] = -100
preds, refs = get_labels(predictions, labels)
metric.add_batch(
predictions=preds,
references=refs,
)
# evaluate also on leftover examples (not divisible by batch_size)
num_leftover_samples = len(eval_dataset) % eval_batch_size
# make sure leftover batch is evaluated on one device
if num_leftover_samples > 0 and jax.process_index() == 0:
# take leftover samples
batch = eval_dataset[-num_leftover_samples:]
batch = {k: jnp.array(v) for k, v in batch.items()}
labels = batch.pop("labels")
predictions = eval_step(unreplicate(state), batch)
labels = np.array(labels)
labels[np.array(batch["attention_mask"]) == 0] = -100
preds, refs = get_labels(predictions, labels)
metric.add_batch(
predictions=preds,
references=refs,
)
eval_metric = compute_metrics()
logger.info(f" Done! Eval metrics: {eval_metric}")
for key, val in eval_metric.items():
logger.info(f"{key}: {val:.4f}")
cur_step = epoch * (len(train_dataset) // train_batch_size)
write_metric(train_metrics, eval_metric, train_time, cur_step)
# save checkpoint after each epoch and push checkpoint to the hub
if jax.process_index() == 0:
params = jax.device_get(unreplicate(state.params))
model.save_pretrained(
args.output_dir,
params=params,
push_to_hub=args.push_to_hub,
commit_message=f"Saving weights and logs of epoch {epoch}",
)
|
2,829 |
def chi2_kernel(X, Y=None, gamma=1.0):
"""Compute the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Input array/matrix X.
Y : ndarray of shape (n_samples_Y, n_features), default=None
Input array/matrix Y.
gamma : float, default=1
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
Returns the exponential chi-squared kernel X and Y.
See Also
--------
additive_chi2_kernel : The additive version of this kernel.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://hal.archives-ouvertes.fr/hal-00171412/document
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
|
def chi2_kernel(X, Y=None, gamma=1.0):
"""Compute the exponential chi-squared kernel between X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Input array/matrix X.
Y : ndarray of shape (n_samples_Y, n_features), default=None
Input array/matrix Y.
gamma : float, default=1
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
Returns the exponential chi-squared kernel X and Y.
See Also
--------
additive_chi2_kernel : The additive version of this kernel.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://hal.archives-ouvertes.fr/hal-00171412/document
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
|
470 |
def scrub_form_meta(form_id, form_data):
"""
Cleans up old format metadata to our current standard.
Does NOT save the doc, but returns whether the doc needs to be saved.
"""
property_map = {'TimeStart': 'timeStart',
'TimeEnd': 'timeEnd',
'chw_id': 'userID',
'DeviceID': 'deviceID',
'uid': 'instanceID'}
# hack to make sure uppercase meta still ends up in the right place
found_old = False
if 'Meta' in form_data:
form_data['meta'] = form_data['Meta']
del form_data['Meta']
found_old = True
if 'meta' in form_data:
meta_block = form_data['meta']
# scrub values from 0.9 to 1.0
if isinstance(meta_block, list):
if isinstance(meta_block[0], dict):
# if it's a list of dictionaries, arbitrarily pick the first one
# this is a pretty serious error, but it's also recoverable
form_data['meta'] = meta_block = meta_block[0]
logging.error((
'form %s contains multiple meta blocks. '
'this is not correct but we picked one abitrarily'
) % form_id)
else:
# if it's a list of something other than dictionaries.
# don't bother scrubbing.
logging.error('form %s contains a poorly structured meta block.'
'this might cause data display problems.')
if isinstance(meta_block, dict):
for key in list(meta_block.keys()):
if key in property_map and property_map[key] not in meta_block:
meta_block[property_map[key]] = meta_block[key]
del meta_block[key]
found_old = True
return found_old
|
def scrub_form_meta(form_id, form_data):
"""
Cleans up old format metadata to our current standard.
Does NOT save the doc, but returns whether the doc needs to be saved.
"""
property_map = {'TimeStart': 'timeStart',
'TimeEnd': 'timeEnd',
'chw_id': 'userID',
'DeviceID': 'deviceID',
'uid': 'instanceID'}
# hack to make sure uppercase meta still ends up in the right place
found_old = False
if 'Meta' in form_data:
form_data['meta'] = form_data['Meta']
del form_data['Meta']
found_old = True
if 'meta' in form_data:
meta_block = form_data['meta']
# scrub values from 0.9 to 1.0
if isinstance(meta_block, list):
if isinstance(meta_block[0], dict):
# if it's a list of dictionaries, arbitrarily pick the first one
# this is a pretty serious error, but it's also recoverable
form_data['meta'] = meta_block = meta_block[0]
logging.error((
'form %s contains multiple meta blocks. '
'this is not correct but we picked one abitrarily'
) % form_id)
else:
# if it's a list of something other than dictionaries.
# don't bother scrubbing.
logging.error('form %s contains a poorly structured meta block.'
'this might cause data display problems.')
if isinstance(meta_block, dict):
for key in list(meta_block):
if key in property_map and property_map[key] not in meta_block:
meta_block[property_map[key]] = meta_block[key]
del meta_block[key]
found_old = True
return found_old
|
32,939 |
def is_single_span_sampled(
span, # type: Span
):
return span.get_metric("_dd.span_sampling.mechanism") == SamplingMechanism.SPAN_SAMPLING_RULE
|
def is_single_span_sampled(
span, # type: Span
):
return span.get_metric(_SINGLE_SPAN_SAMPLING_MECHANISM) == SamplingMechanism.SPAN_SAMPLING_RULE
|
5,626 |
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, limit=10000,
workers=1, points=None, quadrature='gk21', full_output=False):
"""Adaptive integration of a vector-valued function.
Parameters
----------
f : callable
Vector-valued function f(x) to integrate.
a : float
Initial point.
b : float
Final point.
epsabs : float, optional
Absolute tolerance.
epsrel : float, optional
Relative tolerance.
norm : {'max', '2'}, optional
Vector norm to use for error estimation.
cache_size : int, optional
Number bytes to use for memoization.
workers : int or map-like callable, optional
If `workers` is an integer, part of the computation is done in
parallel subdivided to this many tasks (using `multiprocessing.Pool`).
Supply `-1` to use all cores available to the Process.
Alternatively, supply a map-like callable, such as
`multiprocessing.Pool.map` for evaluating the population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
points : list, optional
List of additional breakpoints.
quardarture : {'gk21', 'trapz'}, optional
Quadrature rule to use on subintervals.
Options: 'gk21' (Gauss-Kronrod 21-point rule),
'trapz' (composite trapezoid rule).
full_output : bool, optional
If true, populate ``info`` return value with "alist", "blist",
"rlist", "elist", "iord" keys.
Returns
-------
res : {float, array-like}
Estimate for the result
err : float
Error estimate for the result in the given norm
info : dict
Info dictionary. Has always the keys "neval" and "last"
ier : int
Result code
Notes
-----
The algorithm mainly follows the implementation of QUADPACK's
DQAG* algorithms, implementing global error control and adaptive
subdivision.
The algorithm here has some differences to the QUADPACK approach:
Instead of subdividing one interval at a time, the algorithm
subdivides N intervals with largest errors at once. This enables
(partial) parallelization of the integration.
The logic of subdividing "next largest" intervals first is then
not implemented, and we rely on the above extension to avoid
concentrating on "small" intervals only.
The Wynn epsilon table extrapolation is not used (QUADPACK uses it
for infinite intervals). This is because the algorithm here is
supposed to work on vector-valued functions, in an user-specified
norm, and the extension of the epsilon algorithm to this case does
not appear to be widely agreed. For max-norm, using elementwise
Wynn epsilon could be possible, but we do not do this here with
the hope that the epsilon extrapolation is mainly useful in
special cases.
References
----------
[1] R. Piessens, E. de Doncker, QUADPACK (1983).
"""
a = float(a)
b = float(b)
# Use simple transformations to deal with integrals over infinite
# intervals.
args = (epsabs, epsrel, norm, cache_size, limit, workers, points, quadrature, full_output)
if np.isfinite(a) and np.isinf(b):
f2 = SemiInfiniteFunc(f, start=a, infty=b)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
return quad_vec(f2, 0, 1, *args)
elif np.isfinite(b) and np.isinf(a):
f2 = SemiInfiniteFunc(f, start=b, infty=a)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
res = quad_vec(f2, 0, 1, *args)
return (-res[0],) + res[1:]
elif np.isinf(a) and np.isinf(b):
sgn = -1 if b < a else 1
# NB. first interval split occurs at t=0, which separates
# positive and negative sides of the integral
f2 = DoubleInfiniteFunc(f)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
if a != b:
res = quad_vec(f2, -1, 1, *args)
else:
res = quad_vec(f2, 1, 1, *args)
return (res[0]*sgn,) + res[1:]
elif not (np.isfinite(a) and np.isfinite(b)):
raise ValueError("invalid integration bounds a={}, b={}".format(a, b))
norm_funcs = {
None: _max_norm,
'max': _max_norm,
'2': np.linalg.norm
}
if callable(norm):
norm_func = norm
else:
norm_func = norm_funcs[norm]
mapwrapper = MapWrapper(workers)
parallel_count = 8
min_intervals = 2
try:
_quadrature = {None: _quadrature_gk21,
'gk21': _quadrature_gk21,
'trapz': _quadrature_trapz}[quadrature]
except KeyError:
raise ValueError("unknown quadrature {!r}".format(quadrature))
# Initial interval set
if points is None:
initial_intervals = [(a, b)]
else:
prev = a
initial_intervals = []
for p in sorted(points):
p = float(p)
if p <= a or p >= b or p == prev:
continue
initial_intervals.append((prev, p))
prev = p
initial_intervals.append((prev, b))
global_integral = None
global_error = None
rounding_error = None
interval_cache = None
intervals = []
neval = 0
for x1, x2 in initial_intervals:
ig, err, rnd = _quadrature(x1, x2, f, norm_func)
neval += _quadrature.num_eval
if global_integral is None:
if isinstance(ig, (float, complex)):
# Specialize for scalars
if norm_func in (_max_norm, np.linalg.norm):
norm_func = abs
global_integral = ig
global_error = float(err)
rounding_error = float(rnd)
cache_count = cache_size // _get_sizeof(ig)
interval_cache = LRUDict(cache_count)
else:
global_integral += ig
global_error += err
rounding_error += rnd
interval_cache[(x1, x2)] = copy.copy(ig)
intervals.append((-err, x1, x2))
heapq.heapify(intervals)
# Process intervals
with mapwrapper:
ier = 1
while intervals and len(intervals) < limit:
# Select intervals with largest errors for subdivision
tol = max(epsabs, epsrel*norm_func(global_integral))
to_process = []
for j in range(parallel_count):
if not intervals:
break
if j > 0 and abs(intervals[0][0]) * len(intervals) < 0.5*tol:
# avoid unnecessary parallel splitting
break
interval = heapq.heappop(intervals)
neg_old_err, a, b = interval
old_int = interval_cache.pop((a, b), None)
to_process.append(((-neg_old_err, a, b, old_int), f, norm_func, _quadrature))
# Subdivide intervals
for dint, derr, dround_err, subint, dneval in mapwrapper(_subdivide_interval, to_process):
neval += dneval
global_integral += dint
global_error += derr
rounding_error += dround_err
for x in subint:
x1, x2, ig, err = x
interval_cache[(x1, x2)] = ig
heapq.heappush(intervals, (-err, x1, x2))
# Termination check
if len(intervals) >= min_intervals:
tol = max(epsabs, epsrel*norm_func(global_integral))
if global_error < tol/8:
ier = 0
break
if global_error < rounding_error:
ier = 2
break
res = global_integral
err = global_error + rounding_error
info = dict(neval=neval,
last=len(intervals))
if full_output:
info['alist'] = np.array([z[1] for z in intervals])
info['blist'] = np.array([z[2] for z in intervals])
res_arr = np.asarray(res)
dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
info['rlist'] = np.array([interval_cache.get((z[1], z[2]), dummy)
for z in intervals], dtype=res_arr.dtype)
info['elist'] = np.array([-z[0] for z in intervals])
info['iord'] = np.argsort(-info['elist'])
return (res, err, info, ier)
|
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, limit=10000,
workers=1, points=None, quadrature='gk21', full_output=False):
"""Adaptive integration of a vector-valued function.
Parameters
----------
f : callable
Vector-valued function f(x) to integrate.
a : float
Initial point.
b : float
Final point.
epsabs : float, optional
Absolute tolerance.
epsrel : float, optional
Relative tolerance.
norm : {'max', '2'}, optional
Vector norm to use for error estimation.
cache_size : int, optional
Number bytes to use for memoization.
workers : int or map-like callable, optional
If `workers` is an integer, part of the computation is done in
parallel subdivided to this many tasks (using `multiprocessing.Pool`).
Supply `-1` to use all cores available to the Process.
Alternatively, supply a map-like callable, such as
`multiprocessing.Pool.map` for evaluating the population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
points : list, optional
List of additional breakpoints.
quadrature : {'gk21', 'trapz'}, optional
Quadrature rule to use on subintervals.
Options: 'gk21' (Gauss-Kronrod 21-point rule),
'trapz' (composite trapezoid rule).
full_output : bool, optional
If true, populate ``info`` return value with "alist", "blist",
"rlist", "elist", "iord" keys.
Returns
-------
res : {float, array-like}
Estimate for the result
err : float
Error estimate for the result in the given norm
info : dict
Info dictionary. Has always the keys "neval" and "last"
ier : int
Result code
Notes
-----
The algorithm mainly follows the implementation of QUADPACK's
DQAG* algorithms, implementing global error control and adaptive
subdivision.
The algorithm here has some differences to the QUADPACK approach:
Instead of subdividing one interval at a time, the algorithm
subdivides N intervals with largest errors at once. This enables
(partial) parallelization of the integration.
The logic of subdividing "next largest" intervals first is then
not implemented, and we rely on the above extension to avoid
concentrating on "small" intervals only.
The Wynn epsilon table extrapolation is not used (QUADPACK uses it
for infinite intervals). This is because the algorithm here is
supposed to work on vector-valued functions, in an user-specified
norm, and the extension of the epsilon algorithm to this case does
not appear to be widely agreed. For max-norm, using elementwise
Wynn epsilon could be possible, but we do not do this here with
the hope that the epsilon extrapolation is mainly useful in
special cases.
References
----------
[1] R. Piessens, E. de Doncker, QUADPACK (1983).
"""
a = float(a)
b = float(b)
# Use simple transformations to deal with integrals over infinite
# intervals.
args = (epsabs, epsrel, norm, cache_size, limit, workers, points, quadrature, full_output)
if np.isfinite(a) and np.isinf(b):
f2 = SemiInfiniteFunc(f, start=a, infty=b)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
return quad_vec(f2, 0, 1, *args)
elif np.isfinite(b) and np.isinf(a):
f2 = SemiInfiniteFunc(f, start=b, infty=a)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
res = quad_vec(f2, 0, 1, *args)
return (-res[0],) + res[1:]
elif np.isinf(a) and np.isinf(b):
sgn = -1 if b < a else 1
# NB. first interval split occurs at t=0, which separates
# positive and negative sides of the integral
f2 = DoubleInfiniteFunc(f)
if points is not None:
points = tuple(f2.get_t(xp) for xp in points)
if a != b:
res = quad_vec(f2, -1, 1, *args)
else:
res = quad_vec(f2, 1, 1, *args)
return (res[0]*sgn,) + res[1:]
elif not (np.isfinite(a) and np.isfinite(b)):
raise ValueError("invalid integration bounds a={}, b={}".format(a, b))
norm_funcs = {
None: _max_norm,
'max': _max_norm,
'2': np.linalg.norm
}
if callable(norm):
norm_func = norm
else:
norm_func = norm_funcs[norm]
mapwrapper = MapWrapper(workers)
parallel_count = 8
min_intervals = 2
try:
_quadrature = {None: _quadrature_gk21,
'gk21': _quadrature_gk21,
'trapz': _quadrature_trapz}[quadrature]
except KeyError:
raise ValueError("unknown quadrature {!r}".format(quadrature))
# Initial interval set
if points is None:
initial_intervals = [(a, b)]
else:
prev = a
initial_intervals = []
for p in sorted(points):
p = float(p)
if p <= a or p >= b or p == prev:
continue
initial_intervals.append((prev, p))
prev = p
initial_intervals.append((prev, b))
global_integral = None
global_error = None
rounding_error = None
interval_cache = None
intervals = []
neval = 0
for x1, x2 in initial_intervals:
ig, err, rnd = _quadrature(x1, x2, f, norm_func)
neval += _quadrature.num_eval
if global_integral is None:
if isinstance(ig, (float, complex)):
# Specialize for scalars
if norm_func in (_max_norm, np.linalg.norm):
norm_func = abs
global_integral = ig
global_error = float(err)
rounding_error = float(rnd)
cache_count = cache_size // _get_sizeof(ig)
interval_cache = LRUDict(cache_count)
else:
global_integral += ig
global_error += err
rounding_error += rnd
interval_cache[(x1, x2)] = copy.copy(ig)
intervals.append((-err, x1, x2))
heapq.heapify(intervals)
# Process intervals
with mapwrapper:
ier = 1
while intervals and len(intervals) < limit:
# Select intervals with largest errors for subdivision
tol = max(epsabs, epsrel*norm_func(global_integral))
to_process = []
for j in range(parallel_count):
if not intervals:
break
if j > 0 and abs(intervals[0][0]) * len(intervals) < 0.5*tol:
# avoid unnecessary parallel splitting
break
interval = heapq.heappop(intervals)
neg_old_err, a, b = interval
old_int = interval_cache.pop((a, b), None)
to_process.append(((-neg_old_err, a, b, old_int), f, norm_func, _quadrature))
# Subdivide intervals
for dint, derr, dround_err, subint, dneval in mapwrapper(_subdivide_interval, to_process):
neval += dneval
global_integral += dint
global_error += derr
rounding_error += dround_err
for x in subint:
x1, x2, ig, err = x
interval_cache[(x1, x2)] = ig
heapq.heappush(intervals, (-err, x1, x2))
# Termination check
if len(intervals) >= min_intervals:
tol = max(epsabs, epsrel*norm_func(global_integral))
if global_error < tol/8:
ier = 0
break
if global_error < rounding_error:
ier = 2
break
res = global_integral
err = global_error + rounding_error
info = dict(neval=neval,
last=len(intervals))
if full_output:
info['alist'] = np.array([z[1] for z in intervals])
info['blist'] = np.array([z[2] for z in intervals])
res_arr = np.asarray(res)
dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
info['rlist'] = np.array([interval_cache.get((z[1], z[2]), dummy)
for z in intervals], dtype=res_arr.dtype)
info['elist'] = np.array([-z[0] for z in intervals])
info['iord'] = np.argsort(-info['elist'])
return (res, err, info, ier)
|
7,283 |
def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
"""
Function to add random noise of various types to a floating-point image.
Parameters
----------
image : ndarray
Input image data. Will be converted to float.
mode : str, optional
One of the following strings, selecting the type of noise to add:
- 'gaussian' Gaussian-distributed additive noise.
- 'localvar' Gaussian-distributed additive noise, with specified
local variance at each point of `image`.
- 'poisson' Poisson-distributed noise generated from the data.
- 'salt' Replaces random pixels with 1.
- 'pepper' Replaces random pixels with 0 (for unsigned images) or
-1 (for signed images).
- 's&p' Replaces random pixels with either 1 or `low_val`, where
`low_val` is 0 for unsigned images or -1 for signed
images.
- 'speckle' Multiplicative noise using out = image + n*image, where
n is Gaussian noise with specified mean & variance.
seed : int, optional
If provided, this will set the random seed before generating noise,
for valid pseudo-random comparisons.
clip : bool, optional
If True (default), the output will be clipped after noise applied
for modes `'speckle'`, `'poisson'`, and `'gaussian'`. This is
needed to maintain the proper image data range. If False, clipping
is not applied, and the output may extend beyond the range [-1, 1].
mean : float, optional
Mean of random distribution. Used in 'gaussian' and 'speckle'.
Default : 0.
var : float, optional
Variance of random distribution. Used in 'gaussian' and 'speckle'.
Note: variance = (standard deviation) ** 2. Default : 0.01
local_vars : ndarray, optional
Array of positive floats, same shape as `image`, defining the local
variance at every image point. Used in 'localvar'.
amount : float, optional
Proportion of image pixels to replace with noise on range [0, 1].
Used in 'salt', 'pepper', and 'salt & pepper'. Default : 0.05
salt_vs_pepper : float, optional
Proportion of salt vs. pepper noise for 's&p' on range [0, 1].
Higher values represent more salt. Default : 0.5 (equal amounts)
Returns
-------
out : ndarray
Output floating-point image data on range [0, 1] or [-1, 1] if the
input `image` was unsigned or signed, respectively.
Notes
-----
Speckle, Poisson, Localvar, and Gaussian noise may generate noise outside
the valid image range. The default is to clip (not alias) these values,
but they may be preserved by setting `clip=False`. Note that in this case
the output may contain values outside the ranges [0, 1] or [-1, 1].
Use this option with care.
Because of the prevalence of exclusively positive floating-point images in
intermediate calculations, it is not possible to intuit if an input is
signed based on dtype alone. Instead, negative values are explicitly
searched for. Only if found does this function assume signed input.
Unexpected results only occur in rare, poorly exposes cases (e.g. if all
values are above 50 percent gray in a signed `image`). In this event,
manually scaling the input to the positive domain will solve the problem.
The Poisson distribution is only defined for positive integers. To apply
this noise type, the number of unique values in the image is found and
the next round power of two is used to scale up the floating-point result,
after which it is scaled back down to the floating-point image range.
To generate Poisson noise against a signed image, the signed image is
temporarily converted to an unsigned image in the floating point domain,
Poisson noise is generated, then it is returned to the original range.
"""
mode = mode.lower()
# Detect if a signed image was input
if image.min() < 0:
low_clip = -1.
else:
low_clip = 0.
image = img_as_float(image)
if seed is not None:
np.random.seed(seed=seed)
allowedtypes = {
'gaussian': 'gaussian_values',
'localvar': 'localvar_values',
'poisson': 'poisson_values',
'salt': 'sp_values',
'pepper': 'sp_values',
's&p': 's&p_values',
'speckle': 'gaussian_values'}
kwdefaults = {
'mean': 0.,
'var': 0.01,
'amount': 0.05,
'salt_vs_pepper': 0.5,
'local_vars': np.zeros_like(image) + 0.01}
allowedkwargs = {
'gaussian_values': ['mean', 'var'],
'localvar_values': ['local_vars'],
'sp_values': ['amount'],
's&p_values': ['amount', 'salt_vs_pepper'],
'poisson_values': []}
for key in kwargs:
if key not in allowedkwargs[allowedtypes[mode]]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowedkwargs[allowedtypes[mode]]))
# Set kwarg defaults
for kw in allowedkwargs[allowedtypes[mode]]:
kwargs.setdefault(kw, kwdefaults[kw])
if mode == 'gaussian':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
image.shape)
out = image + noise
elif mode == 'localvar':
# Ensure local variance input is correct
if (kwargs['local_vars'] <= 0).any():
raise ValueError('All values of `local_vars` must be > 0.')
# Safe shortcut usage broadcasts kwargs['local_vars'] as a ufunc
out = image + np.random.normal(0, kwargs['local_vars'] ** 0.5)
elif mode == 'poisson':
# Determine unique values in image & calculate the next power of two
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
# Ensure image is exclusively positive
if low_clip == -1.:
old_max = image.max()
image = (image + 1.) / (old_max + 1.)
# Generating noise for each unique value in image.
out = np.random.poisson(image * vals) / float(vals)
# Return image to original range if input was signed
if low_clip == -1.:
out = out * (old_max + 1.) - 1.
elif mode == 'salt':
# Re-call function with mode='s&p' and p=1 (all salt noise)
out = random_noise(image, mode='s&p', seed=seed,
amount=kwargs['amount'], salt_vs_pepper=1.)
elif mode == 'pepper':
# Re-call function with mode='s&p' and p=1 (all pepper noise)
out = random_noise(image, mode='s&p', seed=seed,
amount=kwargs['amount'], salt_vs_pepper=0.)
elif mode == 's&p':
out = image.copy()
p = kwargs['amount']
q = kwargs['salt_vs_pepper']
flipped = choose(p, image.shape)
salted = choose(q, image.shape)
peppered = ~salted
out[flipped & salted] = 1
out[flipped & peppered] = low_clip
elif mode == 'speckle':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
image.shape)
out = image + image * noise
# Clip back to original range, if necessary
if clip:
out = np.clip(out, low_clip, 1.0)
return out
|
def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
"""
Function to add random noise of various types to a floating-point image.
Parameters
----------
image : ndarray
Input image data. Will be converted to float.
mode : str, optional
One of the following strings, selecting the type of noise to add:
- 'gaussian' Gaussian-distributed additive noise.
- 'localvar' Gaussian-distributed additive noise, with specified
local variance at each point of `image`.
- 'poisson' Poisson-distributed noise generated from the data.
- 'salt' Replaces random pixels with 1.
- 'pepper' Replaces random pixels with 0 (for unsigned images) or
-1 (for signed images).
- 's&p' Replaces random pixels with either 1 or `low_val`, where
`low_val` is 0 for unsigned images or -1 for signed
images.
- 'speckle' Multiplicative noise using out = image + n*image, where
n is Gaussian noise with specified mean & variance.
seed : int, optional
If provided, this will set the random seed before generating noise,
for valid pseudo-random comparisons.
clip : bool, optional
If True (default), the output will be clipped after noise applied
for modes `'speckle'`, `'poisson'`, and `'gaussian'`. This is
needed to maintain the proper image data range. If False, clipping
is not applied, and the output may extend beyond the range [-1, 1].
mean : float, optional
Mean of random distribution. Used in 'gaussian' and 'speckle'.
Default : 0.
var : float, optional
Variance of random distribution. Used in 'gaussian' and 'speckle'.
Note: variance = (standard deviation) ** 2. Default : 0.01
local_vars : ndarray, optional
Array of positive floats, same shape as `image`, defining the local
variance at every image point. Used in 'localvar'.
amount : float, optional
Proportion of image pixels to replace with noise on range [0, 1].
Used in 'salt', 'pepper', and 'salt & pepper'. Default : 0.05
salt_vs_pepper : float, optional
Proportion of salt vs. pepper noise for 's&p' on range [0, 1].
Higher values represent more salt. Default : 0.5 (equal amounts)
Returns
-------
out : ndarray
Output floating-point image data on range [0, 1] or [-1, 1] if the
input `image` was unsigned or signed, respectively.
Notes
-----
Speckle, Poisson, Localvar, and Gaussian noise may generate noise outside
the valid image range. The default is to clip (not alias) these values,
but they may be preserved by setting `clip=False`. Note that in this case
the output may contain values outside the ranges [0, 1] or [-1, 1].
Use this option with care.
Because of the prevalence of exclusively positive floating-point images in
intermediate calculations, it is not possible to intuit if an input is
signed based on dtype alone. Instead, negative values are explicitly
searched for. Only if found does this function assume signed input.
Unexpected results only occur in rare, poorly exposes cases (e.g. if all
values are above 50 percent gray in a signed `image`). In this event,
manually scaling the input to the positive domain will solve the problem.
The Poisson distribution is only defined for positive integers. To apply
this noise type, the number of unique values in the image is found and
the next round power of two is used to scale up the floating-point result,
after which it is scaled back down to the floating-point image range.
To generate Poisson noise against a signed image, the signed image is
temporarily converted to an unsigned image in the floating point domain,
Poisson noise is generated, then it is returned to the original range.
"""
mode = mode.lower()
# Detect if a signed image was input
if image.min() < 0:
low_clip = -1.
else:
low_clip = 0.
image = img_as_float(image)
if seed is not None:
np.random.seed(seed=seed)
allowedtypes = {
'gaussian': 'gaussian_values',
'localvar': 'localvar_values',
'poisson': 'poisson_values',
'salt': 'sp_values',
'pepper': 'sp_values',
's&p': 's&p_values',
'speckle': 'gaussian_values'}
kwdefaults = {
'mean': 0.,
'var': 0.01,
'amount': 0.05,
'salt_vs_pepper': 0.5,
'local_vars': np.zeros_like(image) + 0.01}
allowedkwargs = {
'gaussian_values': ['mean', 'var'],
'localvar_values': ['local_vars'],
'sp_values': ['amount'],
's&p_values': ['amount', 'salt_vs_pepper'],
'poisson_values': []}
for key in kwargs:
if key not in allowedkwargs[allowedtypes[mode]]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowedkwargs[allowedtypes[mode]]))
# Set kwarg defaults
for kw in allowedkwargs[allowedtypes[mode]]:
kwargs.setdefault(kw, kwdefaults[kw])
if mode == 'gaussian':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
image.shape)
out = image + noise
elif mode == 'localvar':
# Ensure local variance input is correct
if (kwargs['local_vars'] <= 0).any():
raise ValueError('All values of `local_vars` must be > 0.')
# Safe shortcut usage broadcasts kwargs['local_vars'] as a ufunc
out = image + np.random.normal(0, kwargs['local_vars'] ** 0.5)
elif mode == 'poisson':
# Determine unique values in image & calculate the next power of two
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
# Ensure image is exclusively positive
if low_clip == -1.:
old_max = image.max()
image = (image + 1.) / (old_max + 1.)
# Generating noise for each unique value in image.
out = np.random.poisson(image * vals) / float(vals)
# Return image to original range if input was signed
if low_clip == -1.:
out = out * (old_max + 1.) - 1.
elif mode == 'salt':
# Re-call function with mode='s&p' and p=1 (all salt noise)
out = random_noise(image, mode='s&p', seed=seed,
amount=kwargs['amount'], salt_vs_pepper=1.)
elif mode == 'pepper':
# Re-call function with mode='s&p' and p=1 (all pepper noise)
out = random_noise(image, mode='s&p', seed=seed,
amount=kwargs['amount'], salt_vs_pepper=0.)
elif mode == 's&p':
out = image.copy()
p = kwargs['amount']
q = kwargs['salt_vs_pepper']
flipped = choose(p, image.shape)
salted = _bernoulli(q, image.shape)
peppered = ~salted
out[flipped & salted] = 1
out[flipped & peppered] = low_clip
elif mode == 'speckle':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
image.shape)
out = image + image * noise
# Clip back to original range, if necessary
if clip:
out = np.clip(out, low_clip, 1.0)
return out
|
23,609 |
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
32,149 |
def cve_command(client: Client, args: dict) -> Union[List[CommandResults], CommandResults]:
"""Search for cve with the given ID and returns the cve data if found.
Args:
client: Integration client
args :The demisto args containing the cve_id
Returns:
CVE details containing ID, CVSS, modified date, published date and description.
"""
cve_id = args.get('cve_id', '')
cve_ids = argToList(cve_id)
command_results: List[CommandResults] = []
for _id in cve_ids:
if not valid_cve_id_format(_id):
raise DemistoException(f'"{_id}" is not a valid cve ID')
response = client.cve(_id)
if not response:
cr = CommandResults(readable_output=f'### No results found for cve {_id}')
else:
data = cve_to_context(response)
indicator = generate_indicator(data)
cr = CommandResults(
outputs_prefix='CVE',
outputs_key_field='ID',
outputs=data,
raw_response=response,
indicator=indicator
)
command_results.append(cr)
return command_results
|
def cve_command(client: Client, args: dict) -> Union[List[CommandResults], CommandResults]:
"""Search for cve with the given ID and returns the cve data if found.
Args:
client: Integration client
args :The demisto args containing the cve_id
Returns:
CVE details containing ID, CVSS, modified date, published date and description.
"""
cve_id = args.get('cve_id', '')
cve_ids = argToList(cve_id)
command_results: List[CommandResults] = []
for _id in cve_ids:
if not valid_cve_id_format(_id):
raise DemistoException(f'"{_id}" is not a valid cve ID')
response = client.cve(_id)
if not response:
cr = CommandResults(readable_output=f'### No results found for cve {_id}')
else:
data = cve_to_context(response)
indicator = generate_indicator(data)
cr = CommandResults(
outputs_prefix='CVE',
outputs_key_field='ID',
outputs=data,
raw_response=response,
indicator=indicator,
)
command_results.append(cr)
return command_results
|
42,938 |
def rbfkernel(R, sigma):
r"""This function generates a radial basis function (RBF) kernel matrix.
The elements of the RBF kernel are computed as:
.. math::
K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/(2*\sigma^2)},
where :math:`\bf{r}_i` is the coordinates of point :math:`i` and :math:`\sigma`
is a constant.
**Example usage:**
>>> R = array([[0, 1],
[1, 0],
[0, 0],
[1, 1]])
>>> sigma = 1.0
>>> rbfkernel (R, sigma)
array([[1. , 0.36787944, 0.60653066, 0.60653066],
[0.36787944, 1. , 0.60653066, 0.60653066],
[0.60653066, 0.60653066, 1. , 0.36787944],
[0.60653066, 0.60653066, 0.36787944, 1. ]])
Args:
R (array): coordinates of the points.
sigma (float): a constant.
Returns:
K (array): the kernel matrix.
"""
K = np.exp(-(cdist(R, R)) ** 2 / 2 / sigma ** 2)
return K
|
def rbfkernel(R, sigma):
r"""This function generates a radial basis function (RBF) kernel matrix.
The elements of the RBF kernel are computed as:
.. math::
K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/(2*\sigma^2)},
where :math:`\bf{r}_i` are the coordinates of point :math:`i` and :math:`\sigma`
is a constant.
**Example usage:**
>>> R = array([[0, 1],
[1, 0],
[0, 0],
[1, 1]])
>>> sigma = 1.0
>>> rbfkernel (R, sigma)
array([[1. , 0.36787944, 0.60653066, 0.60653066],
[0.36787944, 1. , 0.60653066, 0.60653066],
[0.60653066, 0.60653066, 1. , 0.36787944],
[0.60653066, 0.60653066, 0.36787944, 1. ]])
Args:
R (array): coordinates of the points.
sigma (float): a constant.
Returns:
K (array): the kernel matrix.
"""
K = np.exp(-(cdist(R, R)) ** 2 / 2 / sigma ** 2)
return K
|
528 |
def recommit_migration(domain):
print("This is meant be used when a previous COMMIT failed with the error:")
print("could not set use_sql_backend for domain womenengagement (try again)")
answer = input("Type 'commit' to continue: ")
if answer != "commit":
print("Abort.")
return
import_statement = "from corehq.apps.couch_sql_migration.progress import set_couch_sql_migration_complete"
py(
"manage.py", "shell", "-c",
f'{import_statement}; set_couch_sql_migration_complete("{domain}")',
)
raise MigrationFinished
|
def recommit_migration(domain):
print("This is meant be used when a previous COMMIT failed with the error:")
print(f"could not set use_sql_backend for domain {domain} (try again)")
answer = input("Type 'commit' to continue: ")
if answer != "commit":
print("Abort.")
return
import_statement = "from corehq.apps.couch_sql_migration.progress import set_couch_sql_migration_complete"
py(
"manage.py", "shell", "-c",
f'{import_statement}; set_couch_sql_migration_complete("{domain}")',
)
raise MigrationFinished
|
11,914 |
def getmode(mode):
"""Gets a mode descriptor for the given mode."""
global _modes
if not _modes:
# initialize mode cache
modes = {}
for m, (basemode, basetype, bands, typestr) in {
# core modes
# Bits need to be extended to bytes
"1": ("L", "L", ("1",), "|b1"),
"L": ("L", "L", ("L",), "|u1"),
"I": ("L", "I", ("I",), Image._ENDIAN + "i4"),
"F": ("L", "F", ("F",), Image._ENDIAN + "f4"),
"P": ("P", "L", ("P",), "|u1"),
"RGB": ("RGB", "L", ("R", "G", "B"), "|u1"),
"RGBX": ("RGB", "L", ("R", "G", "B", "X"), "|u1"),
"RGBA": ("RGB", "L", ("R", "G", "B", "A"), "|u1"),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K"), "|u1"),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr"), "|u1"),
# UNDONE - unsigned |u1i1i1
"LAB": ("RGB", "L", ("L", "A", "B"), "|u1"),
"HSV": ("RGB", "L", ("H", "S", "V"), "|u1"),
# extra experimental modes
"RGBa": ("RGB", "L", ("R", "G", "B", "a"), None),
"BGR;15": ("RGB", "L", ("B", "G", "R"), None),
"BGR;16": ("RGB", "L", ("B", "G", "R"), None),
"BGR;24": ("RGB", "L", ("B", "G", "R"), None),
"BGR;32": ("RGB", "L", ("B", "G", "R"), None),
"LA": ("L", "L", ("L", "A"), "|u1"),
"La": ("L", "L", ("L", "a"), None),
"PA": ("RGB", "L", ("P", "A"), "|u1"),
}.items():
modes[m] = ModeDescriptor(m, bands, basemode, basetype, typestr)
# mapping modes
for i16mode, typestr in {
# I;16 == I;16L, and I;32 == I;32L
"I;16": "<u2",
"I;16S": "<i2",
"I;16L": "<u2",
"I;16LS": "<i2",
"I;16B": ">u2",
"I;16BS": ">i2",
"I;16N": Image._ENDIAN + "u2",
"I;16NS": Image._ENDIAN + "i2",
"I;32": "<u4",
"I;32B": ">u4",
"I;32L": "<u4",
"I;32S": "<i4",
"I;32BS": ">i4",
"I;32LS": "<i4",
}.items():
modes[i16mode] = ModeDescriptor(i16mode, ("I",), "L", "L", typestr)
# set global mode cache atomically
_modes = modes
return _modes[mode]
|
def getmode(mode):
"""Gets a mode descriptor for the given mode."""
global _modes
if not _modes:
# initialize mode cache
modes = {}
for m, (basemode, basetype, bands, typestr) in {
# core modes
# Bits need to be extended to bytes
"1": ("L", "L", ("1",), "|b1"),
"L": ("L", "L", ("L",), "|u1"),
"I": ("L", "I", ("I",), Image._ENDIAN + "i4"),
"F": ("L", "F", ("F",), Image._ENDIAN + "f4"),
"P": ("P", "L", ("P",), "|u1"),
"RGB": ("RGB", "L", ("R", "G", "B"), "|u1"),
"RGBX": ("RGB", "L", ("R", "G", "B", "X"), "|u1"),
"RGBA": ("RGB", "L", ("R", "G", "B", "A"), "|u1"),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K"), "|u1"),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr"), "|u1"),
# UNDONE - unsigned |u1i1i1
"LAB": ("RGB", "L", ("L", "A", "B"), "|u1"),
"HSV": ("RGB", "L", ("H", "S", "V"), "|u1"),
# extra experimental modes
"RGBa": ("RGB", "L", ("R", "G", "B", "a"), None),
"BGR;15": ("RGB", "L", ("B", "G", "R"), None),
"BGR;16": ("RGB", "L", ("B", "G", "R"), "u2"),
"BGR;24": ("RGB", "L", ("B", "G", "R"), None),
"BGR;32": ("RGB", "L", ("B", "G", "R"), None),
"LA": ("L", "L", ("L", "A"), "|u1"),
"La": ("L", "L", ("L", "a"), None),
"PA": ("RGB", "L", ("P", "A"), "|u1"),
}.items():
modes[m] = ModeDescriptor(m, bands, basemode, basetype, typestr)
# mapping modes
for i16mode, typestr in {
# I;16 == I;16L, and I;32 == I;32L
"I;16": "<u2",
"I;16S": "<i2",
"I;16L": "<u2",
"I;16LS": "<i2",
"I;16B": ">u2",
"I;16BS": ">i2",
"I;16N": Image._ENDIAN + "u2",
"I;16NS": Image._ENDIAN + "i2",
"I;32": "<u4",
"I;32B": ">u4",
"I;32L": "<u4",
"I;32S": "<i4",
"I;32BS": ">i4",
"I;32LS": "<i4",
}.items():
modes[i16mode] = ModeDescriptor(i16mode, ("I",), "L", "L", typestr)
# set global mode cache atomically
_modes = modes
return _modes[mode]
|
29,119 |
def prepare_for_next_release():
"""Asks the release co-ordinator:
1. To create a new chat group for the next release and send a message
to make the release & QA co-ordinators aware.
2. Send message to oppia-dev to inform about next release cut.
3. Send message to oppia-dev as a reminder for job submissions.
"""
common.open_new_tab_in_browser_if_possible(
release_constants.RELEASE_ROTA_URL)
common.ask_user_to_confirm(
'Create a new chat group for the next release, '
'and add the release coordinator, QA lead, Ankita '
'and Nithesh to that group. You can find the release schedule '
'and coordinators here: %s\n' % release_constants.RELEASE_ROTA_URL)
common.ask_user_to_confirm(
'Please send the following message to the newly created group:\n\n'
'Hi all, This is the group chat for the next release. '
'[Release co-ordinator\'s name] and [QA Lead\'s name] will be '
'the release co-ordinator & QA Lead for next release. '
'Please follow the release process doc: '
'[Add link to release process doc] to ensure the release '
'follows the schedule. Thanks!\n')
common.open_new_tab_in_browser_if_possible(
release_constants.OPPIA_DEV_GROUP_URL)
common.ask_user_to_confirm(
'Send the following message to oppia-dev:\n\n'
'Hi all, This is an update for the next month\'s release. '
'The next month release cut is [Add release cut date for next month]. '
'Make sure you plan your tasks accordingly. Thanks!\n'
'The subject for the message: Updates for next release\n')
common.ask_user_to_confirm(
'Send the following message to oppia-dev:\n\n'
'Hi all, This is a reminder to fill in the job requests '
'here: %s if you are planning to run your job in the next release. '
'Please fill in the requests by [Add a deadline which is at least 7 '
'days before the next release cut]. Thanks!\n'
'The subject for the message: Deadline for job requests for '
'next release\n' % release_constants.JOBS_FORM_URL)
|
def prepare_for_next_release():
"""Asks the release co-ordinator:
1. To create a new chat group for the next release and send a message
to make the release & QA co-ordinators aware.
2. Send message to oppia-dev to inform about next release cut.
3. Send message to oppia-dev as a reminder for job submissions.
"""
common.open_new_tab_in_browser_if_possible(
release_constants.RELEASE_ROTA_URL)
common.ask_user_to_confirm(
'Create a new chat group for the next release, '
'and add the release coordinator, QA lead, Ankita '
'and Nithesh to that group. You can find the release schedule '
'and coordinators here: %s\n' % release_constants.RELEASE_ROTA_URL)
common.ask_user_to_confirm(
'Please send the following message to the newly created group:\n\n'
'Hi all, This is the group chat for the next release. '
'[Release co-ordinator\'s name] and [QA Lead\'s name] will be '
'the release co-ordinator & QA Lead for next release. '
'Please follow the release process doc: '
'[Add link to release process doc] to ensure the release '
'follows the schedule. Thanks!\n')
common.open_new_tab_in_browser_if_possible(
release_constants.OPPIA_DEV_GROUP_URL)
common.ask_user_to_confirm(
'Send the following message to oppia-dev:\n\n'
'Hi all, This is an update for the next month\'s release. '
'The next month release cut is [Add release cut date for next month]. '
'Make sure you plan your tasks accordingly. Thanks!\n'
'The subject for the message: Updates for next release\n')
common.ask_user_to_confirm(
'Send the following message to oppia-dev:\n\n'
'Hi all, This is a reminder to fill in the job requests '
'here: %s if you are planning to run your job in the next release. '
'Please fill in the requests by [Add a deadline which is at least 7 '
'days before the next release cut]. Thanks!\n'
'The subject for the message: Deadline for job requests for '
'the next release\n' % release_constants.JOBS_FORM_URL)
|
57,802 |
def main():
"""
Intercept and execute commands.
"""
# IdentityNow API Base URL (https://org.api.identitynow.com)
base_url = demisto.params().get('identitynow_url')
# OAuth 2.0 Credentials
client_id = demisto.params().get('client_id')
client_secret = demisto.params().get('client_secret')
grant_type = 'client_credentials'
# Convert the argument to an int or set to MAX_INCIDENTS_TO_FETCH
max_results = int(demisto.params().get('max_fetch'))
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
# first_fetch_str = demisto.params().get('first_fetch', '3 days')
# Other configs
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
request_timeout = 10
headers = get_headers(base_url, client_id, client_secret, grant_type)
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers,
max_results=max_results,
request_timeout=request_timeout)
demisto.debug(f'Command being called is {demisto.command()}')
try:
results = None
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
results = test_connection(base_url, client_id, client_secret, grant_type)
elif demisto.command() == 'identitynow-search-identities':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'identities', query, offset, limit)
results = build_results('IdentityNow.Identity', 'id', response)
elif demisto.command() == 'identitynow-get-accounts':
id = demisto.args().get('id', None)
name = demisto.args().get('name', None)
native_identity = demisto.args().get('native_identity', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = get_accounts(client, id, name, native_identity, offset, limit)
results = build_results('IdentityNow.Account', 'id', response)
elif demisto.command() == 'identitynow-get-accountactivities':
id = demisto.args().get('id', None)
requested_for = demisto.args().get('requested_for', None)
requested_by = demisto.args().get('requested_by', None)
regarding_identity = demisto.args().get('regarding_identity', None)
type = demisto.args().get('type', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = get_account_activities(client, id, requested_for, requested_by, regarding_identity, type, offset,
limit)
results = build_results('IdentityNow.AccountActivity', 'id', response)
elif demisto.command() == 'identitynow-search-accessprofiles':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'accessprofiles', query, offset, limit)
results = build_results('IdentityNow.AccessProfile', 'id', response)
elif demisto.command() == 'identitynow-search-roles':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'roles', query, offset, limit)
results = build_results('IdentityNow.Role', 'id', response)
elif demisto.command() == 'identitynow-search-entitlements':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'entitlements', query, offset, limit)
results = build_results('IdentityNow.Entitlement', 'id', response)
elif demisto.command() == 'identitynow-search-events':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'events', query, offset, limit)
results = build_results('IdentityNow.Event', 'id', response)
elif demisto.command() == 'identitynow-request-grant':
requested_for = demisto.args().get('requested_for', None)
requested_item = demisto.args().get('requested_item', None)
requested_item_type = demisto.args().get('requested_item_type', None)
comment = demisto.args().get('comment', None)
results = access_request(client, "GRANT_ACCESS", requested_for, requested_item, requested_item_type,
comment)
elif demisto.command() == 'identitynow-request-revoke':
requested_for = demisto.args().get('requested_for', None)
requested_item = demisto.args().get('requested_item', None)
requested_item_type = demisto.args().get('requested_item_type', None)
comment = demisto.args().get('comment', None)
results = access_request(client, "REVOKE_ACCESS", requested_for, requested_item, requested_item_type,
comment)
return_results(results)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main():
"""
Intercept and execute commands.
"""
# IdentityNow API Base URL (https://org.api.identitynow.com)
params = demisto.params()
base_url = params.get('identitynow_url')
# OAuth 2.0 Credentials
client_id = params.get('client_id')
client_secret = params.get('client_secret')
grant_type = 'client_credentials'
# Convert the argument to an int or set to MAX_INCIDENTS_TO_FETCH
max_results = int(params.get('max_fetch'))
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
# first_fetch_str = params.get('first_fetch', '3 days')
# Other configs
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
request_timeout = 10
headers = get_headers(base_url, client_id, client_secret, grant_type)
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers,
max_results=max_results,
request_timeout=request_timeout)
demisto.debug(f'Command being called is {demisto.command()}')
try:
results = None
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
results = test_connection(base_url, client_id, client_secret, grant_type)
elif demisto.command() == 'identitynow-search-identities':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'identities', query, offset, limit)
results = build_results('IdentityNow.Identity', 'id', response)
elif demisto.command() == 'identitynow-get-accounts':
id = demisto.args().get('id', None)
name = demisto.args().get('name', None)
native_identity = demisto.args().get('native_identity', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = get_accounts(client, id, name, native_identity, offset, limit)
results = build_results('IdentityNow.Account', 'id', response)
elif demisto.command() == 'identitynow-get-accountactivities':
id = demisto.args().get('id', None)
requested_for = demisto.args().get('requested_for', None)
requested_by = demisto.args().get('requested_by', None)
regarding_identity = demisto.args().get('regarding_identity', None)
type = demisto.args().get('type', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = get_account_activities(client, id, requested_for, requested_by, regarding_identity, type, offset,
limit)
results = build_results('IdentityNow.AccountActivity', 'id', response)
elif demisto.command() == 'identitynow-search-accessprofiles':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'accessprofiles', query, offset, limit)
results = build_results('IdentityNow.AccessProfile', 'id', response)
elif demisto.command() == 'identitynow-search-roles':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'roles', query, offset, limit)
results = build_results('IdentityNow.Role', 'id', response)
elif demisto.command() == 'identitynow-search-entitlements':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'entitlements', query, offset, limit)
results = build_results('IdentityNow.Entitlement', 'id', response)
elif demisto.command() == 'identitynow-search-events':
query = demisto.args().get('query', None)
offset = int(demisto.args().get('offset', OFFSET_DEFAULT))
limit = int(demisto.args().get('limit', LIMIT_DEFAULT))
response = search(client, 'events', query, offset, limit)
results = build_results('IdentityNow.Event', 'id', response)
elif demisto.command() == 'identitynow-request-grant':
requested_for = demisto.args().get('requested_for', None)
requested_item = demisto.args().get('requested_item', None)
requested_item_type = demisto.args().get('requested_item_type', None)
comment = demisto.args().get('comment', None)
results = access_request(client, "GRANT_ACCESS", requested_for, requested_item, requested_item_type,
comment)
elif demisto.command() == 'identitynow-request-revoke':
requested_for = demisto.args().get('requested_for', None)
requested_item = demisto.args().get('requested_item', None)
requested_item_type = demisto.args().get('requested_item_type', None)
comment = demisto.args().get('comment', None)
results = access_request(client, "REVOKE_ACCESS", requested_for, requested_item, requested_item_type,
comment)
return_results(results)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
5,324 |
def _change_str_to_bytes(data, encoding='utf-8', encode_keys=False):
'''
info: will make string objects into byte objects
warning: this function will destroy the data object and objects that data links to
:param data: object
:param encoding: str
:param encode_keys: bool: if false key strings will not be turned into bytes
:return: new object
'''
if isinstance(data, dict):
new_dict = {}
# recursively check every item in dict
for key in data:
item = _change_str_to_bytes(data.get(key), encoding)
if encode_keys:
# keys that are strings most be made into bytes
key = _change_str_to_bytes(key, encoding)
new_dict[key] = item
data = new_dict
elif isinstance(data, list):
new_list = []
# recursively check every item in list
for item in data:
new_list.append(_change_str_to_bytes(item, encoding))
data = new_list
elif isinstance(data, tuple):
new_list = []
# recursively check every item in list
for item in data:
new_list.append(_change_str_to_bytes(item, encoding))
data = tuple(new_list)
elif isinstance(data, str):
# data is turning into bytes because if was a string
data = data.encode(encoding)
return data
|
def _change_str_to_bytes(data, encoding='utf-8', encode_keys=False):
'''
Convert strings to bytes and return bytes.
warning: this function will destroy the data object and objects that data links to
:param data: object
:param encoding: str
:param encode_keys: bool: if false key strings will not be turned into bytes
:return: new object
'''
if isinstance(data, dict):
new_dict = {}
# recursively check every item in dict
for key in data:
item = _change_str_to_bytes(data.get(key), encoding)
if encode_keys:
# keys that are strings most be made into bytes
key = _change_str_to_bytes(key, encoding)
new_dict[key] = item
data = new_dict
elif isinstance(data, list):
new_list = []
# recursively check every item in list
for item in data:
new_list.append(_change_str_to_bytes(item, encoding))
data = new_list
elif isinstance(data, tuple):
new_list = []
# recursively check every item in list
for item in data:
new_list.append(_change_str_to_bytes(item, encoding))
data = tuple(new_list)
elif isinstance(data, str):
# data is turning into bytes because if was a string
data = data.encode(encoding)
return data
|
29,709 |
def test_heapset_pickle():
"""Test pickle roundtrip for a HeapSet.
Note
----
To make this test work with plain pickle and not need cloudpickle, we had to avoid
lambdas and local classes in our test. Here we're testing that HeapSet doesn't add
lambdas etc. of its own.
"""
heap = HeapSet(key=operator.attrgetter("i"))
# Test edge case with broken weakrefs
for i in range(200):
c = C(f"y{i}", random.random())
heap.add(c)
if random.random() > 0.7:
heap.remove(c)
heap2 = pickle.loads(pickle.dumps(heap))
assert len(heap) == len(heap2)
# Test that the heap has been re-heapified upon unpickle
assert len(heap2._heap) < len(heap._heap)
while heap:
assert heap.pop() == heap2.pop()
|
def test_heapset_pickle():
"""Test pickle roundtrip for a HeapSet.
Note
----
To make this test work with plain pickle and not need cloudpickle, we had to avoid
lambdas and local classes in our test. Here we're testing that HeapSet doesn't add
lambdas etc. of its own.
"""
heap = HeapSet(key=operator.attrgetter("i"))
# The heap contains broken weakrefs
for i in range(200):
c = C(f"y{i}", random.random())
heap.add(c)
if random.random() > 0.7:
heap.remove(c)
heap2 = pickle.loads(pickle.dumps(heap))
assert len(heap) == len(heap2)
# Test that the heap has been re-heapified upon unpickle
assert len(heap2._heap) < len(heap._heap)
while heap:
assert heap.pop() == heap2.pop()
|
50,395 |
def test_best_response_2p():
test_case0 = {
'payoff_array': np.array([[4, 0], [3, 2], [0, 3]]),
'mixed_actions':
[np.array([1, 0]), np.array([0.5, 0.5]), np.array([0, 1])],
'brs_expected': [0, 1, 2]
}
test_case1 = {'payoff_array': np.zeros((2, 3)), 'mixed_actions': [np.array(
[1, 0, 0]), np.array([1 / 3, 1 / 3, 1 / 3])], 'brs_expected': [0, 0]}
for test_case in [test_case0, test_case1]:
for mixed_action, br_expected in zip(test_case['mixed_actions'],
test_case['brs_expected']):
br_computed = \
best_response_2p(test_case['payoff_array'], mixed_action)
assert_(br_computed == br_expected)
|
def test_best_response_2p():
test_case0 = {
'payoff_array': np.array([[4, 0], [3, 2], [0, 3]]),
'mixed_actions':
[np.array([1, 0]), np.array([0.5, 0.5]), np.array([0, 1])],
'brs_expected': [0, 1, 2]
}
test_case1 = {
'payoff_array': np.zeros((2, 3)),
'mixed_actions':
[np.array([1, 0, 0]), np.array([1 / 3, 1 / 3, 1 / 3])],
'brs_expected': [0, 0]
}
for test_case in [test_case0, test_case1]:
for mixed_action, br_expected in zip(test_case['mixed_actions'],
test_case['brs_expected']):
br_computed = \
best_response_2p(test_case['payoff_array'], mixed_action)
assert_(br_computed == br_expected)
|
8,058 |
def fromhdf5(source, where=None, name=None, condition=None,
condvars=None, start=None, stop=None, step=None):
"""
Provides access to an HDF5 table. E.g.::
>>> import petl as etl
>>>
>>> # set up a new hdf5 table to demonstrate with
>>> class FooBar(tables.IsDescription): # doctest: +SKIP
... foo = tables.Int32Col(pos=0) # doctest: +SKIP
... bar = tables.StringCol(6, pos=2) # doctest: +SKIP
>>> #
>>> def setup_hdfs5_table():
... import tables
... h5file = tables.open_file('example.h5', mode='w',
... title='Example file')
... h5file.create_group('/', 'testgroup', 'Test Group')
... h5table = h5file.create_table('/testgroup', 'testtable', FooBar,
... 'Test Table')
... # load some data into the table
... table1 = (('foo', 'bar'),
... (1, b'asdfgh'),
... (2, b'qwerty'),
... (3, b'zxcvbn'))
... for row in table1[1:]:
... for i, f in enumerate(table1[0]):
... h5table.row[f] = row[i]
... h5table.row.append()
... h5file.flush()
... h5file.close()
>>>
>>> setup_hdfs5_table() # doctest: +SKIP
>>>
>>> # now demonstrate use of fromhdf5
>>> table1 = etl.fromhdf5('example.h5', '/testgroup', 'testtable') # doctest: +SKIP
>>> table1 # doctest: +SKIP
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'asdfgh' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
| 3 | b'zxcvbn' |
+-----+-----------+
>>> # alternatively just specify path to table node
... table1 = etl.fromhdf5('example.h5', '/testgroup/testtable') # doctest: +SKIP
>>> # ...or use an existing tables.File object
... h5file = tables.open_file('example.h5') # doctest: +SKIP
>>> table1 = etl.fromhdf5(h5file, '/testgroup/testtable') # doctest: +SKIP
>>> # ...or use an existing tables.Table object
... h5tbl = h5file.get_node('/testgroup/testtable') # doctest: +SKIP
>>> table1 = etl.fromhdf5(h5tbl) # doctest: +SKIP
>>> # use a condition to filter data
... table2 = etl.fromhdf5(h5tbl, condition='foo < 3') # doctest: +SKIP
>>> table2 # doctest: +SKIP
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'asdfgh' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
>>> h5file.close() # doctest: +SKIP
"""
return HDF5View(source, where=where, name=name,
condition=condition, condvars=condvars,
start=start, stop=stop, step=step)
|
def fromhdf5(source, where=None, name=None, condition=None,
condvars=None, start=None, stop=None, step=None):
"""
Provides access to an HDF5 table. E.g.::
>>> import petl as etl
>>>
>>> # set up a new hdf5 table to demonstrate with
>>> class FooBar(tables.IsDescription): # doctest: +SKIP
... foo = tables.Int32Col(pos=0) # doctest: +SKIP
... bar = tables.StringCol(6, pos=2) # doctest: +SKIP
>>> #
>>> def setup_hdfs5_table():
... import tables
... h5file = tables.open_file('example.h5', mode='w',
... title='Example file')
... h5file.create_group('/', 'testgroup', 'Test Group')
... h5table = h5file.create_table('/testgroup', 'testtable', FooBar,
... 'Test Table')
... # load some data into the table
... table1 = (('foo', 'bar'),
... (1, b'asdfgh'),
... (2, b'qwerty'),
... (3, b'zxcvbn'))
... for row in table1[1:]:
... for i, f in enumerate(table1[0]):
... h5table.row[f] = row[i]
... h5table.row.append()
... h5file.flush()
... h5file.close()
>>>
>>> setup_hdf5_table() # doctest: +SKIP
>>>
>>> # now demonstrate use of fromhdf5
>>> table1 = etl.fromhdf5('example.h5', '/testgroup', 'testtable') # doctest: +SKIP
>>> table1 # doctest: +SKIP
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'asdfgh' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
| 3 | b'zxcvbn' |
+-----+-----------+
>>> # alternatively just specify path to table node
... table1 = etl.fromhdf5('example.h5', '/testgroup/testtable') # doctest: +SKIP
>>> # ...or use an existing tables.File object
... h5file = tables.open_file('example.h5') # doctest: +SKIP
>>> table1 = etl.fromhdf5(h5file, '/testgroup/testtable') # doctest: +SKIP
>>> # ...or use an existing tables.Table object
... h5tbl = h5file.get_node('/testgroup/testtable') # doctest: +SKIP
>>> table1 = etl.fromhdf5(h5tbl) # doctest: +SKIP
>>> # use a condition to filter data
... table2 = etl.fromhdf5(h5tbl, condition='foo < 3') # doctest: +SKIP
>>> table2 # doctest: +SKIP
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'asdfgh' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
>>> h5file.close() # doctest: +SKIP
"""
return HDF5View(source, where=where, name=name,
condition=condition, condvars=condvars,
start=start, stop=stop, step=step)
|
1,305 |
def test_bagging_classifier_voting():
# Test BaggingClassifier when base_estimator doesn't define predict_proba
A = np.random.rand(10, 4)
Y = np.random.randint(2, size=10, dtype=np.bool)
bagging_classifier = BaggingClassifier(DummyVoteClassifier())
bagging_classifier.fit(A, Y)
# All ensemble members predict True; BaggingClassifier should predict True
assert(bagging_classifier.predict(A).all())
|
def test_bagging_classifier_voting():
# Test BaggingClassifier when base_estimator doesn't define predict_proba
A = np.random.rand(10, 4)
y = np.random.randint(2, size=10, dtype=np.bool)
bagging_classifier = BaggingClassifier(DummyVoteClassifier())
bagging_classifier.fit(A, Y)
# All ensemble members predict True; BaggingClassifier should predict True
assert(bagging_classifier.predict(A).all())
|
19,059 |
def test_process(pkg, kwargs):
with tty.log.log_output(pkg.test_log_file) as logger:
with logger.force_echo():
tty.msg('Testing package {0}'
.format(pkg.test_suite.test_pkg_id(pkg.spec)))
# use debug print levels for log file to record commands
old_debug = tty.is_debug()
tty.set_debug(True)
# run test methods from the package and all virtuals it
# provides virtuals have to be deduped by name
v_names = list(set([vspec.name
for vspec in pkg.virtuals_provided]))
# hack for compilers that are not dependencies (yet)
# TODO: this all eventually goes away
c_names = ('gcc', 'intel', 'intel-parallel-studio', 'pgi')
if pkg.name in c_names:
v_names.extend(['c', 'cxx', 'fortran'])
if pkg.spec.satisfies('llvm+clang'):
v_names.extend(['c', 'cxx'])
test_specs = [pkg.spec] + [spack.spec.Spec(v_name)
for v_name in sorted(v_names)]
ran_actual_test_function = False
try:
with fsys.working_dir(
pkg.test_suite.test_dir_for_spec(pkg.spec)):
for spec in test_specs:
pkg.test_suite.current_test_spec = spec
# Fail gracefully if a virtual has no package/tests
try:
spec_pkg = spec.package
except spack.repo.UnknownPackageError:
continue
# copy installed test sources cache into test cache dir
if spec.concrete:
cache_source = spec_pkg.install_test_root
cache_dir = pkg.test_suite.current_test_cache_dir
if (os.path.isdir(cache_source) and
not os.path.exists(cache_dir)):
fsys.install_tree(cache_source, cache_dir)
# copy test data into test data dir
data_source = Prefix(spec_pkg.package_dir).test
data_dir = pkg.test_suite.current_test_data_dir
if (os.path.isdir(data_source) and
not os.path.exists(data_dir)):
# We assume data dir is used read-only
# maybe enforce this later
shutil.copytree(data_source, data_dir)
# grab the function for each method so we can call
# it with the package
test_fn = spec_pkg.__class__.test
if not isinstance(test_fn, types.FunctionType):
test_fn = test_fn.__func__
# Skip any test methods consisting solely of 'pass'
# since they do not contribute to package testing.
source = (inspect.getsource(test_fn)).split('\n')[1:]
lines = [ln.strip() for ln in source]
statements = [ln for ln in lines if not ln.startswith('#')]
if len(statements) > 0 and statements[0] == 'pass':
continue
# Run the tests
ran_actual_test_function = True
test_fn(pkg)
# If fail-fast was on, we error out above
# If we collect errors, raise them in batch here
if pkg.test_failures:
raise TestFailure(pkg.test_failures)
finally:
# reset debug level
tty.set_debug(old_debug)
# flag the package as having been tested (i.e., ran one or more
# non-pass-only methods
if ran_actual_test_function:
fsys.touch(pkg.tested_file)
|
def test_process(pkg, kwargs):
with tty.log.log_output(pkg.test_log_file) as logger:
with logger.force_echo():
tty.msg('Testing package {0}'
.format(pkg.test_suite.test_pkg_id(pkg.spec)))
# use debug print levels for log file to record commands
old_debug = tty.is_debug()
tty.set_debug(True)
# run test methods from the package and all virtuals it
# provides virtuals have to be deduped by name
v_names = list(set([vspec.name
for vspec in pkg.virtuals_provided]))
# hack for compilers that are not dependencies (yet)
# TODO: this all eventually goes away
c_names = ('gcc', 'intel', 'intel-parallel-studio', 'pgi')
if pkg.name in c_names:
v_names.extend(['c', 'cxx', 'fortran'])
if pkg.spec.satisfies('llvm+clang'):
v_names.extend(['c', 'cxx'])
test_specs = [pkg.spec] + [spack.spec.Spec(v_name)
for v_name in sorted(v_names)]
ran_actual_test_function = False
try:
with fsys.working_dir(
pkg.test_suite.test_dir_for_spec(pkg.spec)):
for spec in test_specs:
pkg.test_suite.current_test_spec = spec
# Fail gracefully if a virtual has no package/tests
try:
spec_pkg = spec.package
except spack.repo.UnknownPackageError:
continue
# copy installed test sources cache into test cache dir
if spec.concrete:
cache_source = spec_pkg.install_test_root
cache_dir = pkg.test_suite.current_test_cache_dir
if (os.path.isdir(cache_source) and
not os.path.exists(cache_dir)):
fsys.install_tree(cache_source, cache_dir)
# copy test data into test data dir
data_source = Prefix(spec_pkg.package_dir).test
data_dir = pkg.test_suite.current_test_data_dir
if (os.path.isdir(data_source) and
not os.path.exists(data_dir)):
# We assume data dir is used read-only
# maybe enforce this later
shutil.copytree(data_source, data_dir)
# grab the function for each method so we can call
# it with the package
test_fn = spec_pkg.__class__.test
if not isinstance(test_fn, types.FunctionType):
test_fn = test_fn.__func__
# Skip any test methods consisting solely of 'pass'
# since they do not contribute to package testing.
source = (inspect.getsource(test_fn)).splitlines()[1:]
lines = [ln.strip() for ln in source]
statements = [ln for ln in lines if not ln.startswith('#')]
if len(statements) > 0 and statements[0] == 'pass':
continue
# Run the tests
ran_actual_test_function = True
test_fn(pkg)
# If fail-fast was on, we error out above
# If we collect errors, raise them in batch here
if pkg.test_failures:
raise TestFailure(pkg.test_failures)
finally:
# reset debug level
tty.set_debug(old_debug)
# flag the package as having been tested (i.e., ran one or more
# non-pass-only methods
if ran_actual_test_function:
fsys.touch(pkg.tested_file)
|
44,015 |
def _reconstruct_gen(fun, spectrum, shifts=None, fun_at_zero=None):
r"""Reconstruct a univariate (real-valued) Fourier series with given spectrum.
Args:
fun (callable): Fourier series to reconstruct with signature ``float -> float``
spectrum (Sequence): Frequency spectrum of the Fourier series; non-positive
frequencies are ignored
shifts (list): Shift angles at which to evaluate ``fun`` for the reconstruction
Chosen equidistantly within the interval :math:`[0, 2\pi/f_\text{min}]` if ``shifts=None``
where :math:`f_\text{min}` is the smallest frequency in ``spectrum``.
fun_at_zero (float): Value of ``fun`` at zero. If :math:`0` is among the ``shifts``
and ``fun_at_zero`` is provided, one evaluation of ``fun`` is saved.
Returns:
callable: Reconstructed Fourier series with :math:`R` frequencies in ``spectrum``,
as ``qml.numpy`` based function and coinciding with ``fun`` on :math:`2R+1` points.
"""
# pylint: disable=unused-argument
have_fun_at_zero = fun_at_zero is not None
have_shifts = shifts is not None
# For an empty/trivial spectrum, the function simply is constant
if spectrum in ([], [0.0]):
if have_shifts:
fun_value = fun(shifts[0])
if have_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
else:
fun_value = fun_at_zero if have_fun_at_zero else fun(0.0)
def constant_fn(x):
"""Univariate reconstruction of a constant Fourier series."""
return fun_value
return constant_fn
spectrum = np.array([f for f in spectrum if f > 0.0])
f_max = max(spectrum)
# If no shifts are provided, choose equidistant ones
if not have_shifts:
R = len(spectrum)
shifts = np.arange(-R, R + 1) * 2 * np.pi / (f_max * (2 * R + 1)) * R
zero_idx = R
need_fun_at_zero = True
elif have_fun_at_zero:
zero_idx = np.where(np.isclose(shifts, 0.0))[0]
zero_idx = zero_idx[0] if len(zero_idx) > 0 else None
need_fun_at_zero = zero_idx is not None
# Take care of shifts close to zero if fun_at_zero was provided
if have_fun_at_zero and need_fun_at_zero:
# Only one shift may be zero at a time
shifts = np.concatenate([[shifts[zero_idx]], shifts[:zero_idx], shifts[zero_idx + 1 :]])
evals = np.array([fun_at_zero] + list(map(fun, shifts[1:])))
else:
if have_fun_at_zero and not need_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
evals = np.array(list(map(fun, shifts)))
L = len(shifts)
# Construct the coefficient matrix case by case
C1 = np.ones((L, 1))
C2 = np.cos(np.outer(shifts, spectrum))
C3 = np.sin(np.outer(shifts, spectrum))
C = np.hstack([C1, C2, C3])
# Solve the system of linear equations
cond = np.linalg.cond(C)
if cond > 1e8:
warnings.warn(
f"The condition number of the Fourier transform matrix is very large: {cond}.",
UserWarning,
)
W = np.linalg.solve(C, evals)
# Extract the Fourier coefficients
R = (L - 1) // 2
a0 = W[0]
a = W[1 : R + 1]
b = W[R + 1 :]
# Construct the Fourier series
def _reconstruction(x):
"""Univariate reconstruction based on arbitrary shifts."""
return a0 + np.dot(a, np.cos(spectrum * x)) + np.dot(b, np.sin(spectrum * x))
return _reconstruction
|
def _reconstruct_gen(fun, spectrum, shifts=None, fun_at_zero=None):
r"""Reconstruct a univariate (real-valued) Fourier series with given spectrum.
Args:
fun (callable): Fourier series to reconstruct with signature ``float -> float``
spectrum (Sequence): Frequency spectrum of the Fourier series; non-positive
frequencies are ignored
shifts (list): Shift angles at which to evaluate ``fun`` for the reconstruction
Chosen equidistantly within the interval :math:`[0, 2\pi/f_\text{min}]` if ``shifts=None``
where :math:`f_\text{min}` is the smallest frequency in ``spectrum``.
fun_at_zero (float): Value of ``fun`` at zero. If :math:`0` is among the ``shifts``
and ``fun_at_zero`` is provided, one evaluation of ``fun`` is saved.
Returns:
callable: Reconstructed Fourier series with :math:`R` frequencies in ``spectrum``,
as ``qml.numpy`` based function and coinciding with ``fun`` on :math:`2R+1` points.
"""
# pylint: disable=unused-argument
have_fun_at_zero = fun_at_zero is not None
have_shifts = shifts is not None
# For an empty/trivial spectrum, the function simply is constant
if qml.math.toarray(spectrum).tolist() in ([], [0.0]):
if have_shifts:
fun_value = fun(shifts[0])
if have_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
else:
fun_value = fun_at_zero if have_fun_at_zero else fun(0.0)
def constant_fn(x):
"""Univariate reconstruction of a constant Fourier series."""
return fun_value
return constant_fn
spectrum = np.array([f for f in spectrum if f > 0.0])
f_max = max(spectrum)
# If no shifts are provided, choose equidistant ones
if not have_shifts:
R = len(spectrum)
shifts = np.arange(-R, R + 1) * 2 * np.pi / (f_max * (2 * R + 1)) * R
zero_idx = R
need_fun_at_zero = True
elif have_fun_at_zero:
zero_idx = np.where(np.isclose(shifts, 0.0))[0]
zero_idx = zero_idx[0] if len(zero_idx) > 0 else None
need_fun_at_zero = zero_idx is not None
# Take care of shifts close to zero if fun_at_zero was provided
if have_fun_at_zero and need_fun_at_zero:
# Only one shift may be zero at a time
shifts = np.concatenate([[shifts[zero_idx]], shifts[:zero_idx], shifts[zero_idx + 1 :]])
evals = np.array([fun_at_zero] + list(map(fun, shifts[1:])))
else:
if have_fun_at_zero and not need_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
evals = np.array(list(map(fun, shifts)))
L = len(shifts)
# Construct the coefficient matrix case by case
C1 = np.ones((L, 1))
C2 = np.cos(np.outer(shifts, spectrum))
C3 = np.sin(np.outer(shifts, spectrum))
C = np.hstack([C1, C2, C3])
# Solve the system of linear equations
cond = np.linalg.cond(C)
if cond > 1e8:
warnings.warn(
f"The condition number of the Fourier transform matrix is very large: {cond}.",
UserWarning,
)
W = np.linalg.solve(C, evals)
# Extract the Fourier coefficients
R = (L - 1) // 2
a0 = W[0]
a = W[1 : R + 1]
b = W[R + 1 :]
# Construct the Fourier series
def _reconstruction(x):
"""Univariate reconstruction based on arbitrary shifts."""
return a0 + np.dot(a, np.cos(spectrum * x)) + np.dot(b, np.sin(spectrum * x))
return _reconstruction
|
43,306 |
def test_graph_constructor_extra_nodes_in_edges():
nodes = pd.DataFrame(np.ones((5, 1)), index=[0, 1, 2, 3, 4])
edges = {
"a": pd.DataFrame({"source": [1], "target": [0]}, index=[0]),
"b": pd.DataFrame({"source": [4, 5], "target": [0, 2]}, index=[1, 2]),
}
with pytest.raises(ValueError):
g = StellarGraph(nodes=nodes, edges=edges)
# adding an extra node should fix things
nodes = pd.DataFrame(np.ones((6, 1)), index=[0, 1, 2, 3, 4, 5])
g = StellarGraph(nodes=nodes, edges=edges)
# removing the bad edge should also fix
nodes = pd.DataFrame(np.ones((5, 1)), index=[0, 1, 2, 3, 4])
edges = {
"a": pd.DataFrame({"source": [1], "target": [0]}, index=[0]),
"b": pd.DataFrame({"source": [4], "target": [0]}, index=[1]),
}
|
def test_graph_constructor_extra_nodes_in_edges():
nodes = pd.DataFrame(np.ones((5, 1)), index=[0, 1, 2, 3, 4])
edges = {
"a": pd.DataFrame({"source": [1], "target": [0]}, index=[0]),
"b": pd.DataFrame({"source": [4, 5], "target": [0, 2]}, index=[1, 2]),
}
with pytest.raises(ValueError):
g = StellarGraph(nodes, edges)
# adding an extra node should fix things
nodes = pd.DataFrame(np.ones((6, 1)), index=[0, 1, 2, 3, 4, 5])
g = StellarGraph(nodes=nodes, edges=edges)
# removing the bad edge should also fix
nodes = pd.DataFrame(np.ones((5, 1)), index=[0, 1, 2, 3, 4])
edges = {
"a": pd.DataFrame({"source": [1], "target": [0]}, index=[0]),
"b": pd.DataFrame({"source": [4], "target": [0]}, index=[1]),
}
|
34,538 |
def _create_from_endpoint_config(
endpoint_config: Optional[EndpointConfig],
) -> "rasa.shared.nlu.interpreter.NaturalLanguageInterpreter":
"""Instantiate a natural language interpreter based on its configuration."""
if endpoint_config is None:
return rasa.shared.nlu.interpreter.RegexInterpreter()
elif endpoint_config.type is None or endpoint_config.type.lower() == "http":
return RasaNLUHttpInterpreter(endpoint_config=endpoint_config)
else:
return _load_from_module_name_in_endpoint_config(endpoint_config)
|
def _create_from_endpoint_config(
endpoint_config: Optional[EndpointConfig],
) -> rasa.shared.nlu.interpreter.NaturalLanguageInterpreter:
"""Instantiate a natural language interpreter based on its configuration."""
if endpoint_config is None:
return rasa.shared.nlu.interpreter.RegexInterpreter()
elif endpoint_config.type is None or endpoint_config.type.lower() == "http":
return RasaNLUHttpInterpreter(endpoint_config=endpoint_config)
else:
return _load_from_module_name_in_endpoint_config(endpoint_config)
|
20,836 |
def get_configuration_string(manager_params, browser_params, versions):
"""Construct a well-formatted string for {manager,browser}params
Constructs a pretty printed string of all parameters. The config
dictionaries are split to try to avoid line wrapping for reasonably
size terminal windows.
"""
config_str = "\n\nOpenWPM Version: %s\nFirefox Version: %s\n" % versions
config_str += "\n========== Manager Configuration ==========\n"
config_str += json.dumps(manager_params, sort_keys=True,
indent=2, separators=(',', ': '))
config_str += "\n\n========== Browser Configuration ==========\n"
print_params = [deepcopy(x) for x in browser_params]
table_input = list()
profile_dirs = OrderedDict()
archive_dirs = OrderedDict()
js_config = OrderedDict()
profile_all_none = archive_all_none = True
for item in print_params:
browser_id = item['browser_id']
# Update print flags
if item['profile_tar'] is not None:
profile_all_none = False
if item['profile_archive_dir'] is not None:
archive_all_none = False
# Separate out long configuration parameters
profile_dirs[browser_id] = item.pop('profile_tar')
archive_dirs[browser_id] = item.pop('profile_archive_dir')
js_config[browser_id] = item.pop('js_instrument_settings')
# Copy items in sorted order
dct = OrderedDict()
dct[u'browser_id'] = browser_id
for key in sorted(item.keys()):
dct[key] = item[key]
table_input.append(dct)
key_dict = OrderedDict()
counter = 0
for key in table_input[0].keys():
key_dict[key] = counter
counter += 1
config_str += "Keys:\n"
config_str += json.dumps(key_dict, indent=2,
separators=(',', ': '))
config_str += '\n\n'
config_str += tabulate(table_input, headers=key_dict)
config_str += "\n\n========== JS Instrument Settings ==========\n"
config_str += json.dumps(js_config, indent=2, separators=(',', ': '))
config_str += "\n\n========== Input profile tar files ==========\n"
if profile_all_none:
config_str += " No profile tar files specified"
else:
config_str += json.dumps(profile_dirs, indent=2,
separators=(',', ': '))
config_str += "\n\n========== Output (archive) profile dirs ==========\n"
if archive_all_none:
config_str += " No profile archive directories specified"
else:
config_str += json.dumps(archive_dirs, indent=2,
separators=(',', ': '))
config_str += '\n\n'
return config_str
|
def get_configuration_string(manager_params, browser_params, versions):
"""Construct a well-formatted string for {manager,browser}params
Constructs a pretty printed string of all parameters. The config
dictionaries are split to try to avoid line wrapping for reasonably
size terminal windows.
"""
config_str = "\n\nOpenWPM Version: %s\nFirefox Version: %s\n" % versions
config_str += "\n========== Manager Configuration ==========\n"
config_str += json.dumps(manager_params, sort_keys=True,
indent=2, separators=(',', ': '))
config_str += "\n\n========== Browser Configuration ==========\n"
print_params = [deepcopy(x) for x in browser_params]
table_input = list()
profile_dirs = OrderedDict()
archive_dirs = OrderedDict()
js_config = OrderedDict()
profile_all_none = archive_all_none = True
for item in print_params:
browser_id = item['browser_id']
# Update print flags
if item['profile_tar'] is not None:
profile_all_none = False
if item['profile_archive_dir'] is not None:
archive_all_none = False
# Separate out long configuration parameters
profile_dirs[browser_id] = item.pop('profile_tar')
archive_dirs[browser_id] = item.pop('profile_archive_dir')
js_config[browser_id] = json.loads(item.pop('js_instrument_settings'))
# Copy items in sorted order
dct = OrderedDict()
dct[u'browser_id'] = browser_id
for key in sorted(item.keys()):
dct[key] = item[key]
table_input.append(dct)
key_dict = OrderedDict()
counter = 0
for key in table_input[0].keys():
key_dict[key] = counter
counter += 1
config_str += "Keys:\n"
config_str += json.dumps(key_dict, indent=2,
separators=(',', ': '))
config_str += '\n\n'
config_str += tabulate(table_input, headers=key_dict)
config_str += "\n\n========== JS Instrument Settings ==========\n"
config_str += json.dumps(js_config, indent=2, separators=(',', ': '))
config_str += "\n\n========== Input profile tar files ==========\n"
if profile_all_none:
config_str += " No profile tar files specified"
else:
config_str += json.dumps(profile_dirs, indent=2,
separators=(',', ': '))
config_str += "\n\n========== Output (archive) profile dirs ==========\n"
if archive_all_none:
config_str += " No profile archive directories specified"
else:
config_str += json.dumps(archive_dirs, indent=2,
separators=(',', ': '))
config_str += '\n\n'
return config_str
|
27,327 |
def _build_stub(method_name, method, attribute_name):
"""
Build a stub for a transplanted method.
A transplanted stub is a dummy method that get attached to a core class
(usually from :mod:`MDAnalysis.core.groups`) and raise a
:exc:`NoDataError`.
The stub mimics the original method for everything that as traits with the
documentation (docstring, name, signature). It gets overwritten by the
actual method when the later is transplanted at universe creation.
Parameters
----------
method_name: str
The name of the attribute in the destination class.
method: Callable
The method to be mimicked.
attribute_name: str
The name topology attribute that is required for the method to be
relevant (e.g. masses, charges, ...)
Returns
-------
The stub.
"""
def stub_method(self, *args, **kwargs):
message = (
f'{self.__class__.__name__}.{method_name}() '
f'not available; this requires {attribute_name}'
)
raise NoDataError(message)
annotation = textwrap.dedent("""\
.. note::
This requires the underlying topology to have {}. Otherwise, a
:exc:`~MDAnalysis.exceptions.NoDataError` is raised.
""".format(attribute_name))
# The first line of the original docstring is not indented, but the
# subsequent lines are. We want to dedent the whole docstring.
first_line, other_lines = method.__doc__.split('\n', 1)
stub_method.__doc__ = (
first_line + '\n'
+ textwrap.dedent(other_lines)
+ '\n\n' + annotation
)
stub_method.__name__ = method_name
stub_method.__signature__ = inspect_signature(method)
return stub_method
|
def _build_stub(method_name, method, attribute_name):
"""
Build a stub for a transplanted method.
A transplanted stub is a dummy method that get attached to a core class
(usually from :mod:`MDAnalysis.core.groups`) and raise a
:exc:`NoDataError`.
The stub mimics the original method for everything that as traits with the
documentation (docstring, name, signature). It gets overwritten by the
actual method when the latter is transplanted at universe creation.
Parameters
----------
method_name: str
The name of the attribute in the destination class.
method: Callable
The method to be mimicked.
attribute_name: str
The name topology attribute that is required for the method to be
relevant (e.g. masses, charges, ...)
Returns
-------
The stub.
"""
def stub_method(self, *args, **kwargs):
message = (
f'{self.__class__.__name__}.{method_name}() '
f'not available; this requires {attribute_name}'
)
raise NoDataError(message)
annotation = textwrap.dedent("""\
.. note::
This requires the underlying topology to have {}. Otherwise, a
:exc:`~MDAnalysis.exceptions.NoDataError` is raised.
""".format(attribute_name))
# The first line of the original docstring is not indented, but the
# subsequent lines are. We want to dedent the whole docstring.
first_line, other_lines = method.__doc__.split('\n', 1)
stub_method.__doc__ = (
first_line + '\n'
+ textwrap.dedent(other_lines)
+ '\n\n' + annotation
)
stub_method.__name__ = method_name
stub_method.__signature__ = inspect_signature(method)
return stub_method
|
57,797 |
def download_attachment_by_filename_command(args: Dict[str, Any]) -> dict:
case_id = args.get("case_id", None)
file_name = args.get("file_name", None)
if case_id is None:
raise ValueError("case id not specified")
if not file_name:
raise ValueError("file name not given")
attachment_id = ""
case_attachments = list_case_attachments(caseID=case_id).get("data", [])
for attachment in case_attachments:
if file_name in attachment.get("name", ""):
attachment_id = attachment.get("id", "")
file_name = attachment.get("name", "")
break
if not attachment_id:
raise ValueError("file name not found in case")
result = download_attachment(caseID=case_id, attachmentID=attachment_id)
return fileResult(file_name, result.content)
|
def download_attachment_by_filename_command(args: Dict[str, Any]) -> dict:
case_id = args.get("case_id")
file_name = args.get("file_name")
if case_id is None:
raise ValueError("case id not specified")
if not file_name:
raise ValueError("file name not given")
attachment_id = ""
case_attachments = list_case_attachments(caseID=case_id).get("data", [])
for attachment in case_attachments:
if file_name in attachment.get("name", ""):
attachment_id = attachment.get("id", "")
file_name = attachment.get("name", "")
break
if not attachment_id:
raise ValueError("file name not found in case")
result = download_attachment(caseID=case_id, attachmentID=attachment_id)
return fileResult(file_name, result.content)
|
38,528 |
def test_incompressible_flow_model_no_modification():
"""Test that the raw contact mechanics model with no modifications can be run with
no error messages. Failure of this test would signify rather fundamental problems
in the model.
"""
model = pp.IncompressibleFlow({"use_ad": True})
pp.run_stationary_model(model, {})
|
def test_incompressible_flow_model_no_modification():
"""Test that the raw incompressible flow model with no modifications can be run with
no error messages. Failure of this test would signify rather fundamental problems
in the model.
"""
model = pp.IncompressibleFlow({"use_ad": True})
pp.run_stationary_model(model, {})
|
32,038 |
def get_list_contact_count_by_id(args: dict, sg):
listID = args.get('list_id')
response = sg.client.marketing.lists._(listID).contacts.count.get()
if response.status_code == 200:
rBody = response.body
body = json.loads(rBody.decode("utf-8"))
ec = {'Sendgrid.ListCount': body}
md = tableToMarkdown('List contact count details ', body)
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': body,
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'Retrieving list contact count details has been failed: ' + str(response.body)
|
def get_list_contact_count_by_id(args: dict, sg):
listID = args.get('list_id')
response = sg.client.marketing.lists._(listID).contacts.count.get()
if response.status_code == 200:
rBody = response.body
body = json.loads(rBody.decode("utf-8"))
ec = {'Sendgrid.ListCount': body}
md = tableToMarkdown('List contact count details ', body)
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': body,
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'Failed to retrieve contact list count information: ' + str(response.body)
|
31,223 |
def gh_search_message_command(client: Client, args: Dict[str, Any]) -> CommandResults:
fields = argToList(args.get('fields'))
limit = args.get('limit')
sort = args.get('sort')
sortDir = args.get('sortDir')
offset = args.get('offset')
filters = json.loads(args.get('filters', "[]"))
args = {"filters": filters}
if fields is not None and len(fields) > 0:
if "eventId" not in fields:
fields.append("eventId")
args['fields'] = fields
if limit is not None:
args['limit'] = limit
if offset is not None:
args['offset'] = offset
if sort is not None:
args['sort'] = sort
if sortDir is not None:
args['sortDir'] = sortDir
results = client.search_events(args)
events = []
if fields is None or len(fields) == 0:
for event in results.get("results", []):
e = {
"ID": event.get("eventId"),
"From Address": event.get("source"),
"Mailbox": event.get("origin"),
"Return Path": event.get("sourcePath"),
"Subject": event.get("subject"),
"Occurred": event.get("timestamp")
}
policy_names = []
policy_actions = []
if event.get("flag") is not None:
for policy_id in event.get("flag", []):
policy = client.get_policy(policy_id).get("policy", {})
actions = []
for action in policy.get("actions"):
actions.append(action.get("type"))
policy_names.append(policy.get("name"))
policy_actions.extend(actions)
e['Policy Hits'] = policy_names
e['Policy Actions'] = policy_actions
if len(event.get('files', [])) > 0:
e['Has Attachments'] = True
else:
e['Has Attachments'] = False
if len(event.get('links', [])) > 0:
e['Has Links'] = True
else:
e['Has Links'] = False
events.append(e)
events_md = tableToMarkdown("Events", events, ["ID", "From Address", "Mailbox", "Return Path",
"Subject", "Policy Hits", "Policy Actions",
"Occurred", "Has Attachments", "Has Links"])
else:
events_md = tableToMarkdown("Events", results.get("results", []), fields)
result = {'Message': results.get("results"), 'SearchCount': results.get("total")}
return CommandResults(
readable_output=events_md,
outputs_prefix='GreatHorn',
outputs_key_field='eventId',
outputs=result
)
|
def gh_search_message_command(client: Client, args: Dict[str, Any]) -> CommandResults:
fields = argToList(args.get('fields'))
limit = args.get('limit')
sort = args.get('sort')
sortDir = args.get('sortDir')
offset = args.get('offset')
filters = json.loads(args.get('filters', "[]"))
args = {"filters": filters}
if fields is not None and len(fields) > 0:
if "eventId" not in fields:
fields.append("eventId")
args['fields'] = fields
if limit is not None:
args['limit'] = limit
if offset is not None:
args['offset'] = offset
if sort is not None:
args['sort'] = sort
if sortDir is not None:
args['sortDir'] = sortDir
results = client.search_events(args)
events = []
if fields is None or len(fields) == 0:
for event in results.get("results", []):
e = {
"ID": event.get("eventId"),
"From Address": event.get("source"),
"Mailbox": event.get("origin"),
"Return Path": event.get("sourcePath"),
"Subject": event.get("subject"),
"Occurred": event.get("timestamp")
}
policy_names = []
policy_actions = []
if event.get("flag") is not None:
for policy_id in event.get("flag", []):
policy = client.get_policy(policy_id).get("policy", {})
actions = []
for action in policy.get("actions"):
actions.append(action.get("type"))
policy_names.append(policy.get("name"))
policy_actions.extend(actions)
e['Policy Hits'] = policy_names
e['Policy Actions'] = policy_actions
if len(event.get('files', [])) > 0:
e['Has Attachments'] = True
else:
e['Has Attachments'] = False
if len(event.get('links', [])) > 0:
e['Has Links'] = True
else:
e['Has Links'] = False
events.append(e)
events_md = tableToMarkdown("Events", events, ["ID", "From Address", "Mailbox", "Return Path",
"Subject", "Policy Hits", "Policy Actions",
"Occurred", "Has Attachments", "Has Links"])
else:
events_md = tableToMarkdown("Events", results.get("results", []), fields)
result = {'Message': results.get("results"), 'SearchCount': results.get("total")}
return CommandResults(
readable_output=events_md,
outputs_prefix='GreatHorn.Message',
outputs_key_field='eventId',
outputs=result
)
|
1,263 |
def _signature_matches_extension(filename):
"""Check if signature aka magic number matches filename extension.
Parameters
----------
filename : str or os.PathLike
Path to the file to check
Returns
-------
matches : bool
- `True` if the filename extension is not recognized (not .gz nor .bz2)
- `True` if the magic number was successfully read and corresponds to
the format indicated by the extension.
- `False` otherwise.
error_message : str
An error message if opening the file failed or a mismatch is detected;
the empty string otherwise.
"""
signatures = {
".gz": {"signature": b"\x1f\x8b", "format_name": "gzip"},
".bz2": {"signature": b"\x42\x5a\x68", "format_name": "bzip2"}
}
filename = _stringify_path(filename)
*_, ext = splitext_addext(filename)
ext = ext.lower()
if ext not in signatures:
return True, ""
expected_signature = signatures[ext]["signature"]
try:
with open(filename, "rb") as fh:
found_signature = fh.read(len(expected_signature))
except Exception:
return False, f"Could not read file: {filename}"
if found_signature == expected_signature:
return True, ""
format_name = signatures[ext]["format_name"]
return False, f"File {filename} is not a {format_name} file"
|
def _signature_matches_extension(filename):
"""Check if signature aka magic number matches filename extension.
Parameters
----------
filename : str or os.PathLike
Path to the file to check
Returns
-------
matches : bool
- `True` if the filename extension is not recognized (not .gz nor .bz2)
- `True` if the magic number was successfully read and corresponds to
the format indicated by the extension.
- `False` otherwise.
error_message : str
An error message if opening the file failed or a mismatch is detected;
the empty string otherwise.
"""
signatures = {
".gz": {"signature": b"\x1f\x8b", "format_name": "gzip"},
".bz2": {"signature": b"\x42\x5a\x68", "format_name": "bzip2"}
}
filename = _stringify_path(filename)
*_, ext = splitext_addext(filename)
ext = ext.lower()
if ext not in signatures:
return True, ""
expected_signature = signatures[ext]["signature"]
try:
with open(filename, "rb") as fh:
found_signature = fh.read(len(expected_signature))
except OSError:
return False, f"Could not read file: {filename}"
if found_signature == expected_signature:
return True, ""
format_name = signatures[ext]["format_name"]
return False, f"File {filename} is not a {format_name} file"
|
39,399 |
def test_copy_vtk_array():
with pytest.raises(TypeError, match='Invalid type'):
pyvista.utilities.misc.copy_vtk_array([1, 2, 3])
value = 10
arr = vtk.vtkFloatArray()
arr.SetNumberOfValues(2)
arr.SetValue(0, value)
arr_copy = pyvista.utilities.misc.copy_vtk_array(arr, deep=True)
arr_copy.GetValue(0)
assert value == arr_copy.GetValue(0)
arr_copy_not_deep = pyvista.utilities.misc.copy_vtk_array(arr, deep=False)
new_value = 5
arr.SetValue(1, new_value)
arr_copy.GetValue(1)
assert new_value == arr_copy_not_deep.GetValue(1)
|
def test_copy_vtk_array():
with pytest.raises(TypeError, match='Invalid type'):
pyvista.utilities.misc.copy_vtk_array([1, 2, 3])
value = 10
arr = vtk.vtkFloatArray()
arr.SetNumberOfValues(2)
arr.SetValue(0, value)
arr_copy = pyvista.utilities.misc.copy_vtk_array(arr, deep=True)
assert arr_copy.GetNumberOfValues()
assert value == arr_copy.GetValue(0)
arr_copy_not_deep = pyvista.utilities.misc.copy_vtk_array(arr, deep=False)
new_value = 5
arr.SetValue(1, new_value)
arr_copy.GetValue(1)
assert new_value == arr_copy_not_deep.GetValue(1)
|
26,922 |
def chain(*tasks: Union[BaseOperator, "XComArg", Sequence[BaseOperator], Sequence["XComArg"]]):
r"""
Given a number of tasks, builds a dependency chain.
Support mix airflow.models.BaseOperator, List[airflow.models.BaseOperator], XComArg, and
List[airflow.models.XComArg]. If you want to chain between two List[airflow.models.BaseOperator]
or List[airflow.models.XComArg], you have to make sure they have same length.
.. code-block:: python
chain(t1, [t2, t3], [t4, t5], t6)
is equivalent to::
/ -> t2 -> t4 \
t1 -> t6
\ -> t3 -> t5 /
.. code-block:: python
t1.set_downstream(t2)
t1.set_downstream(t3)
t2.set_downstream(t4)
t3.set_downstream(t5)
t4.set_downstream(t6)
t5.set_downstream(t6)
:param tasks: List of tasks, List[airflow.models.BaseOperator], XComArg, or List[airflow.models.XComArg]
to set dependencies
:type tasks: List[airflow.models.BaseOperator], airflow.models.BaseOperator, List[airflow.models.XComArg],
or XComArg
"""
from airflow.models.xcom_arg import XComArg
for index, up_task in enumerate(tasks[:-1]):
down_task = tasks[index + 1]
if isinstance(up_task, (BaseOperator, XComArg)):
up_task.set_downstream(down_task)
continue
if isinstance(down_task, (BaseOperator, XComArg)):
down_task.set_upstream(up_task)
continue
if not isinstance(up_task, Sequence) or not isinstance(down_task, Sequence):
raise TypeError(
"Chain not supported between instances of {up_type} and {down_type}".format(
up_type=type(up_task), down_type=type(down_task)
)
)
up_task_list = up_task
down_task_list = down_task
if len(up_task_list) != len(down_task_list):
raise AirflowException(
f"Chain not supported different length Iterable "
f"but get {len(up_task_list)} and {len(down_task_list)}"
)
for up_t, down_t in zip(up_task_list, down_task_list):
up_t.set_downstream(down_t)
|
def chain(*tasks: Union[BaseOperator, "XComArg", Sequence[BaseOperator], Sequence["XComArg"]]):
r"""
Given a number of tasks, builds a dependency chain.
Support mix airflow.models.BaseOperator, List[airflow.models.BaseOperator], XComArg, and
List[airflow.models.XComArg]. If you want to chain between two List[airflow.models.BaseOperator]
or List[airflow.models.XComArg], you have to make sure they have same length.
.. code-block:: python
chain(t1, [t2, t3], [t4, t5], t6)
is equivalent to::
/ -> t2 -> t4 \
t1 -> t6
\ -> t3 -> t5 /
.. code-block:: python
t1.set_downstream(t2)
t1.set_downstream(t3)
t2.set_downstream(t4)
t3.set_downstream(t5)
t4.set_downstream(t6)
t5.set_downstream(t6)
:param tasks: List of tasks, List[airflow.models.BaseOperator], XComArg, or List[airflow.models.XComArg]
to set dependencies
:type tasks: List[airflow.models.BaseOperator], airflow.models.BaseOperator, List[airflow.models.XComArg],
or XComArg
"""
from airflow.models.xcom_arg import XComArg
for index, up_task in enumerate(tasks[:-1]):
down_task = tasks[index + 1]
if isinstance(up_task, (BaseOperator, XComArg)):
up_task.set_downstream(down_task)
continue
if isinstance(down_task, (BaseOperator, XComArg)):
down_task.set_upstream(up_task)
continue
if not isinstance(up_task, Sequence) or not isinstance(down_task, Sequence):
raise TypeError(
'Chain not supported between instances of {up_type} and {down_type}'.format(
up_type=type(up_task), down_type=type(down_task)
)
)
up_task_list = up_task
down_task_list = down_task
if len(up_task_list) != len(down_task_list):
raise AirflowException(
f"Chain not supported different length Iterable "
f"but get {len(up_task_list)} and {len(down_task_list)}"
)
for up_t, down_t in zip(up_task_list, down_task_list):
up_t.set_downstream(down_t)
|
736 |
def open_in_browser(
response: Union["scrapy.http.response.html.HtmlResponse", "scrapy.http.response.text.TextResponse"],
_openfunc: Callable[[str], Any] = webbrowser.open,
) -> Any:
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
from scrapy.http import HtmlResponse, TextResponse
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if b'<base' not in body:
repl = f'\\1<base href="{response.url}">'
body = re.sub(b"(<head.*?>)", to_bytes(repl), body)
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: "
f"{response.__class__.__name__}")
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc(f"file://{fname}")
|
def open_in_browser(
response: Union["scrapy.http.response.html.HtmlResponse", "scrapy.http.response.text.TextResponse"],
_openfunc: Callable[[str], Any] = webbrowser.open,
) -> Any:
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
from scrapy.http import HtmlResponse, TextResponse
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if b'<base' not in body:
repl = f'\\1<base href="{response.url}">'
body = re.sub(b"(<head(?:>|\s.*?>))", to_bytes(repl), body)
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: "
f"{response.__class__.__name__}")
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc(f"file://{fname}")
|
43,300 |
def test_mutag_load() -> None:
graphs, labels = MUTAG().load()
n_graphs = 188
assert len(graphs) == n_graphs
assert len(labels) == n_graphs # one label per graph
# get a list with the number of nodes in each graph
n_nodes = [g.number_of_nodes() for g in graphs]
# calculate average and max number of nodes across all graphs
n_avg_nodes = np.mean(n_nodes)
max_nodes = np.max(n_nodes)
# average number of nodes should be 17.93085... or approximately 18.
assert round(n_avg_nodes) == 18
# maximum number of nodes should be 28
assert max_nodes == 28
# There are two labels -1 and 1
assert len(np.unique(labels)) == 2
|
def test_mutag_load() -> None:
graphs, labels = MUTAG().load()
n_graphs = 188
assert len(graphs) == n_graphs
assert len(labels) == n_graphs # one label per graph
# get a list with the number of nodes in each graph
n_nodes = [g.number_of_nodes() for g in graphs]
# calculate average and max number of nodes across all graphs
n_avg_nodes = np.mean(n_nodes)
max_nodes = np.max(n_nodes)
# average number of nodes should be 17.93085... or approximately 18.
assert sum(n_nodes) == 3371
# maximum number of nodes should be 28
assert max_nodes == 28
# There are two labels -1 and 1
assert len(np.unique(labels)) == 2
|
7,863 |
def test_reduce(gnd_simple_chain):
ref_U5 = gnd_simple_chain["U235"]
ref_iodine = gnd_simple_chain["I135"]
ref_U5_yields = ref_U5.yield_data
no_depth = gnd_simple_chain.reduce(["U235", "I135"], 0)
# We should get a chain just containing U235 and I135
assert len(no_depth) == 2
assert set(no_depth.reactions) == set(gnd_simple_chain.reactions)
u5_round0 = no_depth["U235"]
assert u5_round0.n_decay_modes == ref_U5.n_decay_modes
for newmode, refmode in zip(u5_round0.decay_modes, ref_U5.decay_modes):
assert newmode.target is None
assert newmode.type == refmode.type, newmode
assert newmode.branching_ratio == refmode.branching_ratio, newmode
assert u5_round0.n_reaction_paths == ref_U5.n_reaction_paths
for newrxn, refrxn in zip(u5_round0.reactions, ref_U5.reactions):
assert newrxn.target is None, newrxn
assert newrxn.type == refrxn.type, newrxn
assert newrxn.Q == refrxn.Q, newrxn
assert newrxn.branching_ratio == refrxn.branching_ratio, newrxn
assert u5_round0.yield_data is not None
assert u5_round0.yield_data.products == ("I135", )
assert u5_round0.yield_data.yield_matrix == (
ref_U5_yields.yield_matrix[:, ref_U5_yields.products.index("I135")]
)
bareI5 = no_depth["I135"]
assert bareI5.n_decay_modes == ref_iodine.n_decay_modes
for newmode, refmode in zip(bareI5.decay_modes, ref_iodine.decay_modes):
assert newmode.target is None
assert newmode.type == refmode.type, newmode
assert newmode.branching_ratio == refmode.branching_ratio, newmode
assert bareI5.n_reaction_paths == ref_iodine.n_reaction_paths
for newrxn, refrxn in zip(bareI5.reactions, ref_iodine.reactions):
assert newrxn.target is None, newrxn
assert newrxn.type == refrxn.type, newrxn
assert newrxn.Q == refrxn.Q, newrxn
assert newrxn.branching_ratio == refrxn.branching_ratio, newrxn
follow_u5 = gnd_simple_chain.reduce(["U235"], 1)
u5_round1 = follow_u5["U235"]
assert u5_round1.decay_modes == ref_U5.decay_modes
assert u5_round1.reactions == ref_U5.reactions
assert u5_round1.yield_data is not None
assert (
u5_round1.yield_data.yield_matrix == ref_U5_yields.yield_matrix
).all()
# Per the chain_simple.xml
# I135 -> Xe135 -> Cs135
# I135 -> Xe136
# No limit on depth
iodine_chain = gnd_simple_chain.reduce(["I135"])
truncated_iodine = gnd_simple_chain.reduce(["I135"], 1)
assert len(iodine_chain) == 4
assert len(truncated_iodine) == 3
assert set(iodine_chain.nuclide_dict) == {
"I135", "Xe135", "Xe136", "Cs135"}
assert set(truncated_iodine.nuclide_dict) == {"I135", "Xe135", "Xe136"}
assert iodine_chain.reactions == ["(n,gamma)"]
assert iodine_chain["I135"].decay_modes == ref_iodine.decay_modes
assert iodine_chain["I135"].reactions == ref_iodine.reactions
for mode in truncated_iodine["Xe135"].decay_modes:
assert mode.target is None
# Test that no FissionYieldDistribution is made if there are no
# fission products
u5_noyields = gnd_simple_chain.reduce(["U235"], 0)["U235"]
assert u5_noyields.yield_data is None
# Check early termination if the eventual full chain
# is specified by using the iodine isotopes
new_iodine = gnd_simple_chain.reduce(set(iodine_chain.nuclide_dict))
assert set(iodine_chain.nuclide_dict) == set(new_iodine.nuclide_dict)
# Failure if some requested isotopes not in chain
with pytest.raises(IndexError, match=".*not found.*Xx999"):
gnd_simple_chain.reduce(["U235", "Xx999"])
|
def test_reduce(gnd_simple_chain):
ref_U5 = gnd_simple_chain["U235"]
ref_iodine = gnd_simple_chain["I135"]
ref_U5_yields = ref_U5.yield_data
no_depth = gnd_simple_chain.reduce(["U235", "I135"], 0)
# We should get a chain just containing U235 and I135
assert len(no_depth) == 2
assert set(no_depth.reactions) == set(gnd_simple_chain.reactions)
u5_round0 = no_depth["U235"]
assert u5_round0.n_decay_modes == ref_U5.n_decay_modes
for newmode, refmode in zip(u5_round0.decay_modes, ref_U5.decay_modes):
assert newmode.target is None
assert newmode.type == refmode.type, newmode
assert newmode.branching_ratio == refmode.branching_ratio, newmode
assert u5_round0.n_reaction_paths == ref_U5.n_reaction_paths
for newrxn, refrxn in zip(u5_round0.reactions, ref_U5.reactions):
assert newrxn.target is None, newrxn
assert newrxn.type == refrxn.type, newrxn
assert newrxn.Q == refrxn.Q, newrxn
assert newrxn.branching_ratio == refrxn.branching_ratio, newrxn
assert u5_round0.yield_data is not None
assert u5_round0.yield_data.products == ("I135",)
assert u5_round0.yield_data.yield_matrix == (
ref_U5_yields.yield_matrix[:, ref_U5_yields.products.index("I135")]
)
bareI5 = no_depth["I135"]
assert bareI5.n_decay_modes == ref_iodine.n_decay_modes
for newmode, refmode in zip(bareI5.decay_modes, ref_iodine.decay_modes):
assert newmode.target is None
assert newmode.type == refmode.type, newmode
assert newmode.branching_ratio == refmode.branching_ratio, newmode
assert bareI5.n_reaction_paths == ref_iodine.n_reaction_paths
for newrxn, refrxn in zip(bareI5.reactions, ref_iodine.reactions):
assert newrxn.target is None, newrxn
assert newrxn.type == refrxn.type, newrxn
assert newrxn.Q == refrxn.Q, newrxn
assert newrxn.branching_ratio == refrxn.branching_ratio, newrxn
follow_u5 = gnd_simple_chain.reduce(["U235"], 1)
u5_round1 = follow_u5["U235"]
assert u5_round1.decay_modes == ref_U5.decay_modes
assert u5_round1.reactions == ref_U5.reactions
assert u5_round1.yield_data is not None
assert (
u5_round1.yield_data.yield_matrix == ref_U5_yields.yield_matrix
).all()
# Per the chain_simple.xml
# I135 -> Xe135 -> Cs135
# I135 -> Xe136
# No limit on depth
iodine_chain = gnd_simple_chain.reduce(["I135"])
truncated_iodine = gnd_simple_chain.reduce(["I135"], 1)
assert len(iodine_chain) == 4
assert len(truncated_iodine) == 3
assert set(iodine_chain.nuclide_dict) == {
"I135", "Xe135", "Xe136", "Cs135"}
assert set(truncated_iodine.nuclide_dict) == {"I135", "Xe135", "Xe136"}
assert iodine_chain.reactions == ["(n,gamma)"]
assert iodine_chain["I135"].decay_modes == ref_iodine.decay_modes
assert iodine_chain["I135"].reactions == ref_iodine.reactions
for mode in truncated_iodine["Xe135"].decay_modes:
assert mode.target is None
# Test that no FissionYieldDistribution is made if there are no
# fission products
u5_noyields = gnd_simple_chain.reduce(["U235"], 0)["U235"]
assert u5_noyields.yield_data is None
# Check early termination if the eventual full chain
# is specified by using the iodine isotopes
new_iodine = gnd_simple_chain.reduce(set(iodine_chain.nuclide_dict))
assert set(iodine_chain.nuclide_dict) == set(new_iodine.nuclide_dict)
# Failure if some requested isotopes not in chain
with pytest.raises(IndexError, match=".*not found.*Xx999"):
gnd_simple_chain.reduce(["U235", "Xx999"])
|
4,140 |
def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None,
exclude_failures=False, show_all_warnings=False, **options):
"""
Compile a set of source modules into C/C++ files and return a list of distutils
Extension objects for them.
:param module_list: As module list, pass either a glob pattern, a list of glob
patterns or a list of Extension objects. The latter
allows you to configure the extensions separately
through the normal distutils options.
You can also pass Extension objects that have
glob patterns as their sources. Then, cythonize
will resolve the pattern and create a
copy of the Extension for every matching file.
:param exclude: When passing glob patterns as ``module_list``, you can exclude certain
module names explicitly by passing them into the ``exclude`` option.
:param nthreads: The number of concurrent builds for parallel compilation
(requires the ``multiprocessing`` module).
:param aliases: If you want to use compiler directives like ``# distutils: ...`` but
can only know at compile time (when running the ``setup.py``) which values
to use, you can use aliases and pass a dictionary mapping those aliases
to Python strings when calling :func:`cythonize`. As an example, say you
want to use the compiler
directive ``# distutils: include_dirs = ../static_libs/include/``
but this path isn't always fixed and you want to find it when running
the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``,
find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python
variable called ``foo`` as a string, and then call
``cythonize(..., aliases={'MY_HEADERS': foo})``.
:param quiet: If True, Cython won't print error, warning, or status messages during the
compilation.
:param force: Forces the recompilation of the Cython modules, even if the timestamps
don't indicate that a recompilation is necessary.
:param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this
will be determined at a per-file level based on compiler directives. This
affects only modules found based on file names. Extension instances passed
into :func:`cythonize` will not be changed. It is recommended to rather
use the compiler directive ``# distutils: language = c++`` than this option.
:param exclude_failures: For a broad 'try to compile' mode that ignores compilation
failures and simply excludes the failed extensions,
pass ``exclude_failures=True``. Note that this only
really makes sense for compiling ``.py`` files which can also
be used without compilation.
:param show_all_warnings: By default, not all Cython warnings are printed.
Set to true to show all warnings.
:param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py``
files compiled. The HTML file gives an indication
of how much Python interaction there is in
each of the source code lines, compared to plain C code.
It also allows you to see the C/C++ code
generated for each line of Cython code. This report is invaluable when
optimizing a function for speed,
and for determining when to :ref:`release the GIL <nogil>`:
in general, a ``nogil`` block may contain only "white" code.
See examples in :ref:`determining_where_to_add_types` or
:ref:`primes`.
:param annotate-fullc: If ``True`` will produce a colorized HTML version of
the source which includes entire generated C/C++-code.
:param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this:
``compiler_directives={'embedsignature': True}``.
See :ref:`compiler-directives`.
:param depfile: produce depfiles for the sources if True.
"""
if exclude is None:
exclude = []
if 'include_path' not in options:
options['include_path'] = ['.']
if 'common_utility_include_dir' in options:
safe_makedirs(options['common_utility_include_dir'])
depfile = options.pop('depfile', None)
if pythran is None:
pythran_options = None
else:
pythran_options = CompilationOptions(**options)
pythran_options.cplus = True
pythran_options.np_pythran = True
c_options = CompilationOptions(**options)
cpp_options = CompilationOptions(**options); cpp_options.cplus = True
ctx = Context.from_options(c_options)
options = c_options
module_list, module_metadata = create_extension_list(
module_list,
exclude=exclude,
ctx=ctx,
quiet=quiet,
exclude_failures=exclude_failures,
language=language,
aliases=aliases)
fix_windows_unicode_modules(module_list)
deps = create_dependency_tree(ctx, quiet=quiet)
build_dir = getattr(options, 'build_dir', None)
def copy_to_build_dir(filepath, root=os.getcwd()):
filepath_abs = os.path.abspath(filepath)
if os.path.isabs(filepath):
filepath = filepath_abs
if filepath_abs.startswith(root):
# distutil extension depends are relative to cwd
mod_dir = join_path(build_dir,
os.path.dirname(_relpath(filepath, root)))
copy_once_if_newer(filepath_abs, mod_dir)
modules_by_cfile = collections.defaultdict(list)
to_compile = []
for m in module_list:
if build_dir:
for dep in m.depends:
copy_to_build_dir(dep)
cy_sources = [
source for source in m.sources
if os.path.splitext(source)[1] in ('.pyx', '.py')]
if len(cy_sources) == 1:
# normal "special" case: believe the Extension module name to allow user overrides
full_module_name = m.name
else:
# infer FQMN from source files
full_module_name = None
new_sources = []
for source in m.sources:
base, ext = os.path.splitext(source)
if ext in ('.pyx', '.py'):
if m.np_pythran:
c_file = base + '.cpp'
options = pythran_options
elif m.language == 'c++':
c_file = base + '.cpp'
options = cpp_options
else:
c_file = base + '.c'
options = c_options
# setup for out of place build directory if enabled
if build_dir:
if os.path.isabs(c_file):
c_file = os.path.splitdrive(c_file)[1]
c_file = c_file.split(os.sep, 1)[1]
c_file = os.path.join(build_dir, c_file)
dir = os.path.dirname(c_file)
safe_makedirs_once(dir)
# write out the depfile, if requested
if depfile:
src_base_dir, _ = os.path.split(source)
relpaths = [os.path.relpath(fname, src_base_dir)
for fname in deps.all_dependencies(source) ]
depline = os.path.split(c_file)[1] + ": "
depline += " \ \n".join(relpaths) + "\n"
with open(c_file+'.dep', 'w') as outfile:
outfile.write(depline)
if os.path.exists(c_file):
c_timestamp = os.path.getmtime(c_file)
else:
c_timestamp = -1
# Priority goes first to modified files, second to direct
# dependents, and finally to indirect dependents.
if c_timestamp < deps.timestamp(source):
dep_timestamp, dep = deps.timestamp(source), source
priority = 0
else:
dep_timestamp, dep = deps.newest_dependency(source)
priority = 2 - (dep in deps.immediate_dependencies(source))
if force or c_timestamp < dep_timestamp:
if not quiet and not force:
if source == dep:
print(u"Compiling %s because it changed." % Utils.decode_filename(source))
else:
print(u"Compiling %s because it depends on %s." % (
Utils.decode_filename(source),
Utils.decode_filename(dep),
))
if not force and options.cache:
fingerprint = deps.transitive_fingerprint(source, m, options)
else:
fingerprint = None
to_compile.append((
priority, source, c_file, fingerprint, quiet,
options, not exclude_failures, module_metadata.get(m.name),
full_module_name, show_all_warnings))
new_sources.append(c_file)
modules_by_cfile[c_file].append(m)
else:
new_sources.append(source)
if build_dir:
copy_to_build_dir(source)
m.sources = new_sources
if options.cache:
if not os.path.exists(options.cache):
os.makedirs(options.cache)
to_compile.sort()
# Drop "priority" component of "to_compile" entries and add a
# simple progress indicator.
N = len(to_compile)
progress_fmt = "[{0:%d}/{1}] " % len(str(N))
for i in range(N):
progress = progress_fmt.format(i+1, N)
to_compile[i] = to_compile[i][1:] + (progress,)
if N <= 1:
nthreads = 0
if nthreads:
import multiprocessing
pool = multiprocessing.Pool(
nthreads, initializer=_init_multiprocessing_helper)
# This is a bit more involved than it should be, because KeyboardInterrupts
# break the multiprocessing workers when using a normal pool.map().
# See, for example:
# https://noswap.com/blog/python-multiprocessing-keyboardinterrupt
try:
result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
pool.close()
while not result.ready():
try:
result.get(99999) # seconds
except multiprocessing.TimeoutError:
pass
except KeyboardInterrupt:
pool.terminate()
raise
pool.join()
else:
for args in to_compile:
cythonize_one(*args)
if exclude_failures:
failed_modules = set()
for c_file, modules in modules_by_cfile.items():
if not os.path.exists(c_file):
failed_modules.update(modules)
elif os.path.getsize(c_file) < 200:
f = io_open(c_file, 'r', encoding='iso8859-1')
try:
if f.read(len('#error ')) == '#error ':
# dead compilation result
failed_modules.update(modules)
finally:
f.close()
if failed_modules:
for module in failed_modules:
module_list.remove(module)
print(u"Failed compilations: %s" % ', '.join(sorted([
module.name for module in failed_modules])))
if options.cache:
cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
# cythonize() is often followed by the (non-Python-buffered)
# compiler output, flush now to avoid interleaving output.
sys.stdout.flush()
return module_list
|
def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None,
exclude_failures=False, show_all_warnings=False, **options):
"""
Compile a set of source modules into C/C++ files and return a list of distutils
Extension objects for them.
:param module_list: As module list, pass either a glob pattern, a list of glob
patterns or a list of Extension objects. The latter
allows you to configure the extensions separately
through the normal distutils options.
You can also pass Extension objects that have
glob patterns as their sources. Then, cythonize
will resolve the pattern and create a
copy of the Extension for every matching file.
:param exclude: When passing glob patterns as ``module_list``, you can exclude certain
module names explicitly by passing them into the ``exclude`` option.
:param nthreads: The number of concurrent builds for parallel compilation
(requires the ``multiprocessing`` module).
:param aliases: If you want to use compiler directives like ``# distutils: ...`` but
can only know at compile time (when running the ``setup.py``) which values
to use, you can use aliases and pass a dictionary mapping those aliases
to Python strings when calling :func:`cythonize`. As an example, say you
want to use the compiler
directive ``# distutils: include_dirs = ../static_libs/include/``
but this path isn't always fixed and you want to find it when running
the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``,
find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python
variable called ``foo`` as a string, and then call
``cythonize(..., aliases={'MY_HEADERS': foo})``.
:param quiet: If True, Cython won't print error, warning, or status messages during the
compilation.
:param force: Forces the recompilation of the Cython modules, even if the timestamps
don't indicate that a recompilation is necessary.
:param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this
will be determined at a per-file level based on compiler directives. This
affects only modules found based on file names. Extension instances passed
into :func:`cythonize` will not be changed. It is recommended to rather
use the compiler directive ``# distutils: language = c++`` than this option.
:param exclude_failures: For a broad 'try to compile' mode that ignores compilation
failures and simply excludes the failed extensions,
pass ``exclude_failures=True``. Note that this only
really makes sense for compiling ``.py`` files which can also
be used without compilation.
:param show_all_warnings: By default, not all Cython warnings are printed.
Set to true to show all warnings.
:param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py``
files compiled. The HTML file gives an indication
of how much Python interaction there is in
each of the source code lines, compared to plain C code.
It also allows you to see the C/C++ code
generated for each line of Cython code. This report is invaluable when
optimizing a function for speed,
and for determining when to :ref:`release the GIL <nogil>`:
in general, a ``nogil`` block may contain only "white" code.
See examples in :ref:`determining_where_to_add_types` or
:ref:`primes`.
:param annotate-fullc: If ``True`` will produce a colorized HTML version of
the source which includes entire generated C/C++-code.
:param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this:
``compiler_directives={'embedsignature': True}``.
See :ref:`compiler-directives`.
:param depfile: produce depfiles for the sources if True.
"""
if exclude is None:
exclude = []
if 'include_path' not in options:
options['include_path'] = ['.']
if 'common_utility_include_dir' in options:
safe_makedirs(options['common_utility_include_dir'])
depfile = options.pop('depfile', None)
if pythran is None:
pythran_options = None
else:
pythran_options = CompilationOptions(**options)
pythran_options.cplus = True
pythran_options.np_pythran = True
c_options = CompilationOptions(**options)
cpp_options = CompilationOptions(**options); cpp_options.cplus = True
ctx = Context.from_options(c_options)
options = c_options
module_list, module_metadata = create_extension_list(
module_list,
exclude=exclude,
ctx=ctx,
quiet=quiet,
exclude_failures=exclude_failures,
language=language,
aliases=aliases)
fix_windows_unicode_modules(module_list)
deps = create_dependency_tree(ctx, quiet=quiet)
build_dir = getattr(options, 'build_dir', None)
def copy_to_build_dir(filepath, root=os.getcwd()):
filepath_abs = os.path.abspath(filepath)
if os.path.isabs(filepath):
filepath = filepath_abs
if filepath_abs.startswith(root):
# distutil extension depends are relative to cwd
mod_dir = join_path(build_dir,
os.path.dirname(_relpath(filepath, root)))
copy_once_if_newer(filepath_abs, mod_dir)
modules_by_cfile = collections.defaultdict(list)
to_compile = []
for m in module_list:
if build_dir:
for dep in m.depends:
copy_to_build_dir(dep)
cy_sources = [
source for source in m.sources
if os.path.splitext(source)[1] in ('.pyx', '.py')]
if len(cy_sources) == 1:
# normal "special" case: believe the Extension module name to allow user overrides
full_module_name = m.name
else:
# infer FQMN from source files
full_module_name = None
new_sources = []
for source in m.sources:
base, ext = os.path.splitext(source)
if ext in ('.pyx', '.py'):
if m.np_pythran:
c_file = base + '.cpp'
options = pythran_options
elif m.language == 'c++':
c_file = base + '.cpp'
options = cpp_options
else:
c_file = base + '.c'
options = c_options
# setup for out of place build directory if enabled
if build_dir:
if os.path.isabs(c_file):
c_file = os.path.splitdrive(c_file)[1]
c_file = c_file.split(os.sep, 1)[1]
c_file = os.path.join(build_dir, c_file)
dir = os.path.dirname(c_file)
safe_makedirs_once(dir)
# write out the depfile, if requested
if depfile:
src_base_dir, _ = os.path.split(source)
relpaths = [os.path.relpath(fname, src_base_dir)
for fname in deps.all_dependencies(source) ]
depline = os.path.split(c_file)[1] + ": \\\n "
depline += " \ \n".join(relpaths) + "\n"
with open(c_file+'.dep', 'w') as outfile:
outfile.write(depline)
if os.path.exists(c_file):
c_timestamp = os.path.getmtime(c_file)
else:
c_timestamp = -1
# Priority goes first to modified files, second to direct
# dependents, and finally to indirect dependents.
if c_timestamp < deps.timestamp(source):
dep_timestamp, dep = deps.timestamp(source), source
priority = 0
else:
dep_timestamp, dep = deps.newest_dependency(source)
priority = 2 - (dep in deps.immediate_dependencies(source))
if force or c_timestamp < dep_timestamp:
if not quiet and not force:
if source == dep:
print(u"Compiling %s because it changed." % Utils.decode_filename(source))
else:
print(u"Compiling %s because it depends on %s." % (
Utils.decode_filename(source),
Utils.decode_filename(dep),
))
if not force and options.cache:
fingerprint = deps.transitive_fingerprint(source, m, options)
else:
fingerprint = None
to_compile.append((
priority, source, c_file, fingerprint, quiet,
options, not exclude_failures, module_metadata.get(m.name),
full_module_name, show_all_warnings))
new_sources.append(c_file)
modules_by_cfile[c_file].append(m)
else:
new_sources.append(source)
if build_dir:
copy_to_build_dir(source)
m.sources = new_sources
if options.cache:
if not os.path.exists(options.cache):
os.makedirs(options.cache)
to_compile.sort()
# Drop "priority" component of "to_compile" entries and add a
# simple progress indicator.
N = len(to_compile)
progress_fmt = "[{0:%d}/{1}] " % len(str(N))
for i in range(N):
progress = progress_fmt.format(i+1, N)
to_compile[i] = to_compile[i][1:] + (progress,)
if N <= 1:
nthreads = 0
if nthreads:
import multiprocessing
pool = multiprocessing.Pool(
nthreads, initializer=_init_multiprocessing_helper)
# This is a bit more involved than it should be, because KeyboardInterrupts
# break the multiprocessing workers when using a normal pool.map().
# See, for example:
# https://noswap.com/blog/python-multiprocessing-keyboardinterrupt
try:
result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
pool.close()
while not result.ready():
try:
result.get(99999) # seconds
except multiprocessing.TimeoutError:
pass
except KeyboardInterrupt:
pool.terminate()
raise
pool.join()
else:
for args in to_compile:
cythonize_one(*args)
if exclude_failures:
failed_modules = set()
for c_file, modules in modules_by_cfile.items():
if not os.path.exists(c_file):
failed_modules.update(modules)
elif os.path.getsize(c_file) < 200:
f = io_open(c_file, 'r', encoding='iso8859-1')
try:
if f.read(len('#error ')) == '#error ':
# dead compilation result
failed_modules.update(modules)
finally:
f.close()
if failed_modules:
for module in failed_modules:
module_list.remove(module)
print(u"Failed compilations: %s" % ', '.join(sorted([
module.name for module in failed_modules])))
if options.cache:
cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
# cythonize() is often followed by the (non-Python-buffered)
# compiler output, flush now to avoid interleaving output.
sys.stdout.flush()
return module_list
|
43,271 |
def set_seed(seed, set_np_seed=True, set_tf_seed=True, set_random_seed=True):
"""
Set the seed for all possible randomness in StellarGraph. Note that this
also sets the global random seed for the following external modules:
* numpy
* tensorflow
* random
Args:
seed (int, optional): seed value
set_np_seed (bool, default True): If true, mutate the global numpy seed
set_tf_seed (bool, default True): If true, mutate the global tensorflow seed
set_random_seed (bool, default True): If true, mutate the global random module seed
"""
global _sg_seed
_sg_seed = seed
if set_np_seed:
np.random.seed(seed)
if set_tf_seed:
tf.random.set_seed(seed)
if set_random_seed:
random.seed(seed)
|
def set_seed(seed, set_numpy=True, set_tensorflow=True, set_random=True):
"""
Set the seed for all possible randomness in StellarGraph. Note that this
also sets the global random seed for the following external modules:
* numpy
* tensorflow
* random
Args:
seed (int, optional): seed value
set_np_seed (bool, default True): If true, mutate the global numpy seed
set_tf_seed (bool, default True): If true, mutate the global tensorflow seed
set_random_seed (bool, default True): If true, mutate the global random module seed
"""
global _sg_seed
_sg_seed = seed
if set_np_seed:
np.random.seed(seed)
if set_tf_seed:
tf.random.set_seed(seed)
if set_random_seed:
random.seed(seed)
|
32,345 |
def main():
try:
if demisto.command() == "fetch-indicators":
fetch_indicators()
elif demisto.command() == "reset-data-stream":
days = demisto.getArg("reset")
days = int(days)
new_date = reset_data_stream(int(days))
demisto.results(new_date)
elif demisto.command() == "test-module":
connect()
return_results("ok")
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}")
|
def main():
try:
if demisto.command() == "fetch-indicators":
fetch_indicators()
elif demisto.command() == "reset-data-stream":
days = arg_to_number(demisto.getArg("reset"))
new_date = reset_data_stream(int(days))
demisto.results(new_date)
elif demisto.command() == "test-module":
connect()
return_results("ok")
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}")
|
42,818 |
def copy_dir(source_dir, backup_path):
"""
Copy dotfolder from $HOME.
"""
invalid = set(Constants.INVALIDS)
if len(invalid.intersection(set(source_dir.split("/")))) != 0:
return
if "Application\ Support" not in source_dir:
command = "cp -aRp '" + source_dir + "' '" + backup_path + "/" + source_dir.split("/")[-2] + "'"
elif "Sublime" in source_dir:
command = "cp -aRp '" + source_dir + "' '" + backup_path + "/" + source_dir.split("/")[-3] + "'"
else:
command = "cp -a '" + source_dir + "' '" + backup_path + "/'"
process = sp.run(command, shell=True, stdout=sp.PIPE)
return process
|
def copy_dir(source_dir, backup_path):
"""
Copy dotfolder from $HOME.
"""
invalid = set(Constants.INVALID_DIRS)
if len(invalid.intersection(set(source_dir.split("/")))) != 0:
return
if "Application\ Support" not in source_dir:
command = "cp -aRp '" + source_dir + "' '" + backup_path + "/" + source_dir.split("/")[-2] + "'"
elif "Sublime" in source_dir:
command = "cp -aRp '" + source_dir + "' '" + backup_path + "/" + source_dir.split("/")[-3] + "'"
else:
command = "cp -a '" + source_dir + "' '" + backup_path + "/'"
process = sp.run(command, shell=True, stdout=sp.PIPE)
return process
|
3,745 |
def deprecate_with_doc(msg):
"""
Returns new _Deprecate class object.
The object can be used to Issue a DeprecationWarning, by passing `func`
as arguement,this adds warning to `old_name`'s docstring, rebinds
``old_name.__name__`` and returns the new function object.
This function may also be used as a decorator.
See Also
--------
deprecate`
Parameters
----------
message : str
Additional explanation of the deprecation. Displayed in the
docstring after the warning.
Returns
-------
_Deprecate object : object
The _Deprecate class object.
Examples
--------
Note that ``olduint`` returns a value after printing DeprecationWarning
with msg:
>>>oldobj = np.deprecate_with_doc("Use np.int_ instead.")
>>>olduint = oldobj(np.uint)
>>>DeprecationWarning: `uint64` is deprecated! #may vary
... Use np.int_ instead.
>>>olduint(6)
>>>6
"""
return _Deprecate(message=msg)
|
def deprecate_with_doc(msg):
"""
Returns new _Deprecate class object.
The object can be used to Issue a DeprecationWarning, by passing `func`
as arguement,this adds warning to `old_name`'s docstring, rebinds
``old_name.__name__`` and returns the new function object.
This function may also be used as a decorator.
See Also
--------
deprecate`
Parameters
----------
message : str
Additional explanation of the deprecation. Displayed in the
docstring after the warning.
Returns
-------
_Deprecate object : object
The _Deprecate class object.
Examples
--------
Note that ``olduint`` returns a value after printing DeprecationWarning
with msg:
>>> oldobj = np.deprecate_with_doc("Use np.int_ instead.")
>>>olduint = oldobj(np.uint)
>>>DeprecationWarning: `uint64` is deprecated! #may vary
... Use np.int_ instead.
>>>olduint(6)
>>>6
"""
return _Deprecate(message=msg)
|
42,966 |
def random_symplectic(N, passive=False, block_diag=False, scale=1.0):
r"""Random symplectic matrix representing a Gaussian transformation.
The squeezing parameters :math:`r` for active transformations are randomly
sampled from the standard normal distribution, while passive transformations
are randomly sampled from the Haar measure. Note that for the Symplectic
group there is no notion of Haar measure since this is group is not compact.
Args:
N (int): number of modes
passive (bool): If True, returns a passive Gaussian transformation (i.e.,
one that preserves photon number). If False (default), returns an active
transformation.
block_diag (bool): If True, uses passive Gaussian transformations that are orthogonal
instead of unitary. This implies that the positions :math:`q` do not mix with
the momenta :math:`p` and thus the symplectic operator is block diagonal
scale (float): Sets the scale of the random values used as squeezing parameters.
They will range from 0 to :math:`\sqrt{2}`*scale
Returns:
array: random :math:`2N\times 2N` symplectic matrix
"""
U = random_interferometer(N, real=block_diag)
O = np.vstack([np.hstack([U.real, -U.imag]), np.hstack([U.imag, U.real])])
if passive:
return O
U = random_interferometer(N, real=block_diag)
P = np.vstack([np.hstack([U.real, -U.imag]), np.hstack([U.imag, U.real])])
r = scale * np.abs(randnc(N))
Sq = np.diag(np.concatenate([np.exp(-r), np.exp(r)]))
return O @ Sq @ P
|
def random_symplectic(N, passive=False, block_diag=False, scale=1.0):
r"""Random symplectic matrix representing a Gaussian transformation.
The squeezing parameters :math:`r` for active transformations are randomly
sampled from the standard normal distribution, while passive transformations
are randomly sampled from the Haar measure. Note that for the Symplectic
group there is no notion of Haar measure since this is group is not compact.
Args:
N (int): number of modes
passive (bool): If True, returns a passive Gaussian transformation (i.e.,
one that preserves photon number). If False (default), returns an active
transformation.
block_diag (bool): If True, uses passive Gaussian transformations that are orthogonal
instead of unitary. This implies that the positions :math:`q` do not mix with
the momenta :math:`p` and thus the symplectic operator is block diagonal
scale (float): Sets the scale of the random values used as squeezing parameters.
They will range from 0 to :math:`\sqrt{2}\texttt{scale}`
Returns:
array: random :math:`2N\times 2N` symplectic matrix
"""
U = random_interferometer(N, real=block_diag)
O = np.vstack([np.hstack([U.real, -U.imag]), np.hstack([U.imag, U.real])])
if passive:
return O
U = random_interferometer(N, real=block_diag)
P = np.vstack([np.hstack([U.real, -U.imag]), np.hstack([U.imag, U.real])])
r = scale * np.abs(randnc(N))
Sq = np.diag(np.concatenate([np.exp(-r), np.exp(r)]))
return O @ Sq @ P
|
33,789 |
def make_fastapi_class_based_view(fastapi_app, cls: Type) -> None:
"""Transform the `cls`'s methods and class annotations to FastAPI routes.
Modified from
https://github.com/dmontagu/fastapi-utils/blob/master/fastapi_utils/cbv.py
Usage:
>>> app = FastAPI()
>>> class A:
@app.route("/{i}")
def func(self, i: int) -> str:
return self.dep + i
>>> # just running the app won't work, here.
>>> make_fastapi_class_based_view(app, A)
>>> # now app can be run properly
"""
# Delayed import to prevent ciruclar imports in workers.
from fastapi import Depends, APIRouter
from fastapi.routing import APIRoute
def get_current_servable_instance():
from ray import serve
return serve.get_replica_context().servable_object
# Find all the class method routes
member_methods = {
func
for _, func in inspect.getmembers(cls, inspect.isfunction)
}
class_method_routes = [
route for route in fastapi_app.routes
if isinstance(route, APIRoute) and route.endpoint in member_methods
]
# Modify these routes and mount it to a new APIRouter.
# We need to to this (instead of modifying in place) because we want to use
# the laster fastapi_app.include_router to re-run the dependency analysis
# for each routes.
new_router = APIRouter()
for route in class_method_routes:
fastapi_app.routes.remove(route)
# This block just adds a default values to the self parameters so that
# FastAPI knows to inject the object when calling the route.
# Before: def method(self, i): ...
# After: def method(self=Depends(...), *, i):...
old_endpoint = route.endpoint
old_signature = inspect.signature(old_endpoint)
old_parameters = list(old_signature.parameters.values())
if len(old_parameters) == 0:
# TODO(simon): make it more flexible to support no arguments.
raise RayServeException(
"Methods in FastAPI class based view must have ``self`` as "
"first argument.")
old_self_parameter = old_parameters[0]
new_self_parameter = old_self_parameter.replace(
default=Depends(get_current_servable_instance))
new_parameters = [new_self_parameter] + [
# Make the rest of the parameters keyword only because
# the first argument is no longer positional.
parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY)
for parameter in old_parameters[1:]
]
new_signature = old_signature.replace(parameters=new_parameters)
setattr(route.endpoint, "__signature__", new_signature)
setattr(route.endpoint, "_serve_cls", cls)
new_router.routes.append(route)
fastapi_app.include_router(new_router)
# Remove endpoints that belong to other class based views.
routes = fastapi_app.routes
for route in routes:
serve_cls = getattr(route.endpoint, "_serve_cls", None)
if serve_cls is not None and serve_cls != cls:
routes.remove(route)
|
def make_fastapi_class_based_view(fastapi_app, cls: Type) -> None:
"""Transform the `cls`'s methods and class annotations to FastAPI routes.
Modified from
https://github.com/dmontagu/fastapi-utils/blob/master/fastapi_utils/cbv.py
Usage:
>>> app = FastAPI()
>>> class A:
@app.route("/{i}")
def func(self, i: int) -> str:
return self.dep + i
>>> # just running the app won't work, here.
>>> make_fastapi_class_based_view(app, A)
>>> # now app can be run properly
"""
# Delayed import to prevent ciruclar imports in workers.
from fastapi import Depends, APIRouter
from fastapi.routing import APIRoute
def get_current_servable_instance():
from ray import serve
return serve.get_replica_context().servable_object
# Find all the class method routes
member_methods = {
func
for _, func in inspect.getmembers(cls, inspect.isfunction)
}
class_method_routes = [
route for route in fastapi_app.routes
if isinstance(route, APIRoute) and route.endpoint in member_methods
]
# Modify these routes and mount it to a new APIRouter.
# We need to to this (instead of modifying in place) because we want to use
# the laster fastapi_app.include_router to re-run the dependency analysis
# for each routes.
new_router = APIRouter()
for route in class_method_routes:
fastapi_app.routes.remove(route)
# This block just adds a default values to the self parameters so that
# FastAPI knows to inject the object when calling the route.
# Before: def method(self, i): ...
# After: def method(self=Depends(...), *, i):...
old_endpoint = route.endpoint
old_signature = inspect.signature(old_endpoint)
old_parameters = list(old_signature.parameters.values())
if len(old_parameters) == 0:
# TODO(simon): make it more flexible to support no arguments.
raise RayServeException(
"Methods in FastAPI class-based view must have ``self`` as "
"first argument.")
old_self_parameter = old_parameters[0]
new_self_parameter = old_self_parameter.replace(
default=Depends(get_current_servable_instance))
new_parameters = [new_self_parameter] + [
# Make the rest of the parameters keyword only because
# the first argument is no longer positional.
parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY)
for parameter in old_parameters[1:]
]
new_signature = old_signature.replace(parameters=new_parameters)
setattr(route.endpoint, "__signature__", new_signature)
setattr(route.endpoint, "_serve_cls", cls)
new_router.routes.append(route)
fastapi_app.include_router(new_router)
# Remove endpoints that belong to other class based views.
routes = fastapi_app.routes
for route in routes:
serve_cls = getattr(route.endpoint, "_serve_cls", None)
if serve_cls is not None and serve_cls != cls:
routes.remove(route)
|
2,819 |
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between X and Y.
Distances are calculated between (X[0], Y[0]), (X[1], Y[1]), ... .
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Y : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Returns
-------
distances : ndarray of shape (n_samples,)
L1 distances between the row vectors of `X` and the row vectors
of `Y`.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_manhattan_distances
>>> import numpy as np
>>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> Y = np.eye(3, k=1)
>>> paired_manhattan_distances(X, Y)
array([1., 2., 1.])
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
|
def paired_manhattan_distances(X, Y):
"""Compute the paired L1 distances between X and Y.
Distances are calculated between (X[0], Y[0]), (X[1], Y[1]), ... .
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Y : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Returns
-------
distances : ndarray of shape (n_samples,)
L1 distances between the row vectors of `X` and the row vectors
of `Y`.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_manhattan_distances
>>> import numpy as np
>>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> Y = np.eye(3, k=1)
>>> paired_manhattan_distances(X, Y)
array([1., 2., 1.])
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
|
25,658 |
def setup(app):
""" Entry point to sphinx build customisation. """
app.connect('autodoc-skip-member', special_methods_callback)
|
def setup(app):
""" Entry point to sphinx build customisation. """
app.connect('autodoc-skip-member', autodoc_skip_member_callback)
|
14,495 |
def _items_sorter(
sort_keys: bool,
key_order: Optional[Sequence[str]],
drop_missing: bool,
) -> Callable[[EventDict], List[Tuple[str, Any]]]:
"""
Return a function to sort items from an `event_dict`.
See :class:`KeyValueRenderer` for an explanation of the parameters.
"""
# Use an optimized version for each case.
if key_order and sort_keys:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
items = []
for key in key_order: # type: ignore
value = event_dict.pop(key, None)
if value is not None or not drop_missing:
items.append((key, value))
items += sorted(event_dict.items())
return items
elif key_order:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
items = []
for key in key_order: # type: ignore
value = event_dict.pop(key, None)
if value is not None or not drop_missing:
items.append((key, value))
items += event_dict.items()
return items
elif sort_keys:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
return sorted(event_dict.items())
else:
ordered_items = operator.methodcaller("items") # type: ignore
return ordered_items
|
def _items_sorter(
sort_keys: bool,
key_order: Optional[Sequence[str]],
drop_missing: bool,
) -> Callable[[EventDict], List[Tuple[str, Any]]]:
"""
Return a function to sort items from an ``event_dict``.
See :class:`KeyValueRenderer` for an explanation of the parameters.
"""
# Use an optimized version for each case.
if key_order and sort_keys:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
items = []
for key in key_order: # type: ignore
value = event_dict.pop(key, None)
if value is not None or not drop_missing:
items.append((key, value))
items += sorted(event_dict.items())
return items
elif key_order:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
items = []
for key in key_order: # type: ignore
value = event_dict.pop(key, None)
if value is not None or not drop_missing:
items.append((key, value))
items += event_dict.items()
return items
elif sort_keys:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
return sorted(event_dict.items())
else:
ordered_items = operator.methodcaller("items") # type: ignore
return ordered_items
|
6,932 |
def import_file_by_path(path, force=False, data_import=False, pre_process=None, ignore_version=None,
reset_permissions=False, for_sync=False):
if not frappe.flags.dt:
frappe.flags.dt = []
try:
docs = read_doc_from_file(path)
except IOError:
print(path + " missing")
return
curr_hash = md5(path)
if docs:
if not isinstance(docs, list):
docs = [docs]
for doc in docs:
if not force:
try:
db_hash = frappe.db.get_value(doc["doctype"], doc["name"], "migration_hash")
except Exception:
frappe.flags.dt += [doc["doctype"]]
db_hash = None
if not db_hash:
db_modified = frappe.db.get_value(doc["doctype"], doc["name"], "modified")
if db_modified and doc.get("modified") == get_datetime_str(db_modified):
return False
if curr_hash == db_hash:
return False
original_modified = doc.get("modified")
import_doc(
docdict=doc,
force=force,
data_import=data_import,
pre_process=pre_process,
ignore_version=ignore_version,
reset_permissions=reset_permissions,
path=path,
)
if doc["doctype"] == "DocType":
if doc["name"] == "DocType":
Doctype_table=frappe.qb.DocType("DocType")
frappe.qb.update(Doctype_table).set(Doctype_table.migration_hash, curr_hash).where(Doctype_table.name == "DocType").run()
else:
frappe.db.set_value(doc["doctype"], doc["name"], "migration_hash", curr_hash)
if original_modified:
update_modified(original_modified, doc)
return True
|
def import_file_by_path(path, force=False, data_import=False, pre_process=None, ignore_version=None,
reset_permissions=False, for_sync=False):
if not frappe.flags.dt:
frappe.flags.dt = []
try:
docs = read_doc_from_file(path)
except IOError:
print(f"{path} missing")
return
curr_hash = md5(path)
if docs:
if not isinstance(docs, list):
docs = [docs]
for doc in docs:
if not force:
try:
db_hash = frappe.db.get_value(doc["doctype"], doc["name"], "migration_hash")
except Exception:
frappe.flags.dt += [doc["doctype"]]
db_hash = None
if not db_hash:
db_modified = frappe.db.get_value(doc["doctype"], doc["name"], "modified")
if db_modified and doc.get("modified") == get_datetime_str(db_modified):
return False
if curr_hash == db_hash:
return False
original_modified = doc.get("modified")
import_doc(
docdict=doc,
force=force,
data_import=data_import,
pre_process=pre_process,
ignore_version=ignore_version,
reset_permissions=reset_permissions,
path=path,
)
if doc["doctype"] == "DocType":
if doc["name"] == "DocType":
Doctype_table=frappe.qb.DocType("DocType")
frappe.qb.update(Doctype_table).set(Doctype_table.migration_hash, curr_hash).where(Doctype_table.name == "DocType").run()
else:
frappe.db.set_value(doc["doctype"], doc["name"], "migration_hash", curr_hash)
if original_modified:
update_modified(original_modified, doc)
return True
|
33,781 |
def ensure_runtime_env_setup(pkg_uris: List[str]) -> str:
"""Make sure all required packages are downloaded it local.
Necessary packages required to run the job will be downloaded
into local file system if it doesn't exist.
Args:
pkg_uri list(str): Package of the working dir for the runtime env.
Return:
Working directory is returned.
"""
pkg_dir = None
assert _internal_kv_initialized()
for pkg_uri in pkg_uris:
# For each node, the package will only be downloaded one time
# Locking to avoid multiple process download concurrently
pkg_file = Path(_get_local_path(pkg_uri))
with FileLock(str(pkg_file) + ".lock"):
pkg_dir = fetch_package(pkg_uri)
sys.path.insert(0, str(pkg_dir))
# Right now, multiple pkg_uris are not supported correctly.
# We return the last one as working directory
return str(pkg_dir) if pkg_dir else None
|
def ensure_runtime_env_setup(pkg_uris: List[str]) -> Optional[str]:
"""Make sure all required packages are downloaded it local.
Necessary packages required to run the job will be downloaded
into local file system if it doesn't exist.
Args:
pkg_uri list(str): Package of the working dir for the runtime env.
Return:
Working directory is returned.
"""
pkg_dir = None
assert _internal_kv_initialized()
for pkg_uri in pkg_uris:
# For each node, the package will only be downloaded one time
# Locking to avoid multiple process download concurrently
pkg_file = Path(_get_local_path(pkg_uri))
with FileLock(str(pkg_file) + ".lock"):
pkg_dir = fetch_package(pkg_uri)
sys.path.insert(0, str(pkg_dir))
# Right now, multiple pkg_uris are not supported correctly.
# We return the last one as working directory
return str(pkg_dir) if pkg_dir else None
|
31,668 |
def execute_command(command, args, extract_contents=True):
"""
Run demisto.executeCommand and check for errors.
:type command: ``str``
:param command: The command to run. (required)
:type args: ``dict``
:param args: The command arguments. (required)
:type extract_contents: ``bool``
:param extract_contents: Whether to return only the Contents part of the results. Default is True.
:return: The command results
:rtype: ``list`` or ``dict`` or ``str``
"""
if not hasattr(demisto, 'executeCommand'):
raise DemistoException('Cannot run execute command from integrations.')
res = demisto.executeCommand(command, args)
if is_error(res):
return_results(res)
return_error('Failed to execute {}. See additional error details in the above entries.'.format(command))
if not extract_contents:
return res
if isinstance(res, dict):
return res.get('Contents', {})
elif not isinstance(res, list):
return res
contents = [entry.get('Contents', {}) for entry in res]
return contents[0] if len(contents) == 1 else contents
|
def execute_command(command, args, extract_contents=True):
"""
Runs the `demisto.executeCommand()` function and checks for errors.
:type command: ``str``
:param command: The command to run. (required)
:type args: ``dict``
:param args: The command arguments. (required)
:type extract_contents: ``bool``
:param extract_contents: Whether to return only the Contents part of the results. Default is True.
:return: The command results
:rtype: ``list`` or ``dict`` or ``str``
"""
if not hasattr(demisto, 'executeCommand'):
raise DemistoException('Cannot run execute command from integrations.')
res = demisto.executeCommand(command, args)
if is_error(res):
return_results(res)
return_error('Failed to execute {}. See additional error details in the above entries.'.format(command))
if not extract_contents:
return res
if isinstance(res, dict):
return res.get('Contents', {})
elif not isinstance(res, list):
return res
contents = [entry.get('Contents', {}) for entry in res]
return contents[0] if len(contents) == 1 else contents
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.