id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
31,675 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
params = demisto.params()
# if your Client class inherits from BaseClient, SSL verification is
# handled out of the box by it, just pass ``verify_certificate`` to
# the Client constructor
verify_certificate = not params.get('insecure', False)
# if your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = params.get('proxy', False)
app_id = params.get('app_id')
base_url = params.get('base_url')
tenant_id = params.get('tenant_id')
self_deployed = params.get('self_deployed', False)
enc_key = params.get('enc_key')
first_fetch_time = params.get('first_fetch', '3 days').strip()
fetch_limit = params.get('max_fetch', 10)
fetch_timeout = arg_to_number(params.get('fetch_timeout', TIMEOUT))
demisto.debug(f'Command being called is {demisto.command()}')
command = demisto.command()
args = demisto.args()
try:
client = Client(
app_id=app_id,
verify=verify_certificate,
base_url=base_url,
proxy=proxy,
tenant_id=tenant_id,
enc_key=enc_key,
self_deployed=self_deployed,
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client))
elif command == 'microsoft-365-defender-auth-start':
return_results(start_auth(client))
elif command == 'microsoft-365-defender-auth-complete':
return_results(complete_auth(client))
elif command == 'microsoft-365-defender-auth-reset':
return_results(reset_auth())
elif command == 'microsoft-365-defender-auth-test':
return_results(test_connection(client))
elif command == 'microsoft-365-defender-incidents-list':
test_context_for_token(client)
return_results(microsoft_365_defender_incidents_list_command(client, args))
elif command == 'microsoft-365-defender-incident-update':
test_context_for_token(client)
return_results(microsoft_365_defender_incident_update_command(client, args))
elif command == 'microsoft-365-defender-advanced-hunting':
test_context_for_token(client)
return_results(microsoft_365_defender_advanced_hunting_command(client, args))
elif command == 'fetch-incidents':
fetch_limit = arg_to_number(fetch_limit)
fetch_timeout = arg_to_number(fetch_timeout) if fetch_timeout else None
incidents = fetch_incidents(client, first_fetch_time, fetch_limit, fetch_timeout)
demisto.incidents(incidents)
else:
raise NotImplementedError
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
params = demisto.params()
# if your Client class inherits from BaseClient, SSL verification is
# handled out of the box by it, just pass ``verify_certificate`` to
# the Client constructor
verify_certificate = not params.get('insecure', False)
# if your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = params.get('proxy', False)
app_id = params.get('app_id')
base_url = params.get('base_url')
tenant_id = params.get('tenant_id')
self_deployed = argToBoolean(params.get('self_deployed', 'false'))
enc_key = params.get('enc_key')
first_fetch_time = params.get('first_fetch', '3 days').strip()
fetch_limit = params.get('max_fetch', 10)
fetch_timeout = arg_to_number(params.get('fetch_timeout', TIMEOUT))
demisto.debug(f'Command being called is {demisto.command()}')
command = demisto.command()
args = demisto.args()
try:
client = Client(
app_id=app_id,
verify=verify_certificate,
base_url=base_url,
proxy=proxy,
tenant_id=tenant_id,
enc_key=enc_key,
self_deployed=self_deployed,
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client))
elif command == 'microsoft-365-defender-auth-start':
return_results(start_auth(client))
elif command == 'microsoft-365-defender-auth-complete':
return_results(complete_auth(client))
elif command == 'microsoft-365-defender-auth-reset':
return_results(reset_auth())
elif command == 'microsoft-365-defender-auth-test':
return_results(test_connection(client))
elif command == 'microsoft-365-defender-incidents-list':
test_context_for_token(client)
return_results(microsoft_365_defender_incidents_list_command(client, args))
elif command == 'microsoft-365-defender-incident-update':
test_context_for_token(client)
return_results(microsoft_365_defender_incident_update_command(client, args))
elif command == 'microsoft-365-defender-advanced-hunting':
test_context_for_token(client)
return_results(microsoft_365_defender_advanced_hunting_command(client, args))
elif command == 'fetch-incidents':
fetch_limit = arg_to_number(fetch_limit)
fetch_timeout = arg_to_number(fetch_timeout) if fetch_timeout else None
incidents = fetch_incidents(client, first_fetch_time, fetch_limit, fetch_timeout)
demisto.incidents(incidents)
else:
raise NotImplementedError
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
17,476 |
def combine_by_coords(
data_objects: Sequence[Union[Dataset, DataArray]] = [],
compat: str = "no_conflicts",
data_vars: str = "all",
coords: str = "different",
fill_value: object = dtypes.NA,
join: str = "outer",
combine_attrs: str = "no_conflicts",
datasets: Sequence[Dataset] = None,
) -> Union[Dataset, DataArray]:
"""
Attempt to auto-magically combine the given datasets (or data arrays)
into one by using dimension coordinates.
This method attempts to combine a group of datasets along any number of
dimensions into a single entity by inspecting coords and metadata and using
a combination of concat and merge.
Will attempt to order the datasets such that the values in their dimension
coordinates are monotonic along all dimensions. If it cannot determine the
order in which to concatenate the datasets, it will raise a ValueError.
Non-coordinate dimensions will be ignored, as will any coordinate
dimensions which do not vary between each dataset.
Aligns coordinates, but different variables on datasets can cause it
to fail under some scenarios. In complex cases, you may need to clean up
your data and use concat/merge explicitly (also see `combine_nested`).
Works well if, for example, you have N years of data and M data variables,
and each combination of a distinct time period and set of data variables is
saved as its own dataset. Also useful for if you have a simulation which is
parallelized in multiple dimensions, but has global coordinates saved in
each file specifying the positions of points within the global domain.
Parameters
----------
data_objects : sequence of xarray.Dataset or sequence of xarray.DataArray
Data objects to combine.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "equals": all values and dimensions must be the same.
- "identical": all values, dimensions and attributes must be the
same.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
data_vars : {"minimal", "different", "all" or list of str}, optional
These data variables will be concatenated together:
* "minimal": Only data variables in which the dimension already
appears are included.
* "different": Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* "all": All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the "minimal" data variables.
If objects are DataArrays, `data_vars` must be "all".
coords : {"minimal", "different", "all"} or list of str, optional
As per the "data_vars" kwarg, but for coordinate variables.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values. If None, raises a ValueError if
the passed Datasets do not create a complete hypercube.
join : {"outer", "inner", "left", "right", "exact"}, optional
String indicating how to combine differing indexes in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "drop"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
combined : xarray.Dataset or xarray.DataArray
See also
--------
concat
merge
combine_nested
Examples
--------
Combining two datasets using their common dimension coordinates. Notice
they are concatenated based on the values in their dimension coordinates,
not on their position in the list passed to `combine_by_coords`.
>>> x1 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [0, 1], "x": [10, 20, 30]},
... )
>>> x2 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [10, 20, 30]},
... )
>>> x3 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [40, 50, 60]},
... )
>>> x1
<xarray.Dataset>
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 0 1
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 10.9 8.473 12.92
precipitation (y, x) float64 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289
>>> x2
<xarray.Dataset>
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 2 3
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 11.36 18.51 1.421 1.743 0.4044 16.65
precipitation (y, x) float64 0.7782 0.87 0.9786 0.7992 0.4615 0.7805
>>> x3
<xarray.Dataset>
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 2 3
* x (x) int64 40 50 60
Data variables:
temperature (y, x) float64 2.365 12.8 2.867 18.89 10.44 8.293
precipitation (y, x) float64 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x2, x1])
<xarray.Dataset>
Dimensions: (y: 4, x: 3)
Coordinates:
* y (y) int64 0 1 2 3
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 10.9 ... 1.743 0.4044 16.65
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.7992 0.4615 0.7805
>>> xr.combine_by_coords([x3, x1])
<xarray.Dataset>
Dimensions: (y: 4, x: 6)
Coordinates:
* y (y) int64 0 1 2 3
* x (x) int64 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 nan ... nan 18.89 10.44 8.293
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x3, x1], join="override")
<xarray.Dataset>
Dimensions: (y: 2, x: 6)
Coordinates:
* y (y) int64 0 1
* x (x) int64 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 2.365 ... 18.89 10.44 8.293
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x1, x2, x3])
<xarray.Dataset>
Dimensions: (y: 4, x: 6)
Coordinates:
* y (y) int64 0 1 2 3
* x (x) int64 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 nan ... 18.89 10.44 8.293
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176
"""
# TODO remove after version 0.21, see PR4696
if datasets is not None:
warnings.warn(
"The datasets argument has been renamed to `data_objects`."
" From 0.21 on passing a value for datasets will raise an error."
)
data_objects = datasets
if not data_objects:
return Dataset()
objs_are_unnamed_dataarrays = [
isinstance(data_object, DataArray) and data_object.name is None
for data_object in data_objects
]
if any(objs_are_unnamed_dataarrays):
if all(objs_are_unnamed_dataarrays):
# Combine into a single larger DataArray
unnamed_arrays = data_objects
temp_datasets = [
data_array._to_temp_dataset() for data_array in unnamed_arrays
]
combined_temp_dataset = _combine_single_variable_hypercube(
temp_datasets,
fill_value=fill_value,
data_vars=data_vars,
coords=coords,
compat=compat,
join=join,
combine_attrs=combine_attrs,
)
return DataArray()._from_temp_dataset(combined_temp_dataset)
else:
# Must be a mix of unnamed dataarrays with either named dataarrays or with datasets
# Can't combine these as we wouldn't know whether to merge or concatenate the arrays
raise ValueError(
"Can't automatically combine unnamed dataarrays with either named dataarrays or datasets."
)
else:
# Promote any named DataArrays to single-variable Datasets to simplify combining
data_objects = [
obj.to_dataset() if isinstance(obj, DataArray) else obj
for obj in data_objects
]
# Group by data vars
sorted_datasets = sorted(data_objects, key=vars_as_keys)
grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)
# Perform the multidimensional combine on each group of data variables
# before merging back together
concatenated_grouped_by_data_vars = []
for vars, datasets_with_same_vars in grouped_by_vars:
concatenated = _combine_single_variable_hypercube(
list(datasets_with_same_vars),
fill_value=fill_value,
data_vars=data_vars,
coords=coords,
compat=compat,
join=join,
combine_attrs=combine_attrs,
)
concatenated_grouped_by_data_vars.append(concatenated)
return merge(
concatenated_grouped_by_data_vars,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
|
def combine_by_coords(
data_objects: Sequence[Union[Dataset, DataArray]] = [],
compat: str = "no_conflicts",
data_vars: str = "all",
coords: str = "different",
fill_value: object = dtypes.NA,
join: str = "outer",
combine_attrs: str = "no_conflicts",
datasets: Sequence[Dataset] = None,
) -> Union[Dataset, DataArray]:
"""
Attempt to auto-magically combine the given datasets (or data arrays)
into one by using dimension coordinates.
This method attempts to combine a group of datasets along any number of
dimensions into a single entity by inspecting coords and metadata and using
a combination of concat and merge.
Will attempt to order the datasets such that the values in their dimension
coordinates are monotonic along all dimensions. If it cannot determine the
order in which to concatenate the datasets, it will raise a ValueError.
Non-coordinate dimensions will be ignored, as will any coordinate
dimensions which do not vary between each dataset.
Aligns coordinates, but different variables on datasets can cause it
to fail under some scenarios. In complex cases, you may need to clean up
your data and use concat/merge explicitly (also see `combine_nested`).
Works well if, for example, you have N years of data and M data variables,
and each combination of a distinct time period and set of data variables is
saved as its own dataset. Also useful for if you have a simulation which is
parallelized in multiple dimensions, but has global coordinates saved in
each file specifying the positions of points within the global domain.
Parameters
----------
data_objects : sequence of xarray.Dataset or sequence of xarray.DataArray
Data objects to combine.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "equals": all values and dimensions must be the same.
- "identical": all values, dimensions and attributes must be the
same.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
data_vars : {"minimal", "different", "all" or list of str}, optional
These data variables will be concatenated together:
* "minimal": Only data variables in which the dimension already
appears are included.
* "different": Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* "all": All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the "minimal" data variables.
If objects are DataArrays, `data_vars` must be "all".
coords : {"minimal", "different", "all"} or list of str, optional
As per the "data_vars" kwarg, but for coordinate variables.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values. If None, raises a ValueError if
the passed Datasets do not create a complete hypercube.
join : {"outer", "inner", "left", "right", "exact"}, optional
String indicating how to combine differing indexes in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "drop"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
combined : xarray.Dataset or xarray.DataArray
See also
--------
concat
merge
combine_nested
Examples
--------
Combining two datasets using their common dimension coordinates. Notice
they are concatenated based on the values in their dimension coordinates,
not on their position in the list passed to `combine_by_coords`.
>>> x1 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [0, 1], "x": [10, 20, 30]},
... )
>>> x2 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [10, 20, 30]},
... )
>>> x3 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [40, 50, 60]},
... )
>>> x1
<xarray.Dataset>
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 0 1
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 10.9 8.473 12.92
precipitation (y, x) float64 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289
>>> x2
<xarray.Dataset>
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 2 3
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 11.36 18.51 1.421 1.743 0.4044 16.65
precipitation (y, x) float64 0.7782 0.87 0.9786 0.7992 0.4615 0.7805
>>> x3
<xarray.Dataset>
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 2 3
* x (x) int64 40 50 60
Data variables:
temperature (y, x) float64 2.365 12.8 2.867 18.89 10.44 8.293
precipitation (y, x) float64 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x2, x1])
<xarray.Dataset>
Dimensions: (y: 4, x: 3)
Coordinates:
* y (y) int64 0 1 2 3
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 10.9 ... 1.743 0.4044 16.65
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.7992 0.4615 0.7805
>>> xr.combine_by_coords([x3, x1])
<xarray.Dataset>
Dimensions: (y: 4, x: 6)
Coordinates:
* y (y) int64 0 1 2 3
* x (x) int64 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 nan ... nan 18.89 10.44 8.293
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x3, x1], join="override")
<xarray.Dataset>
Dimensions: (y: 2, x: 6)
Coordinates:
* y (y) int64 0 1
* x (x) int64 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 2.365 ... 18.89 10.44 8.293
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x1, x2, x3])
<xarray.Dataset>
Dimensions: (y: 4, x: 6)
Coordinates:
* y (y) int64 0 1 2 3
* x (x) int64 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 nan ... 18.89 10.44 8.293
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176
"""
# TODO remove after version 0.21, see PR4696
if datasets is not None:
warnings.warn(
"The datasets argument has been renamed to `data_objects`."
" From 0.21 on passing a value for datasets will raise an error."
)
data_objects = datasets
if not data_objects:
return Dataset()
objs_are_unnamed_dataarrays = [
isinstance(data_object, DataArray) and data_object.name is None
for data_object in data_objects
]
if any(objs_are_unnamed_dataarrays):
if all(objs_are_unnamed_dataarrays):
# Combine into a single larger DataArray
unnamed_arrays = data_objects
temp_datasets = [
data_array._to_temp_dataset() for data_array in unnamed_arrays
]
combined_temp_dataset = _combine_single_variable_hypercube(
temp_datasets,
fill_value=fill_value,
data_vars=data_vars,
coords=coords,
compat=compat,
join=join,
combine_attrs=combine_attrs,
)
return DataArray()._from_temp_dataset(combined_temp_dataset)
else:
# Must be a mix of unnamed dataarrays with either named dataarrays or with datasets
# Can't combine these as we wouldn't know whether to merge or concatenate the arrays
raise ValueError(
"Can't automatically combine unnamed DataArrays with either named DataArrays or Datasets."
)
else:
# Promote any named DataArrays to single-variable Datasets to simplify combining
data_objects = [
obj.to_dataset() if isinstance(obj, DataArray) else obj
for obj in data_objects
]
# Group by data vars
sorted_datasets = sorted(data_objects, key=vars_as_keys)
grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)
# Perform the multidimensional combine on each group of data variables
# before merging back together
concatenated_grouped_by_data_vars = []
for vars, datasets_with_same_vars in grouped_by_vars:
concatenated = _combine_single_variable_hypercube(
list(datasets_with_same_vars),
fill_value=fill_value,
data_vars=data_vars,
coords=coords,
compat=compat,
join=join,
combine_attrs=combine_attrs,
)
concatenated_grouped_by_data_vars.append(concatenated)
return merge(
concatenated_grouped_by_data_vars,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
|
31,074 |
def main():
params = demisto.params()
key = fix_rsa_data(params.get('key', ''), 4)
crt = params.get('crt', '')
collection = params.get('collection')
tags = argToList(params['tags']) if params.get('tags') else None
client = TaxiiClient(key, crt, collection, base_url=params.get('base_url'),
verify=argToBoolean(params.get('insecure')))
command = demisto.command()
handle_proxy()
try:
if command == 'fetch-indicators':
fetch_indicators(client, hours_back=params.get('first_fetch', ''),
tlp_color=params.get('tlp_color'), tags=tags)
elif command == 'dhs-get-indicators':
args = demisto.args()
command_results = get_indicators(client, tlp_color=args.get('tlp_color'), limit=int(args.get('limit', 20)),
tags=params.get('tags'))
return_results(command_results)
elif command == 'test-module':
command_test_module(client, key, crt, params.get('first_fetch', ''))
else:
raise DemistoException('not implemented.')
except SyntaxError as error:
return_error(str(error), error)
|
def main():
params = demisto.params()
key = fix_rsa_data(params.get('key', ''), 4)
crt = params.get('crt', '')
collection = params.get('collection')
tags = argToList(params['tags']) if params.get('tags') else None
client = TaxiiClient(key, crt, collection, base_url=params.get('base_url'),
verify=argToBoolean(params.get('insecure')))
command = demisto.command()
handle_proxy()
try:
if command == 'fetch-indicators':
fetch_indicators(client, hours_back=params.get('first_fetch', ''),
tlp_color=params.get('tlp_color'), tags=tags)
elif command == 'dhs-get-indicators':
args = demisto.args()
command_results = get_indicators(client, tlp_color=args.get('tlp_color'), limit=int(args.get('limit', 20)),
tags=params.get('tags'))
return_results(command_results)
elif command == 'test-module':
command_test_module(client, key, crt, params.get('first_fetch', ''))
else:
raise DemistoException('not implemented.')
except Exception as error:
return_error(str(error), error)
|
47,552 |
def load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatched_sizes=False):
"""
Loads a shard from a sharded checkpoint file. Handles the missing keys and unexpected keys.
Args:
model (`tf.keras.models.Model`): Model in which the weights are loaded
model_layer_map (`Dict`): A dictionnary mapping the layer name to the index of the layer in the model.
resolved_archive_file (`str`): Path to the checkpoint file from which the weights will be loaded
ignore_mismatched_sizes (bool, optional):
_description_. Defaults to False. Whether to ignore the mismatch keys
Returns:
Three lists, one for the layers that were found and succesfully restored (from the shard file), one for the
missmatched layers, and another one for the unexpected layers.
"""
saved_weight_names_set = set()
saved_weights = {}
missmatched_keys = set()
unexpected_keys = set()
# Read the H5 file
with h5py.File(resolved_archive_file, "r") as f:
# Retrieve the name of each layer from the H5 file
saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))
weight_value_tuples = []
# Compute missing and unexpected sub layers
# Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
for layer_name in saved_h5_model_layers_name:
h5_layer_object = f[layer_name]
saved_weights[layer_name] = np.asarray(h5_layer_object)
saved_weight_names_set.add(layer_name)
if layer_name not in model_layer_map:
unexpected_keys.add(layer_name)
else:
symbolic_weight = model.weights[model_layer_map[layer_name]]
saved_weight_value = saved_weights[layer_name]
# If the current weight is found
if saved_weight_value is not None:
# Check if the shape of the current weight and the one from the H5 file are different
if K.int_shape(symbolic_weight) != saved_weight_value.shape:
# If yes we reshape the weight from the H5 file accordingly to the current weight
# If the two shapes are not compatible we raise an issue
try:
array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
except ValueError as e:
if ignore_mismatched_sizes:
missmatched_keys.add(
(layer_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
)
continue
else:
raise e
else:
array = saved_weight_value
# We create the tuple that will be loaded and add it to the final list
weight_value_tuples.append((symbolic_weight, array))
K.batch_set_value(weight_value_tuples)
return saved_weight_names_set, unexpected_keys, missmatched_keys
|
def load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatched_sizes=False):
"""
Loads a shard from a sharded checkpoint file. Handles the missing keys and unexpected keys.
Args:
model (`tf.keras.models.Model`): Model in which the weights are loaded
model_layer_map (`Dict`): A dictionnary mapping the layer name to the index of the layer in the model.
resolved_archive_file (`str`): Path to the checkpoint file from which the weights will be loaded
ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether to ignore the mismatched keys
Returns:
Three lists, one for the layers that were found and succesfully restored (from the shard file), one for the
missmatched layers, and another one for the unexpected layers.
"""
saved_weight_names_set = set()
saved_weights = {}
missmatched_keys = set()
unexpected_keys = set()
# Read the H5 file
with h5py.File(resolved_archive_file, "r") as f:
# Retrieve the name of each layer from the H5 file
saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))
weight_value_tuples = []
# Compute missing and unexpected sub layers
# Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
for layer_name in saved_h5_model_layers_name:
h5_layer_object = f[layer_name]
saved_weights[layer_name] = np.asarray(h5_layer_object)
saved_weight_names_set.add(layer_name)
if layer_name not in model_layer_map:
unexpected_keys.add(layer_name)
else:
symbolic_weight = model.weights[model_layer_map[layer_name]]
saved_weight_value = saved_weights[layer_name]
# If the current weight is found
if saved_weight_value is not None:
# Check if the shape of the current weight and the one from the H5 file are different
if K.int_shape(symbolic_weight) != saved_weight_value.shape:
# If yes we reshape the weight from the H5 file accordingly to the current weight
# If the two shapes are not compatible we raise an issue
try:
array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
except ValueError as e:
if ignore_mismatched_sizes:
missmatched_keys.add(
(layer_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
)
continue
else:
raise e
else:
array = saved_weight_value
# We create the tuple that will be loaded and add it to the final list
weight_value_tuples.append((symbolic_weight, array))
K.batch_set_value(weight_value_tuples)
return saved_weight_names_set, unexpected_keys, missmatched_keys
|
50,424 |
def _skip_if_no_torch():
if not os.environ.get('DOCKER_PYMOR', False):
pytest.skip('skipped test due to missing Torch')
|
def _skip_if_no_torch():
try:
import torch
except ImportError as ie:
if not os.environ.get('DOCKER_PYMOR', False):
pytest.skip('skipped test due to missing Torch')
raise ie
|
40,706 |
def average_precision_compute_fn(y_preds: Any, y_targets: Any):
try:
from sklearn.metrics import average_precision_score
except ImportError:
raise RuntimeError("This contrib module requires sklearn to be installed.")
y_true = y_targets.numpy()
y_pred = y_preds.numpy()
return average_precision_score(y_true, y_pred)
|
def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor):
try:
from sklearn.metrics import average_precision_score
except ImportError:
raise RuntimeError("This contrib module requires sklearn to be installed.")
y_true = y_targets.numpy()
y_pred = y_preds.numpy()
return average_precision_score(y_true, y_pred)
|
30,604 |
def get_current_table(grid_id: str, sort_by: Optional[str], columns: Optional[str]) -> \
Tuple[List[Dict[Any, Any]], Any]:
""" Get current grid data
Date retreived:
1. Column names.
2. Current grid data.
Validate:
1. Correct number of context paths.
2. Sort_by is a name of a column.
3. Grid ID.
4. Columns exists.
Args:
grid_id(str): Normalized Grid ID (Machine name in `Settings -> Advanced -> Fields -> Field property` or in Incident
Context Data.
sort_by(str): The static name of the column to sort the table rows by.
columns(str): Comma separated list of columns names, Should be defined if grid is empty otherwise the automation
detect it automatically.
Returns:
list: Current grid as dict in following structure - [{'col1': 'val1'},{'col2': 'val2'},{'col3': 'val3'},
{'col4': 'val4'}].
list: Table columns name.
"""
# Get current Grid data
current_table: Optional[List[dict]] = demisto.incidents()[0].get("CustomFields", {}).get(grid_id)
if not current_table:
raise ValueError(f"The grid id isn't valid : {grid_id}")
# Validate columns number the same as context paths - If no data initiated skip validation, but check if columns specified
if not columns:
raise ValueError("Columns not specified - Its a mandatory arg when grid is empty.")
# Get columns
columns = argToList(columns.lower())
# Validate sort is valide col
if sort_by and sort_by not in columns:
raise ValueError(f'sort_by: {sort_by} is not columns: {columns}')
return current_table, columns
|
def get_current_table(grid_id: str, sort_by: Optional[str], columns: Optional[str]) -> \
Tuple[List[Dict[Any, Any]], Any]:
""" Get current grid data
Date retreived:
1. Column names.
2. Current grid data.
Validate:
1. Correct number of context paths.
2. Sort_by is a name of a column.
3. Grid ID.
4. Columns exists.
Args:
grid_id(str): Normalized Grid ID (Machine name in `Settings -> Advanced -> Fields -> Field property` or in Incident
Context Data.
sort_by(str): The static name of the column to sort the table rows by.
columns(str): Comma separated list of columns names, Should be defined if grid is empty otherwise the automation
detect it automatically.
Returns:
list: Current grid as dict in following structure - [{'col1': 'val1'},{'col2': 'val2'},{'col3': 'val3'},
{'col4': 'val4'}].
list: Table columns name.
"""
# Get current Grid data
current_table: Optional[List[dict]] = demisto.incidents()[0].get("CustomFields", {}).get(grid_id)
if not current_table:
raise ValueError(f"The grid id isn't valid : {grid_id}")
# Validate columns number the same as context paths - If no data initiated skip validation, but check if columns specified
if not columns:
raise ValueError("Columns not specified - Its a mandatory arg when grid is empty.")
# Get columns
columns = argToList(columns.lower())
# Validate sort is valide col
if sort_by and sort_by not in columns:
raise ValueError(f'sort_by "{sort_by}" is not one of the columns: {columns}')
return current_table, columns
|
45,644 |
def run_standalone_app(
layout,
callbacks,
header_colors,
filename
):
"""Run demo app (tests/dashbio_demos/app_*.py) as standalone app."""
app = dash.Dash(__name__, assets_folder='../../../assets/')
app.scripts.config.serve_locally = True
# Handle callback to component with id "fullband-switch"
app.config['suppress_callback_exceptions'] = True
# Get all information from filename
app_name = os.path.basename(filename).replace(
'.py', '').replace(
'app_', '')
app_title = "Dash {}".format(app_name.replace('_', ' ').title())
# Assign layout
app.layout = app_page_layout(
page_layout=layout(),
app_title=app_title,
app_name=app_name,
standalone=True,
**header_colors()
)
# Register all callbacks
callbacks(app)
# return app object
return app
|
def run_standalone_app(
layout,
callbacks,
header_colors,
filename
):
"""Run demo app (tests/dashbio_demos/app_*.py) as standalone app."""
app = dash.Dash(__name__, assets_folder=os.path.join('..', '..', '..', 'assets/'))
app.scripts.config.serve_locally = True
# Handle callback to component with id "fullband-switch"
app.config['suppress_callback_exceptions'] = True
# Get all information from filename
app_name = os.path.basename(filename).replace(
'.py', '').replace(
'app_', '')
app_title = "Dash {}".format(app_name.replace('_', ' ').title())
# Assign layout
app.layout = app_page_layout(
page_layout=layout(),
app_title=app_title,
app_name=app_name,
standalone=True,
**header_colors()
)
# Register all callbacks
callbacks(app)
# return app object
return app
|
45,559 |
def is_open(opens, break_starts, break_ends, closes, minute_val):
open_idx = np.searchsorted(opens, minute_val)
close_idx = np.searchsorted(closes, minute_val)
if open_idx != close_idx:
# we are not guarenteed to have a sorted list of breaks, since break
# may not exist for a given date. Thus we need special logic.
# if the indices are not same, that means we are within a session
if (break_starts == NP_NAT).all() and (break_ends == NP_NAT).all():
# this calendar has no breaks
return True
break_start_on_open_dt = break_starts[open_idx - 1]
break_end_on_open_dt = break_ends[open_idx - 1]
if break_start_on_open_dt == NP_NAT:
# There is no break on the relevant day
return True
elif break_start_on_open_dt <= minute_val <= break_end_on_open_dt:
# we're in the middle of a break
return False
else:
return True
else:
try:
# if they are the same, it might be the first minute of a
# session
return minute_val == opens[open_idx]
except IndexError:
# this can happen if we're outside the schedule's range (like
# after the last close)
return False
|
def is_open(opens, break_starts, break_ends, closes, minute_val):
open_idx = np.searchsorted(opens, minute_val)
close_idx = np.searchsorted(closes, minute_val)
if open_idx != close_idx:
# we are not guaranteed to have a sorted list of breaks, since break
# may not exist for a given date. Thus we need special logic.
# if the indices are not same, that means we are within a session
if (break_starts == NP_NAT).all() and (break_ends == NP_NAT).all():
# this calendar has no breaks
return True
break_start_on_open_dt = break_starts[open_idx - 1]
break_end_on_open_dt = break_ends[open_idx - 1]
if break_start_on_open_dt == NP_NAT:
# There is no break on the relevant day
return True
elif break_start_on_open_dt <= minute_val <= break_end_on_open_dt:
# we're in the middle of a break
return False
else:
return True
else:
try:
# if they are the same, it might be the first minute of a
# session
return minute_val == opens[open_idx]
except IndexError:
# this can happen if we're outside the schedule's range (like
# after the last close)
return False
|
57,908 |
def validate_common_args(args: Dict[str, str]) -> Dict[str, Any]:
"""
Validate page_size and page_number argument, raise ValueError on invalid arguments.
:type args: ``Dict[str, str]``
:param args: The command arguments provided by the user.
:return: Parameters to send in request
:rtype: ``Dict[str, Any]``
"""
params: Dict[str, Any] = {}
page_size = arg_to_number(args.get("page_size"))
params["page[size]"] = 50
if page_size is not None:
if page_size <= 0 or page_size > 100:
raise ValueError(MESSAGES['PAGE_SIZE'].format(page_size))
params["page[size]"] = page_size
page_number = arg_to_number(args.get("page_number"))
if page_number is not None:
if page_number < 0 or page_number >= 2147483648:
raise ValueError(MESSAGES['PAGE_NUMBER'].format(page_number))
params["page[number]"] = page_number
return params
|
def validate_common_args(args: Dict[str, str]) -> Dict[str, Any]:
"""
Validate page_size and page_number argument, raise ValueError on invalid arguments.
:type args: ``Dict[str, str]``
:param args: The command arguments provided by the user.
:return: Parameters to send in request
:rtype: ``Dict[str, Any]``
"""
params: Dict[str, Any] = {}
page_size = arg_to_number(args.get("page_size"))
if page_size <= 0 or page_size > 100:
raise ValueError(MESSAGES['PAGE_SIZE'].format(page_size))
params["page[size]"] = page_size
page_number = arg_to_number(args.get("page_number"))
if page_number is not None:
if page_number < 0 or page_number >= 2147483648:
raise ValueError(MESSAGES['PAGE_NUMBER'].format(page_number))
params["page[number]"] = page_number
return params
|
32,598 |
def create_issue_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Creates an issue.
Args:
client (Client): Client to perform calls to GitLab services.
args (Dict[str, Any]): XSOAR arguments:
- 'state': The state of the issue.
- 'labels': Retrieve only issues with the given labels.
- 'assignee_username': Retrieve issues by assignee username.
Returns:
(CommandResults).
"""
labels = args.get('labels', '')
headers = ['Iid', 'Title', 'CreatedAt', 'CreatedBy', 'UpdatedAt', 'Milstone', 'State', 'Assignee']
title = args.get('title', '')
description = args.get('description', '')
response = client.create_issue_request(labels, title, description)
human_readable_dict = {
'Iid': response.get('iid'),
'Title': response.get('title'),
'CreatedAt': response.get('created_at', ' '),
'CreatedBy': response.get('autor.name', ' '),
'UpdatedAt': response.get('updated_at', ' '),
'Milstone': response.get('milestone.title', ' '),
'State': response.get('state', ' '),
'Assignee': response.get('assignee.name', ' ')
}
human_readable = tableToMarkdown('Create Issue', human_readable_dict, headers=headers, removeNull=True)
return CommandResults(
outputs_prefix='GitLab.Issue',
outputs_key_field='Iid',
readable_output=human_readable,
outputs=response,
raw_response=response
)
|
def create_issue_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Creates an issue.
Args:
client (Client): Client to perform calls to GitLab services.
args (Dict[str, Any]): XSOAR arguments:
- 'state': The state of the issue.
- 'labels': Retrieve only issues with the given labels.
- 'assignee_username': Retrieve issues by assignee username.
Returns:
(CommandResults).
"""
labels = args.get('labels', '')
headers = ['Iid', 'Title', 'CreatedAt', 'CreatedBy', 'UpdatedAt', 'Milstone', 'State', 'Assignee']
title = args.get('title', '')
description = args.get('description', '')
response = client.create_issue_request(labels, title, description)
human_readable_dict = {
'Iid': response.get('iid'),
'Title': response.get('title'),
'CreatedAt': response.get('created_at', ' '),
'CreatedBy': response.get('autor.name', ' '),
'UpdatedAt': response.get('updated_at', ' '),
'Milstone': response.get('milestone.title', ' '),
'State': response.get('state', ' '),
'Assignee': response.get('assignee.name', ' ')
}
human_readable = tableToMarkdown('Created Issue', human_readable_dict, headers=headers, removeNull=True)
return CommandResults(
outputs_prefix='GitLab.Issue',
outputs_key_field='Iid',
readable_output=human_readable,
outputs=response,
raw_response=response
)
|
10,417 |
def fail_if_max_instance_lifetime_not_supported():
if LooseVersion(boto3.__version__) < LooseVersion('1.10.0'):
module.fail_json(msg="max_instance_lifetime requires boto3 version 1.10.0 or higher. Version %s is installed" %
boto3.__version__)
|
def fail_if_max_instance_lifetime_not_supported():
if not module.boto3_at_least('1.10.0'):
module.fail_json(msg="max_instance_lifetime requires boto3 version 1.10.0 or higher. Version %s is installed" %
boto3.__version__)
|
4,501 |
def _export_raw(fname, raw):
# load data first
raw.load_data()
# remove extra epoc and STI channels
drop_chs = ['epoc']
try:
if not (raw.filenames[0].endswith('.fif')):
drop_chs.append('STI 014')
except AttributeError: # mne.io.RawArray has no filenames attribute
pass
ch_names = [ch for ch in raw.ch_names if ch not in drop_chs]
cart_coords = _get_als_coords_from_chs(raw.info['chs'], drop_chs)
annotations = [raw.annotations.description,
raw.annotations.onset,
raw.annotations.duration]
eeglabio.raw.export_set(
fname, data=raw.get_data(picks=ch_names), sfreq=raw.info['sfreq'],
ch_names=ch_names, ch_locs=cart_coords, annotations=annotations)
|
def _export_raw(fname, raw):
# load data first
raw.load_data()
# remove extra epoc and STI channels
drop_chs = ['epoc']
try:
if not (raw.filenames[0].endswith('.fif')):
drop_chs.append('STI 014')
except AttributeError: # mne.io.RawArray has no filenames attribute
pass
ch_names = [ch for ch in raw.ch_names if ch not in drop_chs]
cart_coords = _get_als_coords_from_chs(raw.info['chs'], drop_chs)
annotations = [raw.annotations.description,
raw.annotations.onset,
raw.annotations.duration]
eeglabio.raw.export_set(
fname, data=raw.get_data(picks=ch_names), sfreq=raw.info['sfreq'],
ch_names=ch_names, ch_locs=cart_coords, annotations=annotations)
|
27,481 |
def execute_workflow(
project, location="us-central1", workflow="myFirstWorkflow"
):
"""Execute a workflow and print the execution results."""
# [START workflows_api_quickstart]
from google.cloud import workflows_v1beta
from google.cloud.workflows import executions_v1beta
from google.cloud.workflows.executions_v1beta.types import executions
if not project:
raise Exception('GOOGLE_CLOUD_PROJECT is required.')
execution_client = executions_v1beta.ExecutionsClient()
workflows_client = workflows_v1beta.WorkflowsClient()
# Construct the fully qualified location path.
parent = workflows_client.workflow_path(project, location, workflow)
# Execute workflow
response = execution_client.create_execution(request={"parent": parent})
print(f"Created execution: {response.name}")
# Wait for execution to finish, then print results.
execution_finished = False
backoff_delay = 1 # Start wait with delay of 1 second
print('Poll every second for result...')
while (not execution_finished):
execution = execution_client.get_execution(request={"name": response.name})
execution_finished = execution.state != executions.Execution.State.ACTIVE
# If we haven't seen the result yet, wait a second.
if not execution_finished:
print('- Waiting for results...')
time.sleep(backoff_delay)
backoff_delay *= 2 # Double the delay to provide exponential backoff.
else:
print(f'Execution finished with state: {execution.state.name}')
print(execution.result)
return execution.result
# [END workflows_api_quickstart]
|
def execute_workflow(
project, location="us-central1", workflow="myFirstWorkflow"
):
"""Execute a workflow and print the execution results."""
# [START workflows_api_quickstart]
from google.cloud import workflows_v1beta
from google.cloud.workflows import executions_v1beta
from google.cloud.workflows.executions_v1beta.types import executions
if not project:
raise Exception('GOOGLE_CLOUD_PROJECT env var is required.')
execution_client = executions_v1beta.ExecutionsClient()
workflows_client = workflows_v1beta.WorkflowsClient()
# Construct the fully qualified location path.
parent = workflows_client.workflow_path(project, location, workflow)
# Execute workflow
response = execution_client.create_execution(request={"parent": parent})
print(f"Created execution: {response.name}")
# Wait for execution to finish, then print results.
execution_finished = False
backoff_delay = 1 # Start wait with delay of 1 second
print('Poll every second for result...')
while (not execution_finished):
execution = execution_client.get_execution(request={"name": response.name})
execution_finished = execution.state != executions.Execution.State.ACTIVE
# If we haven't seen the result yet, wait a second.
if not execution_finished:
print('- Waiting for results...')
time.sleep(backoff_delay)
backoff_delay *= 2 # Double the delay to provide exponential backoff.
else:
print(f'Execution finished with state: {execution.state.name}')
print(execution.result)
return execution.result
# [END workflows_api_quickstart]
|
11,883 |
def autocontrast(image, cutoff=0, ignore=None, mask=None):
"""
Maximize (normalize) image contrast. This function calculates a
histogram of the input image (or mask region), removes **cutoff** percent of the
lightest and darkest pixels from the histogram, and remaps the image
so that the darkest pixel becomes black (0), and the lightest
becomes white (255).
:param image: The image to process.
:param cutoff: The percent to cut off from the histogram on the low and
high ends. Either a tuple of (low, high), or a single
number for both.
:param ignore: The background pixel value (use None for no background).
:param mask: histogram used in contrast operation is computed using pixels
within the mask. If no mask is given the entire image is used
for histogram computation.
:return: An image.
"""
if mask:
histogram = image.histogram(mask)
else:
histogram = image.histogram()
lut = []
for layer in range(0, len(histogram), 256):
h = histogram[layer : layer + 256]
if ignore is not None:
# get rid of outliers
try:
h[ignore] = 0
except TypeError:
# assume sequence
for ix in ignore:
h[ix] = 0
if cutoff:
# cut off pixels from both ends of the histogram
if not isinstance(cutoff, tuple):
cutoff = (cutoff, cutoff)
# get number of pixels
n = 0
for ix in range(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff[0] // 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] -= cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the high end
cut = n * cutoff[1] // 100
for hi in range(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] -= cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in range(256):
if h[lo]:
break
for hi in range(255, -1, -1):
if h[hi]:
break
if hi <= lo:
# don't bother
lut.extend(list(range(256)))
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
for ix in range(256):
ix = int(ix * scale + offset)
if ix < 0:
ix = 0
elif ix > 255:
ix = 255
lut.append(ix)
return _lut(image, lut)
|
def autocontrast(image, cutoff=0, ignore=None, mask=None):
"""
Maximize (normalize) image contrast. This function calculates a
histogram of the input image (or mask region), removes **cutoff** percent of the
lightest and darkest pixels from the histogram, and remaps the image
so that the darkest pixel becomes black (0), and the lightest
becomes white (255).
:param image: The image to process.
:param cutoff: The percent to cut off from the histogram on the low and
high ends. Either a tuple of (low, high), or a single
number for both.
:param ignore: The background pixel value (use None for no background).
:param mask: Histogram used in contrast operation is computed using pixels
within the mask. If no mask is given the entire image is used
for histogram computation.
:return: An image.
"""
if mask:
histogram = image.histogram(mask)
else:
histogram = image.histogram()
lut = []
for layer in range(0, len(histogram), 256):
h = histogram[layer : layer + 256]
if ignore is not None:
# get rid of outliers
try:
h[ignore] = 0
except TypeError:
# assume sequence
for ix in ignore:
h[ix] = 0
if cutoff:
# cut off pixels from both ends of the histogram
if not isinstance(cutoff, tuple):
cutoff = (cutoff, cutoff)
# get number of pixels
n = 0
for ix in range(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff[0] // 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] -= cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the high end
cut = n * cutoff[1] // 100
for hi in range(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] -= cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in range(256):
if h[lo]:
break
for hi in range(255, -1, -1):
if h[hi]:
break
if hi <= lo:
# don't bother
lut.extend(list(range(256)))
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
for ix in range(256):
ix = int(ix * scale + offset)
if ix < 0:
ix = 0
elif ix > 255:
ix = 255
lut.append(ix)
return _lut(image, lut)
|
31,854 |
def ignore_ransomware_anomaly_command(client: Client, args: Dict[str, Any]) -> str:
"""Ignore detected anomalous object on Helios.
:type client: ``Client``
:param Client: cohesity helios client to use.
:type args: ``Dict[str, Any]``
:param args: Dictionary with ignore anomaly parameters.
:return: success message of the ignore anomaly operation.
:rtype: ``str``
"""
# Filter ransomware alert for given object name.
alert_id = ''
object_name = args.get('object_name')
demisto.debug("Performing ignore anomaly operation for object {name}".format(name=object_name))
resp = client.get_ransomware_alerts()
for alert in resp:
property_dict = _get_property_dict(alert['propertyList'])
if property_dict.get('object', "") == object_name:
alert_id = alert.get('id')
if alert_id == '':
raise ValueError('No anomalous object found by given name')
# Suppress ransomware alert.
client.suppress_ransomware_alert_by_id(alert_id)
return "Ignored object {name}".format(name=object_name)
|
def ignore_ransomware_anomaly_command(client: Client, args: Dict[str, Any]) -> str:
"""Ignore detected anomalous object on Helios.
:type client: ``Client``
:param Client: cohesity helios client to use.
:type args: ``Dict[str, Any]``
:param args: Dictionary with ignore anomaly parameters.
:return: success message of the ignore anomaly operation.
:rtype: ``str``
"""
# Filter ransomware alert for given object name.
alert_id = ''
object_name = args.get('object_name')
demisto.debug(f"Performing ignore anomaly operation for object {object_name}.")
resp = client.get_ransomware_alerts()
for alert in resp:
property_dict = _get_property_dict(alert['propertyList'])
if property_dict.get('object', "") == object_name:
alert_id = alert.get('id')
if alert_id == '':
raise ValueError('No anomalous object found by given name')
# Suppress ransomware alert.
client.suppress_ransomware_alert_by_id(alert_id)
return "Ignored object {name}".format(name=object_name)
|
8,663 |
def test_find_config_local(tmpdir):
"""Assert function retrieve configuration file from working dir first"""
working_dir = tmpdir.mkdir("working")
working_dir.join('local.cfg').write('')
config_dir = tmpdir.mkdir("config")
with cd(working_dir.strpath):
found_config = run_script.find_config(config_dir.strpath, 'local.cfg')
assert found_config == 'local.cfg'
found_config = run_script.find_config(config_dir.strpath, 'local')
assert found_config == config_dir.join('local').strpath
|
def test_find_config_local(tmpdir):
"""Assert function retrieves configuration file from working dir first"""
working_dir = tmpdir.mkdir("working")
working_dir.join('local.cfg').write('')
config_dir = tmpdir.mkdir("config")
with cd(working_dir.strpath):
found_config = run_script.find_config(config_dir.strpath, 'local.cfg')
assert found_config == 'local.cfg'
found_config = run_script.find_config(config_dir.strpath, 'local')
assert found_config == config_dir.join('local').strpath
|
21,934 |
def _bdschur_condmax_search(aschur, tschur, condmax):
"""Block-diagonal Schur decomposition search up to condmax
Iterates mb03rd with different pmax values until:
- result is non-defective;
- or condition number of similarity transform is unchanging despite large pmax;
- or condition number of similarity transform is close to condmax.
Parameters
----------
aschur: (n, n) array
real Schur-form matrix
tschur: (n, n) array
orthogonal transformation giving aschur from some initial matrix a
condmax: positive scalar >= 1
maximum condition number of final transformation
Returns
-------
amodal: n, n array
block diagonal Schur form
tmodal:
similarity transformation give amodal from aschur
blksizes:
Array of Schur block sizes
eigvals:
Eigenvalues of amodal (and a, etc.)
Notes
-----
Outputs as for slycot.mb03rd
aschur, tschur are as returned by scipy.linalg.schur.
"""
try:
from slycot import mb03rd
except ImportError:
raise ControlSlycot("can't find slycot module 'mb03rd'")
# see notes on RuntimeError below
pmaxlower = None
# get lower bound; try condmax ** 0.5 first
pmaxlower = condmax ** 0.5
amodal, tmodal, blksizes, eigvals = mb03rd(aschur.shape[0], aschur, tschur, pmax=pmaxlower)
if np.linalg.cond(tmodal) <= condmax:
reslower = amodal, tmodal, blksizes, eigvals
else:
pmaxlower = 1.0
amodal, tmodal, blksizes, eigvals = mb03rd(aschur.shape[0], aschur, tschur, pmax=pmaxlower)
cond = np.linalg.cond(tmodal)
if cond > condmax:
msg = 'minimum cond={} > condmax={}; try increasing condmax'.format(cond, condmax)
raise RuntimeError(msg)
pmax = pmaxlower
# phase 1: search for upper bound on pmax
for i in range(50):
amodal, tmodal, blksizes, eigvals = mb03rd(aschur.shape[0], aschur, tschur, pmax=pmax)
cond = np.linalg.cond(tmodal)
if cond < condmax:
pmaxlower = pmax
reslower = amodal, tmodal, blksizes, eigvals
else:
# upper bound found; go to phase 2
pmaxupper = pmax
break
if _bdschur_defective(blksizes, eigvals):
pmax *= 2
else:
return amodal, tmodal, blksizes, eigvals
else:
# no upper bound found; return current result
return reslower
# phase 2: bisection search
for i in range(50):
pmax = (pmaxlower * pmaxupper) ** 0.5
amodal, tmodal, blksizes, eigvals = mb03rd(aschur.shape[0], aschur, tschur, pmax=pmax)
cond = np.linalg.cond(tmodal)
if cond < condmax:
if not _bdschur_defective(blksizes, eigvals):
return amodal, tmodal, blksizes, eigvals
pmaxlower = pmax
reslower = amodal, tmodal, blksizes, eigvals
else:
pmaxupper = pmax
if pmaxupper / pmaxlower < _PMAX_SEARCH_TOL:
# hit search limit
return reslower
else:
raise ValueError('bisection failed to converge; pmaxlower={}, pmaxupper={}'.format(pmaxlower, pmaxupper))
|
def _bdschur_condmax_search(aschur, tschur, condmax):
"""Block-diagonal Schur decomposition search up to condmax
Iterates mb03rd with different pmax values until:
- result is non-defective;
- or condition number of similarity transform is unchanging despite large pmax;
- or condition number of similarity transform is close to condmax.
Parameters
----------
aschur: (n, n) array_like
real Schur-form matrix
tschur: (n, n) array
orthogonal transformation giving aschur from some initial matrix a
condmax: positive scalar >= 1
maximum condition number of final transformation
Returns
-------
amodal: n, n array
block diagonal Schur form
tmodal:
similarity transformation give amodal from aschur
blksizes:
Array of Schur block sizes
eigvals:
Eigenvalues of amodal (and a, etc.)
Notes
-----
Outputs as for slycot.mb03rd
aschur, tschur are as returned by scipy.linalg.schur.
"""
try:
from slycot import mb03rd
except ImportError:
raise ControlSlycot("can't find slycot module 'mb03rd'")
# see notes on RuntimeError below
pmaxlower = None
# get lower bound; try condmax ** 0.5 first
pmaxlower = condmax ** 0.5
amodal, tmodal, blksizes, eigvals = mb03rd(aschur.shape[0], aschur, tschur, pmax=pmaxlower)
if np.linalg.cond(tmodal) <= condmax:
reslower = amodal, tmodal, blksizes, eigvals
else:
pmaxlower = 1.0
amodal, tmodal, blksizes, eigvals = mb03rd(aschur.shape[0], aschur, tschur, pmax=pmaxlower)
cond = np.linalg.cond(tmodal)
if cond > condmax:
msg = 'minimum cond={} > condmax={}; try increasing condmax'.format(cond, condmax)
raise RuntimeError(msg)
pmax = pmaxlower
# phase 1: search for upper bound on pmax
for i in range(50):
amodal, tmodal, blksizes, eigvals = mb03rd(aschur.shape[0], aschur, tschur, pmax=pmax)
cond = np.linalg.cond(tmodal)
if cond < condmax:
pmaxlower = pmax
reslower = amodal, tmodal, blksizes, eigvals
else:
# upper bound found; go to phase 2
pmaxupper = pmax
break
if _bdschur_defective(blksizes, eigvals):
pmax *= 2
else:
return amodal, tmodal, blksizes, eigvals
else:
# no upper bound found; return current result
return reslower
# phase 2: bisection search
for i in range(50):
pmax = (pmaxlower * pmaxupper) ** 0.5
amodal, tmodal, blksizes, eigvals = mb03rd(aschur.shape[0], aschur, tschur, pmax=pmax)
cond = np.linalg.cond(tmodal)
if cond < condmax:
if not _bdschur_defective(blksizes, eigvals):
return amodal, tmodal, blksizes, eigvals
pmaxlower = pmax
reslower = amodal, tmodal, blksizes, eigvals
else:
pmaxupper = pmax
if pmaxupper / pmaxlower < _PMAX_SEARCH_TOL:
# hit search limit
return reslower
else:
raise ValueError('bisection failed to converge; pmaxlower={}, pmaxupper={}'.format(pmaxlower, pmaxupper))
|
54,976 |
def decompose_hamiltonian(H, hide_identity=False):
"""Decomposes a Hermitian matrix into a linear combination of Pauli operators.
Args:
H (array[complex]): an Hermitian matrix of dimension :math:`2^n\times 2^n`
Keyword Args:
hide_identity (bool): always show ~.Identity observables in the results
Returns:
tuple[list[float], list[~.Observable]]: Returns a list of tensor products of PennyLane Pauli observables, as
well as the corresponding coefficients for each tensor product.
**Example:**
We can use this function to compute the Pauli operator decomposition of an arbitrary Hermitian
matrix:
>>> A = np.array([[-2, -2+1j, -2, -2], [-2-1j, 0, 0, -1], [-2, 0, -2, -1], [-2, -1, -1, 0]])
>>> coeffs, obs_list = decompose_hamiltonian(A)
>>> coeffs
[-1.0, -1.5, -0.5, -1.0, -1.5, -1.0, -0.5, 1.0, -0.5, -0.5]
We can use the output coefficients and tensor Pauli terms to construct a :class:`~.Hamiltonian`:
>>> H = qml.Hamiltonian(coeffs, obs_list)
>>> print(H)
(-1.0) [I0 I1]
+ (-1.5) [X1]
+ (-0.5) [Y1]
+ (-1.0) [Z1]
+ (-1.5) [X0]
+ (-1.0) [X0 X1]
+ (-0.5) [X0 Z1]
+ (1.0) [Y0 Y1]
+ (-0.5) [Z0 X1]
+ (-0.5) [Z0 Y1]
This Hamiltonian can then be used in defining VQE problems using :class:`~VQECost`.
"""
n = int(np.log2(len(H)))
N = 2 ** n
if len(H) - N != 0:
raise ValueError("Hamiltonian should be in the form (n^2 x n^2), for any n>=1")
if not np.allclose(H, H.conj().T):
raise ValueError("The Hamiltonian is not Hermitian")
paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ]
obs = []
coeffs = []
for term in itertools.product(paulis, repeat=n):
matrices = [i._matrix() for i in term]
coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / N
coeff = np.real_if_close(coeff).item()
if not np.allclose(coeff, 0):
coeffs.append(coeff)
if not all(t is qml.Identity for t in term):
obs.append(
functools.reduce(
matmul,
[
t(i)
for i, t in enumerate(term)
if t is not qml.Identity or not hide_identity
],
)
)
else:
obs.append(functools.reduce(matmul, [t(i) for i, t in enumerate(term)]))
return coeffs, obs
|
def decompose_hamiltonian(H, hide_identity=False):
"""Decomposes a Hermitian matrix into a linear combination of Pauli operators.
Args:
H (array[complex]): an Hermitian matrix of dimension :math:`2^n\times 2^n`
Keyword Args:
hide_identity (bool): does not include the :class:`~.Identity` observable within
the tensor products of the decomposition if ``True``
Returns:
tuple[list[float], list[~.Observable]]: Returns a list of tensor products of PennyLane Pauli observables, as
well as the corresponding coefficients for each tensor product.
**Example:**
We can use this function to compute the Pauli operator decomposition of an arbitrary Hermitian
matrix:
>>> A = np.array([[-2, -2+1j, -2, -2], [-2-1j, 0, 0, -1], [-2, 0, -2, -1], [-2, -1, -1, 0]])
>>> coeffs, obs_list = decompose_hamiltonian(A)
>>> coeffs
[-1.0, -1.5, -0.5, -1.0, -1.5, -1.0, -0.5, 1.0, -0.5, -0.5]
We can use the output coefficients and tensor Pauli terms to construct a :class:`~.Hamiltonian`:
>>> H = qml.Hamiltonian(coeffs, obs_list)
>>> print(H)
(-1.0) [I0 I1]
+ (-1.5) [X1]
+ (-0.5) [Y1]
+ (-1.0) [Z1]
+ (-1.5) [X0]
+ (-1.0) [X0 X1]
+ (-0.5) [X0 Z1]
+ (1.0) [Y0 Y1]
+ (-0.5) [Z0 X1]
+ (-0.5) [Z0 Y1]
This Hamiltonian can then be used in defining VQE problems using :class:`~VQECost`.
"""
n = int(np.log2(len(H)))
N = 2 ** n
if len(H) - N != 0:
raise ValueError("Hamiltonian should be in the form (n^2 x n^2), for any n>=1")
if not np.allclose(H, H.conj().T):
raise ValueError("The Hamiltonian is not Hermitian")
paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ]
obs = []
coeffs = []
for term in itertools.product(paulis, repeat=n):
matrices = [i._matrix() for i in term]
coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / N
coeff = np.real_if_close(coeff).item()
if not np.allclose(coeff, 0):
coeffs.append(coeff)
if not all(t is qml.Identity for t in term):
obs.append(
functools.reduce(
matmul,
[
t(i)
for i, t in enumerate(term)
if t is not qml.Identity or not hide_identity
],
)
)
else:
obs.append(functools.reduce(matmul, [t(i) for i, t in enumerate(term)]))
return coeffs, obs
|
14,466 |
def report_outcome(matches: List["MatchError"], options) -> int:
"""Display information about how to skip found rules.
Returns exit code, 2 if erros were found, 0 when only warnings were found.
"""
failure = False
msg = """\
You can skip specific rules by adding them to the skip_list section of your configuration file:
```yaml
# .ansible-lint
warn_list: # or 'skip_list' to silence them completly
"""
matched_rules = {match.rule.id: match.rule.shortdesc for match in matches}
for id in sorted(matched_rules.keys()):
if id not in options.warn_list:
msg += f" - '{id}' # {matched_rules[id]}'\n"
failure = True
msg += "```"
if failure:
console.print(Markdown(msg))
return 2
else:
return 0
|
def report_outcome(matches: List["MatchError"], options) -> int:
"""Display information about how to skip found rules.
Returns exit code, 2 if erros were found, 0 when only warnings were found.
"""
failure = False
msg = """\
You can skip specific rules by adding them to the skip_list section of your configuration file:
```yaml
# .ansible-lint
warn_list: # or 'skip_list' to silence them completely
"""
matched_rules = {match.rule.id: match.rule.shortdesc for match in matches}
for id in sorted(matched_rules.keys()):
if id not in options.warn_list:
msg += f" - '{id}' # {matched_rules[id]}'\n"
failure = True
msg += "```"
if failure:
console.print(Markdown(msg))
return 2
else:
return 0
|
42,703 |
def _query_blockchain_info(accounts: list[BTCAddress]) -> dict[BTCAddress, FVal]:
if _have_bc1_accounts(accounts):
raise BlockchainNotSupported('Blockstream.info does not support Bitcoin Cash')
balances: dict[BTCAddress, FVal] = {}
accounts_chunks = [accounts[x:x + 80] for x in range(0, len(accounts), 80)]
for accounts_chunk in accounts_chunks:
params = '|'.join(accounts_chunk)
btc_resp = request_get_dict(
url=f'https://blockchain.info/multiaddr?active={params}',
handle_429=True,
# If we get a 429 then their docs suggest 10 seconds
# https://blockchain.info/q
backoff_in_seconds=10,
)
for entry in btc_resp['addresses']:
balances[entry['address']] = satoshis_to_btc(FVal(entry['final_balance']))
return balances
|
def _query_blockchain_info(accounts: list[BTCAddress]) -> dict[BTCAddress, FVal]:
if _have_bc1_accounts(accounts):
raise BlockchainNotSupported('blockchain.info does not support Bitcoin Cash')
balances: dict[BTCAddress, FVal] = {}
accounts_chunks = [accounts[x:x + 80] for x in range(0, len(accounts), 80)]
for accounts_chunk in accounts_chunks:
params = '|'.join(accounts_chunk)
btc_resp = request_get_dict(
url=f'https://blockchain.info/multiaddr?active={params}',
handle_429=True,
# If we get a 429 then their docs suggest 10 seconds
# https://blockchain.info/q
backoff_in_seconds=10,
)
for entry in btc_resp['addresses']:
balances[entry['address']] = satoshis_to_btc(FVal(entry['final_balance']))
return balances
|
47,956 |
def main():
args = build_argparser().parse_args()
log.info('Initializing Inference Engine...')
ie = IECore()
refine_config_plugin = get_plugin_configs(args.device_refine, args.refine_nstreams, args.refine_nthreads)
output_config_plugin = get_plugin_configs(args.device_output, args.output_nstreams, args.output_nthreads)
log.info('Loading network...')
model_proposal = models.ProposalModel(ie, args.model_proposal)
model_refine = models.RefineModel(ie, args.model_refine)
model_output = models.OutputModel(ie, args.model_output)
detector_pipeline = MtcnnPipeline(ie, model_proposal, model_refine, model_output,
pm_sync=not args.proposal_async,
pm_device=args.device_proposal,
rm_batch_size=args.refine_batch_size,
rm_config=refine_config_plugin,
rm_num_requests=args.refine_requests,
rm_device=args.device_refine,
om_batch_size=args.output_batch_size,
om_config=output_config_plugin,
om_num_requests=args.output_requests,
om_device=args.device_output)
cap = open_images_capture(args.input, args.loop)
log.info('Starting inference...')
print("Use 'c' key to disable/enable confidence drawing, 'l' to disable/enable landmarks drawing")
print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
palette = ColorPalette(1)
metrics = PerformanceMetrics()
video_writer = cv2.VideoWriter()
draw_lanmdmark = True
draw_confidence = True
total_frames = 0
while True:
start_time = perf_counter()
frame = cap.read()
if not frame:
break
total_frames += 1
if total_frames == 1 :
presenter = monitors.Presenter(args.utilization_monitors, 55,
(round(frame.shape[1] / 4), round(frame.shape[0] / 8)))
if args.output:
video_writer = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(),
(frame.shape[1], frame.shape[0]))
if not video_writer.isOpened():
raise RuntimeError("Can't open video writer")
detections = detector_pipeline.infer(frame)
presenter.drawGraphs(frame)
draw_detections(frame, detections, palette, None, 0.5, draw_lanmdmark, draw_confidence)
metrics.update(start_time, frame)
if video_writer.isOpened() and (args.output_limit == -1 or total_frames <= args.output_limit - 1):
video_writer.write(frame)
if not args.no_show:
cv2.imshow('Detection Results', frame)
key = cv2.waitKey(1)
ESC_KEY = 27
# Quit.
if key in {ord('q'), ord('Q'), ESC_KEY}:
break
if key in {ord('l'), ord('L')}:
draw_lanmdmark = not draw_lanmdmark
if key in {ord('c'), ord('C')}:
draw_confidence = not draw_confidence
metrics.print_total()
|
def main():
args = build_argparser().parse_args()
log.info('Initializing Inference Engine...')
ie = IECore()
refine_config_plugin = get_plugin_configs(args.device_refine, args.refine_nstreams, args.refine_nthreads)
output_config_plugin = get_plugin_configs(args.device_output, args.output_nstreams, args.output_nthreads)
log.info('Loading network...')
model_proposal = models.ProposalModel(ie, args.model_proposal)
model_refine = models.RefineModel(ie, args.model_refine)
model_output = models.OutputModel(ie, args.model_output)
detector_pipeline = MtcnnPipeline(ie, model_proposal, model_refine, model_output,
pm_sync=not args.proposal_async,
pm_device=args.device_proposal,
rm_batch_size=args.refine_batch_size,
rm_config=refine_config_plugin,
rm_num_requests=args.refine_requests,
rm_device=args.device_refine,
om_batch_size=args.output_batch_size,
om_config=output_config_plugin,
om_num_requests=args.output_requests,
om_device=args.device_output)
cap = open_images_capture(args.input, args.loop)
log.info('Starting inference...')
print("Use 'c' key to disable/enable confidence drawing, 'l' to disable/enable landmarks drawing")
print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
palette = ColorPalette(1)
metrics = PerformanceMetrics()
video_writer = cv2.VideoWriter()
draw_lanmdmark = True
draw_confidence = True
total_frames = 0
while True:
start_time = perf_counter()
frame = cap.read()
if frame is None:
break
total_frames += 1
if total_frames == 1 :
presenter = monitors.Presenter(args.utilization_monitors, 55,
(round(frame.shape[1] / 4), round(frame.shape[0] / 8)))
if args.output:
video_writer = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(),
(frame.shape[1], frame.shape[0]))
if not video_writer.isOpened():
raise RuntimeError("Can't open video writer")
detections = detector_pipeline.infer(frame)
presenter.drawGraphs(frame)
draw_detections(frame, detections, palette, None, 0.5, draw_lanmdmark, draw_confidence)
metrics.update(start_time, frame)
if video_writer.isOpened() and (args.output_limit == -1 or total_frames <= args.output_limit - 1):
video_writer.write(frame)
if not args.no_show:
cv2.imshow('Detection Results', frame)
key = cv2.waitKey(1)
ESC_KEY = 27
# Quit.
if key in {ord('q'), ord('Q'), ESC_KEY}:
break
if key in {ord('l'), ord('L')}:
draw_lanmdmark = not draw_lanmdmark
if key in {ord('c'), ord('C')}:
draw_confidence = not draw_confidence
metrics.print_total()
|
24,017 |
def _getKeyNames(keys: int) -> Set[int]:
return {_keyNames[1 << i] for i in range(16) if (1 << i) & keys}
|
def _getKeyNames(keys: int) -> Set[int]:
return {keyName for bitFlag, keyName in _keyNames.items() if bitFlag & keys}
|
57,588 |
def subparser(subparsers):
subparser = subparsers.add_parser('rename')
subparser.add_argument('sigfiles', nargs='+')
subparser.add_argument('name')
subparser.add_argument(
'-q', '--quiet', action='store_true',
help='suppress non-error output'
)
subparser.add_argument(
'-d', '--debug', action='store_true',
help='print debugging output'
)
subparser.add_argument(
'-o', '--output', metavar='FILE', help='output to this file',
default='-'
)
add_ksize_arg(subparser, 31)
add_moltype_args(subparser)
|
def subparser(subparsers):
subparser = subparsers.add_parser('rename')
subparser.add_argument('sigfiles', nargs='+')
subparser.add_argument('name')
subparser.add_argument(
'-q', '--quiet', action='store_true',
help='suppress non-error output'
)
subparser.add_argument(
'-d', '--debug', action='store_true',
help='print debugging output'
)
subparser.add_argument(
'-o', '--output', metavar='FILE',
help='output renamed signature to this file (default stdout)',
default='-'
)
add_ksize_arg(subparser, 31)
add_moltype_args(subparser)
|
45,797 |
def normalize(
data: torch.Tensor, mean: Union[torch.Tensor, float], std: Union[torch.Tensor, float]
) -> torch.Tensor:
r"""Normalize a tensor image with mean and standard deviation.
.. math::
\text{input[channel] = (input[channel] - mean[channel]) / std[channel]}
Where `mean` is :math:`(M_1, ..., M_n)` and `std` :math:`(S_1, ..., S_n)` for `n` channels,
Args:
data (torch.Tensor): Image tensor of size :math:`(*, C, ...)`.
mean (Union[torch.Tensor, Tuple[float], float]): Mean for each channel.
std (Union[torch.Tensor, Tuple[float], float]): Standard deviations for each channel.
Return:
torch.Tensor: Normalised tensor with same size as input :math:`(*, C, ...)`.
Examples:
>>> x = torch.rand(1, 4, 3, 3)
>>> out = normalize(x, 0.0, 255.)
>>> out.shape
torch.Size([1, 4, 3, 3])
>>> x = torch.rand(1, 4, 3, 3)
>>> mean = torch.zeros(1, 4)
>>> std = 255. * torch.ones(1, 4)
>>> out = normalize(x, mean, std)
>>> out.shape
torch.Size([1, 4, 3, 3])
"""
shape = data.shape
if isinstance(mean, float):
mean = torch.tensor([mean] * shape[1], device=data.device, dtype=data.dtype)
if isinstance(std, float):
std = torch.tensor([std] * shape[1], device=data.device, dtype=data.dtype)
if isinstance(mean, tuple):
assert len(mean) == len(shape)
mean = torch.tensor(mean, device=data.device, dtype=data.dtype)
if isinstance(std, tuple):
assert len(std) == len(shape)
std = torch.tensor(std, device=data.device, dtype=data.dtype)
if not isinstance(data, torch.Tensor):
raise TypeError("data should be a tensor. Got {}".format(type(data)))
if not isinstance(mean, torch.Tensor):
raise TypeError("mean should be a tensor or a float. Got {}".format(type(mean)))
if not isinstance(std, torch.Tensor):
raise TypeError("std should be a tensor or float. Got {}".format(type(std)))
# Allow broadcast on channel dimension
if mean.shape and mean.shape[0] != 1:
if mean.shape[0] != data.shape[-3] and mean.shape[:2] != data.shape[:2]:
raise ValueError(f"mean length and number of channels do not match. Got {mean.shape} and {data.shape}.")
# Allow broadcast on channel dimension
if std.shape and std.shape[0] != 1:
if std.shape[0] != data.shape[-3] and std.shape[:2] != data.shape[:2]:
raise ValueError(f"std length and number of channels do not match. Got {std.shape} and {data.shape}.")
mean = torch.as_tensor(mean, device=data.device, dtype=data.dtype)
std = torch.as_tensor(std, device=data.device, dtype=data.dtype)
if mean.shape:
mean = mean[..., :, None]
if std.shape:
std = std[..., :, None]
out: torch.Tensor = (data.view(shape[0], shape[1], -1) - mean) / std
return out.view(shape)
|
def normalize(
data: torch.Tensor, mean: Union[torch.Tensor, float], std: Union[torch.Tensor, float]
) -> torch.Tensor:
r"""Normalize a tensor image with mean and standard deviation.
.. math::
\text{input[channel] = (input[channel] - mean[channel]) / std[channel]}
Where `mean` is :math:`(M_1, ..., M_n)` and `std` :math:`(S_1, ..., S_n)` for `n` channels,
Args:
data (torch.Tensor): Image tensor of size :math:`(*, C, ...)`.
mean (Union[torch.Tensor, Tuple[float], float]): Mean for each channel.
std (Union[torch.Tensor, Tuple[float, ...], float]): Standard deviations for each channel.
Return:
torch.Tensor: Normalised tensor with same size as input :math:`(*, C, ...)`.
Examples:
>>> x = torch.rand(1, 4, 3, 3)
>>> out = normalize(x, 0.0, 255.)
>>> out.shape
torch.Size([1, 4, 3, 3])
>>> x = torch.rand(1, 4, 3, 3)
>>> mean = torch.zeros(1, 4)
>>> std = 255. * torch.ones(1, 4)
>>> out = normalize(x, mean, std)
>>> out.shape
torch.Size([1, 4, 3, 3])
"""
shape = data.shape
if isinstance(mean, float):
mean = torch.tensor([mean] * shape[1], device=data.device, dtype=data.dtype)
if isinstance(std, float):
std = torch.tensor([std] * shape[1], device=data.device, dtype=data.dtype)
if isinstance(mean, tuple):
assert len(mean) == len(shape)
mean = torch.tensor(mean, device=data.device, dtype=data.dtype)
if isinstance(std, tuple):
assert len(std) == len(shape)
std = torch.tensor(std, device=data.device, dtype=data.dtype)
if not isinstance(data, torch.Tensor):
raise TypeError("data should be a tensor. Got {}".format(type(data)))
if not isinstance(mean, torch.Tensor):
raise TypeError("mean should be a tensor or a float. Got {}".format(type(mean)))
if not isinstance(std, torch.Tensor):
raise TypeError("std should be a tensor or float. Got {}".format(type(std)))
# Allow broadcast on channel dimension
if mean.shape and mean.shape[0] != 1:
if mean.shape[0] != data.shape[-3] and mean.shape[:2] != data.shape[:2]:
raise ValueError(f"mean length and number of channels do not match. Got {mean.shape} and {data.shape}.")
# Allow broadcast on channel dimension
if std.shape and std.shape[0] != 1:
if std.shape[0] != data.shape[-3] and std.shape[:2] != data.shape[:2]:
raise ValueError(f"std length and number of channels do not match. Got {std.shape} and {data.shape}.")
mean = torch.as_tensor(mean, device=data.device, dtype=data.dtype)
std = torch.as_tensor(std, device=data.device, dtype=data.dtype)
if mean.shape:
mean = mean[..., :, None]
if std.shape:
std = std[..., :, None]
out: torch.Tensor = (data.view(shape[0], shape[1], -1) - mean) / std
return out.view(shape)
|
37,695 |
def idna_decode(name):
pieces = name.lower().split('.')
if any([p.startswith('xn--') for p in pieces]):
# it's idna
if name.startswith('*'):
# idna.decode doesn't like the *
return f'*.{_decode(name[2:])}'
return _decode(name)
# not idna, just return as-is
return name
|
def idna_decode(name):
pieces = name.lower().split('.')
if any(p.startswith('xn--') for p in pieces):
# it's idna
if name.startswith('*'):
# idna.decode doesn't like the *
return f'*.{_decode(name[2:])}'
return _decode(name)
# not idna, just return as-is
return name
|
2,878 |
def test_assert_ovr_roc_auc_chance_level():
# Build equal probability predictions to multiclass problem
y_true = np.array([3, 1, 2, 0])
y_pred = 0.25 * np.ones((4, 4))
macro_roc_auc = roc_auc_score(y_true, y_pred, multi_class="ovr", average="macro")
assert_allclose(macro_roc_auc, 0.5)
micro_roc_auc = roc_auc_score(y_true, y_pred, multi_class="ovr", average="micro")
assert_allclose(micro_roc_auc, 0.5)
|
def test_assert_ovr_roc_auc_chance_level():
# Build equal probability predictions to multiclass problem
y_true = np.array([3, 1, 2, 0])
y_pred = 0.25 * np.ones((4, 4))
macro_roc_auc = roc_auc_score(y_true, y_pred, multi_class="ovr", average="macro")
assert_allclose(macro_roc_auc, 0.5)
micro_roc_auc = roc_auc_score(y_true, y_pred, multi_class="ovr", average="micro")
assert micro_roc_auc == pytest.approx(0.5)
|
31,522 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
# get the service API url
base_url = demisto.params()['url']
api_key = demisto.params()['apikey']
min_severity = demisto.params()['minSeverity'] # mandatory
alert_types = demisto.params()['categories'] # mandatory
show_only_active = demisto.params()['ShowOnlyActive'] # mandatory
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': 'Token {}'.format(api_key)
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
return_results(result)
elif demisto.command() == 'cyberpion-get-connections-from-domain':
return_results(get_domain_connections_command(client, demisto.args()))
elif demisto.command() == 'cyberpion-get-connections-to-domain':
return_results(get_domain_connections_command(client, demisto.args(), reverse=True))
elif demisto.command() == 'cyberpion-get-domain-state':
return_results(get_domain_state_command(client, demisto.args()))
elif demisto.command() == 'cyberpion-get-domain-action-items':
return_results(get_domain_action_items_command(client,
demisto.args(),
min_severity=min_severity,
alert_types=alert_types,
show_only_active=show_only_active))
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
max_fetch = demisto.params().get('maxFetch')
if not max_fetch:
max_fetch = DEFAULT_MAX_INCIDENTS_TO_FETCH
try:
max_fetch = int(max_fetch)
except ValueError:
raise ValueError('max_fetch must be an int')
if max_fetch > DEFAULT_MAX_INCIDENTS_TO_FETCH:
max_fetch = DEFAULT_MAX_INCIDENTS_TO_FETCH
new_last_run_dict, incidents = fetch_incidents(
client=client,
max_fetch=max_fetch,
min_severity=min_severity,
show_only_active=show_only_active,
alert_types=alert_types
)
# create incidents
demisto.incidents(incidents)
# saves next_run for the time fetch-incidents is invoked
demisto.setLastRun(new_last_run_dict)
else:
raise ValueError(f'no such command: {demisto.command()}')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Cyberpion integration: Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
# get the service API url
base_url = demisto.params()['url']
api_key = demisto.params()['apikey']
min_severity = demisto.params()['minSeverity'] # mandatory
alert_types = demisto.params()['categories'] # mandatory
show_only_active = demisto.params()['ShowOnlyActive'] # mandatory
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': 'Token {}'.format(api_key)
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
return_results(result)
elif demisto.command() == 'cyberpion-get-connections-from-domain':
return_results(get_domain_connections_command(client, demisto.args()))
elif demisto.command() == 'cyberpion-get-connections-to-domain':
return_results(get_domain_connections_command(client, demisto.args(), reverse=True))
elif demisto.command() == 'cyberpion-get-domain-state':
return_results(get_domain_state_command(client, demisto.args()))
elif demisto.command() == 'cyberpion-get-domain-action-items':
return_results(get_domain_action_items_command(client,
demisto.args(),
min_severity=min_severity,
alert_types=alert_types,
show_only_active=show_only_active))
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
max_fetch = demisto.params().get('maxFetch')
if not max_fetch:
max_fetch = DEFAULT_MAX_INCIDENTS_TO_FETCH
try:
max_fetch = int(max_fetch)
except ValueError:
raise ValueError('max_fetch must be an int')
if max_fetch > DEFAULT_MAX_INCIDENTS_TO_FETCH:
max_fetch = DEFAULT_MAX_INCIDENTS_TO_FETCH
new_last_run_dict, incidents = fetch_incidents(
client=client,
max_fetch=max_fetch,
min_severity=min_severity,
show_only_active=show_only_active,
alert_types=alert_types
)
# create incidents
demisto.incidents(incidents)
# saves next_run for the time fetch-incidents is invoked
demisto.setLastRun(new_last_run_dict)
else:
raise NotImplemented(f'no such command: {demisto.command()}')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Cyberpion integration: Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
7,171 |
def equalize_hist(image, nbins=256, mask=None, source_range='image'):
"""Return image after histogram equalization.
Parameters
----------
image : array
Image array.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays, for which each integer is its own bin.
mask: ndarray of bools or 0s and 1s, optional
Array of same shape as `image`. Only points at which mask == True
are used for the equalization, which is applied to the whole image.
source_range : string, optional
'image' (default) determines the range from the input image.
'dtype' determines the range from the expected range of the images
of that data type.
Returns
-------
out : float array
Image array after histogram equalization.
Notes
-----
This function is adapted from [1]_ with the author's permission.
References
----------
.. [1] http://www.janeriksolem.net/histogram-equalization-with-python-and.html
.. [2] https://en.wikipedia.org/wiki/Histogram_equalization
"""
if mask is not None:
mask = np.array(mask, dtype=bool)
cdf, bin_centers = cumulative_distribution(image[mask], nbins,
source_range=source_range)
else:
cdf, bin_centers = cumulative_distribution(image, nbins,
source_range=source_range)
out = np.interp(image.flat, bin_centers, cdf)
return out.reshape(image.shape)
|
def equalize_hist(image, nbins=256, mask=None, *, source_range='image'):
"""Return image after histogram equalization.
Parameters
----------
image : array
Image array.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays, for which each integer is its own bin.
mask: ndarray of bools or 0s and 1s, optional
Array of same shape as `image`. Only points at which mask == True
are used for the equalization, which is applied to the whole image.
source_range : string, optional
'image' (default) determines the range from the input image.
'dtype' determines the range from the expected range of the images
of that data type.
Returns
-------
out : float array
Image array after histogram equalization.
Notes
-----
This function is adapted from [1]_ with the author's permission.
References
----------
.. [1] http://www.janeriksolem.net/histogram-equalization-with-python-and.html
.. [2] https://en.wikipedia.org/wiki/Histogram_equalization
"""
if mask is not None:
mask = np.array(mask, dtype=bool)
cdf, bin_centers = cumulative_distribution(image[mask], nbins,
source_range=source_range)
else:
cdf, bin_centers = cumulative_distribution(image, nbins,
source_range=source_range)
out = np.interp(image.flat, bin_centers, cdf)
return out.reshape(image.shape)
|
48,661 |
def load_font(prefix, ttf_filename, charmap_filename, directory=None):
"""
Loads a font file and the associated charmap.
If ``directory`` the files will be looked for in the qtawesome ``fonts``
directory.
Parameters
----------
prefix: str
Prefix string to be used when accessing a given font set
ttf_filename: str
Ttf font filename
charmap_filename: str
Character map filename
directory: str or None, optional
Directory path for font and charmap files
Example
-------
If you want to load a font ``myicon.tff`` with a ``myicon-charmap.json``
charmap added to the qtawesome ``fonts`` directory (usually located at
``</path/to/lib/python>/site-packages/qtawesome/fonts/``) you can use::
qta.load_font(
'myicon',
'myicon.ttf',
'myicon-charmap.json'
)
However, if you want to load a font ``myicon.tff`` with a
``myicon-charmap.json`` charmap located in a specific path outside the
qtawesome ``font`` directory like for example ``/path/to/myproject/fonts``
you can use::
qta.load_font(
'myicon',
'myicon.ttf',
'myicon-charmap.json',
directory='/path/to/myproject/fonts'
)
"""
return _instance().load_font(prefix, ttf_filename, charmap_filename, directory)
|
def load_font(prefix, ttf_filename, charmap_filename, directory=None):
"""
Loads a font file and the associated charmap.
If ``directory`` is passed, the files will be looked for in the qtawesome
``fonts`` directory.
Parameters
----------
prefix: str
Prefix string to be used when accessing a given font set
ttf_filename: str
Ttf font filename
charmap_filename: str
Character map filename
directory: str or None, optional
Directory path for font and charmap files
Example
-------
If you want to load a font ``myicon.tff`` with a ``myicon-charmap.json``
charmap added to the qtawesome ``fonts`` directory (usually located at
``</path/to/lib/python>/site-packages/qtawesome/fonts/``) you can use::
qta.load_font(
'myicon',
'myicon.ttf',
'myicon-charmap.json'
)
However, if you want to load a font ``myicon.tff`` with a
``myicon-charmap.json`` charmap located in a specific path outside the
qtawesome ``font`` directory like for example ``/path/to/myproject/fonts``
you can use::
qta.load_font(
'myicon',
'myicon.ttf',
'myicon-charmap.json',
directory='/path/to/myproject/fonts'
)
"""
return _instance().load_font(prefix, ttf_filename, charmap_filename, directory)
|
13,587 |
def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
randomisierte QB-Zerlegung
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
Distribution used for the random projectionmatrix Omega. (`'normal'` or `'uniform'`)
oversampling : int
Oversamplingparameter. Number of extra columns of the projectionmatrix.
powerIterations : int
Number of power Iterations.
Returns
-------
Q :
|VectorArray| containig an approximate optimal Basis for the Image of the Inputmatrix A.
len(Q) = target_rank
B :
Numpy Array. Projection of the Input Matrix into the lower dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
|
def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
Randomized QB decomposition.
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
Distribution used for the random projectionmatrix Omega. (`'normal'` or `'uniform'`)
oversampling : int
Oversamplingparameter. Number of extra columns of the projectionmatrix.
powerIterations : int
Number of power Iterations.
Returns
-------
Q :
|VectorArray| containig an approximate optimal Basis for the Image of the Inputmatrix A.
len(Q) = target_rank
B :
Numpy Array. Projection of the Input Matrix into the lower dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
|
32,296 |
def check_security_zones(topology: Topology, device_filter_string: str = None) -> ConfigurationHygieneCheckResult:
"""
Check configured security zones have correct settings.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
"""
return HygieneLookups.check_security_zones(topology, device_filter_str=device_filter_string)
|
def check_security_zones(topology: Topology, device_filter_string: Optional[str] = None) -> ConfigurationHygieneCheckResult:
"""
Check configured security zones have correct settings.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
"""
return HygieneLookups.check_security_zones(topology, device_filter_str=device_filter_string)
|
8,811 |
def test_isupport_getattr():
"""Test using ISUPPORT parameter as read-only attributes."""
instance = isupport.ISupport(awaylen=50)
assert hasattr(instance, 'AWAYLEN')
assert not hasattr(instance, 'awaylen'), 'attributes are ALL_UPPERCASE'
assert not hasattr(instance, 'UNKNOWN')
assert instance.AWAYLEN == 50
# you can't set attributes yourself
with pytest.raises(AttributeError):
instance.AWAYLEN = 20
with pytest.raises(AttributeError):
instance.awaylen = 20
with pytest.raises(AttributeError):
instance.UNKNOWN = 'not possible'
|
def test_isupport_getattr():
"""Test using ISUPPORT parameters as read-only attributes."""
instance = isupport.ISupport(awaylen=50)
assert hasattr(instance, 'AWAYLEN')
assert not hasattr(instance, 'awaylen'), 'attributes are ALL_UPPERCASE'
assert not hasattr(instance, 'UNKNOWN')
assert instance.AWAYLEN == 50
# you can't set attributes yourself
with pytest.raises(AttributeError):
instance.AWAYLEN = 20
with pytest.raises(AttributeError):
instance.awaylen = 20
with pytest.raises(AttributeError):
instance.UNKNOWN = 'not possible'
|
22,220 |
def fill_template(template_text,
context=None,
retry=10,
compiler_class=Compiler,
first_exception=None,
futurized=False,
**kwargs):
"""Fill a cheetah template out for specified context.
If template_text is None, an exception will be thrown, if context
is None (the default) - keyword arguments to this function will be used
as the context.
"""
if template_text is None:
raise TypeError("Template text specified as None to fill_template.")
if not context:
context = kwargs
klass = Template.compile(source=template_text, compilerClass=compiler_class)
t = klass(searchList=[context])
try:
return unicodify(t)
except NotFound as e:
if first_exception is None:
first_exception = e
if retry > 0 and sys.version_info.major > 2:
tb = e.__traceback__
last_stack = traceback.extract_tb(tb)[-1]
if last_stack.name == '<listcomp>':
# On python 3 list,dict and set comprehensions as well as generator expressions
# have their own local scope, which prevents accessing frame variables in cheetah.
# We can work around this by replacing `$var` with `var`
var_not_found = e.args[0].split("'")[1]
replace_str = 'VFFSL(SL,"%s",True)' % var_not_found
lineno = last_stack.lineno - 1
module_code = t._CHEETAH_generatedModuleCode.splitlines()
module_code[lineno] = module_code[lineno].replace(replace_str, var_not_found)
module_code = "\n".join(module_code)
compiler_class = create_compiler_class(module_code)
return fill_template(template_text=template_text,
context=context,
retry=retry - 1,
compiler_class=compiler_class,
first_exception=first_exception
)
raise first_exception or e
except Exception as e:
if first_exception is None:
first_exception = e
if not futurized:
# Possibly an error caused by attempting to run python 2
# template code on python 3. Run the generated module code
# through futurize and hope for the best.
module_code = t._CHEETAH_generatedModuleCode
module_code = futurize_preprocessor(module_code)
compiler_class = create_compiler_class(module_code)
return fill_template(template_text=template_text,
context=context,
retry=retry,
compiler_class=compiler_class,
first_exception=first_exception,
futurized=True
)
raise first_exception or e
|
def fill_template(template_text,
context=None,
retry=10,
compiler_class=Compiler,
first_exception=None,
futurized=False,
**kwargs):
"""Fill a cheetah template out for specified context.
If template_text is None, an exception will be thrown, if context
is None (the default) - keyword arguments to this function will be used
as the context.
"""
if template_text is None:
raise TypeError("Template text specified as None to fill_template.")
if not context:
context = kwargs
klass = Template.compile(source=template_text, compilerClass=compiler_class)
t = klass(searchList=[context])
try:
return unicodify(t)
except NotFound as e:
if first_exception is None:
first_exception = e
if retry > 0 and sys.version_info.major > 2:
tb = e.__traceback__
last_stack = traceback.extract_tb(tb)[-1]
if last_stack.name == '<listcomp>':
# On python 3 list,dict and set comprehensions as well as generator expressions
# have their own local scope, which prevents accessing frame variables in cheetah.
# We can work around this by replacing `$var` with `var`
var_not_found = e.args[0].split("'")[1]
replace_str = 'VFFSL(SL,"%s",True)' % var_not_found
lineno = last_stack.lineno - 1
module_code = t._CHEETAH_generatedModuleCode.splitlines()
module_code[lineno] = module_code[lineno].replace(replace_str, var_not_found)
module_code = "\n".join(module_code)
compiler_class = create_compiler_class(module_code)
return fill_template(template_text=template_text,
context=context,
retry=retry - 1,
compiler_class=compiler_class,
first_exception=first_exception
)
raise first_exception or e
except Exception as e:
if first_exception is None:
first_exception = e
if not futurized and sys.version_info.major > 2:
# Possibly an error caused by attempting to run python 2
# template code on python 3. Run the generated module code
# through futurize and hope for the best.
module_code = t._CHEETAH_generatedModuleCode
module_code = futurize_preprocessor(module_code)
compiler_class = create_compiler_class(module_code)
return fill_template(template_text=template_text,
context=context,
retry=retry,
compiler_class=compiler_class,
first_exception=first_exception,
futurized=True
)
raise first_exception or e
|
58,025 |
def main():
params = demisto.params() # pragma: no cover
filters: Dict[str, Optional[Union[str, list]]] = build_feed_filters(params) # pragma: no cover
indicators_type: list = argToList(params.get('indicator_type', [])) # pragma: no cover
params['feed_name_to_config'] = create_fetch_configuration(indicators_type, filters, params) # pragma: no cover
PACK_VERSION = get_pack_version() # pragma: no cover
DEMISTO_VERSION = demisto.demistoVersion() # pragma: no cover
DEMISTO_VERSION = f'{DEMISTO_VERSION["version"]}.{DEMISTO_VERSION["buildNumber"]}' # pragma: no cover
params['headers'] = {"Content-Type": "application/json", # pragma: no cover
'auth-token': params.get('api_token').get("password"), # pragma: no cover
'User-Agent': f'AccentureCTI Pack/{PACK_VERSION} Palo Alto XSOAR/{DEMISTO_VERSION}'} # pragma: no cover
feed_main(params, 'ACTI Indicator Feed', 'acti') # pragma: no cover
|
def main(): # pragma: no cover
params = demisto.params()
filters: Dict[str, Optional[Union[str, list]]] = build_feed_filters(params)
indicators_type: list = argToList(params.get('indicator_type', []))
params['feed_name_to_config'] = create_fetch_configuration(indicators_type, filters, params)
PACK_VERSION = get_pack_version()
DEMISTO_VERSION = demisto.demistoVersion()
DEMISTO_VERSION = f'{DEMISTO_VERSION["version"]}.{DEMISTO_VERSION["buildNumber"]}'
params['headers'] = {"Content-Type": "application/json",
'auth-token': params.get('api_token').get("password"),
'User-Agent': f'AccentureCTI Pack/{PACK_VERSION} Palo Alto XSOAR/{DEMISTO_VERSION}'}
feed_main(params, 'ACTI Indicator Feed', 'acti')
|
42,654 |
def trade_from_conversion(trade_a: Dict[str, Any], trade_b: Dict[str, Any]) -> Optional[Trade]:
"""Turn information from a conversion into a trade
Mary raise:
- UnknownAsset due to Asset instantiation
- DeserializationError due to unexpected format of dict entries
- KeyError due to dict entires missing an expected entry
"""
# Check that the status is complete
if trade_a['status'] != 'completed':
return None
# Trade b will represent the asset we are converting to
if trade_b['amount']['amount'].startswith('-'):
trade_a, trade_b = trade_b, trade_a
timestamp = deserialize_timestamp_from_date(trade_a['updated_at'], 'iso8601', 'coinbase')
tx_amount = AssetAmount(abs(deserialize_asset_amount(trade_a['amount']['amount'])))
tx_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp)
native_amount = deserialize_asset_amount(trade_b['amount']['amount'])
native_asset = asset_from_coinbase(trade_b['amount']['currency'], time=timestamp)
amount = tx_amount
# The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency
rate = Price(native_amount / tx_amount)
# Obtain fee amount in the native currency using data from both trades
amount_after_fee = deserialize_asset_amount(trade_b['native_amount']['amount'])
amount_before_fee = deserialize_asset_amount(trade_a['native_amount']['amount'])
# amount_after_fee + amount_before_fee is a negative amount and the fee needs to be positive
conversion_native_fee_amount = abs(amount_after_fee + amount_before_fee)
if ZERO not in (tx_amount, conversion_native_fee_amount, amount_before_fee):
# To get the asset in wich the fee is nominated we pay attention to the creation
# date of each event. As per hour hypothesis the fee is nominated in the asset
# for wich the first transaction part was intialized
time_created_a = deserialize_timestamp_from_date(
date=trade_a['created_at'],
formatstr='iso8601',
location='coinbase',
)
time_created_b = deserialize_timestamp_from_date(
date=trade_b['created_at'],
formatstr='iso8601',
location='coinbase',
)
if time_created_a < time_created_b:
# We have the fee amount in the native currency. To get it in the
# converted asset we have to get the rate
asset_native_rate = tx_amount / abs(amount_before_fee)
fee_amount = Fee(conversion_native_fee_amount * asset_native_rate)
fee_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp)
else:
trade_b_amount = abs(deserialize_asset_amount(trade_b['amount']['amount']))
asset_native_rate = trade_b_amount / abs(amount_after_fee)
fee_amount = Fee(conversion_native_fee_amount * asset_native_rate)
fee_asset = asset_from_coinbase(trade_b['amount']['currency'], time=timestamp)
else:
fee_amount = Fee(ZERO)
fee_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp)
return Trade(
timestamp=timestamp,
location=Location.COINBASE,
# in coinbase you are buying/selling tx_asset for native_asset
base_asset=tx_asset,
quote_asset=native_asset,
trade_type=TradeType.SELL,
amount=amount,
rate=rate,
fee=fee_amount,
fee_currency=fee_asset,
link=str(trade_a['trade']['id']),
)
|
def trade_from_conversion(trade_a: Dict[str, Any], trade_b: Dict[str, Any]) -> Optional[Trade]:
"""Turn information from a conversion into a trade
Mary raise:
- UnknownAsset due to Asset instantiation
- DeserializationError due to unexpected format of dict entries
- KeyError due to dict entires missing an expected entry
"""
# Check that the status is complete
if trade_a['status'] != 'completed':
return None
# Trade b will represent the asset we are converting to
if trade_b['amount']['amount'].startswith('-'):
trade_a, trade_b = trade_b, trade_a
timestamp = deserialize_timestamp_from_date(trade_a['updated_at'], 'iso8601', 'coinbase')
tx_amount = AssetAmount(abs(deserialize_asset_amount(trade_a['amount']['amount'])))
tx_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp)
native_amount = deserialize_asset_amount(trade_b['amount']['amount'])
native_asset = asset_from_coinbase(trade_b['amount']['currency'], time=timestamp)
amount = tx_amount
# The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency
rate = Price(native_amount / tx_amount)
# Obtain fee amount in the native currency using data from both trades
amount_after_fee = deserialize_asset_amount(trade_b['native_amount']['amount'])
amount_before_fee = deserialize_asset_amount(trade_a['native_amount']['amount'])
# amount_after_fee + amount_before_fee is a negative amount and the fee needs to be positive
conversion_native_fee_amount = abs(amount_after_fee + amount_before_fee)
if ZERO not in (tx_amount, conversion_native_fee_amount, amount_before_fee):
# To get the asset in wich the fee is nominated we pay attention to the creation
# date of each event. As per our hypothesis the fee is nominated in the asset
# for wich the first transaction part was intialized
time_created_a = deserialize_timestamp_from_date(
date=trade_a['created_at'],
formatstr='iso8601',
location='coinbase',
)
time_created_b = deserialize_timestamp_from_date(
date=trade_b['created_at'],
formatstr='iso8601',
location='coinbase',
)
if time_created_a < time_created_b:
# We have the fee amount in the native currency. To get it in the
# converted asset we have to get the rate
asset_native_rate = tx_amount / abs(amount_before_fee)
fee_amount = Fee(conversion_native_fee_amount * asset_native_rate)
fee_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp)
else:
trade_b_amount = abs(deserialize_asset_amount(trade_b['amount']['amount']))
asset_native_rate = trade_b_amount / abs(amount_after_fee)
fee_amount = Fee(conversion_native_fee_amount * asset_native_rate)
fee_asset = asset_from_coinbase(trade_b['amount']['currency'], time=timestamp)
else:
fee_amount = Fee(ZERO)
fee_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp)
return Trade(
timestamp=timestamp,
location=Location.COINBASE,
# in coinbase you are buying/selling tx_asset for native_asset
base_asset=tx_asset,
quote_asset=native_asset,
trade_type=TradeType.SELL,
amount=amount,
rate=rate,
fee=fee_amount,
fee_currency=fee_asset,
link=str(trade_a['trade']['id']),
)
|
4,563 |
def plot_surf(surf_mesh, surf_map=None, bg_map=None,
hemi='left', view='lateral', cmap=None, colorbar=False,
avg_method='mean', threshold=None, alpha='auto',
bg_on_data=False, darkness=1, vmin=None, vmax=None,
cbar_vmin=None, cbar_vmax=None, cbar_tick_format='%.2g',
title=None, output_file=None, axes=None, figure=None, **kwargs):
"""Plotting of surfaces with optional background and data
.. versionadded:: 0.3
Parameters
----------
surf_mesh : str or list of two numpy.ndarray or Mesh
Surface mesh geometry, can be a file (valid formats are
.gii or Freesurfer specific files such as .orig, .pial,
.sphere, .white, .inflated) or
a list of two Numpy arrays, the first containing the x-y-z coordinates
of the mesh vertices, the second containing the indices
(into coords) of the mesh faces, or a Mesh object with
"coordinates" and "faces" attributes.
surf_map : str or numpy.ndarray, optional
Data to be displayed on the surface mesh. Can be a file (valid formats
are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific files such as
.thickness, .curv, .sulc, .annot, .label) or
a Numpy array with a value for each vertex of the surf_mesh.
bg_map : Surface data object (to be defined), optional
Background image to be plotted on the mesh underneath the
surf_data in greyscale, most likely a sulcal depth map for
realistic shading.
hemi : {'left', 'right'}, optional
Hemisphere to display. Default='left'.
view : {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'}, optional
View of the surface that is rendered. Default='lateral'.
cmap : matplotlib colormap, str or colormap object, optional
To use for plotting of the stat_map. Either a string
which is a name of a matplotlib colormap, or a matplotlib
colormap object. If None, matplotlib default will be chosen.
colorbar : bool, optional
If True, a colorbar of surf_map is displayed. Default=False.
avg_method : {'mean', 'median', 'min', 'max', custom function}, optional
How to average vertex values to derive the face value,
mean results in smooth, median in sharp boundaries,
min or max for sparse matrices.
You can also pass a custom function which will be
executed though `numpy.apply_along_axis`.
Here is an example of a custom function:
.. code-block:: python
def custom_function(vertices):
return vertices[0] * vertices[1] * vertices[2].
Default='mean'.
threshold : a number or None, default is None.
If None is given, the image is not thresholded.
If a number is given, it is used to threshold the image, values
below the threshold (in absolute value) are plotted as transparent.
alpha : float or 'auto', optional
Alpha level of the mesh (not surf_data).
If 'auto' is chosen, alpha will default to .5 when no bg_map
is passed and to 1 if a bg_map is passed.
Default='auto'.
bg_on_data : bool, optional
If True, and a bg_map is specified, the surf_data data is multiplied
by the background image, so that e.g. sulcal depth is visible beneath
the surf_data.
NOTE: that this non-uniformly changes the surf_data values according
to e.g the sulcal depth.
Default=False.
darkness : float between 0 and 1, optional
Specifying the darkness of the background image.
1 indicates that the original values of the background are used.
.5 indicates the background values are reduced by half before being
applied. Default=1.
vmin, vmax : float, float, optional
Lower / upper bound to plot surf_data values.
If None, the values will be set to min/max of the data.
Default values are None.
cbar_vmin, cbar_vmax : float, float, optional
Lower / upper bounds for the colorbar.
If None, the values will be set from the data.
Default values are None.
cbar_tick_format : str, optional
Controls how to format the tick labels of the colorbar.
Ex: use "%i" to display as integers.
Default='%.2g' for scientific notation.
title : str, optional
Figure title.
output_file : str, or None, optional
The name of an image file to export plot to. Valid extensions
are .png, .pdf, .svg. If output_file is not None, the plot
is saved to a file, and the display is closed.
axes : instance of matplotlib axes, None, optional
The axes instance to plot to. The projection must be '3d' (e.g.,
`figure, axes = plt.subplots(subplot_kw={'projection': '3d'})`,
where axes should be passed.).
If None, a new axes is created.
figure : instance of matplotlib figure, None, optional
The figure instance to plot to. If None, a new figure is created.
See Also
--------
nilearn.datasets.fetch_surf_fsaverage : For surface data object to be
used as background map for this plotting function.
nilearn.plotting.plot_surf_roi : For plotting statistical maps on brain
surfaces.
nilearn.plotting.plot_surf_stat_map : for plotting statistical maps on
brain surfaces.
nilearn.surface.vol_to_surf : For info on the generation of surfaces.
"""
_default_figsize = [6, 4]
# load mesh and derive axes limits
mesh = load_surf_mesh(surf_mesh)
coords, faces = mesh[0], mesh[1]
limits = [coords.min(), coords.max()]
# set view
if hemi == 'right':
if view == 'lateral':
elev, azim = 0, 0
elif view == 'medial':
elev, azim = 0, 180
elif view == 'dorsal':
elev, azim = 90, 0
elif view == 'ventral':
elev, azim = 270, 0
elif view == 'anterior':
elev, azim = 0, 90
elif view == 'posterior':
elev, azim = 0, 270
else:
raise ValueError('view must be one of lateral, medial, '
'dorsal, ventral, anterior, or posterior')
elif hemi == 'left':
if view == 'medial':
elev, azim = 0, 0
elif view == 'lateral':
elev, azim = 0, 180
elif view == 'dorsal':
elev, azim = 90, 0
elif view == 'ventral':
elev, azim = 270, 0
elif view == 'anterior':
elev, azim = 0, 90
elif view == 'posterior':
elev, azim = 0, 270
else:
raise ValueError('view must be one of lateral, medial, '
'dorsal, ventral, anterior, or posterior')
else:
raise ValueError('hemi must be one of right or left')
# set alpha if in auto mode
if alpha == 'auto':
if bg_map is None:
alpha = .5
else:
alpha = 1
# if no cmap is given, set to matplotlib default
if cmap is None:
cmap = plt.cm.get_cmap(plt.rcParamsDefault['image.cmap'])
else:
# if cmap is given as string, translate to matplotlib cmap
if isinstance(cmap, str):
cmap = plt.cm.get_cmap(cmap)
figsize = _default_figsize
# Leave space for colorbar
if colorbar:
figsize[0] += .7
# initiate figure and 3d axes
if axes is None:
if figure is None:
figure = plt.figure(figsize=figsize)
axes = Axes3D(figure, rect=[0, 0, 1, 1],
xlim=limits, ylim=limits)
else:
if figure is None:
figure = axes.get_figure()
figure.set_size_inches(*figsize)
axes.set_xlim(*limits)
axes.set_ylim(*limits)
axes.view_init(elev=elev, azim=azim)
axes.set_axis_off()
# plot mesh without data
p3dcollec = axes.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],
triangles=faces, linewidth=0.,
antialiased=False,
color='white')
# reduce viewing distance to remove space around mesh
axes.dist = 8
# set_facecolors function of Poly3DCollection is used as passing the
# facecolors argument to plot_trisurf does not seem to work
face_colors = np.ones((faces.shape[0], 4))
if bg_map is None:
bg_data = np.ones(coords.shape[0]) * 0.5
else:
bg_data = load_surf_data(bg_map)
if bg_data.shape[0] != coords.shape[0]:
raise ValueError('The bg_map does not have the same number '
'of vertices as the mesh.')
bg_faces = np.mean(bg_data[faces], axis=1)
if bg_faces.min() != bg_faces.max():
bg_faces = bg_faces - bg_faces.min()
bg_faces = bg_faces / bg_faces.max()
# control background darkness
bg_faces *= darkness
face_colors = plt.cm.gray_r(bg_faces)
# modify alpha values of background
face_colors[:, 3] = alpha * face_colors[:, 3]
# should it be possible to modify alpha of surf data as well?
if surf_map is not None:
surf_map_data = load_surf_data(surf_map)
if surf_map_data.ndim != 1:
raise ValueError('surf_map can only have one dimension but has'
'%i dimensions' % surf_map_data.ndim)
if surf_map_data.shape[0] != coords.shape[0]:
raise ValueError('The surf_map does not have the same number '
'of vertices as the mesh.')
# create face values from vertex values by selected avg methods
if avg_method == 'mean':
surf_map_faces = np.mean(surf_map_data[faces], axis=1)
elif avg_method == 'median':
surf_map_faces = np.median(surf_map_data[faces], axis=1)
elif avg_method == 'min':
surf_map_faces = np.min(surf_map_data[faces], axis=1)
elif avg_method == 'max':
surf_map_faces = np.max(surf_map_data[faces], axis=1)
elif callable(avg_method):
surf_map_faces = np.apply_along_axis(
avg_method, 1, surf_map_data[faces]
)
## check that surf_map_faces has the same length as face_colors
if surf_map_faces.shape != (face_colors.shape[0],):
raise ValueError(
'Array computed with the custom function '
'from avg_method does not have the correct shape: '
'{} != {}'.format(
surf_map_faces.shape[0],
face_colors.shape[0]
)
)
## check that dtype is either int or float
if not (
"int" in str(surf_map_faces.dtype) or
"float" in str(surf_map_faces.dtype)
):
raise ValueError(
'Array computed with the custom function '
'from avg_method should be an array of numbers '
'(int or float)'
)
else:
raise ValueError(
"avg_method should be either "
"['mean', 'median', 'max', 'min'] "
"or a custom function"
)
# if no vmin/vmax are passed figure them out from data
if vmin is None:
vmin = np.nanmin(surf_map_faces)
if vmax is None:
vmax = np.nanmax(surf_map_faces)
# treshold if indicated
if threshold is None:
# If no thresholding and nans, filter them out
kept_indices = np.where(
np.logical_not(
np.isnan(surf_map_faces)))[0]
else:
kept_indices = np.where(np.abs(surf_map_faces) >= threshold)[0]
surf_map_faces = surf_map_faces - vmin
surf_map_faces = surf_map_faces / (vmax - vmin)
# multiply data with background if indicated
if bg_on_data:
face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])\
* face_colors[kept_indices]
else:
face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])
if colorbar:
our_cmap = get_cmap(cmap)
norm = Normalize(vmin=vmin, vmax=vmax)
# Default number of ticks is 5...
nb_ticks = 5
# ...unless we are dealing with integers with a small range
# in this case, we reduce the number of ticks
if cbar_tick_format == "%i" and vmax - vmin < nb_ticks:
ticks = np.arange(vmin, vmax + 1)
nb_ticks = len(ticks)
else:
ticks = np.linspace(vmin, vmax, nb_ticks)
bounds = np.linspace(vmin, vmax, our_cmap.N)
if threshold is not None:
cmaplist = [our_cmap(i) for i in range(our_cmap.N)]
# set colors to grey for absolute values < threshold
istart = int(norm(-threshold, clip=True) * (our_cmap.N - 1))
istop = int(norm(threshold, clip=True) * (our_cmap.N - 1))
for i in range(istart, istop):
cmaplist[i] = (0.5, 0.5, 0.5, 1.)
our_cmap = LinearSegmentedColormap.from_list(
'Custom cmap', cmaplist, our_cmap.N)
# we need to create a proxy mappable
proxy_mappable = ScalarMappable(cmap=our_cmap, norm=norm)
proxy_mappable.set_array(surf_map_faces)
cax, kw = make_axes(axes, location='right', fraction=.1,
shrink=.6, pad=.0)
cbar = figure.colorbar(
proxy_mappable, cax=cax, ticks=ticks,
boundaries=bounds, spacing='proportional',
format=cbar_tick_format, orientation='vertical')
_crop_colorbar(cbar, cbar_vmin, cbar_vmax)
p3dcollec.set_facecolors(face_colors)
if title is not None:
axes.set_title(title, position=(.5, .95))
# save figure if output file is given
if output_file is not None:
figure.savefig(output_file)
plt.close(figure)
else:
return figure
|
def plot_surf(surf_mesh, surf_map=None, bg_map=None,
hemi='left', view='lateral', cmap=None, colorbar=False,
avg_method='mean', threshold=None, alpha='auto',
bg_on_data=False, darkness=1, vmin=None, vmax=None,
cbar_vmin=None, cbar_vmax=None, cbar_tick_format='%.2g',
title=None, output_file=None, axes=None, figure=None, **kwargs):
"""Plotting of surfaces with optional background and data
.. versionadded:: 0.3
Parameters
----------
surf_mesh : str or list of two numpy.ndarray or Mesh
Surface mesh geometry, can be a file (valid formats are
.gii or Freesurfer specific files such as .orig, .pial,
.sphere, .white, .inflated) or
a list of two Numpy arrays, the first containing the x-y-z coordinates
of the mesh vertices, the second containing the indices
(into coords) of the mesh faces, or a Mesh object with
"coordinates" and "faces" attributes.
surf_map : str or numpy.ndarray, optional
Data to be displayed on the surface mesh. Can be a file (valid formats
are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific files such as
.thickness, .curv, .sulc, .annot, .label) or
a Numpy array with a value for each vertex of the surf_mesh.
bg_map : Surface data object (to be defined), optional
Background image to be plotted on the mesh underneath the
surf_data in greyscale, most likely a sulcal depth map for
realistic shading.
hemi : {'left', 'right'}, optional
Hemisphere to display. Default='left'.
view : {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'}, optional
View of the surface that is rendered. Default='lateral'.
cmap : matplotlib colormap, str or colormap object, optional
To use for plotting of the stat_map. Either a string
which is a name of a matplotlib colormap, or a matplotlib
colormap object. If None, matplotlib default will be chosen.
colorbar : bool, optional
If True, a colorbar of surf_map is displayed. Default=False.
avg_method : {'mean', 'median', 'min', 'max', custom function}, optional
How to average vertex values to derive the face value,
mean results in smooth, median in sharp boundaries,
min or max for sparse matrices.
You can also pass a custom function which will be
executed though `numpy.apply_along_axis`.
Here is an example of a custom function:
.. code-block:: python
def custom_function(vertices):
return vertices[0] * vertices[1] * vertices[2]
Default='mean'.
threshold : a number or None, default is None.
If None is given, the image is not thresholded.
If a number is given, it is used to threshold the image, values
below the threshold (in absolute value) are plotted as transparent.
alpha : float or 'auto', optional
Alpha level of the mesh (not surf_data).
If 'auto' is chosen, alpha will default to .5 when no bg_map
is passed and to 1 if a bg_map is passed.
Default='auto'.
bg_on_data : bool, optional
If True, and a bg_map is specified, the surf_data data is multiplied
by the background image, so that e.g. sulcal depth is visible beneath
the surf_data.
NOTE: that this non-uniformly changes the surf_data values according
to e.g the sulcal depth.
Default=False.
darkness : float between 0 and 1, optional
Specifying the darkness of the background image.
1 indicates that the original values of the background are used.
.5 indicates the background values are reduced by half before being
applied. Default=1.
vmin, vmax : float, float, optional
Lower / upper bound to plot surf_data values.
If None, the values will be set to min/max of the data.
Default values are None.
cbar_vmin, cbar_vmax : float, float, optional
Lower / upper bounds for the colorbar.
If None, the values will be set from the data.
Default values are None.
cbar_tick_format : str, optional
Controls how to format the tick labels of the colorbar.
Ex: use "%i" to display as integers.
Default='%.2g' for scientific notation.
title : str, optional
Figure title.
output_file : str, or None, optional
The name of an image file to export plot to. Valid extensions
are .png, .pdf, .svg. If output_file is not None, the plot
is saved to a file, and the display is closed.
axes : instance of matplotlib axes, None, optional
The axes instance to plot to. The projection must be '3d' (e.g.,
`figure, axes = plt.subplots(subplot_kw={'projection': '3d'})`,
where axes should be passed.).
If None, a new axes is created.
figure : instance of matplotlib figure, None, optional
The figure instance to plot to. If None, a new figure is created.
See Also
--------
nilearn.datasets.fetch_surf_fsaverage : For surface data object to be
used as background map for this plotting function.
nilearn.plotting.plot_surf_roi : For plotting statistical maps on brain
surfaces.
nilearn.plotting.plot_surf_stat_map : for plotting statistical maps on
brain surfaces.
nilearn.surface.vol_to_surf : For info on the generation of surfaces.
"""
_default_figsize = [6, 4]
# load mesh and derive axes limits
mesh = load_surf_mesh(surf_mesh)
coords, faces = mesh[0], mesh[1]
limits = [coords.min(), coords.max()]
# set view
if hemi == 'right':
if view == 'lateral':
elev, azim = 0, 0
elif view == 'medial':
elev, azim = 0, 180
elif view == 'dorsal':
elev, azim = 90, 0
elif view == 'ventral':
elev, azim = 270, 0
elif view == 'anterior':
elev, azim = 0, 90
elif view == 'posterior':
elev, azim = 0, 270
else:
raise ValueError('view must be one of lateral, medial, '
'dorsal, ventral, anterior, or posterior')
elif hemi == 'left':
if view == 'medial':
elev, azim = 0, 0
elif view == 'lateral':
elev, azim = 0, 180
elif view == 'dorsal':
elev, azim = 90, 0
elif view == 'ventral':
elev, azim = 270, 0
elif view == 'anterior':
elev, azim = 0, 90
elif view == 'posterior':
elev, azim = 0, 270
else:
raise ValueError('view must be one of lateral, medial, '
'dorsal, ventral, anterior, or posterior')
else:
raise ValueError('hemi must be one of right or left')
# set alpha if in auto mode
if alpha == 'auto':
if bg_map is None:
alpha = .5
else:
alpha = 1
# if no cmap is given, set to matplotlib default
if cmap is None:
cmap = plt.cm.get_cmap(plt.rcParamsDefault['image.cmap'])
else:
# if cmap is given as string, translate to matplotlib cmap
if isinstance(cmap, str):
cmap = plt.cm.get_cmap(cmap)
figsize = _default_figsize
# Leave space for colorbar
if colorbar:
figsize[0] += .7
# initiate figure and 3d axes
if axes is None:
if figure is None:
figure = plt.figure(figsize=figsize)
axes = Axes3D(figure, rect=[0, 0, 1, 1],
xlim=limits, ylim=limits)
else:
if figure is None:
figure = axes.get_figure()
figure.set_size_inches(*figsize)
axes.set_xlim(*limits)
axes.set_ylim(*limits)
axes.view_init(elev=elev, azim=azim)
axes.set_axis_off()
# plot mesh without data
p3dcollec = axes.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],
triangles=faces, linewidth=0.,
antialiased=False,
color='white')
# reduce viewing distance to remove space around mesh
axes.dist = 8
# set_facecolors function of Poly3DCollection is used as passing the
# facecolors argument to plot_trisurf does not seem to work
face_colors = np.ones((faces.shape[0], 4))
if bg_map is None:
bg_data = np.ones(coords.shape[0]) * 0.5
else:
bg_data = load_surf_data(bg_map)
if bg_data.shape[0] != coords.shape[0]:
raise ValueError('The bg_map does not have the same number '
'of vertices as the mesh.')
bg_faces = np.mean(bg_data[faces], axis=1)
if bg_faces.min() != bg_faces.max():
bg_faces = bg_faces - bg_faces.min()
bg_faces = bg_faces / bg_faces.max()
# control background darkness
bg_faces *= darkness
face_colors = plt.cm.gray_r(bg_faces)
# modify alpha values of background
face_colors[:, 3] = alpha * face_colors[:, 3]
# should it be possible to modify alpha of surf data as well?
if surf_map is not None:
surf_map_data = load_surf_data(surf_map)
if surf_map_data.ndim != 1:
raise ValueError('surf_map can only have one dimension but has'
'%i dimensions' % surf_map_data.ndim)
if surf_map_data.shape[0] != coords.shape[0]:
raise ValueError('The surf_map does not have the same number '
'of vertices as the mesh.')
# create face values from vertex values by selected avg methods
if avg_method == 'mean':
surf_map_faces = np.mean(surf_map_data[faces], axis=1)
elif avg_method == 'median':
surf_map_faces = np.median(surf_map_data[faces], axis=1)
elif avg_method == 'min':
surf_map_faces = np.min(surf_map_data[faces], axis=1)
elif avg_method == 'max':
surf_map_faces = np.max(surf_map_data[faces], axis=1)
elif callable(avg_method):
surf_map_faces = np.apply_along_axis(
avg_method, 1, surf_map_data[faces]
)
## check that surf_map_faces has the same length as face_colors
if surf_map_faces.shape != (face_colors.shape[0],):
raise ValueError(
'Array computed with the custom function '
'from avg_method does not have the correct shape: '
'{} != {}'.format(
surf_map_faces.shape[0],
face_colors.shape[0]
)
)
## check that dtype is either int or float
if not (
"int" in str(surf_map_faces.dtype) or
"float" in str(surf_map_faces.dtype)
):
raise ValueError(
'Array computed with the custom function '
'from avg_method should be an array of numbers '
'(int or float)'
)
else:
raise ValueError(
"avg_method should be either "
"['mean', 'median', 'max', 'min'] "
"or a custom function"
)
# if no vmin/vmax are passed figure them out from data
if vmin is None:
vmin = np.nanmin(surf_map_faces)
if vmax is None:
vmax = np.nanmax(surf_map_faces)
# treshold if indicated
if threshold is None:
# If no thresholding and nans, filter them out
kept_indices = np.where(
np.logical_not(
np.isnan(surf_map_faces)))[0]
else:
kept_indices = np.where(np.abs(surf_map_faces) >= threshold)[0]
surf_map_faces = surf_map_faces - vmin
surf_map_faces = surf_map_faces / (vmax - vmin)
# multiply data with background if indicated
if bg_on_data:
face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])\
* face_colors[kept_indices]
else:
face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])
if colorbar:
our_cmap = get_cmap(cmap)
norm = Normalize(vmin=vmin, vmax=vmax)
# Default number of ticks is 5...
nb_ticks = 5
# ...unless we are dealing with integers with a small range
# in this case, we reduce the number of ticks
if cbar_tick_format == "%i" and vmax - vmin < nb_ticks:
ticks = np.arange(vmin, vmax + 1)
nb_ticks = len(ticks)
else:
ticks = np.linspace(vmin, vmax, nb_ticks)
bounds = np.linspace(vmin, vmax, our_cmap.N)
if threshold is not None:
cmaplist = [our_cmap(i) for i in range(our_cmap.N)]
# set colors to grey for absolute values < threshold
istart = int(norm(-threshold, clip=True) * (our_cmap.N - 1))
istop = int(norm(threshold, clip=True) * (our_cmap.N - 1))
for i in range(istart, istop):
cmaplist[i] = (0.5, 0.5, 0.5, 1.)
our_cmap = LinearSegmentedColormap.from_list(
'Custom cmap', cmaplist, our_cmap.N)
# we need to create a proxy mappable
proxy_mappable = ScalarMappable(cmap=our_cmap, norm=norm)
proxy_mappable.set_array(surf_map_faces)
cax, kw = make_axes(axes, location='right', fraction=.1,
shrink=.6, pad=.0)
cbar = figure.colorbar(
proxy_mappable, cax=cax, ticks=ticks,
boundaries=bounds, spacing='proportional',
format=cbar_tick_format, orientation='vertical')
_crop_colorbar(cbar, cbar_vmin, cbar_vmax)
p3dcollec.set_facecolors(face_colors)
if title is not None:
axes.set_title(title, position=(.5, .95))
# save figure if output file is given
if output_file is not None:
figure.savefig(output_file)
plt.close(figure)
else:
return figure
|
5,413 |
def test_rename():
"""
Test if the source file exists on the system,
rename it to the named file.
"""
name = "/tmp/salt"
source = "/tmp/salt/salt"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
comt = "Must provide name to file.rename"
ret.update({"comment": comt, "name": ""})
assert filestate.rename("", source) == ret
mock_t = MagicMock(return_value=True)
mock_f = MagicMock(return_value=False)
mock_lex = MagicMock(side_effect=[False, True, True])
with patch.object(os.path, "isabs", mock_f):
comt = "Specified file {} is not an absolute path".format(name)
ret.update({"comment": comt, "name": name})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(return_value=False)
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
comt = 'Source file "{}" has already been moved out of ' "place".format(
source
)
ret.update({"comment": comt, "result": True})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, True, True])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
comt = 'The target file "{}" exists and will not be ' "overwritten".format(
name
)
ret.update({"comment": comt, "result": True})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, True, True])
mock_rem = MagicMock(side_effect=IOError)
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.dict(filestate.__opts__, {"test": False}):
comt = 'Failed to delete "{}" in preparation for ' "forced move".format(
name
)
with patch.dict(filestate.__salt__, {"file.remove": mock_rem}):
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source, force=True) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.dict(filestate.__opts__, {"test": True}):
comt = 'File "{}" is set to be moved to "{}"'.format(source, name)
ret.update({"name": name, "comment": comt, "result": None})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
comt = "The target directory /tmp is not present"
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_t):
with patch.object(os.path, "islink", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
with patch.object(
shutil, "move", MagicMock(side_effect=IOError)
):
comt = 'Failed to move "{}" to "{}"'.format(source, name)
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_t):
with patch.object(os.path, "islink", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
with patch.object(shutil, "move", MagicMock()):
comt = 'Moved "{}" to "{}"'.format(source, name)
ret.update(
{
"name": name,
"comment": comt,
"result": True,
"changes": {name: source},
}
)
assert filestate.rename(name, source) == ret
|
def test_rename():
"""
Test if the source file exists on the system,
rename it to the named file.
"""
name = "/tmp/salt"
source = "/tmp/salt/salt"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
comt = "Must provide name to file.rename"
ret.update({"comment": comt, "name": ""})
assert filestate.rename("", source) == ret
mock_t = MagicMock(return_value=True)
mock_f = MagicMock(return_value=False)
mock_lex = MagicMock(side_effect=[False, True, True])
with patch.object(os.path, "isabs", mock_f):
comt = "Specified file {} is not an absolute path".format(name)
ret.update({"comment": comt, "name": name})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(return_value=False)
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
comt = 'Source file "{}" has already been moved out of ' "place".format(
source
)
ret.update({"comment": comt, "result": True})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, True, True])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
comt = 'The target file "{}" exists and will not be ' "overwritten".format(
name
)
ret.update({"comment": comt, "result": True})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, True, True])
mock_rem = MagicMock(side_effect=IOError)
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.dict(filestate.__opts__, {"test": False}):
comt = 'Failed to delete "{}" in preparation for forced move'.format(
name
)
with patch.dict(filestate.__salt__, {"file.remove": mock_rem}):
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source, force=True) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.dict(filestate.__opts__, {"test": True}):
comt = 'File "{}" is set to be moved to "{}"'.format(source, name)
ret.update({"name": name, "comment": comt, "result": None})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
comt = "The target directory /tmp is not present"
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_t):
with patch.object(os.path, "islink", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
with patch.object(
shutil, "move", MagicMock(side_effect=IOError)
):
comt = 'Failed to move "{}" to "{}"'.format(source, name)
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_t):
with patch.object(os.path, "islink", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
with patch.object(shutil, "move", MagicMock()):
comt = 'Moved "{}" to "{}"'.format(source, name)
ret.update(
{
"name": name,
"comment": comt,
"result": True,
"changes": {name: source},
}
)
assert filestate.rename(name, source) == ret
|
9,056 |
def subreddit_info(bot, trigger, match, commanded=False):
"""Shows information about the given subreddit"""
match_lower = match.lower()
if match_lower in ['all', 'popular']:
message = '{link}{nsfw} | {public_description}'
nsfw = ' ' + bold(color('[Possible NSFW]', colors.ORANGE))
link = 'https://reddit.com/r/' + match_lower
public_description = ''
if match_lower == 'all':
public_description = ("Today's top content from hundreds of "
'thousands of Reddit communities.')
elif match_lower == 'popular':
public_description = ('The top trending content from some of '
"Reddit\'s most popular communities")
message = message.format(
link=link, nsfw=nsfw, public_description=public_description)
bot.say(message)
return plugin.NOLIMIT
r = bot.memory['reddit_praw']
try:
r.subreddits.search_by_name(match, exact=True)
except prawcore.exceptions.NotFound:
if commanded:
bot.reply('No such subreddit.')
# Fail silently if it wasn't an explicit command.
return plugin.NOLIMIT
try:
s = r.subreddit(match)
s.subreddit_type
except prawcore.exceptions.Forbidden:
bot.reply('r/' + match + ' appears to be a private subreddit!')
return plugin.NOLIMIT
except prawcore.exceptions.NotFound:
bot.reply('r/' + match + ' appears to be a banned subreddit!')
return plugin.NOLIMIT
link = 'https://reddit.com/r/' + s.display_name
created = get_time_created(bot, trigger, s.created_utc)
message = ('{link}{nsfw} | {subscribers} subscribers | '
'Created at {created} | {public_description}')
nsfw = ''
if s.over18:
nsfw += ' ' + bold(color('[NSFW]', colors.RED))
sfw = bot.db.get_channel_value(trigger.sender, 'sfw')
if sfw:
link = '(link hidden)'
bot.kick(
trigger.nick, trigger.sender,
'Linking to NSFW content in a SFW channel.'
)
message = message.format(
link=link, nsfw=nsfw, subscribers='{:,}'.format(s.subscribers),
created=created, public_description=s.public_description)
bot.say(message, truncation=' […]')
|
def subreddit_info(bot, trigger, match, commanded=False):
"""Shows information about the given subreddit"""
match_lower = match.lower()
if match_lower in ['all', 'popular']:
message = '{link}{nsfw} | {public_description}'
nsfw = ' ' + bold(color('[Possible NSFW]', colors.ORANGE))
link = 'https://reddit.com/r/' + match_lower
public_description = ''
if match_lower == 'all':
public_description = ("Today's top content from hundreds of "
'thousands of Reddit communities.')
elif match_lower == 'popular':
public_description = ('The top trending content from some of '
"Reddit's most popular communities")
message = message.format(
link=link, nsfw=nsfw, public_description=public_description)
bot.say(message)
return plugin.NOLIMIT
r = bot.memory['reddit_praw']
try:
r.subreddits.search_by_name(match, exact=True)
except prawcore.exceptions.NotFound:
if commanded:
bot.reply('No such subreddit.')
# Fail silently if it wasn't an explicit command.
return plugin.NOLIMIT
try:
s = r.subreddit(match)
s.subreddit_type
except prawcore.exceptions.Forbidden:
bot.reply('r/' + match + ' appears to be a private subreddit!')
return plugin.NOLIMIT
except prawcore.exceptions.NotFound:
bot.reply('r/' + match + ' appears to be a banned subreddit!')
return plugin.NOLIMIT
link = 'https://reddit.com/r/' + s.display_name
created = get_time_created(bot, trigger, s.created_utc)
message = ('{link}{nsfw} | {subscribers} subscribers | '
'Created at {created} | {public_description}')
nsfw = ''
if s.over18:
nsfw += ' ' + bold(color('[NSFW]', colors.RED))
sfw = bot.db.get_channel_value(trigger.sender, 'sfw')
if sfw:
link = '(link hidden)'
bot.kick(
trigger.nick, trigger.sender,
'Linking to NSFW content in a SFW channel.'
)
message = message.format(
link=link, nsfw=nsfw, subscribers='{:,}'.format(s.subscribers),
created=created, public_description=s.public_description)
bot.say(message, truncation=' […]')
|
45,735 |
def forecast(
precip,
velocity,
timesteps,
feature_method="blob",
max_num_features=25,
feature_kwargs=None,
ari_order=1,
kernel_type="anisotropic",
localization_window_radius=None,
errdist_window_radius=None,
acf_window_radius=None,
extrap_method="semilagrangian",
extrap_kwargs=None,
add_perturbations=True,
pert_thrs=(0.5, 1.0),
n_ens_members=10,
vel_pert_method=None,
vel_pert_kwargs=None,
kmperpixel=None,
timestep=None,
seed=None,
num_workers=1,
use_multiprocessing=False,
measure_time=False,
callback=None,
return_output=True,
):
"""
Generate a deterministic or ensemble nowcast by using the Lagrangian
INtegro-Difference equation model with Autoregression (LINDA) model.
Parameters
----------
precip: array_like
Array of shape (ari_order + 2, m, n) containing the input rain rate
or reflectivity fields (in linear scale) ordered by timestamp from
oldest to newest. The time steps between the inputs are assumed to be
regular.
velocity: array_like
Array of shape (2, m, n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs.
timesteps: int
Number of time steps to forecast.
feature_method: {'blob', 'domain' 'shitomasi'}
Feature detection method:
+-------------------+-----------------------------------------------------+
| Method name | Description |
+===================+=====================================================+
| blob | Laplacian of Gaussian (LoG) blob detector |
| | implemented in scikit-image |
+-------------------+-----------------------------------------------------+
| domain | no feature detection, the model is applied over the |
| | whole domain without localization |
+-------------------+-----------------------------------------------------+
| shitomasi | Shi-Tomasi corner detector implemented in OpenCV |
+-------------------+-----------------------------------------------------+
Default: 'blob'
max_num_features: int, optional
Maximum number of features to use. It is recommended to set this between
20 and 50, which gives a good tradeoff between localization and
computation time. Default: 25
feature_kwargs: dict, optional
Keyword arguments that are passed as ``**kwargs`` for the feature detector.
See :py:mod:`pysteps.feature.blob` and :py:mod:`pysteps.feature.shitomasi`.
ari_order: {1, 2}, optional
The order of the ARI(p, 1) model. Default: 1
kernel_type: {"anisotropic", "isotropic"}, optional
The type of the kernel. Default: 'anisotropic'
localization_window_radius: float, optional
The standard deviation of the Gaussian localization window.
Default: 0.2 * min(m, n)
errdist_window_radius: float, optional
The standard deviation of the Gaussian window for estimating the
forecast error distribution. Default: 0.15 * min(m, n)
acf_window_radius: float, optional
The standard deviation of the Gaussian window for estimating the
forecast error ACF. Default: 0.25 * min(m, n)
extrap_method: str, optional
The extrapolation method to use. See the documentation of
:py:mod:`pysteps.extrapolation.interface`. Default: 'semilagrangian'
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See :py:mod:`pysteps.extrapolation.interface`.
add_perturbations: bool
Set to False to disable perturbations and generate a single
deterministic nowcast. Default: True
pert_thrs: float
Two-element tuple containing the threshold values for estimating the
perturbation parameters (mm/h). Default: (0.5, 1.0)
n_ens_members: int, optional
The number of ensemble members to generate. Default: 10
vel_pert_method: {'bps', None}, optional
Name of the generator to use for perturbing the advection field. See
:py:mod:`pysteps.noise.interface`. Default: None
vel_pert_kwargs: dict, optional
Optional dictionary containing keyword arguments 'p_par' and 'p_perp'
for the initializer of the velocity perturbator. The choice of the
optimal parameters depends on the domain and the used optical flow
method. For the default values and parameters optimized for different
domains, see :py:func:`pysteps.nowcasts.steps.forecast`.
kmperpixel: float, optional
Spatial resolution of the input data (kilometers/pixel). Required if
vel_pert_method is not None.
timestep: float, optional
Time step of the motion vectors (minutes). Required if vel_pert_method
is not None.
seed: int, optional
Optional seed for the random generators.
num_workers: int, optional
The number of workers to use for parallel computations. Applicable if
dask is installed. Default: 1
use_multiprocessing: bool, optional
Set to True to improve the performance of certain parallelized parts of
the code. If set to True, the main script calling linda.forecast must
be enclosed within the 'if __name__ == "__main__":' block.
Default: False
measure_time: bool, optional
If set to True, measure, print and return the computation time.
Default: False
callback: function, optional
Optional function that is called after computation of each time step of
the nowcast. The function takes one argument: a three-dimensional array
of shape (n_ens_members,h,w), where h and w are the height and width
of the input precipitation fields, respectively. This can be used, for
instance, writing the outputs into files. Default: None
return_output: bool, optional
Set to False to disable returning the outputs as numpy arrays. This can
save memory if the intermediate results are written to output files
using the callback function. Default: True
Returns
-------
out: numpy.ndarray
A four-dimensional array of shape (n_ens_members, timesteps, m, n)
containing a time series of forecast precipitation fields for each
ensemble member. If add_perturbations is False, the first dimension is
dropped. The time series starts from t0 + timestep, where timestep is
taken from the input fields. If measure_time is True, the return value
is a three-element tuple containing the nowcast array, the initialization
time of the nowcast generator and the time used in the main loop
(seconds). If return_output is set to False, a single None value is
returned instead.
Notes
-----
It is recommended to choose the feature detector parameters so that the
number of features is around 20-40. This gives a good tradeoff between
localization and computation time.
It is highly recommented to set num_workers>1 to reduce computation time.
In this case, it is advisable to disable OpenMP by setting the environment
variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many
simultaneous threads.
"""
_check_inputs(precip, velocity, timesteps, ari_order)
if feature_kwargs is None:
feature_kwargs = dict()
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if localization_window_radius is None:
localization_window_radius = 0.2 * np.min(precip.shape[1:])
if add_perturbations:
if errdist_window_radius is None:
errdist_window_radius = 0.15 * min(precip.shape[1], precip.shape[2])
if acf_window_radius is None:
acf_window_radius = 0.25 * min(precip.shape[1], precip.shape[2])
if vel_pert_method is not None:
if kmperpixel is None:
raise ValueError("vel_pert_method is set but kmperpixel is None")
if timestep is None:
raise ValueError("vel_pert_method is set but timestep is None")
if vel_pert_kwargs is None:
vel_pert_kwargs = dict()
print("Computing LINDA nowcast")
print("-----------------------")
print("")
print("Inputs")
print("------")
print(f"dimensions: {precip.shape[1]}x{precip.shape[2]}")
print(f"number of time steps: {precip.shape[0]}")
print("")
print("Methods")
print("-------")
nowcast_type = "ensemble" if add_perturbations else "deterministic"
print(f"nowcast type: {nowcast_type}")
print(f"feature detector: {feature_method}")
print(f"extrapolator: {extrap_method}")
print(f"kernel type: {kernel_type}")
if add_perturbations and vel_pert_method is not None:
print(f"velocity perturbator: {vel_pert_method}")
print("")
print("Parameters")
print("----------")
print(f"number of time steps: {timesteps}")
print(f"ARI model order: {ari_order}")
print(f"localization window radius: {localization_window_radius}")
if add_perturbations:
print(f"error dist. window radius: {errdist_window_radius}")
print(f"error ACF window radius: {acf_window_radius}")
print(f"ensemble size: {n_ens_members}")
print(f"parallel workers: {num_workers}")
print(f"seed: {seed}")
if vel_pert_method == "bps":
vp_par = vel_pert_kwargs.get(
"p_par", noise.motion.get_default_params_bps_par()
)
vp_perp = vel_pert_kwargs.get(
"p_perp", noise.motion.get_default_params_bps_perp()
)
print(
f"velocity perturbations, parallel: {vp_par[0]:.2f}, {vp_par[1]:.2f}, {vp_par[2]:.2f}"
)
print(
f"velocity perturbations, perpendicular: {vp_perp[0]:.2f}, {vp_perp[1]:.2f}, {vp_perp[2]:.2f}"
)
vel_pert_kwargs = vel_pert_kwargs.copy()
vel_pert_kwargs["vp_par"] = vp_par
vel_pert_kwargs["vp_perp"] = vp_perp
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(precip)) else False
)
fct_gen = _linda_deterministic_init(
precip,
velocity,
feature_method,
max_num_features,
feature_kwargs,
ari_order,
kernel_type,
localization_window_radius,
extrap_method,
extrap_kwargs,
add_perturbations,
num_workers,
measure_time,
)
if measure_time:
fct_gen, precip_lagr_diff, init_time = fct_gen
else:
fct_gen, precip_lagr_diff = fct_gen
if add_perturbations:
pert_gen = _linda_perturbation_init(
precip,
precip_lagr_diff,
velocity,
fct_gen,
pert_thrs,
localization_window_radius,
errdist_window_radius,
acf_window_radius,
vel_pert_method,
vel_pert_kwargs,
kmperpixel,
timestep,
num_workers,
use_multiprocessing,
measure_time,
)
if measure_time:
precip_pert_gen, vel_pert_gen, pert_init_time = pert_gen
init_time += pert_init_time
else:
precip_pert_gen, vel_pert_gen = pert_gen
else:
precip_pert_gen = None
vel_pert_gen = None
# TODO: make printing info optional
fct = _linda_forecast(
precip,
precip_lagr_diff[1:],
timesteps,
fct_gen,
precip_pert_gen,
vel_pert_gen,
n_ens_members,
seed,
measure_time,
True,
return_output,
callback,
)
if return_output:
if measure_time:
return fct[0], init_time, fct[1]
else:
return fct
else:
return None
|
def forecast(
precip,
velocity,
timesteps,
feature_method="blob",
max_num_features=25,
feature_kwargs=None,
ari_order=1,
kernel_type="anisotropic",
localization_window_radius=None,
errdist_window_radius=None,
acf_window_radius=None,
extrap_method="semilagrangian",
extrap_kwargs=None,
add_perturbations=True,
pert_thrs=(0.5, 1.0),
n_ens_members=10,
vel_pert_method=None,
vel_pert_kwargs=None,
kmperpixel=None,
timestep=None,
seed=None,
num_workers=1,
use_multiprocessing=False,
measure_time=False,
callback=None,
return_output=True,
):
"""
Generate a deterministic or ensemble nowcast by using the Lagrangian
INtegro-Difference equation model with Autoregression (LINDA) model.
Parameters
----------
precip: array_like
Array of shape (ari_order + 2, m, n) containing the input rain rate
or reflectivity fields (in linear scale) ordered by timestamp from
oldest to newest. The time steps between the inputs are assumed to be
regular.
velocity: array_like
Array of shape (2, m, n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs.
timesteps: int
Number of time steps to forecast.
feature_method: {'blob', 'domain' 'shitomasi'}
Feature detection method:
+-------------------+-----------------------------------------------------+
| Method name | Description |
+===================+=====================================================+
| blob | Laplacian of Gaussian (LoG) blob detector |
| | implemented in scikit-image |
+-------------------+-----------------------------------------------------+
| domain | no feature detection, the model is applied over the |
| | whole domain without localization |
+-------------------+-----------------------------------------------------+
| shitomasi | Shi-Tomasi corner detector implemented in OpenCV |
+-------------------+-----------------------------------------------------+
Default: 'blob'
max_num_features: int, optional
Maximum number of features to use. It is recommended to set this between
20 and 50, which gives a good tradeoff between localization and
computation time. Default: 25
feature_kwargs: dict, optional
Keyword arguments that are passed as ``**kwargs`` for the feature detector.
See :py:mod:`pysteps.feature.blob` and :py:mod:`pysteps.feature.shitomasi`.
ari_order: {1, 2}, optional
The order of the ARI(p, 1) model. Default: 1
kernel_type: {"anisotropic", "isotropic"}, optional
The type of the kernel. Default: 'anisotropic'
localization_window_radius: float, optional
The standard deviation of the Gaussian localization window.
Default: 0.2 * min(m, n)
errdist_window_radius: float, optional
The standard deviation of the Gaussian window for estimating the
forecast error distribution. Default: 0.15 * min(m, n)
acf_window_radius: float, optional
The standard deviation of the Gaussian window for estimating the
forecast error ACF. Default: 0.25 * min(m, n)
extrap_method: str, optional
The extrapolation method to use. See the documentation of
:py:mod:`pysteps.extrapolation.interface`. Default: 'semilagrangian'
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See :py:mod:`pysteps.extrapolation.interface`.
add_perturbations: bool
Set to False to disable perturbations and generate a single
deterministic nowcast. Default: True
pert_thrs: float
Two-element tuple containing the threshold values for estimating the
perturbation parameters (mm/h). Default: (0.5, 1.0)
n_ens_members: int, optional
The number of ensemble members to generate. Default: 10
vel_pert_method: {'bps', None}, optional
Name of the generator to use for perturbing the advection field. See
:py:mod:`pysteps.noise.interface`. Default: None
vel_pert_kwargs: dict, optional
Optional dictionary containing keyword arguments 'p_par' and 'p_perp'
for the initializer of the velocity perturbator. The choice of the
optimal parameters depends on the domain and the used optical flow
method. For the default values and parameters optimized for different
domains, see :py:func:`pysteps.nowcasts.steps.forecast`.
kmperpixel: float, optional
Spatial resolution of the input data (kilometers/pixel). Required if
vel_pert_method is not None.
timestep: float, optional
Time step of the motion vectors (minutes). Required if vel_pert_method
is not None.
seed: int, optional
Optional seed for the random generators.
num_workers: int, optional
The number of workers to use for parallel computations. Applicable if
dask is installed. Default: 1
use_multiprocessing: bool, optional
Set to True to improve the performance of certain parallelized parts of
the code. If set to True, the main script calling linda.forecast must
be enclosed within the 'if __name__ == "__main__":' block.
Default: False
measure_time: bool, optional
If set to True, measure, print and return the computation time.
Default: False
callback: function, optional
Optional function that is called after computation of each time step of
the nowcast. The function takes one argument: a three-dimensional array
of shape (n_ens_members,h,w), where h and w are the height and width
of the input precipitation fields, respectively. This can be used, for
instance, writing the outputs into files. Default: None
return_output: bool, optional
Set to False to disable returning the outputs as numpy arrays. This can
save memory if the intermediate results are written to output files
using the callback function. Default: True
Returns
-------
out: numpy.ndarray
A four-dimensional array of shape (n_ens_members, timesteps, m, n)
containing a time series of forecast precipitation fields for each
ensemble member. If add_perturbations is False, the first dimension is
dropped. The time series starts from t0 + timestep, where timestep is
taken from the input fields. If measure_time is True, the return value
is a three-element tuple containing the nowcast array, the initialization
time of the nowcast generator and the time used in the main loop
(seconds). If return_output is set to False, a single None value is
returned instead.
Notes
-----
It is recommended to choose the feature detector parameters so that the
number of features is around 20-40. This gives a good tradeoff between
localization and computation time.
It is highly recommented to set num_workers>1 to reduce computation time.
In this case, it is advisable to disable OpenMP by setting the environment
variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many
simultaneous threads.
"""
_check_inputs(precip, velocity, timesteps, ari_order)
if feature_kwargs is None:
feature_kwargs = dict()
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if localization_window_radius is None:
localization_window_radius = 0.2 * np.min(precip.shape[1:])
if add_perturbations:
if errdist_window_radius is None:
errdist_window_radius = 0.15 * min(precip.shape[1], precip.shape[2])
if acf_window_radius is None:
acf_window_radius = 0.25 * min(precip.shape[1], precip.shape[2])
if vel_pert_method is not None:
if kmperpixel is None:
raise ValueError("vel_pert_method is set but kmperpixel is None")
if timestep is None:
raise ValueError("vel_pert_method is set but timestep is None")
if vel_pert_kwargs is None:
vel_pert_kwargs = dict()
print("Computing LINDA nowcast")
print("-----------------------")
print("")
print("Inputs")
print("------")
print(f"dimensions: {precip.shape[1]}x{precip.shape[2]}")
print(f"number of time steps: {precip.shape[0]}")
print("")
print("Methods")
print("-------")
nowcast_type = "ensemble" if add_perturbations else "deterministic"
print(f"nowcast type: {nowcast_type}")
print(f"feature detector: {feature_method}")
print(f"extrapolator: {extrap_method}")
print(f"kernel type: {kernel_type}")
if add_perturbations and vel_pert_method is not None:
print(f"velocity perturbator: {vel_pert_method}")
print("")
print("Parameters")
print("----------")
print(f"number of time steps: {timesteps}")
print(f"ARI model order: {ari_order}")
print(f"localization window radius: {localization_window_radius}")
if add_perturbations:
print(f"error dist. window radius: {errdist_window_radius}")
print(f"error ACF window radius: {acf_window_radius}")
print(f"ensemble size: {n_ens_members}")
print(f"parallel workers: {num_workers}")
print(f"seed: {seed}")
if vel_pert_method == "bps":
vp_par = vel_pert_kwargs.get(
"p_par", noise.motion.get_default_params_bps_par()
)
vp_perp = vel_pert_kwargs.get(
"p_perp", noise.motion.get_default_params_bps_perp()
)
print(
f"velocity perturbations, parallel: {vp_par[0]:.2f}, {vp_par[1]:.2f}, {vp_par[2]:.2f}"
)
print(
f"velocity perturbations, perpendicular: {vp_perp[0]:.2f}, {vp_perp[1]:.2f}, {vp_perp[2]:.2f}"
)
vel_pert_kwargs = vel_pert_kwargs.copy()
vel_pert_kwargs["vp_par"] = vp_par
vel_pert_kwargs["vp_perp"] = vp_perp
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(precip)) else False
)
fct_gen = _linda_deterministic_init(
precip,
velocity,
feature_method,
max_num_features,
feature_kwargs,
ari_order,
kernel_type,
localization_window_radius,
extrap_method,
extrap_kwargs,
add_perturbations,
num_workers,
measure_time,
)
if measure_time:
fct_gen, precip_lagr_diff, init_time = fct_gen
else:
fct_gen, precip_lagr_diff = fct_gen
if add_perturbations:
pert_gen = _linda_perturbation_init(
precip,
precip_lagr_diff,
velocity,
fct_gen,
pert_thrs,
localization_window_radius,
errdist_window_radius,
acf_window_radius,
vel_pert_method,
vel_pert_kwargs,
kmperpixel,
timestep,
num_workers,
use_multiprocessing,
measure_time,
)
if measure_time:
precip_pert_gen, vel_pert_gen, pert_init_time = pert_gen
init_time += pert_init_time
else:
precip_pert_gen, vel_pert_gen = pert_gen
else:
precip_pert_gen = None
vel_pert_gen = None
# TODO: make printing info optional
precip_forecast = _linda_forecast(
precip,
precip_lagr_diff[1:],
timesteps,
fct_gen,
precip_pert_gen,
vel_pert_gen,
n_ens_members,
seed,
measure_time,
True,
return_output,
callback,
)
if return_output:
if measure_time:
return fct[0], init_time, fct[1]
else:
return fct
else:
return None
|
32,744 |
def patch_cache(tracer):
"""
Function that patches the inner cache system. Because the cache backend
can have different implementations and connectors, this function must
handle all possible interactions with the Django cache. What follows
is currently traced:
* in-memory cache
* the cache client wrapper that could use any of the common
Django supported cache servers (Redis, Memcached, Database, Custom)
"""
# discover used cache backends
cache_backends = {cache['BACKEND'] for cache in django_settings.CACHES.values()}
def _trace_operation(fn, method_name):
"""
Return a wrapped function that traces a cache operation
"""
cache_service_name = settings.DEFAULT_CACHE_SERVICE \
if settings.DEFAULT_CACHE_SERVICE else settings.DEFAULT_SERVICE
@wraps(fn)
def wrapped(self, *args, **kwargs):
# get the original function method
method = getattr(self, DATADOG_NAMESPACE.format(method=method_name))
with tracer.trace('django.cache',
span_type=TYPE, service=cache_service_name) as span:
# update the resource name and tag the cache backend
span.resource = _resource_from_cache_prefix(method_name, self)
cache_backend = '{}.{}'.format(self.__module__, self.__class__.__name__)
span.set_tag(CACHE_BACKEND, cache_backend)
if args:
keys = quantize_key_values(args[0])
span.set_tag(CACHE_COMMAND_KEY, keys)
return method(*args, **kwargs)
return wrapped
def _wrap_method(cls, method_name):
"""
For the given class, wraps the method name with a traced operation
so that the original method is executed, while the span is properly
created
"""
# check if the backend owns the given bounded method
if not hasattr(cls, method_name):
return
# prevent patching each backend's method more than once
if hasattr(cls, DATADOG_NAMESPACE.format(method=method_name)):
log.debug('{} already traced'.format(method_name))
else:
method = getattr(cls, method_name)
setattr(cls, DATADOG_NAMESPACE.format(method=method_name), method)
setattr(cls, method_name, _trace_operation(method, method_name))
# trace all backends
for cache_module in cache_backends:
cache = import_from_string(cache_module, cache_module)
for method in TRACED_METHODS:
_wrap_method(cache, method)
|
def patch_cache(tracer):
"""
Function that patches the inner cache system. Because the cache backend
can have different implementations and connectors, this function must
handle all possible interactions with the Django cache. What follows
is currently traced:
* in-memory cache
* the cache client wrapper that could use any of the common
Django supported cache servers (Redis, Memcached, Database, Custom)
"""
# discover used cache backends
cache_backends = set([cache['BACKEND'] for cache in django_settings.CACHES.values()])
def _trace_operation(fn, method_name):
"""
Return a wrapped function that traces a cache operation
"""
cache_service_name = settings.DEFAULT_CACHE_SERVICE \
if settings.DEFAULT_CACHE_SERVICE else settings.DEFAULT_SERVICE
@wraps(fn)
def wrapped(self, *args, **kwargs):
# get the original function method
method = getattr(self, DATADOG_NAMESPACE.format(method=method_name))
with tracer.trace('django.cache',
span_type=TYPE, service=cache_service_name) as span:
# update the resource name and tag the cache backend
span.resource = _resource_from_cache_prefix(method_name, self)
cache_backend = '{}.{}'.format(self.__module__, self.__class__.__name__)
span.set_tag(CACHE_BACKEND, cache_backend)
if args:
keys = quantize_key_values(args[0])
span.set_tag(CACHE_COMMAND_KEY, keys)
return method(*args, **kwargs)
return wrapped
def _wrap_method(cls, method_name):
"""
For the given class, wraps the method name with a traced operation
so that the original method is executed, while the span is properly
created
"""
# check if the backend owns the given bounded method
if not hasattr(cls, method_name):
return
# prevent patching each backend's method more than once
if hasattr(cls, DATADOG_NAMESPACE.format(method=method_name)):
log.debug('{} already traced'.format(method_name))
else:
method = getattr(cls, method_name)
setattr(cls, DATADOG_NAMESPACE.format(method=method_name), method)
setattr(cls, method_name, _trace_operation(method, method_name))
# trace all backends
for cache_module in cache_backends:
cache = import_from_string(cache_module, cache_module)
for method in TRACED_METHODS:
_wrap_method(cache, method)
|
54,094 |
def attach_OPSD_renewables(n, countries=None):
if countries is None:
countries = (snakemake.config['electricity']
.get('OPSD_VRES_countries', {}))
if len(countries) == 0: return
for country in countries:
mapping = attach_OPSD_ppls_for_country(country)
n.generators.loc[mapping.index, 'p_nom'] = mapping
logger.info('overwriting renewable capacities in {} [MW] with precise locations\n{}'
.format(country, n.generators.loc[mapping.index].groupby('carrier').sum().p_nom))
|
def attach_OPSD_renewables(n):
available = ['DE', 'FR', 'PL', 'CH', 'DK', 'CZ', 'SE', 'GB']
countries = set(available) & set(n.buses.country)
tech_map = (snakemake.config['electricity']
.get('include_renewable_capacities_from_OPSD', {}))
logger.info(f'Using OPSD renewable capacities in {", ".join(countries)}.')
df = pd.concat([pm.data.OPSD_VRE_country(c) for c in countries])
df = df.query('Fueltype in @techs').powerplant.convert_country_to_alpha2()
technology_b = ~df.Technology.isin(['Onshore', 'Offshore'])
df['Fueltype'] = df.Fueltype.where(technology_b, df.Technology)
for fueltype, carrier_like in tech_map.items():
gens = n.generators[lambda df: df.carrier.str.contains(carrier_like)]
buses = n.buses.loc[gens.bus.unique()]
gens_per_bus = gens.groupby('bus').p_nom.count()
caps = map_country_bus(df.query('Fueltype == @fueltype'), buses)
caps = caps.groupby(['bus']).Capacity.sum()
n.generators.p_nom.update(gens.bus.map(caps).dropna())
|
43,699 |
def max_independent_set(graph, constrained=True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the MaxIndependentSet problem,
for a given graph.
The goal of MaxIndependentSet is to find the largest possible independent set of a graph. Given some graph :math:`G`,
an independent set of :math:`G` is a set of vertices such that no two of the vertices in the set share a common edge.
Args:
graph (nx.Graph): a graph defining the pairs of wires on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian):
.. UsageDetails::
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas
in `[arXiv:1709.03489] <https://arxiv.org/abs/1709.03489>`__.
The constrained MaxIndependentSet cost Hamiltonian is defined as:
.. math:: H_C \ = \ \displaystyle\sum_{v \in V(G)} Z_{v}
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator applied to the :math:`i`-th
vertex.
The returned mixer Hamiltonian is `~qaoa.bit_flip_mixer` applied to :math:`G`.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|0\rangle` state
**Unconstrained**
The unconstrained MaxIndependentSet cost Hamiltonian is defined as:
.. math:: H_C \ = \ \frac{(i, j) \in E(G)} (Z_i Z_j \ - \ Z_i \ - \ Z_j) \ + \ \displaystyle\sum_{i \in V(G)} Z_i
where :math:`E(G)` is the edges of :math:`G`, :math:`V(G)` is the set of vertices, and :math:`Z_i` is the Pauli-Z operator
acting on the :math:`i`-th vertex.
The returned mixer Hamiltonian is `~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states
"""
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
if constrained:
return (bit_driver(graph.nodes, 1), qaoa.bit_flip_mixer(graph, 0))
cost_h = edge_driver(graph, ['10', '01', '00']) + bit_driver(graph.nodes, 1)
mixer_h = qaoa.x_mixer(graph.nodes)
return (cost_h, mixer_h)
|
def max_independent_set(graph, constrained=True):
r"""For an input graph, returns the QAOA cost Hamiltonian and the recommended mixer for the MaxIndependentSet problem.
The goal of MaxIndependentSet is to find the largest possible independent set of a graph. Given some graph :math:`G`,
an independent set of :math:`G` is a set of vertices such that no two of the vertices in the set share a common edge.
Args:
graph (nx.Graph): a graph defining the pairs of wires on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian):
.. UsageDetails::
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas
in `[arXiv:1709.03489] <https://arxiv.org/abs/1709.03489>`__.
The constrained MaxIndependentSet cost Hamiltonian is defined as:
.. math:: H_C \ = \ \displaystyle\sum_{v \in V(G)} Z_{v}
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator applied to the :math:`i`-th
vertex.
The returned mixer Hamiltonian is `~qaoa.bit_flip_mixer` applied to :math:`G`.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|0\rangle` state
**Unconstrained**
The unconstrained MaxIndependentSet cost Hamiltonian is defined as:
.. math:: H_C \ = \ \frac{(i, j) \in E(G)} (Z_i Z_j \ - \ Z_i \ - \ Z_j) \ + \ \displaystyle\sum_{i \in V(G)} Z_i
where :math:`E(G)` is the edges of :math:`G`, :math:`V(G)` is the set of vertices, and :math:`Z_i` is the Pauli-Z operator
acting on the :math:`i`-th vertex.
The returned mixer Hamiltonian is `~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states
"""
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
if constrained:
return (bit_driver(graph.nodes, 1), qaoa.bit_flip_mixer(graph, 0))
cost_h = edge_driver(graph, ['10', '01', '00']) + bit_driver(graph.nodes, 1)
mixer_h = qaoa.x_mixer(graph.nodes)
return (cost_h, mixer_h)
|
45,985 |
def lovasz_hinge_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes a surrogate binary intersection-over-union (IoU) loss.
According to [2], we compute the IoU as follows:
.. math::
\text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|}
[1] approximates this fomular with a surrogate, which is fully differentable.
Where:
- :math:`X` expects to be the scores of each class.
- :math:`Y` expects to be the binary tensor with the class labels.
the loss, is finally computed as:
.. math::
\text{loss}(x, class) = 1 - \text{IoU}(x, class)
Reference:
[1] http://proceedings.mlr.press/v37/yub15.pdf
[2] https://arxiv.org/pdf/1705.08790.pdf
. note::
This loss function only supports binary labels. For multi-class labels please
use the Lovasz-Softmax loss.
Args:
input: logits tensor with shape :math:`(N, 1, H, W)`.
labels: labels tensor with shape :math:`(N, H, W)` with binary values.
Return:
a scalar with the computed loss.
Example:
>>> N = 1 # num_classes
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = lovasz_hinge_loss(input, target)
>>> output.backward()
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(target, torch.Tensor):
raise TypeError(f"Target type is not a torch.Tensor. Got {type(target)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not len(target.shape) == 3:
raise ValueError(f"Invalid target shape, we expect BxHxW. Got: {target.shape}")
if not input.shape[1] == 1:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError(f"input and target shapes must be the same. Got: {input.shape} and {target.shape}")
if not input.device == target.device:
raise ValueError(f"input and target must be in the same device. Got: {input.device} and {target.device}")
# flatten input and target [B, -1] and to float
input_flatten: torch.Tensor = input.flatten(start_dim=1)
target_flatten: torch.Tensor = target.flatten(start_dim=1).float()
# get shapes
B, N = input_flatten.shape
# compute probabilities
input_prob: torch.Tensor = torch.sigmoid(input_flatten)
# compute actual loss
signs = 2. * target_flatten - 1.
errors = 1. - input_prob * signs
errors_sorted, permutation = torch.sort(errors, dim=1, descending=True)
batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0)
target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)]
target_sorted: torch.Tensor = target_sorted.view(B, N)
target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True)
intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1)
union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1)
gradient: torch.Tensor = 1. - intersection / union
if N > 1:
gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1]
loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean()
return loss
|
def lovasz_hinge_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes a surrogate binary intersection-over-union (IoU) loss.
According to [2], we compute the IoU as follows:
.. math::
\text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|}
[1] approximates this fomular with a surrogate, which is fully differentable.
Where:
- :math:`X` expects to be the scores of each class.
- :math:`Y` expects to be the binary tensor with the class labels.
the loss, is finally computed as:
.. math::
\text{loss}(x, class) = 1 - \text{IoU}(x, class)
Reference:
[1] http://proceedings.mlr.press/v37/yub15.pdf
[2] https://arxiv.org/pdf/1705.08790.pdf
. note::
This loss function only supports binary labels. For multi-class labels please
use the Lovasz-Softmax loss.
Args:
input: logits tensor with shape :math:`(N, 1, H, W)`.
labels: labels tensor with shape :math:`(N, H, W)` with binary values.
Return:
a scalar with the computed loss.
Example:
>>> N = 1 # num_classes
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = lovasz_hinge_loss(input, target)
>>> output.backward()
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(target, torch.Tensor):
raise TypeError(f"Target type is not a torch.Tensor. Got {type(target)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not len(target.shape) == 3:
raise ValueError(f"Invalid target shape, we expect BxHxW. Got: {target.shape}")
if not input.shape[1] == 1:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError(f"input and target shapes must be the same. Got: {input.shape} and {target.shape}")
if not input.device == target.device:
raise ValueError(f"input and target must be in the same device. Got: {input.device} and {target.device}")
# flatten input and target [B, -1] and to float
input_flatten: torch.Tensor = input.flatten(start_dim=1)
target_flatten: torch.Tensor = target.flatten(start_dim=1).float()
# get shapes
B, N = input_flatten.shape
# compute probabilities
input_prob: torch.Tensor = torch.sigmoid(input_flatten)
# compute actual loss
signs = 2. * target_flatten - 1.
errors = 1. - input_prob * signs
errors_sorted, permutation = torch.sort(errors, dim=1, descending=True)
batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0)
target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)]
target_sorted: torch.Tensor = target_sorted.view(B, N)
target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True)
intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(1)
union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1)
gradient: torch.Tensor = 1. - intersection / union
if N > 1:
gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1]
loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean()
return loss
|
25,835 |
def get_interface_row_class(record):
if not record.enabled:
return 'danger'
elif not record.is_connectable:
return 'primary'
else:
return get_cabletermination_row_class(record)
return ''
|
def get_interface_row_class(record):
if not record.enabled:
return 'danger'
elif record.is_virtual:
return 'primary'
else:
return get_cabletermination_row_class(record)
return ''
|
57,785 |
def build_indicator_list(indicator_list: List[str]) -> List[str]:
"""Builds an indicator list for the query"""
result = []
if 'ALL' in indicator_list:
# Replaces "ALL" for all types supported on XSOAR.
result = ['User-Account', 'Domain-Name', 'Email-Addr', 'StixFile', 'X-OpenCTI-Hostname', 'IPv4-Addr',
'IPv6-Addr', 'Windows-Registry-Key', 'Url']
# Checks for additional types not supported by XSOAR, and adds them.
for indicator in indicator_list:
if not XSOHR_TYPES_TO_OPENCTI.get(indicator.lower(), ''):
result.append(indicator)
else:
result = [XSOHR_TYPES_TO_OPENCTI.get(indicator.lower(), indicator) for indicator in indicator_list]
return result
|
def build_indicator_list(indicator_list: List[str]) -> List[str]:
"""Builds an indicator list for the query"""
result = []
if 'ALL' in indicator_list:
# Replaces "ALL" for all types supported on XSOAR.
result = ['User-Account', 'Domain-Name', 'Email-Addr', 'StixFile', 'X-OpenCTI-Hostname', 'IPv4-Addr',
'IPv6-Addr', 'Windows-Registry-Key', 'Url']
result += [XSOHR_TYPES_TO_OPENCTI.get(indicator.lower(), indicator) for indicator in indicator_list if indicator != 'ALL']
return result
|
53,784 |
def get_clustering_from_busmap(network, busmap, with_time=True, global_constraints=True, line_length_factor=1.0,
aggregate_generators_weighted=False, aggregate_one_ports={},
aggregate_generators_carriers=None,
scale_link_capital_costs=True,
bus_strategies=dict(), one_port_strategies=dict(),
generator_strategies=dict()):
buses, linemap, linemap_p, linemap_n, lines = get_buses_linemap_and_lines(network, busmap, line_length_factor, bus_strategies)
network_c = Network()
io.import_components_from_dataframe(network_c, buses, "Bus")
io.import_components_from_dataframe(network_c, lines, "Line")
# Carry forward global constraints to clustered network.
if global_constraints:
network_c.global_constraints = network.global_constraints
if with_time:
network_c.set_snapshots(network.snapshots)
network_c.snapshot_weightings = network.snapshot_weightings.copy()
one_port_components = network.one_port_components.copy()
if aggregate_generators_weighted:
one_port_components.remove("Generator")
generators, generators_pnl = aggregategenerators(network, busmap, with_time=with_time,
carriers=aggregate_generators_carriers,
custom_strategies=generator_strategies)
io.import_components_from_dataframe(network_c, generators, "Generator")
if with_time:
for attr, df in generators_pnl.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, "Generator", attr)
for one_port in aggregate_one_ports:
one_port_components.remove(one_port)
new_df, new_pnl = aggregateoneport(network, busmap, component=one_port, with_time=with_time,
custom_strategies=one_port_strategies.get(one_port, {}))
io.import_components_from_dataframe(network_c, new_df, one_port)
for attr, df in new_pnl.items():
io.import_series_from_dataframe(network_c, df, one_port, attr)
##
# Collect remaining one ports
for c in network.iterate_components(one_port_components):
io.import_components_from_dataframe(
network_c,
c.df.assign(bus=c.df.bus.map(busmap)).dropna(subset=['bus']),
c.name
)
if with_time:
for c in network.iterate_components(one_port_components):
for attr, df in c.pnl.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, c.name, attr)
new_links = (network.links.assign(bus0=network.links.bus0.map(busmap),
bus1=network.links.bus1.map(busmap))
.dropna(subset=['bus0', 'bus1'])
.loc[lambda df: df.bus0 != df.bus1])
new_links['length'] = np.where(
new_links.length.notnull() & (new_links.length > 0),
line_length_factor *
haversine_pts(buses.loc[new_links['bus0'], ['x', 'y']],
buses.loc[new_links['bus1'], ['x', 'y']]),
0
)
if scale_link_capital_costs:
new_links['capital_cost'] *= (new_links.length/network.links.length).fillna(1)
io.import_components_from_dataframe(network_c, new_links, "Link")
if with_time:
for attr, df in network.links_t.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, "Link", attr)
io.import_components_from_dataframe(network_c, network.carriers, "Carrier")
network_c.determine_network_topology()
return Clustering(network_c, busmap, linemap, linemap_p, linemap_n)
|
def get_clustering_from_busmap(network, busmap, with_time=True, global_constraints=True, line_length_factor=1.0,
aggregate_generators_weighted=False, aggregate_one_ports={},
aggregate_generators_carriers=None,
scale_link_capital_costs=True,
bus_strategies=dict(), one_port_strategies=dict(),
generator_strategies=dict()):
buses, linemap, linemap_p, linemap_n, lines = get_buses_linemap_and_lines(network, busmap, line_length_factor, bus_strategies)
network_c = Network()
io.import_components_from_dataframe(network_c, buses, "Bus")
io.import_components_from_dataframe(network_c, lines, "Line")
# Carry forward global constraints to clustered network.
network_c.global_constraints = network.global_constraints
if with_time:
network_c.set_snapshots(network.snapshots)
network_c.snapshot_weightings = network.snapshot_weightings.copy()
one_port_components = network.one_port_components.copy()
if aggregate_generators_weighted:
one_port_components.remove("Generator")
generators, generators_pnl = aggregategenerators(network, busmap, with_time=with_time,
carriers=aggregate_generators_carriers,
custom_strategies=generator_strategies)
io.import_components_from_dataframe(network_c, generators, "Generator")
if with_time:
for attr, df in generators_pnl.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, "Generator", attr)
for one_port in aggregate_one_ports:
one_port_components.remove(one_port)
new_df, new_pnl = aggregateoneport(network, busmap, component=one_port, with_time=with_time,
custom_strategies=one_port_strategies.get(one_port, {}))
io.import_components_from_dataframe(network_c, new_df, one_port)
for attr, df in new_pnl.items():
io.import_series_from_dataframe(network_c, df, one_port, attr)
##
# Collect remaining one ports
for c in network.iterate_components(one_port_components):
io.import_components_from_dataframe(
network_c,
c.df.assign(bus=c.df.bus.map(busmap)).dropna(subset=['bus']),
c.name
)
if with_time:
for c in network.iterate_components(one_port_components):
for attr, df in c.pnl.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, c.name, attr)
new_links = (network.links.assign(bus0=network.links.bus0.map(busmap),
bus1=network.links.bus1.map(busmap))
.dropna(subset=['bus0', 'bus1'])
.loc[lambda df: df.bus0 != df.bus1])
new_links['length'] = np.where(
new_links.length.notnull() & (new_links.length > 0),
line_length_factor *
haversine_pts(buses.loc[new_links['bus0'], ['x', 'y']],
buses.loc[new_links['bus1'], ['x', 'y']]),
0
)
if scale_link_capital_costs:
new_links['capital_cost'] *= (new_links.length/network.links.length).fillna(1)
io.import_components_from_dataframe(network_c, new_links, "Link")
if with_time:
for attr, df in network.links_t.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, "Link", attr)
io.import_components_from_dataframe(network_c, network.carriers, "Carrier")
network_c.determine_network_topology()
return Clustering(network_c, busmap, linemap, linemap_p, linemap_n)
|
1,015 |
def test_html_metadata():
s = "<h1>Test</h1>"
h = display.HTML(s, metadata={"isolated": True})
assert h._repr_html_(), (s == {"isolated": True})
|
def test_html_metadata():
s = "<h1>Test</h1>"
h = display.HTML(s, metadata={"isolated": True})
assert h._repr_html_() == (s , {"isolated": True})
|
30,955 |
def create_user_iam(default_base_dn, default_page_size, args):
assert conn is not None
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=OUTGOING_MAPPER, mapping_type='User Profile')
try:
sam_account_name = ad_user.get("samaccountname")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, default_page_size, sam_account_name)
if user_exists:
return return_results("User already exists")
user_dn = generate_dn_and_remove_from_user_profile(ad_user)
object_classes = ["top", "person", "organizationalPerson", "user"]
success = conn.add(user_dn, object_classes, ad_user)
if success:
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
details=ad_user,
active=ad_user.get("userAccountControl"))
else:
iam_user_profile.set_result(success=False, error_message="Failed to create user")
return_results(iam_user_profile)
except Exception as e:
iam_user_profile.set_result(success=False, error_message=str(e))
return_results(iam_user_profile)
|
def create_user_iam(default_base_dn, default_page_size, args):
assert conn is not None
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=OUTGOING_MAPPER, mapping_type='User Profile')
try:
sam_account_name = ad_user.get("samaccountname")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, default_page_size, sam_account_name)
if user_exists:
return return_results("User already exists")
user_dn = generate_dn_and_remove_from_user_profile(ad_user)
object_classes = ["top", "person", "organizationalPerson", "user"]
success = conn.add(user_dn, object_classes, ad_user)
if success:
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
details=ad_user,
active=True)
else:
iam_user_profile.set_result(success=False, error_message="Failed to create user")
return_results(iam_user_profile)
except Exception as e:
iam_user_profile.set_result(success=False, error_message=str(e))
return_results(iam_user_profile)
|
30,853 |
def format_sort(sort_str: str) -> list:
"""
Format a sort string from "field1:asc,field2:desc" to a list accepted by pymongo.sort()
"field1:asc,field2:desc" => [("field1",1),("field2",-1)]
Args:
sort_str: a sort detailed as a string
Returns:
list accepted by pymongo.sort()
"""
sort_fields = sort_str.split(',')
sort_list = list()
for field in sort_fields:
if ':' not in field:
raise ValueError("`sort` is not in the correct format.")
field, type = field.split(':')
if type not in SORT_TYPE_DICT.keys():
raise ValueError("`sort` is not in the correct format. Please make sure it's either 'asc' or 'desc'")
else:
sort_list.append((field, SORT_TYPE_DICT[type]))
return sort_list
|
def format_sort(sort_str: str) -> list:
"""
Format a sort string from "field1:asc,field2:desc" to a list accepted by pymongo.sort()
"field1:asc,field2:desc" => [("field1",1),("field2",-1)]
Args:
sort_str: a sort detailed as a string
Returns:
list accepted by pymongo.sort()
"""
sort_fields = sort_str.split(',')
sort_list = list()
for field in sort_fields:
if ':' not in field:
raise ValueError("`sort` is not in the correct format.")
field, type = field.split(':', 1)
if type not in SORT_TYPE_DICT.keys():
raise ValueError("`sort` is not in the correct format. Please make sure it's either 'asc' or 'desc'")
else:
sort_list.append((field, SORT_TYPE_DICT[type]))
return sort_list
|
5,344 |
def init(
name,
cpu,
mem,
nic="default",
interfaces=None,
hypervisor=None,
start=True, # pylint: disable=redefined-outer-name
disk="default",
disks=None,
saltenv="base",
seed=True,
install=True,
pub_key=None,
priv_key=None,
seed_cmd="seed.apply",
graphics=None,
os_type=None,
arch=None,
boot=None,
memtune=None,
**kwargs
):
"""
Initialize a new vm
:param name: name of the virtual machine to create
:param cpu: Number of virtual CPUs to assign to the virtual machine
:param mem: Amount of memory to allocate to the virtual machine in MiB.
:param nic: NIC profile to use (Default: ``'default'``).
The profile interfaces can be customized / extended with the interfaces parameter.
If set to ``None``, no profile will be used.
:param interfaces:
List of dictionaries providing details on the network interfaces to create.
These data are merged with the ones from the nic profile. The structure of
each dictionary is documented in :ref:`init-nic-def`.
.. versionadded:: 2019.2.0
:param hypervisor: the virtual machine type. By default the value will be computed according
to the virtual host capabilities.
:param start: ``True`` to start the virtual machine after having defined it (Default: ``True``)
:param disk: Disk profile to use (Default: ``'default'``). If set to ``None``, no profile will be used.
:param disks: List of dictionaries providing details on the disk devices to create.
These data are merged with the ones from the disk profile. The structure of
each dictionary is documented in :ref:`init-disk-def`.
.. versionadded:: 2019.2.0
:param saltenv: Fileserver environment (Default: ``'base'``).
See :mod:`cp module for more details <salt.modules.cp>`
:param seed: ``True`` to seed the disk image. Only used when the ``image`` parameter is provided.
(Default: ``True``)
:param install: install salt minion if absent (Default: ``True``)
:param pub_key: public key to seed with (Default: ``None``)
:param priv_key: public key to seed with (Default: ``None``)
:param seed_cmd: Salt command to execute to seed the image. (Default: ``'seed.apply'``)
:param graphics:
Dictionary providing details on the graphics device to create. (Default: ``None``)
See :ref:`init-graphics-def` for more details on the possible values.
.. versionadded:: 2019.2.0
:param os_type:
type of virtualization as found in the ``//os/type`` element of the libvirt definition.
The default value is taken from the host capabilities, with a preference for ``hvm``.
.. versionadded:: 2019.2.0
:param arch:
architecture of the virtual machine. The default value is taken from the host capabilities,
but ``x86_64`` is prefed over ``i686``.
.. versionadded:: 2019.2.0
:param config: minion configuration to use when seeding.
See :mod:`seed module for more details <salt.modules.seed>`
:param boot_dev: String of space-separated devices to boot from (Default: ``'hd'``)
:param serial_type: Serial device type. One of ``'pty'``, ``'tcp'`` (Default: ``None``)
:param telnet_port: Telnet port to use for serial device of type ``tcp``.
:param console: ``True`` to add a console device along with serial one (Default: ``True``)
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param boot:
Specifies kernel, initial ramdisk and kernel command line parameters for the virtual machine.
This is an optional parameter, all of the keys are optional within the dictionary. The structure of
the dictionary is documented in :ref:`init-boot-def`. If a remote path is provided to kernel or initrd,
salt will handle the downloading of the specified remote file and modify the XML accordingly.
To boot VM with UEFI, specify loader and nvram path or specify 'efi': ``True`` if your libvirtd version
is >= 5.2.0 and QEMU >= 3.0.0.
.. versionadded:: 3000
.. code-block:: python
{
'kernel': '/root/f8-i386-vmlinuz',
'initrd': '/root/f8-i386-initrd',
'cmdline': 'console=ttyS0 ks=http://example.com/f8-i386/os/',
'loader': '/usr/share/OVMF/OVMF_CODE.fd',
'nvram': '/usr/share/OVMF/OVMF_VARS.ms.fd'
}
:param memtune:
Specifies hard_limit, soft_limit, swap_hard_limit and min_guarantee parameters for tuning the memory of the domain.
This is an optional parameter, all of the keys are optional within the dictionary. The structure of the dictionary
is documented in :ref:`init-memtune-def`. Both decimal and binary base are supported. Detail unit specification is
documented in :ref:`virt-units`
.. versionadded:: Magnesium
.. code-block:: python
{
'hard_limit': '1024'
'soft_limit': '512m'
'swap_hard_limit': '1g'
'min_guarantee': '512mib'
}
.. _init-boot-def:
.. rubric:: Boot parameters definition
The boot parameters dictionary can contains the following properties:
kernel
The URL or path to the kernel to run the virtual machine with.
initrd
The URL or path to the initrd file to run the virtual machine with.
cmdline
The parameters to pass to the kernel provided in the `kernel` property.
loader
The path to the UEFI binary loader to use.
.. versionadded:: 3001
nvram
The path to the UEFI data template. The file will be copied when creating the virtual machine.
.. versionadded:: 3001
efi
A boolean value.
.. versionadded:: sodium
.. _init-memtune-def:
.. rubric:: Memtune parameter definition
Memtune parameter can contain the following properties:
hard_limit
the maximum memory the guest can use
soft_limit
memory limit to enforce during memory contention
swap_hard_limit
the maximum memory plus swap the guest can use
min_guarantee
the guaranteed minimum memory allocation for the guest
.. _init-nic-def:
.. rubric:: Network Interfaces Definitions
Network interfaces dictionaries can contain the following properties:
name
Name of the network interface. This is only used as a key to merge with the profile data
type
Network type. One of ``'bridge'``, ``'network'``
source
The network source, typically the bridge or network name
mac
The desired mac address, computed if ``None`` (Default: ``None``).
model
The network card model (Default: depends on the hypervisor)
.. _init-disk-def:
.. rubric:: Disks Definitions
Disk dictionaries can contain the following properties:
name
Name of the disk. This is mostly used in the name of the disk image and as a key to merge
with the profile data.
format
Format of the disk image, like ``'qcow2'``, ``'raw'``, ``'vmdk'``.
(Default: depends on the hypervisor)
size
Disk size in MiB
pool
Path to the folder or name of the pool where disks should be created.
(Default: depends on hypervisor and the virt:storagepool configuration)
.. versionchanged:: 3001
If the value contains no '/', it is considered a pool name where to create a volume.
Using volumes will be mandatory for some pools types like rdb, iscsi, etc.
model
One of the disk busses allowed by libvirt (Default: depends on hypervisor)
See the libvirt `disk element`_ documentation for the allowed bus types.
image
Path to the image to use for the disk. If no image is provided, an empty disk will be created
(Default: ``None``)
Note that some pool types do not support uploading an image. This list can evolve with libvirt
versions.
overlay_image
``True`` to create a QCOW2 disk image with ``image`` as backing file. If ``False``
the file pointed to by the ``image`` property will simply be copied. (Default: ``False``)
.. versionchanged:: 3001
This property is only valid on path-based disks, not on volumes. To create a volume with a
backing store, set the ``backing_store_path`` and ``backing_store_format`` properties.
backing_store_path
Path to the backing store image to use. This can also be the name of a volume to use as
backing store within the same pool.
.. versionadded:: 3001
backing_store_format
Image format of the disk or volume to use as backing store. This property is mandatory when
using ``backing_store_path`` to avoid `problems <https://libvirt.org/kbase/backing_chains.html#troubleshooting>`_
.. versionadded:: 3001
source_file
Absolute path to the disk image to use. Not to be confused with ``image`` parameter. This
parameter is useful to use disk images that are created outside of this module. Can also
be ``None`` for devices that have no associated image like cdroms.
.. versionchanged:: 3001
For volume disks, this can be the name of a volume already existing in the storage pool.
device
Type of device of the disk. Can be one of 'disk', 'cdrom', 'floppy' or 'lun'.
(Default: ``'disk'``)
hostname_property
When using ZFS volumes, setting this value to a ZFS property ID will make Salt store the name of the
virtual machine inside this property. (Default: ``None``)
sparse_volume
Boolean to specify whether to use a thin provisioned ZFS volume.
Example profile for a bhyve VM with two ZFS disks. The first is
cloned from the specified image. The second disk is a thin
provisioned volume.
.. code-block:: yaml
virt:
disk:
two_zvols:
- system:
image: zroot/bhyve/CentOS-7-x86_64-v1@v1.0.5
hostname_property: virt:hostname
pool: zroot/bhyve/guests
- data:
pool: tank/disks
size: 20G
hostname_property: virt:hostname
sparse_volume: True
.. _init-graphics-def:
.. rubric:: Graphics Definition
The graphics dictionary can have the following properties:
type
Graphics type. The possible values are ``none``, ``'spice'``, ``'vnc'`` and other values
allowed as a libvirt graphics type (Default: ``None``)
See the libvirt `graphics element`_ documentation for more details on the possible types.
port
Port to export the graphics on for ``vnc``, ``spice`` and ``rdp`` types.
tls_port
Port to export the graphics over a secured connection for ``spice`` type.
listen
Dictionary defining on what address to listen on for ``vnc``, ``spice`` and ``rdp``.
It has a ``type`` property with ``address`` and ``None`` as possible values, and an
``address`` property holding the IP or hostname to listen on.
By default, not setting the ``listen`` part of the dictionary will default to
listen on all addresses.
.. rubric:: CLI Example
.. code-block:: bash
salt 'hypervisor' virt.init vm_name 4 512 salt://path/to/image.raw
salt 'hypervisor' virt.init vm_name 4 512 /var/lib/libvirt/images/img.raw
salt 'hypervisor' virt.init vm_name 4 512 nic=profile disk=profile
The disk images will be created in an image folder within the directory
defined by the ``virt:images`` option. Its default value is
``/srv/salt-images/`` but this can changed with such a configuration:
.. code-block:: yaml
virt:
images: /data/my/vm/images/
.. _disk element: https://libvirt.org/formatdomain.html#elementsDisks
.. _graphics element: https://libvirt.org/formatdomain.html#elementsGraphics
"""
try:
conn = __get_conn(**kwargs)
caps = _capabilities(conn)
os_types = sorted({guest["os_type"] for guest in caps["guests"]})
arches = sorted({guest["arch"]["name"] for guest in caps["guests"]})
virt_hypervisor = hypervisor
if not virt_hypervisor:
# Use the machine types as possible values
# Prefer 'kvm' over the others if available
hypervisors = sorted(
{
x
for y in [
guest["arch"]["domains"].keys() for guest in caps["guests"]
]
for x in y
}
)
virt_hypervisor = "kvm" if "kvm" in hypervisors else hypervisors[0]
# esxi used to be a possible value for the hypervisor: map it to vmware since it's the same
virt_hypervisor = "vmware" if virt_hypervisor == "esxi" else virt_hypervisor
log.debug("Using hypervisor %s", virt_hypervisor)
nicp = _get_merged_nics(virt_hypervisor, nic, interfaces)
# the disks are computed as follows:
# 1 - get the disks defined in the profile
# 3 - update the disks from the profile with the ones from the user. The matching key is the name.
diskp = _disk_profile(conn, disk, virt_hypervisor, disks, name)
# Create multiple disks, empty or from specified images.
for _disk in diskp:
# No need to create an image for cdrom devices
if _disk.get("device", "disk") == "cdrom":
continue
log.debug("Creating disk for VM [ %s ]: %s", name, _disk)
if virt_hypervisor == "vmware":
if "image" in _disk:
# TODO: we should be copying the image file onto the ESX host
raise SaltInvocationError(
"virt.init does not support image "
"template in conjunction with esxi hypervisor"
)
else:
# assume libvirt manages disks for us
log.debug("Generating libvirt XML for %s", _disk)
volume_name = "{0}/{1}".format(name, _disk["name"])
filename = "{0}.{1}".format(volume_name, _disk["format"])
vol_xml = _gen_vol_xml(
filename, _disk["size"], format=_disk["format"]
)
_define_vol_xml_str(conn, vol_xml, pool=_disk.get("pool"))
elif virt_hypervisor in ["qemu", "kvm", "xen"]:
def seeder(path):
_seed_image(
seed_cmd,
path,
name,
kwargs.get("config"),
install,
pub_key,
priv_key,
)
create_overlay = _disk.get("overlay_image", False)
format = _disk.get("format")
if _disk.get("source_file"):
if os.path.exists(_disk["source_file"]):
img_dest = _disk["source_file"]
else:
img_dest = _qemu_image_create(_disk, create_overlay, saltenv)
else:
_disk_volume_create(conn, _disk, seeder if seed else None, saltenv)
img_dest = None
# Seed only if there is an image specified
if seed and img_dest and _disk.get("image", None):
seeder(img_dest)
elif hypervisor in ["bhyve"]:
img_dest = _zfs_image_create(
vm_name=name,
pool=_disk.get("pool"),
disk_name=_disk.get("name"),
disk_size=_disk.get("size"),
disk_image_name=_disk.get("image"),
hostname_property_name=_disk.get("hostname_property"),
sparse_volume=_disk.get("sparse_volume"),
)
else:
# Unknown hypervisor
raise SaltInvocationError(
"Unsupported hypervisor when handling disk image: {0}".format(
virt_hypervisor
)
)
log.debug("Generating VM XML")
if os_type is None:
os_type = "hvm" if "hvm" in os_types else os_types[0]
if arch is None:
arch = "x86_64" if "x86_64" in arches else arches[0]
if boot is not None:
boot = _handle_remote_boot_params(boot)
vm_xml = _gen_xml(
conn,
name,
cpu,
mem,
diskp,
nicp,
virt_hypervisor,
os_type,
arch,
graphics,
boot,
memtune,
**kwargs
)
conn.defineXML(vm_xml)
except libvirt.libvirtError as err:
conn.close()
raise CommandExecutionError(err.get_error_message())
if start:
log.debug("Starting VM %s", name)
_get_domain(conn, name).create()
conn.close()
return True
|
def init(
name,
cpu,
mem,
nic="default",
interfaces=None,
hypervisor=None,
start=True, # pylint: disable=redefined-outer-name
disk="default",
disks=None,
saltenv="base",
seed=True,
install=True,
pub_key=None,
priv_key=None,
seed_cmd="seed.apply",
graphics=None,
os_type=None,
arch=None,
boot=None,
memtune=None,
**kwargs
):
"""
Initialize a new vm
:param name: name of the virtual machine to create
:param cpu: Number of virtual CPUs to assign to the virtual machine
:param mem: Amount of memory to allocate to the virtual machine in MiB.
:param nic: NIC profile to use (Default: ``'default'``).
The profile interfaces can be customized / extended with the interfaces parameter.
If set to ``None``, no profile will be used.
:param interfaces:
List of dictionaries providing details on the network interfaces to create.
These data are merged with the ones from the nic profile. The structure of
each dictionary is documented in :ref:`init-nic-def`.
.. versionadded:: 2019.2.0
:param hypervisor: the virtual machine type. By default the value will be computed according
to the virtual host capabilities.
:param start: ``True`` to start the virtual machine after having defined it (Default: ``True``)
:param disk: Disk profile to use (Default: ``'default'``). If set to ``None``, no profile will be used.
:param disks: List of dictionaries providing details on the disk devices to create.
These data are merged with the ones from the disk profile. The structure of
each dictionary is documented in :ref:`init-disk-def`.
.. versionadded:: 2019.2.0
:param saltenv: Fileserver environment (Default: ``'base'``).
See :mod:`cp module for more details <salt.modules.cp>`
:param seed: ``True`` to seed the disk image. Only used when the ``image`` parameter is provided.
(Default: ``True``)
:param install: install salt minion if absent (Default: ``True``)
:param pub_key: public key to seed with (Default: ``None``)
:param priv_key: public key to seed with (Default: ``None``)
:param seed_cmd: Salt command to execute to seed the image. (Default: ``'seed.apply'``)
:param graphics:
Dictionary providing details on the graphics device to create. (Default: ``None``)
See :ref:`init-graphics-def` for more details on the possible values.
.. versionadded:: 2019.2.0
:param os_type:
type of virtualization as found in the ``//os/type`` element of the libvirt definition.
The default value is taken from the host capabilities, with a preference for ``hvm``.
.. versionadded:: 2019.2.0
:param arch:
architecture of the virtual machine. The default value is taken from the host capabilities,
but ``x86_64`` is prefed over ``i686``.
.. versionadded:: 2019.2.0
:param config: minion configuration to use when seeding.
See :mod:`seed module for more details <salt.modules.seed>`
:param boot_dev: String of space-separated devices to boot from (Default: ``'hd'``)
:param serial_type: Serial device type. One of ``'pty'``, ``'tcp'`` (Default: ``None``)
:param telnet_port: Telnet port to use for serial device of type ``tcp``.
:param console: ``True`` to add a console device along with serial one (Default: ``True``)
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param boot:
Specifies kernel, initial ramdisk and kernel command line parameters for the virtual machine.
This is an optional parameter, all of the keys are optional within the dictionary. The structure of
the dictionary is documented in :ref:`init-boot-def`. If a remote path is provided to kernel or initrd,
salt will handle the downloading of the specified remote file and modify the XML accordingly.
To boot VM with UEFI, specify loader and nvram path or specify 'efi': ``True`` if your libvirtd version
is >= 5.2.0 and QEMU >= 3.0.0.
.. versionadded:: 3000
.. code-block:: python
{
'kernel': '/root/f8-i386-vmlinuz',
'initrd': '/root/f8-i386-initrd',
'cmdline': 'console=ttyS0 ks=http://example.com/f8-i386/os/',
'loader': '/usr/share/OVMF/OVMF_CODE.fd',
'nvram': '/usr/share/OVMF/OVMF_VARS.ms.fd'
}
:param memtune:
Specifies hard_limit, soft_limit, swap_hard_limit and min_guarantee parameters for tuning the memory of the domain.
This is an optional parameter, all of the keys are optional within the dictionary. The structure of the dictionary
is documented in :ref:`init-memtune-def`. Both decimal and binary base are supported. Detail unit specification is
documented in :ref:`virt-units`
.. versionadded:: Magnesium
.. code-block:: python
{
'hard_limit': '1024'
'soft_limit': '512m'
'swap_hard_limit': '1g'
'min_guarantee': '512mib'
}
.. _init-boot-def:
.. rubric:: Boot parameters definition
The boot parameters dictionary can contains the following properties:
kernel
The URL or path to the kernel to run the virtual machine with.
initrd
The URL or path to the initrd file to run the virtual machine with.
cmdline
The parameters to pass to the kernel provided in the `kernel` property.
loader
The path to the UEFI binary loader to use.
.. versionadded:: 3001
nvram
The path to the UEFI data template. The file will be copied when creating the virtual machine.
.. versionadded:: 3001
efi
A boolean value.
.. versionadded:: sodium
.. _init-memtune-def:
.. rubric:: Memtune parameter definition
Memtune parameter can contain the following properties:
hard_limit
the maximum memory the guest can use
soft_limit
memory limit to enforce during memory contention
swap_hard_limit
the maximum memory plus swap the guest can use
min_guarantee
the guaranteed minimum memory allocation for the guest
.. _init-nic-def:
.. rubric:: Network Interfaces Definitions
Network interfaces dictionaries can contain the following properties:
name
Name of the network interface. This is only used as a key to merge with the profile data
type
Network type. One of ``'bridge'``, ``'network'``
source
The network source, typically the bridge or network name
mac
The desired mac address, computed if ``None`` (Default: ``None``).
model
The network card model (Default: depends on the hypervisor)
.. _init-disk-def:
.. rubric:: Disks Definitions
Disk dictionaries can contain the following properties:
name
Name of the disk. This is mostly used in the name of the disk image and as a key to merge
with the profile data.
format
Format of the disk image, like ``'qcow2'``, ``'raw'``, ``'vmdk'``.
(Default: depends on the hypervisor)
Specifies ``hard_limit, soft_limit``, ``swap_hard_limit`` and ``min_guarantee`` parameters for tuning the memory of the domain.
Disk size in MiB
pool
Path to the folder or name of the pool where disks should be created.
(Default: depends on hypervisor and the virt:storagepool configuration)
.. versionchanged:: 3001
If the value contains no '/', it is considered a pool name where to create a volume.
Using volumes will be mandatory for some pools types like rdb, iscsi, etc.
model
One of the disk busses allowed by libvirt (Default: depends on hypervisor)
See the libvirt `disk element`_ documentation for the allowed bus types.
image
Path to the image to use for the disk. If no image is provided, an empty disk will be created
(Default: ``None``)
Note that some pool types do not support uploading an image. This list can evolve with libvirt
versions.
overlay_image
``True`` to create a QCOW2 disk image with ``image`` as backing file. If ``False``
the file pointed to by the ``image`` property will simply be copied. (Default: ``False``)
.. versionchanged:: 3001
This property is only valid on path-based disks, not on volumes. To create a volume with a
backing store, set the ``backing_store_path`` and ``backing_store_format`` properties.
backing_store_path
Path to the backing store image to use. This can also be the name of a volume to use as
backing store within the same pool.
.. versionadded:: 3001
backing_store_format
Image format of the disk or volume to use as backing store. This property is mandatory when
using ``backing_store_path`` to avoid `problems <https://libvirt.org/kbase/backing_chains.html#troubleshooting>`_
.. versionadded:: 3001
source_file
Absolute path to the disk image to use. Not to be confused with ``image`` parameter. This
parameter is useful to use disk images that are created outside of this module. Can also
be ``None`` for devices that have no associated image like cdroms.
.. versionchanged:: 3001
For volume disks, this can be the name of a volume already existing in the storage pool.
device
Type of device of the disk. Can be one of 'disk', 'cdrom', 'floppy' or 'lun'.
(Default: ``'disk'``)
hostname_property
When using ZFS volumes, setting this value to a ZFS property ID will make Salt store the name of the
virtual machine inside this property. (Default: ``None``)
sparse_volume
Boolean to specify whether to use a thin provisioned ZFS volume.
Example profile for a bhyve VM with two ZFS disks. The first is
cloned from the specified image. The second disk is a thin
provisioned volume.
.. code-block:: yaml
virt:
disk:
two_zvols:
- system:
image: zroot/bhyve/CentOS-7-x86_64-v1@v1.0.5
hostname_property: virt:hostname
pool: zroot/bhyve/guests
- data:
pool: tank/disks
size: 20G
hostname_property: virt:hostname
sparse_volume: True
.. _init-graphics-def:
.. rubric:: Graphics Definition
The graphics dictionary can have the following properties:
type
Graphics type. The possible values are ``none``, ``'spice'``, ``'vnc'`` and other values
allowed as a libvirt graphics type (Default: ``None``)
See the libvirt `graphics element`_ documentation for more details on the possible types.
port
Port to export the graphics on for ``vnc``, ``spice`` and ``rdp`` types.
tls_port
Port to export the graphics over a secured connection for ``spice`` type.
listen
Dictionary defining on what address to listen on for ``vnc``, ``spice`` and ``rdp``.
It has a ``type`` property with ``address`` and ``None`` as possible values, and an
``address`` property holding the IP or hostname to listen on.
By default, not setting the ``listen`` part of the dictionary will default to
listen on all addresses.
.. rubric:: CLI Example
.. code-block:: bash
salt 'hypervisor' virt.init vm_name 4 512 salt://path/to/image.raw
salt 'hypervisor' virt.init vm_name 4 512 /var/lib/libvirt/images/img.raw
salt 'hypervisor' virt.init vm_name 4 512 nic=profile disk=profile
The disk images will be created in an image folder within the directory
defined by the ``virt:images`` option. Its default value is
``/srv/salt-images/`` but this can changed with such a configuration:
.. code-block:: yaml
virt:
images: /data/my/vm/images/
.. _disk element: https://libvirt.org/formatdomain.html#elementsDisks
.. _graphics element: https://libvirt.org/formatdomain.html#elementsGraphics
"""
try:
conn = __get_conn(**kwargs)
caps = _capabilities(conn)
os_types = sorted({guest["os_type"] for guest in caps["guests"]})
arches = sorted({guest["arch"]["name"] for guest in caps["guests"]})
virt_hypervisor = hypervisor
if not virt_hypervisor:
# Use the machine types as possible values
# Prefer 'kvm' over the others if available
hypervisors = sorted(
{
x
for y in [
guest["arch"]["domains"].keys() for guest in caps["guests"]
]
for x in y
}
)
virt_hypervisor = "kvm" if "kvm" in hypervisors else hypervisors[0]
# esxi used to be a possible value for the hypervisor: map it to vmware since it's the same
virt_hypervisor = "vmware" if virt_hypervisor == "esxi" else virt_hypervisor
log.debug("Using hypervisor %s", virt_hypervisor)
nicp = _get_merged_nics(virt_hypervisor, nic, interfaces)
# the disks are computed as follows:
# 1 - get the disks defined in the profile
# 3 - update the disks from the profile with the ones from the user. The matching key is the name.
diskp = _disk_profile(conn, disk, virt_hypervisor, disks, name)
# Create multiple disks, empty or from specified images.
for _disk in diskp:
# No need to create an image for cdrom devices
if _disk.get("device", "disk") == "cdrom":
continue
log.debug("Creating disk for VM [ %s ]: %s", name, _disk)
if virt_hypervisor == "vmware":
if "image" in _disk:
# TODO: we should be copying the image file onto the ESX host
raise SaltInvocationError(
"virt.init does not support image "
"template in conjunction with esxi hypervisor"
)
else:
# assume libvirt manages disks for us
log.debug("Generating libvirt XML for %s", _disk)
volume_name = "{0}/{1}".format(name, _disk["name"])
filename = "{0}.{1}".format(volume_name, _disk["format"])
vol_xml = _gen_vol_xml(
filename, _disk["size"], format=_disk["format"]
)
_define_vol_xml_str(conn, vol_xml, pool=_disk.get("pool"))
elif virt_hypervisor in ["qemu", "kvm", "xen"]:
def seeder(path):
_seed_image(
seed_cmd,
path,
name,
kwargs.get("config"),
install,
pub_key,
priv_key,
)
create_overlay = _disk.get("overlay_image", False)
format = _disk.get("format")
if _disk.get("source_file"):
if os.path.exists(_disk["source_file"]):
img_dest = _disk["source_file"]
else:
img_dest = _qemu_image_create(_disk, create_overlay, saltenv)
else:
_disk_volume_create(conn, _disk, seeder if seed else None, saltenv)
img_dest = None
# Seed only if there is an image specified
if seed and img_dest and _disk.get("image", None):
seeder(img_dest)
elif hypervisor in ["bhyve"]:
img_dest = _zfs_image_create(
vm_name=name,
pool=_disk.get("pool"),
disk_name=_disk.get("name"),
disk_size=_disk.get("size"),
disk_image_name=_disk.get("image"),
hostname_property_name=_disk.get("hostname_property"),
sparse_volume=_disk.get("sparse_volume"),
)
else:
# Unknown hypervisor
raise SaltInvocationError(
"Unsupported hypervisor when handling disk image: {0}".format(
virt_hypervisor
)
)
log.debug("Generating VM XML")
if os_type is None:
os_type = "hvm" if "hvm" in os_types else os_types[0]
if arch is None:
arch = "x86_64" if "x86_64" in arches else arches[0]
if boot is not None:
boot = _handle_remote_boot_params(boot)
vm_xml = _gen_xml(
conn,
name,
cpu,
mem,
diskp,
nicp,
virt_hypervisor,
os_type,
arch,
graphics,
boot,
memtune,
**kwargs
)
conn.defineXML(vm_xml)
except libvirt.libvirtError as err:
conn.close()
raise CommandExecutionError(err.get_error_message())
if start:
log.debug("Starting VM %s", name)
_get_domain(conn, name).create()
conn.close()
return True
|
38,662 |
def test_list_getitem():
l = [1, 2]
expr = sn.defer(l)[1] == 3
l[1] = 3
assert expr
|
def test_getitem_list():
l = [1, 2]
expr = sn.defer(l)[1] == 3
l[1] = 3
assert expr
|
31,118 |
def upload_private_id_set(storage_bucket, private_id_set_path):
"""
Uploads the private_id_set.json artifact to the bucket.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
private_id_set_path: path to the private_id_set.json file
"""
if not private_id_set_path:
logging.info("Skipping upload of private id set to gcs.")
return
private_id_set_gcs_path = os.path.join(os.path.dirname(GCPConfig.STORAGE_PRIVATE_ID_SET_PATH),
'private_id_set.json')
blob = storage_bucket.blob(private_id_set_gcs_path)
with open(private_id_set_path, mode='r') as f:
blob.upload_from_file(f)
logging.success("Finished uploading id_set.json to storage.")
|
def upload_private_id_set_to_bucket(storage_bucket, private_id_set_path):
"""
Uploads the private_id_set.json artifact to the bucket.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
private_id_set_path: path to the private_id_set.json file
"""
if not private_id_set_path:
logging.info("Skipping upload of private id set to gcs.")
return
private_id_set_gcs_path = os.path.join(os.path.dirname(GCPConfig.STORAGE_PRIVATE_ID_SET_PATH),
'private_id_set.json')
blob = storage_bucket.blob(private_id_set_gcs_path)
with open(private_id_set_path, mode='r') as f:
blob.upload_from_file(f)
logging.success("Finished uploading id_set.json to storage.")
|
35,618 |
def ps_roi_pool(
input: Tensor,
boxes: Tensor,
output_size: int,
spatial_scale: float = 1.0,
) -> Tensor:
"""
Performs Position-Sensitive Region of Interest (RoI) Pool operator
described in R-FCN
Args:
input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
contains ``C`` feature maps of dimensions ``H x W``.
boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
format where the regions will be taken from.
The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
If a single Tensor is passed, then the first column should
contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
in the batch.
output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
is performed, as (height, width).
spatial_scale (float): a scaling factor that maps the input coordinates to
the box coordinates. Default: 1.0
Returns:
Tensor[K, C/(output_size[0]*output_size[1]), output_size[0], output_size[1]]: The pooled RoIs.
"""
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
output_size = _pair(output_size)
if not isinstance(rois, torch.Tensor):
rois = convert_boxes_to_roi_format(rois)
output, _ = torch.ops.torchvision.ps_roi_pool(input, rois, spatial_scale,
output_size[0],
output_size[1])
return output
|
def ps_roi_pool(
input: Tensor,
boxes: Tensor,
output_size: int,
spatial_scale: float = 1.0,
) -> Tensor:
"""
Performs Position-Sensitive Region of Interest (RoI) Pool operator
described in R-FCN
Args:
input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
contains ``C`` feature maps of dimensions ``H x W``.
boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
format where the regions will be taken from.
The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
If a single Tensor is passed, then the first column should
contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
in the batch.
output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
is performed, as (height, width).
spatial_scale (float): a scaling factor that maps the input coordinates to
the box coordinates. Default: 1.0
Returns:
Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs.
"""
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
output_size = _pair(output_size)
if not isinstance(rois, torch.Tensor):
rois = convert_boxes_to_roi_format(rois)
output, _ = torch.ops.torchvision.ps_roi_pool(input, rois, spatial_scale,
output_size[0],
output_size[1])
return output
|
29,808 |
def get_next_x_cron_runs(num_runs, schedule, start_datetime):
iter = croniter(schedule, start_datetime)
next_runs = []
for _ in range(num_runs):
next_runs.append(iter.get_next(datetime))
return next_runs
|
def get_next_x_cron_runs(num_runs: int, schedule: str, start_datetime: datetime.datetime) -> List[str]:
iter = croniter(schedule, start_datetime)
next_runs = []
for _ in range(num_runs):
next_runs.append(iter.get_next(datetime))
return next_runs
|
36,263 |
def _normalize_per_cell(
X: Union[np.ndarray, spmatrix],
counts_per_cell_after: Optional[float] = None,
counts_per_cell: Optional[np.ndarray] = None,
copy: bool = False,
min_counts: int = 1,
):
"""Internal function that performs the normalization."""
X = check_array(
X, accept_sparse=("csr", "csc"), dtype=(np.float64, np.float32), copy=copy
)
if counts_per_cell is None:
if copy == False:
raise ValueError('Can only be run with copy=True')
cell_subset, counts_per_cell = filter_cells(X, min_counts=min_counts)
X = X[cell_subset]
counts_per_cell = counts_per_cell[cell_subset]
if counts_per_cell_after is None:
counts_per_cell_after = np.median(counts_per_cell)
# Conversion in case counts per cell has int values
counts_per_cell = counts_per_cell.astype(
np.promote_types(counts_per_cell.dtype, np.float32), copy=False
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
counts_per_cell += counts_per_cell == 0
counts_per_cell /= counts_per_cell_after
if not issparse(X): X /= materialize_as_ndarray(counts_per_cell[:, np.newaxis])
else: sparsefuncs.inplace_row_scale(X, 1/counts_per_cell)
return X
|
def _normalize_per_cell(
X: Union[np.ndarray, spmatrix],
counts_per_cell_after: Optional[float] = None,
counts_per_cell: Optional[np.ndarray] = None,
copy: bool = False,
min_counts: int = 1,
) -> Union[np.ndarray, spmatrix]:
"""Internal function that performs the normalization."""
X = check_array(
X, accept_sparse=("csr", "csc"), dtype=(np.float64, np.float32), copy=copy
)
if counts_per_cell is None:
if copy == False:
raise ValueError('Can only be run with copy=True')
cell_subset, counts_per_cell = filter_cells(X, min_counts=min_counts)
X = X[cell_subset]
counts_per_cell = counts_per_cell[cell_subset]
if counts_per_cell_after is None:
counts_per_cell_after = np.median(counts_per_cell)
# Conversion in case counts per cell has int values
counts_per_cell = counts_per_cell.astype(
np.promote_types(counts_per_cell.dtype, np.float32), copy=False
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
counts_per_cell += counts_per_cell == 0
counts_per_cell /= counts_per_cell_after
if not issparse(X): X /= materialize_as_ndarray(counts_per_cell[:, np.newaxis])
else: sparsefuncs.inplace_row_scale(X, 1/counts_per_cell)
return X
|
38,264 |
def format_completion(
item: CompletionItem, index: int, can_resolve_completion_items: bool, session_name: str
) -> sublime.CompletionItem:
# This is a hot function. Don't do heavy computations or IO in this function.
item_kind = item.get("kind")
if isinstance(item_kind, int) and 1 <= item_kind <= len(COMPLETION_KINDS):
kind = COMPLETION_KINDS[item_kind - 1]
else:
kind = sublime.KIND_AMBIGUOUS
if _is_completion_item_deprecated(item):
kind = (kind[0], '⚠', "⚠ {} - Deprecated".format(kind[2]))
lsp_label = item["label"]
lsp_filter_text = item.get("filterText")
st_annotation = (item.get("detail") or "").replace('\n', ' ')
st_details = ""
if can_resolve_completion_items or item.get("documentation"):
st_details += make_command_link("lsp_resolve_docs", "More", {"index": index, "session_name": session_name})
if lsp_filter_text and lsp_filter_text != lsp_label:
st_trigger = lsp_filter_text
if st_details:
st_details += " | "
st_details += "<p>{}</p>".format(html.escape(lsp_label))
else:
st_trigger = lsp_label
args = {
"item": item,
"session_name": session_name
} # type: Dict[str, Any]
completion = sublime.CompletionItem.command_completion(
trigger=st_trigger,
command="lsp_select_completion_item",
args=args,
annotation=st_annotation,
kind=kind,
details=st_details)
if item.get("textEdit"):
completion.flags = sublime.COMPLETION_FLAG_KEEP_PREFIX
return completion
|
def format_completion(
item: CompletionItem, index: int, can_resolve_completion_items: bool, session_name: str
) -> sublime.CompletionItem:
# This is a hot function. Don't do heavy computations or IO in this function.
item_kind = item.get("kind")
if isinstance(item_kind, int) and 1 <= item_kind <= len(COMPLETION_KINDS):
kind = COMPLETION_KINDS[item_kind - 1]
else:
kind = sublime.KIND_AMBIGUOUS
if _is_completion_item_deprecated(item):
kind = (kind[0], '⚠', "⚠ {} - Deprecated".format(kind[2]))
lsp_label = item["label"]
lsp_filter_text = item.get("filterText")
st_annotation = (item.get("detail") or "").replace('\n', ' ')
st_details = ""
if can_resolve_completion_items or item.get("documentation"):
st_details += make_command_link("lsp_resolve_docs", "More", {"index": index, "session_name": session_name})
if lsp_filter_text and lsp_filter_text != lsp_label:
st_trigger = lsp_filter_text
if st_details:
st_details += " | "
st_details += "<p>{}</p>".format(html.escape(lsp_label))
else:
st_trigger = lsp_label
args = {
"item": item,
"session_name": session_name
} # type: Dict[str, Any]
completion = sublime.CompletionItem.command_completion(
trigger=st_trigger,
command="lsp_select_completion_item",
args={"item": item, "session_name": session_name},
annotation=st_annotation,
kind=kind,
details=st_details)
if item.get("textEdit"):
completion.flags = sublime.COMPLETION_FLAG_KEEP_PREFIX
return completion
|
93 |
def _validate_catalog(catalog):
validation_errors = []
for message in catalog:
if message.fuzzy:
if message.lineno:
validation_errors.append(
f' Line {message.lineno}: "{message.string}" is fuzzy.'
)
else:
validation_errors.append(
' File is fuzzy. Remove line containing "#, fuzzy" found near '
'the beginning of the file.'
)
if validation_errors:
print("Validation failed...")
print("Please correct the following errors before proceeding:")
for e in validation_errors:
print(e)
return len(validation_errors) == 0
|
def _validate_catalog(catalog):
validation_errors = []
for message in catalog:
if message.fuzzy:
if message.lineno:
validation_errors.append(
f'openlibrary/i18n/te/messages.po:{message.lineno}: "{message.string}" is fuzzy.'
)
else:
validation_errors.append(
' File is fuzzy. Remove line containing "#, fuzzy" found near '
'the beginning of the file.'
)
if validation_errors:
print("Validation failed...")
print("Please correct the following errors before proceeding:")
for e in validation_errors:
print(e)
return len(validation_errors) == 0
|
52,206 |
def find_ldc_dmd_frontend_version(version_output: str):
version_regex = re.search(r'DMD v(\d+\.\d+\.\d+)', version_output)
if version_regex is not None and len(version_regex.groups()):
return version_regex.groups()[0]
return ''
|
def find_ldc_dmd_frontend_version(version_output: str):
version_regex = re.search(r'DMD v(\d+\.\d+\.\d+)', version_output)
if version_regex:
return version_regex.group(1)
return ''
|
55,352 |
def create_pyramid_level(backbone_input,
upsamplelike_input=None,
addition_input=None,
upsample_type='upsamplelike',
level=5,
ndim=2,
lite=False,
interpolation='bilinear',
feature_size=256,
z_axis_convolutions=False):
"""Create a pyramid layer from a particular backbone input layer.
Args:
backbone_input (layer): Backbone layer to use to create they pyramid
layer
upsamplelike_input (tensor): Optional input to use
as a template for shape to upsample to
addition_input (layer): Optional layer to add to
pyramid layer after convolution and upsampling.
upsample_type (str, optional): Choice of upsampling methods
from ['upsamplelike','upsampling2d','upsampling3d'].
Defaults to 'upsamplelike'.
level (int): Level to use in layer names, defaults to 5.
feature_size (int):Number of filters for
convolutional layer, defaults to 256.
ndim (int): The spatial dimensions of the input data. Default is 2,
but it also works with 3
lite (bool): Whether to use depthwise conv instead of regular conv for
feature pyramid construction
interpolation (str): Choice of interpolation mode for upsampling
layers from ['bilinear', 'nearest']. Defaults to bilinear.
Returns:
tuple: Pyramid layer after processing, upsampled pyramid layer
Raises:
ValueError: ndim is not 2 or 3
ValueError: upsample_type not ['upsamplelike','upsampling2d',
'upsampling3d']
"""
# Check input to ndims
acceptable_ndims = {2, 3}
if ndim not in acceptable_ndims:
raise ValueError('Only 2 and 3 dimensional networks are supported')
# Check if inputs to ndim and lite are compatible
if ndim == 3 and lite:
raise ValueError('lite models are not compatible with 3 dimensional '
'networks')
# Check input to interpolation
acceptable_interpolation = {'bilinear', 'nearest'}
if interpolation not in acceptable_interpolation:
raise ValueError('Interpolation mode "{}" not supported. '
'Choose from {}.'.format(
interpolation, list(acceptable_interpolation)))
# Check input to upsample_type
acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
if upsample_type not in acceptable_upsample:
raise ValueError('Upsample method "{}" not supported. '
'Choose from {}.'.format(
upsample_type, list(acceptable_upsample)))
reduced_name = 'C{}_reduced'.format(level)
upsample_name = 'P{}_upsampled'.format(level)
addition_name = 'P{}_merged'.format(level)
final_name = 'P{}'.format(level)
# Apply 1x1 conv to backbone layer
if ndim == 2:
pyramid = Conv2D(feature_size, (1, 1), strides=(1, 1),
padding='same', name=reduced_name)(backbone_input)
else:
pyramid = Conv3D(feature_size, (1, 1, 1), strides=(1, 1, 1),
padding='same', name=reduced_name)(backbone_input)
# Add and then 3x3 conv
if addition_input is not None:
pyramid = Add(name=addition_name)([pyramid, addition_input])
# Upsample pyramid input
if upsamplelike_input is not None and upsample_type == 'upsamplelike':
pyramid_upsample = UpsampleLike(name=upsample_name)(
[pyramid, upsamplelike_input])
elif upsample_type == 'upsamplelike':
pyramid_upsample = None
else:
upsampling = UpSampling2D if ndim == 2 else UpSampling3D
size = (2, 2) if ndim == 2 else (1, 2, 2)
upsampling_kwargs = {
'size': size,
'name': upsample_name,
'interpolation': interpolation
}
if ndim > 2:
del upsampling_kwargs['interpolation']
pyramid_upsample = upsampling(**upsampling_kwargs)(pyramid)
if ndim == 2:
if lite:
pyramid_final = DepthwiseConv2D((3, 3), strides=(1, 1),
padding='same',
name=final_name)(pyramid)
else:
pyramid_final = Conv2D(feature_size, (3, 3), strides=(1, 1),
padding='same', name=final_name)(pyramid)
else:
if z_axis_convolutions:
pyramid_final = Conv3D(feature_size, (3, 3, 3), strides=(1, 1, 1),
padding='same', name=final_name)(pyramid)
print('Using convolutions across the z-axis')
else:
pyramid_final = Conv3D(feature_size, (1, 3, 3), strides=(1, 1, 1),
padding='same', name=final_name)(pyramid)
print('Not using convolutions across the z-axis')
return pyramid_final, pyramid_upsample
|
def create_pyramid_level(backbone_input,
upsamplelike_input=None,
addition_input=None,
upsample_type='upsamplelike',
level=5,
ndim=2,
lite=False,
interpolation='bilinear',
feature_size=256,
z_axis_convolutions=False):
"""Create a pyramid layer from a particular backbone input layer.
Args:
backbone_input (layer): Backbone layer to use to create they pyramid
layer
upsamplelike_input (tensor): Optional input to use
as a template for shape to upsample to
addition_input (layer): Optional layer to add to
pyramid layer after convolution and upsampling.
upsample_type (str, optional): Choice of upsampling methods
from ['upsamplelike','upsampling2d','upsampling3d'].
Defaults to 'upsamplelike'.
level (int): Level to use in layer names, defaults to 5.
feature_size (int):Number of filters for
convolutional layer, defaults to 256.
ndim (int): The spatial dimensions of the input data. Default is 2,
but it also works with 3
lite (bool): Whether to use depthwise conv instead of regular conv for
feature pyramid construction
interpolation (str): Choice of interpolation mode for upsampling
layers from ['bilinear', 'nearest']. Defaults to bilinear.
Returns:
tuple: Pyramid layer after processing, upsampled pyramid layer
Raises:
ValueError: ndim is not 2 or 3
ValueError: upsample_type not ['upsamplelike','upsampling2d',
'upsampling3d']
"""
# Check input to ndims
acceptable_ndims = {2, 3}
if ndim not in acceptable_ndims:
raise ValueError('Only 2 and 3 dimensional networks are supported')
# Check if inputs to ndim and lite are compatible
if ndim == 3 and lite:
raise ValueError('lite models are not compatible with 3 dimensional '
'networks')
# Check input to interpolation
acceptable_interpolation = {'bilinear', 'nearest'}
if interpolation not in acceptable_interpolation:
raise ValueError('Interpolation mode "{}" not supported. '
'Choose from {}.'.format(
interpolation, list(acceptable_interpolation)))
# Check input to upsample_type
acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
if upsample_type not in acceptable_upsample:
raise ValueError('Upsample method "{}" not supported. '
'Choose from {}.'.format(
upsample_type, list(acceptable_upsample)))
reduced_name = 'C{}_reduced'.format(level)
upsample_name = 'P{}_upsampled'.format(level)
addition_name = 'P{}_merged'.format(level)
final_name = 'P{}'.format(level)
# Apply 1x1 conv to backbone layer
if ndim == 2:
pyramid = Conv2D(feature_size, (1, 1), strides=(1, 1),
padding='same', name=reduced_name)(backbone_input)
else:
pyramid = Conv3D(feature_size, (1, 1, 1), strides=(1, 1, 1),
padding='same', name=reduced_name)(backbone_input)
# Add and then 3x3 conv
if addition_input is not None:
pyramid = Add(name=addition_name)([pyramid, addition_input])
# Upsample pyramid input
if upsamplelike_input is not None and upsample_type == 'upsamplelike':
pyramid_upsample = UpsampleLike(name=upsample_name)(
[pyramid, upsamplelike_input])
elif upsample_type == 'upsamplelike':
pyramid_upsample = None
else:
upsampling = UpSampling2D if ndim == 2 else UpSampling3D
size = (2, 2) if ndim == 2 else (1, 2, 2)
upsampling_kwargs = {
'size': size,
'name': upsample_name,
'interpolation': interpolation
}
if ndim > 2:
del upsampling_kwargs['interpolation']
pyramid_upsample = upsampling(**upsampling_kwargs)(pyramid)
if ndim == 2:
if lite:
pyramid_final = DepthwiseConv2D((3, 3), strides=(1, 1),
padding='same',
name=final_name)(pyramid)
else:
pyramid_final = Conv2D(feature_size, (3, 3), strides=(1, 1),
padding='same', name=final_name)(pyramid)
else:
z = 3 if z_axis_convolutions else 1
pyramid_final = Conv3D(feature_size, (z, 3, 3), strides=(1, 1, 1),
padding='same', name=final_name)(pyramid)
return pyramid_final, pyramid_upsample
|
30,999 |
def fetch_incidents(client, last_run, fetch_time, mapper_in):
"""
This function will execute each interval (default is 1 minute).
Args:
client: Workday client
last_run: The greatest incident created_time we fetched from last fetch
fetch_time: The time interval when the function should execute and return events/incidents
Returns:
last_run: This will be last_run in the next fetch-incidents
events: Incidents/Events that will be created in Cortex XSOAR
"""
start = datetime.now()
events = []
from_date_time = '###'
to_date_time = '$$$'
try:
# If there is no fetch time configured, it will be set to 0 and no events will be pulled
fetch_time = int(fetch_time) if fetch_time else 0
time_elapsed_in_minutes, last_run_time = get_time_elapsed(fetch_time, last_run)
from_date_time = last_run_time
if fetch_time != 0 and time_elapsed_in_minutes >= fetch_time:
to_date_time = datetime.now().strftime(WORKDAY_DATE_TIME_FORMAT)
report_data = client.get_full_report()
report_entries = report_data.get('Report_Entry')
for entry in report_entries:
workday_user = demisto.mapObject(entry, mapper_in, INCIDENT_TYPE)
workday_user = convert_incident_fields_to_cli_names(workday_user)
demisto_user = get_demisto_user(workday_user)
profile_changed_fields = get_profile_changed_fields(workday_user, demisto_user)
terminate_date_arrived = check_if_user_should_be_terminated(workday_user)
does_email_exist = does_email_exist_in_xsoar(workday_user.get('email'))
if ((demisto_user and len(profile_changed_fields) == 0) or (not demisto_user and does_email_exist))\
and not terminate_date_arrived:
# either no change in user profile or user profile doesn't exist but the email is already used
# in both cases, don't create the incident
continue
entry['UserProfile'] = workday_user
event = {
"rawJSON": json.dumps(entry),
"details": 'Profile changed. Changed fields: ' + str(profile_changed_fields)
}
events.append(event)
last_run_time = datetime.now().strftime(WORKDAY_DATE_TIME_FORMAT)
demisto.info(f'Workday Fetch Events Completed. Response Time:'
f' {(datetime.now() - start).total_seconds()} seconds')
last_run = {'time': last_run_time, "sync_users": True}
except Exception as e:
demisto.error(f'Failed to fetch events. From Date = {from_date_time}. To Date = {to_date_time}')
raise e
return last_run, events
|
def fetch_incidents(client, last_run, fetch_time, mapper_in):
"""
This function will execute each interval (default is 1 minute).
Args:
client: Workday client
last_run: The greatest incident created_time we fetched from last fetch
fetch_time: The time interval when the function should execute and return events/incidents
Returns:
last_run: This will be last_run in the next fetch-incidents
events: Incidents/Events that will be created in Cortex XSOAR
"""
start = datetime.now()
events = []
from_date_time = '###'
to_date_time = '$$$'
try:
# If there is no fetch time configured, it will be set to 0 and no events will be pulled
fetch_time = int(fetch_time) if fetch_time else 0
time_elapsed_in_minutes, last_run_time = get_time_elapsed(fetch_time, last_run)
from_date_time = last_run_time
if fetch_time != 0 and time_elapsed_in_minutes >= fetch_time:
to_date_time = datetime.now().strftime(WORKDAY_DATE_TIME_FORMAT)
report_data = client.get_full_report(report_url)
report_entries = report_data.get('Report_Entry')
for entry in report_entries:
workday_user = demisto.mapObject(entry, mapper_in, INCIDENT_TYPE)
workday_user = convert_incident_fields_to_cli_names(workday_user)
demisto_user = get_demisto_user(workday_user)
profile_changed_fields = get_profile_changed_fields(workday_user, demisto_user)
terminate_date_arrived = check_if_user_should_be_terminated(workday_user)
does_email_exist = does_email_exist_in_xsoar(workday_user.get('email'))
if ((demisto_user and len(profile_changed_fields) == 0) or (not demisto_user and does_email_exist))\
and not terminate_date_arrived:
# either no change in user profile or user profile doesn't exist but the email is already used
# in both cases, don't create the incident
continue
entry['UserProfile'] = workday_user
event = {
"rawJSON": json.dumps(entry),
"details": 'Profile changed. Changed fields: ' + str(profile_changed_fields)
}
events.append(event)
last_run_time = datetime.now().strftime(WORKDAY_DATE_TIME_FORMAT)
demisto.info(f'Workday Fetch Events Completed. Response Time:'
f' {(datetime.now() - start).total_seconds()} seconds')
last_run = {'time': last_run_time, "sync_users": True}
except Exception as e:
demisto.error(f'Failed to fetch events. From Date = {from_date_time}. To Date = {to_date_time}')
raise e
return last_run, events
|
48,073 |
def fn_pipe_correct_type(df: DataFrame[Schema]) -> DataFrame[SchemaOut]:
return df.assign(age=30).pipe(DataFrame[AnotherSchema]) # mypy error
|
def fn_pipe_incorrect_type(df: DataFrame[Schema]) -> DataFrame[SchemaOut]:
return df.assign(age=30).pipe(DataFrame[AnotherSchema]) # mypy error
|
23,683 |
def fedis(aoi, surface_tilt, n=1.5, n_ref=1.4585):
"""
Determine the incidence angle modifiers (iam) for direct, diffuse sky,
and ground-reflected radiation using the FEDIS transmittance model.
The "Fresnel Equations" for Diffuse radiation on Inclined photovoltaic
Surfaces (FEDIS) [1]_ is an analytical solution of diffuse transmission
based on the rigorous integration of an alternate form of the
Fresnel equations. The approach leads to a simple yet accurate
relative transmittance model that reconciles the solar energy
sensed by pyranometers and PV panels.
Parameters
----------
aoi : numeric
Angle of incidence. [degrees]
surface_tilt : numeric
Surface tilt angle measured from horizontal (e.g. surface facing
up = 0, surface facing horizon = 90). [degrees]
n : float, default 1.5
Refractive index of the PV cover. The default value of 1.5
was used for an IMT reference cell in [1]_. [unitless]
n_ref : float, default 1.4585
Refractive index of the pyranometer cover. The default value
was used for a fused silica dome over a CMP22 in [1]_.
Returns
-------
iam : dict
IAM values for each type of irradiance:
* 'direct': radiation from the solar disc
* 'sky': radiation from the sky dome (zenith <= 90)
* 'ground': radiation reflected from the ground (zenith >= 90)
Notes
-----
This implementation corrects a typo in the reference regarding the sign
of the last polynomial term in Equation 5.
References
----------
.. [1] Xie, Y., M. Sengupta, A. Habte, A. Andreas, "The 'Fresnel Equations'
for Diffuse radiation on Inclined photovoltaic Surfaces (FEDIS)",
Renewable and Sustainable Energy Reviews, vol. 161, 112362. June 2022.
:doi:`10.1016/j.rser.2022.112362`
"""
# avoid undefined results for horizontal or upside-down surfaces
zeroang = 1e-06
surface_tilt = np.where(surface_tilt == 0, zeroang, surface_tilt)
surface_tilt = np.where(surface_tilt >= 90, 90 - zeroang, surface_tilt)
# and for aoi:
aoi = np.where(aoi <= 0, zeroang, aoi)
# similar for AOI > 90
aoi = np.where(aoi >= 90, 90 - zeroang, aoi)
# angle between module normal and refracted ray:
theta_0tp = asind(sind(aoi) / n) # Eq 3c
# reflectance of direct radiation on PV cover:
sin_term = sind(aoi - theta_0tp)**2 / sind(aoi + theta_0tp)**2 / 2
tan_term = tand(aoi - theta_0tp)**2 / tand(aoi + theta_0tp)**2 / 2
rd = sin_term + tan_term # Eq 3b
# reflectance on pyranometer cover:
r0 = ((n_ref-1.0)/(n_ref+1.0))**2.0 # Eq 3e
# relative transmittance of direct radiation by PV cover:
cd = (1 - rd) / (1 - r0) # Eq 3a
# weighting function
term1 = n*(n_ref+1)**2 / (n_ref*(n+1)**2)
# note: the last coefficient here differs in sign from the reference
polycoeffs = [2.77526e-09, 3.74953, -5.18727, 3.41186, -1.08794, 0.136060]
term2 = np.polynomial.polynomial.polyval(n, polycoeffs)
w = term1 * term2 # Eq 5
# relative transmittance of sky diffuse radiation by PV cover:
cosB = cosd(surface_tilt)
sinB = sind(surface_tilt)
cuk = (2*w / (np.pi * (1 + cosB))) * (
(30/7)*np.pi - (160/21)*np.radians(surface_tilt) - (10/3)*np.pi*cosB
+ (160/21)*cosB*sinB - (5/3)*np.pi*cosB*sinB**2 + (20/7)*cosB*sinB**3
- (5/16)*np.pi*cosB*sinB**4 + (16/105)*cosB*sinB**5
) # Eq 4
# relative transmittance of ground-reflected radiation by PV cover:
cug = 40 * w / (21 * (1 - cosB)) - (1 + cosB) / (1 - cosB) * cuk # Eq 6
# handle tilt=0 case correctly:
cug = np.where(surface_tilt == zeroang, 0, cug)
out = {
'direct': cd,
'sky': cuk,
'ground': cug,
}
return out
|
def fedis(aoi, surface_tilt, n=1.5, n_ref=1.4585):
"""
Determine the incidence angle modifiers (iam) for direct, diffuse sky,
and ground-reflected radiation using the FEDIS transmittance model.
The "Fresnel Equations" for Diffuse radiation on Inclined photovoltaic
Surfaces (FEDIS) [1]_ is an analytical solution of diffuse transmission
based on the rigorous integration of an alternate form of the
Fresnel equations. The approach leads to a simple yet accurate
relative transmittance model that reconciles the solar energy
sensed by pyranometers and PV panels.
Parameters
----------
aoi : numeric
Angle of incidence. [degrees]
surface_tilt : numeric
Surface tilt angle measured from horizontal (e.g. surface facing
up = 0, surface facing horizon = 90). [degrees]
n : float, default 1.5
Refractive index of the PV cover. The default value of 1.5
was used for an IMT reference cell in [1]_. [unitless]
n_ref : float, default 1.4585
Refractive index of the pyranometer cover. The default value
was used for a fused silica dome over a CMP22 in [1]_.
Returns
-------
iam : dict
IAM values for each type of irradiance:
* 'direct': radiation from the solar disc
* 'sky': radiation from the sky dome (zenith <= 90)
* 'ground': radiation reflected from the ground (zenith >= 90)
Notes
-----
This implementation corrects a typo in [1]_ regarding the sign
of the last polynomial term in Equation 5.
References
----------
.. [1] Xie, Y., M. Sengupta, A. Habte, A. Andreas, "The 'Fresnel Equations'
for Diffuse radiation on Inclined photovoltaic Surfaces (FEDIS)",
Renewable and Sustainable Energy Reviews, vol. 161, 112362. June 2022.
:doi:`10.1016/j.rser.2022.112362`
"""
# avoid undefined results for horizontal or upside-down surfaces
zeroang = 1e-06
surface_tilt = np.where(surface_tilt == 0, zeroang, surface_tilt)
surface_tilt = np.where(surface_tilt >= 90, 90 - zeroang, surface_tilt)
# and for aoi:
aoi = np.where(aoi <= 0, zeroang, aoi)
# similar for AOI > 90
aoi = np.where(aoi >= 90, 90 - zeroang, aoi)
# angle between module normal and refracted ray:
theta_0tp = asind(sind(aoi) / n) # Eq 3c
# reflectance of direct radiation on PV cover:
sin_term = sind(aoi - theta_0tp)**2 / sind(aoi + theta_0tp)**2 / 2
tan_term = tand(aoi - theta_0tp)**2 / tand(aoi + theta_0tp)**2 / 2
rd = sin_term + tan_term # Eq 3b
# reflectance on pyranometer cover:
r0 = ((n_ref-1.0)/(n_ref+1.0))**2.0 # Eq 3e
# relative transmittance of direct radiation by PV cover:
cd = (1 - rd) / (1 - r0) # Eq 3a
# weighting function
term1 = n*(n_ref+1)**2 / (n_ref*(n+1)**2)
# note: the last coefficient here differs in sign from the reference
polycoeffs = [2.77526e-09, 3.74953, -5.18727, 3.41186, -1.08794, 0.136060]
term2 = np.polynomial.polynomial.polyval(n, polycoeffs)
w = term1 * term2 # Eq 5
# relative transmittance of sky diffuse radiation by PV cover:
cosB = cosd(surface_tilt)
sinB = sind(surface_tilt)
cuk = (2*w / (np.pi * (1 + cosB))) * (
(30/7)*np.pi - (160/21)*np.radians(surface_tilt) - (10/3)*np.pi*cosB
+ (160/21)*cosB*sinB - (5/3)*np.pi*cosB*sinB**2 + (20/7)*cosB*sinB**3
- (5/16)*np.pi*cosB*sinB**4 + (16/105)*cosB*sinB**5
) # Eq 4
# relative transmittance of ground-reflected radiation by PV cover:
cug = 40 * w / (21 * (1 - cosB)) - (1 + cosB) / (1 - cosB) * cuk # Eq 6
# handle tilt=0 case correctly:
cug = np.where(surface_tilt == zeroang, 0, cug)
out = {
'direct': cd,
'sky': cuk,
'ground': cug,
}
return out
|
5,366 |
def test_uptodate_with_pkgs_no_changes():
"""
Test pkg.uptodate with no changes
"""
pkgs = {
"pkga": {"old": "1.0.1", "new": "2.0.1"},
"pkgb": {"old": "1.0.2", "new": "2.0.2"},
"pkgc": {"old": "1.0.3", "new": "2.0.3"},
}
list_upgrades = MagicMock(return_value={})
upgrade = MagicMock(return_value={})
with patch.dict(
pkg.__salt__, {"pkg.list_upgrades": list_upgrades, "pkg.upgrade": upgrade}
):
# Run state with test=false
with patch.dict(pkg.__opts__, {"test": False}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert ret["result"]
assert ret["changes"] == {}
# Run state with test=true
with patch.dict(pkg.__opts__, {"test": True}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert ret["result"]
assert ret["changes"] == {}
|
def test_uptodate_with_pkgs_no_changes(pkgs):
"""
Test pkg.uptodate with no changes
"""
pkgs = {
"pkga": {"old": "1.0.1", "new": "2.0.1"},
"pkgb": {"old": "1.0.2", "new": "2.0.2"},
"pkgc": {"old": "1.0.3", "new": "2.0.3"},
}
list_upgrades = MagicMock(return_value={})
upgrade = MagicMock(return_value={})
with patch.dict(
pkg.__salt__, {"pkg.list_upgrades": list_upgrades, "pkg.upgrade": upgrade}
):
# Run state with test=false
with patch.dict(pkg.__opts__, {"test": False}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert ret["result"]
assert ret["changes"] == {}
# Run state with test=true
with patch.dict(pkg.__opts__, {"test": True}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert ret["result"]
assert ret["changes"] == {}
|
2,259 |
def _num_features(X):
"""Return the number of features in an array-like X.
This helper function tries hard to avoid to materialize an array version
of X unless necessary. For instance, if X is a list of lists,
this function will return the length of the first element, assuming
that subsequent elements are all lists of the same length without
checking.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
"""
type_ = type(X)
if type_.__module__ == "builtins":
type_name = type_.__qualname__
else:
type_name = f"{type_.__module__}.{type_.__qualname__}"
message = (
"Unable to find the number of features from X of type "
f"{type_name}"
)
if not hasattr(X, '__len__') and not hasattr(X, 'shape'):
if not hasattr(X, '__array__'):
raise TypeError(message)
# Only convert X to a numpy array if there is no cheaper, heuristic
# option.
X = np.asarray(X)
if hasattr(X, 'shape'):
if not hasattr(X.shape, '__len__') or len(X.shape) <= 1:
message += f" with shape {X.shape}"
raise TypeError(message)
return X.shape[1]
first_sample = X[0]
# Do not consider an array-like of strings of dicts to be a 2D array
if isinstance(first_sample, (str, bytes, dict)):
message += (f" where the samples are of type "
f"{type(first_sample).__qualname__}")
raise TypeError(message)
try:
# If X is a list of lists, for instance, we assume that all nested
# lists have the same length without checking or converting to
# a numpy array to keep this function call as cheap as possible.
return len(first_sample)
except Exception as err:
raise TypeError(message) from err
|
def _num_features(X):
"""Return the number of features in an array-like X.
This helper function tries hard to avoid to materialize an array version
of X unless necessary. For instance, if X is a list of lists,
this function will return the length of the first element, assuming
that subsequent elements are all lists of the same length without
checking.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
"""
type_ = type(X)
if type_.__module__ == "builtins":
type_name = type_.__qualname__
else:
type_name = f"{type_.__module__}.{type_.__qualname__}"
message = (
"Unable to find the number of features from X of type "
f"{type_name}"
)
if not hasattr(X, '__len__') and not hasattr(X, 'shape'):
if not hasattr(X, '__array__'):
raise TypeError(message)
# Only convert X to a numpy array if there is no cheaper, heuristic
# option.
X = np.asarray(X)
if hasattr(X, 'shape'):
if not hasattr(X.shape, '__len__') or len(X.shape) <= 1:
message += f" with shape {X.shape}"
raise TypeError(message)
return X.shape[1]
first_sample = X[0]
# Do not consider an array-like of strings or dicts to be a 2D array
if isinstance(first_sample, (str, bytes, dict)):
message += (f" where the samples are of type "
f"{type(first_sample).__qualname__}")
raise TypeError(message)
try:
# If X is a list of lists, for instance, we assume that all nested
# lists have the same length without checking or converting to
# a numpy array to keep this function call as cheap as possible.
return len(first_sample)
except Exception as err:
raise TypeError(message) from err
|
31,494 |
def change_dict_keys(new_names_dict: Dict[str, str], output_dict: Dict[str, Any]) -> Dict[str, Any]:
"""
Takes a dictionary and changes the names of the keys
Args:
new_names_dict: a dictionary with the old names as keys and their new names as their values
output_dict: Dictionary with string keys
Returns:
Same dictionary but with keys with the new names or the same dictionary if the old keys don't exist
Raises:
TypeError: output_dict is not a dictionary
"""
for key in new_names_dict:
new_name = new_names_dict[key]
if key in output_dict:
output_dict[new_name] = output_dict[key]
del output_dict[key]
return output_dict
|
def change_dict_keys(new_names_dict: Dict[str, str], output_dict: Dict[str, Any]) -> Dict[str, Any]:
"""
Takes a dictionary and changes the names of the keys
Args:
new_names_dict: a dictionary with the old names as keys and their new names as their values
output_dict: Dictionary with string keys
Returns:
Same dictionary but with keys with the new names or the same dictionary if the old keys don't exist
Raises:
TypeError: output_dict is not a dictionary
"""
for key in new_names_dict:
new_name = new_names_dict[key]
if key in output_dict:
output_dict[new_name] = output_dict.pop(key)
return output_dict
|
5,731 |
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
The power in the Cressie-Read power divergence statistic. The default
is 1. For convenience, `lambda_` may be assigned one of the following
strings, in which case the corresponding numerical value is used
Pearson (value 1)
Pearson's chi-squared statistic. In this case, the function is
equivalent to `stats.chisquare`.
Log-Likelihood (value 0)
Log-likelihood ratio. Also known as the G-test [3]_.
Freeman-Turkey (value -1/2)
Freeman-Tukey statistic.
Mod-Log-Likelihood (value -1)
Modified log-likelihood ratio.
Neyman (value -2)
Neyman's statistic.
Cressie-Read (value 2/3)
The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if` `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `power_divergence` raises an error if the sums
do not agree within a relative tolerance of ``1e-8``.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", https://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, str):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. "
"Valid strings are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
f_obs_float = f_obs.astype(np.float64)
if f_exp is not None:
f_exp = np.asanyarray(f_exp)
bshape = _broadcast_shapes(f_obs_float.shape, f_exp.shape)
f_obs_float = _m_broadcast_to(f_obs_float, bshape)
f_exp = _m_broadcast_to(f_exp, bshape)
rtol = 1e-8 # to pass existing tests
with np.errstate(invalid='ignore'):
f_obs_sum = f_obs_float.sum(axis=axis)
f_exp_sum = f_exp.sum(axis=axis)
relative_diff = (np.abs(f_obs_sum - f_exp_sum) /
np.minimum(f_obs_sum, f_exp_sum))
diff_gt_tol = (relative_diff > rtol).any()
if diff_gt_tol:
msg = (f"For each axis slice, the sum of the observed "
f"frequencies must agree with the sum of the "
f"expected frequencies to a relative tolerance "
f"of {rtol}, but the percent differences are:\n"
f"{relative_diff}")
raise ValueError(msg)
else:
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = f_obs.mean(axis=axis, keepdims=True)
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs_float - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
|
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
The power in the Cressie-Read power divergence statistic. The default
is 1. For convenience, `lambda_` may be assigned one of the following
strings, in which case the corresponding numerical value is used
Pearson (value 1)
Pearson's chi-squared statistic. In this case, the function is
equivalent to `chisquare`.
Log-Likelihood (value 0)
Log-likelihood ratio. Also known as the G-test [3]_.
Freeman-Turkey (value -1/2)
Freeman-Tukey statistic.
Mod-Log-Likelihood (value -1)
Modified log-likelihood ratio.
Neyman (value -2)
Neyman's statistic.
Cressie-Read (value 2/3)
The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if` `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `power_divergence` raises an error if the sums
do not agree within a relative tolerance of ``1e-8``.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", https://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, str):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. "
"Valid strings are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
f_obs_float = f_obs.astype(np.float64)
if f_exp is not None:
f_exp = np.asanyarray(f_exp)
bshape = _broadcast_shapes(f_obs_float.shape, f_exp.shape)
f_obs_float = _m_broadcast_to(f_obs_float, bshape)
f_exp = _m_broadcast_to(f_exp, bshape)
rtol = 1e-8 # to pass existing tests
with np.errstate(invalid='ignore'):
f_obs_sum = f_obs_float.sum(axis=axis)
f_exp_sum = f_exp.sum(axis=axis)
relative_diff = (np.abs(f_obs_sum - f_exp_sum) /
np.minimum(f_obs_sum, f_exp_sum))
diff_gt_tol = (relative_diff > rtol).any()
if diff_gt_tol:
msg = (f"For each axis slice, the sum of the observed "
f"frequencies must agree with the sum of the "
f"expected frequencies to a relative tolerance "
f"of {rtol}, but the percent differences are:\n"
f"{relative_diff}")
raise ValueError(msg)
else:
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = f_obs.mean(axis=axis, keepdims=True)
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs_float - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
|
21,460 |
def set_cors_headers(request: Request):
"""Set the CORs headers so that javascript running in a web browsers can
use this API
Args:
request: The http request to add CORs to.
"""
request.setHeader(b"Access-Control-Allow-Origin", b"*")
request.setHeader(
b"Access-Control-Allow-Methods", b"GET, POST, PUT, DELETE, OPTIONS"
)
request.setHeader(
b"Access-Control-Allow-Headers",
b"Origin, X-Requested-With, Content-Type, Accept, Authorization",
)
|
def set_cors_headers(request: Request):
"""Set the CORS headers so that javascript running in a web browsers can
use this API
Args:
request: The http request to add CORs to.
"""
request.setHeader(b"Access-Control-Allow-Origin", b"*")
request.setHeader(
b"Access-Control-Allow-Methods", b"GET, POST, PUT, DELETE, OPTIONS"
)
request.setHeader(
b"Access-Control-Allow-Headers",
b"Origin, X-Requested-With, Content-Type, Accept, Authorization",
)
|
50,671 |
def offline_detection(
source_path,
all_timestamps,
frame_index_range,
calculated_frame_indices,
shared_memory,
):
batch_size = 30
frame_start, frame_end = frame_index_range
frame_indices = sorted(
set(range(frame_start, frame_end + 1)) - set(calculated_frame_indices)
)
if not frame_indices:
return
frame_count = frame_end - frame_start + 1
shared_memory.progress = (frame_indices[0] - frame_start + 1) / frame_count
yield None
src = video_capture.File_Source(
SimpleNamespace(), source_path, fill_gaps=False, timing=None
)
timestamps_no_gaps = src.timestamps
uncalculated_timestamps = all_timestamps[frame_indices]
seek_poses = np.searchsorted(timestamps_no_gaps, uncalculated_timestamps)
queue = []
for frame_index, timestamp, target_frame_idx in zip(
frame_indices, uncalculated_timestamps, seek_poses
):
detections = []
if timestamp in timestamps_no_gaps:
src.seek_to_frame(target_frame_idx)
frame = src.get_frame()
detections = _detect(frame)
if detections:
serialized_dicts = [fm.Serialized_Dict(d) for d in detections]
else:
serialized_dicts = [fm.Serialized_Dict({})]
queue.append((timestamp, serialized_dicts, frame_index, len(detections)))
if len(queue) >= batch_size:
shared_memory.progress = (frame_index - frame_start + 1) / frame_count
data = queue[:batch_size]
del queue[:batch_size]
yield data
yield queue
|
def offline_detection(
source_path,
all_timestamps,
frame_index_range,
calculated_frame_indices,
shared_memory,
):
batch_size = 30
frame_start, frame_end = frame_index_range
frame_indices = sorted(
set(range(frame_start, frame_end + 1)) - set(calculated_frame_indices)
)
if not frame_indices:
return
frame_count = frame_end - frame_start + 1
shared_memory.progress = (frame_indices[0] - frame_start + 1) / frame_count
yield None
src = video_capture.File_Source(
SimpleNamespace(), source_path, fill_gaps=False, timing=None
)
timestamps_no_gaps = src.timestamps
uncalculated_timestamps = all_timestamps[frame_indices]
seek_poses = np.searchsorted(timestamps_no_gaps, uncalculated_timestamps)
queue = []
for frame_index, timestamp, target_frame_idx in zip(
frame_indices, uncalculated_timestamps, seek_poses
):
detections = []
if timestamp in timestamps_no_gaps:
if target_frame_idx != src.target_frame_idx:
src.seek_to_frame(target_frame_idx) # only seek if necessary
frame = src.get_frame()
detections = _detect(frame)
if detections:
serialized_dicts = [fm.Serialized_Dict(d) for d in detections]
else:
serialized_dicts = [fm.Serialized_Dict({})]
queue.append((timestamp, serialized_dicts, frame_index, len(detections)))
if len(queue) >= batch_size:
shared_memory.progress = (frame_index - frame_start + 1) / frame_count
data = queue[:batch_size]
del queue[:batch_size]
yield data
yield queue
|
16,599 |
def get_arguments() -> argparse.Namespace:
"""Get parsed passed in arguments."""
# pylint: disable=import-outside-toplevel
from . import config as config_util
parser = argparse.ArgumentParser(
description="Home Assistant: Observe, Control, Automate.",
epilog="If restart is requested, exits with code {RESTART_EXIT_CODE}",
)
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument(
"--safe-mode", action="store_true", help="Start Home Assistant in safe mode"
)
parser.add_argument(
"--debug", action="store_true", help="Start Home Assistant in debug mode"
)
parser.add_argument(
"--open-ui", action="store_true", help="Open the webinterface in a browser"
)
parser.add_argument(
"--skip-pip",
action="store_true",
help="Skips pip install of required packages on startup",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging to file."
)
parser.add_argument(
"--pid-file",
metavar="path_to_pid_file",
default=None,
help="Path to PID file useful for running as daemon",
)
parser.add_argument(
"--log-rotate-days",
type=int,
default=None,
help="Enables daily log rotation and keeps up to the specified days",
)
parser.add_argument(
"--log-file",
type=str,
default=None,
help="Log file to write to. If not set, CONFIG/home-assistant.log is used",
)
parser.add_argument(
"--log-no-color", action="store_true", help="Disable color logs"
)
parser.add_argument(
"--script", nargs=argparse.REMAINDER, help="Run one of the embedded scripts"
)
if os.name == "posix":
parser.add_argument(
"--daemon", action="store_true", help="Run Home Assistant as daemon"
)
arguments = parser.parse_args()
if os.name != "posix" or arguments.debug or arguments.runner:
setattr(arguments, "daemon", False)
return arguments
|
def get_arguments() -> argparse.Namespace:
"""Get parsed passed in arguments."""
# pylint: disable=import-outside-toplevel
from . import config as config_util
parser = argparse.ArgumentParser(
description="Home Assistant: Observe, Control, Automate.",
epilog=f"If restart is requested, exits with code {RESTART_EXIT_CODE}",
)
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument(
"--safe-mode", action="store_true", help="Start Home Assistant in safe mode"
)
parser.add_argument(
"--debug", action="store_true", help="Start Home Assistant in debug mode"
)
parser.add_argument(
"--open-ui", action="store_true", help="Open the webinterface in a browser"
)
parser.add_argument(
"--skip-pip",
action="store_true",
help="Skips pip install of required packages on startup",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging to file."
)
parser.add_argument(
"--pid-file",
metavar="path_to_pid_file",
default=None,
help="Path to PID file useful for running as daemon",
)
parser.add_argument(
"--log-rotate-days",
type=int,
default=None,
help="Enables daily log rotation and keeps up to the specified days",
)
parser.add_argument(
"--log-file",
type=str,
default=None,
help="Log file to write to. If not set, CONFIG/home-assistant.log is used",
)
parser.add_argument(
"--log-no-color", action="store_true", help="Disable color logs"
)
parser.add_argument(
"--script", nargs=argparse.REMAINDER, help="Run one of the embedded scripts"
)
if os.name == "posix":
parser.add_argument(
"--daemon", action="store_true", help="Run Home Assistant as daemon"
)
arguments = parser.parse_args()
if os.name != "posix" or arguments.debug or arguments.runner:
setattr(arguments, "daemon", False)
return arguments
|
14,487 |
def report_outcome(result: "LintResult", options, mark_as_success=False) -> int:
"""Display information about how to skip found rules.
Returns exit code, 2 if errors were found, 0 when only warnings were found.
"""
failures = 0
warnings = 0
msg = """\
# .ansible-lint
warn_list: # or 'skip_list' to silence them completely
"""
matches_unignored = [match for match in result.matches if not match.ignored]
# counting
matched_rules = {match.rule.id: match.rule for match in matches_unignored}
for match in result.matches:
if {match.rule.id, *match.rule.tags}.isdisjoint(options.warn_list):
failures += 1
else:
warnings += 1
entries = []
for key in sorted(matched_rules.keys()):
if {key, *matched_rules[key].tags}.isdisjoint(options.warn_list):
entries.append(f" - {key} # {matched_rules[key].shortdesc}\n")
for match in result.matches:
if "experimental" in match.rule.tags:
entries.append(" - experimental # all rules tagged as experimental\n")
break
msg += "".join(sorted(entries))
# Do not deprecate the odl tags just yet. Why? Because it is not currently feasible
# to migrate old tags to new tags. There are a lot of things out there that still
# use ansible-lint 4 (for example, Ansible Galaxy and Automation Hub imports). If we
# replace the odl tags, those tools will report warnings. If we do not replace them,
# ansible-lint 5 will report warnings.
#
# We can do the deprecation once the ecosystem caught up at least a bit.
# for k, v in used_old_tags.items():
# _logger.warning(
# "Replaced deprecated tag '%s' with '%s' but it will become an "
# "error in the future.",
# k,
# v,
# )
if result.matches and not options.quiet:
console_stderr.print(
"You can skip specific rules or tags by adding them to your "
"configuration file:"
)
console_stderr.print(render_yaml(msg))
console_stderr.print(
f"Finished with {failures} failure(s), {warnings} warning(s) "
f"on {len(result.files)} files."
)
if mark_as_success or not failures:
return 0
return 2
|
def report_outcome(result: "LintResult", options, mark_as_success=False) -> int:
"""Display information about how to skip found rules.
Returns exit code, 2 if errors were found, 0 when only warnings were found.
"""
failures = 0
warnings = 0
msg = """\
# .ansible-lint
warn_list: # or 'skip_list' to silence them completely
"""
matches_unignored = [match for match in result.matches if not match.ignored]
# counting
matched_rules = {match.rule.id: match.rule for match in matches_unignored}
for match in result.matches:
if {match.rule.id, *match.rule.tags}.isdisjoint(options.warn_list):
failures += 1
else:
warnings += 1
entries = []
for key in sorted(matched_rules.keys()):
if {key, *matched_rules[key].tags}.isdisjoint(options.warn_list):
entries.append(f" - {key} # {matched_rules[key].shortdesc}\n")
for match in result.matches:
if "experimental" in match.rule.tags:
entries.append(" - experimental # all rules tagged as experimental\n")
break
msg += "".join(sorted(entries))
# Do not deprecate the old tags just yet. Why? Because it is not currently feasible
# to migrate old tags to new tags. There are a lot of things out there that still
# use ansible-lint 4 (for example, Ansible Galaxy and Automation Hub imports). If we
# replace the odl tags, those tools will report warnings. If we do not replace them,
# ansible-lint 5 will report warnings.
#
# We can do the deprecation once the ecosystem caught up at least a bit.
# for k, v in used_old_tags.items():
# _logger.warning(
# "Replaced deprecated tag '%s' with '%s' but it will become an "
# "error in the future.",
# k,
# v,
# )
if result.matches and not options.quiet:
console_stderr.print(
"You can skip specific rules or tags by adding them to your "
"configuration file:"
)
console_stderr.print(render_yaml(msg))
console_stderr.print(
f"Finished with {failures} failure(s), {warnings} warning(s) "
f"on {len(result.files)} files."
)
if mark_as_success or not failures:
return 0
return 2
|
42,350 |
def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get a role argument specification.
.. note:: Version added: 2.2
:param str role: Simple role name, or fully qualified collection role name, to query.
:param str collection: If specified, will be combined with the role name to form a fully qualified collection role name.
If this is supplied, the ``role`` param should not be fully qualified.
:param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_argspec_command(role, collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get a role argument specification.
.. note:: Version added: 2.2
:param str role: Simple role name, or fully qualified collection role name, to query.
:param str collection: If specified, will be combined with the role name to form a fully qualified collection role name.
If this is supplied, the ``role`` param should not be fully qualified.
:param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_argspec_command(role, collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
30,995 |
def fetch_incidents(url):
feed = feedparser.parse(url)
incidents = []
# demisto.getLastRun() will returns an obj with the previous run in it.
last_run = demisto.getLastRun()
# Get the last fetch time, if exists
last_fetch = last_run.get('last_fetch')
if last_fetch == None:
last_fetch = datetime(1970, 1, 1)
else:
last_fetch = datetime.strptime(last_fetch, '%Y-%m-%dT%H:%M:%S.%f')
for entry in feed.entries:
date_parsed = email.utils.parsedate(entry.published)
dt = datetime.fromtimestamp(mktime(date_parsed))
incident = {
'name': entry.title,
'occured': dt.isoformat(),
'rawJSON': json.dumps(entry)
}
incidents.append(incident)
dtnow = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
demisto.setLastRun({'last_fetch': dtnow})
return incidents
|
def fetch_incidents(url):
feed = feedparser.parse(url)
incidents = []
# demisto.getLastRun() will returns an obj with the previous run in it.
last_run = demisto.getLastRun()
# Get the last fetch time, if exists
last_fetch = last_run.get('last_fetch')
if last_fetch == None:
last_fetch = datetime(1970, 1, 1)
else:
last_fetch = datetime.strptime(last_fetch, '%Y-%m-%dT%H:%M:%S.%f')
for entry in feed.entries:
date_parsed = email.utils.parsedate(entry.published)
dt = datetime.fromtimestamp(mktime(date_parsed))
incident = {
'name': entry.title,
'occured': dt.isoformat(),
'rawJSON': json.dumps(entry)
}
incidents.append(incident)
dtnow = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
demisto.setLastRun({'last_fetch': dt.strftime('%Y-%m-%dT%H:%M:%S.%f')})
return incidents
|
19,955 |
def poolRunner(target, queue, coverage_number=None, omit_patterns=[], cov_config_file=True): # pragma: no cover
"""
I am the function that pool worker processes run. I run one unit test.
coverage_config_file is a special option that is either a string specifying
the custom coverage config file or the special default value True (which
causes coverage to search for it's standard config files).
"""
# Each pool worker gets his own temp directory, to avoid having tests that
# are used to taking turns using the same temp file name from interfering
# with eachother. So long as the test doesn't use a hard-coded temp
# directory, anyway.
saved_tempdir = tempfile.tempdir
tempfile.tempdir = tempfile.mkdtemp()
def raise_internal_failure(msg):
err = sys.exc_info()
t = ProtoTest()
t.module = 'green.loader'
t.class_name = 'N/A'
t.description = msg
t.method_name = 'poolRunner'
result.startTest(t)
result.addError(t, err)
result.stopTest(t)
queue.put(result)
cleanup()
def cleanup():
# Restore the state of the temp directory
# TODO: Make this not necessary on macOS+Python3 (see #173)
if sys.version_info[0] == 2:
shutil.rmtree(tempfile.tempdir, ignore_errors=True)
tempfile.tempdir = saved_tempdir
queue.put(None)
# Finish coverage
if coverage_number:
cov.stop()
cov.save()
# Each pool starts its own coverage, later combined by the main process.
if coverage_number:
cov = coverage.coverage(
data_file='.coverage.{}_{}'.format(
coverage_number, random.randint(0, 10000)),
omit=omit_patterns,
config_file=cov_config_file)
cov._warn_no_data = False
cov.start()
# What to do each time an individual test is started
already_sent = set()
def start_callback(test):
# Let the main process know what test we are starting
test = proto_test(test)
if test not in already_sent:
queue.put(test)
already_sent.add(test)
def finalize_callback(test_result):
# Let the main process know what happened with the test run
queue.put(test_result)
result = ProtoTestResult(start_callback, finalize_callback)
test = None
try:
loader = GreenTestLoader()
test = loader.loadTargets(target)
except:
raise_internal_failure('Green encountered an error loading the unit test.')
return
if getattr(test, 'run', False):
# Loading was successful, lets do this
try:
test.run(result)
# If your class setUpClass(self) method crashes, the test doesn't
# raise an exception, but it does add an entry to errors. Some
# other things add entries to errors as well, but they all call the
# finalize callback.
if result and (not result.finalize_callback_called) and getattr(result, 'errors', False):
queue.put(test)
queue.put(result)
except:
# Some frameworks like testtools record the error AND THEN let it
# through to crash things. So we only need to manufacture another
# error if the underlying framework didn't, but either way we don't
# want to crash.
if result.errors:
queue.put(result)
else:
try:
err = sys.exc_info()
result.startTest(test)
result.addError(test, err)
result.stopTest(test)
queue.put(result)
except:
raise_internal_failure('Green encoundered an error when running the test.')
return
else:
# loadTargets() returned an object without a run() method, probably
# None
description = ('Test loader returned an un-runnable object. Is "{}" '
'importable from your current location? Maybe you '
'forgot an __init__.py in your directory? Unrunnable '
'object looks like: {} of type {} with dir {}'
.format(target, str(test), type(test), dir(test))
)
err = (TypeError, TypeError(description), None)
t = ProtoTest()
target_list = target.split('.')
t.module = '.'.join(target_list[:-2]) if len(target_list) > 1 else target
t.class_name = target.split('.')[-2] if len(target_list) > 1 else 'UnknownClass'
t.description = description
t.method_name = target.split('.')[-1] if len(target_list) > 1 else 'unknown_method'
result.startTest(t)
result.addError(t, err)
result.stopTest(t)
queue.put(result)
cleanup()
|
def poolRunner(target, queue, coverage_number=None, omit_patterns=[], cov_config_file=True): # pragma: no cover
"""
I am the function that pool worker processes run. I run one unit test.
coverage_config_file is a special option that is either a string specifying
the custom coverage config file or the special default value True (which
causes coverage to search for it's standard config files).
"""
# Each pool worker gets his own temp directory, to avoid having tests that
# are used to taking turns using the same temp file name from interfering
# with eachother. So long as the test doesn't use a hard-coded temp
# directory, anyway.
saved_tempdir = tempfile.tempdir
tempfile.tempdir = tempfile.mkdtemp()
def raise_internal_failure(msg):
err = sys.exc_info()
t = ProtoTest()
t.module = 'green.loader'
t.class_name = 'N/A'
t.description = msg
t.method_name = 'poolRunner'
result.startTest(t)
result.addError(t, err)
result.stopTest(t)
queue.put(result)
cleanup()
def cleanup():
# Restore the state of the temp directory
# TODO: Make this not necessary on macOS+Python3 (see #173)
if sys.version_info[0] == 2:
shutil.rmtree(tempfile.tempdir, ignore_errors=True)
tempfile.tempdir = saved_tempdir
queue.put(None)
# Finish coverage
if coverage_number:
cov.stop()
cov.save()
# Each pool starts its own coverage, later combined by the main process.
if coverage_number:
cov = coverage.coverage(
data_file='.coverage.{}_{}'.format(
coverage_number, random.randint(0, 10000)),
omit=omit_patterns,
config_file=cov_config_file)
cov._warn_no_data = False
cov.start()
# What to do each time an individual test is started
already_sent = set()
def start_callback(test):
# Let the main process know what test we are starting
test = proto_test(test)
if test not in already_sent:
queue.put(test)
already_sent.add(test)
def finalize_callback(test_result):
# Let the main process know what happened with the test run
queue.put(test_result)
result = ProtoTestResult(start_callback, finalize_callback)
test = None
try:
loader = GreenTestLoader()
test = loader.loadTargets(target)
except:
raise_internal_failure('Green encountered an error loading the unit test.')
return
if getattr(test, 'run', False):
# Loading was successful, lets do this
try:
test.run(result)
# If your class setUpClass(self) method crashes, the test doesn't
# raise an exception, but it does add an entry to errors. Some
# other things add entries to errors as well, but they all call the
# finalize callback.
if result and (not result.finalize_callback_called) and getattr(result, 'errors', False):
queue.put(test)
queue.put(result)
except:
# Some frameworks like testtools record the error AND THEN let it
# through to crash things. So we only need to manufacture another
# error if the underlying framework didn't, but either way we don't
# want to crash.
if result.errors:
queue.put(result)
else:
try:
err = sys.exc_info()
result.startTest(test)
result.addError(test, err)
result.stopTest(test)
queue.put(result)
except:
raise_internal_failure('Green encountered an internal error when running the test. This error is known to occur with doctests, which Green does not yet support.')
return
else:
# loadTargets() returned an object without a run() method, probably
# None
description = ('Test loader returned an un-runnable object. Is "{}" '
'importable from your current location? Maybe you '
'forgot an __init__.py in your directory? Unrunnable '
'object looks like: {} of type {} with dir {}'
.format(target, str(test), type(test), dir(test))
)
err = (TypeError, TypeError(description), None)
t = ProtoTest()
target_list = target.split('.')
t.module = '.'.join(target_list[:-2]) if len(target_list) > 1 else target
t.class_name = target.split('.')[-2] if len(target_list) > 1 else 'UnknownClass'
t.description = description
t.method_name = target.split('.')[-1] if len(target_list) > 1 else 'unknown_method'
result.startTest(t)
result.addError(t, err)
result.stopTest(t)
queue.put(result)
cleanup()
|
30,242 |
def get_urlscan_http_transaction_list():
uuid = urlscan_submit_url()
ready = polling(uuid)
if ready is True:
format_http_transaction_list(uuid)
|
def get_urlscan_http_transaction_list():
uuid = urlscan_submit_url()
ready = polling(uuid)
if ready:
format_http_transaction_list(uuid)
|
30,671 |
def convert_to_json(response):
raw_json_response = json.loads(xml2json(response))
workers_data = raw_json_response.get('Envelope').get('Body').get('Get_Workers_Response').get('Response_Data'). \
get('Worker')
return raw_json_response, workers_data
|
def convert_to_json(response):
raw_json_response = json.loads(xml2json(response))
workers_data = raw_json_response.get('Envelope', {}).get('Body', {}).get('Get_Workers_Response', {}).get('Response_Data', {}). \
get('Worker')
return raw_json_response, workers_data
|
57,095 |
def validate_topic_and_sub_topic_change(obj):
"""Validates Topic or Sub topic change.
Args:
obj: dict. Data that needs to be validated.
"""
allowed_commands = [
command['name'] for command in topic_domain.ALLOWED_COMMANDS
]
if obj['cmd'] not in allowed_commands:
raise base.BaseHandler.InvalidInputException(
'%s cmd is not allowed.' % obj['cmd']
)
|
def validate_topic_and_sub_topic_change(obj):
"""Validates Topic or Sub topic change.
Args:
obj: dict. Data that needs to be validated.
"""
allowed_commands = [
command['name'] for command in topic_domain.ALLOWED_COMMANDS
]
if obj['cmd'] not in allowed_commands:
raise base.BaseHandler.InvalidInputException(
'%s cmd is not allowed.' % obj['cmd']
)
|
37,950 |
def load_ocean_ridge_points():
"""
Load a table of ocean ridge points for the entire world as a
pandas.DataFrame.
This is the ``@ridge.txt`` dataset used in the GMT tutorials.
The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
first time you invoke this function. Afterwards, it will load the data from
the cache. So you'll need an internet connection the first time around.
Returns
-------
data : pandas.DataFrame
The data table. Columns are longitude and latitude.
"""
fname = which("@ridge.txt", download="c")
data = pd.read_csv(
fname, sep=r"\s+", names=["longitude", "latitude"], skiprows=1, comment=">"
)
return data
|
def load_ocean_ridge_points():
"""
Load a table of ocean ridge points for the entire world as a
pandas.DataFrame.
This is the ``@ridge.txt`` dataset used in the GMT tutorials.
The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
first time you invoke this function. Afterwards, it will load the data from
the cache. So you'll need an internet connection the first time around.
Returns
-------
data : pandas.DataFrame
The data table. Columns are longitude and latitude.
"""
fname = which("@ridge.txt", download="c")
data = pd.read_csv(
fname, sep=r"\s+", names=["longitude", "latitude"], skiprows=1, comment=">"
)
return data
|
10,777 |
def device_array_like(ary, stream=0):
"""Call cuda.devicearray() with information from the array.
"""
# Avoid attempting to recompute strides if the default strides will be
# sufficient to create a contiguous array.
if ary.is_c_contiguous() or ary.ndim <= 1:
return device_array(shape=ary.shape, dtype=ary.dtype, stream=stream)
# Otherwise, we need to compute new strides using an algorithm adapted from
# Numpy's PyArray_NewLikeArrayWithShape in core/src/multiarray/ctors.c. We
# permute the strides in ascending order then compute the stride for the
# dimensions with the same permutation.
# Stride permuation. E.g. a stride array (4, -2, 12) becomes
# [(1, -2), (0, 4), (2, 12)]
strideperm = [ x for x in enumerate(ary.strides) ]
strideperm.sort(key = lambda x: x[1])
# Compute new strides using permutation
strides = [0] * len(ary.strides)
stride = ary.dtype.itemsize
for i_perm, _ in strideperm:
strides[i_perm] = stride
stride *= ary.shape[i_perm]
strides = tuple(strides)
return device_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
stream=stream)
|
def device_array_like(ary, stream=0):
"""Call cuda.devicearray() with information from the array.
"""
# Avoid attempting to recompute strides if the default strides will be
# sufficient to create a contiguous array.
if ary.is_c_contiguous() or ary.ndim <= 1:
return device_array(shape=ary.shape, dtype=ary.dtype, stream=stream)
# Otherwise, we need to compute new strides using an algorithm adapted from
# NumPy's PyArray_NewLikeArrayWithShape in core/src/multiarray/ctors.c. We
# permute the strides in ascending order then compute the stride for the
# dimensions with the same permutation.
# Stride permuation. E.g. a stride array (4, -2, 12) becomes
# [(1, -2), (0, 4), (2, 12)]
strideperm = [ x for x in enumerate(ary.strides) ]
strideperm.sort(key = lambda x: x[1])
# Compute new strides using permutation
strides = [0] * len(ary.strides)
stride = ary.dtype.itemsize
for i_perm, _ in strideperm:
strides[i_perm] = stride
stride *= ary.shape[i_perm]
strides = tuple(strides)
return device_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
stream=stream)
|
31,270 |
def main():
try:
indicator = demisto.args()['indicator']
resp = demisto.executeCommand("getIndicator", {'value': indicator})
if isError(resp) or not resp:
demisto.results(resp)
sys.exit(0)
data = resp[0].get("Contents")
if not data:
demisto.results("No results.")
sys.exit(0)
for entry in data:
for results, outputs in iterate_indicator_entry(indicator, entry):
return_results(results)
appendContext(DbotScoreKey, outputs)
except Exception as error:
return_error(str(error), error)
|
def main():
try:
indicator = demisto.args()['indicator']
resp = demisto.executeCommand("getIndicator", {'value': indicator})
if isError(resp) or not resp:
demisto.results(resp)
return
data = resp[0].get("Contents")
if not data:
demisto.results("No results.")
sys.exit(0)
for entry in data:
for results, outputs in iterate_indicator_entry(indicator, entry):
return_results(results)
appendContext(DbotScoreKey, outputs)
except Exception as error:
return_error(str(error), error)
|
46,053 |
def flow2rgb(flow: np.ndarray,
color_wheel: Optional[np.ndarray] = None,
unknown_thr: float = 1e6) -> np.ndarray:
"""Convert flow map to RGB image.
Args:
flow (ndarray): Array of optical flow.
color_wheel (ndarray or None): Color wheel used to map flow field to
RGB colorspace. Default color wheel will be used if not specified.
unknown_thr (float): Values above this threshold will be marked asx
unknown and thus ignored.
Returns:
ndarray: RGB image that can be visualized.
"""
assert flow.ndim == 3 and flow.shape[-1] == 2
if color_wheel is None:
color_wheel = make_color_wheel()
assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3
num_bins = color_wheel.shape[0]
dx = flow[:, :, 0].copy()
dy = flow[:, :, 1].copy()
ignore_inds = (
np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) |
(np.abs(dy) > unknown_thr))
dx[ignore_inds] = 0
dy[ignore_inds] = 0
rad = np.sqrt(dx**2 + dy**2)
if np.any(rad > np.finfo(float).eps):
max_rad = np.max(rad)
dx /= max_rad
dy /= max_rad
rad = np.sqrt(dx**2 + dy**2)
angle = np.arctan2(-dy, -dx) / np.pi
bin_real = (angle + 1) / 2 * (num_bins - 1)
bin_left = np.floor(bin_real).astype(int)
bin_right = (bin_left + 1) % num_bins
w = (bin_real - bin_left.astype(np.float32))[..., None]
flow_img = (1 -
w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]
small_ind = rad <= 1
flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])
flow_img[np.logical_not(small_ind)] *= 0.75
flow_img[ignore_inds, :] = 0
return flow_img
|
def flow2rgb(flow: np.ndarray,
color_wheel: Optional[np.ndarray] = None,
unknown_thr: float = 1e6) -> np.ndarray:
"""Convert flow map to RGB image.
Args:
flow (ndarray): Array of optical flow.
color_wheel (ndarray or None): Color wheel used to map flow field to
RGB colorspace. Default color wheel will be used if not specified.
unknown_thr (float): Values above this threshold will be marked as
unknown and thus ignored.
Returns:
ndarray: RGB image that can be visualized.
"""
assert flow.ndim == 3 and flow.shape[-1] == 2
if color_wheel is None:
color_wheel = make_color_wheel()
assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3
num_bins = color_wheel.shape[0]
dx = flow[:, :, 0].copy()
dy = flow[:, :, 1].copy()
ignore_inds = (
np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) |
(np.abs(dy) > unknown_thr))
dx[ignore_inds] = 0
dy[ignore_inds] = 0
rad = np.sqrt(dx**2 + dy**2)
if np.any(rad > np.finfo(float).eps):
max_rad = np.max(rad)
dx /= max_rad
dy /= max_rad
rad = np.sqrt(dx**2 + dy**2)
angle = np.arctan2(-dy, -dx) / np.pi
bin_real = (angle + 1) / 2 * (num_bins - 1)
bin_left = np.floor(bin_real).astype(int)
bin_right = (bin_left + 1) % num_bins
w = (bin_real - bin_left.astype(np.float32))[..., None]
flow_img = (1 -
w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]
small_ind = rad <= 1
flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])
flow_img[np.logical_not(small_ind)] *= 0.75
flow_img[ignore_inds, :] = 0
return flow_img
|
32,064 |
def wildfire_get_report_command(args):
"""
Args:
args: the command arguments from demisto.args(), including url or file hash (sha256 or md5) to query on
Returns:
A single or list of CommandResults, and the status of the reports of the url or file of interest.
Note that the status is only used for the polling sequence, where the command will always receive a single
file or url. Hence, when running this command via the polling sequence, the CommandResults list will contain a
single item, and the status will represent that result's status.
"""
command_results_list = []
urls = argToList(args.get('url', ''))
if 'sha256' in args:
sha256 = args.get('sha256')
elif 'hash' in args:
sha256 = args.get('hash')
else:
sha256 = None
md5 = args.get('md5')
inputs = urls if urls else hash_args_handler(sha256, md5)
status = 'NotFound'
for element in inputs:
command_results, status = wildfire_get_url_report(element) if urls else wildfire_get_file_report(element, args)
command_results_list.append(command_results)
return command_results_list, status
|
def wildfire_get_report_command(args):
"""
Args:
args: the command arguments from demisto.args(), including url or file hash (sha256 or md5) to query on
Returns:
A single or list of CommandResults, and the status of the reports of the url or file of interest.
Note that the status is only used for the polling sequence, where the command will always receive a single
file or url. Hence, when running this command via the polling sequence, the CommandResults list will contain a
single item, and the status will represent that result's status.
"""
command_results_list = []
urls = argToList(args.get('url', ''))
if 'sha256' in args:
sha256 = args.get('sha256')
elif 'hash' in args:
sha256 = args.get('hash')
else:
sha256 = None
md5 = args.get('md5')
inputs = urls if urls else hash_args_handler(sha256, md5)
status = ''
for element in inputs:
command_results, status = wildfire_get_url_report(element) if urls else wildfire_get_file_report(element, args)
command_results_list.append(command_results)
return command_results_list, status
|
15,258 |
def item_payload(item, media_library):
"""
Create response payload for a single media item.
Used by async_browse_media.
"""
if "songid" in item:
media_content_type = MEDIA_TYPE_TRACK
media_content_id = f"{item['songid']}"
elif "albumid" in item:
media_content_type = MEDIA_TYPE_ALBUM
media_content_id = f"{item['albumid']}"
elif "artistid" in item:
media_content_type = MEDIA_TYPE_ARTIST
media_content_id = f"{item['artistid']}"
elif "movieid" in item:
media_content_type = MEDIA_TYPE_MOVIE
media_content_id = f"{item['movieid']}"
elif "episodeid" in item:
media_content_type = MEDIA_TYPE_EPISODE
media_content_id = f"{item['episodeid']}"
elif "seasonid" in item:
media_content_type = MEDIA_TYPE_SEASON
media_content_id = f"{item['tvshowid']}/{item['season']}"
elif "tvshowid" in item:
media_content_type = MEDIA_TYPE_TVSHOW
media_content_id = f"{item['tvshowid']}"
else:
# this case is for the top folder of each type
# possible content types: album, artist, movie, library_music, tvshow
media_content_type = item.get("type")
media_content_id = ""
title = item["label"]
can_play = media_content_type in PLAYABLE_MEDIA_TYPES and bool(media_content_id)
can_expand = media_content_type in EXPANDABLE_MEDIA_TYPES
thumbnail = item.get("thumbnail")
if thumbnail:
thumbnail = media_library.thumbnail_url(thumbnail)
if media_content_type == MEDIA_TYPE_MOVIE and not bool(media_content_id):
media_class = MEDIA_CLASS_DIRECTORY
can_expand = True
else:
media_class = CONTENT_TYPE_MEDIA_CLASS[media_content_type]
return BrowseMedia(
title=title,
media_class=media_class,
media_content_type=media_content_type,
media_content_id=media_content_id,
can_play=can_play,
can_expand=can_expand,
thumbnail=thumbnail,
)
|
def item_payload(item, media_library):
"""
Create response payload for a single media item.
Used by async_browse_media.
"""
if "songid" in item:
media_content_type = MEDIA_TYPE_TRACK
media_content_id = f"{item['songid']}"
elif "albumid" in item:
media_content_type = MEDIA_TYPE_ALBUM
media_content_id = f"{item['albumid']}"
elif "artistid" in item:
media_content_type = MEDIA_TYPE_ARTIST
media_content_id = f"{item['artistid']}"
elif "movieid" in item:
media_content_type = MEDIA_TYPE_MOVIE
media_content_id = f"{item['movieid']}"
elif "episodeid" in item:
media_content_type = MEDIA_TYPE_EPISODE
media_content_id = f"{item['episodeid']}"
elif "seasonid" in item:
media_content_type = MEDIA_TYPE_SEASON
media_content_id = f"{item['tvshowid']}/{item['season']}"
elif "tvshowid" in item:
media_content_type = MEDIA_TYPE_TVSHOW
media_content_id = f"{item['tvshowid']}"
else:
# this case is for the top folder of each type
# possible content types: album, artist, movie, library_music, tvshow
media_content_type = item.get("type")
media_content_id = ""
title = item["label"]
can_play = media_content_type in PLAYABLE_MEDIA_TYPES and bool(media_content_id)
can_expand = media_content_type in EXPANDABLE_MEDIA_TYPES
thumbnail = item.get("thumbnail")
if thumbnail:
thumbnail = media_library.thumbnail_url(thumbnail)
if media_content_type == MEDIA_TYPE_MOVIE and not media_content_id:
media_class = MEDIA_CLASS_DIRECTORY
can_expand = True
else:
media_class = CONTENT_TYPE_MEDIA_CLASS[media_content_type]
return BrowseMedia(
title=title,
media_class=media_class,
media_content_type=media_content_type,
media_content_id=media_content_id,
can_play=can_play,
can_expand=can_expand,
thumbnail=thumbnail,
)
|
47,500 |
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
"""
Stable wrapper that returns the same output as `tf.nn.softmax`, but that works reliably with XLA on CPU. It is
meant as a workaround for the following issue (https://github.com/tensorflow/tensorflow/issues/55682), and will be
removed after it gets fixed. The arguments and outputs are the same as `tf.nn.softmax`, and relies on the fact that
softmax(x) = softmax(x + c) (see https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html).
Args:
logits (`tf.Tensor`). Must be one of the following types: half, float32, float64.
axis (`int`, *optional*). The dimension softmax would be performed on. The default is -1 which indicates the
last dimension.
name (`str`, *optional*). A name for the operation (optional).
Returns:
`tf.Tensor`: A Tensor. Has the same type and shape as logits.
"""
return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
|
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
"""
Stable wrapper that returns the same output as `tf.nn.softmax`, but that works reliably with XLA on CPU. It is
meant as a workaround for the [following issue](https://github.com/tensorflow/tensorflow/issues/55682), and will be
removed after it gets fixed. The arguments and outputs are the same as `tf.nn.softmax`, and relies on the fact that
softmax(x) = softmax(x + c) (see https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html).
Args:
logits (`tf.Tensor`). Must be one of the following types: half, float32, float64.
axis (`int`, *optional*). The dimension softmax would be performed on. The default is -1 which indicates the
last dimension.
name (`str`, *optional*). A name for the operation (optional).
Returns:
`tf.Tensor`: A Tensor. Has the same type and shape as logits.
"""
return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
|
45,742 |
def forecast(
precip,
velocity,
timesteps,
precip_thr=None,
n_cascade_levels=6,
extrap_method="semilagrangian",
decomp_method="fft",
bandpass_filter_method="gaussian",
ar_order=2,
conditional=False,
probmatching_method="cdf",
num_workers=1,
fft_method="numpy",
domain="spatial",
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast by using the Spectral Prognosis (S-PROG) method.
Parameters
----------
precip: array-like
Array of shape (ar_order+1,m,n) containing the input precipitation fields
ordered by timestamp from oldest to newest. The time steps between
the inputs are assumed to be regular.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the
advection field.
The velocities are assumed to represent one time step between the
inputs. All values are required to be finite.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
precip_thr: float, required
The threshold value for minimum observable precipitation intensity.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
decomp_method: {'fft'}, optional
Name of the cascade decomposition method to use. See the documentation
of pysteps.cascade.interface.
bandpass_filter_method: {'gaussian', 'uniform'}, optional
Name of the bandpass filter method to use with the cascade decomposition.
See the documentation of pysteps.cascade.interface.
ar_order: int, optional
The order of the autoregressive model to use. Must be >= 1.
conditional: bool, optional
If set to True, compute the statistics of the precipitation field
conditionally by excluding pixels where the values are
below the threshold precip_thr.
probmatching_method: {'cdf','mean',None}, optional
Method for matching the conditional statistics of the forecast field
(areas with precipitation intensity above the threshold precip_thr) with
those of the most recently observed one. 'cdf'=map the forecast CDF to the
observed one, 'mean'=adjust only the mean value,
None=no matching applied.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if dask
is enabled or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
domain: {"spatial", "spectral"}
If "spatial", all computations are done in the spatial domain (the
classical S-PROG model). If "spectral", the AR(2) models are applied
directly in the spectral domain to reduce memory footprint and improve
performance :cite:`PCH2019a`.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool
If set to True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input precipitation fields
precip. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
See also
--------
pysteps.extrapolation.interface, pysteps.cascade.interface
References
----------
:cite:`Seed2003`, :cite:`PCH2019a`
"""
_check_inputs(precip, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
if filter_kwargs is None:
filter_kwargs = dict()
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if precip_thr is None:
raise ValueError("precip_thr required but not specified")
print("Computing S-PROG nowcast")
print("------------------------")
print("")
print("Inputs")
print("------")
print(f"input dimensions: {precip.shape[1]}x{precip.shape[2]}")
print("")
print("Methods")
print("-------")
print(f"extrapolation: {extrap_method}")
print(f"bandpass filter: {bandpass_filter_method}")
print(f"decomposition: {decomp_method}")
print("conditional statistics: {}".format("yes" if conditional else "no"))
print(f"probability matching: {probmatching_method}")
print(f"FFT method: {fft_method}")
print(f"domain: {domain}")
print("")
print("Parameters")
print("----------")
if isinstance(timesteps, int):
print(f"number of time steps: {timesteps}")
else:
print(f"time steps: {timesteps}")
print(f"parallel threads: {num_workers}")
print(f"number of cascade levels: {n_cascade_levels}")
print(f"order of the AR(p) model: {ar_order}")
print(f"precip. intensity threshold: {precip_thr}")
if measure_time:
starttime_init = time.time()
fft = utils.get_method(fft_method, shape=precip.shape[1:], n_threads=num_workers)
m, n = precip.shape[1:]
# initialize the band-pass filter
filter_method = cascade.get_method(bandpass_filter_method)
filter = filter_method((m, n), n_cascade_levels, **filter_kwargs)
decomp_method, recomp_method = cascade.get_method(decomp_method)
extrapolator_method = extrapolation.get_method(extrap_method)
precip = precip[-(ar_order + 1) :, :, :].copy()
precip_min = np.nanmin(precip)
# determine the domain mask from non-finite values
domain_mask = np.logical_or.reduce(
[~np.isfinite(precip[i, :]) for i in range(precip.shape[0])]
)
# determine the precipitation threshold mask
if conditional:
mask_thr = np.logical_and.reduce(
[precip[i, :, :] >= precip_thr for i in range(precip.shape[0])]
)
else:
mask_thr = None
# initialize the extrapolator
x_values, y_values = np.meshgrid(
np.arange(precip.shape[2]), np.arange(precip.shape[1])
)
xy_coords = np.stack([x_values, y_values])
extrap_kwargs = extrap_kwargs.copy()
extrap_kwargs["xy_coords"] = xy_coords
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(precip)) else False
)
# advect the previous precipitation fields to the same position with the
# most recent one (i.e. transform them into the Lagrangian coordinates)
res = list()
def f(precip, i):
return extrapolator_method(
precip[i, :], velocity, ar_order - i, "min", **extrap_kwargs
)[-1]
for i in range(ar_order):
if not DASK_IMPORTED:
precip[i, :, :] = f(precip, i)
else:
res.append(dask.delayed(f)(precip, i))
if DASK_IMPORTED:
num_workers_ = len(res) if num_workers > len(res) else num_workers
precip = np.stack(
list(dask.compute(*res, num_workers=num_workers_)) + [precip[-1, :, :]]
)
# replace non-finite values with the minimum value
precip = precip.copy()
for i in range(precip.shape[0]):
precip[i, ~np.isfinite(precip[i, :])] = np.nanmin(precip[i, :])
# compute the cascade decompositions of the input precipitation fields
precip_d = []
for i in range(ar_order + 1):
precip_ = decomp_method(
precip[i, :, :],
filter,
mask=mask_thr,
fft_method=fft,
output_domain=domain,
normalize=True,
compute_stats=True,
compact_output=True,
)
precip_d.append(precip_)
# rearrange the cascade levels into a four-dimensional array of shape
# (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
precip_c = nowcast_utils.stack_cascades(
precip_d, n_cascade_levels, convert_to_full_arrays=True
)
# compute lag-l temporal autocorrelation coefficients for each cascade level
gamma = np.empty((n_cascade_levels, ar_order))
for i in range(n_cascade_levels):
if domain == "spatial":
gamma[i, :] = correlation.temporal_autocorrelation(
precip_c[i], mask=mask_thr
)
else:
gamma[i, :] = correlation.temporal_autocorrelation(
precip_c[i], domain="spectral", x_shape=precip.shape[1:]
)
precip_c = nowcast_utils.stack_cascades(
precip_d, n_cascade_levels, convert_to_full_arrays=False
)
precip_d = precip_d[-1]
nowcast_utils.print_corrcoefs(gamma)
if ar_order == 2:
# adjust the lag-2 correlation coefficient to ensure that the AR(p)
# process is stationary
for i in range(n_cascade_levels):
gamma[i, 1] = autoregression.adjust_lag2_corrcoef2(gamma[i, 0], gamma[i, 1])
# estimate the parameters of the AR(p) model from the autocorrelation
# coefficients
phi = np.empty((n_cascade_levels, ar_order + 1))
for i in range(n_cascade_levels):
phi[i, :] = autoregression.estimate_ar_params_yw(gamma[i, :])
nowcast_utils.print_ar_params(phi)
# discard all except the p-1 last cascades because they are not needed for
# the AR(p) model
precip_c = [precip_c[i][-ar_order:] for i in range(n_cascade_levels)]
if probmatching_method == "mean":
mu_0 = np.mean(precip[-1, :, :][precip[-1, :, :] >= precip_thr])
else:
mu_0 = None
# compute precipitation mask and wet area ratio
mask_p = precip[-1, :, :] >= precip_thr
war = 1.0 * np.sum(mask_p) / (precip.shape[1] * precip.shape[2])
if measure_time:
init_time = time.time() - starttime_init
precip = precip[-1, :, :]
print("Starting nowcast computation.")
precip_f = []
state = {"precip_c": precip_c, "precip_d": precip_d}
params = {
"domain": domain,
"domain_mask": domain_mask,
"fft": fft,
"mu_0": mu_0,
"n_cascade_levels": n_cascade_levels,
"phi": phi,
"precip_0": precip,
"precip_min": precip_min,
"probmatching_method": probmatching_method,
"recomp_method": recomp_method,
"war": war,
}
precip_f = nowcast_main_loop(
precip,
velocity,
state,
timesteps,
extrap_method,
_update,
extrap_kwargs=extrap_kwargs,
params=params,
measure_time=measure_time,
)
if measure_time:
precip_f, mainloop_time = precip_f
precip_f = np.stack(precip_f)
if measure_time:
return precip_f, init_time, mainloop_time
else:
return precip_f
|
def forecast(
precip,
velocity,
timesteps,
precip_thr=None,
n_cascade_levels=6,
extrap_method="semilagrangian",
decomp_method="fft",
bandpass_filter_method="gaussian",
ar_order=2,
conditional=False,
probmatching_method="cdf",
num_workers=1,
fft_method="numpy",
domain="spatial",
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast by using the Spectral Prognosis (S-PROG) method.
Parameters
----------
precip: array-like
Array of shape (ar_order+1,m,n) containing the input precipitation fields
ordered by timestamp from oldest to newest. The time steps between
the inputs are assumed to be regular.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the
advection field.
The velocities are assumed to represent one time step between the
inputs. All values are required to be finite.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
precip_thr: float, required
The threshold value for minimum observable precipitation intensity.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
decomp_method: {'fft'}, optional
Name of the cascade decomposition method to use. See the documentation
of pysteps.cascade.interface.
bandpass_filter_method: {'gaussian', 'uniform'}, optional
Name of the bandpass filter method to use with the cascade decomposition.
See the documentation of pysteps.cascade.interface.
ar_order: int, optional
The order of the autoregressive model to use. Must be >= 1.
conditional: bool, optional
If set to True, compute the statistics of the precipitation field
conditionally by excluding pixels where the values are
below the threshold precip_thr.
probmatching_method: {'cdf','mean',None}, optional
Method for matching the conditional statistics of the forecast field
(areas with precipitation intensity above the threshold precip_thr) with
those of the most recently observed one. 'cdf'=map the forecast CDF to the
observed one, 'mean'=adjust only the mean value,
None=no matching applied.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if dask
is enabled or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
domain: {"spatial", "spectral"}
If "spatial", all computations are done in the spatial domain (the
classical S-PROG model). If "spectral", the AR(2) models are applied
directly in the spectral domain to reduce memory footprint and improve
performance :cite:`PCH2019a`.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool
If set to True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input precipitation fields
precip. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
See also
--------
pysteps.extrapolation.interface, pysteps.cascade.interface
References
----------
:cite:`Seed2003`, :cite:`PCH2019a`
"""
_check_inputs(precip, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
if filter_kwargs is None:
filter_kwargs = dict()
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if precip_thr is None:
raise ValueError("precip_thr required but not specified")
print("Computing S-PROG nowcast")
print("------------------------")
print("")
print("Inputs")
print("------")
print(f"input dimensions: {precip.shape[1]}x{precip.shape[2]}")
print("")
print("Methods")
print("-------")
print(f"extrapolation: {extrap_method}")
print(f"bandpass filter: {bandpass_filter_method}")
print(f"decomposition: {decomp_method}")
print("conditional statistics: {}".format("yes" if conditional else "no"))
print(f"probability matching: {probmatching_method}")
print(f"FFT method: {fft_method}")
print(f"domain: {domain}")
print("")
print("Parameters")
print("----------")
if isinstance(timesteps, int):
print(f"number of time steps: {timesteps}")
else:
print(f"time steps: {timesteps}")
print(f"parallel threads: {num_workers}")
print(f"number of cascade levels: {n_cascade_levels}")
print(f"order of the AR(p) model: {ar_order}")
print(f"precip. intensity threshold: {precip_thr}")
if measure_time:
starttime_init = time.time()
fft = utils.get_method(fft_method, shape=precip.shape[1:], n_threads=num_workers)
m, n = precip.shape[1:]
# initialize the band-pass filter
filter_method = cascade.get_method(bandpass_filter_method)
filter = filter_method((m, n), n_cascade_levels, **filter_kwargs)
decomp_method, recomp_method = cascade.get_method(decomp_method)
extrapolator_method = extrapolation.get_method(extrap_method)
precip = precip[-(ar_order + 1) :, :, :].copy()
precip_min = np.nanmin(precip)
# determine the domain mask from non-finite values
domain_mask = np.logical_or.reduce(
[~np.isfinite(precip[i, :]) for i in range(precip.shape[0])]
)
# determine the precipitation threshold mask
if conditional:
mask_thr = np.logical_and.reduce(
[precip[i, :, :] >= precip_thr for i in range(precip.shape[0])]
)
else:
mask_thr = None
# initialize the extrapolator
x_values, y_values = np.meshgrid(
np.arange(precip.shape[2]), np.arange(precip.shape[1])
)
xy_coords = np.stack([x_values, y_values])
extrap_kwargs = extrap_kwargs.copy()
extrap_kwargs["xy_coords"] = xy_coords
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(precip)) else False
)
# advect the previous precipitation fields to the same position with the
# most recent one (i.e. transform them into the Lagrangian coordinates)
res = list()
def f(precip, i):
return extrapolator_method(
precip[i, :], velocity, ar_order - i, "min", **extrap_kwargs
)[-1]
for i in range(ar_order):
if not DASK_IMPORTED:
precip[i, :, :] = f(precip, i)
else:
res.append(dask.delayed(f)(precip, i))
if DASK_IMPORTED:
num_workers_ = len(res) if num_workers > len(res) else num_workers
precip = np.stack(
list(dask.compute(*res, num_workers=num_workers_)) + [precip[-1, :, :]]
)
# replace non-finite values with the minimum value
precip = precip.copy()
for i in range(precip.shape[0]):
precip[i, ~np.isfinite(precip[i, :])] = np.nanmin(precip[i, :])
# compute the cascade decompositions of the input precipitation fields
precip_d = []
for i in range(ar_order + 1):
precip_ = decomp_method(
precip[i, :, :],
filter,
mask=mask_thr,
fft_method=fft,
output_domain=domain,
normalize=True,
compute_stats=True,
compact_output=True,
)
precip_d.append(precip_)
# rearrange the cascade levels into a four-dimensional array of shape
# (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
precip_cascades = nowcast_utils.stack_cascades(
precip_d, n_cascade_levels, convert_to_full_arrays=True
)
# compute lag-l temporal autocorrelation coefficients for each cascade level
gamma = np.empty((n_cascade_levels, ar_order))
for i in range(n_cascade_levels):
if domain == "spatial":
gamma[i, :] = correlation.temporal_autocorrelation(
precip_c[i], mask=mask_thr
)
else:
gamma[i, :] = correlation.temporal_autocorrelation(
precip_c[i], domain="spectral", x_shape=precip.shape[1:]
)
precip_c = nowcast_utils.stack_cascades(
precip_d, n_cascade_levels, convert_to_full_arrays=False
)
precip_d = precip_d[-1]
nowcast_utils.print_corrcoefs(gamma)
if ar_order == 2:
# adjust the lag-2 correlation coefficient to ensure that the AR(p)
# process is stationary
for i in range(n_cascade_levels):
gamma[i, 1] = autoregression.adjust_lag2_corrcoef2(gamma[i, 0], gamma[i, 1])
# estimate the parameters of the AR(p) model from the autocorrelation
# coefficients
phi = np.empty((n_cascade_levels, ar_order + 1))
for i in range(n_cascade_levels):
phi[i, :] = autoregression.estimate_ar_params_yw(gamma[i, :])
nowcast_utils.print_ar_params(phi)
# discard all except the p-1 last cascades because they are not needed for
# the AR(p) model
precip_c = [precip_c[i][-ar_order:] for i in range(n_cascade_levels)]
if probmatching_method == "mean":
mu_0 = np.mean(precip[-1, :, :][precip[-1, :, :] >= precip_thr])
else:
mu_0 = None
# compute precipitation mask and wet area ratio
mask_p = precip[-1, :, :] >= precip_thr
war = 1.0 * np.sum(mask_p) / (precip.shape[1] * precip.shape[2])
if measure_time:
init_time = time.time() - starttime_init
precip = precip[-1, :, :]
print("Starting nowcast computation.")
precip_f = []
state = {"precip_c": precip_c, "precip_d": precip_d}
params = {
"domain": domain,
"domain_mask": domain_mask,
"fft": fft,
"mu_0": mu_0,
"n_cascade_levels": n_cascade_levels,
"phi": phi,
"precip_0": precip,
"precip_min": precip_min,
"probmatching_method": probmatching_method,
"recomp_method": recomp_method,
"war": war,
}
precip_f = nowcast_main_loop(
precip,
velocity,
state,
timesteps,
extrap_method,
_update,
extrap_kwargs=extrap_kwargs,
params=params,
measure_time=measure_time,
)
if measure_time:
precip_f, mainloop_time = precip_f
precip_f = np.stack(precip_f)
if measure_time:
return precip_f, init_time, mainloop_time
else:
return precip_f
|
38,529 |
def l2_norm(dim: int, var: pp.ad.Ad_array) -> pp.ad.Ad_array:
"""L2 norm of a vector variable.
For the example of dim=3 components and n vectors, the ordering is assumed
to be
[u0, v0, w0, u1, v1, w1, ..., un, vn, wn]
Usage note:
See module level documentation on how to wrap functions like this in ad.Function.
Parameters
----------
dim : int
Dimension, i.e. number of vector components.
var : pp.ad.Ad_array
Ad operator (variable or expression) which is argument of the norm
function.
Returns
-------
pp.ad.Ad_array
The norm of var with appropriate val and jac attributes.
"""
if dim == 1:
return pp.ad.functions.abs(var)
resh = np.reshape(var.val, (dim, -1), order="F")
vals = np.linalg.norm(resh, axis=0)
# Avoid dividing by zero
tol = 1e-12
nonzero_inds = vals > tol
jac_vals = np.zeros(resh.shape)
jac_vals[:, nonzero_inds] = resh[:, nonzero_inds] / vals[nonzero_inds]
jac_vals[:, ~nonzero_inds] = 1
# Prepare for left multiplication with var.jac to yield
# norm(var).jac = var/norm(var) * var.jac
dim_size = var.val.size
size = int(var.val.size / dim)
local_inds_t = np.arange(dim_size)
local_inds_n = np.int32(np.kron(np.arange(size), np.ones(dim)))
norm_jac = sps.csr_matrix(
(jac_vals.ravel("F"), (local_inds_n, local_inds_t)),
shape=(size, dim_size),
)
jac = norm_jac * var.jac
return pp.ad.Ad_array(vals, jac)
|
def l2_norm(dim: int, var: pp.ad.Ad_array) -> pp.ad.Ad_array:
"""L2 norm of a vector variable, taken cell-wise and represented as an Ad_array.
For the example of dim=3 components and n vectors, the ordering is assumed
to be
[u0, v0, w0, u1, v1, w1, ..., un, vn, wn]
Usage note:
See module level documentation on how to wrap functions like this in ad.Function.
Parameters
----------
dim : int
Dimension, i.e. number of vector components.
var : pp.ad.Ad_array
Ad operator (variable or expression) which is argument of the norm
function.
Returns
-------
pp.ad.Ad_array
The norm of var with appropriate val and jac attributes.
"""
if dim == 1:
return pp.ad.functions.abs(var)
resh = np.reshape(var.val, (dim, -1), order="F")
vals = np.linalg.norm(resh, axis=0)
# Avoid dividing by zero
tol = 1e-12
nonzero_inds = vals > tol
jac_vals = np.zeros(resh.shape)
jac_vals[:, nonzero_inds] = resh[:, nonzero_inds] / vals[nonzero_inds]
jac_vals[:, ~nonzero_inds] = 1
# Prepare for left multiplication with var.jac to yield
# norm(var).jac = var/norm(var) * var.jac
dim_size = var.val.size
size = int(var.val.size / dim)
local_inds_t = np.arange(dim_size)
local_inds_n = np.int32(np.kron(np.arange(size), np.ones(dim)))
norm_jac = sps.csr_matrix(
(jac_vals.ravel("F"), (local_inds_n, local_inds_t)),
shape=(size, dim_size),
)
jac = norm_jac * var.jac
return pp.ad.Ad_array(vals, jac)
|
38,001 |
def load_mars_shape():
"""
Load a table of data for the shape of Mars.
This is the ``@mars370d.txt`` dataset used in GMT examples, with data and
information from Smith, D. E., and M. T. Zuber (1996), The shape of Mars
and the topographic signature of the hemispheric dichotomy. Data columns
are "longitude," "latitude", and "radius (meters)."
The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
first time you invoke this function. Afterwards, it will load the data from
the cache. So you'll need an internet connection the first time around.
Returns
-------
data : pandas.DataFrame
The data table. Use ``print(data.describe())`` to see the available
columns.
"""
fname = which("@mars370d.txt", download="c")
data = pd.read_csv(fname, sep="\t", header=None, names=["lon", "lat", "radius(m)"])
return data
|
def load_mars_shape():
"""
Load a table of data for the shape of Mars.
This is the ``@mars370d.txt`` dataset used in GMT examples, with data and
information from Smith, D. E., and M. T. Zuber (1996), The shape of Mars
and the topographic signature of the hemispheric dichotomy. Data columns
are "longitude," "latitude", and "radius (meters)."
The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
first time you invoke this function. Afterwards, it will load the data from
the cache. So you'll need an internet connection the first time around.
Returns
-------
data : pandas.DataFrame
The data table with columns "longitude", "latitude", and "radius(m)".
"""
fname = which("@mars370d.txt", download="c")
data = pd.read_csv(fname, sep="\t", header=None, names=["lon", "lat", "radius(m)"])
return data
|
36,164 |
def upload_calculation(
node: CalcJobNode,
transport: Transport,
calc_info: CalcInfo,
folder: SandboxFolder,
inputs: Optional[MappingType[str, Any]] = None,
dry_run: bool = False
) -> None:
"""Upload a `CalcJob` instance
:param node: the `CalcJobNode`.
:param transport: an already opened transport to use to submit the calculation.
:param calc_info: the calculation info datastructure returned by `CalcJob.presubmit`
:param folder: temporary local file system folder containing the inputs written by `CalcJob.prepare_for_submission`
"""
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
# If the calculation already has a `remote_folder`, simply return. The upload was apparently already completed
# before, which can happen if the daemon is restarted and it shuts down after uploading but before getting the
# chance to perform the state transition. Upon reloading this calculation, it will re-attempt the upload.
link_label = 'remote_folder'
if node.base.links.get_outgoing(RemoteData, link_label_filter=link_label).first():
EXEC_LOGGER.warning(f'CalcJobNode<{node.pk}> already has a `{link_label}` output: skipping upload')
return calc_info
computer = node.computer
codes_info = calc_info.codes_info
input_codes = [load_node(_.code_uuid, sub_classes=(Code,)) for _ in codes_info]
logger_extra = get_dblogger_extra(node)
transport.set_logger_extra(logger_extra)
logger = LoggerAdapter(logger=EXEC_LOGGER, extra=logger_extra)
if not dry_run and not node.is_stored:
raise ValueError(
f'Cannot submit calculation {node.pk} because it is not stored! If you just want to test the submission, '
'set `metadata.dry_run` to True in the inputs.'
)
# If we are performing a dry-run, the working directory should actually be a local folder that should already exist
if dry_run:
workdir = transport.getcwd()
else:
remote_user = transport.whoami()
remote_working_directory = computer.get_workdir().format(username=remote_user)
if not remote_working_directory.strip():
raise exceptions.ConfigurationError(
"[submission of calculation {}] No remote_working_directory configured for computer '{}'".format(
node.pk, computer.label
)
)
# If it already exists, no exception is raised
try:
transport.chdir(remote_working_directory)
except IOError:
logger.debug(
'[submission of calculation {}] Unable to chdir in {}, trying to create it'.format(
node.pk, remote_working_directory
)
)
try:
transport.makedirs(remote_working_directory)
transport.chdir(remote_working_directory)
except EnvironmentError as exc:
raise exceptions.ConfigurationError(
'[submission of calculation {}] '
'Unable to create the remote directory {} on '
"computer '{}': {}".format(node.pk, remote_working_directory, computer.label, exc)
)
# Store remotely with sharding (here is where we choose
# the folder structure of remote jobs; then I store this
# in the calculation properties using _set_remote_dir
# and I do not have to know the logic, but I just need to
# read the absolute path from the calculation properties.
transport.mkdir(calc_info.uuid[:2], ignore_existing=True)
transport.chdir(calc_info.uuid[:2])
transport.mkdir(calc_info.uuid[2:4], ignore_existing=True)
transport.chdir(calc_info.uuid[2:4])
try:
# The final directory may already exist, most likely because this function was already executed once, but
# failed and as a result was rescheduled by the eninge. In this case it would be fine to delete the folder
# and create it from scratch, except that we cannot be sure that this the actual case. Therefore, to err on
# the safe side, we move the folder to the lost+found directory before recreating the folder from scratch
transport.mkdir(calc_info.uuid[4:])
except OSError:
# Move the existing directory to lost+found, log a warning and create a clean directory anyway
path_existing = os.path.join(transport.getcwd(), calc_info.uuid[4:])
path_lost_found = os.path.join(remote_working_directory, REMOTE_WORK_DIRECTORY_LOST_FOUND)
path_target = os.path.join(path_lost_found, calc_info.uuid)
logger.warning(
f'tried to create path {path_existing} but it already exists, moving the entire folder to {path_target}'
)
# Make sure the lost+found directory exists, then copy the existing folder there and delete the original
transport.mkdir(path_lost_found, ignore_existing=True)
transport.copytree(path_existing, path_target)
transport.rmtree(path_existing)
# Now we can create a clean folder for this calculation
transport.mkdir(calc_info.uuid[4:])
finally:
transport.chdir(calc_info.uuid[4:])
# I store the workdir of the calculation for later file retrieval
workdir = transport.getcwd()
node.set_remote_workdir(workdir)
# I first create the code files, so that the code can put
# default files to be overwritten by the plugin itself.
# Still, beware! The code file itself could be overwritten...
# But I checked for this earlier.
for code in input_codes:
if isinstance(code, (PortableCode, PortableContainerizedCode)):
# Note: this will possibly overwrite files
for filename in code.base.repository.list_object_names():
# Note, once #2579 is implemented, use the `node.open` method instead of the named temporary file in
# combination with the new `Transport.put_object_from_filelike`
# Since the content of the node could potentially be binary, we read the raw bytes and pass them on
with NamedTemporaryFile(mode='wb+') as handle:
try:
handle.write(code.base.repository.get_object_content(filename, mode='rb'))
except:
# raise TypeError('directory not supperted.')
pass
handle.flush()
transport.put(handle.name, filename)
transport.chmod(code.filepath_executable, 0o755) # rwxr-xr-x
# local_copy_list is a list of tuples, each with (uuid, dest_path, rel_path)
# NOTE: validation of these lists are done inside calculation.presubmit()
local_copy_list = calc_info.local_copy_list or []
remote_copy_list = calc_info.remote_copy_list or []
remote_symlink_list = calc_info.remote_symlink_list or []
provenance_exclude_list = calc_info.provenance_exclude_list or []
for uuid, filename, target in local_copy_list:
logger.debug(f'[submission of calculation {node.uuid}] copying local file/folder to {target}')
try:
data_node = load_node(uuid=uuid)
except exceptions.NotExistent:
data_node = _find_data_node(inputs, uuid) if inputs else None
if data_node is None:
logger.warning(f'failed to load Node<{uuid}> specified in the `local_copy_list`')
else:
# If no explicit source filename is defined, we assume the top-level directory
filename_source = filename or '.'
filename_target = target or ''
# Make the target filepath absolute and create any intermediate directories if they don't yet exist
filepath_target = pathlib.Path(folder.abspath) / filename_target
filepath_target.parent.mkdir(parents=True, exist_ok=True)
if data_node.base.repository.get_object(filename_source).file_type == FileType.DIRECTORY:
# If the source object is a directory, we copy its entire contents
data_node.base.repository.copy_tree(filepath_target, filename_source)
provenance_exclude_list.extend(data_node.base.repository.list_object_names(filename_source))
else:
# Otherwise, simply copy the file
with folder.open(target, 'wb') as handle:
with data_node.base.repository.open(filename, 'rb') as source:
shutil.copyfileobj(source, handle)
provenance_exclude_list.append(target)
# In a dry_run, the working directory is the raw input folder, which will already contain these resources
if not dry_run:
for filename in folder.get_content_list():
logger.debug(f'[submission of calculation {node.pk}] copying file/folder {filename}...')
transport.put(folder.get_abs_path(filename), filename)
for (remote_computer_uuid, remote_abs_path, dest_rel_path) in remote_copy_list:
if remote_computer_uuid == computer.uuid:
logger.debug(
'[submission of calculation {}] copying {} remotely, directly on the machine {}'.format(
node.pk, dest_rel_path, computer.label
)
)
try:
transport.copy(remote_abs_path, dest_rel_path)
except (IOError, OSError):
logger.warning(
'[submission of calculation {}] Unable to copy remote resource from {} to {}! '
'Stopping.'.format(node.pk, remote_abs_path, dest_rel_path)
)
raise
else:
raise NotImplementedError(
'[submission of calculation {}] Remote copy between two different machines is '
'not implemented yet'.format(node.pk)
)
for (remote_computer_uuid, remote_abs_path, dest_rel_path) in remote_symlink_list:
if remote_computer_uuid == computer.uuid:
logger.debug(
'[submission of calculation {}] copying {} remotely, directly on the machine {}'.format(
node.pk, dest_rel_path, computer.label
)
)
try:
transport.symlink(remote_abs_path, dest_rel_path)
except (IOError, OSError):
logger.warning(
'[submission of calculation {}] Unable to create remote symlink from {} to {}! '
'Stopping.'.format(node.pk, remote_abs_path, dest_rel_path)
)
raise
else:
raise IOError(
f'It is not possible to create a symlink between two different machines for calculation {node.pk}'
)
else:
if remote_copy_list:
filepath = os.path.join(workdir, '_aiida_remote_copy_list.txt')
with open(filepath, 'w', encoding='utf-8') as handle: # type: ignore[assignment]
for remote_computer_uuid, remote_abs_path, dest_rel_path in remote_copy_list:
handle.write(
'would have copied {} to {} in working directory on remote {}'.format(
remote_abs_path, dest_rel_path, computer.label
)
)
if remote_symlink_list:
filepath = os.path.join(workdir, '_aiida_remote_symlink_list.txt')
with open(filepath, 'w', encoding='utf-8') as handle: # type: ignore[assignment]
for remote_computer_uuid, remote_abs_path, dest_rel_path in remote_symlink_list:
handle.write(
'would have created symlinks from {} to {} in working directory on remote {}'.format(
remote_abs_path, dest_rel_path, computer.label
)
)
# Loop recursively over content of the sandbox folder copying all that are not in `provenance_exclude_list`. Note
# that directories are not created explicitly. The `node.put_object_from_filelike` call will create intermediate
# directories for nested files automatically when needed. This means though that empty folders in the sandbox or
# folders that would be empty when considering the `provenance_exclude_list` will *not* be copied to the repo. The
# advantage of this explicit copying instead of deleting the files from `provenance_exclude_list` from the sandbox
# first before moving the entire remaining content to the node's repository, is that in this way we are guaranteed
# not to accidentally move files to the repository that should not go there at all cost. Note that all entries in
# the provenance exclude list are normalized first, just as the paths that are in the sandbox folder, otherwise the
# direct equality test may fail, e.g.: './path/file.txt' != 'path/file.txt' even though they reference the same file
provenance_exclude_list = [os.path.normpath(entry) for entry in provenance_exclude_list]
for root, _, filenames in os.walk(folder.abspath):
for filename in filenames:
filepath = os.path.join(root, filename)
relpath = os.path.normpath(os.path.relpath(filepath, folder.abspath))
dirname = os.path.dirname(relpath)
# Construct a list of all (partial) filepaths
# For example, if `relpath == 'some/sub/directory/file.txt'` then the list of relative directory paths is
# ['some', 'some/sub', 'some/sub/directory']
# This is necessary, because if any of these paths is in the `provenance_exclude_list` the file should not
# be copied over.
components = dirname.split(os.sep)
dirnames = [os.path.join(*components[:i]) for i in range(1, len(components) + 1)]
if relpath not in provenance_exclude_list and all(
dirname not in provenance_exclude_list for dirname in dirnames
):
with open(filepath, 'rb') as handle: # type: ignore[assignment]
node.base.repository._repository.put_object_from_filelike(handle, relpath) # pylint: disable=protected-access
# Since the node is already stored, we cannot use the normal repository interface since it will raise a
# `ModificationNotAllowed` error. To bypass it, we go straight to the underlying repository instance to store the
# files, however, this means we have to manually update the node's repository metadata.
node.base.repository._update_repository_metadata() # pylint: disable=protected-access
if not dry_run:
# Make sure that attaching the `remote_folder` with a link is the last thing we do. This gives the biggest
# chance of making this method idempotent. That is to say, if a runner gets interrupted during this action, it
# will simply retry the upload, unless we got here and managed to link it up, in which case we move to the next
# task. Because in that case, the check for the existence of this link at the top of this function will exit
# early from this command.
remotedata = RemoteData(computer=computer, remote_path=workdir)
remotedata.base.links.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
remotedata.store()
|
def upload_calculation(
node: CalcJobNode,
transport: Transport,
calc_info: CalcInfo,
folder: SandboxFolder,
inputs: Optional[MappingType[str, Any]] = None,
dry_run: bool = False
) -> None:
"""Upload a `CalcJob` instance
:param node: the `CalcJobNode`.
:param transport: an already opened transport to use to submit the calculation.
:param calc_info: the calculation info datastructure returned by `CalcJob.presubmit`
:param folder: temporary local file system folder containing the inputs written by `CalcJob.prepare_for_submission`
"""
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
# If the calculation already has a `remote_folder`, simply return. The upload was apparently already completed
# before, which can happen if the daemon is restarted and it shuts down after uploading but before getting the
# chance to perform the state transition. Upon reloading this calculation, it will re-attempt the upload.
link_label = 'remote_folder'
if node.base.links.get_outgoing(RemoteData, link_label_filter=link_label).first():
EXEC_LOGGER.warning(f'CalcJobNode<{node.pk}> already has a `{link_label}` output: skipping upload')
return calc_info
computer = node.computer
codes_info = calc_info.codes_info
input_codes = [load_node(_.code_uuid, sub_classes=(Code,)) for _ in codes_info]
logger_extra = get_dblogger_extra(node)
transport.set_logger_extra(logger_extra)
logger = LoggerAdapter(logger=EXEC_LOGGER, extra=logger_extra)
if not dry_run and not node.is_stored:
raise ValueError(
f'Cannot submit calculation {node.pk} because it is not stored! If you just want to test the submission, '
'set `metadata.dry_run` to True in the inputs.'
)
# If we are performing a dry-run, the working directory should actually be a local folder that should already exist
if dry_run:
workdir = transport.getcwd()
else:
remote_user = transport.whoami()
remote_working_directory = computer.get_workdir().format(username=remote_user)
if not remote_working_directory.strip():
raise exceptions.ConfigurationError(
"[submission of calculation {}] No remote_working_directory configured for computer '{}'".format(
node.pk, computer.label
)
)
# If it already exists, no exception is raised
try:
transport.chdir(remote_working_directory)
except IOError:
logger.debug(
'[submission of calculation {}] Unable to chdir in {}, trying to create it'.format(
node.pk, remote_working_directory
)
)
try:
transport.makedirs(remote_working_directory)
transport.chdir(remote_working_directory)
except EnvironmentError as exc:
raise exceptions.ConfigurationError(
'[submission of calculation {}] '
'Unable to create the remote directory {} on '
"computer '{}': {}".format(node.pk, remote_working_directory, computer.label, exc)
)
# Store remotely with sharding (here is where we choose
# the folder structure of remote jobs; then I store this
# in the calculation properties using _set_remote_dir
# and I do not have to know the logic, but I just need to
# read the absolute path from the calculation properties.
transport.mkdir(calc_info.uuid[:2], ignore_existing=True)
transport.chdir(calc_info.uuid[:2])
transport.mkdir(calc_info.uuid[2:4], ignore_existing=True)
transport.chdir(calc_info.uuid[2:4])
try:
# The final directory may already exist, most likely because this function was already executed once, but
# failed and as a result was rescheduled by the eninge. In this case it would be fine to delete the folder
# and create it from scratch, except that we cannot be sure that this the actual case. Therefore, to err on
# the safe side, we move the folder to the lost+found directory before recreating the folder from scratch
transport.mkdir(calc_info.uuid[4:])
except OSError:
# Move the existing directory to lost+found, log a warning and create a clean directory anyway
path_existing = os.path.join(transport.getcwd(), calc_info.uuid[4:])
path_lost_found = os.path.join(remote_working_directory, REMOTE_WORK_DIRECTORY_LOST_FOUND)
path_target = os.path.join(path_lost_found, calc_info.uuid)
logger.warning(
f'tried to create path {path_existing} but it already exists, moving the entire folder to {path_target}'
)
# Make sure the lost+found directory exists, then copy the existing folder there and delete the original
transport.mkdir(path_lost_found, ignore_existing=True)
transport.copytree(path_existing, path_target)
transport.rmtree(path_existing)
# Now we can create a clean folder for this calculation
transport.mkdir(calc_info.uuid[4:])
finally:
transport.chdir(calc_info.uuid[4:])
# I store the workdir of the calculation for later file retrieval
workdir = transport.getcwd()
node.set_remote_workdir(workdir)
# I first create the code files, so that the code can put
# default files to be overwritten by the plugin itself.
# Still, beware! The code file itself could be overwritten...
# But I checked for this earlier.
for code in input_codes:
if isinstance(code, PortableCode):
# Note: this will possibly overwrite files
for filename in code.base.repository.list_object_names():
# Note, once #2579 is implemented, use the `node.open` method instead of the named temporary file in
# combination with the new `Transport.put_object_from_filelike`
# Since the content of the node could potentially be binary, we read the raw bytes and pass them on
with NamedTemporaryFile(mode='wb+') as handle:
try:
handle.write(code.base.repository.get_object_content(filename, mode='rb'))
except:
# raise TypeError('directory not supperted.')
pass
handle.flush()
transport.put(handle.name, filename)
transport.chmod(code.filepath_executable, 0o755) # rwxr-xr-x
# local_copy_list is a list of tuples, each with (uuid, dest_path, rel_path)
# NOTE: validation of these lists are done inside calculation.presubmit()
local_copy_list = calc_info.local_copy_list or []
remote_copy_list = calc_info.remote_copy_list or []
remote_symlink_list = calc_info.remote_symlink_list or []
provenance_exclude_list = calc_info.provenance_exclude_list or []
for uuid, filename, target in local_copy_list:
logger.debug(f'[submission of calculation {node.uuid}] copying local file/folder to {target}')
try:
data_node = load_node(uuid=uuid)
except exceptions.NotExistent:
data_node = _find_data_node(inputs, uuid) if inputs else None
if data_node is None:
logger.warning(f'failed to load Node<{uuid}> specified in the `local_copy_list`')
else:
# If no explicit source filename is defined, we assume the top-level directory
filename_source = filename or '.'
filename_target = target or ''
# Make the target filepath absolute and create any intermediate directories if they don't yet exist
filepath_target = pathlib.Path(folder.abspath) / filename_target
filepath_target.parent.mkdir(parents=True, exist_ok=True)
if data_node.base.repository.get_object(filename_source).file_type == FileType.DIRECTORY:
# If the source object is a directory, we copy its entire contents
data_node.base.repository.copy_tree(filepath_target, filename_source)
provenance_exclude_list.extend(data_node.base.repository.list_object_names(filename_source))
else:
# Otherwise, simply copy the file
with folder.open(target, 'wb') as handle:
with data_node.base.repository.open(filename, 'rb') as source:
shutil.copyfileobj(source, handle)
provenance_exclude_list.append(target)
# In a dry_run, the working directory is the raw input folder, which will already contain these resources
if not dry_run:
for filename in folder.get_content_list():
logger.debug(f'[submission of calculation {node.pk}] copying file/folder {filename}...')
transport.put(folder.get_abs_path(filename), filename)
for (remote_computer_uuid, remote_abs_path, dest_rel_path) in remote_copy_list:
if remote_computer_uuid == computer.uuid:
logger.debug(
'[submission of calculation {}] copying {} remotely, directly on the machine {}'.format(
node.pk, dest_rel_path, computer.label
)
)
try:
transport.copy(remote_abs_path, dest_rel_path)
except (IOError, OSError):
logger.warning(
'[submission of calculation {}] Unable to copy remote resource from {} to {}! '
'Stopping.'.format(node.pk, remote_abs_path, dest_rel_path)
)
raise
else:
raise NotImplementedError(
'[submission of calculation {}] Remote copy between two different machines is '
'not implemented yet'.format(node.pk)
)
for (remote_computer_uuid, remote_abs_path, dest_rel_path) in remote_symlink_list:
if remote_computer_uuid == computer.uuid:
logger.debug(
'[submission of calculation {}] copying {} remotely, directly on the machine {}'.format(
node.pk, dest_rel_path, computer.label
)
)
try:
transport.symlink(remote_abs_path, dest_rel_path)
except (IOError, OSError):
logger.warning(
'[submission of calculation {}] Unable to create remote symlink from {} to {}! '
'Stopping.'.format(node.pk, remote_abs_path, dest_rel_path)
)
raise
else:
raise IOError(
f'It is not possible to create a symlink between two different machines for calculation {node.pk}'
)
else:
if remote_copy_list:
filepath = os.path.join(workdir, '_aiida_remote_copy_list.txt')
with open(filepath, 'w', encoding='utf-8') as handle: # type: ignore[assignment]
for remote_computer_uuid, remote_abs_path, dest_rel_path in remote_copy_list:
handle.write(
'would have copied {} to {} in working directory on remote {}'.format(
remote_abs_path, dest_rel_path, computer.label
)
)
if remote_symlink_list:
filepath = os.path.join(workdir, '_aiida_remote_symlink_list.txt')
with open(filepath, 'w', encoding='utf-8') as handle: # type: ignore[assignment]
for remote_computer_uuid, remote_abs_path, dest_rel_path in remote_symlink_list:
handle.write(
'would have created symlinks from {} to {} in working directory on remote {}'.format(
remote_abs_path, dest_rel_path, computer.label
)
)
# Loop recursively over content of the sandbox folder copying all that are not in `provenance_exclude_list`. Note
# that directories are not created explicitly. The `node.put_object_from_filelike` call will create intermediate
# directories for nested files automatically when needed. This means though that empty folders in the sandbox or
# folders that would be empty when considering the `provenance_exclude_list` will *not* be copied to the repo. The
# advantage of this explicit copying instead of deleting the files from `provenance_exclude_list` from the sandbox
# first before moving the entire remaining content to the node's repository, is that in this way we are guaranteed
# not to accidentally move files to the repository that should not go there at all cost. Note that all entries in
# the provenance exclude list are normalized first, just as the paths that are in the sandbox folder, otherwise the
# direct equality test may fail, e.g.: './path/file.txt' != 'path/file.txt' even though they reference the same file
provenance_exclude_list = [os.path.normpath(entry) for entry in provenance_exclude_list]
for root, _, filenames in os.walk(folder.abspath):
for filename in filenames:
filepath = os.path.join(root, filename)
relpath = os.path.normpath(os.path.relpath(filepath, folder.abspath))
dirname = os.path.dirname(relpath)
# Construct a list of all (partial) filepaths
# For example, if `relpath == 'some/sub/directory/file.txt'` then the list of relative directory paths is
# ['some', 'some/sub', 'some/sub/directory']
# This is necessary, because if any of these paths is in the `provenance_exclude_list` the file should not
# be copied over.
components = dirname.split(os.sep)
dirnames = [os.path.join(*components[:i]) for i in range(1, len(components) + 1)]
if relpath not in provenance_exclude_list and all(
dirname not in provenance_exclude_list for dirname in dirnames
):
with open(filepath, 'rb') as handle: # type: ignore[assignment]
node.base.repository._repository.put_object_from_filelike(handle, relpath) # pylint: disable=protected-access
# Since the node is already stored, we cannot use the normal repository interface since it will raise a
# `ModificationNotAllowed` error. To bypass it, we go straight to the underlying repository instance to store the
# files, however, this means we have to manually update the node's repository metadata.
node.base.repository._update_repository_metadata() # pylint: disable=protected-access
if not dry_run:
# Make sure that attaching the `remote_folder` with a link is the last thing we do. This gives the biggest
# chance of making this method idempotent. That is to say, if a runner gets interrupted during this action, it
# will simply retry the upload, unless we got here and managed to link it up, in which case we move to the next
# task. Because in that case, the check for the existence of this link at the top of this function will exit
# early from this command.
remotedata = RemoteData(computer=computer, remote_path=workdir)
remotedata.base.links.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
remotedata.store()
|
24,736 |
def _get_config_paths(curdir):
paths = []
config_names = ("pylintrc", ".pylintrc", "pyproject.toml", "setup.cfg")
for config_name in config_names:
config_path = os.path.join(curdir, config_name)
if os.path.isfile(config_path):
if config_name.endswith(".toml") and not _toml_has_config(config_path):
continue
if config_name.endswith(".cfg") and not _cfg_has_config(config_path):
continue
paths.append(config_path)
return paths
|
def _get_config_paths(curdir: Union[Path, str]) -> List[Path]:
paths = []
config_names = ("pylintrc", ".pylintrc", "pyproject.toml", "setup.cfg")
for config_name in config_names:
config_path = os.path.join(curdir, config_name)
if os.path.isfile(config_path):
if config_name.endswith(".toml") and not _toml_has_config(config_path):
continue
if config_name.endswith(".cfg") and not _cfg_has_config(config_path):
continue
paths.append(config_path)
return paths
|
10,495 |
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
update_cache_retries=dict(type='int', default=5),
update_cache_retry_max_delay=dict(type='int', default=12),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', elements='str', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
no_remove=dict(type='bool', default=False, aliases=['no-remove']),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % PYTHON_APT)
try:
# We skip cache update in auto install the dependency if the
# user explicitly declared it with update_cache=no.
if module.params.get('update_cache') is False:
module.warn("Auto-installing missing dependency without updating cache: %s" % PYTHON_APT)
else:
module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
module.run_command(['apt-get', 'update'], check_rc=True)
module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
"Please install %s package." % PYTHON_APT)
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
use_apt_get = True
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
no_remove = p['no_remove']
autoclean = p['autoclean']
# Get the cache object
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# Cache valid time is default 0, which will update the cache if
# needed and `update_cache` was set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache with exponential backoff
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
randomize = random.randint(0, 1000) / 1000.0
for retry in range(update_cache_retries):
try:
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
# Use exponential backoff plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
else:
module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do exit. This will set state as
# changed based on if the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
force=force_yes, no_remove=no_remove, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
no_remove=no_remove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store when the update time was last
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
|
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
update_cache_retries=dict(type='int', default=5),
update_cache_retry_max_delay=dict(type='int', default=12),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', elements='str', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
no_remove=dict(type='bool', default=False, aliases=['no-remove']),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % PYTHON_APT)
try:
# We skip cache update in auto install the dependency if the
# user explicitly declared it with update_cache=no.
if module.params.get('update_cache') is False:
module.warn("Auto-installing missing dependency without updating cache: %s" % PYTHON_APT)
else:
module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
module.run_command(['apt-get', 'update'], check_rc=True)
module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
"Please install %s package." % PYTHON_APT)
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
use_apt_get = True
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
fail_on_autoremove = p['fail_on_autoremove']
autoclean = p['autoclean']
# Get the cache object
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# Cache valid time is default 0, which will update the cache if
# needed and `update_cache` was set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache with exponential backoff
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
randomize = random.randint(0, 1000) / 1000.0
for retry in range(update_cache_retries):
try:
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
# Use exponential backoff plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
else:
module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do exit. This will set state as
# changed based on if the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
force=force_yes, no_remove=no_remove, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
no_remove=no_remove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store when the update time was last
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
|
29,385 |
def send_mail_to_notify_contributor_ranking_achievement(
contributor_ranking_email_info: (
suggestion_registry.ContributorMilestoneEmailInfo)) -> None:
"""Sends an email to translation/question submitters and reviewers when
they achieve a new rank.
Args:
contributor_ranking_email_info:
ContributorMilestoneEmailInfo. An object with contributor ranking
email information.
"""
if not feconf.CAN_SEND_EMAILS:
logging.error('This app cannot send emails to users.')
return
recipient_username = user_services.get_username(
contributor_ranking_email_info.contributor_user_id)
can_user_receive_email = user_services.get_email_preferences(
contributor_ranking_email_info.contributor_user_id
).can_receive_email_updates
if not can_user_receive_email:
logging.error('This user can not recieve emails.')
return
email_template = NOTIFICATION_FOR_CONTRIBUTOR_RANKING_ACHIEVEMENT[
contributor_ranking_email_info.contribution_type][
contributor_ranking_email_info.contribution_sub_type]
email_body = ''
if contributor_ranking_email_info.contribution_type == (
feconf.CONTRIBUTION_TYPE_TRANSLATION):
language = utils.get_supported_audio_language_description(
contributor_ranking_email_info.language_code)
email_body = email_template['email_body_template'] % (
recipient_username,
contributor_ranking_email_info.rank_name,
language,
feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL
)
else:
email_body = email_template['email_body_template'] % (
recipient_username,
contributor_ranking_email_info.rank_name,
feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL
)
_send_email(
contributor_ranking_email_info.contributor_user_id,
feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS,
email_template['email_subject'], email_body,
feconf.NOREPLY_EMAIL_ADDRESS)
|
def send_contributor_rank_achievement_email(
contributor_ranking_email_info: (
suggestion_registry.ContributorMilestoneEmailInfo)) -> None:
"""Sends an email to translation/question submitters and reviewers when
they achieve a new rank.
Args:
contributor_ranking_email_info:
ContributorMilestoneEmailInfo. An object with contributor ranking
email information.
"""
if not feconf.CAN_SEND_EMAILS:
logging.error('This app cannot send emails to users.')
return
recipient_username = user_services.get_username(
contributor_ranking_email_info.contributor_user_id)
can_user_receive_email = user_services.get_email_preferences(
contributor_ranking_email_info.contributor_user_id
).can_receive_email_updates
if not can_user_receive_email:
logging.error('This user can not recieve emails.')
return
email_template = NOTIFICATION_FOR_CONTRIBUTOR_RANKING_ACHIEVEMENT[
contributor_ranking_email_info.contribution_type][
contributor_ranking_email_info.contribution_sub_type]
email_body = ''
if contributor_ranking_email_info.contribution_type == (
feconf.CONTRIBUTION_TYPE_TRANSLATION):
language = utils.get_supported_audio_language_description(
contributor_ranking_email_info.language_code)
email_body = email_template['email_body_template'] % (
recipient_username,
contributor_ranking_email_info.rank_name,
language,
feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL
)
else:
email_body = email_template['email_body_template'] % (
recipient_username,
contributor_ranking_email_info.rank_name,
feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL
)
_send_email(
contributor_ranking_email_info.contributor_user_id,
feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS,
email_template['email_subject'], email_body,
feconf.NOREPLY_EMAIL_ADDRESS)
|
27,086 |
def task_must_have_owners(task: BaseOperator):
if task.owner and not isinstance(task.owner, str):
raise AirflowClusterPolicyViolation(f'''owner should be a string. Current value: {task.owner}''')
if not task.owner or task.owner.lower() == conf.get('operators', 'default_owner'):
raise AirflowClusterPolicyViolation(
f'''Task must have non-None non-default owner. Current value: {task.owner}'''
)
|
def task_must_have_owners(task: BaseOperator):
if task.owner and not isinstance(task.owner, str):
raise AirflowClusterPolicyViolation(f'''owner should be a string. Current value: {task.owner!r}''')
if not task.owner or task.owner.lower() == conf.get('operators', 'default_owner'):
raise AirflowClusterPolicyViolation(
f'''Task must have non-None non-default owner. Current value: {task.owner}'''
)
|
35,160 |
def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None,
callback=None, atol=None, callback_type=None):
"""Uses Generalized Minimal RESidual iteration to solve ``Ax = b``.
Args:
A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): The real or complex
matrix of the linear system with shape ``(n, n)``.
b (cupy.ndarray): Right hand side of the linear system with shape
``(n,)`` or ``(n, 1)``.
x0 (cupy.ndarray): Starting guess for the solution.
tol (float): Tolerance for convergence.
restart (int): Number of iterations between restarts. Larger values
increase iteration cost, but may be necessary for convergence.
maxiter (int): Maximum number of iterations.
M (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Preconditioner for
``A``. The preconditioner should approximate the inverse of ``A``.
callback (function): User-specified function to call on every restart.
It is called as ``callback(arg)``, where ``arg`` is selected by
``callback_type``.
callback_type (str): 'x' or 'pr_norm'. If 'x', the current solution
vector is used as an argument of callback function. if `pr_norm`,
relative (preconditioned) residual norm is used as an arugment.
atol (float): Tolerance for convergence.
Returns:
tuple:
It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
the converged solution and ``info`` provides convergence
information.
Reference:
M. Wang, H. Klie, M. Parashar and H. Sudan, "Solving Sparse Linear
Systems on NVIDIA Tesla GPUs", ICCS 2009 (2009).
.. seealso:: :func:`scipy.sparse.linalg.gmres`
"""
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape: {})'.format(A.shape))
if A.dtype.char not in 'fdFD':
raise TypeError('unsupprted dtype (actual: {})'.format(A.dtype))
n = A.shape[0]
if not (b.shape == (n,) or b.shape == (n, 1)):
raise ValueError('b has incompatible dimensins')
b = b.astype(A.dtype).ravel()
if n == 0:
return cupy.empty_like(b), 0
b_norm = cupy.linalg.norm(b)
if b_norm == 0:
return b, 0
if atol is None:
atol = tol * float(b_norm)
else:
atol = max(float(atol), tol * float(b_norm))
if x0 is None:
x = cupy.zeros((n,), dtype=A.dtype)
else:
if not (x0.shape == (n,) or x0.shape == (n, 1)):
raise ValueError('x0 has incompatible dimensins')
x = x0.astype(A.dtype).ravel()
if maxiter is None:
maxiter = n * 10
if restart is None:
restart = 20
restart = min(restart, n)
if callback_type is None:
callback_type = 'pr_norm'
if callback_type not in ('x', 'pr_norm'):
raise ValueError('Unknow callback_type: {}'.format(callback_type))
if callback is None:
callback_type = None
V = cupy.empty((n, restart), dtype=A.dtype, order='F')
H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F')
e = numpy.zeros((restart+1,), dtype=A.dtype)
matvec, psolve = _make_funcs(A, M)
compute_hu = _make_compute_hu(V)
iters = 0
while True:
mx = psolve(x)
r = b - matvec(mx)
r_norm = cublas.nrm2(r)
if callback_type == 'x':
callback(mx)
elif callback_type == 'pr_norm' and iters > 0:
callback(r_norm / b_norm)
if r_norm <= atol or iters >= maxiter:
break
v = r / r_norm
V[:, 0] = v
e[0] = r_norm
# Arnoldi iteration
for j in range(restart):
z = psolve(v)
u = matvec(z)
H[:j+1, j], u = compute_hu(u, j)
cublas.nrm2(u, out=H[j+1, j])
if j+1 < restart:
v = u / H[j+1, j]
V[:, j+1] = v
# Note: The least-square solution to equation Hy = e is computed on CPU
# because it is faster if tha matrix size is small.
ret = scipy.linalg.lstsq(cupy.asnumpy(H), e)
y = cupy.array(ret[0])
x += V @ y
iters += restart
info = 0
if iters == maxiter and not (r_norm <= atol):
info = iters
return mx, info
|
def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None,
callback=None, atol=None, callback_type=None):
"""Uses Generalized Minimal RESidual iteration to solve ``Ax = b``.
Args:
A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): The real or complex
matrix of the linear system with shape ``(n, n)``.
b (cupy.ndarray): Right hand side of the linear system with shape
``(n,)`` or ``(n, 1)``.
x0 (cupy.ndarray): Starting guess for the solution.
tol (float): Tolerance for convergence.
restart (int): Number of iterations between restarts. Larger values
increase iteration cost, but may be necessary for convergence.
maxiter (int): Maximum number of iterations.
M (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Preconditioner for
``A``. The preconditioner should approximate the inverse of ``A``.
callback (function): User-specified function to call on every restart.
It is called as ``callback(arg)``, where ``arg`` is selected by
``callback_type``.
callback_type (str): 'x' or 'pr_norm'. If 'x', the current solution
vector is used as an argument of callback function. if `pr_norm`,
relative (preconditioned) residual norm is used as an arugment.
atol (float): Tolerance for convergence.
Returns:
tuple:
It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
the converged solution and ``info`` provides convergence
information.
Reference:
M. Wang, H. Klie, M. Parashar and H. Sudan, "Solving Sparse Linear
Systems on NVIDIA Tesla GPUs", ICCS 2009 (2009).
.. seealso:: :func:`scipy.sparse.linalg.gmres`
"""
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape: {})'.format(A.shape))
if A.dtype.char not in 'fdFD':
raise TypeError('unsupprted dtype (actual: {})'.format(A.dtype))
n = A.shape[0]
if not (b.shape == (n,) or b.shape == (n, 1)):
raise ValueError('b has incompatible dimensins')
b = b.astype(A.dtype).ravel()
if n == 0:
return cupy.empty_like(b), 0
b_norm = cupy.linalg.norm(b)
if b_norm == 0:
return b, 0
if atol is None:
atol = tol * float(b_norm)
else:
atol = max(float(atol), tol * float(b_norm))
if x0 is None:
x = cupy.zeros((n,), dtype=A.dtype)
else:
if not (x0.shape == (n,) or x0.shape == (n, 1)):
raise ValueError('x0 has incompatible dimensions')
x = x0.astype(A.dtype).ravel()
if maxiter is None:
maxiter = n * 10
if restart is None:
restart = 20
restart = min(restart, n)
if callback_type is None:
callback_type = 'pr_norm'
if callback_type not in ('x', 'pr_norm'):
raise ValueError('Unknow callback_type: {}'.format(callback_type))
if callback is None:
callback_type = None
V = cupy.empty((n, restart), dtype=A.dtype, order='F')
H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F')
e = numpy.zeros((restart+1,), dtype=A.dtype)
matvec, psolve = _make_funcs(A, M)
compute_hu = _make_compute_hu(V)
iters = 0
while True:
mx = psolve(x)
r = b - matvec(mx)
r_norm = cublas.nrm2(r)
if callback_type == 'x':
callback(mx)
elif callback_type == 'pr_norm' and iters > 0:
callback(r_norm / b_norm)
if r_norm <= atol or iters >= maxiter:
break
v = r / r_norm
V[:, 0] = v
e[0] = r_norm
# Arnoldi iteration
for j in range(restart):
z = psolve(v)
u = matvec(z)
H[:j+1, j], u = compute_hu(u, j)
cublas.nrm2(u, out=H[j+1, j])
if j+1 < restart:
v = u / H[j+1, j]
V[:, j+1] = v
# Note: The least-square solution to equation Hy = e is computed on CPU
# because it is faster if tha matrix size is small.
ret = scipy.linalg.lstsq(cupy.asnumpy(H), e)
y = cupy.array(ret[0])
x += V @ y
iters += restart
info = 0
if iters == maxiter and not (r_norm <= atol):
info = iters
return mx, info
|
28,591 |
def plot_trace(
data: InferenceData,
var_names: Optional[Sequence[str]] = None,
filter_vars: Optional[str] = None,
transform: Optional[Callable] = None,
coords: Optional[CoordSpec] = None,
divergences: Optional[str] = "auto",
kind: Optional[str] = "trace",
figsize: Optional[Tuple[float, float]] = None,
rug: bool = False,
lines: Optional[List[Tuple[str, CoordSpec, Any]]] = None,
circ_var_names: Optional[List[str]] = None,
circ_var_units: str = "radians",
compact: bool = True,
compact_prop: Optional[Union[str, Mapping[str, Any]]] = None,
combined: bool = False,
chain_prop: Optional[Union[str, Mapping[str, Any]]] = None,
legend: bool = False,
plot_kwargs: Optional[KwargSpec] = None,
fill_kwargs: Optional[KwargSpec] = None,
rug_kwargs: Optional[KwargSpec] = None,
hist_kwargs: Optional[KwargSpec] = None,
trace_kwargs: Optional[KwargSpec] = None,
rank_kwargs: Optional[KwargSpec] = None,
labeller=None,
axes=None,
backend: Optional[str] = None,
backend_config: Optional[KwargSpec] = None,
backend_kwargs: Optional[KwargSpec] = None,
show: Optional[bool] = None,
):
"""Plot distribution (histogram or kernel density estimates) and sampled values or rank plot.
If `divergences` data is available in `sample_stats`, will plot the location of divergences as
dashed vertical lines.
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
var_names: str or list of str, optional
One or more variables to be plotted. Prefix the variables by `~` when you want
to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
coords: dict of {str: slice or array_like}, optional
Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel`
divergences: {"bottom", "top", None}, optional
Plot location of divergences on the traceplots.
kind: {"trace", "rank_bar", "rank_vlines"}, optional
Choose between plotting sampled values per iteration and rank plots.
transform: callable, optional
Function to transform data (defaults to None i.e.the identity function)
figsize: tuple of (float, float), optional
If None, size is (12, variables * 2)
rug: bool, optional
If True adds a rugplot of samples. Defaults to False. Ignored for 2D KDE.
Only affects continuous variables.
lines: list of tuple of (str, dict, array_like), optional
List of (var_name, {'coord': selection}, [line, positions]) to be overplotted as
vertical lines on the density and horizontal lines on the trace.
circ_var_names : str or list of str, optional
List of circular variables to account for when plotting KDE.
circ_var_units : str
Whether the variables in `circ_var_names` are in "degrees" or "radians".
compact: bool, optional
Plot multidimensional variables in a single plot.
compact_prop: str or dict {str: array_like}, optional
Tuple containing the property name and the property values to distinguish different
dimensions with compact=True
combined: bool, optional
Flag for combining multiple chains into a single line. If False (default), chains will be
plotted separately.
chain_prop: str or dict {str: array_like}, optional
Tuple containing the property name and the property values to distinguish different chains
legend: bool, optional
Add a legend to the figure with the chain color code.
plot_kwargs, fill_kwargs, rug_kwargs, hist_kwargs: dict, optional
Extra keyword arguments passed to :func:`arviz.plot_dist`. Only affects continuous variables.
trace_kwargs: dict, optional
Extra keyword arguments passed to :meth:`matplotlib.axes.Axes.plot`
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
rank_kwargs : dict, optional
Extra keyword arguments passed to :func:`arviz.plot_rank`
axes: axes, optional
Matplotlib axes or bokeh figures.
backend: {"matplotlib", "bokeh"}, optional
Select plotting backend.
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams.
backend_kwargs: dict, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_rank : Plot rank order statistics of chains.
Examples
--------
Plot a subset variables and select them with partial naming
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('non_centered_eight')
>>> coords = {'school': ['Choate', 'Lawrenceville']}
>>> az.plot_trace(data, var_names=('theta'), filter_vars="like", coords=coords)
Show all dimensions of multidimensional variables in the same plot
.. plot::
:context: close-figs
>>> az.plot_trace(data, compact=True)
Display a rank plot instead of trace
.. plot::
:context: close-figs
>>> az.plot_trace(data, var_names=["mu", "tau"], kind="rank_bars")
Combine all chains into one distribution and select variables with regular expressions
.. plot::
:context: close-figs
>>> az.plot_trace(
>>> data, var_names=('^theta'), filter_vars="regex", coords=coords, combined=True
>>> )
Plot reference lines against distribution and trace
.. plot::
:context: close-figs
>>> lines = (('theta_t',{'school': "Choate"}, [-1]),)
>>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords, lines=lines)
"""
if kind not in {"trace", "rank_vlines", "rank_bars"}:
raise ValueError("The value of kind must be either trace, rank_vlines or rank_bars.")
if divergences == "auto":
divergences = "top" if rug else "bottom"
if divergences:
try:
divergence_data = convert_to_dataset(data, group="sample_stats").diverging
except (ValueError, AttributeError): # No sample_stats, or no `.diverging`
divergences = None
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if divergences:
divergence_data = get_coords(
divergence_data, {k: v for k, v in coords.items() if k in ("chain", "draw")}
)
else:
divergence_data = False
coords_data = get_coords(convert_to_dataset(data, group="posterior"), coords)
if transform is not None:
coords_data = transform(coords_data)
var_names = _var_names(var_names, coords_data, filter_vars)
if compact:
skip_dims = set(coords_data.dims) - {"chain", "draw"}
else:
skip_dims = set()
plotters = list(
xarray_var_iter(coords_data, var_names=var_names, combined=True, skip_dims=skip_dims)
)
max_plots = rcParams["plot.max_subplots"]
max_plots = len(plotters) if max_plots is None else max(max_plots // 2, 1)
if len(plotters) > max_plots:
warnings.warn(
"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
"of variables to plot ({len_plotters}), generating only {max_plots} "
"plots".format(max_plots=max_plots, len_plotters=len(plotters)),
UserWarning,
)
plotters = plotters[:max_plots]
# TODO: Check if this can be further simplified
trace_plot_args = dict(
# User Kwargs
data=coords_data,
var_names=var_names,
# coords = coords,
divergences=divergences,
kind=kind,
figsize=figsize,
rug=rug,
lines=lines,
circ_var_names=circ_var_names,
circ_var_units=circ_var_units,
plot_kwargs=plot_kwargs,
fill_kwargs=fill_kwargs,
rug_kwargs=rug_kwargs,
hist_kwargs=hist_kwargs,
trace_kwargs=trace_kwargs,
rank_kwargs=rank_kwargs,
compact=compact,
compact_prop=compact_prop,
combined=combined,
chain_prop=chain_prop,
legend=legend,
labeller=labeller,
# Generated kwargs
divergence_data=divergence_data,
# skip_dims=skip_dims,
plotters=plotters,
axes=axes,
backend_config=backend_config,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
plot = get_plotting_function("plot_trace", "traceplot", backend)
axes = plot(**trace_plot_args)
return axes
|
def plot_trace(
data: InferenceData,
var_names: Optional[Sequence[str]] = None,
filter_vars: Optional[str] = None,
transform: Optional[Callable] = None,
coords: Optional[CoordSpec] = None,
divergences: Optional[str] = "auto",
kind: Optional[str] = "trace",
figsize: Optional[Tuple[float, float]] = None,
rug: bool = False,
lines: Optional[List[Tuple[str, CoordSpec, Any]]] = None,
circ_var_names: Optional[List[str]] = None,
circ_var_units: str = "radians",
compact: bool = True,
compact_prop: Optional[Union[str, Mapping[str, Any]]] = None,
combined: bool = False,
chain_prop: Optional[Union[str, Mapping[str, Any]]] = None,
legend: bool = False,
plot_kwargs: Optional[KwargSpec] = None,
fill_kwargs: Optional[KwargSpec] = None,
rug_kwargs: Optional[KwargSpec] = None,
hist_kwargs: Optional[KwargSpec] = None,
trace_kwargs: Optional[KwargSpec] = None,
rank_kwargs: Optional[KwargSpec] = None,
labeller=None,
axes=None,
backend: Optional[str] = None,
backend_config: Optional[KwargSpec] = None,
backend_kwargs: Optional[KwargSpec] = None,
show: Optional[bool] = None,
):
"""Plot distribution (histogram or kernel density estimates) and sampled values or rank plot.
If `divergences` data is available in `sample_stats`, will plot the location of divergences as
dashed vertical lines.
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
var_names: str or list of str, optional
One or more variables to be plotted. Prefix the variables by ``~`` when you want
to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
coords: dict of {str: slice or array_like}, optional
Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel`
divergences: {"bottom", "top", None}, optional
Plot location of divergences on the traceplots.
kind: {"trace", "rank_bar", "rank_vlines"}, optional
Choose between plotting sampled values per iteration and rank plots.
transform: callable, optional
Function to transform data (defaults to None i.e.the identity function)
figsize: tuple of (float, float), optional
If None, size is (12, variables * 2)
rug: bool, optional
If True adds a rugplot of samples. Defaults to False. Ignored for 2D KDE.
Only affects continuous variables.
lines: list of tuple of (str, dict, array_like), optional
List of (var_name, {'coord': selection}, [line, positions]) to be overplotted as
vertical lines on the density and horizontal lines on the trace.
circ_var_names : str or list of str, optional
List of circular variables to account for when plotting KDE.
circ_var_units : str
Whether the variables in `circ_var_names` are in "degrees" or "radians".
compact: bool, optional
Plot multidimensional variables in a single plot.
compact_prop: str or dict {str: array_like}, optional
Tuple containing the property name and the property values to distinguish different
dimensions with compact=True
combined: bool, optional
Flag for combining multiple chains into a single line. If False (default), chains will be
plotted separately.
chain_prop: str or dict {str: array_like}, optional
Tuple containing the property name and the property values to distinguish different chains
legend: bool, optional
Add a legend to the figure with the chain color code.
plot_kwargs, fill_kwargs, rug_kwargs, hist_kwargs: dict, optional
Extra keyword arguments passed to :func:`arviz.plot_dist`. Only affects continuous variables.
trace_kwargs: dict, optional
Extra keyword arguments passed to :meth:`matplotlib.axes.Axes.plot`
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
rank_kwargs : dict, optional
Extra keyword arguments passed to :func:`arviz.plot_rank`
axes: axes, optional
Matplotlib axes or bokeh figures.
backend: {"matplotlib", "bokeh"}, optional
Select plotting backend.
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams.
backend_kwargs: dict, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_rank : Plot rank order statistics of chains.
Examples
--------
Plot a subset variables and select them with partial naming
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('non_centered_eight')
>>> coords = {'school': ['Choate', 'Lawrenceville']}
>>> az.plot_trace(data, var_names=('theta'), filter_vars="like", coords=coords)
Show all dimensions of multidimensional variables in the same plot
.. plot::
:context: close-figs
>>> az.plot_trace(data, compact=True)
Display a rank plot instead of trace
.. plot::
:context: close-figs
>>> az.plot_trace(data, var_names=["mu", "tau"], kind="rank_bars")
Combine all chains into one distribution and select variables with regular expressions
.. plot::
:context: close-figs
>>> az.plot_trace(
>>> data, var_names=('^theta'), filter_vars="regex", coords=coords, combined=True
>>> )
Plot reference lines against distribution and trace
.. plot::
:context: close-figs
>>> lines = (('theta_t',{'school': "Choate"}, [-1]),)
>>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords, lines=lines)
"""
if kind not in {"trace", "rank_vlines", "rank_bars"}:
raise ValueError("The value of kind must be either trace, rank_vlines or rank_bars.")
if divergences == "auto":
divergences = "top" if rug else "bottom"
if divergences:
try:
divergence_data = convert_to_dataset(data, group="sample_stats").diverging
except (ValueError, AttributeError): # No sample_stats, or no `.diverging`
divergences = None
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if divergences:
divergence_data = get_coords(
divergence_data, {k: v for k, v in coords.items() if k in ("chain", "draw")}
)
else:
divergence_data = False
coords_data = get_coords(convert_to_dataset(data, group="posterior"), coords)
if transform is not None:
coords_data = transform(coords_data)
var_names = _var_names(var_names, coords_data, filter_vars)
if compact:
skip_dims = set(coords_data.dims) - {"chain", "draw"}
else:
skip_dims = set()
plotters = list(
xarray_var_iter(coords_data, var_names=var_names, combined=True, skip_dims=skip_dims)
)
max_plots = rcParams["plot.max_subplots"]
max_plots = len(plotters) if max_plots is None else max(max_plots // 2, 1)
if len(plotters) > max_plots:
warnings.warn(
"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
"of variables to plot ({len_plotters}), generating only {max_plots} "
"plots".format(max_plots=max_plots, len_plotters=len(plotters)),
UserWarning,
)
plotters = plotters[:max_plots]
# TODO: Check if this can be further simplified
trace_plot_args = dict(
# User Kwargs
data=coords_data,
var_names=var_names,
# coords = coords,
divergences=divergences,
kind=kind,
figsize=figsize,
rug=rug,
lines=lines,
circ_var_names=circ_var_names,
circ_var_units=circ_var_units,
plot_kwargs=plot_kwargs,
fill_kwargs=fill_kwargs,
rug_kwargs=rug_kwargs,
hist_kwargs=hist_kwargs,
trace_kwargs=trace_kwargs,
rank_kwargs=rank_kwargs,
compact=compact,
compact_prop=compact_prop,
combined=combined,
chain_prop=chain_prop,
legend=legend,
labeller=labeller,
# Generated kwargs
divergence_data=divergence_data,
# skip_dims=skip_dims,
plotters=plotters,
axes=axes,
backend_config=backend_config,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
plot = get_plotting_function("plot_trace", "traceplot", backend)
axes = plot(**trace_plot_args)
return axes
|
11,792 |
def export_regions(objs):
"""
Convenience function to convert a sequence of Ginga canvas objects
to a ds9 file containing regions and
return a list of matching .
Parameters
----------
objs : seq of subclasses of `~ginga.canvas.CanvasObject.CanvasObjectBase`
Sequence of Ginga canvas objects compatible with Regions
Returns
-------
regions : `~regions.Regions` object
Returns an astropy regions Regions object
"""
regs = regions.Regions(map(ginga_canvas_object_to_astropy_region, objs))
return regs
|
def export_regions(objs):
"""
Convenience function to convert a sequence of Ginga canvas objects
to a DS9 file containing regions and
return a list of matching .
Parameters
----------
objs : seq of subclasses of `~ginga.canvas.CanvasObject.CanvasObjectBase`
Sequence of Ginga canvas objects compatible with Regions
Returns
-------
regions : `~regions.Regions` object
Returns an astropy regions Regions object
"""
regs = regions.Regions(map(ginga_canvas_object_to_astropy_region, objs))
return regs
|
45,565 |
def ReminderCancelled(name, timestamp=None):
return {
"event": "cancel",
"timestamp": timestamp,
"name": name
}
|
def ReminderCancelled(name, timestamp=None):
return {
"event": "cancel_reminder",
"timestamp": timestamp,
"name": name
}
|
17,460 |
def _encode_coordinates(variables, attributes, non_dim_coord_names):
# calculate global and variable specific coordinates
non_dim_coord_names = set(non_dim_coord_names)
for name in list(non_dim_coord_names):
if isinstance(name, str) and " " in name:
warnings.warn(
"coordinate {!r} has a space in its name, which means it "
"cannot be marked as a coordinate on disk and will be "
"saved as a data variable instead".format(name),
SerializationWarning,
stacklevel=6,
)
non_dim_coord_names.discard(name)
global_coordinates = non_dim_coord_names.copy()
variable_coordinates = defaultdict(set)
not_technically_coordinates = set()
for coord_name in non_dim_coord_names:
target_dims = variables[coord_name].dims
for k, v in variables.items():
if (
k not in non_dim_coord_names
and k not in v.dims
and set(target_dims) <= set(v.dims)
):
variable_coordinates[k].add(coord_name)
if any(
attr_name in v.encoding and coord_name in v.encoding.get(attr_name)
for attr_name in CF_RELATED_DATA
):
not_technically_coordinates.add(coord_name)
global_coordinates.discard(coord_name)
variables = {k: v.copy(deep=False) for k, v in variables.items()}
# keep track of variable names written to file under the "coordinates" attributes
written_coords = set()
for name, var in variables.items():
encoding = var.encoding
attrs = var.attrs
if "coordinates" in attrs and "coordinates" in encoding:
raise ValueError(
f"'coordinates' found in both attrs and encoding for variable {name!r}."
)
# if coordinates set to None, don't write coordinates attribute
if "coordinates" in attrs and attrs.get("coordinates") is None:
continue
# this will copy coordinates from encoding to attrs if "coordinates" in attrs
# after the next line, "coordinates" is never in encoding
# we get support for attrs["coordinates"] for free.
coords_str = pop_to(encoding, attrs, "coordinates")
if not coords_str and variable_coordinates[name]:
coordinates_text = " ".join(
str(coord_name)
for coord_name in variable_coordinates[name]
if coord_name not in not_technically_coordinates
)
if coordinates_text:
attrs["coordinates"] = coordinates_text
if "coordinates" in attrs:
written_coords.update(attrs["coordinates"].split())
# These coordinates are not associated with any particular variables, so we
# save them under a global 'coordinates' attribute so xarray can roundtrip
# the dataset faithfully. Because this serialization goes beyond CF
# conventions, only do it if necessary.
# Reference discussion:
# http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/007571.html
global_coordinates.difference_update(written_coords)
if global_coordinates:
attributes = dict(attributes)
if "coordinates" in attributes:
warnings.warn(
f"cannot serialize global coordinates {global_coordinates!r} because the global "
f"attribute 'coordinates' already exists. This may prevent faithful roundtripping"
f"of xarray datasets",
SerializationWarning,
)
else:
attributes["coordinates"] = " ".join(map(str, global_coordinates))
return variables, attributes
|
def _encode_coordinates(variables, attributes, non_dim_coord_names):
# calculate global and variable specific coordinates
non_dim_coord_names = set(non_dim_coord_names)
for name in list(non_dim_coord_names):
if isinstance(name, str) and " " in name:
warnings.warn(
"coordinate {!r} has a space in its name, which means it "
"cannot be marked as a coordinate on disk and will be "
"saved as a data variable instead".format(name),
SerializationWarning,
stacklevel=6,
)
non_dim_coord_names.discard(name)
global_coordinates = non_dim_coord_names.copy()
variable_coordinates = defaultdict(set)
not_technically_coordinates = set()
for coord_name in non_dim_coord_names:
target_dims = variables[coord_name].dims
for k, v in variables.items():
if (
k not in non_dim_coord_names
and k not in v.dims
and set(target_dims) <= set(v.dims)
):
variable_coordinates[k].add(coord_name)
if any(
attr_name in v.encoding and coord_name in v.encoding.get(attr_name)
for attr_name in CF_RELATED_DATA
):
not_technically_coordinates.add(coord_name)
global_coordinates.discard(coord_name)
variables = {k: v.copy(deep=False) for k, v in variables.items()}
# keep track of variable names written to file under the "coordinates" attributes
written_coords = set()
for name, var in variables.items():
encoding = var.encoding
attrs = var.attrs
if "coordinates" in attrs and "coordinates" in encoding:
raise ValueError(
f"'coordinates' found in both attrs and encoding for variable {name!r}."
)
# if coordinates set to None, don't write coordinates attribute
if attrs.get("coordinates") is None or encoding.get("coordinates") is None:
continue
# this will copy coordinates from encoding to attrs if "coordinates" in attrs
# after the next line, "coordinates" is never in encoding
# we get support for attrs["coordinates"] for free.
coords_str = pop_to(encoding, attrs, "coordinates")
if not coords_str and variable_coordinates[name]:
coordinates_text = " ".join(
str(coord_name)
for coord_name in variable_coordinates[name]
if coord_name not in not_technically_coordinates
)
if coordinates_text:
attrs["coordinates"] = coordinates_text
if "coordinates" in attrs:
written_coords.update(attrs["coordinates"].split())
# These coordinates are not associated with any particular variables, so we
# save them under a global 'coordinates' attribute so xarray can roundtrip
# the dataset faithfully. Because this serialization goes beyond CF
# conventions, only do it if necessary.
# Reference discussion:
# http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/007571.html
global_coordinates.difference_update(written_coords)
if global_coordinates:
attributes = dict(attributes)
if "coordinates" in attributes:
warnings.warn(
f"cannot serialize global coordinates {global_coordinates!r} because the global "
f"attribute 'coordinates' already exists. This may prevent faithful roundtripping"
f"of xarray datasets",
SerializationWarning,
)
else:
attributes["coordinates"] = " ".join(map(str, global_coordinates))
return variables, attributes
|
34,532 |
def _validate_domain(domain_path: Text):
from rasa.shared.core.domain import InvalidDomain
from rasa.shared.core.domain import Domain
try:
Domain.load(domain_path)
except InvalidDomain as e:
cli_utils.print_error_and_exit(
"The provided domain file could not be loaded. " "Error: {}".format(e)
)
|
def _validate_domain(domain_path: Text):
from rasa.shared.core.domain import Domain, InvalidDomain
from rasa.shared.core.domain import Domain
try:
Domain.load(domain_path)
except InvalidDomain as e:
cli_utils.print_error_and_exit(
"The provided domain file could not be loaded. " "Error: {}".format(e)
)
|
32,521 |
def main() -> None:
"""
main function, parses params and runs command functions
"""
params = demisto.params()
args = demisto.args()
command = demisto.command()
api_key = params.get('apikey')
# get the service API url
base_url = urljoin(params.get('url'), '/api/v1')
# if your Client class inherits from BaseClient, SSL verification is
# handled out of the box by it, just pass ``verify_certificate`` to
# the Client constructor
verify_certificate = not params.get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = arg_to_datetime(
arg=params.get('first_fetch', '3 days'),
arg_name='First fetch time',
required=True
)
first_fetch_timestamp = int(first_fetch_time.timestamp()) if first_fetch_time else None
# Using assert as a type guard (since first_fetch_time is always an int when required=True)
assert isinstance(first_fetch_timestamp, int)
# if your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = params.get('proxy', False)
# Integration that implements reputation commands (e.g. url, ip, domain,..., etc) must have
# a reliability score of the source providing the intelligence data.
reliability = params.get('integrationReliability', DBotScoreReliability.C)
# INTEGRATION DEVELOPER TIP
# You can use functions such as ``demisto.debug()``, ``demisto.info()``,
# etc. to print information in the XSOAR server log. You can set the log
# level on the server configuration
# See: https://xsoar.pan.dev/docs/integrations/code-conventions#logging
demisto.debug(f'Command being called is {command}')
try:
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, params, first_fetch_timestamp)
return_results(result)
elif command == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
alert_status = params.get('alert_status', None)
alert_type = params.get('alert_type', None)
min_severity = params.get('min_severity', None)
# Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
max_results = arg_to_number(
arg=params.get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(), # getLastRun() gets the last run dict
first_fetch_time=first_fetch_timestamp,
alert_status=alert_status,
min_severity=min_severity,
alert_type=alert_type
)
# saves next_run for the time fetch-incidents is invoked
demisto.setLastRun(next_run)
# fetch-incidents calls ``demisto.incidents()`` to provide the list
# of incidents to create
demisto.incidents(incidents)
elif command == 'ip':
default_threshold_ip = int(params.get('threshold_ip', '65'))
return_results(ip_reputation_command(client, args, default_threshold_ip, reliability))
elif command == 'domain':
default_threshold_domain = int(params.get('threshold_domain', '65'))
return_results(domain_reputation_command(client, args, default_threshold_domain, reliability))
elif command == 'helloworld-say-hello':
return_results(say_hello_command(client, args))
elif command == 'helloworld-search-alerts':
return_results(search_alerts_command(client, args))
elif command == 'helloworld-get-alert':
return_results(get_alert_command(client, args))
elif command == 'helloworld-update-alert-status':
return_results(update_alert_status_command(client, args))
elif command == 'helloworld-scan-start':
return_results(scan_start_command(client, args))
elif command == 'helloworld-scan-status':
return_results(scan_status_command(client, args))
elif command == 'helloworld-scan-results':
return_results(scan_results_command(client, args))
else:
raise NotImplementedError(f'Command {command} is not implemented')
# Log exceptions and return errors
except Exception as e:
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
|
def main() -> None:
"""
main function, parses params and runs command functions
"""
params = demisto.params()
args = demisto.args()
command = demisto.command()
api_key = params.get('apikey')
# get the service API url
base_url = urljoin(params.get('url'), '/api/v1')
# if your Client class inherits from BaseClient, SSL verification is
# handled out of the box by it, just pass ``verify_certificate`` to
# the Client constructor
verify_certificate = not params.get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = arg_to_datetime(
arg=params.get('first_fetch', '3 days'),
arg_name='First fetch time',
required=True
)
first_fetch_timestamp = int(first_fetch_time.timestamp()) if first_fetch_time else None
# Using assert as a type guard (since first_fetch_time is always an int when required=True)
assert isinstance(first_fetch_timestamp, int)
# if your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = params.get('proxy', False)
# Integration that implements reputation commands (e.g. url, ip, domain,..., etc) must have
# a reliability score of the source providing the intelligence data.
reliability = params.get('integrationReliability', DBotScoreReliability.C)
# INTEGRATION DEVELOPER TIP
# You can use functions such as ``demisto.debug()``, ``demisto.info()``,
# etc. to print information in the XSOAR server log. You can set the log
# level on the server configuration
# See: https://xsoar.pan.dev/docs/integrations/code-conventions#logging
demisto.debug(f'Command being called is {command}')
try:
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, params, first_fetch_timestamp)
return_results(result)
elif command == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
alert_status = params.get('alert_status', None)
alert_type = params.get('alert_type', None)
min_severity = params.get('min_severity', None)
# Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
max_results = arg_to_number(
arg=params.get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(), # getLastRun() gets the last run dict
first_fetch_time=first_fetch_timestamp,
alert_status=alert_status,
min_severity=min_severity,
alert_type=alert_type
)
# saves next_run for the time fetch-incidents is invoked
demisto.setLastRun(next_run)
# fetch-incidents calls ``demisto.incidents()`` to provide the list
# of incidents to create
demisto.incidents(incidents)
elif command == 'ip':
default_threshold_ip = arg_to_number(params.get('threshold_ip', '65'))
return_results(ip_reputation_command(client, args, default_threshold_ip, reliability))
elif command == 'domain':
default_threshold_domain = int(params.get('threshold_domain', '65'))
return_results(domain_reputation_command(client, args, default_threshold_domain, reliability))
elif command == 'helloworld-say-hello':
return_results(say_hello_command(client, args))
elif command == 'helloworld-search-alerts':
return_results(search_alerts_command(client, args))
elif command == 'helloworld-get-alert':
return_results(get_alert_command(client, args))
elif command == 'helloworld-update-alert-status':
return_results(update_alert_status_command(client, args))
elif command == 'helloworld-scan-start':
return_results(scan_start_command(client, args))
elif command == 'helloworld-scan-status':
return_results(scan_status_command(client, args))
elif command == 'helloworld-scan-results':
return_results(scan_results_command(client, args))
else:
raise NotImplementedError(f'Command {command} is not implemented')
# Log exceptions and return errors
except Exception as e:
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
|
38,240 |
def load_openers(opt) -> Optional[List[str]]:
if opt['task'].startswith('internal:'):
base_task = opt['task']
else:
base_task = opt['task'].split(':')[0]
if base_task == 'self_chat':
# TODO(#2284): Load default openers from s3
return None
print('[ loading conversation openers... ]')
# create dummy task so we can get openers from the data
task_opt = copy.deepcopy(opt)
task_opt['task'] = base_task
# default train will loop forever, but evalmode will stop after one epoch
datatype = task_opt['datatype']
if 'train' in datatype and 'evalmode' not in datatype:
task_opt['datatype'] = f'{datatype}:evalmode'
task_opt['interactive_task'] = False
task_opt['selfchat_task'] = False
task_opt['fixed_response'] = None
task_agent = FixedResponseAgent(task_opt)
task_world = create_task(task_opt, task_agent)
# run through task data, collecting all first messages
openers = set()
is_first_turn = True
while not task_world.epoch_done():
task_world.parley()
msg = task_world.get_acts()[0]
# add only the first message in the episode
if is_first_turn and msg.get('text'):
openers.add(msg['text'])
is_first_turn = msg.get('episode_done', False)
print(f'[ loaded {len(openers)} openers ]')
return list(openers)
|
def load_openers(opt) -> Optional[List[str]]:
if opt['task'].startswith('internal:') or opt['task'].startswith('fb:'):
base_task = opt['task']
else:
base_task = opt['task'].split(':')[0]
if base_task == 'self_chat':
# TODO(#2284): Load default openers from s3
return None
print('[ loading conversation openers... ]')
# create dummy task so we can get openers from the data
task_opt = copy.deepcopy(opt)
task_opt['task'] = base_task
# default train will loop forever, but evalmode will stop after one epoch
datatype = task_opt['datatype']
if 'train' in datatype and 'evalmode' not in datatype:
task_opt['datatype'] = f'{datatype}:evalmode'
task_opt['interactive_task'] = False
task_opt['selfchat_task'] = False
task_opt['fixed_response'] = None
task_agent = FixedResponseAgent(task_opt)
task_world = create_task(task_opt, task_agent)
# run through task data, collecting all first messages
openers = set()
is_first_turn = True
while not task_world.epoch_done():
task_world.parley()
msg = task_world.get_acts()[0]
# add only the first message in the episode
if is_first_turn and msg.get('text'):
openers.add(msg['text'])
is_first_turn = msg.get('episode_done', False)
print(f'[ loaded {len(openers)} openers ]')
return list(openers)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.