id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
2,519 |
def d2_pinball_loss_score(
y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
):
"""
:math:`D^2` regression score function, \
fraction of pinball loss deviance explained.
Best possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A model that always uses the empirical median of `y_true`
as constant prediction, disregarding the input features,
gets a :math:`D^2` score of 0.0.
Read more in the :ref:`User Guide <d2_score>`.
.. versionadded:: 1.1
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), optional
Sample weights.
alpha : float, default=0.5
Pinball loss quantile parameter, determines the slope of the pinball_loss.
Equivalent to `d2_absolute_error_score` when `alpha=0.5`.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
Returns
-------
score : float or ndarray of floats
The :math:`D^2` score with a pinball loss deviance
or ndarray of scores if `multioutput='raw_values'`.
Notes
-----
This is not a symmetric function.
Like :math:`R^2`, :math:`D^2` score may be negative
(it need not actually be the square of a quantity D).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
Wainwright. "Statistical Learning with Sparsity: The Lasso and
Generalizations." (2015). https://trevorhastie.github.io
Examples
--------
>>> from sklearn.metrics import d2_pinball_loss_score
>>> y_true = [1, 2, 3]
>>> y_pred = [1, 3, 3]
>>> d2_pinball_loss_score(y_true, y_pred)
0.5
>>> d2_pinball_loss_score(y_true, y_pred, alpha=0.9)
0.772...
>>> d2_pinball_loss_score(y_true, y_pred, alpha=0.1)
-1.045...
>>> d2_pinball_loss_score(y_true, y_true, alpha=0.1)
1.0
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput
)
check_consistent_length(y_true, y_pred, sample_weight)
if _num_samples(y_pred) < 2:
msg = "D^2 score is not well-defined with less than two samples."
warnings.warn(msg, UndefinedMetricWarning)
return float("nan")
numerator = mean_pinball_loss(
y_true,
y_pred,
sample_weight=sample_weight,
alpha=alpha,
multioutput="raw_values",
)
if sample_weight is None:
y_quantile = [np.percentile(y_true, q=alpha * 100, axis=0)] * len(y_true)
else:
sample_weight = _check_sample_weight(sample_weight, y_true)
y_quantile = [
_weighted_percentile(
y_true, sample_weight=sample_weight, percentile=alpha * 100
)
] * len(y_true)
denominator = mean_pinball_loss(
y_true,
y_quantile,
sample_weight=sample_weight,
alpha=alpha,
multioutput="raw_values",
)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0
if isinstance(multioutput, str):
if multioutput == "raw_values":
# return scores individually
return output_scores
elif multioutput == "uniform_average":
# passing None as weights to np.average results in uniform mean
avg_weights = None
else:
raise ValueError(
"multioutput is expected to be 'raw_values' "
"or 'uniform_average' but we got %r"
" instead." % multioutput
)
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
|
def d2_pinball_loss_score(
y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
):
"""
:math:`D^2` regression score function, \
fraction of pinball loss deviance explained.
Best possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A model that always uses the empirical alpha-quantile of `y_true`
as constant prediction, disregarding the input features,
gets a :math:`D^2` score of 0.0.
Read more in the :ref:`User Guide <d2_score>`.
.. versionadded:: 1.1
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), optional
Sample weights.
alpha : float, default=0.5
Pinball loss quantile parameter, determines the slope of the pinball_loss.
Equivalent to `d2_absolute_error_score` when `alpha=0.5`.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
Returns
-------
score : float or ndarray of floats
The :math:`D^2` score with a pinball loss deviance
or ndarray of scores if `multioutput='raw_values'`.
Notes
-----
This is not a symmetric function.
Like :math:`R^2`, :math:`D^2` score may be negative
(it need not actually be the square of a quantity D).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
Wainwright. "Statistical Learning with Sparsity: The Lasso and
Generalizations." (2015). https://trevorhastie.github.io
Examples
--------
>>> from sklearn.metrics import d2_pinball_loss_score
>>> y_true = [1, 2, 3]
>>> y_pred = [1, 3, 3]
>>> d2_pinball_loss_score(y_true, y_pred)
0.5
>>> d2_pinball_loss_score(y_true, y_pred, alpha=0.9)
0.772...
>>> d2_pinball_loss_score(y_true, y_pred, alpha=0.1)
-1.045...
>>> d2_pinball_loss_score(y_true, y_true, alpha=0.1)
1.0
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput
)
check_consistent_length(y_true, y_pred, sample_weight)
if _num_samples(y_pred) < 2:
msg = "D^2 score is not well-defined with less than two samples."
warnings.warn(msg, UndefinedMetricWarning)
return float("nan")
numerator = mean_pinball_loss(
y_true,
y_pred,
sample_weight=sample_weight,
alpha=alpha,
multioutput="raw_values",
)
if sample_weight is None:
y_quantile = [np.percentile(y_true, q=alpha * 100, axis=0)] * len(y_true)
else:
sample_weight = _check_sample_weight(sample_weight, y_true)
y_quantile = [
_weighted_percentile(
y_true, sample_weight=sample_weight, percentile=alpha * 100
)
] * len(y_true)
denominator = mean_pinball_loss(
y_true,
y_quantile,
sample_weight=sample_weight,
alpha=alpha,
multioutput="raw_values",
)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0
if isinstance(multioutput, str):
if multioutput == "raw_values":
# return scores individually
return output_scores
elif multioutput == "uniform_average":
# passing None as weights to np.average results in uniform mean
avg_weights = None
else:
raise ValueError(
"multioutput is expected to be 'raw_values' "
"or 'uniform_average' but we got %r"
" instead." % multioutput
)
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
|
50,255 |
def generate_attributes_search_document_value(
assigned_attributes: "QuerySet",
):
"""Prepare `search_document` value for assigned attributes.
Method should received assigned attributes with prefetched `values`
and `assignment__attribute`.
"""
attribute_data = ""
for assigned_attribute in assigned_attributes:
attribute = assigned_attribute.assignment.attribute
input_type = attribute.input_type
values = assigned_attribute.values.all()
values_list = []
if input_type in [AttributeInputType.DROPDOWN, AttributeInputType.MULTISELECT]:
values_list = [value.name for value in values]
elif input_type == AttributeInputType.RICH_TEXT:
values_list = [
clean_editor_js(value.rich_text, to_string=True) for value in values
]
elif input_type == AttributeInputType.NUMERIC:
unit = attribute.unit
if unit:
values_list = [value.name + unit for value in values]
else:
values_list = [value.name for value in values]
elif input_type in [AttributeInputType.DATE, AttributeInputType.DATE_TIME]:
values_list = [value.date_time.isoformat() for value in values]
if values_list:
values_data = "\n".join(values_list)
attribute_data += values_data + "\n"
return attribute_data.lower()
|
def generate_attributes_search_document_value(
assigned_attributes: "QuerySet",
):
"""Prepare `search_document` value for assigned attributes.
Method should received assigned attributes with prefetched `values`
and `assignment__attribute`.
"""
attribute_data = ""
for assigned_attribute in assigned_attributes:
attribute = assigned_attribute.assignment.attribute
input_type = attribute.input_type
values = assigned_attribute.values.all()
values_list = []
if input_type in [AttributeInputType.DROPDOWN, AttributeInputType.MULTISELECT]:
values_list = [value.name for value in values]
elif input_type == AttributeInputType.RICH_TEXT:
values_list = [
clean_editor_js(value.rich_text, to_string=True) for value in values
]
elif input_type == AttributeInputType.NUMERIC:
unit = attribute.unit or ""
values_list = [value.name + unit for value in values]
elif input_type in [AttributeInputType.DATE, AttributeInputType.DATE_TIME]:
values_list = [value.date_time.isoformat() for value in values]
if values_list:
values_data = "\n".join(values_list)
attribute_data += values_data + "\n"
return attribute_data.lower()
|
39,704 |
def main():
module = ForemanOperatingsystemModule(
foreman_spec=dict(
name=dict(required=True),
release_name=dict(),
description=dict(),
os_family=dict(choices=OS_LIST, flat_name='family', aliases=['family']),
major=dict(),
minor=dict(),
architectures=dict(type='entity_list'),
media=dict(type='entity_list', flat_name='medium_ids', resource_type='media'),
ptables=dict(type='entity_list'),
provisioning_templates=dict(type='entity_list'),
password_hash=dict(choices=['MD5', 'SHA256', 'SHA512', 'Base64', 'Base64-Windows']),
# parameters is already in the HostMixin, but the flat_name detection does not work for operatingsystems
parameters=dict(type='list', elements='dict', options=parameter_ansible_spec, flat_name='os_parameters_attributes'),
),
argument_spec=dict(
state=dict(default='present', choices=['present', 'present_with_defaults', 'absent']),
updated_name=dict(),
),
required_if=[
['state', 'present', ['name', 'major', 'os_family']],
['state', 'present_with_defaults', ['name', 'major', 'os_family']],
],
required_one_of=[
['description', 'name'],
['description', 'major'],
],
)
module_params = module.foreman_params
with module.api_connection():
# Try to find the Operating System to work on
# name is however not unique, but description is, as well as "<name> <major>[.<minor>]"
entity = None
# If we have a description, search for it
if 'description' in module_params and module_params['description'] != '':
search_string = 'description="{0}"'.format(module_params['description'])
entity = module.find_resource('operatingsystems', search_string, failsafe=True)
# If we did not yet find a unique OS, search by name & version
# In case of state == absent, those information might be missing, we assume that we did not find an operatingsytem to delete then
if entity is None and 'name' in module_params and 'major' in module_params:
search_string = ','.join('{0}="{1}"'.format(key, module_params[key]) for key in ('name', 'major', 'minor') if key in module_params)
entity = module.find_resource('operatingsystems', search_string, failsafe=True)
if not entity and (module.state == 'present' or module.state == 'present_with_defaults'):
# we actually attempt to create a new one...
for param_name in ['major', 'os_family', 'password_hash']:
if param_name not in module_params.keys():
module.fail_json(msg='{0} is a required parameter to create a new operating system.'.format(param_name))
module.set_entity('entity', entity)
module.run()
|
def main():
module = ForemanOperatingsystemModule(
foreman_spec=dict(
name=dict(required=True),
release_name=dict(),
description=dict(),
os_family=dict(choices=OS_LIST, flat_name='family', aliases=['family']),
major=dict(),
minor=dict(),
architectures=dict(type='entity_list'),
media=dict(type='entity_list', flat_name='medium_ids', resource_type='media'),
ptables=dict(type='entity_list'),
provisioning_templates=dict(type='entity_list'),
password_hash=dict(choices=['MD5', 'SHA256', 'SHA512', 'Base64', 'Base64-Windows']),
# parameters is already in the ParametersMixin, but the flat_name detection does not work for operatingsystems
parameters=dict(type='list', elements='dict', options=parameter_ansible_spec, flat_name='os_parameters_attributes'),
),
argument_spec=dict(
state=dict(default='present', choices=['present', 'present_with_defaults', 'absent']),
updated_name=dict(),
),
required_if=[
['state', 'present', ['name', 'major', 'os_family']],
['state', 'present_with_defaults', ['name', 'major', 'os_family']],
],
required_one_of=[
['description', 'name'],
['description', 'major'],
],
)
module_params = module.foreman_params
with module.api_connection():
# Try to find the Operating System to work on
# name is however not unique, but description is, as well as "<name> <major>[.<minor>]"
entity = None
# If we have a description, search for it
if 'description' in module_params and module_params['description'] != '':
search_string = 'description="{0}"'.format(module_params['description'])
entity = module.find_resource('operatingsystems', search_string, failsafe=True)
# If we did not yet find a unique OS, search by name & version
# In case of state == absent, those information might be missing, we assume that we did not find an operatingsytem to delete then
if entity is None and 'name' in module_params and 'major' in module_params:
search_string = ','.join('{0}="{1}"'.format(key, module_params[key]) for key in ('name', 'major', 'minor') if key in module_params)
entity = module.find_resource('operatingsystems', search_string, failsafe=True)
if not entity and (module.state == 'present' or module.state == 'present_with_defaults'):
# we actually attempt to create a new one...
for param_name in ['major', 'os_family', 'password_hash']:
if param_name not in module_params.keys():
module.fail_json(msg='{0} is a required parameter to create a new operating system.'.format(param_name))
module.set_entity('entity', entity)
module.run()
|
32,440 |
def compare_two_dicts(left: dict, right: dict):
"""Compares two dictionaries and returns the differences in values."""
differences: list[tuple] = []
for left_k, left_v in left.items():
right_v = right.get(left_k)
if right.get(left_k) != left_v:
differences.append((f"{left_k} - {left_v}", right_v))
return differences
|
def compare_two_dicts(left: dict, right: dict):
"""Compares two dictionaries and returns the differences in values."""
differences: list[tuple] = []
for left_k, left_v in left.items():
right_v = right.get(left_k)
if right_v != left_v:
differences.append((f"{left_k} - {left_v}", right_v))
return differences
|
38,517 |
def test_half_space_interior_point_star_shaped_2d():
# Find interior point in star-shaped, but not convex domain
n = np.array(
[[0, 0, 1, 0, -1, 0, 1], [1, 1, 0, 1, 0, -1, 0], [0, 0, 0, 0, 0, 0, 0]]
)
x0 = np.array(
[
[0, 0, 2.0 / 3.0, 0, 1, 0, 0],
[1.0 / 3.0, 1.0 / 3.0, 0, 0, 0, 2.0 / 3.0, 0],
[0, 0, 0, 0, 0, 0, 0],
]
)
pts = np.array([[0, 1], [0, 2.0 / 3.0], [0, 0]])
pt = half_space.half_space_interior_point(n, x0, pts).reshape((-1, 1))
# Verify that the computed point is on the same side of the all normal
# vectors as a point known to be in the interior.
known_pt = np.array([5.0 / 6.0, 1.0 / 2.0, 0.0]).reshape((-1, 1))
assert np.all(
np.sign(np.sum(n * (pt - x0), axis=0))
== np.sign(np.sum(n * (known_pt - x0), axis=0))
)
|
def test_half_space_interior_point_star_shaped_2d():
# Find interior point in star-shaped, but not convex domain
n = np.array(
[[0, 0, 1, 0, -1, 0, 1], [1, 1, 0, 1, 0, -1, 0], [0, 0, 0, 0, 0, 0, 0]]
)
x0 = np.array(
[
[0, 0, 2.0 / 3.0, 0, 1, 0, 0],
[1.0 / 3.0, 1.0 / 3.0, 0, 0, 0, 2.0 / 3.0, 0],
[0, 0, 0, 0, 0, 0, 0],
]
)
pts = np.array([[0, 1], [0, 2.0 / 3.0], [0, 0]])
pt = half_space.half_space_interior_point(n, x0, pts).reshape((-1, 1))
# Verify that the computed point is on the same side of all the normal
# vectors as a point known to be in the interior.
known_pt = np.array([5.0 / 6.0, 1.0 / 2.0, 0.0]).reshape((-1, 1))
assert np.all(
np.sign(np.sum(n * (pt - x0), axis=0))
== np.sign(np.sum(n * (known_pt - x0), axis=0))
)
|
26,294 |
def _get_core_msg_lines(installed, latest) -> Tuple[List[List[str]], str]:
installed_s = installed.to_version_string(skip_matcher=True)
installed_line = ["installed", installed_s, ""]
update_info = ""
if latest is None:
update_info = (
"The latest version of dbt could not be determined!\n"
"Make sure that the following URL is accessible:\n"
f"{PYPI_VERSION_URL}"
)
return [installed_line], update_info
latest_s = latest.to_version_string(skip_matcher=True)
latest_line = ["latest", latest_s, green("Up to date!")]
if installed > latest:
latest_line[2] = green("Ahead of latest version!")
elif installed < latest:
latest_line[2] = yellow("Update available!")
update_info = (
"Your version of dbt is out of date! "
"You can find instructions for upgrading here:\n"
"https://docs.getdbt.com/docs/installation"
)
return [
installed_line,
latest_line,
], update_info
|
def _get_core_msg_lines(installed, latest) -> Tuple[List[List[str]], str]:
installed_s = installed.to_version_string(skip_matcher=True)
installed_line = ["installed", installed_s, ""]
update_info = ""
if latest is None:
update_info = (
"The latest version of dbt could not be determined!\n"
"Make sure that the following URL is accessible:\n"
f"{PYPI_VERSION_URL}"
)
return [installed_line], update_info
latest_s = latest.to_version_string(skip_matcher=True)
latest_line = ["latest", latest_s, green("Up to date!")]
if installed > latest:
latest_line[2] = green("Ahead of latest version!")
elif installed < latest:
latest_line[2] = yellow("Update available!")
update_info = (
"Your version of dbt-core is out of date! "
"You can find instructions for upgrading here:\n"
"https://docs.getdbt.com/docs/installation"
)
return [
installed_line,
latest_line,
], update_info
|
12,216 |
def find_index_cache() -> Tuple[str]:
files = []
for pkgs_dir in find_pkgs_dirs():
# caches are directories in pkgs_dir
path = join(pkgs_dir, "cache")
if isdir(path):
files.append(path)
return files
|
def find_index_cache() -> List[str]:
files = []
for pkgs_dir in find_pkgs_dirs():
# caches are directories in pkgs_dir
path = join(pkgs_dir, "cache")
if isdir(path):
files.append(path)
return files
|
48,495 |
def get_toolbar_el(driver: WebDriver, model: Plot) -> WebElement:
script = FIND_VIEW_SCRIPT + """
const id = arguments[0]
const {ToolbarPanelView} = Bokeh.require("models/annotations/toolbar_panel")
function* fn(view) {
for (const rv of view.renderer_views.values()) {
if (rv instanceof ToolbarPanelView) {
yield rv._toolbar_view.el
break
}
}
}
return head(find(views(), id, fn)) ?? null
"""
el = driver.execute_script(script, model.id)
if el is not None:
return el
else:
raise RuntimeError(f"can't resolve a view for {model}")
|
def get_toolbar_el(driver: WebDriver, model: Plot) -> WebElement:
script = FIND_VIEW_SCRIPT + """
const id = arguments[0]
const {ToolbarPanelView} = Bokeh.require("models/annotations/toolbar_panel")
function* fn(view) {
for (const rv of view.renderer_views.values()) {
if (rv instanceof ToolbarPanelView) {
yield rv._toolbar_view.el
break
}
}
}
return head(find(views(), id, fn)) ?? null
"""
el = driver.execute_script(script, model.id)
if el is None:
raise RuntimeError(f"can't resolve a view for {model}")
return el
|
2,387 |
def test_y_std_with_multitarget_normalized():
"""
Regression test for issues #17394 and #18065.
Check if GPR can compute y_std in predict() method when normalize_y==True
in multi-target regression.
"""
X_train = np.random.rand((11, 10))
# 6 target features -> multi-target
y_train = np.random.rand((11, 6))
X_test = np.random.rand((4, 10))
# Generic kernel
kernel = kernels.ConstantKernel(1.0, (1e-1, 1e3))
kernel *= kernels.RBF(10.0, (1e-3, 1e3))
# normalize_y == True
model = GaussianProcessRegressor(kernel=kernel,
n_restarts_optimizer=10,
alpha=0.1,
normalize_y=True)
model.fit(X_train, y_train)
y_pred, std = model.predict(X_test, return_std=True)
|
def test_y_std_with_multitarget_normalized():
"""
Regression test for issues #17394 and #18065.
Check if GPR can compute y_std in predict() method when normalize_y==True
in multi-target regression.
"""
X_train = np.random.rand((11, 10))
# 6 target features -> multi-target
y_train = np.random.rand((11, 6))
X_test = np.random.rand((4, 10))
# Generic kernel
kernel = kernels.ConstantKernel(1.0, (1e-1, 1e3))
kernel *= kernels.RBF(10.0, (1e-3, 1e3))
# normalize_y == True
model = GaussianProcessRegressor(kernel=kernel,
n_restarts_optimizer=10,
alpha=0.1,
normalize_y=True)
model.fit(X_train, y_train)
y_pred, y_std = model.predict(X_test, return_std=True)
|
20,838 |
def BrowserManager(
command_queue, status_queue, browser_params, manager_params, crash_recovery
):
"""
The BrowserManager function runs in each new browser process.
It is responsible for listening to command instructions from
the Task Manager and passing them to the command module to execute
and interface with Selenium. Command execution status is sent back
to the TaskManager.
"""
logger = logging.getLogger("openwpm")
try:
# Start Xvfb (if necessary), webdriver, and browser
driver, prof_folder, browser_settings = deploy_browser.deploy_browser(
status_queue, browser_params, manager_params, crash_recovery
)
if prof_folder[-1] != "/":
prof_folder += "/"
# Read the extension port -- if extension is enabled
# TODO: Initial communication from extension to TM should use sockets
if (
browser_params["browser"] == "firefox"
and browser_params["extension_enabled"]
):
logger.debug(
"BROWSER %i: Looking for extension port information "
"in %s" % (browser_params["browser_id"], prof_folder)
)
elapsed = 0
port = None
ep_filename = os.path.join(prof_folder, "extension_port.txt")
while elapsed < 5:
try:
with open(ep_filename, "rt") as f:
port = int(f.read().strip())
break
except IOError as e:
if e.errno != errno.ENOENT:
raise
time.sleep(0.1)
elapsed += 0.1
if port is None:
# try one last time, allowing all exceptions to propagate
with open(ep_filename, "rt") as f:
port = int(f.read().strip())
logger.debug(
"BROWSER %i: Connecting to extension on port %i"
% (browser_params["browser_id"], port)
)
extension_socket = clientsocket(serialization="json")
extension_socket.connect("127.0.0.1", int(port))
else:
extension_socket = None
logger.debug("BROWSER %i: BrowserManager ready." % browser_params["browser_id"])
# passes the profile folder back to the
# TaskManager to signal a successful startup
status_queue.put(("STATUS", "Browser Ready", (prof_folder, "READY")))
browser_params["profile_path"] = prof_folder
# starts accepting arguments until told to die
while True:
# no command for now -> sleep to avoid pegging CPU on blocking get
if command_queue.empty():
time.sleep(0.001)
continue
command = command_queue.get()
if type(command) is ShutdownCommand:
# Geckodriver creates a copy of the profile (and the original
# temp file created by FirefoxProfile() is deleted).
# We clear the profile attribute here to prevent prints from:
# https://github.com/SeleniumHQ/selenium/blob/4e4160dd3d2f93757cafb87e2a1c20d6266f5554/py/selenium/webdriver/firefox/webdriver.py#L193-L199
if driver.profile and not os.path.isdir(driver.profile.path):
driver.profile = None
driver.quit()
status_queue.put("OK")
return
logger.info(
"BROWSER %i: EXECUTING COMMAND: %s"
% (browser_params["browser_id"], str(command))
)
# attempts to perform an action and return an OK signal
# if command fails for whatever reason, tell the TaskManager to
# kill and restart its worker processes
try:
command_executor.execute_command(
command,
driver,
browser_settings,
browser_params,
manager_params,
extension_socket,
)
status_queue.put("OK")
except WebDriverException:
status_obj = Status()
# We handle WebDriverExceptions separately here because they
# are quite common, and we often still have a handle to the
# browser, allowing us to run the SHUTDOWN command.
string_tb = traceback.format_exception(*sys.exc_info())
if "about:neterror" in string_tb[-1]:
status_obj.set_name("NETERROR")
status_queue.put(pickle.dumps(status_obj))
continue
extra = parse_traceback_for_sentry(string_tb)
extra["exception"] = string_tb[-1]
logger.error(
"BROWSER %i: WebDriverException while executing command"
% browser_params["browser_id"],
exc_info=True,
extra=extra,
)
status_obj.set_name("FAILED")
status_obj.tb = sys.exc_info()
status_queue.put(pickle.dumps(status_obj))
except (ProfileLoadError, BrowserConfigError, AssertionError) as e:
status_obj = Status()
logger.error(
"BROWSER %i: %s thrown, informing parent and raising"
% (browser_params["browser_id"], e.__class__.__name__)
)
status_obj.set_name("CRITICAL")
status_obj.tb = sys.exc_info()
status_queue.put(pickle.dumps(status_obj))
return
except Exception:
status_obj = Status()
string_tb = traceback.format_exception(*sys.exc_info())
extra = parse_traceback_for_sentry(string_tb)
extra["exception"] = tb[-1]
logger.error(
"BROWSER %i: Crash in driver, restarting browser manager"
% browser_params["browser_id"],
exc_info=True,
extra=extra,
)
status_obj.set_name("FAILED")
status_obj.tb = sys.exc_info()
status_queue.put(pickle.dumps(status_obj))
return
|
def BrowserManager(
command_queue, status_queue, browser_params, manager_params, crash_recovery
):
"""
The BrowserManager function runs in each new browser process.
It is responsible for listening to command instructions from
the Task Manager and passing them to the command module to execute
and interface with Selenium. Command execution status is sent back
to the TaskManager.
"""
logger = logging.getLogger("openwpm")
try:
# Start Xvfb (if necessary), webdriver, and browser
driver, prof_folder, browser_settings = deploy_browser.deploy_browser(
status_queue, browser_params, manager_params, crash_recovery
)
if prof_folder[-1] != "/":
prof_folder += "/"
# Read the extension port -- if extension is enabled
# TODO: Initial communication from extension to TM should use sockets
if (
browser_params["browser"] == "firefox"
and browser_params["extension_enabled"]
):
logger.debug(
"BROWSER %i: Looking for extension port information "
"in %s" % (browser_params["browser_id"], prof_folder)
)
elapsed = 0
port = None
ep_filename = os.path.join(prof_folder, "extension_port.txt")
while elapsed < 5:
try:
with open(ep_filename, "rt") as f:
port = int(f.read().strip())
break
except IOError as e:
if e.errno != errno.ENOENT:
raise
time.sleep(0.1)
elapsed += 0.1
if port is None:
# try one last time, allowing all exceptions to propagate
with open(ep_filename, "rt") as f:
port = int(f.read().strip())
logger.debug(
"BROWSER %i: Connecting to extension on port %i"
% (browser_params["browser_id"], port)
)
extension_socket = clientsocket(serialization="json")
extension_socket.connect("127.0.0.1", int(port))
else:
extension_socket = None
logger.debug("BROWSER %i: BrowserManager ready." % browser_params["browser_id"])
# passes the profile folder back to the
# TaskManager to signal a successful startup
status_queue.put(("STATUS", "Browser Ready", (prof_folder, "READY")))
browser_params["profile_path"] = prof_folder
# starts accepting arguments until told to die
while True:
# no command for now -> sleep to avoid pegging CPU on blocking get
if command_queue.empty():
time.sleep(0.001)
continue
command = command_queue.get()
if type(command) is ShutdownCommand:
# Geckodriver creates a copy of the profile (and the original
# temp file created by FirefoxProfile() is deleted).
# We clear the profile attribute here to prevent prints from:
# https://github.com/SeleniumHQ/selenium/blob/4e4160dd3d2f93757cafb87e2a1c20d6266f5554/py/selenium/webdriver/firefox/webdriver.py#L193-L199
if driver.profile and not os.path.isdir(driver.profile.path):
driver.profile = None
driver.quit()
status_queue.put("OK")
return
logger.info(
"BROWSER %i: EXECUTING COMMAND: %s"
% (browser_params["browser_id"], str(command))
)
# attempts to perform an action and return an OK signal
# if command fails for whatever reason, tell the TaskManager to
# kill and restart its worker processes
try:
command_executor.execute_command(
command,
driver,
browser_settings,
browser_params,
manager_params,
extension_socket,
)
status_queue.put("OK")
except WebDriverException:
status_obj = Status()
# We handle WebDriverExceptions separately here because they
# are quite common, and we often still have a handle to the
# browser, allowing us to run the SHUTDOWN command.
string_tb = traceback.format_exception(*sys.exc_info())
if "about:neterror" in string_tb[-1]:
status_obj.set_name("NETERROR")
status_queue.put(pickle.dumps(status_obj))
continue
extra = parse_traceback_for_sentry(string_tb)
extra["exception"] = string_tb[-1]
logger.error(
"BROWSER %i: WebDriverException while executing command"
% browser_params["browser_id"],
exc_info=True,
extra=extra,
)
status_obj.set_name("FAILED")
status_obj.tb = sys.exc_info()
status_queue.put(pickle.dumps(status_obj))
except (ProfileLoadError, BrowserConfigError, AssertionError) as e:
status_obj = Status()
logger.error(
"BROWSER %i: %s thrown, informing parent and raising"
% (browser_params["browser_id"], e.__class__.__name__)
)
status_obj.set_name("CRITICAL")
status_obj.tb = sys.exc_info()
status_queue.put(pickle.dumps(status_obj))
return
except Exception:
status_obj = Status()
string_tb = traceback.format_exception(*sys.exc_info())
extra = parse_traceback_for_sentry(string_tb)
extra["exception"] = tb[-1]
logger.error(
"BROWSER %i: Crash in driver, restarting browser manager"
% browser_params["browser_id"],
exc_info=True,
extra=extra,
)
status_obj.set_name("FAILED")
(status_obj.error_class,status_obj.error_value, status_obj.tb) = sys.exc_info()
status_queue.put(pickle.dumps(status_obj))
return
|
55,613 |
def bool_converter(s):
"""Convert variable to boolean
:param s: a variable
:return: the return of the builtin bool() function except if the variable is equal to a str
representation of the boolean value
"""
answer = bool(s)
if isinstance(s, str):
if s in ('False', 'false', '0'):
answer = False
return answer
|
def bool_converter(s):
"""Return the same as built-in function bool() except for arguments which are
string representations of a boolean value.
:param s: a variable
:return: the return of the builtin bool() function except if the variable is equal to a str
representation of the boolean value
"""
answer = bool(s)
if isinstance(s, str):
if s in ('False', 'false', '0'):
answer = False
return answer
|
25,935 |
def validate_key_import_type(ns):
# Default value of kty is: RSA
kty = getattr(ns, 'kty', None)
crv = getattr(ns, 'curve', None)
if (kty == 'EC' and crv is None) or (kty != 'EC' and crv):
from azure.cli.core.azclierror import ValidationError
raise ValidationError('parameter --curve should be specified when key type is EC.')
|
def validate_key_import_type(ns):
# Default value of kty is: RSA
kty = getattr(ns, 'kty', None)
crv = getattr(ns, 'curve', None)
if (kty == 'EC' and crv is None) or (kty != 'EC' and crv):
from azure.cli.core.azclierror import ValidationError
raise ValidationError('parameter --curve should be specified when key type --kty is EC.')
|
27,287 |
def memoize(fn):
"""
Cache function calls and keep results until all of the arguments are alive.
"""
memo = WeakCache()
@functools.wraps(fn)
def helper(*args):
try:
result = memo[args]
except KeyError:
result = fn(*args)
memo[args] = result
return result
return helper
|
def memoize(fn):
"""
Cache function calls and keep results until all of the arguments are no longer alive.
"""
memo = WeakCache()
@functools.wraps(fn)
def helper(*args):
try:
result = memo[args]
except KeyError:
result = fn(*args)
memo[args] = result
return result
return helper
|
34,305 |
def add_confused_intents_to_report(
report: Dict, target_intents: Iterable[Any], predicted_intents: Iterable[Any]
) -> Dict:
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
cnf_matrix = confusion_matrix(target_intents, predicted_intents)
indices = np.argsort(cnf_matrix, axis=1)
labels = unique_labels(target_intents, predicted_intents)
n_candidates = min(3, len(labels))
for label in labels:
if report.get(label):
report[label]["confused_with"] = {}
for i, label in enumerate(labels):
for j in range(n_candidates):
label_idx = indices[i, -j]
_label = labels[label_idx]
num_hits = int(cnf_matrix[i, label_idx])
if _label != label and num_hits > 0:
report[label]["confused_with"][_label] = num_hits
return report
|
def add_confused_intents_to_report(
report: Dict, target_intents: Iterable[Any], predicted_intents: Iterable[Any]
) -> Dict:
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
cnf_matrix = confusion_matrix(target_intents, predicted_intents)
indices = np.argsort(cnf_matrix, axis=1)
labels = unique_labels(target_intents, predicted_intents)
n_candidates = min(3, len(labels))
for label in labels:
if report.get(label):
report[label]["confused_with"] = {}
for i, label in enumerate(labels):
for j in range(n_candidates):
label_idx = indices[i, -j]
label_of_confused_with = labels[label_idx]
num_hits = int(cnf_matrix[i, label_idx])
if _label != label and num_hits > 0:
report[label]["confused_with"][_label] = num_hits
return report
|
46,411 |
def multi_ts_support(func):
"""
This decorator further adapts the metrics that took as input two univariate/multivariate `TimeSeries` instances,
adding support for equally-sized array of `TimeSeries` instances. The decorator computes the pairwise metric for
`TimeSeries` with the same indices, and returns a float value that is computed as a function of all the
pairwise metrics using a `inter_reduction` subroutine passed as argument to the metric function.
If a 'Sequence[TimeSeries]' is passed as input, this decorator provides also parallelisation of the metric
evaluation regarding different `TimeSeries` (if the `n_jobs` parameter is not set 1).
"""
@wraps(func)
def wrapper_multi_ts_support(*args, **kwargs):
actual_series = kwargs['actual_series'] if 'actual_series' in kwargs else args[0]
pred_series = kwargs['pred_series'] if 'pred_series' in kwargs else args[0] if 'actual_series' in kwargs \
else args[1]
n_jobs = kwargs.pop('n_jobs', 1)
verbose = kwargs.pop('verbose', False)
raise_if_not(isinstance(n_jobs, int), "n_jobs must be an integer")
raise_if_not(isinstance(verbose, bool), "verbose must be a bool")
actual_series = [actual_series] if not isinstance(actual_series, Sequence) else actual_series
pred_series = [pred_series] if not isinstance(pred_series, Sequence) else pred_series
raise_if_not(len(actual_series) == len(pred_series),
"The two TimeSeries arrays must have the same length.", logger)
num_series_in_args = int('actual_series' not in kwargs) + int('pred_series' not in kwargs)
kwargs.pop('actual_series', 0)
kwargs.pop('pred_series', 0)
iterator = _build_tqdm_iterator(iterable=zip(actual_series, pred_series),
verbose=verbose,
total=len(actual_series))
value_list = _parallel_apply(iterator=iterator,
fn=func,
n_jobs=n_jobs,
fn_args=args[num_series_in_args:],
fn_kwargs=kwargs)
if 'inter_reduction' in kwargs:
return kwargs['inter_reduction'](value_list)
else:
return signature(func).parameters['inter_reduction'].default(value_list)
return wrapper_multi_ts_support
|
def multi_ts_support(func):
"""
This decorator further adapts the metrics that took as input two univariate/multivariate `TimeSeries` instances,
adding support for equally-sized sequence of `TimeSeries` instances. The decorator computes the pairwise metric for
`TimeSeries` with the same indices, and returns a float value that is computed as a function of all the
pairwise metrics using a `inter_reduction` subroutine passed as argument to the metric function.
If a 'Sequence[TimeSeries]' is passed as input, this decorator provides also parallelisation of the metric
evaluation regarding different `TimeSeries` (if the `n_jobs` parameter is not set 1).
"""
@wraps(func)
def wrapper_multi_ts_support(*args, **kwargs):
actual_series = kwargs['actual_series'] if 'actual_series' in kwargs else args[0]
pred_series = kwargs['pred_series'] if 'pred_series' in kwargs else args[0] if 'actual_series' in kwargs \
else args[1]
n_jobs = kwargs.pop('n_jobs', 1)
verbose = kwargs.pop('verbose', False)
raise_if_not(isinstance(n_jobs, int), "n_jobs must be an integer")
raise_if_not(isinstance(verbose, bool), "verbose must be a bool")
actual_series = [actual_series] if not isinstance(actual_series, Sequence) else actual_series
pred_series = [pred_series] if not isinstance(pred_series, Sequence) else pred_series
raise_if_not(len(actual_series) == len(pred_series),
"The two TimeSeries arrays must have the same length.", logger)
num_series_in_args = int('actual_series' not in kwargs) + int('pred_series' not in kwargs)
kwargs.pop('actual_series', 0)
kwargs.pop('pred_series', 0)
iterator = _build_tqdm_iterator(iterable=zip(actual_series, pred_series),
verbose=verbose,
total=len(actual_series))
value_list = _parallel_apply(iterator=iterator,
fn=func,
n_jobs=n_jobs,
fn_args=args[num_series_in_args:],
fn_kwargs=kwargs)
if 'inter_reduction' in kwargs:
return kwargs['inter_reduction'](value_list)
else:
return signature(func).parameters['inter_reduction'].default(value_list)
return wrapper_multi_ts_support
|
31,945 |
def extract_using_tarfile(file_path: str, dir_path: str, file_name: str):
file_list = []
tar_type = 'r:'
if '.tar.gz' in file_name:
tar_type = 'r:gz'
tar = tarfile.open(file_path, tar_type)
tar.extractall(dir_path)
for root, _, files in os.walk(dir_path):
for file_ in files:
file_path = os.path.join(root, file_)
file_list.append(file_path)
return file_list
|
def extract_using_tarfile(file_path: str, dir_path: str, file_name: str) -> str:
file_list = []
tar_type = 'r:'
if '.tar.gz' in file_name:
tar_type = 'r:gz'
tar = tarfile.open(file_path, tar_type)
tar.extractall(dir_path)
for root, _, files in os.walk(dir_path):
for file_ in files:
file_path = os.path.join(root, file_)
file_list.append(file_path)
return file_list
|
54,964 |
def tensor_wrapper(obj):
"""Decorator that wraps existing NumPy functions so that they return
a PennyLane :class:`~.tensor`.
Only if the decorated function returns an ``ndarray`` is the
output converted to a :class:`~.tensor`; this avoids superfluous conversion
of scalars and other native-Python types.
"""
@functools.wraps(obj)
def _wrapped(*args, **kwargs):
"""Wrapped NumPy function"""
tensor_kwargs = {}
if "requires_grad" in kwargs:
tensor_kwargs["requires_grad"] = kwargs.pop("requires_grad")
else:
tensor_args = list(extract_tensors(args))
if tensor_args:
# Unless the user specifies otherwise, if all tensors in the argument
# list are non-trainable, the output is also non-trainable.
# NOTE: Use of Python's ``all`` results in an infinite recursion,
# and I'm not sure why. Using ``np.all`` works fine.
tensor_kwargs["requires_grad"] = not _np.all(
[not i.requires_grad for i in tensor_args]
)
# evaluate the original object
res = obj(*args, **kwargs)
if isinstance(res, _np.ndarray):
# only if the output of the object is a ndarray,
# then convert to a PennyLane tensor
res = tensor(res, **tensor_kwargs)
return res
return _wrapped
|
def tensor_wrapper(obj):
"""Loop through an object's symbol table,
a PennyLane :class:`~.tensor`.
Only if the decorated function returns an ``ndarray`` is the
output converted to a :class:`~.tensor`; this avoids superfluous conversion
of scalars and other native-Python types.
"""
@functools.wraps(obj)
def _wrapped(*args, **kwargs):
"""Wrapped NumPy function"""
tensor_kwargs = {}
if "requires_grad" in kwargs:
tensor_kwargs["requires_grad"] = kwargs.pop("requires_grad")
else:
tensor_args = list(extract_tensors(args))
if tensor_args:
# Unless the user specifies otherwise, if all tensors in the argument
# list are non-trainable, the output is also non-trainable.
# NOTE: Use of Python's ``all`` results in an infinite recursion,
# and I'm not sure why. Using ``np.all`` works fine.
tensor_kwargs["requires_grad"] = not _np.all(
[not i.requires_grad for i in tensor_args]
)
# evaluate the original object
res = obj(*args, **kwargs)
if isinstance(res, _np.ndarray):
# only if the output of the object is a ndarray,
# then convert to a PennyLane tensor
res = tensor(res, **tensor_kwargs)
return res
return _wrapped
|
19,990 |
def find_color_card(img, threshold='adaptgauss', threshvalue=125, blurry=False, background='dark'):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Inputs:
img = Input RGB image data containing a color card.
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss)
thresh_value = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background either 'dark' or 'light (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param img: numpy.ndarray
:param threshold: str
:param threshvalue: int
:param blurry: bool
:param background: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = img.shape
totalpx = float(height * width)
# Minimum and maximum square size based upon 12 MP image
minarea = 1000. / 12000000. * totalpx
maxarea = 8000000. / 12000000. * totalpx
# Create gray image for further processing
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold == "otsu":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold == "normal":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold == "adaptgauss":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Threshold ' + str(threshold) + ' is not "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
edges = skimage.feature.canny(threshold)
edges.dtype = 'uint8'
# Compute contours to find the squares of the card
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and minarea < marea[index] < maxarea:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.15 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
mwhratio.append(wh[0] / wh[1])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or 5:
msquarecoords.append(approx)
else: # It's not square
msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
locarea = {'index': mindex, 'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio,
'Area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(locarea)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card
# isolate area measurements
filtered_area = df['Area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers
# Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Extract the starting coordinate
start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['X'].max() - df['X'].min()) / 3
spacingy_short = (df['Y'].max() - df['Y'].min()) / 3
spacingx_long = (df['X'].max() - df['X'].min()) / 5
spacingy_long = (df['Y'].max() - df['Y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
return df, start_coord, spacing
|
def find_color_card(img, threshold='adaptgauss', threshvalue=125, blurry=False, background='dark'):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Inputs:
rgb_img = Input RGB image data containing a color card.
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss)
thresh_value = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background either 'dark' or 'light (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param img: numpy.ndarray
:param threshold: str
:param threshvalue: int
:param blurry: bool
:param background: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = img.shape
totalpx = float(height * width)
# Minimum and maximum square size based upon 12 MP image
minarea = 1000. / 12000000. * totalpx
maxarea = 8000000. / 12000000. * totalpx
# Create gray image for further processing
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold == "otsu":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold == "normal":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold == "adaptgauss":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Threshold ' + str(threshold) + ' is not "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
edges = skimage.feature.canny(threshold)
edges.dtype = 'uint8'
# Compute contours to find the squares of the card
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and minarea < marea[index] < maxarea:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.15 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
mwhratio.append(wh[0] / wh[1])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or 5:
msquarecoords.append(approx)
else: # It's not square
msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
locarea = {'index': mindex, 'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio,
'Area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(locarea)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card
# isolate area measurements
filtered_area = df['Area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers
# Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Extract the starting coordinate
start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['X'].max() - df['X'].min()) / 3
spacingy_short = (df['Y'].max() - df['Y'].min()) / 3
spacingx_long = (df['X'].max() - df['X'].min()) / 5
spacingy_long = (df['Y'].max() - df['Y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
return df, start_coord, spacing
|
5,401 |
def path_exists(path, saltenv="base"):
"""
Return ``True`` if a path exists in the state tree, ``False`` otherwise. The path
could refer to a file or directory.
:param str path: The fully qualified path to a file or directory in the state tree.
:param str saltenv: The fileserver environment to search. Default: ``base``
CLI Example:
.. code-block:: bash
salt '*' slsutil.file_exists nginx/defaults.yaml
"""
return file_exists(path, saltenv) or dir_exists(path, saltenv)
|
def path_exists(path, saltenv="base"):
"""
Return ``True`` if a path exists in the state tree, ``False`` otherwise. The path
could refer to a file or directory.
:param str path: The fully qualified path to a file or directory in the state tree.
:param str saltenv: The fileserver environment to search. Default: ``base``
CLI Example:
.. code-block:: bash
salt '*' slsutil.path_exists nginx/defaults.yaml
"""
return file_exists(path, saltenv) or dir_exists(path, saltenv)
|
30,298 |
def send_email(to, subject, body="", bcc=None, cc=None, replyTo=None, htmlBody=None,
attachIDs="", attachCIDs="", attachNames="", from_mailbox=None, manualAttachObj=None):
account = get_account(from_mailbox or ACCOUNT_EMAIL)
bcc = bcc.split(",") if bcc else None
cc = cc.split(",") if cc else None
to = to.split(",") if to else None
manualAttachObj = manualAttachObj if manualAttachObj is not None else []
subject = subject[:252] + '...' if len(subject) > 255 else subject
file_entries_for_attachments = [] # type: list
file_entries_for_attachments_inline = []
attachments_names = [] # type: list
attachments_names_inline = []
if attachIDs:
file_entries_for_attachments = attachIDs.split(",")
if attachNames:
attachments_names = attachNames.split(",")
else:
for att_id in file_entries_for_attachments:
att_name = demisto.getFilePath(att_id)['name']
if isinstance(att_name, list):
att_name = att_name[0]
attachments_names.append(att_name)
if len(file_entries_for_attachments) != len(attachments_names):
raise Exception("attachIDs and attachNames lists should be the same length")
if attachCIDs:
file_entries_for_attachments_inline = attachCIDs.split(",")
for att_id_inline in file_entries_for_attachments_inline:
att_name_inline = demisto.getFilePath(att_id_inline)['name']
if isinstance(att_name_inline, list):
att_name_inline = att_name_inline[0]
attachments_names_inline.append(att_name_inline)
attachments = collect_manual_attachments(manualAttachObj)
for i in range(0, len(file_entries_for_attachments)):
entry_id = file_entries_for_attachments[i]
attachment_name = attachments_names[i]
try:
res = demisto.getFilePath(entry_id)
except Exception as ex:
raise Exception("entry {} does not contain a file: {}".format(entry_id, str(ex)))
file_path = res["path"]
with open(file_path, 'rb') as f:
attachments.append(FileAttachment(content=f.read(), name=attachment_name))
for i in range(0, len(file_entries_for_attachments_inline)):
entry_id = file_entries_for_attachments_inline[i]
attachment_name_inline = attachments_names_inline[i]
try:
res = demisto.getFilePath(entry_id)
except Exception as ex:
raise Exception("entry %s does not contain a file" % entry_id)
file_path = res["path"]
with open(file_path, 'rb') as f:
attachments.append(FileAttachment(content=f.read(), name=attachment_name_inline, is_inline=True,
content_id=attachment_name_inline))
send_email_to_mailbox(account, to, subject, body, bcc, cc, replyTo, htmlBody, attachments)
result_object = {
'from': account.primary_smtp_address,
'to': to,
'subject': subject,
'attachments': attachments_names
}
return {
'Type': entryTypes['note'],
'Contents': result_object,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Sent email', result_object),
}
|
def send_email(to, subject, body="", bcc=None, cc=None, replyTo=None, htmlBody=None,
attachIDs="", attachCIDs="", attachNames="", from_mailbox=None, manualAttachObj=None):
account = get_account(from_mailbox or ACCOUNT_EMAIL)
bcc = bcc.split(",") if bcc else None
cc = cc.split(",") if cc else None
to = to.split(",") if to else None
manualAttachObj = manualAttachObj if manualAttachObj is not None else []
subject = subject[:252] + '...' if len(subject) > 255 else subject
file_entries_for_attachments = [] # type: list
file_entries_for_attachments_inline = []
attachments_names = [] # type: list
attachments_names_inline = []
if attachIDs:
file_entries_for_attachments = attachIDs.split(",")
if attachNames:
attachments_names = attachNames.split(",")
else:
for att_id in file_entries_for_attachments:
att_name = demisto.getFilePath(att_id)['name']
if isinstance(att_name, list):
att_name = att_name[0]
attachments_names.append(att_name)
if len(file_entries_for_attachments) != len(attachments_names):
raise Exception("attachIDs and attachNames lists should be the same length")
if attachCIDs:
file_entries_for_attachments_inline = attachCIDs.split(",")
for att_id_inline in file_entries_for_attachments_inline:
att_name_inline = demisto.getFilePath(att_id_inline)['name']
if isinstance(att_name_inline, list):
att_name_inline = att_name_inline[0]
attachments_names_inline.append(att_name_inline)
attachments = collect_manual_attachments(manualAttachObj)
for i in range(0, len(file_entries_for_attachments)):
entry_id = file_entries_for_attachments[i]
attachment_name = attachments_names[i]
try:
res = demisto.getFilePath(entry_id)
except Exception as ex:
raise Exception("entry {} does not contain a file: {}".format(entry_id, str(ex)))
file_path = res["path"]
with open(file_path, 'rb') as f:
attachments.append(FileAttachment(content=f.read(), name=attachment_name))
for entry_id, attachment_name_inline in file_entries_for_attachments_inline:
entry_id = file_entries_for_attachments_inline[i]
attachment_name_inline = attachments_names_inline[i]
try:
res = demisto.getFilePath(entry_id)
except Exception as ex:
raise Exception("entry %s does not contain a file" % entry_id)
file_path = res["path"]
with open(file_path, 'rb') as f:
attachments.append(FileAttachment(content=f.read(), name=attachment_name_inline, is_inline=True,
content_id=attachment_name_inline))
send_email_to_mailbox(account, to, subject, body, bcc, cc, replyTo, htmlBody, attachments)
result_object = {
'from': account.primary_smtp_address,
'to': to,
'subject': subject,
'attachments': attachments_names
}
return {
'Type': entryTypes['note'],
'Contents': result_object,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Sent email', result_object),
}
|
32,224 |
def target_filter(rule, target):
"""
Args:
rule (dict): A rule from the panorama instance.
target (num): A serial number to filter the rule on
Returns:
if the rule contains the target return True else False.
"""
target_entry = rule.get('target', {}).get('devices', {}).get('entry')
if not isinstance(target_entry, list):
target_entry = [target_entry]
for entry in target_entry:
if entry.get('@name') == target:
return True
return False
|
def target_filter(rule, target):
"""
Args:
rule (dict): A rule from the panorama instance.
target (int/string): A serial number to filter the rule on
Returns:
if the rule contains the target return True else False.
"""
target_entry = rule.get('target', {}).get('devices', {}).get('entry')
if not isinstance(target_entry, list):
target_entry = [target_entry]
for entry in target_entry:
if entry.get('@name') == target:
return True
return False
|
39,024 |
def test_str_limited_good():
max_length = 5
class Model(BaseModel):
v: StrLimited[max_length]
m = Model(v='abcde')
assert m.v == 'abcde'
|
def test_str_limited_good():
max_length = 5
class Model(BaseModel):
v: StrLimited[max_length]
m = Model(v='abcde ')
assert m.v == 'abcde'
|
42,107 |
def upgrade():
bind = op.get_bind()
sa.Enum(IntermediateValueModel.TrialIntermediateValueType).create(bind, checkfirst=True)
# MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE <tbl_name>
# ADD COLUMN <col_name> ... DEFAULT "FINITE_OR_NAN"', but seemingly Alembic
# does not support such a SQL statement. So first add a column with schema-level
# default value setting, then remove it by `batch_op.alter_column()`.
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.add_column(
sa.Column(
"intermediate_value_type",
sa.Enum("FINITE", "INF_POS", "INF_NEG", "NAN", name="floattypeenum"),
nullable=False,
server_default="FINITE",
),
)
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.alter_column("intermediate_value_type", server_default=None)
session = orm.Session(bind=bind)
try:
records = session.query(IntermediateValueModel).all()
mapping = []
for r in records:
value: float
if np.isclose(r.intermediate_value, RDB_MAX_FLOAT) or np.isposinf(
r.intermediate_value
):
value = np.inf
elif np.isclose(r.intermediate_value, RDB_MIN_FLOAT) or np.isneginf(
r.intermediate_value
):
value = -np.inf
elif np.isnan(r.intermediate_value):
value = np.nan
else:
value = r.intermediate_value
(
sanitized_value,
float_type,
) = IntermediateValueModel._intermediate_value_to_stored_repr(value)
mapping.append(
{
"trial_intermediate_value_id": r.trial_intermediate_value_id,
"intermediate_value_type": float_type,
"intermediate_value": sanitized_value,
}
)
session.bulk_update_mappings(IntermediateValueModel, mapping)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise e
finally:
session.close()
|
def upgrade():
bind = op.get_bind()
sa.Enum(IntermediateValueModel.TrialIntermediateValueType).create(bind, checkfirst=True)
# MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE <tbl_name>
# ADD COLUMN <col_name> ... DEFAULT "FINITE_OR_NAN"', but seemingly Alembic
# does not support such a SQL statement. So first add a column with schema-level
# default value setting, then remove it by `batch_op.alter_column()`.
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.add_column(
sa.Column(
"intermediate_value_type",
sa.Enum("FINITE", "INF_POS", "INF_NEG", "NAN", name="floattypeenum"),
nullable=False,
server_default="FINITE",
),
)
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.alter_column("intermediate_value_type", server_default=None)
session = orm.Session(bind=bind)
try:
records = session.query(IntermediateValueModel).all()
mapping = []
for r in records:
value: float
if np.isclose(r.intermediate_value, RDB_MAX_FLOAT) or np.isposinf(
r.intermediate_value
):
value = np.inf
elif np.isclose(r.intermediate_value, RDB_MIN_FLOAT) or np.isneginf(
r.intermediate_value
):
value = -np.inf
elif np.isnan(r.intermediate_value):
value = float("nan")
else:
value = r.intermediate_value
(
sanitized_value,
float_type,
) = IntermediateValueModel._intermediate_value_to_stored_repr(value)
mapping.append(
{
"trial_intermediate_value_id": r.trial_intermediate_value_id,
"intermediate_value_type": float_type,
"intermediate_value": sanitized_value,
}
)
session.bulk_update_mappings(IntermediateValueModel, mapping)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise e
finally:
session.close()
|
33,036 |
def remove_duplicates(items: List[T]) -> List[T]:
"Return list with any repeats removed"
# We use a dictionary to simulate an ordered set
return list({x: None for x in items})
|
def remove_duplicates(items: List[T]) -> List[T]:
"Return list with any repetitions removed"
# We use a dictionary to simulate an ordered set
return list({x: None for x in items})
|
1,200 |
def read(csa_str):
''' Read CSA header from string `csa_str`
Parameters
----------
csa_str : str
byte string containing CSA header information
Returns
-------
header : dict
header information as dict, where `header` has fields (at least)
``type, n_tags, tags``. ``header['tags']`` is also a dictionary
with one key, value pair for each tag in the header.
'''
csa_len = len(csa_str)
csa_dict = {'tags': {}}
hdr_id = csa_str[:4]
up_str = Unpacker(csa_str, endian='<')
if hdr_id == b'SV10': # CSA2
hdr_type = 2
up_str.ptr = 4 # omit the SV10
csa_dict['unused0'] = up_str.read(4)
else: # CSA1
hdr_type = 1
csa_dict['type'] = hdr_type
csa_dict['n_tags'], csa_dict['check'] = up_str.unpack('2I')
if not 0 < csa_dict['n_tags'] <= MAX_CSA_ITEMS:
raise CSAReadError('Number of tags `t` should be '
'0 < t <= %s' % MAX_CSA_ITEMS)
for tag_no in range(csa_dict['n_tags']):
name, vm, vr, syngodt, n_items, last3 = \
up_str.unpack('64si4s3i')
vr = nt_str(vr)
name = nt_str(name)
tag = {'n_items': n_items,
'vm': vm, # value multiplicity
'vr': vr, # value representation
'syngodt': syngodt,
'last3': last3,
'tag_no': tag_no}
if vm == 0:
n_values = n_items
else:
n_values = vm
# data converter
converter = _CONVERTERS.get(vr)
# CSA1 specific length modifier
if tag_no == 1:
tag0_n_items = n_items
if n_items > MAX_CSA_ITEMS:
raise CSAReadError('Expected <= {0} tags, got {1}'.format(
MAX_CSA_ITEMS, n_items))
items = []
for item_no in range(n_items):
x0, x1, x2, x3 = up_str.unpack('4i')
ptr = up_str.ptr
if hdr_type == 1: # CSA1 - odd length calculation
item_len = x0 - tag0_n_items
if item_len < 0 or (ptr + item_len) > csa_len:
if item_no < vm:
items.append('')
break
else: # CSA2
item_len = x1
if (ptr + item_len) > csa_len:
raise CSAReadError('Item is too long, '
'aborting read')
if item_no >= n_values:
assert item_len == 0
continue
item = nt_str(up_str.read(item_len))
if converter:
# we may have fewer real items than are given in
# n_items, but we don't know how many - assume that
# we've reached the end when we hit an empty item
if item_len == 0:
n_values = item_no
continue
item = converter(item)
items.append(item)
# go to 4 byte boundary
plus4 = item_len % 4
if plus4 != 0:
up_str.ptr += (4 - plus4)
tag['items'] = items
csa_dict['tags'][name] = tag
return csa_dict
|
def read(csa_str):
''' Read CSA header from string `csa_str`
Parameters
----------
csa_str : str
byte string containing CSA header information
Returns
-------
header : dict
header information as dict, where `header` has fields (at least)
``type, n_tags, tags``. ``header['tags']`` is also a dictionary
with one key, value pair for each tag in the header.
'''
csa_len = len(csa_str)
csa_dict = {'tags': {}}
hdr_id = csa_str[:4]
up_str = Unpacker(csa_str, endian='<')
if hdr_id == b'SV10': # CSA2
hdr_type = 2
up_str.ptr = 4 # omit the SV10
csa_dict['unused0'] = up_str.read(4)
else: # CSA1
hdr_type = 1
csa_dict['type'] = hdr_type
csa_dict['n_tags'], csa_dict['check'] = up_str.unpack('2I')
if not 0 < csa_dict['n_tags'] <= MAX_CSA_ITEMS:
raise CSAReadError('Number of tags `t` should be '
'0 < t <= %d' % MAX_CSA_ITEMS)
for tag_no in range(csa_dict['n_tags']):
name, vm, vr, syngodt, n_items, last3 = \
up_str.unpack('64si4s3i')
vr = nt_str(vr)
name = nt_str(name)
tag = {'n_items': n_items,
'vm': vm, # value multiplicity
'vr': vr, # value representation
'syngodt': syngodt,
'last3': last3,
'tag_no': tag_no}
if vm == 0:
n_values = n_items
else:
n_values = vm
# data converter
converter = _CONVERTERS.get(vr)
# CSA1 specific length modifier
if tag_no == 1:
tag0_n_items = n_items
if n_items > MAX_CSA_ITEMS:
raise CSAReadError('Expected <= {0} tags, got {1}'.format(
MAX_CSA_ITEMS, n_items))
items = []
for item_no in range(n_items):
x0, x1, x2, x3 = up_str.unpack('4i')
ptr = up_str.ptr
if hdr_type == 1: # CSA1 - odd length calculation
item_len = x0 - tag0_n_items
if item_len < 0 or (ptr + item_len) > csa_len:
if item_no < vm:
items.append('')
break
else: # CSA2
item_len = x1
if (ptr + item_len) > csa_len:
raise CSAReadError('Item is too long, '
'aborting read')
if item_no >= n_values:
assert item_len == 0
continue
item = nt_str(up_str.read(item_len))
if converter:
# we may have fewer real items than are given in
# n_items, but we don't know how many - assume that
# we've reached the end when we hit an empty item
if item_len == 0:
n_values = item_no
continue
item = converter(item)
items.append(item)
# go to 4 byte boundary
plus4 = item_len % 4
if plus4 != 0:
up_str.ptr += (4 - plus4)
tag['items'] = items
csa_dict['tags'][name] = tag
return csa_dict
|
58,318 |
def draw(
G: nx.Graph,
pos: Dict[Hashable, np.ndarray],
lines_func: Callable,
color_by: Hashable = None,
node_color_by: Hashable = None,
lw_by: Hashable = None,
alpha_by: Hashable = None,
ax=None,
encodings_kwargs: Dict = {},
**linefunc_kwargs,
):
"""Draw edges to matplotlib axes.
## Parameters
- `G`: A NetworkX graph.
- `pos`: A dictionary mapping for x,y coordinates of a node.
- `lines_func`: One of the line drawing functions from `nxviz.lines`
- `color_by`: Categorical or quantitative edge attribute key to
color edges by. There are two special value for this parameter
when using directed graphs: "source_node_color" and
"target_node_color". It should be noted that if these values are
set, `node_color_by` also needs to be set.
- `node_color_by`: Node metadata attribute key that has been used to
color nodes.
- `lw_by`: Quantitative edge attribute key to determine line width.
- `alpha_by`: Quantitative edge attribute key to determine transparency.
- `ax`: Matplotlib axes object to plot onto.
- `encodings_kwargs`: A dictionary of kwargs
to determine the visual properties of the edge.
- `linefunc_kwargs`: All other keyword arguments passed in
will be passed onto the appropriate linefunc.
Special keyword arguments for `encodings_kwargs` include:
- `lw_scale`: A scaling factor for all edges' line widths.
Equivalent to multiplying all line widths by this number.
- `alpha_scale`: A scaling factor for all edges' line transparencies.
Equivalent to multiplying all alphas by this number.
The default transparency is 0.1,
so an alpha_scale of any number greater than or equal to 10
will result in 100% opaque lines.
Everything else passed in here will be passed
to the matplotlib Patch constructor;
see `nxviz.lines` for more information.
"""
nt = node_table(G)
et = edge_table(G)
if ax is None:
ax = plt.gca()
if color_by in ("source_node_color", "target_node_color"):
if not isinstance(G, nx.DiGraph):
raise ValueError(
"Special values of `color_by`," " can only be set for directed graphs"
)
elif not node_color_by:
raise ValueError(
"When setting `color_by` to special values,"
"`node_color_by` also needs to be set."
)
edge_color = edge_colors(et, nt, color_by, node_color_by)
encodings_kwargs = deepcopy(encodings_kwargs)
lw = line_width(et, lw_by) * encodings_kwargs.pop("lw_scale", 1.0)
alpha = transparency(et, alpha_by) * encodings_kwargs.pop("alpha_scale", 1.0)
aes_kw = {"facecolor": "none"}
aes_kw.update(encodings_kwargs)
patches = lines_func(
et,
pos,
edge_color=edge_color,
alpha=alpha,
lw=lw,
aes_kw=aes_kw,
**linefunc_kwargs,
)
for patch in patches:
ax.add_patch(patch)
|
def draw(
G: nx.Graph,
pos: Dict[Hashable, np.ndarray],
lines_func: Callable,
color_by: Hashable = None,
node_color_by: Hashable = None,
lw_by: Hashable = None,
alpha_by: Hashable = None,
ax=None,
encodings_kwargs: Dict = {},
**linefunc_kwargs,
):
"""Draw edges to matplotlib axes.
## Parameters
- `G`: A NetworkX graph.
- `pos`: A dictionary mapping for x,y coordinates of a node.
- `lines_func`: One of the line drawing functions from `nxviz.lines`
- `color_by`: Categorical or quantitative edge attribute key to color edges by.
There are two special value for this parameter
when using directed graphs:
"source_node_color" and "target_node_color".
If these values are set, then `node_color_by` also needs to be set.
- `node_color_by`: Node metadata attribute key
that has been used to color nodes.
- `lw_by`: Quantitative edge attribute key to determine line width.
- `alpha_by`: Quantitative edge attribute key to determine transparency.
- `ax`: Matplotlib axes object to plot onto.
- `encodings_kwargs`: A dictionary of kwargs
to determine the visual properties of the edge.
- `linefunc_kwargs`: All other keyword arguments passed in
will be passed onto the appropriate linefunc.
Special keyword arguments for `encodings_kwargs` include:
- `lw_scale`: A scaling factor for all edges' line widths.
Equivalent to multiplying all line widths by this number.
- `alpha_scale`: A scaling factor for all edges' line transparencies.
Equivalent to multiplying all alphas by this number.
The default transparency is 0.1,
so an alpha_scale of any number greater than or equal to 10
will result in 100% opaque lines.
Everything else passed in here will be passed
to the matplotlib Patch constructor;
see `nxviz.lines` for more information.
"""
nt = node_table(G)
et = edge_table(G)
if ax is None:
ax = plt.gca()
if color_by in ("source_node_color", "target_node_color"):
if not isinstance(G, nx.DiGraph):
raise ValueError(
"Special values of `color_by`," " can only be set for directed graphs"
)
elif not node_color_by:
raise ValueError(
"When setting `color_by` to special values,"
"`node_color_by` also needs to be set."
)
edge_color = edge_colors(et, nt, color_by, node_color_by)
encodings_kwargs = deepcopy(encodings_kwargs)
lw = line_width(et, lw_by) * encodings_kwargs.pop("lw_scale", 1.0)
alpha = transparency(et, alpha_by) * encodings_kwargs.pop("alpha_scale", 1.0)
aes_kw = {"facecolor": "none"}
aes_kw.update(encodings_kwargs)
patches = lines_func(
et,
pos,
edge_color=edge_color,
alpha=alpha,
lw=lw,
aes_kw=aes_kw,
**linefunc_kwargs,
)
for patch in patches:
ax.add_patch(patch)
|
28,026 |
def __register_export(parser):
"""
Add argparser subcommand for the "export run by run name action"
"""
parser.add_argument('-n', '--name',
type=str,
dest="name",
metavar='RUN_NAME',
default=argparse.SUPPRESS,
required=True,
help="Name of the run where findings are to "
"be exporter")
|
def __register_export(parser):
"""
Add argparser subcommand for the "export run by run name action"
"""
parser.add_argument('-n', '--name',
type=str,
dest="name",
metavar='RUN_NAME',
default=argparse.SUPPRESS,
required=True,
help="Name of the analysis run.")
# __add_filtering_arguments(group, DEFAULT_FILTER_VALUES)
|
57,601 |
def fetch_production(zone_key='AM', session=None, target_datetime=None, logger=None):
r = session or requests.session()
url = 'http://epso.am/poweren.htm'
response = r.get(url)
response.encoding = 'utf-8'
html_doc = response.text
soup = BeautifulSoup(html_doc, 'html.parser')
data_string = soup.find(text=re.compile('var'))
data_split = data_string.split('\r\n')
gas_tes = re.findall("[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", data_split[10])
gas_total = float(gas_tes[0])
hydro_ges = re.findall("[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", data_split[11])
hydro_altern = re.findall("[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", data_split[8])
hydro_total = float(hydro_ges[0])+float(hydro_altern[0])
nuclear_atom = re.findall("[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", data_split[9])
nuclear_total = float(nuclear_atom[0])
time_string = dparser.parse(data_split[14].split()[3],fuzzy=True)
date_time = time_string.replace(tzinfo=tz.gettz('Asia/Yerevan'))
#Operating solar, wind and biomass plants exist in small numbers, but are not reported yet
data = {
'zoneKey': zone_key,
'datetime': date_time,
'production': {
'gas': gas_total,
'hydro': hydro_total,
'nuclear': nuclear_total,
'coal': 0,
'geothermal': 0,
'oil': 0
},
'storage': {
'hydro storage': 0,
'battery storage': 0
},
'source': 'http://epso.am/poweren.htm'
}
return data
|
def fetch_production(zone_key='AM', session=None, target_datetime=None, logger=None):
r = session or requests.session()
url = 'http://epso.am/poweren.htm'
response = r.get(url)
response.encoding = 'utf-8'
html_doc = response.text
soup = BeautifulSoup(html_doc, 'html.parser')
data_string = soup.find(text=re.compile('var'))
data_split = data_string.split('\r\n')
gas_tes = re.findall("[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", data_split[10])
gas_total = float(gas_tes[0])
hydro_ges = re.findall("[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", data_split[11])
hydro_altern = re.findall("[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", data_split[8])
hydro_total = float(hydro_ges[0])+float(hydro_altern[0])
nuclear_atom = re.findall("[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", data_split[9])
nuclear_total = float(nuclear_atom[0])
yerevan = tz.gettz('Asia/Yerevan')
date_time = dparser.parse(data_split[14].split()[3], default=datetime.now(yerevan), fuzzy=True)
date_time = time_string.replace(tzinfo=tz.gettz('Asia/Yerevan'))
#Operating solar, wind and biomass plants exist in small numbers, but are not reported yet
data = {
'zoneKey': zone_key,
'datetime': date_time,
'production': {
'gas': gas_total,
'hydro': hydro_total,
'nuclear': nuclear_total,
'coal': 0,
'geothermal': 0,
'oil': 0
},
'storage': {
'hydro storage': 0,
'battery storage': 0
},
'source': 'http://epso.am/poweren.htm'
}
return data
|
44,860 |
def test_spark_udf(spark, model_path):
mlflow.pyfunc.save_model(
path=model_path,
loader_module=__name__,
code_path=[os.path.dirname(tests.__file__)],
)
with mock.patch("mlflow.pyfunc._warn_dependency_requirement_mismatches") as mock_check_fn:
reloaded_pyfunc_model = mlflow.pyfunc.load_model(model_path)
mock_check_fn.assert_called_once()
pandas_df = pd.DataFrame(data=np.ones((10, 10)), columns=[str(i) for i in range(10)])
spark_df = spark.createDataFrame(pandas_df)
# Test all supported return types
type_map = {
"float": (FloatType(), np.number),
"int": (IntegerType(), np.int32),
"double": (DoubleType(), np.number),
"long": (LongType(), int),
"string": (StringType(), None),
"bool": (BooleanType(), bool),
}
for tname, tdef in type_map.items():
spark_type, np_type = tdef
prediction_df = reloaded_pyfunc_model.predict(pandas_df)
for is_array in [True, False]:
t = ArrayType(spark_type) if is_array else spark_type
if tname == "string":
expected = prediction_df.applymap(str)
else:
expected = prediction_df.select_dtypes(np_type)
if tname == "float":
expected = expected.astype(np.float32)
if tname == "bool":
expected = expected.astype(bool)
expected = [list(row[1]) if is_array else row[1][0] for row in expected.iterrows()]
pyfunc_udf = spark_udf(spark, model_path, result_type=t)
new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns))
actual = list(new_df.select("prediction").toPandas()["prediction"])
assert expected == actual
if not is_array:
pyfunc_udf = spark_udf(spark, model_path, result_type=tname)
new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns))
actual = list(new_df.select("prediction").toPandas()["prediction"])
assert expected == actual
|
def test_spark_udf(spark, model_path):
mlflow.pyfunc.save_model(
path=model_path,
loader_module=__name__,
code_path=[os.path.dirname(tests.__file__)],
)
with mock.patch("mlflow.pyfunc._warn_dependency_requirement_mismatches") as mock_check_fn:
reloaded_pyfunc_model = mlflow.pyfunc.load_model(model_path)
mock_check_fn.assert_called_once()
pandas_df = pd.DataFrame(data=np.ones((10, 10)), columns=[str(i) for i in range(10)])
spark_df = spark.createDataFrame(pandas_df)
# Test all supported return types
type_map = {
"float": (FloatType(), np.number),
"int": (IntegerType(), np.int32),
"double": (DoubleType(), np.number),
"long": (LongType(), int),
"string": (StringType(), None),
"boolean": (BooleanType(), bool),
}
for tname, tdef in type_map.items():
spark_type, np_type = tdef
prediction_df = reloaded_pyfunc_model.predict(pandas_df)
for is_array in [True, False]:
t = ArrayType(spark_type) if is_array else spark_type
if tname == "string":
expected = prediction_df.applymap(str)
else:
expected = prediction_df.select_dtypes(np_type)
if tname == "float":
expected = expected.astype(np.float32)
if tname == "bool":
expected = expected.astype(bool)
expected = [list(row[1]) if is_array else row[1][0] for row in expected.iterrows()]
pyfunc_udf = spark_udf(spark, model_path, result_type=t)
new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns))
actual = list(new_df.select("prediction").toPandas()["prediction"])
assert expected == actual
if not is_array:
pyfunc_udf = spark_udf(spark, model_path, result_type=tname)
new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns))
actual = list(new_df.select("prediction").toPandas()["prediction"])
assert expected == actual
|
30,682 |
def validate_response(client, url, method='GET'):
"""
Get response from Chronicle Search API and validate it.
:param client: object of client class
:type client: object of client class
:param url: url
:type url: str
:param method: HTTP request method
:type method: str
:return: response
"""
for _ in range(3):
raw_response = client.http_client.request(url, method)
if not raw_response:
raise ValueError('Technical Error while making API call to Chronicle. Empty response received')
if raw_response[0].status == 500:
raise ValueError('Internal server error occurred, please try again later')
if raw_response[0].status == 429:
demisto.debug('API Rate limit exceeded. Retrying in {} seconds...'.format(1))
time.sleep(1)
continue
if raw_response[0].status != 200:
return_error(
'Status code: {}\nError: {}'.format(raw_response[0].status, parse_error_message(raw_response[1])))
try:
response = json.loads(raw_response[1])
return response
except json.decoder.JSONDecodeError:
raise ValueError('Invalid response format while making API call to Chronicle. Response not in JSON format')
else:
raise ValueError('API rate limit exceeded. Try after sometime.')
|
def validate_response(client, url, method='GET'):
"""
Get response from Chronicle Search API and validate it.
:param client: object of client class
:type client: object of client class
:param url: url
:type url: str
:param method: HTTP request method
:type method: str
:return: response
"""
for _ in range(3):
raw_response = client.http_client.request(url, method)
if not raw_response:
raise ValueError('Technical Error while making API call to Chronicle. Empty response received')
if raw_response[0].status == 500:
raise ValueError('Internal server error occurred, please try again later')
if raw_response[0].status == 429:
demisto.debug('API Rate limit exceeded. Retrying in {} seconds...'.format(1))
time.sleep(1)
continue
if raw_response[0].status != 200:
return_error(
'Status code: {}\nError: {}'.format(raw_response[0].status, parse_error_message(raw_response[1])))
try:
response = json.loads(raw_response[1])
return response
except json.decoder.JSONDecodeError:
raise ValueError('Invalid response format while making API call to Chronicle. Response not in JSON format')
else:
raise ValueError('API rate limit exceeded. Try again later.')
|
32,259 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
LOG(f'Command being called is {demisto.command()}')
try:
self_deployed = params.get('self_deployed', False)
client_credentials = params.get('client_credentials', False)
auth_and_token_url = params.get('auth_id') or params.get('credentials', {}).get('identifier') # client_id
enc_key = params.get('enc_key') or (params.get('credentials') or {}).get('password') # client_secret
certificate_thumbprint = params.get('certificate_thumbprint')
private_key = params.get('private_key')
self_deployed = True if client_credentials else self_deployed
if client_credentials and not enc_key:
raise DemistoException("Client Secret must be provided for client credentials flow.")
elif not self_deployed and not enc_key:
raise DemistoException('Key must be provided. For further information see '
'https://xsoar.pan.dev/docs/reference/articles/microsoft-integrations---authentication') # noqa: E501
elif not enc_key and not (certificate_thumbprint and private_key):
raise DemistoException('Key or Certificate Thumbprint and Private Key must be provided.')
client = Client(
self_deployed=self_deployed,
auth_and_token_url=auth_and_token_url, # client_id or auth_id
refresh_token=params.get('refresh_token'), # tenant_id or token
enc_key=enc_key, # client_secret or enc_key
redirect_uri=params.get('redirect_uri', ''),
auth_code=params.get('auth_code') if not client_credentials else '',
subscription_id=params.get('subscriptionID'),
resource_group_name=params.get('resourceGroupName'),
workspace_name=params.get('workspaceName'),
verify=not params.get('insecure', False),
proxy=params.get('proxy', False),
certificate_thumbprint=certificate_thumbprint,
private_key=private_key,
client_credentials=client_credentials
)
commands = {
'azure-log-analytics-execute-query': execute_query_command,
'azure-log-analytics-list-saved-searches': list_saved_searches_command,
'azure-log-analytics-get-saved-search-by-id': get_saved_search_by_id_command,
'azure-log-analytics-create-or-update-saved-search': create_or_update_saved_search_command,
'azure-log-analytics-delete-saved-search': delete_saved_search_command
}
if demisto.command() == 'test-module':
# cannot use test module due to the lack of ability to set refresh token to integration context
raise Exception("Please use !azure-log-analytics-test instead")
elif demisto.command() == 'azure-log-analytics-test':
test_connection(client, params)
elif demisto.command() in commands:
return_results(commands[demisto.command()](client, demisto.args())) # type: ignore
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
LOG(f'Command being called is {demisto.command()}')
try:
self_deployed = params.get('self_deployed', False)
client_credentials = params.get('client_credentials', False)
auth_and_token_url = params.get('auth_id') or params.get('credentials', {}).get('identifier') # client_id
enc_key = params.get('enc_key') or (params.get('credentials') or {}).get('password') # client_secret
certificate_thumbprint = params.get('certificate_thumbprint')
private_key = params.get('private_key')
self_deployed = self_deployed or client_credentials
if client_credentials and not enc_key:
raise DemistoException("Client Secret must be provided for client credentials flow.")
elif not self_deployed and not enc_key:
raise DemistoException('Key must be provided. For further information see '
'https://xsoar.pan.dev/docs/reference/articles/microsoft-integrations---authentication') # noqa: E501
elif not enc_key and not (certificate_thumbprint and private_key):
raise DemistoException('Key or Certificate Thumbprint and Private Key must be provided.')
client = Client(
self_deployed=self_deployed,
auth_and_token_url=auth_and_token_url, # client_id or auth_id
refresh_token=params.get('refresh_token'), # tenant_id or token
enc_key=enc_key, # client_secret or enc_key
redirect_uri=params.get('redirect_uri', ''),
auth_code=params.get('auth_code') if not client_credentials else '',
subscription_id=params.get('subscriptionID'),
resource_group_name=params.get('resourceGroupName'),
workspace_name=params.get('workspaceName'),
verify=not params.get('insecure', False),
proxy=params.get('proxy', False),
certificate_thumbprint=certificate_thumbprint,
private_key=private_key,
client_credentials=client_credentials
)
commands = {
'azure-log-analytics-execute-query': execute_query_command,
'azure-log-analytics-list-saved-searches': list_saved_searches_command,
'azure-log-analytics-get-saved-search-by-id': get_saved_search_by_id_command,
'azure-log-analytics-create-or-update-saved-search': create_or_update_saved_search_command,
'azure-log-analytics-delete-saved-search': delete_saved_search_command
}
if demisto.command() == 'test-module':
# cannot use test module due to the lack of ability to set refresh token to integration context
raise Exception("Please use !azure-log-analytics-test instead")
elif demisto.command() == 'azure-log-analytics-test':
test_connection(client, params)
elif demisto.command() in commands:
return_results(commands[demisto.command()](client, demisto.args())) # type: ignore
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
1,234 |
def is_fancy(sliceobj):
""" Returns True if sliceobj is attempting fancy indexing
Parameters
----------
sliceobj : object
something that can be used to slice an array as in ``arr[sliceobj]``
Returns
-------
tf: bool
True if sliceobj represents fancy indexing, False for basic indexing
"""
if not isinstance(sliceobj, tuple):
sliceobj = (sliceobj,)
for slicer in sliceobj:
if hasattr(slicer, 'dtype') and slicer.ndim > 0: # ndarray always fancy
return True
# slice or Ellipsis or None OK for basic
if isinstance(slicer, slice) or slicer in (None, Ellipsis):
continue
try:
int(slicer)
except TypeError:
return True
return False
|
def is_fancy(sliceobj):
""" Returns True if sliceobj is attempting fancy indexing
Parameters
----------
sliceobj : object
something that can be used to slice an array as in ``arr[sliceobj]``
Returns
-------
tf: bool
True if sliceobj represents fancy indexing, False for basic indexing
"""
if not isinstance(sliceobj, tuple):
sliceobj = (sliceobj,)
for slicer in sliceobj:
if getattr(slicer, 'ndim', 0) > 0: # ndarray always fancy, but scalars are safe
return True
# slice or Ellipsis or None OK for basic
if isinstance(slicer, slice) or slicer in (None, Ellipsis):
continue
try:
int(slicer)
except TypeError:
return True
return False
|
52,288 |
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# Default params
param = Param()
# Get parser info
fname_data = arguments.i
fname_mask = arguments.m
fname_mask_noise = arguments.m_noise
method = arguments.method
file_name = arguments.o
# Check parameters
if method in ['diff', 'single']:
if not fname_mask:
raise SCTArgumentParser.error(parser, f"You need to provide a mask with -method {method}.")
# Load data
im_data = Image(fname_data)
data = im_data.data
dim = len(data.shape)
if fname_mask:
mask = Image(fname_mask).data
# Check dimensionality
if method in ['diff', 'mult']:
if dim != 4:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 4.")
if method in ['single']:
if dim not in [3, 4]:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 3 or 4.")
# Check dimensionality of mask
if fname_mask:
if len(mask.shape) != 3:
raise ValueError(f"Input mask dimension: {dim}. Input dimension for the mask should be 3.")
# Retrieve selected volumes
index_vol = parse_num_list(arguments.vol)
if not index_vol:
if method in ['diff', 'mult']:
index_vol = range(data.shape[3])
elif method in ['single']:
index_vol = [0]
# Compute SNR
# NB: "time" is assumed to be the 4th dimension of the variable "data"
if method == 'mult':
# Compute mean and STD across time
data_mean = np.mean(data[:, :, :, index_vol], axis=3)
data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)
# Generate mask where std is different from 0
mask_std_nonzero = np.where(data_std > param.almost_zero)
snr_map = np.zeros_like(data_mean)
snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]
# Output SNR map
fname_snr = add_suffix(fname_data, '_SNR-' + method)
im_snr = empty_like(im_data)
im_snr.data = snr_map
im_snr.save(fname_snr, dtype=np.float32)
# Output non-zero mask
fname_stdnonzero = add_suffix(fname_data, '_mask-STD-nonzero' + method)
im_stdnonzero = empty_like(im_data)
data_stdnonzero = np.zeros_like(data_mean)
data_stdnonzero[mask_std_nonzero] = 1
im_stdnonzero.data = data_stdnonzero
im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
# Compute SNR in ROI
if fname_mask:
snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])
elif method == 'diff':
# Check user selected exactly 2 volumes for this method.
if not len(index_vol) == 2:
raise ValueError(f"Number of selected volumes: {len(index_vol)}. The method 'diff' should be used with "
f"exactly 2 volumes. You can specify the number of volumes with the flag '-vol'.")
data_2vol = np.take(data, index_vol, axis=3)
# Compute mean in ROI
data_mean = np.mean(data_2vol, axis=3)
mean_in_roi = np.average(data_mean, weights=mask)
data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
_, std_in_roi = weighted_avg_and_std(data_sub, mask)
# Compute SNR, correcting for Rayleigh noise (see eq. 7 in Dietrich et al.)
snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi
elif method == 'single':
# Check that the input volume is 3D, or if it is 4D, that the user selected exactly 1 volume for this method.
if dim == 3:
data3d = data
elif dim == 4:
if not len(index_vol) == 1:
raise ValueError(f"Selected volumes: {index_vol}. The method 'single' should be used with "
f"exactly 1 volume. You can specify the number of volumes with the flag '-vol'.")
data3d = np.squeeze(data[..., index_vol])
# Check that input noise mask is provided
if fname_mask_noise:
mask_noise = Image(fname_mask_noise).data
else:
raise RuntimeError("A noise mask is mandatory with '-method single'.")
# Compute mean in ROI
mean_in_roi = np.average(data3d, weights=mask)
# Compute standard deviation in background
std_in_roi = np.std(data3d[mask_noise])
# Compute SNR, correcting for Rayleigh noise (see eq. A12 in Dietrich et al.)
snr_roi = np.sqrt((4 - np.pi) / 2) * mean_in_roi / std_in_roi
# Display result
if fname_mask:
printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n', type='info')
# Added function for text file
if file_name is not None:
with open(file_name, "w") as f:
f.write(str(snr_roi))
printv('\nFile saved to ' + file_name)
|
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# Default params
param = Param()
# Get parser info
fname_data = arguments.i
fname_mask = arguments.m
fname_mask_noise = arguments.m_noise
method = arguments.method
file_name = arguments.o
# Check parameters
if method in ['diff', 'single']:
if not fname_mask:
raise SCTArgumentParser.error(parser, f"You need to provide a mask with -method {method}.")
# Load data
im_data = Image(fname_data)
data = im_data.data
dim = len(data.shape)
if fname_mask:
mask = Image(fname_mask).data
# Check dimensionality
if method in ['diff', 'mult']:
if dim != 4:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 4.")
if method in ['single']:
if dim not in [3, 4]:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 3 or 4.")
# Check dimensionality of mask
if fname_mask:
if len(mask.shape) != 3:
raise ValueError(f"Input mask dimension: {dim}. Input dimension for the mask should be 3.")
# Retrieve selected volumes
index_vol = parse_num_list(arguments.vol)
if not index_vol:
if method in ['diff', 'mult']:
index_vol = range(data.shape[3])
elif method in ['single']:
index_vol = [0]
# Compute SNR
# NB: "time" is assumed to be the 4th dimension of the variable "data"
if method == 'mult':
# Compute mean and STD across time
data_mean = np.mean(data[:, :, :, index_vol], axis=3)
data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)
# Generate mask where std is different from 0
mask_std_nonzero = np.where(data_std > param.almost_zero)
snr_map = np.zeros_like(data_mean)
snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]
# Output SNR map
fname_snr = add_suffix(fname_data, '_SNR-' + method)
im_snr = empty_like(im_data)
im_snr.data = snr_map
im_snr.save(fname_snr, dtype=np.float32)
# Output non-zero mask
fname_stdnonzero = add_suffix(fname_data, '_mask-STD-nonzero' + method)
im_stdnonzero = empty_like(im_data)
data_stdnonzero = np.zeros_like(data_mean)
data_stdnonzero[mask_std_nonzero] = 1
im_stdnonzero.data = data_stdnonzero
im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
# Compute SNR in ROI
if fname_mask:
snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])
elif method == 'diff':
# Check user selected exactly 2 volumes for this method.
if not len(index_vol) == 2:
raise ValueError(f"Number of selected volumes: {len(index_vol)}. The method 'diff' should be used with "
f"exactly 2 volumes. You can specify the number of volumes with the flag '-vol'.")
data_2vol = np.take(data, index_vol, axis=3)
# Compute mean in ROI
data_mean = np.mean(data_2vol, axis=3)
mean_in_roi = np.average(data_mean, weights=mask)
data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
_, std_in_roi = weighted_avg_and_std(data_sub, mask)
# Compute SNR, correcting for Rayleigh noise (see eq. 7 in Dietrich et al.)
snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi
elif method == 'single':
# Check that the input volume is 3D, or if it is 4D, that the user selected exactly 1 volume for this method.
if dim == 3:
data3d = data
elif dim == 4:
if not len(index_vol) == 1:
raise ValueError(f"Selected volumes: {index_vol}. The method 'single' should be used with "
f"exactly 1 volume. You can specify the index of the volume with the flag '-vol'.")
data3d = np.squeeze(data[..., index_vol])
# Check that input noise mask is provided
if fname_mask_noise:
mask_noise = Image(fname_mask_noise).data
else:
raise RuntimeError("A noise mask is mandatory with '-method single'.")
# Compute mean in ROI
mean_in_roi = np.average(data3d, weights=mask)
# Compute standard deviation in background
std_in_roi = np.std(data3d[mask_noise])
# Compute SNR, correcting for Rayleigh noise (see eq. A12 in Dietrich et al.)
snr_roi = np.sqrt((4 - np.pi) / 2) * mean_in_roi / std_in_roi
# Display result
if fname_mask:
printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n', type='info')
# Added function for text file
if file_name is not None:
with open(file_name, "w") as f:
f.write(str(snr_roi))
printv('\nFile saved to ' + file_name)
|
13,567 |
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
- `'LM'`: select eigenvalues with largest |v[i]|
- `'SM'`: select eigenvalues with smallest |v[i]|
- `'LR'`: select eigenvalues with largest Re(v[i])
- `'SR'`: select eigenvalues with smallest Re(v[i])
- `'LI'`: select eigenvalues with largest Im(v[i])
- `'SI'`: select eigenvalues with smallest Im(v[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
- `'LM'`: select eigenvalues with largest |v[i]|
- `'SM'`: select eigenvalues with smallest |v[i]|
- `'LR'`: select eigenvalues with largest Re(v[i])
- `'SR'`: select eigenvalues with smallest Re(v[i])
- `'LI'`: select eigenvalues with largest Im(v[i])
- `'SI'`: select eigenvalues with smallest Im(v[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted Ritz values as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
19,433 |
def get_versions(args, name):
"""Returns a list of versions and hashes for a package.
Also returns a BuildSystemGuesser object.
Returns default values if no URL is provided.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
name (str): The name of the package
Returns:
tuple: versions and hashes, and a BuildSystemGuesser object
"""
# Default version with hash
hashed_versions = """\
# FIXME: Add proper versions and checksums here.
# version('1.2.3', '0123456789abcdef0123456789abcdef')"""
# Default version without hash
unhashed_versions = """\
# FIXME: Add proper versions here.
# version('1.2.4')"""
# Default git-based version
git_versions = """\
# FIXME: Add proper versions referencing branch/tag/commit here
# version('main', branch='main)"""
# Default guesser
guesser = BuildSystemGuesser()
valid_url = True
url, git = get_url_and_git(args)
has_git_option = args.commit is not None or \
args.tag is not None or \
args.branch is not None
git_single_version = (len(args.branch or []) +
len(args.commit or []) +
len(args.tag or [])) == 1
git_single_version = git_single_version and (args.version is not None)
if git:
if has_git_option:
versions = []
version_tpl = " version('{0}', {1}='{2}')"
if args.branch is not None:
for br in args.branch:
versions.append(version_tpl.format(args.version
if git_single_version
else br, 'branch', br))
if args.tag is not None:
for tag in args.tag:
versions.append(version_tpl.format(args.version
if git_single_version
else tag, 'tag', tag))
if args.commit is not None:
if not git_single_version:
versions.append(' # FIXME: add proper version(s) here')
for commit in args.commit:
# Use short commit id if version not specified
versions.append(version_tpl.format(args.version
if git_single_version
else commit[:7],
'commit', commit))
else:
versions = [git_versions]
return "\n".join(versions), guesser
try:
spack.util.url.require_url_format(args.url)
if args.url.startswith('file://'):
valid_url = False # No point in spidering these
except (ValueError, TypeError):
valid_url = False
if args.url is not None and args.template != 'bundle' and valid_url:
# Find available versions
try:
url_dict = spack.util.web.find_versions_of_archive(args.url)
except UndetectableVersionError:
# Use fake versions
tty.warn("Couldn't detect version in: {0}".format(args.url))
return hashed_versions, guesser
if not url_dict:
# If no versions were found, revert to what the user provided
version = parse_version(args.url)
url_dict = {version: args.url}
else:
if args.version is not None:
# Replace autodetected version with user-provided one
for ver, url in url_dict.items():
if url == args.url:
del url_dict[ver]
url_dict[args.version] = args.url
break
versions = spack.stage.get_checksums_for_versions(
url_dict, name, first_stage_function=guesser,
keep_stage=args.keep_stage,
batch=(args.batch or len(url_dict) == 1))
else:
versions = unhashed_versions
return versions, guesser
|
def get_versions(args, name):
"""Returns a list of versions and hashes for a package.
Also returns a BuildSystemGuesser object.
Returns default values if no URL is provided.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
name (str): The name of the package
Returns:
tuple: versions and hashes, and a BuildSystemGuesser object
"""
# Default version with hash
hashed_versions = """\
# FIXME: Add proper versions and checksums here.
# version('1.2.3', '0123456789abcdef0123456789abcdef')"""
# Default version without hash
unhashed_versions = """\
# FIXME: Add proper versions here.
# version('1.2.4')"""
# Default git-based version
git_versions = """\
# FIXME: Add proper versions referencing branch/tag/commit here
# version('main', branch='main)"""
# Default guesser
guesser = BuildSystemGuesser()
valid_url = True
url, git = get_url_and_git(args)
has_git_option = args.commit or args.tag or args.branch
git_single_version = (len(args.branch or []) +
len(args.commit or []) +
len(args.tag or [])) == 1
git_single_version = git_single_version and (args.version is not None)
if git:
if has_git_option:
versions = []
version_tpl = " version('{0}', {1}='{2}')"
if args.branch is not None:
for br in args.branch:
versions.append(version_tpl.format(args.version
if git_single_version
else br, 'branch', br))
if args.tag is not None:
for tag in args.tag:
versions.append(version_tpl.format(args.version
if git_single_version
else tag, 'tag', tag))
if args.commit is not None:
if not git_single_version:
versions.append(' # FIXME: add proper version(s) here')
for commit in args.commit:
# Use short commit id if version not specified
versions.append(version_tpl.format(args.version
if git_single_version
else commit[:7],
'commit', commit))
else:
versions = [git_versions]
return "\n".join(versions), guesser
try:
spack.util.url.require_url_format(args.url)
if args.url.startswith('file://'):
valid_url = False # No point in spidering these
except (ValueError, TypeError):
valid_url = False
if args.url is not None and args.template != 'bundle' and valid_url:
# Find available versions
try:
url_dict = spack.util.web.find_versions_of_archive(args.url)
except UndetectableVersionError:
# Use fake versions
tty.warn("Couldn't detect version in: {0}".format(args.url))
return hashed_versions, guesser
if not url_dict:
# If no versions were found, revert to what the user provided
version = parse_version(args.url)
url_dict = {version: args.url}
else:
if args.version is not None:
# Replace autodetected version with user-provided one
for ver, url in url_dict.items():
if url == args.url:
del url_dict[ver]
url_dict[args.version] = args.url
break
versions = spack.stage.get_checksums_for_versions(
url_dict, name, first_stage_function=guesser,
keep_stage=args.keep_stage,
batch=(args.batch or len(url_dict) == 1))
else:
versions = unhashed_versions
return versions, guesser
|
25,569 |
def channel_open_with_the_same_node(
channels_to_open: List[ChannelNew],
target_to_depositqueue: Dict[Tuple[str, str], JoinableQueue],
) -> None:
"""As of 0.100.5 channels cannot be open in parallel, starting multiple
opens at the same time can lead to the HTTP request timing out. Therefore
here channels are open one after the other. (Issue #5446).
"""
for channel_open in channels_to_open:
channel = channel_details(
channel_open.endpoint1, channel_open.token_address, channel_open.participant2
)
if channel is None:
channel_open_request = {
"token_address": channel_open.token_address,
"partner_address": channel_open.participant2,
"total_deposit": channel_open.minimum_capacity1,
}
log.info(f"Opening {channel_open}")
url_channel_open = f"{channel_open.endpoint1}/api/{API_VERSION}/channels"
response = requests.put(url_channel_open, json=channel_open_request)
if not is_successful_reponse(response):
raise RuntimeError(f"An error ocurrent while opening channel {channel_open}")
else:
deposit = ChannelDeposit(
channel_open.token_address,
channel_open.participant2,
channel_open.endpoint1,
channel_open.minimum_capacity1,
)
channel_deposit_if_necessary(channel, deposit)
# A deposit only makes sense after the channel is opened.
deposit = ChannelDeposit(
channel_open.token_address,
channel_open.participant1,
channel_open.endpoint2,
channel_open.minimum_capacity2,
)
log.info(f"Queueing {deposit}")
target_to_depositqueue[(channel_open.token_address, channel_open.participant2)].put(
deposit
)
|
def channel_open_with_the_same_node(
channels_to_open: List[ChannelNew],
target_to_depositqueue: Dict[Tuple[str, str], JoinableQueue],
) -> None:
"""As of 0.100.5 channels cannot be open in parallel, starting multiple
opens at the same time can lead to the HTTP request timing out. Therefore
here channels are opened one after the other. (Issue #5446).
"""
for channel_open in channels_to_open:
channel = channel_details(
channel_open.endpoint1, channel_open.token_address, channel_open.participant2
)
if channel is None:
channel_open_request = {
"token_address": channel_open.token_address,
"partner_address": channel_open.participant2,
"total_deposit": channel_open.minimum_capacity1,
}
log.info(f"Opening {channel_open}")
url_channel_open = f"{channel_open.endpoint1}/api/{API_VERSION}/channels"
response = requests.put(url_channel_open, json=channel_open_request)
if not is_successful_reponse(response):
raise RuntimeError(f"An error ocurrent while opening channel {channel_open}")
else:
deposit = ChannelDeposit(
channel_open.token_address,
channel_open.participant2,
channel_open.endpoint1,
channel_open.minimum_capacity1,
)
channel_deposit_if_necessary(channel, deposit)
# A deposit only makes sense after the channel is opened.
deposit = ChannelDeposit(
channel_open.token_address,
channel_open.participant1,
channel_open.endpoint2,
channel_open.minimum_capacity2,
)
log.info(f"Queueing {deposit}")
target_to_depositqueue[(channel_open.token_address, channel_open.participant2)].put(
deposit
)
|
8,865 |
def subreddit_sorting(bot, trigger, s, sorting):
if sorting == 'new':
submissions = list(s.new())
elif sorting == 'top':
submissions = list(s.top())
elif sorting == 'hot':
submissions = list(s.hot())
elif sorting == 'controversial':
submissions = list(s.controversial())
elif sorting == 'gilded':
submissions = list(s.gilded())
elif sorting == 'rising':
submissions = list(s.rising())
elif sorting == 'sticky':
try:
submissions = [s.sticky()]
except prawcore.exceptions.NotFound:
bot.say("r/" + s.display_name + " appears to not have a stickied post!")
return
elif sorting == 'random':
submissions = [s.random()] or []
else:
return
if not len(submissions):
bot.say("r/" + s.display_name + ' ' + sorting + " appears to have no items!")
return NOLIMIT
if sorting != 'sticky':
submissions_filter = []
for submission in submissions:
if not submission.stickied:
submissions_filter.append(submission)
submissions = submissions_filter
submission = submissions[0]
link = "https://reddit.com/r/" + s.display_name + "/comments/" + str(submission)
say_post_info(
bot, trigger, re.match(post_url, link).group(1), False, True)
|
def subreddit_sorting(bot, trigger, s, sorting):
if sorting == 'new':
submissions = list(s.new())
elif sorting == 'top':
submissions = list(s.top())
elif sorting == 'hot':
submissions = list(s.hot(limit=10))
elif sorting == 'controversial':
submissions = list(s.controversial())
elif sorting == 'gilded':
submissions = list(s.gilded())
elif sorting == 'rising':
submissions = list(s.rising())
elif sorting == 'sticky':
try:
submissions = [s.sticky()]
except prawcore.exceptions.NotFound:
bot.say("r/" + s.display_name + " appears to not have a stickied post!")
return
elif sorting == 'random':
submissions = [s.random()] or []
else:
return
if not len(submissions):
bot.say("r/" + s.display_name + ' ' + sorting + " appears to have no items!")
return NOLIMIT
if sorting != 'sticky':
submissions_filter = []
for submission in submissions:
if not submission.stickied:
submissions_filter.append(submission)
submissions = submissions_filter
submission = submissions[0]
link = "https://reddit.com/r/" + s.display_name + "/comments/" + str(submission)
say_post_info(
bot, trigger, re.match(post_url, link).group(1), False, True)
|
49,569 |
def test_set_index_nan_partition():
d[d.a > 3].set_index("a") # Set index with 1 null partition
d[d.a > 1].set_index("a", sorted=True) # Set sorted index with 0 null partitions
d[d.a > 3].set_index("a", sorted=True) # Set sorted index with 1 null partition
|
def test_set_index_nan_partition():
d[d.a > 3].set_index("a") # Set index with 1 null partition
d[d.a > 1].set_index("a", sorted=True) # Set sorted index with 0 null partitions
a = d[d.a > 3].set_index("a", sorted=True) # Set sorted index with 1 null partition
assert_eq(a, a)
|
17,852 |
def find_nonfinite_values_prj(projection, slc_idx):
"""Allows the user to easily obtain the Z, x, y coordinates of nonfinite
values for a given 2D projection in a 3D array.
Parameters
----------
projection : 2D nd.array
The 2D projection for a given 3D data array.
slc_idx : int
The projection index inside the main 3D array.
Returns
-------
The indices for the current projection used, the x coordinates, and y
coordinates of all non-finite values.
"""
# Determining where the nonfinite values are
boolean_of_nonfinite = ~np.isfinite(projection)
# Obtaining there x, y locations
idx_of_nonfinite_prj = np.nonzero(boolean_of_nonfinite)
# Creating an identical shaped array for the z position in the 3D array the prj comes from
idx_of_nonfinite_data = np.asarray(
[slc_idx] * len(idx_of_nonfinite_prj[0]))
return idx_of_nonfinite_data, idx_of_nonfinite_prj[0], idx_of_nonfinite_prj[1]
|
def _find_nonfinite_values_prj(projection, slc_idx):
"""Allows the user to easily obtain the Z, x, y coordinates of nonfinite
values for a given 2D projection in a 3D array.
Parameters
----------
projection : 2D nd.array
The 2D projection for a given 3D data array.
slc_idx : int
The projection index inside the main 3D array.
Returns
-------
The indices for the current projection used, the x coordinates, and y
coordinates of all non-finite values.
"""
# Determining where the nonfinite values are
boolean_of_nonfinite = ~np.isfinite(projection)
# Obtaining there x, y locations
idx_of_nonfinite_prj = np.nonzero(boolean_of_nonfinite)
# Creating an identical shaped array for the z position in the 3D array the prj comes from
idx_of_nonfinite_data = np.asarray(
[slc_idx] * len(idx_of_nonfinite_prj[0]))
return idx_of_nonfinite_data, idx_of_nonfinite_prj[0], idx_of_nonfinite_prj[1]
|
31,800 |
def list_blobs_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
List Blobs under the specified container.
Args:
client (Client): Azure Blob Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
container_name = args.get('container_name')
limit = args.get('limit', '50')
prefix = args.get('prefix')
page = arg_to_number(args.get('page', '1'))
marker = ''
readable_message = f'Blobs List:\n Current page size: {limit}\n Showing page {page} out others that may exist'
if page > 1:
offset = int(limit) * (page - 1)
response = client.list_blobs_request(container_name, str(offset), prefix, marker)
tree = ET.ElementTree(ET.fromstring(response))
root = tree.getroot()
marker = root.findtext('NextMarker')
if not marker:
return CommandResults(
readable_output=readable_message,
outputs_prefix='AzureStorageBlob.Blob',
outputs=[],
raw_response=[]
)
response = client.list_blobs_request(container_name, limit, prefix, marker)
tree = ET.ElementTree(ET.fromstring(response))
root = tree.getroot()
raw_response = []
outputs = []
for element in root.iter('Blob'):
outputs.append({'container_name': container_name, 'blob_name': element.findtext('Name')})
data = {'Name': element.findtext('Name')}
properties = {}
for blob_property in element.findall('Properties'):
for attribute in blob_property:
properties[attribute.tag] = attribute.text
data['Properties'] = properties
raw_response.append(data)
readable_output = tableToMarkdown(
readable_message,
outputs,
headers=['blob_name'],
headerTransform=string_to_table_header
)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='AzureStorageBlob.Blob',
outputs_key_field=['container_name', 'blob_name'],
outputs=outputs,
raw_response=raw_response
)
return command_results
|
def list_blobs_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
List Blobs under the specified container.
Args:
client (Client): Azure Blob Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
container_name = args.get('container_name')
limit = args.get('limit') or '50'
prefix = args.get('prefix')
page = arg_to_number(args.get('page', '1'))
marker = ''
readable_message = f'Blobs List:\n Current page size: {limit}\n Showing page {page} out others that may exist'
if page > 1:
offset = int(limit) * (page - 1)
response = client.list_blobs_request(container_name, str(offset), prefix, marker)
tree = ET.ElementTree(ET.fromstring(response))
root = tree.getroot()
marker = root.findtext('NextMarker')
if not marker:
return CommandResults(
readable_output=readable_message,
outputs_prefix='AzureStorageBlob.Blob',
outputs=[],
raw_response=[]
)
response = client.list_blobs_request(container_name, limit, prefix, marker)
tree = ET.ElementTree(ET.fromstring(response))
root = tree.getroot()
raw_response = []
outputs = []
for element in root.iter('Blob'):
outputs.append({'container_name': container_name, 'blob_name': element.findtext('Name')})
data = {'Name': element.findtext('Name')}
properties = {}
for blob_property in element.findall('Properties'):
for attribute in blob_property:
properties[attribute.tag] = attribute.text
data['Properties'] = properties
raw_response.append(data)
readable_output = tableToMarkdown(
readable_message,
outputs,
headers=['blob_name'],
headerTransform=string_to_table_header
)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='AzureStorageBlob.Blob',
outputs_key_field=['container_name', 'blob_name'],
outputs=outputs,
raw_response=raw_response
)
return command_results
|
6,358 |
def parse():
import os
import sys
import argparse
from pudb import VERSION
version_info = "pudb:%(prog)s v" + VERSION
if sys.argv[1:] == ["-v"]:
print(version_info % {"prog": os.path.basename(__file__)})
sys.exit(os.EX_OK)
parser = argparse.ArgumentParser(
usage="%(prog)s [options] [-m] SCRIPT-OR-MODULE-TO-RUN [SCRIPT_ARGS]",
epilog=version_info
)
parser.add_argument("-s", "--steal-output", action="store_true"),
# note: we're implementing -m as a boolean flag, mimicking pdb's behavior,
# and makes it possible without much fuss to support cases like:
# python -m pudb -m http.server -h
# where the -h will be passed to the http.server module
parser.add_argument("-m", "--module", action="store_true",
help="Debug as module or package instead of as a script")
parser.add_argument("-le", "--log-errors", nargs=1, metavar="FILE",
help="Log internal errors to the given file")
parser.add_argument("--pre-run", metavar="COMMAND",
help="Run command before each program run",
default="")
parser.add_argument("--version", action="version", version=version_info)
parser.add_argument("script_args", nargs=argparse.REMAINDER,
help="Arguments to pass to script or module")
return parser
|
def get_argparse_parser():
import os
import sys
import argparse
from pudb import VERSION
version_info = "pudb:%(prog)s v" + VERSION
if sys.argv[1:] == ["-v"]:
print(version_info % {"prog": os.path.basename(__file__)})
sys.exit(os.EX_OK)
parser = argparse.ArgumentParser(
usage="%(prog)s [options] [-m] SCRIPT-OR-MODULE-TO-RUN [SCRIPT_ARGS]",
epilog=version_info
)
parser.add_argument("-s", "--steal-output", action="store_true"),
# note: we're implementing -m as a boolean flag, mimicking pdb's behavior,
# and makes it possible without much fuss to support cases like:
# python -m pudb -m http.server -h
# where the -h will be passed to the http.server module
parser.add_argument("-m", "--module", action="store_true",
help="Debug as module or package instead of as a script")
parser.add_argument("-le", "--log-errors", nargs=1, metavar="FILE",
help="Log internal errors to the given file")
parser.add_argument("--pre-run", metavar="COMMAND",
help="Run command before each program run",
default="")
parser.add_argument("--version", action="version", version=version_info)
parser.add_argument("script_args", nargs=argparse.REMAINDER,
help="Arguments to pass to script or module")
return parser
|
53,749 |
def test_changepoint_with_X1_X2():
X = np.linspace(0, 100, 100).reshape(100, 1)
base_k1 = gpflow.kernels.Matern32(lengthscales=0.2)
base_k2 = gpflow.kernels.Matern32(lengthscales=2.0)
k = gpflow.kernels.ChangePoints([base_k1, base_k2], [0.0], steepness=5.0)
K = k(X)
assert K.shape == [100, 100]
N = 25
X2 = np.linspace(0, 50, N).reshape(N, 1)
K = k(X, X2)
assert K.shape == [100, 25]
|
def test_changepoint_with_X1_X2():
N = 100
X = np.linspace(0, 100, N).reshape(N, 1)
base_k1 = gpflow.kernels.Matern32(lengthscales=0.2)
base_k2 = gpflow.kernels.Matern32(lengthscales=2.0)
k = gpflow.kernels.ChangePoints([base_k1, base_k2], [0.0], steepness=5.0)
K = k(X)
assert K.shape == [N, N]
N2 = 25
X2 = np.linspace(0, 50, N2).reshape(N2, 1)
K = k(X, X2)
assert K.shape == [N, N2]
|
4,317 |
def _check_surface_size(surf):
"""Check that the coordinate limits are reasonable."""
sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0)
if (sizes < 0.05).any():
raise RuntimeError(
f'Dimensions of the surface {_bem_surf_name[surf["id"]]} seem too '
f'small ({1000 * sizes.min():9.5f}). Maybe the the unit of measure'
' is meters instead of mm')
|
def _check_surface_size(surf):
"""Check that the coordinate limits are reasonable."""
sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0)
if (sizes < 0.05).any():
raise RuntimeError(
f'Dimensions of the surface {_bem_surf_name[surf["id"]]} seem too '
f'small ({1000 * sizes.min():9.5f}). Maybe the unit of measure'
' is meters instead of mm')
|
43,444 |
def RandomLayer(weights, wires, ratio_imprim=0.3, imprimitive=CNOT, rotations=None, seed=42):
r"""A layer of randomly chosen single qubit rotations and 2-qubit entangling gates, acting
on randomly chosen qubits.
The number :math:`k` of single qubit rotations is inferred from the first dimension of ``weights``.
This is an example of two 4-qubit random layers with four Pauli-y/Pauli-z rotations :math:`R_y, R_z`,
controlled-Z gates as imprimitives, as well as ``ratio_imprim=0.3``:
.. figure:: ../../_static/layer_rnd.png
:align: center
:width: 60%
:target: javascript:void(0);
.. note::
Using the default seed (or any other fixed integer seed) generates one and the same circuit in every
quantum node. To generate different circuit architectures, either use a random seed, or use ``seed=None``
together with the ``cache=False`` option when creating a quantum node.
.. warning::
If you use a random number generator anywhere inside the quantum function without the ``cache=False`` option,
a new random circuit architecture will be created every time the quantum node is evaluated.
Args:
weights (array[float]): array of weights of shape ``(k,)``
wires (Sequence[int]): sequence of qubit indices that the template acts on
Keyword Args:
ratio_imprim (float): value between 0 and 1 that determines the ratio of imprimitive to rotation gates
imprimitive (pennylane.ops.Operation): two-qubit gate to use, defaults to :class:`~pennylane.ops.CNOT`
rotations (list[pennylane.ops.Operation]): List of Pauli-X, Pauli-Y and/or Pauli-Z gates. The frequency
determines how often a particular rotation type is used. Defaults to the use of all three
rotations with equal frequency.
seed (int): seed to generate random architecture
"""
if len(wires) < 2:
raise ValueError("RandomLayer requires at least two wires or subsystems to apply "
"the imprimitive gates.")
if seed is not None:
np.random.seed(seed)
if rotations is None:
rotations = [RX, RY, RZ]
i = 0
while i < len(weights):
if np.random.random() > ratio_imprim:
gate = np.random.choice(rotations)
wire = np.random.choice(wires)
gate(weights[i], wires=wire)
i += 1
else:
on_wires = np.random.permutation(wires)[:2]
on_wires = list(on_wires)
imprimitive(wires=on_wires)
|
def RandomLayer(weights, wires, ratio_imprim=0.3, imprimitive=CNOT, rotations=None, seed=42):
r"""A layer of randomly chosen single qubit rotations and 2-qubit entangling gates, acting
on randomly chosen qubits.
The number :math:`k` of single qubit rotations is inferred from the first dimension of ``weights``.
This is an example of two 4-qubit random layers with four Pauli-y/Pauli-z rotations :math:`R_y, R_z`,
controlled-Z gates as imprimitives, as well as ``ratio_imprim=0.3``:
.. figure:: ../../_static/layer_rnd.png
:align: center
:width: 60%
:target: javascript:void(0);
.. note::
Using the default seed (or any other fixed integer seed) generates one and the same circuit in every
quantum node. To generate different circuit architectures, either use a different random seed, or use ``seed=None``
together with the ``cache=False`` option when creating a quantum node.
.. warning::
If you use a random number generator anywhere inside the quantum function without the ``cache=False`` option,
a new random circuit architecture will be created every time the quantum node is evaluated.
Args:
weights (array[float]): array of weights of shape ``(k,)``
wires (Sequence[int]): sequence of qubit indices that the template acts on
Keyword Args:
ratio_imprim (float): value between 0 and 1 that determines the ratio of imprimitive to rotation gates
imprimitive (pennylane.ops.Operation): two-qubit gate to use, defaults to :class:`~pennylane.ops.CNOT`
rotations (list[pennylane.ops.Operation]): List of Pauli-X, Pauli-Y and/or Pauli-Z gates. The frequency
determines how often a particular rotation type is used. Defaults to the use of all three
rotations with equal frequency.
seed (int): seed to generate random architecture
"""
if len(wires) < 2:
raise ValueError("RandomLayer requires at least two wires or subsystems to apply "
"the imprimitive gates.")
if seed is not None:
np.random.seed(seed)
if rotations is None:
rotations = [RX, RY, RZ]
i = 0
while i < len(weights):
if np.random.random() > ratio_imprim:
gate = np.random.choice(rotations)
wire = np.random.choice(wires)
gate(weights[i], wires=wire)
i += 1
else:
on_wires = np.random.permutation(wires)[:2]
on_wires = list(on_wires)
imprimitive(wires=on_wires)
|
8,649 |
def find_config(config_dir, name, extension='.cfg'):
"""Build the absolute path for the given configuration file ``name``
:param str config_dir: path to the configuration directory
:param str name: configuration file ``name``
:param str extension: configuration file's extension (default to ``.cfg``)
:return: the path of the configuration file, either in the current
directory or from the ``config_dir`` directory
This function tries different location:
* the current directory,
* the ``config_dir`` directory with the ``extension`` suffix,
* the ``config_dir`` directory without a suffix,
Example::
>>> from sopel import run_script
>>> os.listdir()
['local.cfg', 'extra.ini']
>>> os.listdir(config.DEFAULT_HOMEDIR)
['config.cfg', 'extra.ini', 'module.cfg', 'README']
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local.cfg')
'local.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local')
'/home/username/.sopel/local'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'config')
'/home/username/.sopel/config.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'extra', '.ini')
'/home/username/.sopel/extra.ini'
"""
if os.path.isfile(name):
return name
name_ext = name + extension
for config in enumerate_configs(config_dir, extension):
if name_ext == config:
return os.path.join(config_dir, name_ext)
return os.path.join(config_dir, name)
|
def find_config(config_dir, name, extension='.cfg'):
"""Build the absolute path for the given configuration file ``name``
:param str config_dir: path to the configuration directory
:param str name: configuration file ``name``
:param str extension: configuration file's extension (default to ``.cfg``)
:return: the path of the configuration file, either in the current
directory or from the ``config_dir`` directory
This function tries different location:
* the current directory
* the ``config_dir`` directory with the ``extension`` suffix,
* the ``config_dir`` directory without a suffix,
Example::
>>> from sopel import run_script
>>> os.listdir()
['local.cfg', 'extra.ini']
>>> os.listdir(config.DEFAULT_HOMEDIR)
['config.cfg', 'extra.ini', 'module.cfg', 'README']
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local.cfg')
'local.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local')
'/home/username/.sopel/local'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'config')
'/home/username/.sopel/config.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'extra', '.ini')
'/home/username/.sopel/extra.ini'
"""
if os.path.isfile(name):
return name
name_ext = name + extension
for config in enumerate_configs(config_dir, extension):
if name_ext == config:
return os.path.join(config_dir, name_ext)
return os.path.join(config_dir, name)
|
15,232 |
def whitespace(value: Any) -> str:
"""Validate result contains only whitespace."""
if isinstance(value, str) and _WS.fullmatch(value):
return value
raise vol.Invalid(f"contain non-whitespace: {value}")
|
def whitespace(value: Any) -> str:
"""Validate result contains only whitespace."""
if isinstance(value, str) and _WS.fullmatch(value):
return value
raise vol.Invalid(f"contains non-whitespace: {value}")
|
43,534 |
def _get_default_args(func):
"""Get the default arguments of a function.
Args:
func (callable): a function
Returns:
dict[str->tuple]: mapping from argument name to (positional idx, default value)
"""
signature = inspect.signature(func)
return {
k: (idx, v.default)
for idx, (k, v) in enumerate(signature.parameters.items())
if v.default is not inspect.Parameter.empty
}
|
def _get_default_args(func):
"""Get the default arguments of a function.
Args:
func (callable): a function
Returns:
dict[str, tuple]: mapping from argument name to (positional idx, default value)
"""
signature = inspect.signature(func)
return {
k: (idx, v.default)
for idx, (k, v) in enumerate(signature.parameters.items())
if v.default is not inspect.Parameter.empty
}
|
50,293 |
def tag_torrent(infohash, tags_db, tags=None, suggested_tags=None):
if tags is None:
tags_count = random.randint(2, 6)
tags = []
while len(tags) < tags_count:
tag = get_random_word(min_length=MIN_TAG_LENGTH)
if tag not in tags:
tags.append(tag)
if suggested_tags is None:
suggested_tags_count = random.randint(1, 3)
suggested_tags = []
while len(suggested_tags) < suggested_tags_count:
tag = get_random_word(min_length=MIN_TAG_LENGTH)
if tag not in suggested_tags:
suggested_tags.append(tag)
def _add_operation(_tag, _op, _key):
operation = TagOperation(infohash=infohash, tag=_tag, operation=_op, clock=0,
creator_public_key=_key.pub().key_to_bin())
operation.clock = tags_db.get_clock(operation) + 1
tags_db.add_tag_operation(operation, b"")
# Give each torrent some tags
for tag in tags:
for key in [random_key_1, random_key_2]: # Each tag should be proposed by two unique users
_add_operation(tag, TagOperationEnum.ADD, key)
# Make sure we have some suggestions
for tag in suggested_tags:
_add_operation(tag, TagOperationEnum.ADD, random_key_3)
_add_operation(tag, TagOperationEnum.REMOVE, random_key_2)
|
def tag_torrent(infohash, tags_db, tags=None, suggested_tags=None):
if tags is None:
tags_count = random.randint(2, 6)
tags = generate_sentence_with_fixed_word_length(tags_count, MIN_TAG_LENGTH)
if suggested_tags is None:
suggested_tags_count = random.randint(1, 3)
suggested_tags = generate_sentence_with_fixed_word_length(suggested_tags_count, MIN_TAG_LENGTH)
def _add_operation(_tag, _op, _key):
operation = TagOperation(infohash=infohash, tag=_tag, operation=_op, clock=0,
creator_public_key=_key.pub().key_to_bin())
operation.clock = tags_db.get_clock(operation) + 1
tags_db.add_tag_operation(operation, b"")
# Give each torrent some tags
for tag in tags:
for key in [random_key_1, random_key_2]: # Each tag should be proposed by two unique users
_add_operation(tag, TagOperationEnum.ADD, key)
# Make sure we have some suggestions
for tag in suggested_tags:
_add_operation(tag, TagOperationEnum.ADD, random_key_3)
_add_operation(tag, TagOperationEnum.REMOVE, random_key_2)
|
32,597 |
def create_issue_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Creates an issue.
Args:
client (Client): Client to perform calls to GitLab services.
args (Dict[str, Any]): XSOAR arguments:
- 'state': The state of the issue.
- 'labels': Retrieve only issues with the given labels.
- 'assignee_username': Retrieve issues by assignee username.
Returns:
(CommandResults).
"""
labels = args.get('labels', '')
headers = ['Iid', 'Title', 'CreatedAt', 'CreatedBy', 'UpdatedAt', 'Milstone', 'State', 'Assignee']
title = args.get('title', '')
description = args.get('description', '')
response = client.create_issue_request(labels, title, description)
human_readable_dict = {
'Iid': response.get('iid'),
'Title': response.get('title'),
'CreatedAt': response.get('created_at', ' '),
'CreatedBy': response.get('autor.name', ' '),
'UpdatedAt': response.get('updated_at', ' '),
'Milstone': response.get('milestone.title', ' '),
'State': response.get('state', ' '),
'Assignee': response.get('assignee.name', ' ')
}
human_readable = tableToMarkdown('Create Issue', human_readable_dict, headers=headers, removeNull=True)
return CommandResults(
outputs_prefix='GitLab.Issue',
outputs_key_field='Iid',
readable_output=human_readable,
outputs=response,
raw_response=response
)
|
def create_issue_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Creates an issue.
Args:
client (Client): Client to perform calls to GitLab services.
args (Dict[str, Any]): XSOAR arguments:
- 'state': The state of the issue.
- 'labels': Retrieve only issues with the given labels.
- 'assignee_username': Retrieve issues by assignee username.
Returns:
(CommandResults).
"""
labels = args.get('labels', '')
headers = ['Iid', 'Title', 'CreatedAt', 'CreatedBy', 'UpdatedAt', 'Milstone', 'State', 'Assignee']
title = args.get('title', '')
description = args.get('description', '')
response = client.create_issue_request(labels, title, description)
human_readable_dict = {
'Iid': response.get('iid'),
'Title': response.get('title'),
'CreatedAt': response.get('created_at', ''),
'CreatedBy': response.get('autor.name', ' '),
'UpdatedAt': response.get('updated_at', ' '),
'Milstone': response.get('milestone.title', ' '),
'State': response.get('state', ' '),
'Assignee': response.get('assignee.name', ' ')
}
human_readable = tableToMarkdown('Create Issue', human_readable_dict, headers=headers, removeNull=True)
return CommandResults(
outputs_prefix='GitLab.Issue',
outputs_key_field='Iid',
readable_output=human_readable,
outputs=response,
raw_response=response
)
|
34,166 |
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
import rasa.nlu.convert as convert
data_parser = subparsers.add_parser(
"data",
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Utils for the Rasa training files.",
)
data_parser.set_defaults(func=lambda _: data_parser.print_help(None))
data_subparsers = data_parser.add_subparsers()
convert_parser = data_subparsers.add_parser(
"convert",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts Rasa data between different formats.",
)
convert_parser.set_defaults(func=lambda _: convert_parser.print_help(None))
convert_subparsers = convert_parser.add_subparsers()
convert_nlu_parser = convert_subparsers.add_parser(
"nlu",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts NLU data between markdown and json.",
)
convert_nlu_parser.set_defaults(func=convert.main)
arguments.set_convert_arguments(convert_nlu_parser)
split_parser = data_subparsers.add_parser(
"split",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Splits Rasa data in training and test data.",
)
split_parser.set_defaults(func=lambda _: split_parser.print_help(None))
split_subparsers = split_parser.add_subparsers()
nlu_split_parser = split_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Performs a split of your NLU data according to the specified "
"percentages.",
)
nlu_split_parser.set_defaults(func=split_nlu_data)
arguments.set_split_arguments(nlu_split_parser)
|
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
import rasa.nlu.convert as convert
data_parser = subparsers.add_parser(
"data",
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Utils for the Rasa training files.",
)
data_parser.set_defaults(func=lambda _: data_parser.print_help(None))
data_subparsers = data_parser.add_subparsers()
convert_parser = data_subparsers.add_parser(
"convert",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts Rasa data between different formats.",
)
convert_parser.set_defaults(func=lambda _: convert_parser.print_help(None))
convert_subparsers = convert_parser.add_subparsers()
convert_nlu_parser = convert_subparsers.add_parser(
"nlu",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Converts NLU data between Markdown and json formats.",
)
convert_nlu_parser.set_defaults(func=convert.main)
arguments.set_convert_arguments(convert_nlu_parser)
split_parser = data_subparsers.add_parser(
"split",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Splits Rasa data in training and test data.",
)
split_parser.set_defaults(func=lambda _: split_parser.print_help(None))
split_subparsers = split_parser.add_subparsers()
nlu_split_parser = split_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Performs a split of your NLU data according to the specified "
"percentages.",
)
nlu_split_parser.set_defaults(func=split_nlu_data)
arguments.set_split_arguments(nlu_split_parser)
|
6,392 |
def execute(filters=None):
columns = get_columns(filters)
item_details = get_fifo_queue(filters)
to_date = filters["to_date"]
data = []
for item, item_dict in iteritems(item_details):
item_dict['fifo_queue'] = [item for item in item_dict if item[1]]
fifo_queue = sorted(item_dict["fifo_queue"], key=lambda x: x[1])
details = item_dict["details"]
if not fifo_queue or (not item_dict.get("total_qty")): continue
average_age = get_average_age(fifo_queue, to_date)
earliest_age = date_diff(to_date, fifo_queue[0][1])
latest_age = date_diff(to_date, fifo_queue[-1][1])
row = [details.name, details.item_name,
details.description, details.item_group, details.brand]
if filters.get("show_warehouse_wise_stock"):
row.append(details.warehouse)
row.extend([item_dict.get("total_qty"), average_age,
earliest_age, latest_age, details.stock_uom])
data.append(row)
return columns, data
|
def execute(filters=None):
columns = get_columns(filters)
item_details = get_fifo_queue(filters)
to_date = filters["to_date"]
data = []
for item, item_dict in iteritems(item_details):
temp_fifo = [d for d in item_dict['fifo_queue'] if d[1]]
fifo_queue = sorted(item_dict["fifo_queue"], key=lambda x: x[1])
details = item_dict["details"]
if not fifo_queue or (not item_dict.get("total_qty")): continue
average_age = get_average_age(fifo_queue, to_date)
earliest_age = date_diff(to_date, fifo_queue[0][1])
latest_age = date_diff(to_date, fifo_queue[-1][1])
row = [details.name, details.item_name,
details.description, details.item_group, details.brand]
if filters.get("show_warehouse_wise_stock"):
row.append(details.warehouse)
row.extend([item_dict.get("total_qty"), average_age,
earliest_age, latest_age, details.stock_uom])
data.append(row)
return columns, data
|
29,312 |
def update_story(
committer_id, story_id, change_list, commit_message):
"""Updates a story. Commits changes.
# NOTE: This function should not be called on its own. Access it
# through `topic_services.update_story_and_topic_summary`.
Args:
committer_id: str. The id of the user who is performing the update
action.
story_id: str. The story id.
change_list: list(StoryChange). These changes are applied in sequence to
produce the resulting story.
commit_message: str or None. A description of changes made to the
story.
Raises:
ValueError. Expected a commit message but received none.
ValidationError. Exploration is already linked to a different story.
ValidationError. Story Url Fragment is not unique across the site.
"""
if not commit_message:
raise ValueError('Expected a commit message but received none.')
old_story = story_fetchers.get_story_by_id(story_id)
new_story, exp_ids_removed_from_story, exp_ids_added_to_story = (
apply_change_list(story_id, change_list))
story_is_published = is_story_published_and_present_in_topic(new_story)
exploration_context_models_to_be_deleted = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_removed_from_story))
exploration_context_models_to_be_deleted = [
model for model in exploration_context_models_to_be_deleted
if model is not None]
exploration_context_models_collisions_list = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_added_to_story))
for context_model in exploration_context_models_collisions_list:
if context_model is not None and context_model.story_id != story_id:
raise utils.ValidationError(
'The exploration with ID %s is already linked to story '
'with ID %s' % (context_model.id, context_model.story_id))
if (
old_story.url_fragment != new_story.url_fragment and
does_story_exist_with_url_fragment(new_story.url_fragment)):
raise utils.ValidationError(
'Story Url Fragment is not unique across the site.')
_save_story(
committer_id, new_story, commit_message, change_list,
story_is_published)
create_story_summary(new_story.id)
if story_is_published:
opportunity_services.update_exploration_opportunities(
old_story, new_story)
suggestion_services.auto_reject_translation_suggestions_for_exp_ids(
exp_ids_removed_from_story)
exp_models.ExplorationContextModel.delete_multi(
exploration_context_models_to_be_deleted)
new_exploration_context_models = [exp_models.ExplorationContextModel(
id=exp_id,
story_id=story_id
) for exp_id in exp_ids_added_to_story]
exp_models.ExplorationContextModel.update_timestamps_multi(
new_exploration_context_models)
exp_models.ExplorationContextModel.put_multi(new_exploration_context_models)
|
def update_story(
committer_id, story_id, change_list, commit_message):
"""Updates a story. Commits changes.
# NOTE: This function should not be called on its own. Access it
# through `topic_services.update_story_and_topic_summary`.
Args:
committer_id: str. The id of the user who is performing the update
action.
story_id: str. The story id.
change_list: list(StoryChange). These changes are applied in sequence to
produce the resulting story.
commit_message: str or None. A description of changes made to the
story.
Raises:
ValueError. Expected a commit message but received None.
ValidationError. Exploration is already linked to a different story.
ValidationError. Story Url Fragment is not unique across the site.
"""
if not commit_message:
raise ValueError('Expected a commit message but received none.')
old_story = story_fetchers.get_story_by_id(story_id)
new_story, exp_ids_removed_from_story, exp_ids_added_to_story = (
apply_change_list(story_id, change_list))
story_is_published = is_story_published_and_present_in_topic(new_story)
exploration_context_models_to_be_deleted = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_removed_from_story))
exploration_context_models_to_be_deleted = [
model for model in exploration_context_models_to_be_deleted
if model is not None]
exploration_context_models_collisions_list = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_added_to_story))
for context_model in exploration_context_models_collisions_list:
if context_model is not None and context_model.story_id != story_id:
raise utils.ValidationError(
'The exploration with ID %s is already linked to story '
'with ID %s' % (context_model.id, context_model.story_id))
if (
old_story.url_fragment != new_story.url_fragment and
does_story_exist_with_url_fragment(new_story.url_fragment)):
raise utils.ValidationError(
'Story Url Fragment is not unique across the site.')
_save_story(
committer_id, new_story, commit_message, change_list,
story_is_published)
create_story_summary(new_story.id)
if story_is_published:
opportunity_services.update_exploration_opportunities(
old_story, new_story)
suggestion_services.auto_reject_translation_suggestions_for_exp_ids(
exp_ids_removed_from_story)
exp_models.ExplorationContextModel.delete_multi(
exploration_context_models_to_be_deleted)
new_exploration_context_models = [exp_models.ExplorationContextModel(
id=exp_id,
story_id=story_id
) for exp_id in exp_ids_added_to_story]
exp_models.ExplorationContextModel.update_timestamps_multi(
new_exploration_context_models)
exp_models.ExplorationContextModel.put_multi(new_exploration_context_models)
|
27,523 |
def snake_and_shadow(name):
"""
Converts the given name into Pythonic form. Firstly it converts CamelCase names to snake_case. Secondly it looks to
see if the name matches a known built-in and if it does it appends an underscore to the name.
:param name: The parameter name
:type name: str
:return:
"""
snake = inflection.underscore(name)
all_reserved = []
all_reserved.extend(list(builtins.__dict__.keys()))
all_reserved.extend(keyword.kwlist)
if snake in all_reserved:
return f"{snake}_"
return snake
|
def snake_and_shadow(name):
"""
Converts the given name into Pythonic form. Firstly it converts CamelCase names to snake_case. Secondly it looks to
see if the name matches a known built-in and if it does it appends an underscore to the name.
:param name: The parameter name
:type name: str
:return:
"""
snake = inflection.underscore(name)
if snake in builtins.__dict__ or keyword.iskeyword(snake):
return f"{snake}_"
return snake
|
54,177 |
def remove_chrs_from_bam(bam, chrs, chrsz, nth=1, out_dir=''):
assert(len(chrs)>0)
prefix = os.path.join(out_dir,
os.path.basename(strip_ext_bam(bam)))
suffix = 'no_{}'.format('_'.join(chrs))
final_bam = '{}.{}.bam'.format(prefix, suffix)
# tmp_bam = '{}.{}.tmp.bam'.format(prefix, suffix)
# tmp_header_sam = '{}.{}.header.tmp.sam'.format(prefix, suffix)
tmp_chrsz = '{}.{}.tmp.chrsz'.format(prefix, suffix)
# make a temp chrsz file
cmd0 = 'zcat -f {chrsz} |'
cmd0 += 'grep -v -P \'^({chrs})\\s\' | '
cmd0 += 'awk \'BEGIN{{OFS="\\t"}} {{print $1,0,$2}}\' > {tmp_chrsz}'
cmd0 = cmd0.format(
chrsz=chrsz,
chrs='|'.join(chrs),
tmp_chrsz=tmp_chrsz)
run_shell_cmd(cmd0)
# remove chrs from BAM
cmd1 = 'samtools view -b -L {tmp_chrsz} {bam} -@ {nth} > {final_bam}'
cmd1 = cmd1.format(
tmp_chrsz=tmp_chrsz,
bam=bam,
nth=nth,
final_bam=final_bam)
# tmp_bam=tmp_bam)
run_shell_cmd(cmd1)
rm_f(tmp_chrsz)
# # make a temp header
# cmd2 = 'samtools view -H {bam} > {tmp_header_sam}'
# cmd2 = cmd2.format(
# bam=bam,
# tmp_header_sam=tmp_header_sam)
# run_shell_cmd(cmd2)
# # update header
# cmd3 = 'samtools reheader {tmp_header_sam} {tmp_bam} > {final_bam}'
# cmd3 = cmd3.format(
# tmp_header_sam=tmp_header_sam,
# tmp_bam=tmp_bam,
# final_bam=final_bam)
# run_shell_cmd(cmd3)
# rm_f([tmp_bam, tmp_header_sam, tmp_chrsz])
return final_bam
|
def remove_chrs_from_bam(bam, chrs, chrsz, nth=1, out_dir=''):
assert(len(chrs)>0)
prefix = os.path.join(out_dir,
os.path.basename(strip_ext_bam(bam)))
suffix = 'no_{}'.format('_'.join(chrs))
final_bam = '{}.{}.bam'.format(prefix, suffix)
# tmp_bam = '{}.{}.tmp.bam'.format(prefix, suffix)
# tmp_header_sam = '{}.{}.header.tmp.sam'.format(prefix, suffix)
tmp_chrsz = '{}.{}.tmp.chrsz'.format(prefix, suffix)
# make a temp chrsz file
cmd0 = 'zcat -f {chrsz} |'
cmd0 += 'grep -v -P \'^({chrs})\\s\' | '
cmd0s = [
'zcat -f {chrsz} |',
'grep -v -P \'^({chrs})\\s\' |',
'awk \'BEGIN{{OFS="\\t"}} {{print $1,0,$2}}\' > {tmp_chrsz}',
]
cmd0 = ' | '.join(cmd0s)
cmd0 = cmd0.format(
chrsz=chrsz,
chrs='|'.join(chrs),
tmp_chrsz=tmp_chrsz)
run_shell_cmd(cmd0)
# remove chrs from BAM
cmd1 = 'samtools view -b -L {tmp_chrsz} {bam} -@ {nth} > {final_bam}'
cmd1 = cmd1.format(
tmp_chrsz=tmp_chrsz,
bam=bam,
nth=nth,
final_bam=final_bam)
# tmp_bam=tmp_bam)
run_shell_cmd(cmd1)
rm_f(tmp_chrsz)
# # make a temp header
# cmd2 = 'samtools view -H {bam} > {tmp_header_sam}'
# cmd2 = cmd2.format(
# bam=bam,
# tmp_header_sam=tmp_header_sam)
# run_shell_cmd(cmd2)
# # update header
# cmd3 = 'samtools reheader {tmp_header_sam} {tmp_bam} > {final_bam}'
# cmd3 = cmd3.format(
# tmp_header_sam=tmp_header_sam,
# tmp_bam=tmp_bam,
# final_bam=final_bam)
# run_shell_cmd(cmd3)
# rm_f([tmp_bam, tmp_header_sam, tmp_chrsz])
return final_bam
|
30,893 |
def get_indicators(client, indicator_type: [str], last_run_id: Optional[bool] = None, limit: Optional[int] = None)\
-> Tuple[str, list]:
""" Retrieving indicators from the API
Args:
client: OpenCTI Client object.
indicator_type: List of indicators types to return.
last_run_id: The last id from the previous call to use pagination.
limit: the max indicators to fetch
Returns:
new_last_run: the id of the last indicator
indicators: list of indicators
"""
if 'all' in indicator_type:
indicator_type = ['user-account', 'domain', 'email-address', 'file-md5', 'file-sha1', 'file-sha256', 'hostname',
'ipv4-addr', 'ipv6-addr', 'registry-key-value', 'url']
observables = client.stix_observable.list(types=indicator_type, first=limit, after=last_run_id, withPagination=True)
new_last_run = observables.get('pagination').get('endCursor')
indicators = [
{
"value": item['observable_value'],
"type": XSOHR_TYPES.get(item['entity_type']),
"rawJSON": item,
"fields": {
"tags": [tag.get('value') for tag in item.get('tags')],
"description": item.get('description')
}
}for item in observables.get('entities')
]
return new_last_run, indicators
|
def get_indicators(client, indicator_type: [str], last_run_id: Optional[int] = None, limit: Optional[int] = None)\
-> Tuple[str, list]:
""" Retrieving indicators from the API
Args:
client: OpenCTI Client object.
indicator_type: List of indicators types to return.
last_run_id: The last id from the previous call to use pagination.
limit: the max indicators to fetch
Returns:
new_last_run: the id of the last indicator
indicators: list of indicators
"""
if 'all' in indicator_type:
indicator_type = ['user-account', 'domain', 'email-address', 'file-md5', 'file-sha1', 'file-sha256', 'hostname',
'ipv4-addr', 'ipv6-addr', 'registry-key-value', 'url']
observables = client.stix_observable.list(types=indicator_type, first=limit, after=last_run_id, withPagination=True)
new_last_run = observables.get('pagination').get('endCursor')
indicators = [
{
"value": item['observable_value'],
"type": XSOHR_TYPES.get(item['entity_type']),
"rawJSON": item,
"fields": {
"tags": [tag.get('value') for tag in item.get('tags')],
"description": item.get('description')
}
}for item in observables.get('entities')
]
return new_last_run, indicators
|
14,147 |
def _build_sig_attr_dict(signals, name_map=None):
if name_map is None:
name_map = {}
if isinstance(signals, dict):
return signals
else:
return {sig: (name_map[sig] if sig in name_map else sig)
for sig in signals}
|
def _build_sig_attr_dict(signals, name_map=None):
if name_map is None:
name_map = {}
if isinstance(signals, dict):
return signals
else:
return {sig: name_map.get(sig, sig)
for sig in signals}
|
43,682 |
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.pauli_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian:
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.pauli_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
33,706 |
def detect_reporter(func):
"""Use checkpointing if any arg has "reporter" and args = 2"""
func_sig = inspect.signature(func)
use_reporter = True
try:
func_sig.bind({}, reporter=None)
except Exception as e:
logger.debug(str(e))
use_reporter = False
return use_reporter
|
def detect_reporter(func):
"""Use reporter if any arg has "reporter" and args = 2"""
func_sig = inspect.signature(func)
use_reporter = True
try:
func_sig.bind({}, reporter=None)
except Exception as e:
logger.debug(str(e))
use_reporter = False
return use_reporter
|
37,451 |
def frames_configuration(frame_channels: List[List[PulseChannel]],
frame_frequencies: List[float],
sample_duration: float,
frame_indices: List[int] = None) -> Union[dict, None]:
"""
Ties together the frames of the backend and the frequencies of the frames.
Args:
frame_channels: A List of lists. Sublist i is a list of channel names
that frame i will broadcast on.
frame_frequencies: A list of starting frequencies for each frame.
sample_duration: time of a sample.
frame_indices: The indices of the frames. If None is given these will be
in ascending order starting from 0.
Returns:
frames_config: A dictionary with the frame index as key and the values are
a dict which can be used to initialized a ResolvedFrame.
Raises:
QiskitError: if the number of frame frequencies is not the same as the number
of frames, i.e. the length of frame_channels.
"""
if len(frame_frequencies) != len(frame_channels):
raise QiskitError(f'Number of frames {len(frame_channels)} is incompatible with '
f'the number of frame initial frequencies {len(frame_frequencies)}.')
frames_config = {}
for idx, channels in enumerate(frame_channels):
if frame_indices:
index = frame_indices[idx]
else:
index = idx
frames_config[index] = {
'frame': Frame(index),
'phase': 0.0,
'frequency': frame_frequencies[idx],
'channels': channels,
'sample_duration': sample_duration
}
return frames_config
|
def frames_configuration(frame_channels: List[List[PulseChannel]],
frame_frequencies: List[float],
dt: float,
frame_indices: List[int] = None) -> Union[dict, None]:
"""
Ties together the frames of the backend and the frequencies of the frames.
Args:
frame_channels: A List of lists. Sublist i is a list of channel names
that frame i will broadcast on.
frame_frequencies: A list of starting frequencies for each frame.
sample_duration: time of a sample.
frame_indices: The indices of the frames. If None is given these will be
in ascending order starting from 0.
Returns:
frames_config: A dictionary with the frame index as key and the values are
a dict which can be used to initialized a ResolvedFrame.
Raises:
QiskitError: if the number of frame frequencies is not the same as the number
of frames, i.e. the length of frame_channels.
"""
if len(frame_frequencies) != len(frame_channels):
raise QiskitError(f'Number of frames {len(frame_channels)} is incompatible with '
f'the number of frame initial frequencies {len(frame_frequencies)}.')
frames_config = {}
for idx, channels in enumerate(frame_channels):
if frame_indices:
index = frame_indices[idx]
else:
index = idx
frames_config[index] = {
'frame': Frame(index),
'phase': 0.0,
'frequency': frame_frequencies[idx],
'channels': channels,
'sample_duration': sample_duration
}
return frames_config
|
878 |
def _orbits_transversals_from_bsgs(base, strong_gens_distr,
transversals_only=False, slp=False):
"""
Compute basic orbits and transversals from a base and strong generating set.
Explanation
===========
The generators are provided as distributed across the basic stabilizers.
If the optional argument ``transversals_only`` is set to True, only the
transversals are returned.
Parameters
==========
``base`` - The base.
``strong_gens_distr`` - Strong generators distributed by membership in basic
stabilizers.
``transversals_only`` - bool
A flag switching between returning only the
transversals/ both orbits and transversals.
``slp`` -
If ``True``, return a list of dictionaries containing the
generator presentations of the elements of the transversals,
i.e. the list of indices of generators from `strong_gens_distr[i]`
such that their product is the relevant transversal element.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import _distribute_gens_by_base
>>> S = SymmetricGroup(3)
>>> S.schreier_sims()
>>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens)
>>> (S.base, strong_gens_distr)
([0, 1], [[(0 1 2), (2)(0 1), (1 2)], [(1 2)]])
See Also
========
_distribute_gens_by_base, _handle_precomputed_bsgs
"""
from sympy.combinatorics.perm_groups import _orbit_transversal
base_len = len(base)
degree = strong_gens_distr[0][0].size
transversals = [None]*base_len
slps = [None]*base_len
if transversals_only is False:
basic_orbits = [None]*base_len
for i in range(base_len):
transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],
base[i], pairs=True, slp=True)
transversals[i] = dict(transversals[i])
if transversals_only is False:
basic_orbits[i] = list(transversals[i].keys())
if transversals_only:
return transversals
else:
if not slp:
return basic_orbits, transversals
return basic_orbits, transversals, slps
|
def _orbits_transversals_from_bsgs(base, strong_gens_distr,
transversals_only=False, slp=False):
"""
Compute basic orbits and transversals from a base and strong generating set.
Explanation
===========
The generators are provided as distributed across the basic stabilizers.
If the optional argument ``transversals_only`` is set to True, only the
transversals are returned.
Parameters
==========
``base`` - The base.
``strong_gens_distr`` - Strong generators distributed by membership in basic
stabilizers.
``transversals_only`` - bool
A flag switching between returning only the
transversals and both orbits and transversals.
``slp`` -
If ``True``, return a list of dictionaries containing the
generator presentations of the elements of the transversals,
i.e. the list of indices of generators from `strong_gens_distr[i]`
such that their product is the relevant transversal element.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import _distribute_gens_by_base
>>> S = SymmetricGroup(3)
>>> S.schreier_sims()
>>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens)
>>> (S.base, strong_gens_distr)
([0, 1], [[(0 1 2), (2)(0 1), (1 2)], [(1 2)]])
See Also
========
_distribute_gens_by_base, _handle_precomputed_bsgs
"""
from sympy.combinatorics.perm_groups import _orbit_transversal
base_len = len(base)
degree = strong_gens_distr[0][0].size
transversals = [None]*base_len
slps = [None]*base_len
if transversals_only is False:
basic_orbits = [None]*base_len
for i in range(base_len):
transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],
base[i], pairs=True, slp=True)
transversals[i] = dict(transversals[i])
if transversals_only is False:
basic_orbits[i] = list(transversals[i].keys())
if transversals_only:
return transversals
else:
if not slp:
return basic_orbits, transversals
return basic_orbits, transversals, slps
|
38,794 |
def getallnodes(state='all', partitions=None):
rt = runtime.runtime()
nodes = {}
if partitions is None:
partitions = rt.system.partitions
for part in rt.system.partitions:
if part not in partitions:
continue
# This job will not be submitted, it's used only to filter
# the nodes based on the partition configuration
job = Job.create(part.scheduler,
part.launcher_type(),
name='placeholder-job',
sched_access=part.access,
sched_options=rt.jobs_cli_options)
available_nodes = part.scheduler.allnodes()
available_nodes = part.scheduler.filternodes(job, available_nodes)
getlogger().debug(
f'Total available nodes for {part.name}: {len(available_nodes)}'
)
if state.casefold() != 'all':
available_nodes = {n for n in available_nodes
if n.in_state(state)}
getlogger().debug(
f'[F] Selecting nodes in state {state!r}: '
f'available nodes now: {len(available_nodes)}'
)
nodes[part.fullname] = [n.name for n in available_nodes]
return nodes
|
def getallnodes(state='all', partitions=None):
rt = runtime.runtime()
nodes = {}
if partitions is None:
partitions = rt.system.partitions
for part in rt.system.partitions:
if part not in partitions:
continue
# This job will not be submitted, it's used only to filter
# the nodes based on the partition configuration
dummy_job = Job.create(part.scheduler,
part.launcher_type(),
name='placeholder-job',
sched_access=part.access,
sched_options=rt.jobs_cli_options)
available_nodes = part.scheduler.allnodes()
available_nodes = part.scheduler.filternodes(job, available_nodes)
getlogger().debug(
f'Total available nodes for {part.name}: {len(available_nodes)}'
)
if state.casefold() != 'all':
available_nodes = {n for n in available_nodes
if n.in_state(state)}
getlogger().debug(
f'[F] Selecting nodes in state {state!r}: '
f'available nodes now: {len(available_nodes)}'
)
nodes[part.fullname] = [n.name for n in available_nodes]
return nodes
|
2,981 |
def _validate_date_like_dtype(dtype) -> None:
"""
Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
TypeError : The dtype could not be casted to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
the frequency provided is too specific)
"""
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError(f"{e}")
if typ != "generic" and typ != "ns":
raise ValueError(
f"{repr(dtype.name)} is too specific of a frequency, "
f"try passing {repr(dtype.type.__name__)}"
)
|
def _validate_date_like_dtype(dtype) -> None:
"""
Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
TypeError : The dtype could not be casted to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
the frequency provided is too specific)
"""
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError(e)
if typ != "generic" and typ != "ns":
raise ValueError(
f"{repr(dtype.name)} is too specific of a frequency, "
f"try passing {repr(dtype.type.__name__)}"
)
|
13,516 |
def load(f, **options):
dbcImportEncoding = options.get("dbcImportEncoding", 'iso-8859-1')
dbcCommentEncoding = options.get("dbcImportCommentEncoding", dbcImportEncoding)
float_factory = options.get('float_factory', default_float_factory)
i = 0
class FollowUps(object):
nothing, signalComment, frameComment, boardUnitComment, globalComment = list(
range(5))
followUp = FollowUps.nothing
comment = ""
signal = None
frame = None
boardUnit = None
db = CanMatrix()
for line in f:
i = i + 1
l = line.strip()
if l.__len__() == 0:
continue
try:
if followUp == FollowUps.signalComment:
try:
comment += "\n" + \
l.decode(dbcCommentEncoding).replace('\\"', '"')
except:
logger.error("Error decoding line: %d (%s)" % (i, line))
if l.endswith(b'";'):
followUp = FollowUps.nothing
if signal is not None:
signal.addComment(comment[0:-2])
continue
elif followUp == FollowUps.frameComment:
try:
comment += "\n" + \
l.decode(dbcCommentEncoding).replace('\\"', '"')
except:
logger.error("Error decoding line: %d (%s)" % (i, line))
if l.endswith(b'";'):
followUp = FollowUps.nothing
if frame is not None:
frame.addComment(comment[0:-2])
continue
elif followUp == FollowUps.boardUnitComment:
try:
comment += "\n" + \
l.decode(dbcCommentEncoding).replace('\\"', '"')
except:
logger.error("Error decoding line: %d (%s)" % (i, line))
if l.endswith(b'";'):
followUp = FollowUps.nothing
if boardUnit is not None:
boardUnit.addComment(comment[0:-2])
continue
decoded = l.decode(dbcImportEncoding).strip()
if decoded.startswith("BO_ "):
regexp = re.compile(r"^BO_ ([^\ ]+) ([^\ ]+) *: ([^\ ]+) ([^\ ]+)")
temp = regexp.match(decoded)
# db.frames.addFrame(Frame(temp.group(1), temp.group(2), temp.group(3), temp.group(4)))
frame = Frame(temp.group(2), id=int(temp.group(1)), size=int(temp.group(3)), transmitters=temp.group(4).split())
db.frames.append(frame)
elif decoded.startswith("SG_ "):
pattern = r"^SG_ +(\w+) *: *(\d+)\|(\d+)@(\d+)([\+|\-]) +\(([0-9.+\-eE]+),([0-9.+\-eE]+)\) +\[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] +\"(.*)\" +(.*)"
regexp = re.compile(pattern)
temp = regexp.match(decoded)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp_raw = regexp_raw.match(l)
if temp:
receiver = list(map(str.strip, [str(b) for b in temp.group(11).split(',')]))
extras = {}
# if float_factory is not None:
# extras['float_factory'] = float_factory
tempSig = Signal(
temp.group(1),
startBit=int(temp.group(2)),
size=int(temp.group(3)),
is_little_endian=(int(temp.group(4)) == 1),
is_signed=(temp.group(5) == '-'),
factor=temp.group(6),
offset=temp.group(7),
min=temp.group(8),
max=temp.group(9),
unit=temp_raw.group(10).decode(dbcImportEncoding),
receiver=receiver,
**extras
)
if not tempSig.is_little_endian:
# startbit of motorola coded signals are MSB in dbc
tempSig.setStartbit(int(temp.group(2)), bitNumbering=1)
frame.addSignal(tempSig)
# db.frames.addSignalToLastFrame(tempSig)
else:
pattern = r"^SG_ +(\w+) +(\w+) *: *(\d+)\|(\d+)@(\d+)([\+|\-]) +\(([0-9.+\-eE]+),([0-9.+\-eE]+)\) +\[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] +\"(.*)\" +(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
receiverTemp = [str(b) for b in temp.group(12).split(',')]
receiver = list(map(str.strip, receiverTemp))
multiplex = temp.group(2)
is_complex_multiplexed = False
if multiplex == 'M':
multiplex = 'Multiplexor'
elif multiplex.endswith('M'):
is_complex_multiplexed = True
multiplex = multiplex[:-1]
if multiplex != 'Multiplexor':
try:
multiplex = int(multiplex[1:])
except:
raise Exception('error decoding line',line)
extras = {}
# if float_factory is not None:
# extras['float_factory'] = float_factory
tempSig = Signal(
temp.group(1),
startBit=int(temp.group(3)),
size=int(temp.group(4)),
is_little_endian=(int(temp.group(5)) == 1),
is_signed=(temp.group(6) == '-'),
factor=temp.group(7),
offset=temp.group(8),
min=temp.group(9),
max=temp.group(10),
unit=temp_raw.group(11).decode(dbcImportEncoding),
receiver=receiver,
multiplex=multiplex,
**extras
)
if is_complex_multiplexed:
tempSig.is_multiplexer = True
tempSig.multiplex = 'Multiplexor'
if not tempSig.is_little_endian:
# startbit of motorola coded signals are MSB in dbc
tempSig.setStartbit(int(temp.group(3)), bitNumbering=1)
frame.addSignal(tempSig)
if is_complex_multiplexed:
frame.is_complex_multiplexed = True
elif decoded.startswith("BO_TX_BU_ "):
regexp = re.compile(r"^BO_TX_BU_ ([0-9]+) *: *(.+);")
temp = regexp.match(decoded)
botschaft = db.frameById(temp.group(1))
for bu in temp.group(2).split(','):
botschaft.addTransmitter(bu)
elif decoded.startswith("CM_ SG_ "):
pattern = r"^CM_ +SG_ +(\w+) +(\w+) +\"(.*)\";"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
botschaft = db.frameById(temp.group(1))
signal = botschaft.signalByName(temp.group(2))
if signal:
try:
signal.addComment(temp_raw.group(3).decode(
dbcCommentEncoding).replace('\\"', '"'))
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
else:
pattern = r"^CM_ +SG_ +(\w+) +(\w+) +\"(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
botschaft = db.frameById(temp.group(1))
signal = botschaft.signalByName(temp.group(2))
try:
comment = temp_raw.group(3).decode(
dbcCommentEncoding).replace('\\"', '"')
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
followUp = FollowUps.signalComment
elif decoded.startswith("CM_ BO_ "):
pattern = r"^CM_ +BO_ +(\w+) +\"(.*)\";"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frame = db.frameById(temp.group(1))
if frame:
try:
frame.addComment(temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"'))
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
else:
pattern = r"^CM_ +BO_ +(\w+) +\"(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frame = db.frameById(temp.group(1))
try:
comment = temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"')
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
followUp = FollowUps.frameComment
elif decoded.startswith("CM_ BU_ "):
pattern = r"^CM_ +BU_ +(\w+) +\"(.*)\";"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
boardUnit = db.boardUnitByName(temp.group(1))
if boardUnit:
try:
boardUnit.addComment(temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"'))
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
else:
pattern = r"^CM_ +BU_ +(\w+) +\"(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
boardUnit = db.boardUnitByName(temp.group(1))
if boardUnit:
try:
comment = temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"')
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
followUp = FollowUps.boardUnitComment
elif decoded.startswith("BU_:"):
pattern = r"^BU_\:(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
if temp:
myTempListe = temp.group(1).split(' ')
for ele in myTempListe:
if len(ele.strip()) > 1:
db.boardUnits.append(BoardUnit(ele))
elif decoded.startswith("VAL_ "):
regexp = re.compile(r"^VAL_ +(\w+) +(\w+) +(.*);")
temp = regexp.match(decoded)
if temp:
botschaftId = temp.group(1)
signal = temp.group(2)
tempList = temp.group(3).split('"')
if botschaftId.isnumeric(): # value for Frame
try:
bo = db.frameById(botschaftId)
sg = bo.signalByName(signal)
for i in range(math.floor(len(tempList) / 2)):
val = tempList[i * 2 + 1]
if sg:
sg.addValues(tempList[i * 2], val)
except:
logger.error("Error with Line: " + str(tempList))
else:
logger.info("Warning: enviroment variables currently not supported")
elif decoded.startswith("VAL_TABLE_ "):
regexp = re.compile(r"^VAL_TABLE_ +(\w+) +(.*);")
temp = regexp.match(decoded)
if temp:
tableName = temp.group(1)
tempList = temp.group(2).split('"')
try:
valHash = {}
for i in range(math.floor(len(tempList) / 2)):
val = tempList[i * 2 + 1]
valHash[tempList[i * 2].strip()] = val.strip()
except:
logger.error("Error with Line: " + str(tempList))
db.addValueTable(tableName, valHash)
else:
logger.debug(l)
elif decoded.startswith("BA_DEF_ SG_ "):
pattern = r"^BA_DEF_ +SG_ +\"([A-Za-z0-9\-_]+)\" +(.+);"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.addSignalDefines(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("BA_DEF_ BO_ "):
pattern = r"^BA_DEF_ +BO_ +\"([A-Za-z0-9\-_]+)\" +(.+);"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.addFrameDefines(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("BA_DEF_ BU_ "):
pattern = r"^BA_DEF_ +BU_ +\"([A-Za-z0-9\-_]+)\" +(.+);"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.addBUDefines(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("BA_DEF_ "):
pattern = r"^BA_DEF_ +\"([A-Za-z0-9\-_]+)\" +(.+);"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.addGlobalDefines(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("BA_ "):
regexp = re.compile(r"^BA_ +\"[A-Za-z0-9[\-_ .]+\" +(.+)")
tempba = regexp.match(decoded)
if tempba.group(1).strip().startswith("BO_ "):
regexp = re.compile(r"^BA_ +\"(.*)\" +BO_ +(\w+) +(.+);")
temp = regexp.match(decoded)
db.frameById(int(temp.group(2))).addAttribute(
temp.group(1), temp.group(3))
elif tempba.group(1).strip().startswith("SG_ "):
regexp = re.compile(r"^BA_ +\"(.*)\" +SG_ +(\w+) +(\w+) +(.+);")
temp = regexp.match(decoded)
if temp!=None:
db.frameById(int(temp.group(2))).signalByName(
temp.group(3)).addAttribute(temp.group(1), temp.group(4))
elif tempba.group(1).strip().startswith("BU_ "):
regexp = re.compile(r"^BA_ +\"(.*)\" +BU_ +(\w+) +(.+);")
temp = regexp.match(decoded)
db.boardUnitByName(
temp.group(2)).addAttribute(
temp.group(1),
temp.group(3))
else:
regexp = re.compile(
r"^BA_ +\"([A-Za-z0-9\-_]+)\" +([\"A-Za-z0-9\-_\.]+);")
temp = regexp.match(decoded)
if temp:
db.addAttribute(temp.group(1), temp.group(2))
elif decoded.startswith("SIG_GROUP_ "):
regexp = re.compile(r"^SIG_GROUP_ +(\w+) +(\w+) +(\w+) +\:(.*);")
temp = regexp.match(decoded)
frame = db.frameById(temp.group(1))
if frame is not None:
signalArray = temp.group(4).split(' ')
frame.addSignalGroup(temp.group(2), temp.group(3), signalArray)
elif decoded.startswith("SIG_VALTYPE_ "):
regexp = re.compile(r"^SIG_VALTYPE_ +(\w+) +(\w+)\s*\:(.*);")
temp = regexp.match(decoded)
frame = db.frameById(temp.group(1))
if frame:
signal = frame.signalByName(temp.group(2))
signal.is_float = True
# SIG_VALTYPE_ 0 float : 1;
elif decoded.startswith("BA_DEF_DEF_ "):
pattern = r"^BA_DEF_DEF_ +\"([A-Za-z0-9\-_\.]+)\" +(.+)\;"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.addDefineDefault(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("SG_MUL_VAL_ "):
pattern = r"^SG_MUL_VAL_ +([0-9]+) +([A-Za-z0-9\-_]+) +([A-Za-z0-9\-_]+) +([0-9]+)\-([0-9]+) *;"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frameId = temp.group(1)
signalName = temp.group(2)
muxerForSignal = temp.group(3)
muxValMin = int(temp.group(4))
muxValMax = int(temp.group(4))
frame = db.frameById(frameId)
if frame is not None:
signal = frame.signalByName(signalName)
frame.is_complex_multiplexed = True
signal.muxerForSignal = muxerForSignal
signal.muxValMin = muxValMin
signal.muxValMax = muxValMax
elif decoded.startswith("EV_ "):
pattern = r"^EV_ +([A-Za-z0-9\-_]+) *\: +([0-9]+) +\[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] +\"(\w*)\" +([0-9.+\-eE]+) +([0-9.+\-eE]+) +([A-Za-z0-9\-_]+) +(.*);"
regexp = re.compile(pattern)
temp = regexp.match(decoded)
varName = temp.group(1)
varType = temp.group(2)
min = temp.group(3)
max = temp.group(4)
unit = temp.group(5)
initialValue = temp.group(6)
evId = temp.group(7)
accessType = temp.group(8)
accessNodes = temp.group(9).split(",")
db.addEnvVar( varName, {"varType": varType, "min" : min, "max" : max,
"unit" : unit, "initialValue" : initialValue, "evId" : evId,
"accessType" : accessType, "accessNodes" : accessNodes})
except:
print ("error with line no: %d" % i)
print (line)
# else:
# print("Unrecocniced line: " + l + " (%d) " % i)
# Backtracking
for frame in db.frames:
# receiver is only given in the signals, so do propagate the receiver
# to the frame:
frame.updateReceiver()
# extended-flag is implicite in canid, thus repair this:
if frame.id > 0x80000000:
frame.id -= 0x80000000
frame.extended = 1
if "VFrameFormat" in frame.attributes and "_FD" in frame.attributes["VFrameFormat"]:
frame.is_fd = True
for define in db.globalDefines:
if db.globalDefines[define].type == "STRING":
if define in db.attributes:
db.attributes[define] = db.attributes[define][1:-1]
for define in db.buDefines:
if db.buDefines[define].type == "STRING":
for ecu in db.boardUnits:
if define in ecu.attributes:
ecu.attributes[define] = ecu.attributes[define][1:-1]
for define in db.frameDefines:
if db.frameDefines[define].type == "STRING":
for frame in db.frames:
if define in frame.attributes:
frame.attributes[define] = frame.attributes[define][1:-1]
for define in db.signalDefines:
if db.signalDefines[define].type == "STRING":
for frame in db.frames:
for signal in frame.signals:
if define in signal.attributes:
signal.attributes[define] = signal.attributes[define][1:-1]
db.EnumAttribs2Values()
db.updateEcuList()
db.delEcu("Vector__XXX")
return db
|
def load(f, **options):
dbcImportEncoding = options.get("dbcImportEncoding", 'iso-8859-1')
dbcCommentEncoding = options.get("dbcImportCommentEncoding", dbcImportEncoding)
float_factory = options.get('float_factory', default_float_factory)
i = 0
class FollowUps(object):
nothing, signalComment, frameComment, boardUnitComment, globalComment = list(
range(5))
followUp = FollowUps.nothing
comment = ""
signal = None
frame = None
boardUnit = None
db = CanMatrix()
for line in f:
i = i + 1
l = line.strip()
if l.__len__() == 0:
continue
try:
if followUp == FollowUps.signalComment:
try:
comment += "\n" + \
l.decode(dbcCommentEncoding).replace('\\"', '"')
except:
logger.error("Error decoding line: %d (%s)" % (i, line))
if l.endswith(b'";'):
followUp = FollowUps.nothing
if signal is not None:
signal.addComment(comment[0:-2])
continue
elif followUp == FollowUps.frameComment:
try:
comment += "\n" + \
l.decode(dbcCommentEncoding).replace('\\"', '"')
except:
logger.error("Error decoding line: %d (%s)" % (i, line))
if l.endswith(b'";'):
followUp = FollowUps.nothing
if frame is not None:
frame.addComment(comment[0:-2])
continue
elif followUp == FollowUps.boardUnitComment:
try:
comment += "\n" + \
l.decode(dbcCommentEncoding).replace('\\"', '"')
except:
logger.error("Error decoding line: %d (%s)" % (i, line))
if l.endswith(b'";'):
followUp = FollowUps.nothing
if boardUnit is not None:
boardUnit.addComment(comment[0:-2])
continue
decoded = l.decode(dbcImportEncoding).strip()
if decoded.startswith("BO_ "):
regexp = re.compile(r"^BO_ ([^\ ]+) ([^\ ]+) *: ([^\ ]+) ([^\ ]+)")
temp = regexp.match(decoded)
# db.frames.addFrame(Frame(temp.group(1), temp.group(2), temp.group(3), temp.group(4)))
frame = Frame(temp.group(2), id=int(temp.group(1)), size=int(temp.group(3)), transmitters=temp.group(4).split())
db.frames.append(frame)
elif decoded.startswith("SG_ "):
pattern = r"^SG_ +(\w+) *: *(\d+)\|(\d+)@(\d+)([\+|\-]) +\(([0-9.+\-eE]+),([0-9.+\-eE]+)\) +\[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] +\"(.*)\" +(.*)"
regexp = re.compile(pattern)
temp = regexp.match(decoded)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp_raw = regexp_raw.match(l)
if temp:
receiver = list(map(str.strip, [str(b) for b in temp.group(11).split(',')]))
extras = {}
# if float_factory is not None:
# extras['float_factory'] = float_factory
tempSig = Signal(
temp.group(1),
startBit=int(temp.group(2)),
size=int(temp.group(3)),
is_little_endian=(int(temp.group(4)) == 1),
is_signed=(temp.group(5) == '-'),
factor=temp.group(6),
offset=temp.group(7),
min=temp.group(8),
max=temp.group(9),
unit=temp_raw.group(10).decode(dbcImportEncoding),
receiver=receiver,
**extras
)
if not tempSig.is_little_endian:
# startbit of motorola coded signals are MSB in dbc
tempSig.setStartbit(int(temp.group(2)), bitNumbering=1)
frame.addSignal(tempSig)
# db.frames.addSignalToLastFrame(tempSig)
else:
pattern = r"^SG_ +(\w+) +(\w+) *: *(\d+)\|(\d+)@(\d+)([\+|\-]) +\(([0-9.+\-eE]+),([0-9.+\-eE]+)\) +\[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] +\"(.*)\" +(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
receiver = [b.strip() for b in temp.group(12).split(',')]
receiver = list(map(str.strip, receiverTemp))
multiplex = temp.group(2)
is_complex_multiplexed = False
if multiplex == 'M':
multiplex = 'Multiplexor'
elif multiplex.endswith('M'):
is_complex_multiplexed = True
multiplex = multiplex[:-1]
if multiplex != 'Multiplexor':
try:
multiplex = int(multiplex[1:])
except:
raise Exception('error decoding line',line)
extras = {}
# if float_factory is not None:
# extras['float_factory'] = float_factory
tempSig = Signal(
temp.group(1),
startBit=int(temp.group(3)),
size=int(temp.group(4)),
is_little_endian=(int(temp.group(5)) == 1),
is_signed=(temp.group(6) == '-'),
factor=temp.group(7),
offset=temp.group(8),
min=temp.group(9),
max=temp.group(10),
unit=temp_raw.group(11).decode(dbcImportEncoding),
receiver=receiver,
multiplex=multiplex,
**extras
)
if is_complex_multiplexed:
tempSig.is_multiplexer = True
tempSig.multiplex = 'Multiplexor'
if not tempSig.is_little_endian:
# startbit of motorola coded signals are MSB in dbc
tempSig.setStartbit(int(temp.group(3)), bitNumbering=1)
frame.addSignal(tempSig)
if is_complex_multiplexed:
frame.is_complex_multiplexed = True
elif decoded.startswith("BO_TX_BU_ "):
regexp = re.compile(r"^BO_TX_BU_ ([0-9]+) *: *(.+);")
temp = regexp.match(decoded)
botschaft = db.frameById(temp.group(1))
for bu in temp.group(2).split(','):
botschaft.addTransmitter(bu)
elif decoded.startswith("CM_ SG_ "):
pattern = r"^CM_ +SG_ +(\w+) +(\w+) +\"(.*)\";"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
botschaft = db.frameById(temp.group(1))
signal = botschaft.signalByName(temp.group(2))
if signal:
try:
signal.addComment(temp_raw.group(3).decode(
dbcCommentEncoding).replace('\\"', '"'))
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
else:
pattern = r"^CM_ +SG_ +(\w+) +(\w+) +\"(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
botschaft = db.frameById(temp.group(1))
signal = botschaft.signalByName(temp.group(2))
try:
comment = temp_raw.group(3).decode(
dbcCommentEncoding).replace('\\"', '"')
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
followUp = FollowUps.signalComment
elif decoded.startswith("CM_ BO_ "):
pattern = r"^CM_ +BO_ +(\w+) +\"(.*)\";"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frame = db.frameById(temp.group(1))
if frame:
try:
frame.addComment(temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"'))
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
else:
pattern = r"^CM_ +BO_ +(\w+) +\"(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frame = db.frameById(temp.group(1))
try:
comment = temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"')
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
followUp = FollowUps.frameComment
elif decoded.startswith("CM_ BU_ "):
pattern = r"^CM_ +BU_ +(\w+) +\"(.*)\";"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
boardUnit = db.boardUnitByName(temp.group(1))
if boardUnit:
try:
boardUnit.addComment(temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"'))
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
else:
pattern = r"^CM_ +BU_ +(\w+) +\"(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
boardUnit = db.boardUnitByName(temp.group(1))
if boardUnit:
try:
comment = temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"')
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
followUp = FollowUps.boardUnitComment
elif decoded.startswith("BU_:"):
pattern = r"^BU_\:(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
if temp:
myTempListe = temp.group(1).split(' ')
for ele in myTempListe:
if len(ele.strip()) > 1:
db.boardUnits.append(BoardUnit(ele))
elif decoded.startswith("VAL_ "):
regexp = re.compile(r"^VAL_ +(\w+) +(\w+) +(.*);")
temp = regexp.match(decoded)
if temp:
botschaftId = temp.group(1)
signal = temp.group(2)
tempList = temp.group(3).split('"')
if botschaftId.isnumeric(): # value for Frame
try:
bo = db.frameById(botschaftId)
sg = bo.signalByName(signal)
for i in range(math.floor(len(tempList) / 2)):
val = tempList[i * 2 + 1]
if sg:
sg.addValues(tempList[i * 2], val)
except:
logger.error("Error with Line: " + str(tempList))
else:
logger.info("Warning: enviroment variables currently not supported")
elif decoded.startswith("VAL_TABLE_ "):
regexp = re.compile(r"^VAL_TABLE_ +(\w+) +(.*);")
temp = regexp.match(decoded)
if temp:
tableName = temp.group(1)
tempList = temp.group(2).split('"')
try:
valHash = {}
for i in range(math.floor(len(tempList) / 2)):
val = tempList[i * 2 + 1]
valHash[tempList[i * 2].strip()] = val.strip()
except:
logger.error("Error with Line: " + str(tempList))
db.addValueTable(tableName, valHash)
else:
logger.debug(l)
elif decoded.startswith("BA_DEF_ SG_ "):
pattern = r"^BA_DEF_ +SG_ +\"([A-Za-z0-9\-_]+)\" +(.+);"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.addSignalDefines(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("BA_DEF_ BO_ "):
pattern = r"^BA_DEF_ +BO_ +\"([A-Za-z0-9\-_]+)\" +(.+);"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.addFrameDefines(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("BA_DEF_ BU_ "):
pattern = r"^BA_DEF_ +BU_ +\"([A-Za-z0-9\-_]+)\" +(.+);"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.addBUDefines(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("BA_DEF_ "):
pattern = r"^BA_DEF_ +\"([A-Za-z0-9\-_]+)\" +(.+);"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.addGlobalDefines(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("BA_ "):
regexp = re.compile(r"^BA_ +\"[A-Za-z0-9[\-_ .]+\" +(.+)")
tempba = regexp.match(decoded)
if tempba.group(1).strip().startswith("BO_ "):
regexp = re.compile(r"^BA_ +\"(.*)\" +BO_ +(\w+) +(.+);")
temp = regexp.match(decoded)
db.frameById(int(temp.group(2))).addAttribute(
temp.group(1), temp.group(3))
elif tempba.group(1).strip().startswith("SG_ "):
regexp = re.compile(r"^BA_ +\"(.*)\" +SG_ +(\w+) +(\w+) +(.+);")
temp = regexp.match(decoded)
if temp!=None:
db.frameById(int(temp.group(2))).signalByName(
temp.group(3)).addAttribute(temp.group(1), temp.group(4))
elif tempba.group(1).strip().startswith("BU_ "):
regexp = re.compile(r"^BA_ +\"(.*)\" +BU_ +(\w+) +(.+);")
temp = regexp.match(decoded)
db.boardUnitByName(
temp.group(2)).addAttribute(
temp.group(1),
temp.group(3))
else:
regexp = re.compile(
r"^BA_ +\"([A-Za-z0-9\-_]+)\" +([\"A-Za-z0-9\-_\.]+);")
temp = regexp.match(decoded)
if temp:
db.addAttribute(temp.group(1), temp.group(2))
elif decoded.startswith("SIG_GROUP_ "):
regexp = re.compile(r"^SIG_GROUP_ +(\w+) +(\w+) +(\w+) +\:(.*);")
temp = regexp.match(decoded)
frame = db.frameById(temp.group(1))
if frame is not None:
signalArray = temp.group(4).split(' ')
frame.addSignalGroup(temp.group(2), temp.group(3), signalArray)
elif decoded.startswith("SIG_VALTYPE_ "):
regexp = re.compile(r"^SIG_VALTYPE_ +(\w+) +(\w+)\s*\:(.*);")
temp = regexp.match(decoded)
frame = db.frameById(temp.group(1))
if frame:
signal = frame.signalByName(temp.group(2))
signal.is_float = True
# SIG_VALTYPE_ 0 float : 1;
elif decoded.startswith("BA_DEF_DEF_ "):
pattern = r"^BA_DEF_DEF_ +\"([A-Za-z0-9\-_\.]+)\" +(.+)\;"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.addDefineDefault(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("SG_MUL_VAL_ "):
pattern = r"^SG_MUL_VAL_ +([0-9]+) +([A-Za-z0-9\-_]+) +([A-Za-z0-9\-_]+) +([0-9]+)\-([0-9]+) *;"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frameId = temp.group(1)
signalName = temp.group(2)
muxerForSignal = temp.group(3)
muxValMin = int(temp.group(4))
muxValMax = int(temp.group(4))
frame = db.frameById(frameId)
if frame is not None:
signal = frame.signalByName(signalName)
frame.is_complex_multiplexed = True
signal.muxerForSignal = muxerForSignal
signal.muxValMin = muxValMin
signal.muxValMax = muxValMax
elif decoded.startswith("EV_ "):
pattern = r"^EV_ +([A-Za-z0-9\-_]+) *\: +([0-9]+) +\[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] +\"(\w*)\" +([0-9.+\-eE]+) +([0-9.+\-eE]+) +([A-Za-z0-9\-_]+) +(.*);"
regexp = re.compile(pattern)
temp = regexp.match(decoded)
varName = temp.group(1)
varType = temp.group(2)
min = temp.group(3)
max = temp.group(4)
unit = temp.group(5)
initialValue = temp.group(6)
evId = temp.group(7)
accessType = temp.group(8)
accessNodes = temp.group(9).split(",")
db.addEnvVar( varName, {"varType": varType, "min" : min, "max" : max,
"unit" : unit, "initialValue" : initialValue, "evId" : evId,
"accessType" : accessType, "accessNodes" : accessNodes})
except:
print ("error with line no: %d" % i)
print (line)
# else:
# print("Unrecocniced line: " + l + " (%d) " % i)
# Backtracking
for frame in db.frames:
# receiver is only given in the signals, so do propagate the receiver
# to the frame:
frame.updateReceiver()
# extended-flag is implicite in canid, thus repair this:
if frame.id > 0x80000000:
frame.id -= 0x80000000
frame.extended = 1
if "VFrameFormat" in frame.attributes and "_FD" in frame.attributes["VFrameFormat"]:
frame.is_fd = True
for define in db.globalDefines:
if db.globalDefines[define].type == "STRING":
if define in db.attributes:
db.attributes[define] = db.attributes[define][1:-1]
for define in db.buDefines:
if db.buDefines[define].type == "STRING":
for ecu in db.boardUnits:
if define in ecu.attributes:
ecu.attributes[define] = ecu.attributes[define][1:-1]
for define in db.frameDefines:
if db.frameDefines[define].type == "STRING":
for frame in db.frames:
if define in frame.attributes:
frame.attributes[define] = frame.attributes[define][1:-1]
for define in db.signalDefines:
if db.signalDefines[define].type == "STRING":
for frame in db.frames:
for signal in frame.signals:
if define in signal.attributes:
signal.attributes[define] = signal.attributes[define][1:-1]
db.EnumAttribs2Values()
db.updateEcuList()
db.delEcu("Vector__XXX")
return db
|
52,454 |
def test_ngram_sizes(en_tokenizer):
# test that the range suggester works well
size_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2, 3])
range_suggester = registry.misc.get("spacy.ngram_range_suggester.v1")(
min_size=1, max_size=3
)
docs = [
en_tokenizer(text) for text in ["a", "a b", "a b c", "a b c d", "a b c d e"]
]
ngrams_1 = size_suggester(docs)
ngrams_2 = range_suggester(docs)
assert_equal(ngrams_1.lengths, [1, 3, 6, 9, 12])
assert_equal(ngrams_1.lengths, ngrams_2.lengths)
assert_equal(ngrams_1.data, ngrams_2.data)
# one more variation
range_suggester = registry.misc.get("spacy.ngram_range_suggester.v1")(
min_size=2, max_size=4
)
ngrams_3 = range_suggester(docs)
assert_equal(ngrams_3.lengths, [0, 1, 3, 6, 9])
|
def test_ngram_sizes(en_tokenizer):
# test that the range suggester works well
size_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2, 3])
suggester_factory = registry.misc.get("spacy.ngram_range_suggester.v1")
range_suggester = suggester_factory(min_size=1, max_size=3)
docs = [
en_tokenizer(text) for text in ["a", "a b", "a b c", "a b c d", "a b c d e"]
]
ngrams_1 = size_suggester(docs)
ngrams_2 = range_suggester(docs)
assert_equal(ngrams_1.lengths, [1, 3, 6, 9, 12])
assert_equal(ngrams_1.lengths, ngrams_2.lengths)
assert_equal(ngrams_1.data, ngrams_2.data)
# one more variation
range_suggester = registry.misc.get("spacy.ngram_range_suggester.v1")(
min_size=2, max_size=4
)
ngrams_3 = range_suggester(docs)
assert_equal(ngrams_3.lengths, [0, 1, 3, 6, 9])
|
55,724 |
def imread(filename: str):
"""custom imaplementation of imread to avoid skimage dependecy"""
ext = os.path.splitext(filename)[1]
if ext in [".tif", "tiff", ".lsm"]:
import tifffile
image = tifffile.imread(filename)
else:
import imageio
image = imageio.imread(filename)
if not hasattr(image, 'ndim'):
return image
if image.ndim > 2:
if image.shape[-1] not in (3, 4) and image.shape[-3] in (3, 4):
image = np.swapaxes(image, -1, -3)
image = np.swapaxes(image, -2, -3)
return image
|
def imread(filename: str):
"""Custom implementation of imread to avoid skimage dependency.
Parameters
----------
filename : string
The path from which to read the image.
Returns
-------
data : np.ndarray
The image data.
"""
ext = os.path.splitext(filename)[1]
if ext in [".tif", "tiff", ".lsm"]:
import tifffile
image = tifffile.imread(filename)
else:
import imageio
image = imageio.imread(filename)
if not hasattr(image, 'ndim'):
return image
if image.ndim > 2:
if image.shape[-1] not in (3, 4) and image.shape[-3] in (3, 4):
image = np.swapaxes(image, -1, -3)
image = np.swapaxes(image, -2, -3)
return image
|
33,502 |
def delete_and_await_stack(**kwargs):
cloudformation = aws_stack.connect_to_service("cloudformation")
response = cloudformation.delete_stack(**kwargs)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
result = await_stack_completion(kwargs["StackName"])
return result
|
def delete_and_await_stack(**kwargs):
cloudformation = aws_stack.connect_to_service("cloudformation")
response = cloudformation.delete_stack(**kwargs)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
return await_stack_completion(kwargs["StackName"])
|
38,512 |
def star_shape_cell_centers(g: "pp.Grid", as_nan: bool = False) -> np.ndarray:
"""
For a given grid compute the star shape center for each cell.
The algorithm computes the half space intersections, by using the above method
half_space_pt,
of the spaces defined by the cell faces and the face normals.
This is a wrapper method that operate on a grid.
Parameters
----------
g: pp.Grid
the grid
as_nan: bool, optional
Decide whether, in case some cells are not star-shaped return nan as
new center. Otherwise an exception is raised (default behaviour).
Returns
-------
np.ndarray
The new cell centers.
"""
# no need for 1d or 0d grids
if g.dim < 2:
return g.cell_centers
# retrieve the faces and nodes
faces, _, sgn = sps.find(g.cell_faces)
nodes, _, _ = sps.find(g.face_nodes)
# shift the nodes close to the origin, to avoid numerical problems when coordinates are
# too big
xn = g.nodes.copy()
xn_shift = np.average(xn, axis=1)
xn -= np.tile(xn_shift, (xn.shape[1], 1)).T
# compute the star shape cell centers by constructing the half spaces of each cell
# given by its faces and related normals
cell_centers = np.zeros((3, g.num_cells))
for c in np.arange(g.num_cells):
loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
faces_loc = faces[loc]
loc_n = g.face_nodes.indptr[faces_loc]
# make the normals coherent
normal = np.multiply(
sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc])
)
x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]]
coords = np.concatenate((x0, x1), axis=1)
# compute a point in the half space intersection of all cell faces
try:
cell_centers[:, c] = pp.half_space.half_space_interior_point(
normal, (x1 + x0) / 2.0, coords
)
except ValueError:
# the cell is not star-shaped
if as_nan:
cell_centers[:, c] = np.array([np.nan, np.nan, np.nan])
else:
raise ValueError(
"Cell not star-shaped impossible to compute the centre"
)
# shift back the computed cell centers and return them
return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T
|
def star_shape_cell_centers(g: "pp.Grid", as_nan: bool = False) -> np.ndarray:
"""
For a given grid compute the star shape center for each cell.
The algorithm computes the half space intersections, by using the above method
half_space_pt,
of the spaces defined by the cell faces and the face normals.
This is a wrapper method that operates on a grid.
Parameters
----------
g: pp.Grid
the grid
as_nan: bool, optional
Decide whether, in case some cells are not star-shaped return nan as
new center. Otherwise an exception is raised (default behaviour).
Returns
-------
np.ndarray
The new cell centers.
"""
# no need for 1d or 0d grids
if g.dim < 2:
return g.cell_centers
# retrieve the faces and nodes
faces, _, sgn = sps.find(g.cell_faces)
nodes, _, _ = sps.find(g.face_nodes)
# shift the nodes close to the origin, to avoid numerical problems when coordinates are
# too big
xn = g.nodes.copy()
xn_shift = np.average(xn, axis=1)
xn -= np.tile(xn_shift, (xn.shape[1], 1)).T
# compute the star shape cell centers by constructing the half spaces of each cell
# given by its faces and related normals
cell_centers = np.zeros((3, g.num_cells))
for c in np.arange(g.num_cells):
loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
faces_loc = faces[loc]
loc_n = g.face_nodes.indptr[faces_loc]
# make the normals coherent
normal = np.multiply(
sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc])
)
x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]]
coords = np.concatenate((x0, x1), axis=1)
# compute a point in the half space intersection of all cell faces
try:
cell_centers[:, c] = pp.half_space.half_space_interior_point(
normal, (x1 + x0) / 2.0, coords
)
except ValueError:
# the cell is not star-shaped
if as_nan:
cell_centers[:, c] = np.array([np.nan, np.nan, np.nan])
else:
raise ValueError(
"Cell not star-shaped impossible to compute the centre"
)
# shift back the computed cell centers and return them
return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T
|
28,555 |
def plot_bpv(
data,
kind="u_value",
t_stat="median",
bpv=True,
mean=True,
reference="samples",
n_ref=100,
hdi_prob=0.94,
color="C0",
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
ax=None,
backend=None,
plot_ref_kwargs=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Parameters
----------
data : az.InferenceData object
InferenceData object containing the observed and posterior/prior predictive data.
kind : str
Type of plot to display (u_value, p_value, t_stat). Defaults to u_value.
t_stat : str, float, or callable
T statistics to compute from the observations and predictive distributions. Allowed strings
are "mean", "median" or "std". Defaults to "median". Alternative a quantile can be passed
as a float (or str) in the interval (0, 1). Finally a user defined function is also
acepted, see examples section for details.
bpv : bool
If True (default) add the bayesian p_value to the legend when kind = t_stat.
mean : bool
Whether or not to plot the mean T statistic. Defaults to True.
reference : str
How to compute the distributions used as reference for u_values or p_values. Allowed values
are "analytical" (default) and "samples". Use `None` to do not plot any reference.
n_ref : int, optional
Number of reference distributions to sample when `reference=samples`
hdi_prob: float, optional
Probability for the highest density interval for the analytical reference distribution when
computing u_values. Should be in the interval (0, 1]. Defaults to
0.94.
color : str
Matplotlib color
figsize : tuple
Figure size. If None it will be defined automatically.
textsize : float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs : dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names : list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the variables by `~`
when you want to exclude them from the plot.
filter_vars : {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
legend : bool
Add legend to figure. By default True.
ax : numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend : str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
plot_ref_kwargs : dict, optional
Extra keyword arguments to control how reference is represented. Passed to `plt.plot` or
`plt.axhspan`(when `kind=u_value` and `reference=analytical`).
backend_kwargs : bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
group : {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show : bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
Plot Bayesian p_values.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="p_value")
Plot custom t statistic comparison.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="t_stat", t_stat=lambda x:np.percentile(x, q=50, axis=-1))
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in ("{}_predictive".format(group), "observed_data"):
if not hasattr(data, groups):
raise TypeError('`data` argument must have the group "{group}"'.format(group=groups))
if kind.lower() not in ("t_stat", "u_value", "p_value"):
raise TypeError("`kind` argument must be either `t_stat`, `u_value`, or `p_value`")
if reference is not None:
if reference.lower() not in ("analytical", "samples"):
raise TypeError(
"`reference` argument must be either `analytical`, `samples`, or `None`"
)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
observed = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed.data_vars)
var_names = _var_names(var_names, observed, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
),
"plot_t_stats",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters)
(figsize, ax_labelsize, _, _, linewidth, markersize) = _scale_fig_size(
figsize, textsize, rows, cols
)
if plot_ref_kwargs is None:
plot_ref_kwargs = {}
if kind == "p_value" and reference == "analytical":
plot_ref_kwargs.setdefault("color", "k")
plot_ref_kwargs.setdefault("linestyle", "--")
else:
plot_ref_kwargs.setdefault("alpha", 0.1)
plot_ref_kwargs.setdefault("color", color)
if backend == "bokeh":
color = to_hex(color)
plot_ref_kwargs.pop("color")
if kind == "p_value" and reference == "analytical":
plot_ref_kwargs.pop("linestyle")
plot_ref_kwargs.setdefault("line_dash", "dashed")
plot_ref_kwargs.setdefault("color", "black")
else:
plot_ref_kwargs.setdefault("color", color)
bpvplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
total_pp_samples=total_pp_samples,
kind=kind,
bpv=bpv,
t_stat=t_stat,
reference=reference,
n_ref=n_ref,
hdi_prob=hdi_prob,
mean=mean,
color=color,
figsize=figsize,
ax_labelsize=ax_labelsize,
markersize=markersize,
linewidth=linewidth,
plot_ref_kwargs=plot_ref_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_bpv", "bpvplot", backend)
axes = plot(**bpvplot_kwargs)
return axes
|
def plot_bpv(
data,
kind="u_value",
t_stat="median",
bpv=True,
mean=True,
reference="samples",
n_ref=100,
hdi_prob=0.94,
color="C0",
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
ax=None,
backend=None,
plot_ref_kwargs=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Parameters
----------
data : az.InferenceData object
InferenceData object containing the observed and posterior/prior predictive data.
kind : str
Type of plot to display (u_value, p_value, t_stat). Defaults to u_value.
t_stat : str, float, or callable
T statistics to compute from the observations and predictive distributions. Allowed strings
are "mean", "median" or "std". Defaults to "median". Alternative a quantile can be passed
as a float (or str) in the interval (0, 1). Finally a user defined function is also
acepted, see examples section for details.
bpv : bool
If True (default) add the bayesian p_value to the legend when kind = t_stat.
mean : bool
Whether or not to plot the mean T statistic. Defaults to True.
reference : str
How to compute the distributions used as reference for u_values or p_values. Allowed values
are "analytical" (default) and "samples". Use `None` to do not plot any reference.
n_ref : int, optional
Number of reference distributions to sample when `reference=samples`. Defaults to 100.
hdi_prob: float, optional
Probability for the highest density interval for the analytical reference distribution when
computing u_values. Should be in the interval (0, 1]. Defaults to
0.94.
color : str
Matplotlib color
figsize : tuple
Figure size. If None it will be defined automatically.
textsize : float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs : dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names : list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the variables by `~`
when you want to exclude them from the plot.
filter_vars : {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
legend : bool
Add legend to figure. By default True.
ax : numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend : str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
plot_ref_kwargs : dict, optional
Extra keyword arguments to control how reference is represented. Passed to `plt.plot` or
`plt.axhspan`(when `kind=u_value` and `reference=analytical`).
backend_kwargs : bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
group : {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show : bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
Plot Bayesian p_values.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="p_value")
Plot custom t statistic comparison.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="t_stat", t_stat=lambda x:np.percentile(x, q=50, axis=-1))
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in ("{}_predictive".format(group), "observed_data"):
if not hasattr(data, groups):
raise TypeError('`data` argument must have the group "{group}"'.format(group=groups))
if kind.lower() not in ("t_stat", "u_value", "p_value"):
raise TypeError("`kind` argument must be either `t_stat`, `u_value`, or `p_value`")
if reference is not None:
if reference.lower() not in ("analytical", "samples"):
raise TypeError(
"`reference` argument must be either `analytical`, `samples`, or `None`"
)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
observed = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed.data_vars)
var_names = _var_names(var_names, observed, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
),
"plot_t_stats",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters)
(figsize, ax_labelsize, _, _, linewidth, markersize) = _scale_fig_size(
figsize, textsize, rows, cols
)
if plot_ref_kwargs is None:
plot_ref_kwargs = {}
if kind == "p_value" and reference == "analytical":
plot_ref_kwargs.setdefault("color", "k")
plot_ref_kwargs.setdefault("linestyle", "--")
else:
plot_ref_kwargs.setdefault("alpha", 0.1)
plot_ref_kwargs.setdefault("color", color)
if backend == "bokeh":
color = to_hex(color)
plot_ref_kwargs.pop("color")
if kind == "p_value" and reference == "analytical":
plot_ref_kwargs.pop("linestyle")
plot_ref_kwargs.setdefault("line_dash", "dashed")
plot_ref_kwargs.setdefault("color", "black")
else:
plot_ref_kwargs.setdefault("color", color)
bpvplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
total_pp_samples=total_pp_samples,
kind=kind,
bpv=bpv,
t_stat=t_stat,
reference=reference,
n_ref=n_ref,
hdi_prob=hdi_prob,
mean=mean,
color=color,
figsize=figsize,
ax_labelsize=ax_labelsize,
markersize=markersize,
linewidth=linewidth,
plot_ref_kwargs=plot_ref_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_bpv", "bpvplot", backend)
axes = plot(**bpvplot_kwargs)
return axes
|
3,407 |
def _fetch_tags_or_values_per_ids(
projects: Sequence[Project],
metric_names: Optional[Sequence[str]],
referrer: str,
column: str = "tags.key",
) -> Tuple[Sequence[Union[Tag, TagValue]], Optional[str]]:
"""
Function that takes as input projects, metric_names, and a column, and based on the column
selection, either returns tags or tag values for the combination of projects and metric_names
selected or in the case of no metric_names passed, returns basically all the tags or the tag
values available for those projects. In addition, when exactly one metric name is passed in
metric_names, then the type (i.e. mapping to the entity) is also returned
"""
try:
metric_ids = _get_metrics_filter_ids(metric_names)
except MetricDoesNotExistInIndexer:
raise InvalidParams(
f"Some or all of the metric names in {metric_names} do not exist in the indexer"
)
else:
where = [Condition(Column("metric_id"), Op.IN, list(metric_ids))] if metric_ids else []
tag_or_value_ids_per_metric_id = defaultdict(list)
# This dictionary is required as a mapping from an entity to the ids available in it to
# validate that constituent metrics of a SingleEntityDerivedMetric actually span a single
# entity by validating that the ids of the constituent metrics all lie in the same entity
supported_metric_ids_in_entities = {}
for metric_type in ("counter", "set", "distribution"):
entity_key = METRIC_TYPE_TO_ENTITY[metric_type]
rows = run_metrics_query(
entity_key=entity_key,
select=[Column("metric_id"), Column(column)],
where=where,
groupby=[Column("metric_id"), Column(column)],
referrer=referrer,
projects=projects,
org_id=projects[0].organization_id,
)
for row in rows:
metric_id = row["metric_id"]
if column.startswith("tags["):
value_id = row[column]
if value_id > 0:
tag_or_value_ids_per_metric_id[metric_id].append(value_id)
else:
tag_or_value_ids_per_metric_id[metric_id].extend(row[column])
supported_metric_ids_in_entities.setdefault(metric_type, []).append(row["metric_id"])
# If we get not results back from snuba, then just return an empty set
if not tag_or_value_ids_per_metric_id:
if metric_names:
error_str = f"The following metrics {metric_names} do not exist in the dataset"
else:
error_str = "Dataset contains no metric data for your project selection"
raise InvalidParams(error_str)
tag_or_value_id_lists = tag_or_value_ids_per_metric_id.values()
if metric_names:
# If there are metric_ids that map to the metric_names provided as an arg that were not
# found in the dataset, then we raise an instance of InvalidParams exception
if metric_ids != set(tag_or_value_ids_per_metric_id.keys()):
# This can occur for metric names that don't have an equivalent in the dataset.
raise InvalidParams(
f"Not all the requested metrics or the constituent metrics in {metric_names} have "
f"data in the dataset"
)
# At this point, we are sure that every metric_name/metric_id that was requested is
# present in the dataset, and now we need to check that for all derived metrics requested
# (if any are requested) are setup correctly i.e. constituent of
# SingularEntityDerivedMetric actually span a single entity
_validate_requested_derived_metrics_in_input_metrics(
metric_names=metric_names,
supported_metric_ids_in_entities=supported_metric_ids_in_entities,
)
# Only return tags/tag values that occur in all metrics
tag_or_value_ids = set.intersection(*map(set, tag_or_value_id_lists))
else:
tag_or_value_ids = {tag_id for ids in tag_or_value_id_lists for tag_id in ids}
if column.startswith("tags["):
tag_id = column.split("tags[")[1].split("]")[0]
tags_or_values = [
{"key": reverse_resolve(int(tag_id)), "value": reverse_resolve(value_id)}
for value_id in tag_or_value_ids
]
tags_or_values.sort(key=lambda tag: (tag["key"], tag["value"]))
else:
tags_or_values = [{"key": reverse_resolve(tag_id)} for tag_id in tag_or_value_ids]
tags_or_values.sort(key=itemgetter("key"))
if metric_names and len(metric_names) == 1:
metric_type = list(supported_metric_ids_in_entities.keys())[0]
return tags_or_values, metric_type
return tags_or_values, None
|
def _fetch_tags_or_values_per_ids(
projects: Sequence[Project],
metric_names: Optional[Sequence[str]],
referrer: str,
column: str = "tags.key",
) -> Tuple[Union[Sequence[Tag], Sequence[TagValue]], Optional[str]]:
"""
Function that takes as input projects, metric_names, and a column, and based on the column
selection, either returns tags or tag values for the combination of projects and metric_names
selected or in the case of no metric_names passed, returns basically all the tags or the tag
values available for those projects. In addition, when exactly one metric name is passed in
metric_names, then the type (i.e. mapping to the entity) is also returned
"""
try:
metric_ids = _get_metrics_filter_ids(metric_names)
except MetricDoesNotExistInIndexer:
raise InvalidParams(
f"Some or all of the metric names in {metric_names} do not exist in the indexer"
)
else:
where = [Condition(Column("metric_id"), Op.IN, list(metric_ids))] if metric_ids else []
tag_or_value_ids_per_metric_id = defaultdict(list)
# This dictionary is required as a mapping from an entity to the ids available in it to
# validate that constituent metrics of a SingleEntityDerivedMetric actually span a single
# entity by validating that the ids of the constituent metrics all lie in the same entity
supported_metric_ids_in_entities = {}
for metric_type in ("counter", "set", "distribution"):
entity_key = METRIC_TYPE_TO_ENTITY[metric_type]
rows = run_metrics_query(
entity_key=entity_key,
select=[Column("metric_id"), Column(column)],
where=where,
groupby=[Column("metric_id"), Column(column)],
referrer=referrer,
projects=projects,
org_id=projects[0].organization_id,
)
for row in rows:
metric_id = row["metric_id"]
if column.startswith("tags["):
value_id = row[column]
if value_id > 0:
tag_or_value_ids_per_metric_id[metric_id].append(value_id)
else:
tag_or_value_ids_per_metric_id[metric_id].extend(row[column])
supported_metric_ids_in_entities.setdefault(metric_type, []).append(row["metric_id"])
# If we get not results back from snuba, then just return an empty set
if not tag_or_value_ids_per_metric_id:
if metric_names:
error_str = f"The following metrics {metric_names} do not exist in the dataset"
else:
error_str = "Dataset contains no metric data for your project selection"
raise InvalidParams(error_str)
tag_or_value_id_lists = tag_or_value_ids_per_metric_id.values()
if metric_names:
# If there are metric_ids that map to the metric_names provided as an arg that were not
# found in the dataset, then we raise an instance of InvalidParams exception
if metric_ids != set(tag_or_value_ids_per_metric_id.keys()):
# This can occur for metric names that don't have an equivalent in the dataset.
raise InvalidParams(
f"Not all the requested metrics or the constituent metrics in {metric_names} have "
f"data in the dataset"
)
# At this point, we are sure that every metric_name/metric_id that was requested is
# present in the dataset, and now we need to check that for all derived metrics requested
# (if any are requested) are setup correctly i.e. constituent of
# SingularEntityDerivedMetric actually span a single entity
_validate_requested_derived_metrics_in_input_metrics(
metric_names=metric_names,
supported_metric_ids_in_entities=supported_metric_ids_in_entities,
)
# Only return tags/tag values that occur in all metrics
tag_or_value_ids = set.intersection(*map(set, tag_or_value_id_lists))
else:
tag_or_value_ids = {tag_id for ids in tag_or_value_id_lists for tag_id in ids}
if column.startswith("tags["):
tag_id = column.split("tags[")[1].split("]")[0]
tags_or_values = [
{"key": reverse_resolve(int(tag_id)), "value": reverse_resolve(value_id)}
for value_id in tag_or_value_ids
]
tags_or_values.sort(key=lambda tag: (tag["key"], tag["value"]))
else:
tags_or_values = [{"key": reverse_resolve(tag_id)} for tag_id in tag_or_value_ids]
tags_or_values.sort(key=itemgetter("key"))
if metric_names and len(metric_names) == 1:
metric_type = list(supported_metric_ids_in_entities.keys())[0]
return tags_or_values, metric_type
return tags_or_values, None
|
17,297 |
def _async_get_imported_entries_indices(
current_entries: list[ConfigEntry],
) -> tuple[dict[str, ConfigEntry], dict[int, ConfigEntry]]:
"""Return a dict of the entries by name."""
# For backwards compat, its possible the first bridge is using the default
# name.
entries_by_name: dict[str, ConfigEntry] = {}
entries_by_port: dict[int, ConfigEntry] = {}
for entry in current_entries:
if entry.source != SOURCE_IMPORT:
continue
entries_by_name[entry.data.get(CONF_NAME, BRIDGE_NAME)] = entry
entries_by_port[entry.data.get(CONF_PORT, DEFAULT_PORT)] = entry
return entries_by_name, entries_by_port
|
def _async_get_imported_entries_indices(
current_entries: list[ConfigEntry],
) -> tuple[dict[str, ConfigEntry], dict[int, ConfigEntry]]:
"""Return a dicts of the entries by name and port."""
# For backwards compat, its possible the first bridge is using the default
# name.
entries_by_name: dict[str, ConfigEntry] = {}
entries_by_port: dict[int, ConfigEntry] = {}
for entry in current_entries:
if entry.source != SOURCE_IMPORT:
continue
entries_by_name[entry.data.get(CONF_NAME, BRIDGE_NAME)] = entry
entries_by_port[entry.data.get(CONF_PORT, DEFAULT_PORT)] = entry
return entries_by_name, entries_by_port
|
5,839 |
def directionalmean(samples, axis=0, nan_policy='propagate'):
"""
Computes the directional mean of a sample of vectors.
Serves as equivalent of the sample mean for directional data whose
magnitude is irrelevant, e. g. unit vectors.
Parameters
----------
samples : array_like
Input array. Must at least be two-dimensional.
axis : int, optional
Axis along which directional means are computed. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
directionalmean : ndarray
Directional mean.
Notes
-----
This uses a definition of directional mean from [1]_.
Essentially, the calculation is as follows.
.. code-block:: python
mean=samples.mean()
directionalmean = mean/np.linalg.norm(mean)
References
----------
.. [1] Mardia, Jupp. (2000). *Directional Statistics*
(p. 163). Wiley.
Examples
--------
>>> data = np.array([[0.8660254, 0.5, 0.], [0.8660254, -0.5, 0.]])
>>> directionalmean(data)
array([1., 0., 0.])
The `regular`sample mean in contrast does not lie on the unit sphere.
>>> data.mean(axis=0)
array([0.8660254, 0., 0.])
"""
samples = np.asarray(samples)
if samples.ndim < 2:
raise ValueError("samples must at least be two-dimensional. "
"Instead samples has shape: %r." % samples.shape)
contains_nan, nan_policy = _contains_nan(samples, nan_policy)
if contains_nan and nan_policy == 'omit':
mean = np.nanmean(samples, axis = axis)
else:
mean = np.mean(samples, axis = axis)
directional_mean = mean/np.linalg.norm(mean)
return directional_mean
|
def directionalmean(samples, axis=0, nan_policy='propagate'):
"""
Computes the directional mean of a sample of vectors.
Serves as equivalent of the sample mean for directional data whose
magnitude is irrelevant, e. g. unit vectors.
Parameters
----------
samples : array_like
Input array. Must at least be two-dimensional.
axis : int, optional
Axis along which directional means are computed. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
directionalmean : ndarray
Directional mean.
Notes
-----
This uses a definition of directional mean from [1]_.
Essentially, the calculation is as follows.
.. code-block:: python
mean=samples.mean()
directionalmean = mean/np.linalg.norm(mean)
References
----------
.. [1] Mardia, Jupp. (2000). *Directional Statistics*
(p. 163). Wiley.
Examples
--------
>>> data = np.array([[0.8660254, 0.5, 0.], [0.8660254, -0.5, 0.]])
>>> directionalmean(data)
array([1., 0., 0.])
The `regular`sample mean in contrast does not lie on the unit sphere.
>>> data.mean(axis=0)
array([0.8660254, 0., 0.])
"""
samples = np.asarray(samples)
if samples.ndim < 2:
raise ValueError("samples must at least be two-dimensional. "
"Instead samples has shape: %r." % samples.shape)
contains_nan, nan_policy = _contains_nan(samples, nan_policy)
if contains_nan and nan_policy == 'omit':
mean = np.nanmean(samples, axis = axis)
else:
mean = np.mean(samples, axis = axis)
directional_mean = mean / np.linalg.norm(mean)
return directional_mean
|
18,860 |
def test_internal_config_cache():
config = spack.config.Configuration()
config.push_scope(spack.config.InternalConfigScope('internal', {
'config': {
'build_jobs': 10
}
}))
config.clear_caches()
assert config.get('config:build_jobs') == 10
|
def test_internal_config_scope_cache_clearing():
config = spack.config.Configuration()
config.push_scope(spack.config.InternalConfigScope('internal', {
'config': {
'build_jobs': 10
}
}))
config.clear_caches()
assert config.get('config:build_jobs') == 10
|
27,952 |
def perform_build_command(logfile, command, context, keep_link, silent=False):
"""
Build the project and create a log file.
"""
LOG.info("Starting build ...")
try:
original_env_file = os.environ['CODECHECKER_ORIGINAL_BUILD_ENV']
LOG.debug_analyzer('Loading original build env from: %s',
original_env_file)
with open(original_env_file, 'rb') as env_file:
original_env = pickle.load(env_file)
# The env is loaded we do not need it anymore.
# Remove temporary directory.
try:
tmp_dir, _ = os.path.split(original_env_file)
shutil.rmtree(tmp_dir)
except OSError as ex:
if ex.errno != errno.ENOENT:
LOG.warning('Failed to remove temporary directory: %s '
'Manual cleanup is required.', tmp_dir)
except Exception as ex:
LOG.warning(str(ex))
LOG.warning('Failed to get saved original_env'
'using a current copy for logging.')
original_env = os.environ.copy()
# Run user's commands with intercept.
if host_check.check_intercept(original_env):
LOG.debug_analyzer("with intercept ...")
final_command = command
command = ' '.join(["intercept-build",
"--cdb", logfile,
"sh -c \"" + final_command + "\""])
log_env = original_env
LOG.debug_analyzer(command)
# Run user's commands in shell.
else:
# TODO: better platform detection.
if platform.system() == 'Linux':
LOG.debug_analyzer("with ld logger ...")
open(logfile, 'a').close() # Same as linux's touch.
log_env = env.get_log_env(logfile, context, original_env)
if 'CC_LOGGER_GCC_LIKE' not in log_env:
log_env['CC_LOGGER_GCC_LIKE'] = 'gcc:g++:clang:clang++:cc:c++'
if keep_link or ('CC_LOGGER_KEEP_LINK' in log_env and
log_env['CC_LOGGER_KEEP_LINK'] == 'true'):
log_env['CC_LOGGER_KEEP_LINK'] = 'true'
else:
LOG.error("Intercept-build is required"
" to run CodeChecker in OS X.")
sys.exit(1)
LOG.debug_analyzer(log_env)
try:
ret_code = execute_buildcmd(command, silent, log_env)
if ret_code == 0:
LOG.info("Build finished successfully.")
LOG.debug_analyzer("The logfile is: %s", logfile)
else:
LOG.info("Build failed.")
sys.exit(ret_code)
except Exception as ex:
LOG.error("Calling original build command failed.")
LOG.error(str(ex))
sys.exit(1)
finally:
# Removing flock lock file.
logfile_lock = logfile + '.lock'
if os.path.exists(logfile_lock):
os.remove(logfile_lock)
|
def perform_build_command(logfile, command, context, keep_link, silent=False):
"""
Build the project and create a log file.
"""
LOG.info("Starting build ...")
try:
original_env_file = os.environ['CODECHECKER_ORIGINAL_BUILD_ENV']
LOG.debug_analyzer('Loading original build env from: %s',
original_env_file)
with open(original_env_file, 'rb') as env_file:
original_env = pickle.load(env_file)
# The env is loaded we do not need it anymore.
# Remove temporary directory.
try:
tmp_dir, _ = os.path.split(original_env_file)
shutil.rmtree(tmp_dir)
except OSError as ex:
if ex.errno != errno.ENOENT:
LOG.warning('Failed to remove temporary directory: %s. '
'Manual cleanup is required.', tmp_dir)
except Exception as ex:
LOG.warning(str(ex))
LOG.warning('Failed to get saved original_env'
'using a current copy for logging.')
original_env = os.environ.copy()
# Run user's commands with intercept.
if host_check.check_intercept(original_env):
LOG.debug_analyzer("with intercept ...")
final_command = command
command = ' '.join(["intercept-build",
"--cdb", logfile,
"sh -c \"" + final_command + "\""])
log_env = original_env
LOG.debug_analyzer(command)
# Run user's commands in shell.
else:
# TODO: better platform detection.
if platform.system() == 'Linux':
LOG.debug_analyzer("with ld logger ...")
open(logfile, 'a').close() # Same as linux's touch.
log_env = env.get_log_env(logfile, context, original_env)
if 'CC_LOGGER_GCC_LIKE' not in log_env:
log_env['CC_LOGGER_GCC_LIKE'] = 'gcc:g++:clang:clang++:cc:c++'
if keep_link or ('CC_LOGGER_KEEP_LINK' in log_env and
log_env['CC_LOGGER_KEEP_LINK'] == 'true'):
log_env['CC_LOGGER_KEEP_LINK'] = 'true'
else:
LOG.error("Intercept-build is required"
" to run CodeChecker in OS X.")
sys.exit(1)
LOG.debug_analyzer(log_env)
try:
ret_code = execute_buildcmd(command, silent, log_env)
if ret_code == 0:
LOG.info("Build finished successfully.")
LOG.debug_analyzer("The logfile is: %s", logfile)
else:
LOG.info("Build failed.")
sys.exit(ret_code)
except Exception as ex:
LOG.error("Calling original build command failed.")
LOG.error(str(ex))
sys.exit(1)
finally:
# Removing flock lock file.
logfile_lock = logfile + '.lock'
if os.path.exists(logfile_lock):
os.remove(logfile_lock)
|
39,987 |
def _use_external_script_type(client) -> bool:
if client.features.model == "1" and client.version > (1, 10, 5):
return True
if client.features.model == "T" and client.version > (2, 4, 3):
return True
return False
|
def _use_external_script_type(client) -> bool:
if client.features.model == "1" and client.version >= (1, 11, 1):
return True
if client.features.model == "T" and client.version > (2, 4, 3):
return True
return False
|
31,220 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
api_key = demisto.params().get('apikey')
fetch_type = demisto.params().get('fetch_type')
# get the service API url
base_url = urljoin(demisto.params()['url'], demisto.params()['api_version'])
# if your Client class inherits from BaseClient, SSL verification is
# handled out of the box by it, just pass ``verify_certificate`` to
# the Client constructor
verify_certificate = not demisto.params().get('insecure', False)
# if your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
return_results(result)
elif demisto.command() == 'gh-get-message':
return_results(gh_get_message_command(client, demisto.args()))
elif demisto.command() == 'gh-get-policy':
return_results(gh_get_policy_command(client, demisto.args()))
elif demisto.command() == 'gh-set-policy':
return_results(gh_set_policy_command(client, demisto.args()))
elif demisto.command() == 'gh-remediate-message':
return_results(gh_remediate_message_command(client, demisto.args()))
elif demisto.command() == 'gh-revert-remediate-message':
return_results(gh_revert_remediate_message_command(client, demisto.args()))
elif demisto.command() == 'gh-search-message':
return_results(gh_search_message_command(client, demisto.args()))
elif demisto.command() == 'fetch-incidents':
if fetch_type is not None:
last_run = demisto.getLastRun()
demisto.info("GOT LAST RUN: {}".format(last_run))
if not last_run.get("counter"):
counter = 0
else:
counter = int(last_run.get("counter"))
if counter % 3 == 0:
if fetch_type == "phishing":
incidents = gh_get_phish_reports_command(client)
elif fetch_type == "quarantine":
incidents = gh_get_quarantine_release_command(client)
else:
incidents_phish = gh_get_phish_reports_command(client)
incidents_quarantine = gh_get_quarantine_release_command(client)
incidents = incidents_phish
incidents.extend(incidents_quarantine)
demisto.incidents(incidents)
counter += 1
demisto.setLastRun({'max_phish_id': str(counter)})
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main():
"""main function, parses params and runs command functions
:return:
:rtype:
"""
api_key = demisto.params().get('apikey')
fetch_type = demisto.params().get('fetch_type')
# get the service API url
base_url = urljoin(demisto.params()['url'], demisto.params()['api_version'])
# if your Client class inherits from BaseClient, SSL verification is
# handled out of the box by it, just pass ``verify_certificate`` to
# the Client constructor
verify_certificate = not demisto.params().get('insecure', False)
# if your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
return_results(result)
elif demisto.command() == 'gh-get-message':
return_results(gh_get_message_command(client, demisto.args()))
elif demisto.command() == 'gh-get-policy':
return_results(gh_get_policy_command(client, demisto.args()))
elif demisto.command() == 'gh-set-policy':
return_results(gh_set_policy_command(client, demisto.args()))
elif demisto.command() == 'gh-remediate-message':
return_results(gh_remediate_message_command(client, demisto.args()))
elif demisto.command() == 'gh-revert-remediate-message':
return_results(gh_revert_remediate_message_command(client, demisto.args()))
elif demisto.command() == 'gh-search-message':
return_results(gh_search_message_command(client, demisto.args()))
elif demisto.command() == 'fetch-incidents':
if fetch_type is not None:
last_run = demisto.getLastRun()
demisto.info("GOT LAST RUN: {}".format(last_run))
if not last_run.get("counter"):
counter = 0
else:
counter = int(last_run.get("counter"))
if counter % 3 == 0:
if fetch_type == "phishing":
incidents = gh_get_phish_reports_command(client)
elif fetch_type == "quarantine":
incidents = gh_get_quarantine_release_command(client)
else:
incidents_phish = gh_get_phish_reports_command(client)
incidents_quarantine = gh_get_quarantine_release_command(client)
incidents = incidents_phish
incidents.extend(incidents_quarantine)
demisto.incidents(incidents)
counter += 1
demisto.setLastRun({'max_phish_id': str(counter)})
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
31,795 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
api_key = demisto.params().get('apikey')
# get the service API url
base_url = urljoin(demisto.params()['url'])
verify_certificate = not demisto.params().get('insecure', False)
feature_mapping = demisto.params().get('feature_mapping', None)
proxy = demisto.params().get('proxy', False)
default_job_id = demisto.params().get('default_job_id', -1)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
api_key=api_key,
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
default_job_id=default_job_id
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result_test = test_module(client, feature_mapping)
return_results(result_test)
elif demisto.command() == "arcanna-get-jobs":
result_get_jobs = get_jobs(client)
return_results(result_get_jobs)
elif demisto.command() == "arcanna-send-event":
result_send_event = post_event(client, demisto.args())
return_results(result_send_event)
elif demisto.command() == "arcanna-get-event-status":
result_get_event = get_event_status(client, demisto.args())
return_results(result_get_event)
elif demisto.command() == "arcanna-get-default-job-id":
result_get_default_id = get_default_job_id(client)
return_results(result_get_default_id)
elif demisto.command() == "arcanna-set-default-job-id":
result_set_default_id = set_default_job_id(client, demisto.args())
return_results(result_set_default_id)
elif demisto.command() == "arcanna-send-event-feedback":
result_send_feedback = send_event_feedback(client, feature_mapping, demisto.args())
return_results(result_send_feedback)
elif demisto.command() == "arcanna-send-bulk-events":
result_bulk = send_bulk_events(client, feature_mapping, demisto.args())
return_results(result_bulk)
elif demisto.command() == "arcanna-get-feedback-field":
result_feedback_field = get_feedback_field(demisto.params())
return_results(result_feedback_field)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
api_key = demisto.params().get('apikey')
# get the service API url
base_url = urljoin(demisto.params()['url'])
verify_certificate = not demisto.params().get('insecure', False)
feature_mapping = demisto.params().get('feature_mapping')
proxy = demisto.params().get('proxy', False)
default_job_id = demisto.params().get('default_job_id', -1)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
api_key=api_key,
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
default_job_id=default_job_id
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result_test = test_module(client, feature_mapping)
return_results(result_test)
elif demisto.command() == "arcanna-get-jobs":
result_get_jobs = get_jobs(client)
return_results(result_get_jobs)
elif demisto.command() == "arcanna-send-event":
result_send_event = post_event(client, demisto.args())
return_results(result_send_event)
elif demisto.command() == "arcanna-get-event-status":
result_get_event = get_event_status(client, demisto.args())
return_results(result_get_event)
elif demisto.command() == "arcanna-get-default-job-id":
result_get_default_id = get_default_job_id(client)
return_results(result_get_default_id)
elif demisto.command() == "arcanna-set-default-job-id":
result_set_default_id = set_default_job_id(client, demisto.args())
return_results(result_set_default_id)
elif demisto.command() == "arcanna-send-event-feedback":
result_send_feedback = send_event_feedback(client, feature_mapping, demisto.args())
return_results(result_send_feedback)
elif demisto.command() == "arcanna-send-bulk-events":
result_bulk = send_bulk_events(client, feature_mapping, demisto.args())
return_results(result_bulk)
elif demisto.command() == "arcanna-get-feedback-field":
result_feedback_field = get_feedback_field(demisto.params())
return_results(result_feedback_field)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
31,732 |
def get_employee_information_command(client, args):
email_address = str(args.get('email_address', ''))
response = client.get_employee_information_request(email_address)
command_results = CommandResults(
outputs_prefix='AbnormalSecurity.EmployeeDetails',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
|
def get_employee_information_command(client, args):
email_address = str(args.get('email_address', ''))
response = client.get_employee_information_request(email_address)
command_results = CommandResults(
outputs_prefix='AbnormalSecurity.Employee',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
|
30,993 |
def check_for_mirrors():
"""
Checks for newly created mirrors and handles the mirroring process
"""
integration_context = get_integration_context(SYNC_CONTEXT)
if integration_context.get('mirrors'):
mirrors = json.loads(integration_context['mirrors'])
updated_mirrors = []
updated_users = []
for mirror in mirrors:
if not mirror['mirrored']:
investigation_id = mirror['investigation_id']
demisto.info(f'Mirroring: {investigation_id}')
mirror = mirrors.pop(mirrors.index(mirror))
if mirror['mirror_to'] and mirror['mirror_direction'] and mirror['mirror_type']:
mirror_type = mirror['mirror_type']
auto_close = mirror['auto_close']
direction = mirror['mirror_direction']
channel_id = mirror['channel_id']
if isinstance(auto_close, str):
auto_close = bool(strtobool(auto_close))
users: List[Dict] = demisto.mirrorInvestigation(investigation_id,
f'{mirror_type}:{direction}', auto_close)
if mirror_type != 'none':
try:
invited_users = invite_to_mirrored_channel(channel_id, users)
updated_users.extend(invited_users)
except Exception as error:
demisto.error(f"There was an error while invite to mirror channel: {error}")
mirror['mirrored'] = True
updated_mirrors.append(mirror)
else:
demisto.info(f'Could not mirror {investigation_id}')
if updated_mirrors:
context = {'mirrors': updated_mirrors}
if updated_users:
context['users'] = updated_users
set_to_integration_context_with_retries(context, OBJECTS_TO_KEYS, SYNC_CONTEXT)
|
def check_for_mirrors():
"""
Checks for newly created mirrors and handles the mirroring process
"""
integration_context = get_integration_context(SYNC_CONTEXT)
if integration_context.get('mirrors'):
mirrors = json.loads(integration_context['mirrors'])
updated_mirrors = []
updated_users = []
for mirror in mirrors:
if not mirror['mirrored']:
investigation_id = mirror['investigation_id']
demisto.info(f'Mirroring: {investigation_id}')
mirror = mirrors.pop(mirrors.index(mirror))
if mirror['mirror_to'] and mirror['mirror_direction'] and mirror['mirror_type']:
mirror_type = mirror['mirror_type']
auto_close = mirror['auto_close']
direction = mirror['mirror_direction']
channel_id = mirror['channel_id']
if isinstance(auto_close, str):
auto_close = bool(strtobool(auto_close))
users: List[Dict] = demisto.mirrorInvestigation(investigation_id,
f'{mirror_type}:{direction}', auto_close)
if mirror_type != 'none':
try:
invited_users = invite_to_mirrored_channel(channel_id, users)
updated_users.extend(invited_users)
except Exception as error:
demisto.error(f"Could not invite investigation users to the mirrored channel: {error}")
mirror['mirrored'] = True
updated_mirrors.append(mirror)
else:
demisto.info(f'Could not mirror {investigation_id}')
if updated_mirrors:
context = {'mirrors': updated_mirrors}
if updated_users:
context['users'] = updated_users
set_to_integration_context_with_retries(context, OBJECTS_TO_KEYS, SYNC_CONTEXT)
|
39,617 |
def _compile_build_meta(build_lib, version, pg_config, runstate_dir,
shared_dir, version_suffix):
from edb.common import verutils
parsed_version = verutils.parse_version(version)
vertuple = list(parsed_version._asdict().values())
vertuple[2] = int(vertuple[2])
if version_suffix:
vertuple[4] = tuple(version_suffix.split('.'))
vertuple = tuple(vertuple)
pg_config_path = pathlib.Path(pg_config)
if not pg_config_path.is_absolute():
pg_config_path = f"_ROOT / {str(pg_config_path)!r}"
else:
pg_config_path = repr(str(pg_config_path))
runstate_dir_path = pathlib.Path(runstate_dir)
if not runstate_dir_path.is_absolute():
runstate_dir_path = f"_ROOT / {str(runstate_dir_path)!r}"
else:
runstate_dir_path = repr(str(runstate_dir_path))
shared_dir_path = pathlib.Path(shared_dir)
if not shared_dir_path.is_absolute():
shared_dir_path = f"_ROOT / {str(shared_dir_path)!r}"
else:
shared_dir_path = repr(str(shared_dir_path))
content = textwrap.dedent('''\
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# THIS FILE HAS BEEN AUTOMATICALLY GENERATED.
#
import pathlib
_ROOT = pathlib.Path(__file__).parent
PG_CONFIG_PATH = {pg_config_path}
RUNSTATE_DIR = {runstate_dir_path}
SHARED_DATA_DIR = {shared_dir_path}
VERSION = {version!r}
''').format(
version=vertuple,
pg_config_path=pg_config_path,
runstate_dir_path=runstate_dir_path,
shared_dir_path=shared_dir_path,
)
directory = build_lib / 'edb'
if not directory.exists():
directory.mkdir(parents=True)
with open(directory / '_buildmeta.py', 'w+t') as f:
f.write(content)
|
def _compile_build_meta(build_lib, version, pg_config, runstate_dir,
shared_dir, version_suffix):
from edb.common import verutils
parsed_version = verutils.parse_version(version)
vertuple = list(parsed_version._asdict().values())
vertuple[2] = int(vertuple[2])
if version_suffix:
vertuple[4] = tuple(version_suffix.split('.'))
vertuple = tuple(vertuple)
pg_config_path = pathlib.Path(pg_config)
if not pg_config_path.is_absolute():
pg_config_path = f"_ROOT / {str(pg_config_path)!r}"
else:
pg_config_path = repr(str(pg_config_path))
runstate_dir_path = pathlib.Path(runstate_dir)
if not runstate_dir_path.is_absolute():
runstate_dir_path = f"{_ROOT} / {str(runstate_dir_path)!r}"
else:
runstate_dir_path = repr(str(runstate_dir_path))
shared_dir_path = pathlib.Path(shared_dir)
if not shared_dir_path.is_absolute():
shared_dir_path = f"_ROOT / {str(shared_dir_path)!r}"
else:
shared_dir_path = repr(str(shared_dir_path))
content = textwrap.dedent('''\
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# THIS FILE HAS BEEN AUTOMATICALLY GENERATED.
#
import pathlib
_ROOT = pathlib.Path(__file__).parent
PG_CONFIG_PATH = {pg_config_path}
RUNSTATE_DIR = {runstate_dir_path}
SHARED_DATA_DIR = {shared_dir_path}
VERSION = {version!r}
''').format(
version=vertuple,
pg_config_path=pg_config_path,
runstate_dir_path=runstate_dir_path,
shared_dir_path=shared_dir_path,
)
directory = build_lib / 'edb'
if not directory.exists():
directory.mkdir(parents=True)
with open(directory / '_buildmeta.py', 'w+t') as f:
f.write(content)
|
23,649 |
def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.[degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
albedo : numeric, default 0.25
Surface albedo. [unitless]
surface_type : None or String, default None
Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for
the list of accepted values.
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
'airmass' is not provided, it is calculated usign the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
|
def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.[degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
albedo : numeric, default 0.25
Surface albedo. [unitless]
surface_type : None or String, default None
Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for
the list of accepted values.
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
Notes
-----
Models ``'haydavies'``, ``'reindl'``, or ``'perez'`` require ``dni_extra``. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
'airmass' is not provided, it is calculated usign the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
|
59,430 |
def schedule(circuits: Union[QuantumCircuit, List[QuantumCircuit]],
backend: Optional[BaseBackend] = None,
inst_map: Optional[InstructionScheduleMap] = None,
meas_map: Optional[List[List[int]]] = None,
method: Optional[Union[str, List[str]]] = None) -> Union[Schedule, List[Schedule]]:
"""
Schedule a circuit to a pulse ``Schedule``, using the backend, according to any specified
methods. Supported methods are documented in :py:mod:`qiskit.scheduler.schedule_circuit`.
Args:
circuits: The quantum circuit or circuits to translate
backend: A backend instance, which contains hardware-specific data required for scheduling
inst_map: Mapping of circuit operations to pulse schedules. If ``None``, defaults to the
``backend``\'s ``instruction_schedule_map``
meas_map: List of sets of qubits that must be measured together. If ``None``, defaults to
the ``backend``\'s ``meas_map``
method: Optionally specify a particular scheduling method
Returns:
A pulse ``Schedule`` that implements the input circuit
Raises:
QiskitError: If ``inst_map`` and ``meas_map`` are not passed and ``backend`` is not passed
"""
start_time = time()
if inst_map is None:
if backend is None:
raise QiskitError("Must supply either a backend or InstructionScheduleMap for "
"scheduling passes.")
if backend.defaults() is None:
raise QiskitError("The backend does not support pulse.")
inst_map = backend.defaults().instruction_schedule_map
if meas_map is None:
if backend is None:
raise QiskitError("Must supply either a backend or a meas_map for scheduling passes.")
meas_map = backend.configuration().meas_map
schedule_config = ScheduleConfig(inst_map=inst_map, meas_map=meas_map)
circuits = circuits if isinstance(circuits, list) else [circuits]
schedules = [schedule_circuit(circuit, schedule_config, method) for circuit in circuits]
end_time = time()
_log_schedule_time(start_time, end_time)
return schedules[0] if len(schedules) == 1 else schedules
|
def schedule(circuits: Union[QuantumCircuit, List[QuantumCircuit]],
backend: Optional[BaseBackend] = None,
inst_map: Optional[InstructionScheduleMap] = None,
meas_map: Optional[List[List[int]]] = None,
method: Optional[Union[str, List[str]]] = None) -> Union[Schedule, List[Schedule]]:
"""
Schedule a circuit to a pulse ``Schedule``, using the backend, according to any specified
methods. Supported methods are documented in :py:mod:`qiskit.scheduler.schedule_circuit`.
Args:
circuits: The quantum circuit or circuits to translate
backend: A backend instance, which contains hardware-specific data required for scheduling
inst_map: Mapping of circuit operations to pulse schedules. If ``None``, defaults to the
``backend``\'s ``instruction_schedule_map``
meas_map: List of sets of qubits that must be measured together. If ``None``, defaults to
the ``backend``\'s ``meas_map``
method: Optionally specify a particular scheduling method
Returns:
A pulse ``Schedule`` that implements the input circuit
Raises:
QiskitError: If ``inst_map`` and ``meas_map`` are not passed and ``backend`` is not passed
"""
start_time = time()
if inst_map is None:
if backend is None:
raise QiskitError("Must supply either a backend or InstructionScheduleMap for "
"scheduling passes.")
defaults = backend.defaults()
if defaults is None:
raise QiskitError("The backend defaults are unavailable. The backend may not support pulse.")
inst_map = defaults.instruction_schedule_map
if meas_map is None:
if backend is None:
raise QiskitError("Must supply either a backend or a meas_map for scheduling passes.")
meas_map = backend.configuration().meas_map
schedule_config = ScheduleConfig(inst_map=inst_map, meas_map=meas_map)
circuits = circuits if isinstance(circuits, list) else [circuits]
schedules = [schedule_circuit(circuit, schedule_config, method) for circuit in circuits]
end_time = time()
_log_schedule_time(start_time, end_time)
return schedules[0] if len(schedules) == 1 else schedules
|
13,299 |
def _project_docs(db, project_name=None, show_empty_releases=False):
queries = [Release.yanked.is_(False)]
if not show_empty_releases:
queries.append(Release.files)
releases_list = (
db.query(Release.id)
.filter(*queries)
.order_by(
Release.project_id,
Release.is_prerelease.nullslast(),
Release._pypi_ordering.desc(),
)
.distinct(Release.project_id)
)
if project_name:
releases_list = releases_list.join(Project).filter(Project.name == project_name)
releases_list = releases_list.subquery()
r = aliased(Release, name="r")
all_versions = (
db.query(func.array_agg(r.version))
.filter(r.project_id == Release.project_id)
.correlate(Release)
.as_scalar()
.label("all_versions")
)
classifiers = (
db.query(func.array_agg(Classifier.classifier))
.select_from(release_classifiers)
.join(Classifier, Classifier.id == release_classifiers.c.trove_id)
.filter(Release.id == release_classifiers.c.release_id)
.correlate(Release)
.as_scalar()
.label("classifiers")
)
release_data = (
db.query(
Description.raw.label("description"),
Release.version.label("latest_version"),
all_versions,
Release.author,
Release.author_email,
Release.maintainer,
Release.maintainer_email,
Release.home_page,
Release.summary,
Release.keywords,
Release.platform,
Release.download_url,
Release.created,
classifiers,
Project.normalized_name,
Project.name,
Project.zscore,
)
.select_from(releases_list)
.join(Release, Release.id == releases_list.c.id)
.join(Description)
.outerjoin(Release.project)
)
for release in windowed_query(release_data, Release.project_id, 50000):
p = ProjectDocument.from_db(release)
p._index = None
p.full_clean()
doc = p.to_dict(include_meta=True)
doc.pop("_index", None)
yield doc
|
def _project_docs(db, project_name=None):
releases_list = (
db.query(Release.id)
.filter(*queries)
.order_by(
Release.project_id,
Release.is_prerelease.nullslast(),
Release._pypi_ordering.desc(),
)
.distinct(Release.project_id)
)
if project_name:
releases_list = releases_list.join(Project).filter(Project.name == project_name)
releases_list = releases_list.subquery()
r = aliased(Release, name="r")
all_versions = (
db.query(func.array_agg(r.version))
.filter(r.project_id == Release.project_id)
.correlate(Release)
.as_scalar()
.label("all_versions")
)
classifiers = (
db.query(func.array_agg(Classifier.classifier))
.select_from(release_classifiers)
.join(Classifier, Classifier.id == release_classifiers.c.trove_id)
.filter(Release.id == release_classifiers.c.release_id)
.correlate(Release)
.as_scalar()
.label("classifiers")
)
release_data = (
db.query(
Description.raw.label("description"),
Release.version.label("latest_version"),
all_versions,
Release.author,
Release.author_email,
Release.maintainer,
Release.maintainer_email,
Release.home_page,
Release.summary,
Release.keywords,
Release.platform,
Release.download_url,
Release.created,
classifiers,
Project.normalized_name,
Project.name,
Project.zscore,
)
.select_from(releases_list)
.join(Release, Release.id == releases_list.c.id)
.join(Description)
.outerjoin(Release.project)
)
for release in windowed_query(release_data, Release.project_id, 50000):
p = ProjectDocument.from_db(release)
p._index = None
p.full_clean()
doc = p.to_dict(include_meta=True)
doc.pop("_index", None)
yield doc
|
2,983 |
def test_groupby_crash_on_nunique(axis):
# Fix following 30253
df = pd.DataFrame(
{("A", "B"): [1, 2], ("A", "C"): [1, 3], ("D", "B"): [0, 0]},
index=pd.bdate_range("20191212", "20191213"),
)
df.columns.names = ["COL1", "COL2"]
axis_number = df._get_axis_number(axis)
if not axis_number:
df = df.T
nunique_t = df.T.groupby(level=0, axis=axis).nunique().T
nunique = df.groupby(axis=int(not axis_number), level=0).nunique()
pd.testing.assert_frame_equal(nunique_t, nunique)
|
def test_groupby_crash_on_nunique(axis):
# Fix following 30253
df = pd.DataFrame(
{("A", "B"): [1, 2], ("A", "C"): [1, 3], ("D", "B"): [0, 0]},
index=pd.bdate_range("20191212", "20191213"),
)
df.columns.names = ["COL1", "COL2"]
axis_number = df._get_axis_number(axis)
if not axis_number:
df = df.T
nunique_t = df.T.groupby(level=0, axis=axis).nunique().T
nunique = df.groupby(axis=int(not axis_number), level=0).nunique()
tm.assert_frame_equal(result, expected)
|
14,421 |
def set_mru_totp_code(email, token, env):
conn, c = open_database(env, with_connection=True)
c.execute('UPDATE totp_credentials SET mru_token=? WHERE user_email=?', (token, email))
if c.rowcount != 1:
conn.close()
raise ValueError("That's not a user (%s)." % email)
conn.commit()
return "OK"
|
def set_mru_totp_code(email, token, env):
conn, c = open_database(env, with_connection=True)
c.execute('UPDATE totp_credentials SET mru_token=? WHERE user_email=?', (token, email))
if c.rowcount != 1:
conn.close()
raise ValueError("That's not a user ({}).".format(mail))
conn.commit()
return "OK"
|
3,892 |
def is_aperiodic(G):
"""Returns True if `G` is aperiodic.
A directed graph is aperiodic if there is no integer k > 1 that
divides the length of every cycle in the graph.
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
bool
True if the graph is aperiodic False otherwise
Raises
------
NetworkXError
If `G` is not directed
Examples
--------
A graph consisting of one cycle,
the length of which is 2
(k = 2 divides the length of every cycle in the graph
and therefore the graph is not aperiodic)::
>>> DG = nx.DiGraph([(1, 2), (2, 1)])
>>> nx.is_aperiodic(DG)
False
A graph consisting of two cycles,
the length of which differs by one
(cycle lengths are coprime,
so there is no k > 1 dividing each cycle length
and therefore the graph is aperiodic)::
>>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1), (1, 4), (4, 1)])
>>> nx.is_aperiodic(DG)
True
Graph without cycles
(the lengths of all cycles are equal to zero,
so every k > 1 dividing each cycle length
and therefore the graph is not aperiodic)::
>>> DG = nx.DiGraph([(1, 2), (2, 3)])
>>> nx.is_aperiodic(DG)
False
Notes
-----
This uses the method outlined in [1]_, which runs in $O(m)$ time
given $m$ edges in `G`. Note that a graph is not aperiodic if it is
acyclic as every integer trivial divides length 0 cycles.
References
----------
.. [1] Jarvis, J. P.; Shier, D. R. (1996),
"Graph-theoretic analysis of finite Markov chains,"
in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling:
A Multidisciplinary Approach, CRC Press.
"""
if not G.is_directed():
raise nx.NetworkXError("is_aperiodic not defined for undirected graphs")
s = arbitrary_element(G)
levels = {s: 0}
this_level = [s]
g = 0
lev = 1
while this_level:
next_level = []
for u in this_level:
for v in G[u]:
if v in levels: # Non-Tree Edge
g = gcd(g, levels[u] - levels[v] + 1)
else: # Tree Edge
next_level.append(v)
levels[v] = lev
this_level = next_level
lev += 1
if len(levels) == len(G): # All nodes in tree
return g == 1
else:
return g == 1 and nx.is_aperiodic(G.subgraph(set(G) - set(levels)))
|
def is_aperiodic(G):
"""Returns True if `G` is aperiodic.
A directed graph is aperiodic if there is no integer k > 1 that
divides the length of every cycle in the graph.
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
bool
True if the graph is aperiodic False otherwise
Raises
------
NetworkXError
If `G` is not directed
Examples
--------
A graph consisting of one cycle,
the length of which is 2
(k = 2 divides the length of every cycle in the graph
and therefore the graph is not aperiodic)::
>>> DG = nx.DiGraph([(1, 2), (2, 1)])
>>> nx.is_aperiodic(DG)
False
A graph consisting of two cycles,
the length of which differs by one
(cycle lengths are coprime,
so there is no single value of k, where k > 1 that divides each cycle length
and therefore the graph is aperiodic)::
>>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1), (1, 4), (4, 1)])
>>> nx.is_aperiodic(DG)
True
Graph without cycles
(the lengths of all cycles are equal to zero,
so every k > 1 dividing each cycle length
and therefore the graph is not aperiodic)::
>>> DG = nx.DiGraph([(1, 2), (2, 3)])
>>> nx.is_aperiodic(DG)
False
Notes
-----
This uses the method outlined in [1]_, which runs in $O(m)$ time
given $m$ edges in `G`. Note that a graph is not aperiodic if it is
acyclic as every integer trivial divides length 0 cycles.
References
----------
.. [1] Jarvis, J. P.; Shier, D. R. (1996),
"Graph-theoretic analysis of finite Markov chains,"
in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling:
A Multidisciplinary Approach, CRC Press.
"""
if not G.is_directed():
raise nx.NetworkXError("is_aperiodic not defined for undirected graphs")
s = arbitrary_element(G)
levels = {s: 0}
this_level = [s]
g = 0
lev = 1
while this_level:
next_level = []
for u in this_level:
for v in G[u]:
if v in levels: # Non-Tree Edge
g = gcd(g, levels[u] - levels[v] + 1)
else: # Tree Edge
next_level.append(v)
levels[v] = lev
this_level = next_level
lev += 1
if len(levels) == len(G): # All nodes in tree
return g == 1
else:
return g == 1 and nx.is_aperiodic(G.subgraph(set(G) - set(levels)))
|
46,023 |
def edge_aware_blur_pool2d(
input: torch.Tensor,
kernel_size: int,
edge_threshold: float = 2.0,
edge_dilatation_kernel_size: int = 3,
epsilon: float = 1e-6,
) -> torch.Tensor:
r"""Blur the input tensor while maintaining its edges.
Edge detection is done with the sobel filter, and blurring is done with a pool2d.
Args:
input: the input image to blur with shape :math:`(B, C, H, W)`.
kernel_size: the kernel size for max pooling.
edge_threshold: threshold for the edge decision rule; edge/non-edge.
edge_dilatation_kernel_size: the kernel size for dilating the edges.
epsilon: for numerical stability.
Returns:
The blurred tensor of shape :math:`(B, C, H, W)`.
"""
input = F.pad(input, (2, 2, 2, 2), mode="reflect") # pad to avoid artifacts near physical edges
blurred_input = blur_pool2d(input, kernel_size=kernel_size, stride=1) # blurry version of the input
# calculate the edges (add epsilon to avoid taking the log of 0)
log_input, log_thresh = torch.log2(input + epsilon), torch.log2(torch.tensor(edge_threshold))
edges_x = log_input[..., :, 4:] - log_input[..., :, :-4]
edges_y = log_input[..., 4:, :] - log_input[..., :-4, :]
edges_x, edges_y = torch.mean(edges_x, dim=-3, keepdim=True), torch.mean(edges_y, dim=-3, keepdim=True)
edges_x_mask, edges_y_mask = edges_x.abs() > log_thresh.to(edges_x), edges_y.abs() > log_thresh.to(edges_y)
edges_xy_mask = (edges_x_mask[..., 2:-2, :] + edges_y_mask[..., :, 2:-2]).type_as(input)
# dilate the content edges to have a soft mask of edges
dilated_edges = F.max_pool3d(edges_xy_mask, edge_dilatation_kernel_size, 1, edge_dilatation_kernel_size // 2)
# slice the padded regions
input = input[..., 2:-2, 2:-2]
blurred_input = blurred_input[..., 2:-2, 2:-2]
# fuse the input image on edges and blurry input everywhere else
blurred = dilated_edges * input + (1.0 - dilated_edges) * blurred_input
return blurred
|
def edge_aware_blur_pool2d(
input: torch.Tensor,
kernel_size: int,
edge_threshold: float = 2.0,
edge_dilation_kernel_size: int = 3,
epsilon: float = 1e-6,
) -> torch.Tensor:
r"""Blur the input tensor while maintaining its edges.
Edge detection is done with the sobel filter, and blurring is done with a pool2d.
Args:
input: the input image to blur with shape :math:`(B, C, H, W)`.
kernel_size: the kernel size for max pooling.
edge_threshold: threshold for the edge decision rule; edge/non-edge.
edge_dilatation_kernel_size: the kernel size for dilating the edges.
epsilon: for numerical stability.
Returns:
The blurred tensor of shape :math:`(B, C, H, W)`.
"""
input = F.pad(input, (2, 2, 2, 2), mode="reflect") # pad to avoid artifacts near physical edges
blurred_input = blur_pool2d(input, kernel_size=kernel_size, stride=1) # blurry version of the input
# calculate the edges (add epsilon to avoid taking the log of 0)
log_input, log_thresh = torch.log2(input + epsilon), torch.log2(torch.tensor(edge_threshold))
edges_x = log_input[..., :, 4:] - log_input[..., :, :-4]
edges_y = log_input[..., 4:, :] - log_input[..., :-4, :]
edges_x, edges_y = torch.mean(edges_x, dim=-3, keepdim=True), torch.mean(edges_y, dim=-3, keepdim=True)
edges_x_mask, edges_y_mask = edges_x.abs() > log_thresh.to(edges_x), edges_y.abs() > log_thresh.to(edges_y)
edges_xy_mask = (edges_x_mask[..., 2:-2, :] + edges_y_mask[..., :, 2:-2]).type_as(input)
# dilate the content edges to have a soft mask of edges
dilated_edges = F.max_pool3d(edges_xy_mask, edge_dilatation_kernel_size, 1, edge_dilatation_kernel_size // 2)
# slice the padded regions
input = input[..., 2:-2, 2:-2]
blurred_input = blurred_input[..., 2:-2, 2:-2]
# fuse the input image on edges and blurry input everywhere else
blurred = dilated_edges * input + (1.0 - dilated_edges) * blurred_input
return blurred
|
26,422 |
def _object_upload(ul_handler, **kwargs):
ul_chunks = ul_handler.chunk_prep()
return next(ul_chunks.next), 0, 'd41d8cd98f00b204e9800998ecf8427e'
|
def _object_upload(ul_handler, **kwargs):
ul_chunks = ul_handler.chunk_prep()
return next(ul_chunks), 0, 'd41d8cd98f00b204e9800998ecf8427e'
|
47,119 |
def main():
# Parse the arguments
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.model_name_or_path)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
model = AutoModelForSeq2SeqLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForSeq2SeqLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
assert (args.target_lang is not None and args.source_lang is not None
), "mBart requires --target_lang and --source_lang"
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = args.source_prefix if args.source_prefix is not None else ""
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
# For translation we set the codes of our source and target languages (only useful for mBART, the others will
# ignore those attributes).
if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if args.source_lang is not None:
tokenizer.src_lang = args.source_lang
if args.target_lang is not None:
tokenizer.tgt_lang = args.target_lang
# Get the language codes for input/target.
source_lang = args.source_lang.split("_")[0]
target_lang = args.target_lang.split("_")[0]
padding = "max_length" if args.pad_to_max_length else False
# Temporarily set max_target_length for training.
max_target_length = args.max_target_length
padding = "max_length" if args.pad_to_max_length else False
def preprocess_function(examples):
inputs = [ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
label_pad_token_id = -100 if args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if args.pad_to_max_length:
# If padding was already done ot max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if accelerator.use_fp16 else None,
)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
# Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be
# shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
metric = load_metric("sacrebleu")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
model.eval()
if args.val_max_target_length is None:
args.val_max_target_length = args.max_target_length
gen_kwargs = {
"max_length": args.val_max_target_length if args is not None else config.max_length,
"num_beams": args.num_beams,
}
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
generated_tokens = model.generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
**gen_kwargs,
)
generated_tokens = accelerator.pad_across_processes(generated_tokens, dim=1,
pad_index=tokenizer.pad_token_id)
labels = batch["labels"]
if not args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id)
generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
labels = accelerator.gather(labels).cpu().numpy()
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
metric.add_batch(
predictions=accelerator.gather(decoded_preds),
references=accelerator.gather(decoded_labels),
)
eval_metric = metric.compute()
logger.info(f"epoch {epoch}: {eval_metric}")
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
|
def main():
# Parse the arguments
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.model_name_or_path)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
model = AutoModelForSeq2SeqLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForSeq2SeqLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
assert (args.target_lang is not None and args.source_lang is not None
), "mBart requires --target_lang and --source_lang"
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = args.source_prefix if args.source_prefix is not None else ""
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
# For translation we set the codes of our source and target languages (only useful for mBART, the others will
# ignore those attributes).
if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if args.source_lang is not None:
tokenizer.src_lang = args.source_lang
if args.target_lang is not None:
tokenizer.tgt_lang = args.target_lang
# Get the language codes for input/target.
source_lang = args.source_lang.split("_")[0]
target_lang = args.target_lang.split("_")[0]
padding = "max_length" if args.pad_to_max_length else False
# Temporarily set max_target_length for training.
max_target_length = args.max_target_length
padding = "max_length" if args.pad_to_max_length else False
def preprocess_function(examples):
inputs = [ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
label_pad_token_id = -100 if args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if args.pad_to_max_length:
# If padding was already done ot max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if accelerator.use_fp16 else None,
)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
# Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be
# shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
metric = load_metric("sacrebleu")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
model.eval()
if args.val_max_target_length is None:
args.val_max_target_length = args.max_target_length
gen_kwargs = {
"max_length": args.val_max_target_length if args is not None else config.max_length,
"num_beams": args.num_beams,
}
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
generated_tokens = model.generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
**gen_kwargs,
)
generated_tokens = accelerator.pad_across_processes(generated_tokens, dim=1,
pad_index=tokenizer.pad_token_id)
labels = batch["labels"]
if not args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id)
generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
labels = accelerator.gather(labels).cpu().numpy()
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
metric.add_batch(predictions=decoded_preds, references=decoded_labels)
eval_metric = metric.compute()
logger.info(f"epoch {epoch}: {eval_metric}")
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
|
23,142 |
def store(
sources: Array | Collection[Array],
targets: Array | Collection[Array],
lock: bool | Lock = True,
regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,
compute: bool = True,
return_stored: bool = False,
**kwargs,
):
"""Store dask arrays in array-like objects, overwrite data in target
This stores dask arrays into object that supports numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or collection of Arrays
targets: array-like or Delayed or collection of array-likes and/or Delayeds
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular :class:`threading.Lock` object to be shared among all writes.
regions: tuple of slices or collection of tuples of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets,
respectively. If this is a tuple, the contents will be assumed to be
slices, so do not provide a tuple of tuples.
compute: boolean, optional
If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.
return_stored: boolean, optional
Optionally return the stored result (default False).
kwargs:
Parameters passed to compute/persist (only used if compute=True)
Returns
-------
If return_stored=True
tuple of Arrays
If return_stored=False and compute=True
None
If return_stored=False and compute=False
Delayed
Examples
--------
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets] # type: ignore
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError(
"Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets))
)
if isinstance(regions, tuple) or regions is None:
regions = [regions] # type: ignore
if len(sources) > 1 and len(regions) == 1:
regions *= len(sources) # type: ignore
if len(sources) != len(regions):
raise ValueError(
"Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions))
)
# Optimize all sources together
sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
sources_layer = Array.__dask_optimize__(
sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources]))
)
sources_name = "store-sources-" + tokenize(sources)
layers = {sources_name: sources_layer}
dependencies: dict[str, set] = {sources_name: set()}
# Optimize all targets together
targets_keys = []
targets_dsks = []
for t in targets:
if isinstance(t, Delayed):
targets_keys.append(t.key)
targets_dsks.append(t.__dask_graph__())
elif is_dask_collection(t):
raise TypeError("Targets must be either Delayed objects or array-likes")
if targets_dsks:
targets_hlg = HighLevelGraph.merge(*targets_dsks)
targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys)
targets_name = "store-targets-" + tokenize(targets_keys)
layers[targets_name] = targets_layer
dependencies[targets_name] = set()
load_stored = return_stored and not compute
map_names = [
"store-map-" + tokenize(s, t if isinstance(t, Delayed) else id(t), r)
for s, t, r in zip(sources, targets, regions)
]
map_keys: list = []
for s, t, n, r in zip(sources, targets, map_names, regions):
map_layer = insert_to_ooc(
keys=s.__dask_keys__(),
chunks=s.chunks,
out=t.key if isinstance(t, Delayed) else t,
name=n,
lock=lock,
region=r,
return_stored=return_stored,
load_stored=load_stored,
)
layers[n] = map_layer
if isinstance(t, Delayed):
dependencies[n] = {sources_name, targets_name}
else:
dependencies[n] = {sources_name}
map_keys += map_layer.keys()
if return_stored:
store_dsk = HighLevelGraph(layers, dependencies)
load_store_dsk: HighLevelGraph | Mapping = store_dsk
if compute:
store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys]
store_dlyds = persist(*store_dlyds, **kwargs)
store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2)
map_names = ["load-" + n for n in map_names]
return tuple(
Array(load_store_dsk, n, s.chunks, meta=s)
for s, n in zip(sources, map_names)
)
elif compute:
store_dsk = HighLevelGraph(layers, dependencies)
compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
return None
else:
key = "store-" + tokenize(map_names)
layers[key] = {key: map_keys}
dependencies[key] = set(map_names)
store_dsk = HighLevelGraph(layers, dependencies)
return Delayed(key, store_dsk)
|
def store(
sources: Array | Collection[Array],
targets: Array | Collection[Array],
lock: bool | Lock = True,
regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,
compute: bool = True,
return_stored: bool = False,
**kwargs,
):
"""Store dask arrays in array-like objects, overwrite data in target
This stores dask arrays into object that supports numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or collection of Arrays
targets: array-like or Delayed or collection of array-likes and/or Delayeds
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular :class:`threading.Lock` object to be shared among all writes.
regions: tuple of slices or collection of tuples of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets,
respectively. If this is a tuple, the contents will be assumed to be
slices, so do not provide a tuple of tuples.
compute: boolean, optional
If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.
return_stored: boolean, optional
Optionally return the stored result (default False).
kwargs:
Parameters passed to compute/persist (only used if compute=True)
Returns
-------
If return_stored=True
tuple of Arrays
If return_stored=False and compute=True
None
If return_stored=False and compute=False
Delayed
Examples
--------
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets] # type: ignore
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError(
"Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets))
)
if isinstance(regions, tuple) or regions is None:
regions = [regions]
if len(sources) > 1 and len(regions) == 1:
regions *= len(sources) # type: ignore
if len(sources) != len(regions):
raise ValueError(
"Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions))
)
# Optimize all sources together
sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
sources_layer = Array.__dask_optimize__(
sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources]))
)
sources_name = "store-sources-" + tokenize(sources)
layers = {sources_name: sources_layer}
dependencies: dict[str, set] = {sources_name: set()}
# Optimize all targets together
targets_keys = []
targets_dsks = []
for t in targets:
if isinstance(t, Delayed):
targets_keys.append(t.key)
targets_dsks.append(t.__dask_graph__())
elif is_dask_collection(t):
raise TypeError("Targets must be either Delayed objects or array-likes")
if targets_dsks:
targets_hlg = HighLevelGraph.merge(*targets_dsks)
targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys)
targets_name = "store-targets-" + tokenize(targets_keys)
layers[targets_name] = targets_layer
dependencies[targets_name] = set()
load_stored = return_stored and not compute
map_names = [
"store-map-" + tokenize(s, t if isinstance(t, Delayed) else id(t), r)
for s, t, r in zip(sources, targets, regions)
]
map_keys: list = []
for s, t, n, r in zip(sources, targets, map_names, regions):
map_layer = insert_to_ooc(
keys=s.__dask_keys__(),
chunks=s.chunks,
out=t.key if isinstance(t, Delayed) else t,
name=n,
lock=lock,
region=r,
return_stored=return_stored,
load_stored=load_stored,
)
layers[n] = map_layer
if isinstance(t, Delayed):
dependencies[n] = {sources_name, targets_name}
else:
dependencies[n] = {sources_name}
map_keys += map_layer.keys()
if return_stored:
store_dsk = HighLevelGraph(layers, dependencies)
load_store_dsk: HighLevelGraph | Mapping = store_dsk
if compute:
store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys]
store_dlyds = persist(*store_dlyds, **kwargs)
store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2)
map_names = ["load-" + n for n in map_names]
return tuple(
Array(load_store_dsk, n, s.chunks, meta=s)
for s, n in zip(sources, map_names)
)
elif compute:
store_dsk = HighLevelGraph(layers, dependencies)
compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
return None
else:
key = "store-" + tokenize(map_names)
layers[key] = {key: map_keys}
dependencies[key] = set(map_names)
store_dsk = HighLevelGraph(layers, dependencies)
return Delayed(key, store_dsk)
|
45,587 |
def ReminderScheduled(
intent: Text,
trigger_date_time: datetime.datetime,
entities: Union[List[Dict[Text, Any]], Dict[Text, Text]] = None,
name: Optional[Text] = None,
kill_on_user_message: bool = True,
timestamp: Optional[float] = None,
) -> EventType:
return {
"event": "reminder",
"timestamp": timestamp,
"intent": intent,
"entities": entities,
"date_time": trigger_date_time.isoformat(),
"name": name,
"kill_on_user_msg": kill_on_user_message,
}
|
def ReminderScheduled(
intent: Text,
trigger_date_time: datetime.datetime,
entities: Optional[Union[List[Dict[Text, Any]], Dict[Text, Text]]] = None,
name: Optional[Text] = None,
kill_on_user_message: bool = True,
timestamp: Optional[float] = None,
) -> EventType:
return {
"event": "reminder",
"timestamp": timestamp,
"intent": intent,
"entities": entities,
"date_time": trigger_date_time.isoformat(),
"name": name,
"kill_on_user_msg": kill_on_user_message,
}
|
31,295 |
def get_indicators_command(client, args: dict) -> CommandResults:
""" Gets indicator from opencti to readable output
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
indicator_type = argToList(args.get("indicator_types"))
limit = int(args.get('limit', 50))
limit = 200 if limit > 200 else limit
last_run_id, indicators_list = get_indicators(client, indicator_type, limit=limit)
if indicators_list:
indicators = [{'type': indicator['type'], 'value': indicator['value'], 'id': indicator['rawJSON']['id'],
'createdBy': indicator['rawJSON'].get('createdBy').get('id')
if indicator['rawJSON'].get('createdBy') else None,
'score': indicator['rawJSON']['x_opencti_score'],
'description': indicator['rawJSON']['x_opencti_description'],
'labels': [label.get('value') for label in indicator['rawJSON'].get('objectLabel')],
'marking': [mark.get('definition') for mark in indicator['rawJSON'].get('objectMarking')]
}
for indicator in indicators_list]
readable_output = tableToMarkdown('Indicators from OpenCTI', indicators,
headers=["type", "value", "id"],
removeNull=True)
return CommandResults(
outputs_prefix='OpenCTI.Indicators',
outputs_key_field='id',
outputs=indicators,
readable_output=readable_output,
raw_response=indicators_list
)
else:
return CommandResults(readable_output='No indicators')
|
def get_indicators_command(client, args: dict) -> CommandResults:
""" Gets indicator from opencti to readable output
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
indicator_type = argToList(args.get("indicator_types"))
limit = min(arg_to_number(args.get('limit', 50)), 200)
last_run_id, indicators_list = get_indicators(client, indicator_type, limit=limit)
if indicators_list:
indicators = [{'type': indicator['type'], 'value': indicator['value'], 'id': indicator['rawJSON']['id'],
'createdBy': indicator['rawJSON'].get('createdBy').get('id')
if indicator['rawJSON'].get('createdBy') else None,
'score': indicator['rawJSON']['x_opencti_score'],
'description': indicator['rawJSON']['x_opencti_description'],
'labels': [label.get('value') for label in indicator['rawJSON'].get('objectLabel')],
'marking': [mark.get('definition') for mark in indicator['rawJSON'].get('objectMarking')]
}
for indicator in indicators_list]
readable_output = tableToMarkdown('Indicators from OpenCTI', indicators,
headers=["type", "value", "id"],
removeNull=True)
return CommandResults(
outputs_prefix='OpenCTI.Indicators',
outputs_key_field='id',
outputs=indicators,
readable_output=readable_output,
raw_response=indicators_list
)
else:
return CommandResults(readable_output='No indicators')
|
5,695 |
def _perturb_discrepancy(sample, i1, i2, k, disc):
"""Centered discrepancy after and elementary perturbation on a LHS.
An elementary perturbation consists of an exchange of coordinates between
two points: ``sample[i1, k] <-> sample[i2, k]``. By construction,
this operation conserves the LHS properties.
Parameters
----------
sample : array_like (n, d)
The sample (before permutation) to compute the discrepancy from.
i1 : int
The first line of the elementary permutation.
i2 : int
The second line of the elementary permutation.
k : int
The column of the elementary permutation.
disc : float
Centered discrepancy of the design before permutation.
Returns
-------
discrepancy : float
Centered discrepancy.
References
----------
.. [1] Jin et al. "An efficient algorithm for constructing optimal design
of computer experiments", Journal of Statistical Planning and
Inference, 2005.
"""
sample = np.asarray(sample)
n = sample.shape[0]
z_ij = sample - 0.5
# Eq (19)
c_i1j = 1. / n ** 2. * np.prod(0.5 * (2. + abs(z_ij[i1, :]) +
abs(z_ij) -
abs(z_ij[i1, :] - z_ij)),
axis=1)
c_i2j = 1. / n ** 2. * np.prod(0.5 * (2. + abs(z_ij[i2, :]) +
abs(z_ij) -
abs(z_ij[i2, :] - z_ij)),
axis=1)
# Eq (20)
c_i1i1 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i1, :])) -
2. / n * np.prod(1. + 0.5 * abs(z_ij[i1, :]) -
0.5 * z_ij[i1, :] ** 2))
c_i2i2 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i2, :])) -
2. / n * np.prod(1. + 0.5 * abs(z_ij[i2, :]) -
0.5 * z_ij[i2, :] ** 2))
# Eq (22), typo in the article in the denominator i2 -> i1
num = (2 + abs(z_ij[i2, k]) + abs(z_ij[:, k]) -
abs(z_ij[i2, k] - z_ij[:, k]))
denum = (2 + abs(z_ij[i1, k]) + abs(z_ij[:, k]) -
abs(z_ij[i1, k] - z_ij[:, k]))
gamma = num / denum
# Eq (23)
c_p_i1j = gamma * c_i1j
# Eq (24)
c_p_i2j = c_i2j / gamma
alpha = (1 + abs(z_ij[i2, k])) / (1 + abs(z_ij[i1, k]))
beta = (2 - abs(z_ij[i2, k])) / (2 - abs(z_ij[i1, k]))
g_i1 = np.prod(1. + abs(z_ij[i1, :]))
g_i2 = np.prod(1. + abs(z_ij[i2, :]))
h_i1 = np.prod(1. + 0.5 * abs(z_ij[i1, :]) - 0.5 * (z_ij[i1, :] ** 2))
h_i2 = np.prod(1. + 0.5 * abs(z_ij[i2, :]) - 0.5 * (z_ij[i2, :] ** 2))
# Eq (25), typo in the article g is missing
c_p_i1i1 = ((g_i1 * alpha) / (n ** 2) - 2. * alpha * beta * h_i1 / n)
# Eq (26), typo in the article n ** 2
c_p_i2i2 = ((g_i2 / ((n ** 2) * alpha)) - (2. * h_i2 / (n * alpha * beta)))
# Eq (26)
sum_ = c_p_i1j - c_i1j + c_p_i2j - c_i2j
mask = np.ones(n, dtype=bool)
mask[[i1, i2]] = False
sum_ = sum(sum_[mask])
disc_ep = (disc + c_p_i1i1 - c_i1i1 + c_p_i2i2 - c_i2i2 + 2 * sum_)
return disc_ep
|
def _perturb_discrepancy(sample, i1, i2, k, disc):
"""Centered discrepancy after an elementary perturbation of a LHS.
An elementary perturbation consists of an exchange of coordinates between
two points: ``sample[i1, k] <-> sample[i2, k]``. By construction,
this operation conserves the LHS properties.
Parameters
----------
sample : array_like (n, d)
The sample (before permutation) to compute the discrepancy from.
i1 : int
The first line of the elementary permutation.
i2 : int
The second line of the elementary permutation.
k : int
The column of the elementary permutation.
disc : float
Centered discrepancy of the design before permutation.
Returns
-------
discrepancy : float
Centered discrepancy.
References
----------
.. [1] Jin et al. "An efficient algorithm for constructing optimal design
of computer experiments", Journal of Statistical Planning and
Inference, 2005.
"""
sample = np.asarray(sample)
n = sample.shape[0]
z_ij = sample - 0.5
# Eq (19)
c_i1j = 1. / n ** 2. * np.prod(0.5 * (2. + abs(z_ij[i1, :]) +
abs(z_ij) -
abs(z_ij[i1, :] - z_ij)),
axis=1)
c_i2j = 1. / n ** 2. * np.prod(0.5 * (2. + abs(z_ij[i2, :]) +
abs(z_ij) -
abs(z_ij[i2, :] - z_ij)),
axis=1)
# Eq (20)
c_i1i1 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i1, :])) -
2. / n * np.prod(1. + 0.5 * abs(z_ij[i1, :]) -
0.5 * z_ij[i1, :] ** 2))
c_i2i2 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i2, :])) -
2. / n * np.prod(1. + 0.5 * abs(z_ij[i2, :]) -
0.5 * z_ij[i2, :] ** 2))
# Eq (22), typo in the article in the denominator i2 -> i1
num = (2 + abs(z_ij[i2, k]) + abs(z_ij[:, k]) -
abs(z_ij[i2, k] - z_ij[:, k]))
denum = (2 + abs(z_ij[i1, k]) + abs(z_ij[:, k]) -
abs(z_ij[i1, k] - z_ij[:, k]))
gamma = num / denum
# Eq (23)
c_p_i1j = gamma * c_i1j
# Eq (24)
c_p_i2j = c_i2j / gamma
alpha = (1 + abs(z_ij[i2, k])) / (1 + abs(z_ij[i1, k]))
beta = (2 - abs(z_ij[i2, k])) / (2 - abs(z_ij[i1, k]))
g_i1 = np.prod(1. + abs(z_ij[i1, :]))
g_i2 = np.prod(1. + abs(z_ij[i2, :]))
h_i1 = np.prod(1. + 0.5 * abs(z_ij[i1, :]) - 0.5 * (z_ij[i1, :] ** 2))
h_i2 = np.prod(1. + 0.5 * abs(z_ij[i2, :]) - 0.5 * (z_ij[i2, :] ** 2))
# Eq (25), typo in the article g is missing
c_p_i1i1 = ((g_i1 * alpha) / (n ** 2) - 2. * alpha * beta * h_i1 / n)
# Eq (26), typo in the article n ** 2
c_p_i2i2 = ((g_i2 / ((n ** 2) * alpha)) - (2. * h_i2 / (n * alpha * beta)))
# Eq (26)
sum_ = c_p_i1j - c_i1j + c_p_i2j - c_i2j
mask = np.ones(n, dtype=bool)
mask[[i1, i2]] = False
sum_ = sum(sum_[mask])
disc_ep = (disc + c_p_i1i1 - c_i1i1 + c_p_i2i2 - c_i2i2 + 2 * sum_)
return disc_ep
|
55,840 |
def generate_from_tests(runner_name: str, handler_name: str, src: Any,
fork_name: SpecForkName, bls_active: bool = True,
phase: Optional[str]=None) -> Iterable[TestCase]:
"""
Generate a list of test cases by running tests from the given src in generator-mode.
:param runner_name: to categorize the test in general as.
:param handler_name: to categorize the test specialization as.
:param src: to retrieve tests from (discovered using inspect.getmembers).
:param fork_name: to run tests against particular phase and/or fork.
(if multiple forks are applicable, indicate the last fork)
:param bls_active: optional, to override BLS switch preference. Defaults to True.
:param phase: optional, specific phase name
:return: an iterable of test cases.
"""
fn_names = [
name for (name, _) in getmembers(src, isfunction)
if name.startswith('test_')
]
if phase is None:
phase = fork_name
print("generating test vectors from tests source: %s" % src.__name__)
for name in fn_names:
tfn = getattr(src, name)
# strip off the `test_`
case_name = name
if case_name.startswith('test_'):
case_name = case_name[5:]
yield TestCase(
fork_name=fork_name,
runner_name=runner_name,
handler_name=handler_name,
suite_name='pyspec_tests',
case_name=case_name,
# TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
case_fn=lambda: tfn(generator_mode=True, phase=phase, bls_active=bls_active)
)
|
def generate_from_tests(runner_name: str, handler_name: str, src: Any,
fork_name: SpecForkName, bls_active: bool = True,
phase: Optional[str]=None) -> Iterable[TestCase]:
"""
Generate a list of test cases by running tests from the given src in generator-mode.
:param runner_name: to categorize the test in general as.
:param handler_name: to categorize the test specialization as.
:param src: to retrieve tests from (discovered using inspect.getmembers).
:param fork_name: the folder name for these tests.
(if multiple forks are applicable, indicate the last fork)
:param bls_active: optional, to override BLS switch preference. Defaults to True.
:param phase: optional, to run tests against a particular spec version. Default to `fork_name` value.
:return: an iterable of test cases.
"""
fn_names = [
name for (name, _) in getmembers(src, isfunction)
if name.startswith('test_')
]
if phase is None:
phase = fork_name
print("generating test vectors from tests source: %s" % src.__name__)
for name in fn_names:
tfn = getattr(src, name)
# strip off the `test_`
case_name = name
if case_name.startswith('test_'):
case_name = case_name[5:]
yield TestCase(
fork_name=fork_name,
runner_name=runner_name,
handler_name=handler_name,
suite_name='pyspec_tests',
case_name=case_name,
# TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
case_fn=lambda: tfn(generator_mode=True, phase=phase, bls_active=bls_active)
)
|
31,889 |
def incremental_level_fetch(client: Client) -> list:
"""
This method implements the incremental level of the feed. It checks if any updates
have been made in the tags from the last time, and returns the updated tags.
Args:
client: Client object
Returns:
A list of tag details represents the tags that have been updated.
"""
results: list = []
integration_context = get_integration_context()
# This field saves tags that have been updated since the last time of fetch and need to be updated in demisto
list_of_all_updated_tags = argToList(integration_context.get('tags_need_to_be_fetched', ''))
time_from_last_update = integration_context.get('time_of_first_fetch')
# if there are such tags, we first get all of them and upload to demisto
index_to_delete = 0
for tag in list_of_all_updated_tags:
if len(results) < PAGE_SIZE:
results.append(client.get_tag_details(tag.get('public_tag_name')))
index_to_delete += 1
else:
context = get_integration_context()
context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT)
context['tags_need_to_be_fetched'] = list_of_all_updated_tags[index_to_delete:]
set_integration_context(context)
return results
page_num = 0
has_updates = True
while has_updates:
response = client.get_tags({'pageNum': page_num,
'pageSize': 200,
'sortBy': 'updated_at',
'order': 'desc'})
tags = response.get('tags', [])
for tag in tags:
update_time = tag.get('updated_at')
update_time = datetime.strptime(update_time, AF_TAGS_DATE_FORMAT).strftime(
DATE_FORMAT) if update_time else None
update_time = date_to_timestamp(update_time, DATE_FORMAT)
if update_time >= time_from_last_update:
list_of_all_updated_tags.append(
{'public_tag_name': tag.get('public_tag_name')})
else:
has_updates = False
break
page_num += 1
# add only PAGE_SIZE tag_details to results, so we wont make to many calls to the api
list_index = 0
for tag in list_of_all_updated_tags:
if len(results) < PAGE_SIZE:
public_tag_name = tag.get('public_tag_name')
response = client.get_tag_details(public_tag_name)
results.append(response)
list_index += 1
else:
break
# delete from the list all tags that will be returned this fetch
list_of_all_updated_tags = list_of_all_updated_tags[list_index:]
# update integration context
context = get_integration_context()
context['tags_need_to_be_fetched'] = list_of_all_updated_tags
context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT)
set_integration_context(context)
return results
|
def incremental_level_fetch(client: Client) -> list:
"""
This method implements the incremental level of the feed. It checks if any updates
have been made in the tags from the last time, and returns the updated tags.
Args:
client: Client object
Returns:
A list of tag details represents the tags that have been updated.
"""
results: list = []
integration_context = get_integration_context()
# This field saves tags that have been updated since the last time of fetch and need to be updated in demisto
list_of_all_updated_tags = argToList(integration_context.get('tags_need_to_be_fetched', ''))
time_from_last_update = integration_context.get('time_of_first_fetch')
# if there are such tags, we first get all of them and upload to demisto
index_to_delete = 0
for tag in list_of_all_updated_tags:
if len(results) < PAGE_SIZE:
results.append(client.get_tag_details(tag.get('public_tag_name', '')))
index_to_delete += 1
else:
context = get_integration_context()
context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT)
context['tags_need_to_be_fetched'] = list_of_all_updated_tags[index_to_delete:]
set_integration_context(context)
return results
page_num = 0
has_updates = True
while has_updates:
response = client.get_tags({'pageNum': page_num,
'pageSize': 200,
'sortBy': 'updated_at',
'order': 'desc'})
tags = response.get('tags', [])
for tag in tags:
update_time = tag.get('updated_at')
update_time = datetime.strptime(update_time, AF_TAGS_DATE_FORMAT).strftime(
DATE_FORMAT) if update_time else None
update_time = date_to_timestamp(update_time, DATE_FORMAT)
if update_time >= time_from_last_update:
list_of_all_updated_tags.append(
{'public_tag_name': tag.get('public_tag_name')})
else:
has_updates = False
break
page_num += 1
# add only PAGE_SIZE tag_details to results, so we wont make to many calls to the api
list_index = 0
for tag in list_of_all_updated_tags:
if len(results) < PAGE_SIZE:
public_tag_name = tag.get('public_tag_name')
response = client.get_tag_details(public_tag_name)
results.append(response)
list_index += 1
else:
break
# delete from the list all tags that will be returned this fetch
list_of_all_updated_tags = list_of_all_updated_tags[list_index:]
# update integration context
context = get_integration_context()
context['tags_need_to_be_fetched'] = list_of_all_updated_tags
context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT)
set_integration_context(context)
return results
|
29,773 |
def group_dicoms_into_seqinfos(files, file_filter, dcmfilter, grouping):
"""Process list of dicoms and return seqinfo and file group
`seqinfo` contains per-sequence extract of fields from DICOMs which
will be later provided into heuristics to decide on filenames
Parameters
----------
files : list of str
List of files to consider
file_filter : callable, optional
Applied to each item of filenames. Should return True if file needs to be
kept, False otherwise.
dcmfilter : callable, optional
If called on dcm_data and returns True, it is used to set series_id
grouping : {'studyUID', 'accession_number', None}, optional
what to group by: studyUID or accession_number
Returns
-------
seqinfo : list of list
`seqinfo` is a list of info entries per each sequence (some entry
there defines a key for `filegrp`)
filegrp : dict
`filegrp` is a dictionary with files groupped per each sequence
"""
allowed_groupings = ['studyUID', 'accession_number', None]
if grouping not in allowed_groupings:
raise ValueError('I do not know how to group by {0}'.format(grouping))
per_studyUID = grouping == 'studyUID'
per_accession_number = grouping == 'accession_number'
lgr.info("Analyzing %d dicoms", len(files))
groups = [[], []]
mwgroup = []
studyUID = None
# for sanity check that all DICOMs came from the same
# "study". If not -- what is the use-case? (interrupted acquisition?)
# and how would then we deal with series numbers
# which would differ already
if file_filter:
nfl_before = len(files)
files = list(filter(file_filter, files))
nfl_after = len(files)
lgr.info('Filtering out {0} dicoms based on their filename'.format(
nfl_before-nfl_after))
for fidx, filename in enumerate(files):
from heudiconv.external.dcmstack import ds
# TODO after getting a regression test check if the same behavior
# with stop_before_pixels=True
mw = ds.wrapper_from_data(dcm.read_file(filename, force=True))
for sig in ('iop', 'ICE_Dims', 'SequenceName'):
try:
del mw.series_signature[sig]
except:
pass
try:
file_studyUID = mw.dcm_data.StudyInstanceUID
except AttributeError:
lgr.info("File {} is missing any StudyInstanceUID".format(filename))
file_studyUID = None
# Workaround for protocol name in private siemens csa header
try:
mw.dcm_data.ProtocolName
except AttributeError:
mw.dcm_data.ProtocolName = parse_private_csa_header(mw.dcm_data, 'ProtocolName', 'tProtocolName') if mw.is_csa else ''
try:
series_id = (int(mw.dcm_data.SeriesNumber),
mw.dcm_data.ProtocolName)
file_studyUID = mw.dcm_data.StudyInstanceUID
if not per_studyUID:
# verify that we are working with a single study
if studyUID is None:
studyUID = file_studyUID
elif not per_accession_number:
assert studyUID == file_studyUID, (
"Conflicting study identifiers found [{}, {}].".format(
studyUID, file_studyUID
))
except AttributeError as exc:
lgr.warning('Ignoring %s since not quite a "normal" DICOM: %s',
filename, exc)
series_id = (-1, 'none')
file_studyUID = None
if not series_id[0] < 0:
if dcmfilter is not None and dcmfilter(mw.dcm_data):
series_id = (-1, mw.dcm_data.ProtocolName)
# filter out unwanted non-image-data DICOMs by assigning
# a series number < 0 (see test below)
if not series_id[0] < 0 and mw.dcm_data[0x0008, 0x0016].repval in (
'Raw Data Storage',
'GrayscaleSoftcopyPresentationStateStorage'):
series_id = (-1, mw.dcm_data.ProtocolName)
if per_studyUID:
series_id = series_id + (file_studyUID,)
ingrp = False
for idx in range(len(mwgroup)):
# same = mw.is_same_series(mwgroup[idx])
if mw.is_same_series(mwgroup[idx]):
# the same series should have the same study uuid
assert (mwgroup[idx].dcm_data.get('StudyInstanceUID', None)
== file_studyUID)
ingrp = True
if series_id[0] >= 0:
series_id = (mwgroup[idx].dcm_data.SeriesNumber,
mwgroup[idx].dcm_data.ProtocolName)
if per_studyUID:
series_id = series_id + (file_studyUID,)
groups[0].append(series_id)
groups[1].append(idx)
if not ingrp:
mwgroup.append(mw)
groups[0].append(series_id)
groups[1].append(len(mwgroup) - 1)
group_map = dict(zip(groups[0], groups[1]))
total = 0
seqinfo = OrderedDict()
# for the next line to make any sense the series_id needs to
# be sortable in a way that preserves the series order
for series_id, mwidx in sorted(group_map.items()):
if series_id[0] < 0:
# skip our fake series with unwanted files
continue
mw = mwgroup[mwidx]
if mw.image_shape is None:
# this whole thing has now image data (maybe just PSg DICOMs)
# nothing to see here, just move on
continue
dcminfo = mw.dcm_data
series_files = [files[i] for i, s in enumerate(groups[0])
if s == series_id]
# turn the series_id into a human-readable string -- string is needed
# for JSON storage later on
if per_studyUID:
studyUID = series_id[2]
series_id = series_id[:2]
accession_number = dcminfo.get('AccessionNumber')
series_id = '-'.join(map(str, series_id))
size = list(mw.image_shape) + [len(series_files)]
total += size[-1]
if len(size) < 4:
size.append(1)
# MG - refactor into util function
try:
TR = float(dcminfo.RepetitionTime) / 1000.
except (AttributeError, ValueError):
TR = -1
try:
TE = float(dcminfo.EchoTime)
except (AttributeError, ValueError):
TE = -1
try:
refphys = str(dcminfo.ReferringPhysicianName)
except AttributeError:
refphys = ''
try:
image_type = tuple(dcminfo.ImageType)
except AttributeError:
image_type = ''
try:
series_desc = dcminfo.SeriesDescription
except AttributeError:
series_desc = ''
motion_corrected = 'MOCO' in image_type
if dcminfo.get([0x18,0x24], None):
# GE and Philips scanners
sequence_name = dcminfo[0x18,0x24].value
elif dcminfo.get([0x19, 0x109c], None):
# Siemens scanners
sequence_name = dcminfo[0x19, 0x109c].value
else:
sequence_name = 'Not found'
info = SeqInfo(
total,
op.split(series_files[0])[1],
series_id,
op.basename(op.dirname(series_files[0])),
'-', '-',
size[0], size[1], size[2], size[3],
TR, TE,
dcminfo.ProtocolName,
motion_corrected,
'derived' in [x.lower() for x in dcminfo.get('ImageType', [])],
dcminfo.get('PatientID'),
dcminfo.get('StudyDescription'),
refphys,
series_desc, #We try to set this further up.
sequence_name,
image_type,
accession_number,
# For demographics to populate BIDS participants.tsv
dcminfo.get('PatientAge'),
dcminfo.get('PatientSex'),
dcminfo.get('AcquisitionDate'),
dcminfo.get('SeriesInstanceUID')
)
# candidates
# dcminfo.AccessionNumber
# len(dcminfo.ReferencedImageSequence)
# len(dcminfo.SourceImageSequence)
# FOR demographics
if per_studyUID:
key = studyUID.split('.')[-1]
elif per_accession_number:
key = accession_number
else:
key = ''
lgr.debug("%30s %30s %27s %27s %5s nref=%-2d nsrc=%-2d %s" % (
key,
info.series_id,
series_desc,
dcminfo.ProtocolName,
info.is_derived,
len(dcminfo.get('ReferencedImageSequence', '')),
len(dcminfo.get('SourceImageSequence', '')),
info.image_type
))
if per_studyUID:
if studyUID not in seqinfo:
seqinfo[studyUID] = OrderedDict()
seqinfo[studyUID][info] = series_files
elif per_accession_number:
if accession_number not in seqinfo:
seqinfo[accession_number] = OrderedDict()
seqinfo[accession_number][info] = series_files
else:
seqinfo[info] = series_files
if per_studyUID:
lgr.info("Generated sequence info for %d studies with %d entries total",
len(seqinfo), sum(map(len, seqinfo.values())))
elif per_accession_number:
lgr.info("Generated sequence info for %d accession numbers with %d "
"entries total", len(seqinfo), sum(map(len, seqinfo.values())))
else:
lgr.info("Generated sequence info with %d entries", len(seqinfo))
return seqinfo
|
def group_dicoms_into_seqinfos(files, file_filter, dcmfilter, grouping):
"""Process list of dicoms and return seqinfo and file group
`seqinfo` contains per-sequence extract of fields from DICOMs which
will be later provided into heuristics to decide on filenames
Parameters
----------
files : list of str
List of files to consider
file_filter : callable, optional
Applied to each item of filenames. Should return True if file needs to be
kept, False otherwise.
dcmfilter : callable, optional
If called on dcm_data and returns True, it is used to set series_id
grouping : {'studyUID', 'accession_number', None}, optional
what to group by: studyUID or accession_number
Returns
-------
seqinfo : list of list
`seqinfo` is a list of info entries per each sequence (some entry
there defines a key for `filegrp`)
filegrp : dict
`filegrp` is a dictionary with files groupped per each sequence
"""
allowed_groupings = ['studyUID', 'accession_number', None]
if grouping not in allowed_groupings:
raise ValueError('I do not know how to group by {0}'.format(grouping))
per_studyUID = grouping == 'studyUID'
per_accession_number = grouping == 'accession_number'
lgr.info("Analyzing %d dicoms", len(files))
groups = [[], []]
mwgroup = []
studyUID = None
# for sanity check that all DICOMs came from the same
# "study". If not -- what is the use-case? (interrupted acquisition?)
# and how would then we deal with series numbers
# which would differ already
if file_filter:
nfl_before = len(files)
files = list(filter(file_filter, files))
nfl_after = len(files)
lgr.info('Filtering out {0} dicoms based on their filename'.format(
nfl_before-nfl_after))
for fidx, filename in enumerate(files):
from heudiconv.external.dcmstack import ds
# TODO after getting a regression test check if the same behavior
# with stop_before_pixels=True
mw = ds.wrapper_from_data(dcm.read_file(filename, force=True))
for sig in ('iop', 'ICE_Dims', 'SequenceName'):
try:
del mw.series_signature[sig]
except:
pass
try:
file_studyUID = mw.dcm_data.StudyInstanceUID
except AttributeError:
lgr.info("File {} is missing any StudyInstanceUID".format(filename))
file_studyUID = None
# Workaround for protocol name in private siemens csa header
try:
mw.dcm_data.ProtocolName
except AttributeError:
mw.dcm_data.ProtocolName = parse_private_csa_header(mw.dcm_data, 'ProtocolName', 'tProtocolName') if mw.is_csa else ''
try:
series_id = (int(mw.dcm_data.SeriesNumber),
mw.dcm_data.ProtocolName)
file_studyUID = mw.dcm_data.StudyInstanceUID
if not per_studyUID:
# verify that we are working with a single study
if studyUID is None:
studyUID = file_studyUID
elif not per_accession_number:
assert studyUID == file_studyUID, (
"Conflicting study identifiers found [{}, {}].".format(
studyUID, file_studyUID
))
except AttributeError as exc:
lgr.warning('Ignoring %s since not quite a "normal" DICOM: %s',
filename, exc)
series_id = (-1, 'none')
file_studyUID = None
if not series_id[0] < 0:
if dcmfilter is not None and dcmfilter(mw.dcm_data):
series_id = (-1, mw.dcm_data.ProtocolName)
# filter out unwanted non-image-data DICOMs by assigning
# a series number < 0 (see test below)
if not series_id[0] < 0 and mw.dcm_data[0x0008, 0x0016].repval in (
'Raw Data Storage',
'GrayscaleSoftcopyPresentationStateStorage'):
series_id = (-1, mw.dcm_data.ProtocolName)
if per_studyUID:
series_id = series_id + (file_studyUID,)
ingrp = False
for idx in range(len(mwgroup)):
# same = mw.is_same_series(mwgroup[idx])
if mw.is_same_series(mwgroup[idx]):
# the same series should have the same study uuid
assert (mwgroup[idx].dcm_data.get('StudyInstanceUID', None)
== file_studyUID)
ingrp = True
if series_id[0] >= 0:
series_id = (mwgroup[idx].dcm_data.SeriesNumber,
mwgroup[idx].dcm_data.ProtocolName)
if per_studyUID:
series_id = series_id + (file_studyUID,)
groups[0].append(series_id)
groups[1].append(idx)
if not ingrp:
mwgroup.append(mw)
groups[0].append(series_id)
groups[1].append(len(mwgroup) - 1)
group_map = dict(zip(groups[0], groups[1]))
total = 0
seqinfo = OrderedDict()
# for the next line to make any sense the series_id needs to
# be sortable in a way that preserves the series order
for series_id, mwidx in sorted(group_map.items()):
if series_id[0] < 0:
# skip our fake series with unwanted files
continue
mw = mwgroup[mwidx]
if mw.image_shape is None:
# this whole thing has now image data (maybe just PSg DICOMs)
# nothing to see here, just move on
continue
dcminfo = mw.dcm_data
series_files = [files[i] for i, s in enumerate(groups[0])
if s == series_id]
# turn the series_id into a human-readable string -- string is needed
# for JSON storage later on
if per_studyUID:
studyUID = series_id[2]
series_id = series_id[:2]
accession_number = dcminfo.get('AccessionNumber')
series_id = '-'.join(map(str, series_id))
size = list(mw.image_shape) + [len(series_files)]
total += size[-1]
if len(size) < 4:
size.append(1)
# MG - refactor into util function
try:
TR = float(dcminfo.RepetitionTime) / 1000.
except (AttributeError, ValueError):
TR = -1
try:
TE = float(dcminfo.EchoTime)
except (AttributeError, ValueError):
TE = -1
try:
refphys = str(dcminfo.ReferringPhysicianName)
except AttributeError:
refphys = ''
try:
image_type = tuple(dcminfo.ImageType)
except AttributeError:
image_type = ''
try:
series_desc = dcminfo.SeriesDescription
except AttributeError:
series_desc = ''
motion_corrected = 'MOCO' in image_type
if dcminfo.get([0x18,0x24], None):
# GE and Philips scanners
sequence_name = dcminfo[0x18,0x24].value
elif dcminfo.get([0x19, 0x109c], None):
# Siemens scanners
sequence_name = dcminfo[0x19, 0x109c].value
else:
sequence_name = 'Not found'
info = SeqInfo(
total,
op.split(series_files[0])[1],
series_id,
op.basename(op.dirname(series_files[0])),
'-', '-',
size[0], size[1], size[2], size[3],
TR, TE,
dcminfo.ProtocolName,
motion_corrected,
'derived' in [x.lower() for x in dcminfo.get('ImageType', [])],
dcminfo.get('PatientID'),
dcminfo.get('StudyDescription'),
refphys,
series_desc, # We try to set this further up.
sequence_name,
image_type,
accession_number,
# For demographics to populate BIDS participants.tsv
dcminfo.get('PatientAge'),
dcminfo.get('PatientSex'),
dcminfo.get('AcquisitionDate'),
dcminfo.get('SeriesInstanceUID')
)
# candidates
# dcminfo.AccessionNumber
# len(dcminfo.ReferencedImageSequence)
# len(dcminfo.SourceImageSequence)
# FOR demographics
if per_studyUID:
key = studyUID.split('.')[-1]
elif per_accession_number:
key = accession_number
else:
key = ''
lgr.debug("%30s %30s %27s %27s %5s nref=%-2d nsrc=%-2d %s" % (
key,
info.series_id,
series_desc,
dcminfo.ProtocolName,
info.is_derived,
len(dcminfo.get('ReferencedImageSequence', '')),
len(dcminfo.get('SourceImageSequence', '')),
info.image_type
))
if per_studyUID:
if studyUID not in seqinfo:
seqinfo[studyUID] = OrderedDict()
seqinfo[studyUID][info] = series_files
elif per_accession_number:
if accession_number not in seqinfo:
seqinfo[accession_number] = OrderedDict()
seqinfo[accession_number][info] = series_files
else:
seqinfo[info] = series_files
if per_studyUID:
lgr.info("Generated sequence info for %d studies with %d entries total",
len(seqinfo), sum(map(len, seqinfo.values())))
elif per_accession_number:
lgr.info("Generated sequence info for %d accession numbers with %d "
"entries total", len(seqinfo), sum(map(len, seqinfo.values())))
else:
lgr.info("Generated sequence info with %d entries", len(seqinfo))
return seqinfo
|
22,689 |
def test_ocsp_must_staple(context):
"""Test that OCSP Must-Staple is correctly set in the generated certificate."""
certname = context.get_domain('must-staple')
context.certbot(['auth', '--must-staple', '--domains', certname])
certificate = misc.read_certificate(join(context.config_dir,
'live/{0}/cert.pem').format(certname))
assert 'status_request' in certificate or '1.3.6.1.5.5.7.1.24'
|
def test_ocsp_must_staple(context):
"""Test that OCSP Must-Staple is correctly set in the generated certificate."""
certname = context.get_domain('must-staple')
context.certbot(['auth', '--must-staple', '--domains', certname])
certificate = misc.read_certificate(join(context.config_dir,
'live/{0}/cert.pem').format(certname))
assert 'status_request' in certificate or '1.3.6.1.5.5.7.1.24' in certificate
|
20,420 |
def user_list(fields=None):
from yunohost.utils.ldap import _get_ldap_interface
ldap_attrs = {
'username': 'uid',
'password': 'uid',
'fullname': 'cn',
'firstname': 'givenName',
'lastname': 'sn',
'mail': 'mail',
'mail-alias': 'mail',
'mail-forward': 'maildrop',
'mailbox-quota': 'mailuserquota',
'groups': 'memberOf',
'shell': 'loginShell',
'home-path': 'homeDirectory'
}
def display_default(values, _):
return values[0] if len(values) == 1 else values
display = {
'password': lambda values, user: '',
'mail': lambda values, user: display_default(values[:1], user),
'mail-alias': lambda values, _: values[1:],
'mail-forward': lambda values, user: [forward for forward in values if forward != user['uid'][0]],
'groups': lambda values, user: [
group[3:].split(',')[0]
for group in values
if not group.startswith('cn=all_users,') and
not group.startswith('cn=' + user['uid'][0] + ',')],
'shell': lambda values, _: len(values) > 0 and values[0].strip() == "/bin/false"
}
attrs = set(['uid'])
users = {}
if not fields:
fields = ['username', 'fullname', 'mail', 'mailbox-quota', 'shell']
for field in fields:
if field in ldap_attrs:
attrs |= set([ldap_attrs[field]])
else:
raise YunohostError('field_invalid', field)
ldap = _get_ldap_interface()
result = ldap.search(
"ou=users,dc=yunohost,dc=org",
"(&(objectclass=person)(!(uid=root))(!(uid=nobody)))",
attrs,
)
for user in result:
entry = {}
for field in fields:
values = []
if ldap_attrs[field] in user:
values = user[ldap_attrs[field]]
entry[field] = display.get(field, display_default)(values, user)
users[user['uid'][0]] = entry
return {"users": users}
|
def user_list(fields=None):
from yunohost.utils.ldap import _get_ldap_interface
ldap_attrs = {
'username': 'uid',
'password': 'uid',
'fullname': 'cn',
'firstname': 'givenName',
'lastname': 'sn',
'mail': 'mail',
'mail-alias': 'mail',
'mail-forward': 'maildrop',
'mailbox-quota': 'mailuserquota',
'groups': 'memberOf',
'shell': 'loginShell',
'home-path': 'homeDirectory'
}
def display_default(values, _):
return values[0] if len(values) == 1 else values
display = {
'password': lambda values, user: '',
'mail': lambda values, user: display_default(values[:1], user),
'mail-alias': lambda values, _: values[1:],
'mail-forward': lambda values, user: [forward for forward in values if forward != user['uid'][0]],
'groups': lambda values, user: [
group[3:].split(',')[0]
for group in values
if not group.startswith('cn=all_users,') and
not group.startswith('cn=' + user['uid'][0] + ',')],
'shell': lambda values, _: len(values) > 0 and values[0].strip() == "/bin/false"
}
attrs = set(['uid'])
users = {}
if not fields:
fields = ['username', 'fullname', 'mail', 'mailbox-quota', 'shell']
for field in fields:
if field in ldap_attrs:
attrs.add(ldap_attrs[field])
else:
raise YunohostError('field_invalid', field)
ldap = _get_ldap_interface()
result = ldap.search(
"ou=users,dc=yunohost,dc=org",
"(&(objectclass=person)(!(uid=root))(!(uid=nobody)))",
attrs,
)
for user in result:
entry = {}
for field in fields:
values = []
if ldap_attrs[field] in user:
values = user[ldap_attrs[field]]
entry[field] = display.get(field, display_default)(values, user)
users[user['uid'][0]] = entry
return {"users": users}
|
41,416 |
def add_gaia_figure_elements(tpf, fig, magnitude_limit=18):
"""Make the Gaia Figure Elements"""
#Get the positions of the Gaia sources
c1 = SkyCoord(tpf.ra, tpf.dec, frame='icrs', unit='deg')
# Use pixel scale for query size
pix_scale = 4.0 # arcseconds / pixel for Kepler, default
if tpf.mission == 'TESS':
pix_scale = 21.0
# We are querying with a diameter as the radius, overfilling by 2x.
result = Vizier.query_region(c1, catalog=["I/345/gaia2"], radius=Angle(np.max(tpf.shape[1:]) * pix_scale, "arcsec"))
if result is None:
raise ValueError('No targets found in region.')
result = result["I/345/gaia2"].to_pandas()
result = result[result.Gmag < magnitude_limit]
radecs = np.vstack([result['RA_ICRS'], result['DE_ICRS']]).T
coords = tpf.wcs.all_world2pix(radecs, 0) ## TODO, is this supposed to be zero or one?????
year = ((tpf.astropy_time[0].jd - 2457206.375) * u.day).to(u.year)
pmra = ((np.asarray(result.pmRA) * u.milliarcsecond/u.year) * year).to(u.arcsec).value
pmdec = ((np.asarray(result.pmDE) * u.milliarcsecond/u.year) * year).to(u.arcsec).value
## todo: filter NaNs in pmra/pmdec
result.RA_ICRS += pmra
result.DE_ICRS += pmdec
# Gently size the points by their Gaia magnitude
sizes = 64.0 / 2**(result['Gmag']/5.0)
source = ColumnDataSource(data=dict(ra=result['RA_ICRS'],
dec=result['DE_ICRS'],
source=result['Source'],
Gmag=result['Gmag'],
plx=result['Plx'],
x=coords[:, 0]+tpf.column,
y=coords[:, 1]+tpf.row,
size=sizes))
r = fig.circle('x', 'y', source=source,fill_alpha=0.3, size='size', line_color=None,
selection_color="firebrick",nonselection_fill_alpha=0.0, nonselection_line_color=None,
nonselection_line_alpha=0.0, fill_color="firebrick",
hover_fill_color="firebrick", hover_alpha=0.9, hover_line_color="white")
fig.add_tools(HoverTool(tooltips=[("Source", "@source"),("G", "@Gmag"),("Parallax", "@plx"),
("RA", "@ra{0,0.00000000}"),
("DEC", "@dec{0,0.00000000}"),
("x", "@x"),
("y", "@y")],
renderers=[r],
mode='mouse',
point_policy="snap_to_data"))
return fig, r
|
def add_gaia_figure_elements(tpf, fig, magnitude_limit=18):
"""Make the Gaia Figure Elements"""
#Get the positions of the Gaia sources
c1 = SkyCoord(tpf.ra, tpf.dec, frame='icrs', unit='deg')
# Use pixel scale for query size
pix_scale = 4.0 # arcseconds / pixel for Kepler, default
if tpf.mission == 'TESS':
pix_scale = 21.0
# We are querying with a diameter as the radius, overfilling by 2x.
result = Vizier.query_region(c1, catalog=["I/345/gaia2"], radius=Angle(np.max(tpf.shape[1:]) * pix_scale, "arcsec"))
if result is None:
raise ValueError('No targets found in region.')
result = result["I/345/gaia2"].to_pandas()
result = result[result.Gmag < magnitude_limit]
radecs = np.vstack([result['RA_ICRS'], result['DE_ICRS']]).T
coords = tpf.wcs.all_world2pix(radecs, 0) ## TODO, is this supposed to be zero or one?????
year = ((tpf.astropy_time[0].jd - 2457206.375) * u.day).to(u.year)
pmra = ((np.nan_to_num(np.asarray(result.pmRA)) * u.milliarcsecond/u.year) * year).to(u.arcsec).value
pmdec = ((np.asarray(result.pmDE) * u.milliarcsecond/u.year) * year).to(u.arcsec).value
## todo: filter NaNs in pmra/pmdec
result.RA_ICRS += pmra
result.DE_ICRS += pmdec
# Gently size the points by their Gaia magnitude
sizes = 64.0 / 2**(result['Gmag']/5.0)
source = ColumnDataSource(data=dict(ra=result['RA_ICRS'],
dec=result['DE_ICRS'],
source=result['Source'],
Gmag=result['Gmag'],
plx=result['Plx'],
x=coords[:, 0]+tpf.column,
y=coords[:, 1]+tpf.row,
size=sizes))
r = fig.circle('x', 'y', source=source,fill_alpha=0.3, size='size', line_color=None,
selection_color="firebrick",nonselection_fill_alpha=0.0, nonselection_line_color=None,
nonselection_line_alpha=0.0, fill_color="firebrick",
hover_fill_color="firebrick", hover_alpha=0.9, hover_line_color="white")
fig.add_tools(HoverTool(tooltips=[("Source", "@source"),("G", "@Gmag"),("Parallax", "@plx"),
("RA", "@ra{0,0.00000000}"),
("DEC", "@dec{0,0.00000000}"),
("x", "@x"),
("y", "@y")],
renderers=[r],
mode='mouse',
point_policy="snap_to_data"))
return fig, r
|
42,665 |
def history_event_from_kraken(
events: List[Dict[str, Any]],
name: str,
msg_aggregator: MessagesAggregator,
) -> Tuple[List[HistoryBaseEntry], bool]:
"""
This function gets raw data from kraken and creates a list of related history events
to be used in the app. It returns a list of events and a boolean in the case that an unknown
type is found.
"""
group_events = []
found_unknown_event = False
current_fee_index = len(events)
for idx, raw_event in enumerate(events):
try:
timestamp = TimestampMS((deserialize_fval(
value=raw_event['time'], name='time', location='kraken ledger processing',
) * 1000).to_int(exact=False))
identifier = raw_event['refid']
event_type = kraken_ledger_entry_type_to_ours(raw_event['type'])
asset = asset_from_kraken(raw_event['asset'])
event_subtype = HistoryEventSubType.NONE
notes = None
raw_amount = deserialize_asset_amount(raw_event['amount'])
# If we don't know how to handle an event atm or we find an unsupported
# event type the logic will be to store it as unknown and if in the future
# we need some information from it we can take actions to process them
if event_type == HistoryEventType.TRANSFER:
if raw_event['subtype'] == 'spottostaking':
event_type = HistoryEventType.STAKING
event_subtype = HistoryEventSubType.DEPOSIT_ASSET
elif raw_event['subtype'] == 'stakingfromspot':
event_type = HistoryEventType.STAKING
event_subtype = HistoryEventSubType.RECEIVE_WRAPPED
elif raw_event['subtype'] == 'stakingtospot':
event_type = HistoryEventType.STAKING
event_subtype = HistoryEventSubType.REMOVE_ASSET
elif raw_event['subtype'] == 'spotfromstaking':
event_type = HistoryEventType.STAKING
event_subtype = HistoryEventSubType.RETURN_WRAPPED
elif event_type == HistoryEventType.ADJUSTMENT:
if raw_amount < ZERO:
event_subtype = HistoryEventSubType.SPEND
else:
event_subtype = HistoryEventSubType.RECEIVE
elif event_type == HistoryEventType.STAKING:
event_subtype = HistoryEventSubType.REWARD
elif event_type == HistoryEventType.INFORMATIONAL:
found_unknown_event = True
notes = raw_event['type']
log.warning(
f'Encountered kraken historic event type we do not process. {raw_event}',
)
fee_amount = deserialize_asset_amount(raw_event['fee'])
# Make sure to not generate a event for KFEES that is not of type FEE
if asset != A_KFEE:
group_events.append(HistoryBaseEntry(
event_identifier=identifier,
sequence_index=idx,
timestamp=timestamp,
location=Location.KRAKEN,
location_label=name,
asset=asset,
balance=Balance(
amount=raw_amount,
usd_value=ZERO,
),
notes=notes,
event_type=event_type,
event_subtype=event_subtype,
))
if fee_amount != ZERO:
group_events.append(HistoryBaseEntry(
event_identifier=identifier,
sequence_index=current_fee_index,
timestamp=timestamp,
location=Location.KRAKEN,
location_label=name,
asset=asset,
balance=Balance(
amount=fee_amount,
usd_value=ZERO,
),
notes=notes,
event_type=event_type,
event_subtype=HistoryEventSubType.FEE,
))
# Increase the fee index to not have duplicates in the case of having a normal
# fee and KFEE
current_fee_index += 1
except (DeserializationError, KeyError, UnknownAsset) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Keyrror {msg}'
msg_aggregator.add_error(
f'Failed to read ledger event from kraken {raw_event} due to {msg}',
)
return [], False
return group_events, found_unknown_event
|
def history_event_from_kraken(
events: List[Dict[str, Any]],
name: str,
msg_aggregator: MessagesAggregator,
) -> Tuple[List[HistoryBaseEntry], bool]:
"""
This function gets raw data from kraken and creates a list of related history events
to be used in the app. It returns a list of events and a boolean in the case that an unknown
type is found.
"""
group_events = []
found_unknown_event = False
current_fee_index = len(events)
for idx, raw_event in enumerate(events):
try:
timestamp = TimestampMS((deserialize_fval(
value=raw_event['time'], name='time', location='kraken ledger processing',
) * 1000).to_int(exact=False))
identifier = raw_event['refid']
event_type = kraken_ledger_entry_type_to_ours(raw_event['type'])
asset = asset_from_kraken(raw_event['asset'])
event_subtype = HistoryEventSubType.NONE
notes = None
raw_amount = deserialize_asset_amount(raw_event['amount'])
# If we don't know how to handle an event atm or we find an unsupported
# event type the logic will be to store it as unknown and if in the future
# we need some information from it we can take actions to process them
if event_type == HistoryEventType.TRANSFER:
if raw_event['subtype'] == 'spottostaking':
event_type = HistoryEventType.STAKING
event_subtype = HistoryEventSubType.DEPOSIT_ASSET
elif raw_event['subtype'] == 'stakingfromspot':
event_type = HistoryEventType.STAKING
event_subtype = HistoryEventSubType.RECEIVE_WRAPPED
elif raw_event['subtype'] == 'stakingtospot':
event_type = HistoryEventType.STAKING
event_subtype = HistoryEventSubType.REMOVE_ASSET
elif raw_event['subtype'] == 'spotfromstaking':
event_type = HistoryEventType.STAKING
event_subtype = HistoryEventSubType.RETURN_WRAPPED
elif event_type == HistoryEventType.ADJUSTMENT:
if raw_amount < ZERO:
event_subtype = HistoryEventSubType.SPEND
else:
event_subtype = HistoryEventSubType.RECEIVE
elif event_type == HistoryEventType.STAKING:
event_subtype = HistoryEventSubType.REWARD
elif event_type == HistoryEventType.INFORMATIONAL:
found_unknown_event = True
notes = raw_event['type']
log.warning(
f'Encountered kraken historic event type we do not process. {raw_event}',
)
fee_amount = deserialize_asset_amount(raw_event['fee'])
# Make sure to not generate an event for KFEES that is not of type FEE
if asset != A_KFEE:
group_events.append(HistoryBaseEntry(
event_identifier=identifier,
sequence_index=idx,
timestamp=timestamp,
location=Location.KRAKEN,
location_label=name,
asset=asset,
balance=Balance(
amount=raw_amount,
usd_value=ZERO,
),
notes=notes,
event_type=event_type,
event_subtype=event_subtype,
))
if fee_amount != ZERO:
group_events.append(HistoryBaseEntry(
event_identifier=identifier,
sequence_index=current_fee_index,
timestamp=timestamp,
location=Location.KRAKEN,
location_label=name,
asset=asset,
balance=Balance(
amount=fee_amount,
usd_value=ZERO,
),
notes=notes,
event_type=event_type,
event_subtype=HistoryEventSubType.FEE,
))
# Increase the fee index to not have duplicates in the case of having a normal
# fee and KFEE
current_fee_index += 1
except (DeserializationError, KeyError, UnknownAsset) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Keyrror {msg}'
msg_aggregator.add_error(
f'Failed to read ledger event from kraken {raw_event} due to {msg}',
)
return [], False
return group_events, found_unknown_event
|
4,179 |
def bootstrap_ci(arr, ci=.95, n_bootstraps=2000, stat_fun='mean',
random_state=None):
"""Get confidence intervals from non-parametric bootstrap.
Parameters
----------
arr : ndarray
The input data on which to calculate the confidence interval.
ci : float
Level of the confidence interval between 0 and 1.
n_bootstraps : int
Number of bootstraps
stat_fun : str | callable
Can be "mean", "median", or a callable operating along `axis=0`.
random_state : int | float | array_like | None
The seed at which to initialize the bootstrap.
Returns
-------
cis : ndarray
Containing the lower boundary of the CI at `cis[0, ...]` and the upper
boundary of the CI at `cis[1, ...]`.
"""
if stat_fun == "mean":
def stat_fun(x):
return x.mean(axis=0)
elif stat_fun == 'median':
def stat_fun(x):
return np.median(x, axis=0)
elif not callable(stat_fun):
raise ValueError("stat_fun must be 'mean', 'median' or callable.")
n_trials = arr.shape[0]
indices = np.arange(n_trials, dtype=int) # BCA would be cool to have too
rng = check_random_state(random_state)
boot_indices = rng.choice(indices, replace=True,
size=(n_bootstraps, len(indices)))
stat = np.array([stat_fun(arr[inds]) for inds in boot_indices])
ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100)
ci_low, ci_up = np.percentile(stat, ci, axis=0)
return np.array([ci_low, ci_up])
|
def bootstrap_ci(arr, ci=.95, n_bootstraps=2000, stat_fun='mean',
random_state=None):
"""Get confidence intervals from non-parametric bootstrap.
Parameters
----------
arr : ndarray
The input data on which to calculate the confidence interval.
ci : float
Level of the confidence interval between 0 and 1.
n_bootstraps : int
Number of bootstraps
stat_fun : str | callable
Can be "mean", "median", or a callable operating along `axis=0`.
random_state : int | float | array_like | None
The seed at which to initialize the bootstrap.
Returns
-------
cis : ndarray, shape (2, ...)
Containing the lower boundary of the CI at `cis[0, ...]` and the upper
boundary of the CI at `cis[1, ...]`.
"""
if stat_fun == "mean":
def stat_fun(x):
return x.mean(axis=0)
elif stat_fun == 'median':
def stat_fun(x):
return np.median(x, axis=0)
elif not callable(stat_fun):
raise ValueError("stat_fun must be 'mean', 'median' or callable.")
n_trials = arr.shape[0]
indices = np.arange(n_trials, dtype=int) # BCA would be cool to have too
rng = check_random_state(random_state)
boot_indices = rng.choice(indices, replace=True,
size=(n_bootstraps, len(indices)))
stat = np.array([stat_fun(arr[inds]) for inds in boot_indices])
ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100)
ci_low, ci_up = np.percentile(stat, ci, axis=0)
return np.array([ci_low, ci_up])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.