id (int64, 11 to 59.9k) | original (string lengths 33 to 150k) | modified (string lengths 37 to 150k) |
---|---|---|
27,761 |
def test_show_fixtures_and_test(pytester: Pytester, dummy_yaml_custom_test):
"""Verify that fixtures are not executed."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def arg():
assert False
def test_arg(arg):
assert False
"""
)
result = pytester.runpytest("--setup-plan")
assert result.ret == 0
result.stdout.fnmatch_lines(
["*SETUP F arg*", "*test_arg (fixtures used: arg)", "*TEARDOWN F arg*"]
)
|
def test_show_fixtures_and_test(pytester: Pytester, dummy_yaml_custom_test: None) -> None:
"""Verify that fixtures are not executed."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def arg():
assert False
def test_arg(arg):
assert False
"""
)
result = pytester.runpytest("--setup-plan")
assert result.ret == 0
result.stdout.fnmatch_lines(
["*SETUP F arg*", "*test_arg (fixtures used: arg)", "*TEARDOWN F arg*"]
)
|
3,356 |
def histogram_query(
fields,
user_query,
params,
num_buckets,
precision=0,
min_value=None,
max_value=None,
data_filter=None,
referrer=None,
group_by=None,
order_by=None,
limit_by=None,
extra_conditions=None,
normalize_results=True,
):
"""
API for generating histograms for numeric columns.
A multihistogram is possible only if the columns are all array columns.
Array columns are columns whose values are nested arrays.
Measurements and span op breakdowns are examples of array columns.
The resulting histograms will have their bins aligned.
:param [str] fields: The list of fields for which you want to generate histograms for.
:param str user_query: Filter query string to create conditions from.
:param {str: str} params: Filtering parameters with start, end, project_id, environment
:param int num_buckets: The number of buckets the histogram should contain.
:param int precision: The number of decimal places to preserve, default 0.
:param float min_value: The minimum value allowed to be in the histogram.
If left unspecified, it is queried using `user_query` and `params`.
:param float max_value: The maximum value allowed to be in the histogram.
If left unspecified, it is queried using `user_query` and `params`.
:param str data_filter: Indicate the filter strategy to be applied to the data.
:param [str] group_by: Experimental. Allows additional grouping to serve multifacet histograms.
:param [str] order_by: Experimental. Allows additional ordering within each alias to serve multifacet histograms.
:param [str] limit_by: Experimental. Allows limiting within a group when serving multifacet histograms.
:param [str] extra_conditions: Adds any additional conditions to the histogram query that aren't received from params.
:param bool normalize_results: Indicate whether to normalize the results by column into bins.
"""
multiplier = int(10 ** precision)
if max_value is not None:
# We want the specified max_value to be exclusive, and the queried max_value
# to be inclusive. So we adjust the specified max_value using the multiplier.
max_value -= 0.1 / multiplier
min_value, max_value = find_histogram_min_max(
fields, min_value, max_value, user_query, params, data_filter
)
key_column = None
array_column = None
histogram_function = None
conditions = []
if len(fields) > 1:
array_column = check_multihistogram_fields(fields)
if array_column == "measurements":
key_column = "array_join(measurements_key)"
histogram_function = get_measurement_name
elif array_column == "span_op_breakdowns":
key_column = "array_join(span_op_breakdowns_key)"
histogram_function = get_span_op_breakdown_name
else:
raise InvalidSearchQuery(
"multihistogram expected either all measurements or all breakdowns"
)
key_alias = get_function_alias(key_column)
field_names = [histogram_function(field) for field in fields]
conditions.append([key_alias, "IN", field_names])
if extra_conditions:
conditions.extend(extra_conditions)
histogram_params = find_histogram_params(num_buckets, min_value, max_value, multiplier)
histogram_column = get_histogram_column(fields, key_column, histogram_params, array_column)
histogram_alias = get_function_alias(histogram_column)
if min_value is None or max_value is None:
return normalize_histogram_results(
fields, key_column, histogram_params, {"data": []}, array_column
)
# make sure to bound the bins to get the desired range of results
if min_value is not None:
min_bin = histogram_params.start_offset
conditions.append([histogram_alias, ">=", min_bin])
if max_value is not None:
max_bin = histogram_params.start_offset + histogram_params.bucket_size * num_buckets
conditions.append([histogram_alias, "<=", max_bin])
columns = [] if key_column is None else [key_column]
limit = len(fields) * num_buckets
orderby = [histogram_alias]
if order_by:
orderby.insert(0, order_by)
histogram_query = prepare_discover_query(
selected_columns=columns + [histogram_column, "count()"],
conditions=conditions,
query=user_query,
params=params,
orderby=orderby,
functions_acl=["array_join", "histogram"],
)
snuba_filter = histogram_query.filter
if group_by:
snuba_filter.groupby += group_by
result = raw_query(
start=snuba_filter.start,
end=snuba_filter.end,
groupby=snuba_filter.groupby,
conditions=snuba_filter.conditions,
aggregations=snuba_filter.aggregations,
selected_columns=snuba_filter.selected_columns,
filter_keys=snuba_filter.filter_keys,
having=snuba_filter.having,
orderby=snuba_filter.orderby,
dataset=Dataset.Discover,
limitby=limit_by,
limit=limit,
referrer=referrer,
)
results = transform_results(
result,
histogram_query.fields["functions"],
histogram_query.columns,
snuba_filter,
)
if not normalize_results:
return results
return normalize_histogram_results(fields, key_column, histogram_params, results, array_column)
|
def histogram_query(
fields,
user_query,
params,
num_buckets,
precision=0,
min_value=None,
max_value=None,
data_filter=None,
referrer=None,
group_by=None,
order_by=None,
limit_by=None,
extra_conditions=None,
normalize_results=True,
):
"""
API for generating histograms for numeric columns.
A multihistogram is possible only if the columns are all array columns.
Array columns are columns whose values are nested arrays.
Measurements and span op breakdowns are examples of array columns.
The resulting histograms will have their bins aligned.
:param [str] fields: The list of fields for which you want to generate histograms for.
:param str user_query: Filter query string to create conditions from.
:param {str: str} params: Filtering parameters with start, end, project_id, environment
:param int num_buckets: The number of buckets the histogram should contain.
:param int precision: The number of decimal places to preserve, default 0.
:param float min_value: The minimum value allowed to be in the histogram.
If left unspecified, it is queried using `user_query` and `params`.
:param float max_value: The maximum value allowed to be in the histogram.
If left unspecified, it is queried using `user_query` and `params`.
:param str data_filter: Indicate the filter strategy to be applied to the data.
:param [str] group_by: Experimental. Allows additional grouping to serve multifacet histograms.
:param [str] order_by: Experimental. Allows additional ordering within each alias to serve multifacet histograms.
:param [str] limit_by: Experimental. Allows limiting within a group when serving multifacet histograms.
:param [str] extra_conditions: Adds any additional conditions to the histogram query that aren't received from params.
:param bool normalize_results: Indicate whether to normalize the results by column into bins.
"""
multiplier = int(10 ** precision)
if max_value is not None:
# We want the specified max_value to be exclusive, and the queried max_value
# to be inclusive. So we adjust the specified max_value using the multiplier.
max_value -= 0.1 / multiplier
min_value, max_value = find_histogram_min_max(
fields, min_value, max_value, user_query, params, data_filter
)
key_column = None
array_column = None
histogram_function = None
conditions = []
if len(fields) > 1:
array_column = check_multihistogram_fields(fields)
if array_column == "measurements":
key_column = "array_join(measurements_key)"
histogram_function = get_measurement_name
elif array_column == "span_op_breakdowns":
key_column = "array_join(span_op_breakdowns_key)"
histogram_function = get_span_op_breakdown_name
else:
raise InvalidSearchQuery(
"multihistogram expected either all measurements or all breakdowns"
)
key_alias = get_function_alias(key_column)
field_names = [histogram_function(field) for field in fields]
conditions.append([key_alias, "IN", field_names])
if extra_conditions:
conditions.extend(extra_conditions)
histogram_params = find_histogram_params(num_buckets, min_value, max_value, multiplier)
histogram_column = get_histogram_column(fields, key_column, histogram_params, array_column)
histogram_alias = get_function_alias(histogram_column)
if min_value is None or max_value is None:
return normalize_histogram_results(
fields, key_column, histogram_params, {"data": []}, array_column
)
# make sure to bound the bins to get the desired range of results
if min_value is not None:
min_bin = histogram_params.start_offset
conditions.append([histogram_alias, ">=", min_bin])
if max_value is not None:
max_bin = histogram_params.start_offset + histogram_params.bucket_size * num_buckets
conditions.append([histogram_alias, "<=", max_bin])
columns = [] if key_column is None else [key_column]
limit = len(fields) * num_buckets
orderby = orderby + [histogram_alias]
histogram_query = prepare_discover_query(
selected_columns=columns + [histogram_column, "count()"],
conditions=conditions,
query=user_query,
params=params,
orderby=orderby,
functions_acl=["array_join", "histogram"],
)
snuba_filter = histogram_query.filter
if group_by:
snuba_filter.groupby += group_by
result = raw_query(
start=snuba_filter.start,
end=snuba_filter.end,
groupby=snuba_filter.groupby,
conditions=snuba_filter.conditions,
aggregations=snuba_filter.aggregations,
selected_columns=snuba_filter.selected_columns,
filter_keys=snuba_filter.filter_keys,
having=snuba_filter.having,
orderby=snuba_filter.orderby,
dataset=Dataset.Discover,
limitby=limit_by,
limit=limit,
referrer=referrer,
)
results = transform_results(
result,
histogram_query.fields["functions"],
histogram_query.columns,
snuba_filter,
)
if not normalize_results:
return results
return normalize_histogram_results(fields, key_column, histogram_params, results, array_column)
|
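A minimal, runnable sketch of the bucket arithmetic used by `histogram_query` above. The `toy_histogram_bounds` helper and its rounding rule are hypothetical stand-ins (the real `find_histogram_params` is not shown in this row); only the exclusive-max adjustment and the min/max bin bounds mirror the code above.

import math

def toy_histogram_bounds(num_buckets, min_value, max_value, precision=0):
    # mirror histogram_query: scale by 10**precision and make max_value exclusive
    multiplier = int(10 ** precision)
    max_value -= 0.1 / multiplier
    # hypothetical stand-in for find_histogram_params: integer-scaled buckets
    start_offset = int(math.floor(min_value * multiplier))
    bucket_size = max(1, int(math.ceil((max_value - min_value) * multiplier / num_buckets)))
    min_bin = start_offset
    max_bin = start_offset + bucket_size * num_buckets
    return start_offset, bucket_size, (min_bin, max_bin)

print(toy_histogram_bounds(10, 0, 100))  # -> (0, 10, (0, 100))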
41,820 |
def _save_best_config(config_file: str, study: optuna.Study) -> Dict:
best_params = study.best_params
for key, value in best_params.items():
best_params[key] = str(value)
config = json.loads(_jsonnet.evaluate_file(config_file, ext_vars=best_params))
return allennlp.common.params.infer_and_cast(config)
|
def _dump_best_config(config_file: str, study: optuna.Study) -> Dict:
best_params = study.best_params
for key, value in best_params.items():
best_params[key] = str(value)
config = json.loads(_jsonnet.evaluate_file(config_file, ext_vars=best_params))
return allennlp.common.params.infer_and_cast(config)
|
50,326 |
def test_get_project_languages(gl, resp_list_languages):
python = gl.projects.get(1).languages().get("python")
ruby = gl.projects.get(1).languages().get("ruby")
coffee_script = gl.projects.get(1).languages().get("CoffeeScript")
assert python == 80.00
assert ruby == 99.99
assert coffee_script == 00.01
|
def test_get_project_languages(project, resp_list_languages):
python = project.languages().get("python")
ruby = project.languages().get("ruby")
coffee_script = project.languages().get("CoffeeScript")
assert python == 80.00
assert ruby == 99.99
assert coffee_script == 00.01
|
13,905 |
def updateCallOfReleaseChecklist(filename: str, lines: List[str]):
newLines = []
callReleaseChecklist = "admin/release_checklist"
callFound = False
iterLines = iter(lines)
for line in iterLines:
if callReleaseChecklist in line:
line = re.sub(r"\d+\.\d+$", VERSION, line)
callFound = True
newLines.append(line)
if not callFound:
raise RuntimeError(f"Call of {callReleaseChecklist!r} not found in {filename!r}.")
return newLines
|
def updateCallOfReleaseChecklist(filename: str, lines: List[str]):
newLines = []
callReleaseChecklist = "admin/release_checklist"
callFound = False
for line in lines:
if callReleaseChecklist in line:
line = re.sub(r"\d+\.\d+$", VERSION, line)
callFound = True
newLines.append(line)
if not callFound:
raise RuntimeError(f"Call of {callReleaseChecklist!r} not found in {filename!r}.")
return newLines
|
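For reference, a standalone illustration of the version-bump substitution performed by `updateCallOfReleaseChecklist` above; the `VERSION` value and the sample line are made up for the example.

import re

VERSION = "3.11"  # hypothetical value; in the real script this is a module-level constant
line = "see admin/release_checklist 3.10"
print(re.sub(r"\d+\.\d+$", VERSION, line))  # -> "see admin/release_checklist 3.11"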
31,711 |
def group_entry(group_object, custom_attributes):
# create an endpoint entry from a group object
group = {
'Type': 'AD',
'ID': group_object.get('dn'),
'Name': group_object.get('name'),
'Groups': group_object.get('memberOf')
}
lower_cased_person_object_keys = {
person_object_key.lower(): person_object_key for person_object_key in group_object.keys()
}
for attr in custom_attributes:
if attr == '*':
continue
try:
group[attr] = group_object[attr]
except KeyError as e:
lower_cased_custom_attr = attr.lower()
if lower_cased_custom_attr in lower_cased_person_object_keys:
cased_custom_attr = lower_cased_person_object_keys.get(lower_cased_custom_attr, '')
group[cased_custom_attr] = group_object[cased_custom_attr]
else:
demisto.error(f'Failed parsing custom attribute {attr}, error: {e}')
return group
|
def group_entry(group_object, custom_attributes):
# create an endpoint entry from a group object
group = {
'Type': 'AD',
'ID': group_object.get('dn'),
'Name': group_object.get('name'),
'Groups': group_object.get('memberOf'),
}
lower_cased_person_object_keys = {
person_object_key.lower(): person_object_key for person_object_key in group_object.keys()
}
for attr in custom_attributes:
if attr == '*':
continue
try:
group[attr] = group_object[attr]
except KeyError as e:
lower_cased_custom_attr = attr.lower()
if lower_cased_custom_attr in lower_cased_person_object_keys:
cased_custom_attr = lower_cased_person_object_keys.get(lower_cased_custom_attr, '')
group[cased_custom_attr] = group_object[cased_custom_attr]
else:
demisto.error(f'Failed parsing custom attribute {attr}, error: {e}')
return group
|
14,876 |
def info_from_service(service):
"""Return prepared info from mDNS entries."""
properties = {"_raw": {}}
for key, value in service.properties.items():
# See https://ietf.org/rfc/rfc6763.html#section-6.4 and
# https://ietf.org/rfc/rfc6763.html#section-6.5 for expected encodings
# for property keys and values
key = key.decode("ascii")
properties["_raw"][key] = value
try:
if isinstance(value, bytes):
properties[key] = value.decode("utf-8")
except UnicodeDecodeError:
properties[key] = None
address = service.addresses[0]
info = {
ATTR_HOST: str(ipaddress.ip_address(address)),
ATTR_PORT: service.port,
ATTR_HOSTNAME: service.server,
ATTR_TYPE: service.type,
ATTR_NAME: service.name,
ATTR_PROPERTIES: properties,
}
return info
|
def info_from_service(service):
"""Return prepared info from mDNS entries."""
properties = {"_raw": {}}
for key, value in service.properties.items():
# See https://ietf.org/rfc/rfc6763.html#section-6.4 and
# https://ietf.org/rfc/rfc6763.html#section-6.5 for expected encodings
# for property keys and values
key = key.decode("ascii")
properties["_raw"][key] = value
try:
if isinstance(value, bytes):
properties[key] = value.decode("utf-8")
except UnicodeDecodeError:
pass
address = service.addresses[0]
info = {
ATTR_HOST: str(ipaddress.ip_address(address)),
ATTR_PORT: service.port,
ATTR_HOSTNAME: service.server,
ATTR_TYPE: service.type,
ATTR_NAME: service.name,
ATTR_PROPERTIES: properties,
}
return info
|
58,178 |
def main(params: dict, args: dict, command: str):
"""main function, parses params and runs command functions
"""
demisto.debug(f"Command called {command}")
demisto.results(f"Args are {args}")
demisto.results(f"params are {params}")
client = Client(params)
if command == 'quttera-scan-start':
results = scan_start(client, args)
elif command == 'quttera-rescan-status':
results = rescan_status(client, args)
elif command == 'quttera-scan-report':
results = scan_report(client, args)
elif command == 'quttera-report-malware':
results = report_malware(client, args)
elif command == 'quttera-status-blacklist':
results = status_blacklist(client, args)
else:
raise NotImplementedError(f'Command {command} not implemented')
return_results(results)
|
def main(params: dict, args: dict, command: str):
"""main function, parses params and runs command functions
"""
demisto.debug(f"Command called {command}")
demisto.info(f"Args are {args}")
demisto.info(f"params are {params}")
client = Client(params)
if command == 'quttera-scan-start':
results = scan_start(client, args)
elif command == 'quttera-rescan-status':
results = rescan_status(client, args)
elif command == 'quttera-scan-report':
results = scan_report(client, args)
elif command == 'quttera-report-malware':
results = report_malware(client, args)
elif command == 'quttera-status-blacklist':
results = status_blacklist(client, args)
else:
raise NotImplementedError(f'Command {command} not implemented')
return_results(results)
|
7,401 |
def check_numpy_arr(arr, name, bool_expected=False):
if type(arr) != np.ndarray:
raise ValueError(
f"{name} is of type {type(arr)} not nd.array of dtype boolean as",
"expected")
if bool_expected:
if np.sum(np.where(arr > 1, 1, 0)) > 0:
raise ValueError(
f"{name} is ndarray of dtype {arr.dtype} with non-binary",
"values.Check if image mask was passed in as expected.")
|
def check_numpy_arr(arr, name, bool_expected=False):
if type(arr) != np.ndarray:
raise ValueError(
f"{name} is of type {type(arr)} not nd.array of dtype boolean as",
"expected")
if bool_expected:
if np.sum(np.where(arr > 1, 1, 0)) > 0:
raise ValueError(
f"{arr} is ndarray of dtype {arr.dtype} with non-binary ",
"values.Check if image mask was passed in as expected.")
|
41,713 |
def objective(trial):
data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.3)
param = {
'objective': trial.suggest_categorical('objective', ['Logloss', 'CrossEntropy']),
'iterations': trial.suggest_int('num_leaves', 500, 2000),
'colsample_bylevel': trial.suggest_uniform('colsample_bylevel', 0.01, 0.1),
'depth': trial.suggest_int('depth', 1, 16),
'boosting_type': trial.suggest_categorical('boosting_type', ['Ordered', 'Plain']),
'learning_rate': trial.suggest_loguniform('learning_rate', 1e-6, 1.0)
}
gbm = cb.CatBoostClassifier(**param)
gbm.fit(train_x, train_y, eval_set=[(test_x, test_y)], verbose=0, early_stopping_rounds=100)
preds = gbm.predict(test_x)
pred_labels = np.rint(preds)
accuracy = sklearn.metrics.accuracy_score(test_y, pred_labels)
return accuracy
|
def objective(trial):
data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.3)
param = {
'objective': trial.suggest_categorical('objective', ['Logloss', 'CrossEntropy']),
'iterations': trial.suggest_int('iterations', 500, 2000),
'colsample_bylevel': trial.suggest_uniform('colsample_bylevel', 0.01, 0.1),
'depth': trial.suggest_int('depth', 1, 16),
'boosting_type': trial.suggest_categorical('boosting_type', ['Ordered', 'Plain']),
'learning_rate': trial.suggest_loguniform('learning_rate', 1e-6, 1.0)
}
gbm = cb.CatBoostClassifier(**param)
gbm.fit(train_x, train_y, eval_set=[(test_x, test_y)], verbose=0, early_stopping_rounds=100)
preds = gbm.predict(test_x)
pred_labels = np.rint(preds)
accuracy = sklearn.metrics.accuracy_score(test_y, pred_labels)
return accuracy
|
45,926 |
def draw_line(
image : torch.Tensor,
p1 : torch.Tensor, p2 : torch.Tensor,
color : torch.Tensor,
) -> torch.Tensor:
r"""Draw a single line into an image.
Args:
image: the input image to where to draw the lines with shape (C,H,W).
p1: the start point of the line with shape (2).
p2: the end point of the line with shape (2).
color: the color of the line with shape (3).
Return:
the image containing the line.
"""
# assign points
x1, y1 = p1
x2, y2 = p2
# calculate coefficients A,B,C of line
# from equation Ax + By + C = 0
A = y2 - y1
B = x1 - x2
C = x2 * y1 - x1 * y2
# make sure A is positive to utilize the function properly
if (A < 0):
A = -A
B = -B
C = -C
# calculate the slope of the line
# check for division by zero
if (B != 0):
m = -A / B
# make sure you start drawing in the right direction
x1, x2 = min(x1, x2), max(x1, x2)
y1, y2 = min(y1, y2), max(y1, y2)
# line equation that determines the distance away from the line
def line_equation(x, y):
return A * x + B * y + C
# vertical line
if B == 0:
image[:, y1:y2 + 1, x1] = color
# horizontal line
elif A == 0:
image[:, y1, x1:x2 + 1] = color
# slope between 0 and 1
elif 0 < m < 1:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y1, color)
if line_equation(i + 1, y1 + 0.5) > 0:
y1 += 1
# slope greater than or equal to 1
elif m >= 1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x1, j, color)
if line_equation(x1 + 0.5, j + 1) < 0:
x1 += 1
# slope less than -1
elif m <= -1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x2, j, color)
if line_equation(x2 - 0.5, j + 1) > 0:
x2 -= 1
# slope between -1 and 0
elif -1 < m < 0:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y2, color)
if line_equation(i + 1, y2 - 0.5) > 0:
y2 -= 1
return image
|
def draw_line(
image : torch.Tensor,
p1 : torch.Tensor, p2 : torch.Tensor,
color : torch.Tensor,
) -> torch.Tensor:
r"""Draw a single line into an image.
Args:
image: the input image to where to draw the lines with shape :math:`(C,H,W)`.
p1: the start point of the line with shape (2).
p2: the end point of the line with shape (2).
color: the color of the line with shape (3).
Return:
the image containing the line.
"""
# assign points
x1, y1 = p1
x2, y2 = p2
# calculate coefficients A,B,C of line
# from equation Ax + By + C = 0
A = y2 - y1
B = x1 - x2
C = x2 * y1 - x1 * y2
# make sure A is positive to utilize the function properly
if (A < 0):
A = -A
B = -B
C = -C
# calculate the slope of the line
# check for division by zero
if (B != 0):
m = -A / B
# make sure you start drawing in the right direction
x1, x2 = min(x1, x2), max(x1, x2)
y1, y2 = min(y1, y2), max(y1, y2)
# line equation that determines the distance away from the line
def line_equation(x, y):
return A * x + B * y + C
# vertical line
if B == 0:
image[:, y1:y2 + 1, x1] = color
# horizontal line
elif A == 0:
image[:, y1, x1:x2 + 1] = color
# slope between 0 and 1
elif 0 < m < 1:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y1, color)
if line_equation(i + 1, y1 + 0.5) > 0:
y1 += 1
# slope greater than or equal to 1
elif m >= 1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x1, j, color)
if line_equation(x1 + 0.5, j + 1) < 0:
x1 += 1
# slope less than -1
elif m <= -1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x2, j, color)
if line_equation(x2 - 0.5, j + 1) > 0:
x2 -= 1
# slope between -1 and 0
elif -1 < m < 0:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y2, color)
if line_equation(i + 1, y2 - 0.5) > 0:
y2 -= 1
return image
|
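A hedged usage sketch for `draw_line` above. It assumes the function and its `_draw_pixel` helper (not shown in this row) are in scope, e.g. as shipped in `kornia.utils`; the points and color are made up.

import torch

img = torch.zeros(3, 8, 8)             # blank (C, H, W) image
p1 = torch.tensor([1, 1])              # start point (x1, y1)
p2 = torch.tensor([6, 4])              # end point (x2, y2)
color = torch.tensor([1.0, 0.0, 0.0])  # red
out = draw_line(img, p1, p2, color)    # writes the rasterized line into img and returns it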
10,884 |
def getnumbers(str1):
"""returns any numbers that are in the string"""
# TODO: handle locale-based periods e.g. 2,5 for Afrikaans
assert isinstance(str1, str)
numbers = []
innumber = False
degreesign = "\xb0"
lastnumber = ""
carryperiod = ""
for chr1 in str1:
if chr1.isdigit():
innumber = True
elif innumber:
if not chr1 in (".", degreesign):
innumber = False
if lastnumber:
numbers.append(lastnumber)
lastnumber = ""
if innumber:
if chr1 == degreesign:
lastnumber += chr1
elif chr1 == ".":
carryperiod += chr1
else:
lastnumber += carryperiod + chr1
carryperiod = ""
else:
carryperiod = ""
if innumber:
if lastnumber:
numbers.append(lastnumber)
return numbers
|
def getnumbers(str1):
"""returns any numbers that are in the string"""
# TODO: handle locale-based periods e.g. 2,5 for Afrikaans
assert isinstance(str1, str)
numbers = []
innumber = False
degreesign = "\xb0"
lastnumber = ""
carryperiod = ""
for chr1 in str1:
if chr1.isdigit():
innumber = True
elif innumber:
if chr1 not in (".", degreesign):
innumber = False
if lastnumber:
numbers.append(lastnumber)
lastnumber = ""
if innumber:
if chr1 == degreesign:
lastnumber += chr1
elif chr1 == ".":
carryperiod += chr1
else:
lastnumber += carryperiod + chr1
carryperiod = ""
else:
carryperiod = ""
if innumber:
if lastnumber:
numbers.append(lastnumber)
return numbers
|
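A quick usage example for `getnumbers` above, showing the behaviour the docstring describes: digits keep embedded periods and a trailing degree sign, while other characters end the number (the sample string is made up).

print(getnumbers("heat to 21.5\xb0 for 3 minutes, pH 7"))
# -> ['21.5°', '3', '7']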
8,172 |
def affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,
recenter=False, missing=0.0, use_scipy=False):
"""
Rotates, shifts and scales an image.
Will use `skimage.transform.warp` unless scikit-image can't be imported
then it will use `scipy.ndimage.affine_transform`.
Parameters
----------
image : `numpy.ndarray`
2D image to be rotated.
rmatrix : `numpy.ndarray` that is 2x2
Linear transformation rotation matrix.
order : `int` 0-5, optional
Interpolation order to be used, defaults to 3. When using scikit-image this parameter
is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).
When using scipy it is passed into
`scipy.ndimage.affine_transform` where it controls the order of the spline.
scale : `float`
A scale factor for the image with the default being no scaling.
image_center : tuple, optional
The point in the image to rotate around (axis of rotation).
Defaults to the center of the array.
recenter : `bool` or array-like, optional
Move the axis of rotation to the center of the array or recenter coords.
Defaults to `True` i.e., recenter to the center of the array.
missing : `float`, optional
The value to replace any missing data after the transformation.
use_scipy : `bool`, optional
Force use of `scipy.ndimage.affine_transform`.
Will set all "NaNs" in image to zero before doing the transform.
Defaults to `False`, unless scikit-image can't be imported.
Returns
-------
`numpy.ndarray`:
New rotated, scaled and translated image.
Notes
-----
This algorithm uses an affine transformation as opposed to a polynomial
geometrical transformation, which by default is `skimage.transform.warp`.
One can specify using `scipy.ndimage.affine_transform` as
an alternative affine transformation. The two transformations use different
algorithms and thus do not give identical output.
When using `skimage.transform.warp` with order >= 4 or using
`scipy.ndimage.affine_transform` at all, "NaN" values will be
replaced with zero prior to rotation. No attempt is made to retain the NaN
values.
Input arrays with integer data are cast to float 64 and can be re-cast using
`numpy.ndarray.astype` if desired.
Although this function is analogous to the IDL's ``rot`` function, it does not
use the same algorithm as the IDL ``rot`` function.
IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__
method to calculate the inverse mapping of original to target pixel
coordinates. This is a polynomial geometrical transformation.
Then optionally it uses a bicubic convolution interpolation
algorithm to map the original to target pixel values.
"""
rmatrix = rmatrix / scale
array_center = (np.array(image.shape)[::-1] - 1) / 2.0
# Make sure the image center is an array and is where it's supposed to be
if image_center is not None:
image_center = np.asanyarray(image_center)
else:
image_center = array_center
# Determine center of rotation based on use (or not) of the recenter keyword
if recenter:
rot_center = array_center
else:
rot_center = image_center
displacement = np.dot(rmatrix, rot_center)
shift = image_center - displacement
if not use_scipy:
try:
import skimage.transform
except ImportError:
warnings.warn("scikit-image could not be imported. Image rotation will use scipy",
ImportWarning)
use_scipy = True
if use_scipy:
if np.any(np.isnan(image)):
warnings.warn("Setting NaNs to 0 for SciPy rotation.", SunpyUserWarning)
# Transform the image using the scipy affine transform
rotated_image = scipy.ndimage.interpolation.affine_transform(
np.nan_to_num(image).T, rmatrix, offset=shift, order=order,
mode='constant', cval=missing).T
else:
tform = _affine_transform_matrix(rmatrix, shift)
rotated_image = image.copy()
# skikit-image expects floating point images to be in the range -1, 1
to_scale = (not np.issubdtype(image.dtype, np.int)) and np.any(np.isfinite(image))
if to_scale:
scale = np.nanmax(np.abs(rotated_image))
rotated_image /= scale
if np.any(np.isnan(rotated_image)) and order >= 4:
warnings.warn("Setting NaNs to 0 for higher-order scikit-image rotation.",
SunpyUserWarning)
rotated_image = np.nan_to_num(rotated_image)
rotated_image = skimage.transform.warp(rotated_image, tform, order=order,
mode='constant', cval=missing, preserve_range=True)
if to_scale:
rotated_image *= scale
return rotated_image
|
def affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,
recenter=False, missing=0.0, use_scipy=False):
"""
Rotates, shifts and scales an image.
Will use `skimage.transform.warp` unless scikit-image can't be imported
then it will use `scipy.ndimage.affine_transform`.
Parameters
----------
image : `numpy.ndarray`
2D image to be rotated.
rmatrix : `numpy.ndarray` that is 2x2
Linear transformation rotation matrix.
order : `int` 0-5, optional
Interpolation order to be used, defaults to 3. When using scikit-image this parameter
is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).
When using scipy it is passed into
`scipy.ndimage.affine_transform` where it controls the order of the spline.
scale : `float`
A scale factor for the image with the default being no scaling.
image_center : tuple, optional
The point in the image to rotate around (axis of rotation).
Defaults to the center of the array.
recenter : `bool` or array-like, optional
Move the axis of rotation to the center of the array or recenter coords.
Defaults to `True` i.e., recenter to the center of the array.
missing : `float`, optional
The value to replace any missing data after the transformation.
use_scipy : `bool`, optional
Force use of `scipy.ndimage.affine_transform`.
Will set all "NaNs" in image to zero before doing the transform.
Defaults to `False`, unless scikit-image can't be imported.
Returns
-------
`numpy.ndarray`:
New rotated, scaled and translated image.
Notes
-----
This algorithm uses an affine transformation as opposed to a polynomial
geometrical transformation, which by default is `skimage.transform.warp`.
One can specify using `scipy.ndimage.affine_transform` as
an alternative affine transformation. The two transformations use different
algorithms and thus do not give identical output.
When using `skimage.transform.warp` with order >= 4 or using
`scipy.ndimage.affine_transform` at all, "NaN" values will be
replaced with zero prior to rotation. No attempt is made to retain the NaN
values.
Input arrays with integer data are cast to float 64 and can be re-cast using
`numpy.ndarray.astype` if desired.
Although this function is analogous to the IDL's ``rot`` function, it does not
use the same algorithm as the IDL ``rot`` function.
IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__
method to calculate the inverse mapping of original to target pixel
coordinates. This is a polynomial geometrical transformation.
Then optionally it uses a bicubic convolution interpolation
algorithm to map the original to target pixel values.
"""
rmatrix = rmatrix / scale
array_center = (np.array(image.shape)[::-1] - 1) / 2.0
# Make sure the image center is an array and is where it's supposed to be
if image_center is not None:
image_center = np.asanyarray(image_center)
else:
image_center = array_center
# Determine center of rotation based on use (or not) of the recenter keyword
if recenter:
rot_center = array_center
else:
rot_center = image_center
displacement = np.dot(rmatrix, rot_center)
shift = image_center - displacement
if not use_scipy:
try:
import skimage.transform
except ImportError:
warnings.warn("scikit-image could not be imported. Image rotation will use scipy",
ImportWarning)
use_scipy = True
if use_scipy:
if np.any(np.isnan(image)):
warnings.warn("Setting NaNs to 0 for SciPy rotation.", SunpyUserWarning)
# Transform the image using the scipy affine transform
rotated_image = scipy.ndimage.interpolation.affine_transform(
np.nan_to_num(image).T, rmatrix, offset=shift, order=order,
mode='constant', cval=missing).T
else:
tform = _affine_transform_matrix(rmatrix, shift)
rotated_image = image.copy()
# scikit-image expects floating point images to be in the range 0, 1
to_scale = (not np.issubdtype(image.dtype, np.int)) and np.any(np.isfinite(image))
if to_scale:
scale = np.nanmax(np.abs(rotated_image))
rotated_image /= scale
if np.any(np.isnan(rotated_image)) and order >= 4:
warnings.warn("Setting NaNs to 0 for higher-order scikit-image rotation.",
SunpyUserWarning)
rotated_image = np.nan_to_num(rotated_image)
rotated_image = skimage.transform.warp(rotated_image, tform, order=order,
mode='constant', cval=missing, preserve_range=True)
if to_scale:
rotated_image *= scale
return rotated_image
|
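A hedged usage sketch for the `affine_transform` documented above; it assumes this function is importable (e.g. from `sunpy.image.transform` in the version this docstring comes from) and uses a made-up test image.

import numpy as np

theta = np.deg2rad(30.0)
c, s = np.cos(theta), np.sin(theta)
rmatrix = np.array([[c, -s],
                    [s,  c]])      # 2x2 rotation matrix
image = np.random.rand(64, 64)     # 2D float image
rotated = affine_transform(image, rmatrix, order=3, missing=0.0)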
49,118 |
def solve(f, *symbols, **flags):
r"""
Algebraically solves equations and systems of equations.
Explanation
===========
Currently supported:
- polynomial
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions
Examples
========
The output varies according to the input and can be seen by example:
>>> from sympy import solve, Poly, Eq, Function, exp
>>> from sympy.abc import x, y, z, a, b
>>> f = Function('f')
Boolean or univariate Relational:
>>> solve(x < 3)
(-oo < x) & (x < 3)
To always get a list of solution mappings, use flag dict=True:
>>> solve(x - 3, dict=True)
[{x: 3}]
>>> sol = solve([x - 3, y - 1], dict=True)
>>> sol
[{x: 3, y: 1}]
>>> sol[0][x]
3
>>> sol[0][y]
1
To get a list of *symbols* and set of solution(s) use flag set=True:
>>> solve([x**2 - 3, y - 1], set=True)
([x, y], {(-sqrt(3), 1), (sqrt(3), 1)})
Single expression and single symbol that is in the expression:
>>> solve(x - y, x)
[y]
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(x**2 - y**2, x, set=True)
([x], {(-y,), (y,)})
>>> solve(x**4 - 1, x, set=True)
([x], {(-1,), (1,), (-I,), (I,)})
Single expression with no symbol that is in the expression:
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
Single expression with no symbol given. In this case, all free *symbols*
will be selected as potential *symbols* to solve for. If the equation is
univariate then a list of solutions is returned; otherwise - as is the case
when *symbols* are given as an iterable of length greater than 1 - a list of
mappings will be returned:
>>> solve(x - 3)
[3]
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(z**2*x**2 - z**2*y**2)
[{x: -y}, {x: y}, {z: 0}]
>>> solve(z**2*x - z**2*y**2)
[{x: y**2}, {z: 0}]
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save you from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method:
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], {(-sqrt(-x),), (sqrt(-x),)})
>>> from sympy import Indexed, IndexedBase, Tuple, sqrt
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a symbol implicitly, use implicit=True:
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
* It is possible to solve for anything that can be targeted with
subs:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: -2*x/(x + 3) - 6/(x + 3) + sqrt(3)/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
* If you attempt to solve for a number remember that the number
you have obtained does not necessarily mean that the value is
equivalent to the expression obtained:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
* To solve for a function within a derivative, use ``dsolve``.
Single expression and more than one symbol:
* When there is a linear solution:
>>> solve(x - y**2, x, y)
[(y**2, y)]
>>> solve(x**2 - y, x, y)
[(x, x**2)]
>>> solve(x**2 - y, x, y, dict=True)
[{y: x**2}]
* When undetermined coefficients are identified:
* That are linear:
>>> solve((a + b)*x - b + 2, a, b)
{a: -2, b: 2}
* That are nonlinear:
>>> solve((a + b)*x - b**2 + 2, a, b, set=True)
([a, b], {(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))})
* If there is no linear solution, then the first successful
attempt for a nonlinear solution will be returned:
>>> solve(x**2 - y**2, x, y, dict=True)
[{x: -y}, {x: y}]
>>> solve(x**2 - y**2/exp(x), x, y, dict=True)
[{x: 2*LambertW(-y/2)}, {x: 2*LambertW(y/2)}]
>>> solve(x**2 - y**2/exp(x), y, x)
[(-x*sqrt(exp(x)), x), (x*sqrt(exp(x)), x)]
Iterable of one or more of the above:
* Involving relationals or bools:
>>> solve([x < 3, x - 2])
Eq(x, 2)
>>> solve([x > 3, x - 2])
False
* When the system is linear:
* With a solution:
>>> solve([x - 3], x)
{x: 3}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y)
{x: 2 - 5*y, z: 21*y - 6}
* Without a solution:
>>> solve([x + 3, x - 3])
[]
* When the system is not linear:
>>> solve([x**2 + y -2, y**2 - 4], x, y, set=True)
([x, y], {(-2, -2), (0, 2), (2, -2)})
* If no *symbols* are given, all free *symbols* will be selected and a
list of mappings returned:
>>> solve([x - 2, x**2 + y])
[{x: 2, y: -4}]
>>> solve([x - 2, x**2 + f(x)], {f(x), x})
[{x: 2, f(x): -4}]
* If any equation does not depend on the symbol(s) given, it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest:
>>> solve([x - y, y - 3], x)
{x: y}
**Additional Examples**
``solve()`` with check=True (default) will run through the symbol tags to
eliminate unwanted solutions. If no assumptions are included, all possible
solutions will be returned:
>>> from sympy import Symbol, solve
>>> x = Symbol("x")
>>> solve(x**2 - 1)
[-1, 1]
By using the positive tag, only one solution will be returned:
>>> pos = Symbol("pos", positive=True)
>>> solve(pos**2 - 1)
[1]
Assumptions are not checked when ``solve()`` input involves
relationals or bools.
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions,
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If check=False, then a solution to the numerator being zero is found: x = 0.
In this case, this is a spurious solution since $\sin(x)/x$ has the well
known limit (without discontinuity) of 1 at x = 0:
>>> solve(sin(x)/x, check=False)
[0, pi]
In the following case, however, the limit exists and is equal to the
value of x = 0 that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
**Disabling High-Order Explicit Solutions**
When solving polynomial expressions, you might not want explicit solutions
(which can be quite long). If the expression is univariate, ``CRootOf``
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) -
(-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3,
-(-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/((-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)),
-(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[CRootOf(x**3 - x + 1, 0),
CRootOf(x**3 - x + 1, 1),
CRootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
**Solving Equations Involving Radicals**
Because of SymPy's use of the principle root, some solutions
to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example, there is only a single solution to the
equation. Other expressions will yield spurious roots which
must be checked manually; roots which give a negative argument
to odd-powered radicals will also need special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[CRootOf(7*x**5 - 7*x**3 + 1, 1)**15,
CRootOf(7*x**5 - 7*x**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so ``real_root`` must be used to see that it
satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be
necessary to find the roots, especially for higher order equations.
Consider the following expression:
>>> expr = root(x, 3) - root(x, 5)
We will construct a known value for this expression at x = 3 by selecting
the 1-th root for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The ``solve`` function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function ``unrad``, however, can be used to get a form of the equation
for which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although ``eq`` or ``eq1`` could have been used to find ``xvals``, the
solution can only be verified with ``expr1``:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
Parameters
==========
f :
- a single Expr or Poly that must be zero
- an Equality
- a Relational expression
- a Boolean
- iterable of one or more of the above
symbols : (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
(e.g., ``solve(f, x, y)``)
- ordered iterable of symbols
(e.g., ``solve(f, [x, y])``)
flags :
dict=True (default is False)
Return list (perhaps empty) of solution mappings.
set=True (default is False)
Return list of symbols and set of tuple(s) of solution(s).
exclude=[] (default)
Do not try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
check=True (default)
If False, do not do any testing of solutions. This can be
useful if you want to include solutions that make any
denominator zero.
numerical=True (default)
Do a fast numerical check if *f* has only one symbol.
minimal=True (default is False)
A very fast, minimal testing.
warn=True (default is False)
Show a warning if ``checksol()`` could not conclude.
simplify=True (default)
Simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero.
force=True (default is False)
Make positive all symbols without assumptions regarding sign.
rational=True (default)
Recast Floats as Rational; if this option is not used, the
system containing Floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
manual=True (default is False)
Do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually."
implicit=True (default is False)
Allows ``solve`` to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
like cos, exp, etc.
particular=True (default is False)
Instructs ``solve`` to try to find a particular solution to
a linear system with as many zeros as possible; this is very
expensive.
quick=True (default is False; ``particular`` must be True)
Selects a fast heuristic to find a solution with many zeros
whereas a value of False uses the very slow method guaranteed
to find the largest number of zeros possible.
cubics=True (default)
Return explicit solutions when cubic expressions are encountered.
When False, quartics and quintics are disabled, too.
quartics=True (default)
Return explicit solutions when quartic expressions are encountered.
When False, quintics are disabled, too.
quintics=True (default)
Return explicit solutions (if possible) when quintic expressions
are encountered.
See Also
========
rsolve: For solving recurrence relationships
dsolve: For solving differential equations
"""
from .inequalities import reduce_inequalities
# set solver types explicitly; as soon as one is False
# all the rest will be False
###########################################################################
hints = ('cubics', 'quartics', 'quintics')
default = True
for k in hints:
default = flags.setdefault(k, bool(flags.get(k, default)))
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
# check flag usage for particular/quick which should only be used
# with systems of equations
if flags.get('quick', None) is not None:
if not flags.get('particular', None):
raise ValueError('when using `quick`, `particular` should be True')
if flags.get('particular', False) and bare_f:
raise ValueError(filldedent("""
The 'particular/quick' flag is usually used with systems of
equations. Either pass your equation in a list or
consider using a solver like `diophantine` if you are
looking for a solution in integers."""))
f, symbols = (_sympified_list(w) for w in [f, symbols])
if isinstance(f, list):
f = [s for s in f if s is not S.true and s is not True]
implicit = flags.get('implicit', False)
# preprocess symbol(s)
###########################################################################
ordered_symbols = None # were the symbols in a well defined order?
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if isinstance(p, AppliedUndef):
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
ordered_symbols = False
symbols = list(ordered(symbols)) # to make it canonical
else:
if len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
ordered_symbols = symbols and is_sequence(symbols,
include=GeneratorType)
_symbols = list(uniq(symbols))
if len(_symbols) != len(symbols):
ordered_symbols = False
symbols = list(ordered(symbols))
else:
symbols = _symbols
# remove symbols the user is not interested in
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, (Eq, Ne)):
if 'ImmutableDenseMatrix' in [type(a).__name__ for a in fi.args]:
fi = fi.lhs - fi.rhs
else:
L, R = fi.args
if isinstance(R, BooleanAtom):
L, R = R, L
if isinstance(L, BooleanAtom):
if isinstance(fi, Ne):
L = ~L
if R.is_Relational:
fi = ~R if L is S.false else R
elif R.is_Symbol:
return L
elif R.is_Boolean and (~R).is_Symbol:
return ~L
else:
raise NotImplementedError(filldedent('''
Unanticipated argument of Eq when other arg
is True or False.
'''))
else:
fi = fi.rewrite(Add, evaluate=False)
f[i] = fi
if fi.is_Relational:
return reduce_inequalities(f, symbols=symbols)
if isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp if they have symbols of
# interest
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction) and \
w.has_free(*symbols), lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# real/imag handling -----------------------------
if any(isinstance(fi, (bool, BooleanAtom)) for fi in f):
if flags.get('set', False):
return [], set()
return []
for i, fi in enumerate(f):
# Abs
while True:
was = fi
fi = fi.replace(Abs, lambda arg:
separatevars(Abs(arg)).rewrite(Piecewise) if arg.has(*symbols)
else Abs(arg))
if was == fi:
break
for e in fi.find(Abs):
if e.has(*symbols):
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % e)
# arg
fi = fi.replace(arg, lambda a: arg(a).rewrite(atan2).rewrite(atan))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
freim = [fi for fi in f if fi.has(re, im)]
if freim:
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in freim):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
f = [fi.xreplace({s: rhs}) for fi in f] + [s - rhs]
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
# end of real/imag handling -----------------------------
# we can solve for non-symbol entities by replacing them with Dummy symbols
f, symbols, swap_sym = recast_to_symbols(f, symbols)
# this is needed in the next two events
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.free_symbols & symset:
ok = True
else:
if fi.is_number:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for _ in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
# Both xreplace and subs are needed below: xreplace to force substitution
# inside Derivative, subs to handle non-straightforward substitutions
non_inverts = [(v, k.xreplace(swap_sym).subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# capture any denominators before rewriting since
# they may disappear after the rewrite, e.g. issue 14779
flags['_denominators'] = _simple_dens(f[0], symbols)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any(_has_piecewise(a) for a in e.args)
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = _solve(f[0], *symbols, **flags)
else:
solution = _solve_system(f, symbols, **flags)
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
def _do_dict(solution):
return {k: v.subs(non_inverts) for k, v in
solution.items()}
for i in range(1):
if isinstance(solution, dict):
solution = _do_dict(solution)
break
elif solution and isinstance(solution, list):
if isinstance(solution[0], dict):
solution = [_do_dict(s) for s in solution]
break
elif isinstance(solution[0], tuple):
solution = [tuple([v.subs(non_inverts) for v in s]) for s
in solution]
break
else:
solution = [v.subs(non_inverts) for v in solution]
break
elif not solution:
break
else:
raise NotImplementedError(filldedent('''
no handling of %s was implemented''' % solution))
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if swap_sym:
symbols = [swap_sym.get(k, k) for k in symbols]
if isinstance(solution, dict):
solution = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in solution.items()}
elif solution and isinstance(solution, list) and isinstance(solution[0], dict):
for i, sol in enumerate(solution):
solution[i] = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in sol.items()}
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
if isinstance(solution, tuple):
# this has already been checked and is in as_set form
return solution
elif isinstance(solution, list):
if isinstance(solution[0], tuple):
for sol in solution:
for symb, val in zip(symbols, sol):
test = check_assumptions(val, **symb.assumptions0)
if test is False:
break
if test is None:
got_None.append(sol)
else:
no_False.append(sol)
elif isinstance(solution[0], dict):
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
else: # list of expressions
for sol in solution:
test = check_assumptions(sol, **symbols[0].assumptions0)
if test is False:
continue
no_False.append(sol)
if test is None:
got_None.append(sol)
elif isinstance(solution, dict):
a_None = False
for symb, val in solution.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
no_False = None
break
a_None = True
else:
no_False = solution
if a_None:
got_None.append(solution)
elif isinstance(solution, (Relational, And, Or)):
if len(symbols) != 1:
raise ValueError("Length should be 1")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
# TODO: check also variable assumptions for inequalities
else:
raise TypeError('Unrecognized solution') # improve the checker
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
cannot be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
as_dict = flags.get('dict', False)
as_set = flags.get('set', False)
if solution is not None and type(solution) not in (list, dict):
return solution
if not solution:
return []
if (
# undo the dictionary solutions returned when the system was
# only partially solved with poly-system
not as_dict and
ordered_symbols and
type(solution) is list and
type(solution[0]) is dict
):
solution = [tuple([r.get(s, s) for s in symbols]) for r in solution]
# make orderings canonical for:
# - dict
# - list of
# * values
# * tuples
# * dicts
if type(solution) is dict:
solution = {k: solution[k] for k in ordered(solution.keys())}
elif not as_set: # for set, no point in ordering
solution.sort(key=default_sort_key)
if solution and type(solution[0]) is tuple:
# XXX is it better to handle at source of introduction?
# if we don't do it then (or now) then
# solve([x**2 + y -2, y**2 - 4], x, y) would
# otherwise have (0, 2) appearing twice
solution = list(uniq(solution))
if not (as_set or as_dict):
return solution
# convert all input to list of dicts
if type(solution) is list and type(solution[0]) is dict:
LOD = solution
else:
LOD = None
if as_dict or not LOD:
if isinstance(solution, dict):
LOD = [solution] # dict was made canonical above
elif type(solution[0]) is tuple:
LOD = [dict(zip(symbols, s)) for s in solution]
elif type(solution[0]) is dict:
if not as_set:
# put the keys in order within each dict
LOD = [{k: s[k] for k in ordered(s)} for s in solution]
else:
LOD = solution # we will order after unifying keys
else:
assert len(symbols) == 1, 'logical error'
LOD = [{symbols[0]: s} for s in solution]
else:
LOD = solution
if as_dict:
return LOD
# set output: (symbols, {t1, t2, ...}) from list of dictionaries;
# include all symbols for those that like a verbose solution
# and to resolve any differences in dictionary keys.
#
# The set results can easily be used to make a verbose dict as
# k, v = solve(eqs, syms, set=True)
# sol = [dict(zip(k,i)) for i in v]
#
if ordered_symbols:
k = symbols # keep preferred order
else:
# just unify the symbols for which solutions were found
k = list(ordered(set(flatten(tuple(i.keys()) for i in LOD))))
return k, {tuple([s.get(ki, ki) for ki in k]) for s in LOD}
|
def solve(f, *symbols, **flags):
r"""
Algebraically solves equations and systems of equations.
Explanation
===========
Currently supported:
- polynomial
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions
Examples
========
The output varies according to the input and can be seen by example:
>>> from sympy import solve, Poly, Eq, Function, exp
>>> from sympy.abc import x, y, z, a, b
>>> f = Function('f')
Boolean or univariate Relational:
>>> solve(x < 3)
(-oo < x) & (x < 3)
To always get a list of solution mappings, use flag dict=True:
>>> solve(x - 3, dict=True)
[{x: 3}]
>>> sol = solve([x - 3, y - 1], dict=True)
>>> sol
[{x: 3, y: 1}]
>>> sol[0][x]
3
>>> sol[0][y]
1
To get a list of *symbols* and set of solution(s) use flag set=True:
>>> solve([x**2 - 3, y - 1], set=True)
([x, y], {(-sqrt(3), 1), (sqrt(3), 1)})
Single expression and single symbol that is in the expression:
>>> solve(x - y, x)
[y]
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(x**2 - y**2, x, set=True)
([x], {(-y,), (y,)})
>>> solve(x**4 - 1, x, set=True)
([x], {(-1,), (1,), (-I,), (I,)})
Single expression with no symbol that is in the expression:
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
Single expression with no symbol given. In this case, all free *symbols*
will be selected as potential *symbols* to solve for. If the equation is
univariate then a list of solutions is returned; otherwise - as is the case
when *symbols* are given as an iterable of length greater than 1 - a list of
mappings will be returned:
>>> solve(x - 3)
[3]
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(z**2*x**2 - z**2*y**2)
[{x: -y}, {x: y}, {z: 0}]
>>> solve(z**2*x - z**2*y**2)
[{x: y**2}, {z: 0}]
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save you from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method:
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], {(-sqrt(-x),), (sqrt(-x),)})
>>> from sympy import Indexed, IndexedBase, Tuple, sqrt
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a symbol implicitly, use implicit=True:
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
* It is possible to solve for anything that can be targeted with
subs:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: -2*x/(x + 3) - 6/(x + 3) + sqrt(3)/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
* If you attempt to solve for a number remember that the number
you have obtained does not necessarily mean that the value is
equivalent to the expression obtained:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
* To solve for a function within a derivative, use ``dsolve``.
Single expression and more than one symbol:
* When there is a linear solution:
>>> solve(x - y**2, x, y)
[(y**2, y)]
>>> solve(x**2 - y, x, y)
[(x, x**2)]
>>> solve(x**2 - y, x, y, dict=True)
[{y: x**2}]
* When undetermined coefficients are identified:
* That are linear:
>>> solve((a + b)*x - b + 2, a, b)
{a: -2, b: 2}
* That are nonlinear:
>>> solve((a + b)*x - b**2 + 2, a, b, set=True)
([a, b], {(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))})
* If there is no linear solution, then the first successful
attempt for a nonlinear solution will be returned:
>>> solve(x**2 - y**2, x, y, dict=True)
[{x: -y}, {x: y}]
>>> solve(x**2 - y**2/exp(x), x, y, dict=True)
[{x: 2*LambertW(-y/2)}, {x: 2*LambertW(y/2)}]
>>> solve(x**2 - y**2/exp(x), y, x)
[(-x*sqrt(exp(x)), x), (x*sqrt(exp(x)), x)]
Iterable of one or more of the above:
* Involving relationals or bools:
>>> solve([x < 3, x - 2])
Eq(x, 2)
>>> solve([x > 3, x - 2])
False
* When the system is linear:
* With a solution:
>>> solve([x - 3], x)
{x: 3}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y)
{x: 2 - 5*y, z: 21*y - 6}
* Without a solution:
>>> solve([x + 3, x - 3])
[]
* When the system is not linear:
>>> solve([x**2 + y -2, y**2 - 4], x, y, set=True)
([x, y], {(-2, -2), (0, 2), (2, -2)})
* If no *symbols* are given, all free *symbols* will be selected and a
list of mappings returned:
>>> solve([x - 2, x**2 + y])
[{x: 2, y: -4}]
>>> solve([x - 2, x**2 + f(x)], {f(x), x})
[{x: 2, f(x): -4}]
* If any equation does not depend on the symbol(s) given, it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest:
>>> solve([x - y, y - 3], x)
{x: y}
**Additional Examples**
``solve()`` with check=True (default) will run through the symbol tags to
    eliminate unwanted solutions. If no assumptions are included, all possible
solutions will be returned:
>>> from sympy import Symbol, solve
>>> x = Symbol("x")
>>> solve(x**2 - 1)
[-1, 1]
By using the positive tag, only one solution will be returned:
>>> pos = Symbol("pos", positive=True)
>>> solve(pos**2 - 1)
[1]
Assumptions are not checked when ``solve()`` input involves
relationals or bools.
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions,
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If check=False, then a solution to the numerator being zero is found: x = 0.
In this case, this is a spurious solution since $\sin(x)/x$ has the well
    known limit (without discontinuity) of 1 at x = 0:
>>> solve(sin(x)/x, check=False)
[0, pi]
In the following case, however, the limit exists and is equal to the
value of x = 0 that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
**Disabling High-Order Explicit Solutions**
When solving polynomial expressions, you might not want explicit solutions
(which can be quite long). If the expression is univariate, ``CRootOf``
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) -
(-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3,
-(-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/((-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)),
-(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[CRootOf(x**3 - x + 1, 0),
CRootOf(x**3 - x + 1, 1),
CRootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
**Solving Equations Involving Radicals**
    Because of SymPy's use of the principal root, some solutions
to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example, there is only a single solution to the
equation. Other expressions will yield spurious roots which
must be checked manually; roots which give a negative argument
to odd-powered radicals will also need special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[CRootOf(7*x**5 - 7*x**3 + 1, 1)**15,
CRootOf(7*x**5 - 7*x**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so ``real_root`` must be used to see that it
satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be
necessary to find the roots, especially for higher order equations.
Consider the following expression:
>>> expr = root(x, 3) - root(x, 5)
    We will construct a known value for this expression at x = -3 by selecting
the 1-th root for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The ``solve`` function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function ``unrad``, however, can be used to get a form of the equation
for which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although ``eq`` or ``eq1`` could have been used to find ``xvals``, the
solution can only be verified with ``expr1``:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
Parameters
==========
f :
- a single Expr or Poly that must be zero
- an Equality
- a Relational expression
- a Boolean
- iterable of one or more of the above
symbols : (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
(e.g., ``solve(f, x, y)``)
- ordered iterable of symbols
(e.g., ``solve(f, [x, y])``)
flags :
dict=True (default is False)
Return list (perhaps empty) of solution mappings.
set=True (default is False)
Return list of symbols and set of tuple(s) of solution(s).
exclude=[] (default)
Do not try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
check=True (default)
If False, do not do any testing of solutions. This can be
useful if you want to include solutions that make any
denominator zero.
numerical=True (default)
Do a fast numerical check if *f* has only one symbol.
minimal=True (default is False)
A very fast, minimal testing.
warn=True (default is False)
Show a warning if ``checksol()`` could not conclude.
simplify=True (default)
Simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero.
force=True (default is False)
Make positive all symbols without assumptions regarding sign.
rational=True (default)
Recast Floats as Rational; if this option is not used, the
system containing Floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
manual=True (default is False)
Do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually."
implicit=True (default is False)
Allows ``solve`` to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
            like cos, exp, etc.
particular=True (default is False)
Instructs ``solve`` to try to find a particular solution to
a linear system with as many zeros as possible; this is very
expensive.
quick=True (default is False; ``particular`` must be True)
Selects a fast heuristic to find a solution with many zeros
whereas a value of False uses the very slow method guaranteed
to find the largest number of zeros possible.
cubics=True (default)
Return explicit solutions when cubic expressions are encountered.
When False, quartics and quintics are disabled, too.
quartics=True (default)
Return explicit solutions when quartic expressions are encountered.
When False, quintics are disabled, too.
quintics=True (default)
Return explicit solutions (if possible) when quintic expressions
are encountered.
See Also
========
rsolve: For solving recurrence relationships
dsolve: For solving differential equations
"""
from .inequalities import reduce_inequalities
# set solver types explicitly; as soon as one is False
# all the rest will be False
###########################################################################
hints = ('cubics', 'quartics', 'quintics')
default = True
for k in hints:
default = flags.setdefault(k, bool(flags.get(k, default)))
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
# check flag usage for particular/quick which should only be used
# with systems of equations
if flags.get('quick', None) is not None:
if not flags.get('particular', None):
raise ValueError('when using `quick`, `particular` should be True')
if flags.get('particular', False) and bare_f:
raise ValueError(filldedent("""
The 'particular/quick' flag is usually used with systems of
equations. Either pass your equation in a list or
consider using a solver like `diophantine` if you are
looking for a solution in integers."""))
f, symbols = (_sympified_list(w) for w in [f, symbols])
if isinstance(f, list):
f = [s for s in f if s is not S.true and s is not True]
implicit = flags.get('implicit', False)
# preprocess symbol(s)
###########################################################################
ordered_symbols = None # were the symbols in a well defined order?
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if isinstance(p, AppliedUndef):
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
ordered_symbols = False
symbols = list(ordered(symbols)) # to make it canonical
else:
if len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
ordered_symbols = False
if symbols:
if is_sequence(symbols, include=GeneratorType):
# the symbols are well ordered; keep the
# order but make sure they are unique
_symbols = list(uniq(symbols))
if len(_symbols) == len(symbols):
# the symbols are unique so we can return
# solutions relative to their given order
ordered_symbols = True
symbols = _symbols
else:
# put them in order for canonical processing
symbols = list(ordered(symbols))
if flags.get('set', False):
# since symbols were provided, all will be
# included in the set output; even though they
# were not well ordered, they are now
ordered_symbols = True
# remove symbols the user is not interested in
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, (Eq, Ne)):
if 'ImmutableDenseMatrix' in [type(a).__name__ for a in fi.args]:
fi = fi.lhs - fi.rhs
else:
L, R = fi.args
if isinstance(R, BooleanAtom):
L, R = R, L
if isinstance(L, BooleanAtom):
if isinstance(fi, Ne):
L = ~L
if R.is_Relational:
fi = ~R if L is S.false else R
elif R.is_Symbol:
return L
elif R.is_Boolean and (~R).is_Symbol:
return ~L
else:
raise NotImplementedError(filldedent('''
Unanticipated argument of Eq when other arg
is True or False.
'''))
else:
fi = fi.rewrite(Add, evaluate=False)
f[i] = fi
if fi.is_Relational:
return reduce_inequalities(f, symbols=symbols)
if isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp if they have symbols of
# interest
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction) and \
w.has_free(*symbols), lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# real/imag handling -----------------------------
if any(isinstance(fi, (bool, BooleanAtom)) for fi in f):
if flags.get('set', False):
return [], set()
return []
for i, fi in enumerate(f):
# Abs
while True:
was = fi
fi = fi.replace(Abs, lambda arg:
separatevars(Abs(arg)).rewrite(Piecewise) if arg.has(*symbols)
else Abs(arg))
if was == fi:
break
for e in fi.find(Abs):
if e.has(*symbols):
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % e)
# arg
fi = fi.replace(arg, lambda a: arg(a).rewrite(atan2).rewrite(atan))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
freim = [fi for fi in f if fi.has(re, im)]
if freim:
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in freim):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
f = [fi.xreplace({s: rhs}) for fi in f] + [s - rhs]
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
# end of real/imag handling -----------------------------
# we can solve for non-symbol entities by replacing them with Dummy symbols
f, symbols, swap_sym = recast_to_symbols(f, symbols)
# this is needed in the next two events
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.free_symbols & symset:
ok = True
else:
if fi.is_number:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for _ in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
# Both xreplace and subs are needed below: xreplace to force substitution
# inside Derivative, subs to handle non-straightforward substitutions
non_inverts = [(v, k.xreplace(swap_sym).subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# capture any denominators before rewriting since
# they may disappear after the rewrite, e.g. issue 14779
flags['_denominators'] = _simple_dens(f[0], symbols)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any(_has_piecewise(a) for a in e.args)
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = _solve(f[0], *symbols, **flags)
else:
solution = _solve_system(f, symbols, **flags)
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
def _do_dict(solution):
return {k: v.subs(non_inverts) for k, v in
solution.items()}
for i in range(1):
if isinstance(solution, dict):
solution = _do_dict(solution)
break
elif solution and isinstance(solution, list):
if isinstance(solution[0], dict):
solution = [_do_dict(s) for s in solution]
break
elif isinstance(solution[0], tuple):
solution = [tuple([v.subs(non_inverts) for v in s]) for s
in solution]
break
else:
solution = [v.subs(non_inverts) for v in solution]
break
elif not solution:
break
else:
raise NotImplementedError(filldedent('''
no handling of %s was implemented''' % solution))
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if swap_sym:
symbols = [swap_sym.get(k, k) for k in symbols]
if isinstance(solution, dict):
solution = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in solution.items()}
elif solution and isinstance(solution, list) and isinstance(solution[0], dict):
for i, sol in enumerate(solution):
solution[i] = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in sol.items()}
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
if isinstance(solution, tuple):
# this has already been checked and is in as_set form
return solution
elif isinstance(solution, list):
if isinstance(solution[0], tuple):
for sol in solution:
for symb, val in zip(symbols, sol):
test = check_assumptions(val, **symb.assumptions0)
if test is False:
break
if test is None:
got_None.append(sol)
else:
no_False.append(sol)
elif isinstance(solution[0], dict):
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
else: # list of expressions
for sol in solution:
test = check_assumptions(sol, **symbols[0].assumptions0)
if test is False:
continue
no_False.append(sol)
if test is None:
got_None.append(sol)
elif isinstance(solution, dict):
a_None = False
for symb, val in solution.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
no_False = None
break
a_None = True
else:
no_False = solution
if a_None:
got_None.append(solution)
elif isinstance(solution, (Relational, And, Or)):
if len(symbols) != 1:
raise ValueError("Length should be 1")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
# TODO: check also variable assumptions for inequalities
else:
raise TypeError('Unrecognized solution') # improve the checker
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
cannot be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
as_dict = flags.get('dict', False)
as_set = flags.get('set', False)
if solution is not None and type(solution) not in (list, dict):
return solution
if not solution:
return []
if (
# undo the dictionary solutions returned when the system was
# only partially solved with poly-system
not as_dict and
ordered_symbols and
type(solution) is list and
type(solution[0]) is dict
):
solution = [tuple([r.get(s, s) for s in symbols]) for r in solution]
# make orderings canonical for:
# - dict
# - list of
# * values
# * tuples
# * dicts
if type(solution) is dict:
solution = {k: solution[k] for k in ordered(solution.keys())}
elif not as_set: # for set, no point in ordering
solution.sort(key=default_sort_key)
if solution and type(solution[0]) is tuple:
# XXX is it better to handle at source of introduction?
# if we don't do it then (or now) then
# solve([x**2 + y -2, y**2 - 4], x, y) would
# otherwise have (0, 2) appearing twice
solution = list(uniq(solution))
if not (as_set or as_dict):
return solution
# convert all input to list of dicts
if type(solution) is list and type(solution[0]) is dict:
LOD = solution
else:
LOD = None
if as_dict or not LOD:
if isinstance(solution, dict):
LOD = [solution] # dict was made canonical above
elif type(solution[0]) is tuple:
LOD = [dict(zip(symbols, s)) for s in solution]
elif type(solution[0]) is dict:
if not as_set:
# put the keys in order within each dict
LOD = [{k: s[k] for k in ordered(s)} for s in solution]
else:
LOD = solution # we will order after unifying keys
else:
assert len(symbols) == 1, 'logical error'
LOD = [{symbols[0]: s} for s in solution]
else:
LOD = solution
if as_dict:
return LOD
# set output: (symbols, {t1, t2, ...}) from list of dictionaries;
# include all symbols for those that like a verbose solution
# and to resolve any differences in dictionary keys.
#
# The set results can easily be used to make a verbose dict as
# k, v = solve(eqs, syms, set=True)
# sol = [dict(zip(k,i)) for i in v]
#
if ordered_symbols:
k = symbols # keep preferred order
else:
# just unify the symbols for which solutions were found
k = list(ordered(set(flatten(tuple(i.keys()) for i in LOD))))
return k, {tuple([s.get(ki, ki) for ki in k]) for s in LOD}
|
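The comments near the end of solve() above spell out how the set=True output can be converted into a verbose list of dictionaries (k, v = solve(eqs, syms, set=True); sol = [dict(zip(k, i)) for i in v]). A minimal sketch of that recipe, assuming SymPy is installed and reusing the x**2 - 3, y - 1 system from the docstring:
from sympy import solve, symbols

x, y = symbols('x y')
k, v = solve([x**2 - 3, y - 1], [x, y], set=True)  # ([x, y], {(-sqrt(3), 1), (sqrt(3), 1)})
sols = [dict(zip(k, tup)) for tup in v]            # one {symbol: value} mapping per tuple
assert all(sol[y] == 1 for sol in sols)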
6,583 |
def is_not_child_table(doctype):
if frappe.get_value('DocType', doctype, 'istable'):
return False
return True
|
def is_not_child_table(doctype):
return not bool(frappe.get_value('DocType', doctype, 'istable'))
|
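A quick self-contained check of the simplification in the pair above; the frappe.get_value call is replaced by plain stand-in values here, so no Frappe site is needed. The one-line not bool(...) form and the original if/else agree for the truthy and falsy values the lookup can return.
def old_style(istable):
    if istable:
        return False
    return True

def new_style(istable):
    return not bool(istable)

for istable in (0, 1, None, '', '1'):
    assert old_style(istable) == new_style(istable)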
32,322 |
def get_commit_status(topology: Topology, match_job_id: List[str] = None) -> List[CommitStatus]:
"""
Returns the status of the commit operation on all devices. If an ID is given, only that id will be returned.
:param topology: `Topology` instance !no-auto-argument
:param match_job_id: job ID or list of Job IDs to return.
"""
return UniversalCommand.get_commit_job_status(topology, match_job_id)
|
def get_commit_status(topology: Topology, match_job_id: Optional[str] = None) -> List[CommitStatus]:
"""
Returns the status of the commit operation on all devices. If an ID is given, only that id will be returned.
:param topology: `Topology` instance !no-auto-argument
    :param match_job_id: specific job ID to return; if omitted, all commit jobs are returned.
"""
return UniversalCommand.get_commit_job_status(topology, match_job_id)
|
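The only change in the pair above is the annotation of the defaulted parameter (Optional[str] instead of List[str]). A small illustration of that pattern with hypothetical stand-in names, not the real UniversalCommand API:
from typing import List, Optional

def job_status(match_job_id: Optional[str] = None) -> List[str]:
    jobs = ['12', '17', '23']                      # placeholder job IDs
    if match_job_id is None:
        return jobs                                # no filter: return everything
    return [j for j in jobs if j == match_job_id]  # only the requested job

assert job_status() == ['12', '17', '23']
assert job_status('17') == ['17']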
8,818 |
def test_handle_rpl_myinfo(mockbot):
"""Test coretasks handle RPL_MYINFO events."""
assert not hasattr(mockbot, 'myinfo'), (
'Attribute myinfo is not available until the server sends RPL_MYINFO')
rpl_myinfo = ' '.join([
':niven.freenode.net',
'004',
'TestName',
'irc.example.net',
'example-1.2.3',
# modes for channels and users are ignored by Sopel
# we prefer to use RPL_ISUPPORT for that
'DOQRSZaghilopsuwz',
'CFILMPQSbcefgijklmnopqrstuvz',
'bkloveqjfI',
# text is ignored for RPL_MYINFO
':Some random text',
])
mockbot.on_message(rpl_myinfo)
assert hasattr(mockbot, 'myinfo')
assert mockbot.myinfo.client == 'TestName'
assert mockbot.myinfo.servername == 'irc.example.net'
assert mockbot.myinfo.version == 'example-1.2.3'
|
def test_handle_rpl_myinfo(mockbot):
"""Test handling RPL_MYINFO events."""
assert not hasattr(mockbot, 'myinfo'), (
'Attribute myinfo is not available until the server sends RPL_MYINFO')
rpl_myinfo = ' '.join([
':niven.freenode.net',
'004',
'TestName',
'irc.example.net',
'example-1.2.3',
# modes for channels and users are ignored by Sopel
# we prefer to use RPL_ISUPPORT for that
'DOQRSZaghilopsuwz',
'CFILMPQSbcefgijklmnopqrstuvz',
'bkloveqjfI',
# text is ignored for RPL_MYINFO
':Some random text',
])
mockbot.on_message(rpl_myinfo)
assert hasattr(mockbot, 'myinfo')
assert mockbot.myinfo.client == 'TestName'
assert mockbot.myinfo.servername == 'irc.example.net'
assert mockbot.myinfo.version == 'example-1.2.3'
|
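The assertions in the test above come down to where the 004 (RPL_MYINFO) parameters sit in the raw line. A rough sketch of that layout, using the same message the test builds:
raw = (':niven.freenode.net 004 TestName irc.example.net example-1.2.3 '
       'DOQRSZaghilopsuwz CFILMPQSbcefgijklmnopqrstuvz bkloveqjfI :Some random text')
params = raw.split(' ')
client, servername, version = params[2], params[3], params[4]
assert (client, servername, version) == ('TestName', 'irc.example.net', 'example-1.2.3')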
45,818 |
def load_image(file_name):
"""Loads the image with OpenCV and converts to torch.Tensor"""
if not os.path.isfile(file_name):
raise AssertionError("Invalid file {}".format(file_name))
# load image with OpenCV
img = cv2.imread(file_name, cv2.IMREAD_COLOR)
# convert image to torch tensor
tensor = dgm.utils.image_to_tensor(img).float() / 255.0
tensor = tensor.view(1, *tensor.shape) # 1xCxHxW
return tensor, img
|
def load_image(file_name):
"""Loads the image with OpenCV and converts to torch.Tensor"""
if not os.path.isfile(file_name):
raise FileExistsError("Invalid file {}".format(file_name))
# load image with OpenCV
img = cv2.imread(file_name, cv2.IMREAD_COLOR)
# convert image to torch tensor
tensor = dgm.utils.image_to_tensor(img).float() / 255.0
tensor = tensor.view(1, *tensor.shape) # 1xCxHxW
return tensor, img
|
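The tensor.view(1, *tensor.shape) line in load_image() above only adds a leading batch dimension. A minimal sketch of that step, assuming PyTorch is available (OpenCV and the dgm helper are left out):
import torch

chw = torch.zeros(3, 4, 5)      # stand-in for the CxHxW image tensor
nchw = chw.view(1, *chw.shape)  # 1xCxHxW, as in load_image()
assert nchw.shape == (1, 3, 4, 5)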
47,160 |
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if training_args.should_log:
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = XLNetConfig()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = XLNetLMHeadModel.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = XLNetLMHeadModel.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
return tokenizer(examples["text"], padding=padding, truncation=True, max_length=max_seq_length)
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset line_by_line",
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on every text in dataset",
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on concatenated texts", # not sure if it's right
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = tokenized_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Data collator
data_collator = DataCollatorForPermutationLanguageModeling(
tokenizer=tokenizer,
plm_probability=data_args.plm_probability,
max_span_length=data_args.max_span_length,
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "language-modeling"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
|
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if training_args.should_log:
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = XLNetConfig()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = XLNetLMHeadModel.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = XLNetLMHeadModel.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
return tokenizer(examples["text"], padding=padding, truncation=True, max_length=max_seq_length)
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset line_by_line",
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on every text in dataset",
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {max_seq_length}",
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = tokenized_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Data collator
data_collator = DataCollatorForPermutationLanguageModeling(
tokenizer=tokenizer,
plm_probability=data_args.plm_probability,
max_span_length=data_args.max_span_length,
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "language-modeling"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
|
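The group_texts step in the cell above is independent of any library; a minimal, dependency-free sketch of the same chunking idea (with illustrative names, not taken from the script) is:

# Sketch of the group_texts idea: concatenate token lists, drop the remainder,
# and split the result into fixed-size chunks. Names are illustrative only.
def chunk_token_lists(batch, max_seq_length):
    concatenated = {k: sum(batch[k], []) for k in batch}
    total_length = len(next(iter(concatenated.values())))
    total_length = (total_length // max_seq_length) * max_seq_length
    return {
        k: [t[i:i + max_seq_length] for i in range(0, total_length, max_seq_length)]
        for k, t in concatenated.items()
    }

# 10 tokens with max_seq_length=4 give two chunks of 4; the trailing 2 tokens are dropped.
print(chunk_token_lists({"input_ids": [list(range(6)), list(range(6, 10))]}, 4))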
10,200 |
def destroy_role(connection, module):
params = dict()
params['RoleName'] = module.params.get('name')
role = get_role(connection, module, params['RoleName'])
if role:
# We need to remove any instance profiles from the role before we delete it
try:
instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'])['InstanceProfiles']
except ClientError as e:
module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName']))
except BotoCoreError as e:
module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName']))
if role.get('PermissionsBoundary') is not None:
try:
connection.delete_role_permissions_boundary(RoleName=params['RoleName'])
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not delete role permission boundary on role {0}".format(params['RoleName']))
# Now remove the role from the instance profile(s)
for profile in instance_profiles:
try:
if not module.check_mode:
connection.remove_role_from_instance_profile(InstanceProfileName=profile['InstanceProfileName'], RoleName=params['RoleName'])
if profile['InstanceProfileName'] == params['RoleName']:
if module.params.get("delete_instance_profile"):
try:
connection.delete_instance_profile(InstanceProfileName=profile['InstanceProfileName'])
except ClientError as e:
module.fail_json_aws(e, msg="Unable to remove instance profile {1}".format(profile['InstanceProfileName']))
except ClientError as e:
module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(
params['RoleName'], profile['InstanceProfileName']))
except BotoCoreError as e:
module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(
params['RoleName'], profile['InstanceProfileName']))
# Now remove any attached policies otherwise deletion fails
try:
for policy in get_attached_policy_list(connection, module, params['RoleName']):
if not module.check_mode:
connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy['PolicyArn'])
except ClientError as e:
module.fail_json_aws(e, msg="Unable to detach policy {0} from role {1}".format(policy['PolicyArn'], params['RoleName']))
except BotoCoreError as e:
module.fail_json_aws(e, msg="Unable to detach policy {0} from role {1}".format(policy['PolicyArn'], params['RoleName']))
try:
if not module.check_mode:
connection.delete_role(**params)
except ClientError as e:
module.fail_json_aws(e, msg="Unable to delete role")
except BotoCoreError as e:
module.fail_json_aws(e, msg="Unable to delete role")
else:
module.exit_json(changed=False)
module.exit_json(changed=True)
|
def destroy_role(connection, module):
params = dict()
params['RoleName'] = module.params.get('name')
role = get_role(connection, module, params['RoleName'])
if role:
# We need to remove any instance profiles from the role before we delete it
try:
instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'])['InstanceProfiles']
except ClientError as e:
module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName']))
except BotoCoreError as e:
module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName']))
if role.get('PermissionsBoundary') is not None:
try:
connection.delete_role_permissions_boundary(RoleName=params['RoleName'])
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not delete role permission boundary on role {0}".format(params['RoleName']))
# Now remove the role from the instance profile(s)
for profile in instance_profiles:
try:
if not module.check_mode:
connection.remove_role_from_instance_profile(InstanceProfileName=profile['InstanceProfileName'], RoleName=params['RoleName'])
if profile['InstanceProfileName'] == params['RoleName']:
if module.params.get("delete_instance_profile"):
try:
connection.delete_instance_profile(InstanceProfileName=profile['InstanceProfileName'])
except ClientError as e:
module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile['InstanceProfileName']))
except ClientError as e:
module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(
params['RoleName'], profile['InstanceProfileName']))
except BotoCoreError as e:
module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(
params['RoleName'], profile['InstanceProfileName']))
# Now remove any attached policies otherwise deletion fails
try:
for policy in get_attached_policy_list(connection, module, params['RoleName']):
if not module.check_mode:
connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy['PolicyArn'])
except ClientError as e:
module.fail_json_aws(e, msg="Unable to detach policy {0} from role {1}".format(policy['PolicyArn'], params['RoleName']))
except BotoCoreError as e:
module.fail_json_aws(e, msg="Unable to detach policy {0} from role {1}".format(policy['PolicyArn'], params['RoleName']))
try:
if not module.check_mode:
connection.delete_role(**params)
except ClientError as e:
module.fail_json_aws(e, msg="Unable to delete role")
except BotoCoreError as e:
module.fail_json_aws(e, msg="Unable to delete role")
else:
module.exit_json(changed=False)
module.exit_json(changed=True)
|
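The fix in this pair is a positional-index bug in str.format: the failing message passes a single argument but references `{1}`. A short, self-contained check of that behaviour:

# str.format indices are zero-based; referencing an index with no matching
# positional argument raises IndexError, which is the bug corrected above.
print("Unable to remove instance profile {0}".format("my-profile"))
try:
    "Unable to remove instance profile {1}".format("my-profile")
except IndexError as err:
    print("format failed:", err)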
5,650 |
def _clean_inputs(lp):
"""
Given user inputs for a linear programming problem, return the
objective vector, upper bound constraints, equality constraints,
and simple bounds in a preferred format.
Parameters
----------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : various valid formats, optional
The bounds of ``x``, as ``min`` and ``max`` pairs.
If bounds are specified for all N variables separately, valid formats are:
* a 2D array (2 x N or N x 2);
* a sequence of N sequences, each with 2 values.
If all variables have the same bounds, a single pair of values can
be specified. Valid formats are:
* a sequence with 2 scalar values;
* a sequence with a single element containing 2 scalar values.
If all variables have a lower bound of 0 and no upper bound, the bounds
parameter can be omitted (or given as None).
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
Returns
-------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : 2D array
The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
elements of ``x``. The N x 2 array contains lower bounds in the first
column and upper bounds in the 2nd. Unbounded variables have lower
bound -np.inf and/or upper bound np.inf.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
"""
c, A_ub, b_ub, A_eq, b_eq, bounds, x0 = lp
if c is None:
raise TypeError
try:
c = np.array(c, dtype=np.float, copy=True).squeeze()
except ValueError:
raise TypeError(
"Invalid input for linprog: c must be a 1-D array of numerical "
"coefficients")
else:
# If c is a single value, convert it to a 1-D array.
if c.size == 1:
c = c.reshape((-1))
n_x = len(c)
if n_x == 0 or len(c.shape) != 1:
raise ValueError(
"Invalid input for linprog: c must be a 1-D array and must "
"not have more than one non-singleton dimension")
if not(np.isfinite(c).all()):
raise ValueError(
"Invalid input for linprog: c must not contain values "
"inf, nan, or None")
sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)
try:
A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)
except ValueError:
raise TypeError(
"Invalid input for linprog: A_ub must be a 2-D array "
"of numerical values")
else:
n_ub = A_ub.shape[0]
if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:
raise ValueError(
"Invalid input for linprog: A_ub must have exactly two "
"dimensions, and the number of columns in A_ub must be "
"equal to the size of c")
if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()
or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):
raise ValueError(
"Invalid input for linprog: A_ub must not contain values "
"inf, nan, or None")
try:
b_ub = _format_b_constraints(b_ub)
except ValueError:
raise TypeError(
"Invalid input for linprog: b_ub must be a 1-D array of "
"numerical values, each representing the upper bound of an "
"inequality constraint (row) in A_ub")
else:
if b_ub.shape != (n_ub,):
raise ValueError(
"Invalid input for linprog: b_ub must be a 1-D array; b_ub "
"must not have more than one non-singleton dimension and "
"the number of rows in A_ub must equal the number of values "
"in b_ub")
if not(np.isfinite(b_ub).all()):
raise ValueError(
"Invalid input for linprog: b_ub must not contain values "
"inf, nan, or None")
try:
A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)
except ValueError:
raise TypeError(
"Invalid input for linprog: A_eq must be a 2-D array "
"of numerical values")
else:
n_eq = A_eq.shape[0]
if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:
raise ValueError(
"Invalid input for linprog: A_eq must have exactly two "
"dimensions, and the number of columns in A_eq must be "
"equal to the size of c")
if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()
or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):
raise ValueError(
"Invalid input for linprog: A_eq must not contain values "
"inf, nan, or None")
try:
b_eq = _format_b_constraints(b_eq)
except ValueError:
raise TypeError(
"Invalid input for linprog: b_eq must be a 1-D array of "
"numerical values, each representing the upper bound of an "
"inequality constraint (row) in A_eq")
else:
if b_eq.shape != (n_eq,):
raise ValueError(
"Invalid input for linprog: b_eq must be a 1-D array; b_eq "
"must not have more than one non-singleton dimension and "
"the number of rows in A_eq must equal the number of values "
"in b_eq")
if not(np.isfinite(b_eq).all()):
raise ValueError(
"Invalid input for linprog: b_eq must not contain values "
"inf, nan, or None")
# x0 gives an (optional) starting solution to the solver. If x0 is None,
# skip the checks. Initial solution will be generated automatically.
if x0 is not None:
try:
x0 = np.array(x0, dtype=float, copy=True).squeeze()
except ValueError:
raise TypeError(
"Invalid input for linprog: x0 must be a 1-D array of "
"numerical coefficients")
if x0.ndim == 0:
x0 = x0.reshape((-1))
if len(x0) == 0 or x0.ndim != 1:
raise ValueError(
"Invalid input for linprog: x0 should be a 1-D array; it "
"must not have more than one non-singleton dimension")
if not x0.size == c.size:
raise ValueError(
"Invalid input for linprog: x0 and c should contain the "
"same number of elements")
if not np.isfinite(x0).all():
raise ValueError(
"Invalid input for linprog: x0 must not contain values "
"inf, nan, or None")
# Bounds can be one of these formats:
# (1) None
# (2) a sequence with 2 scalars
# (3) a sequence with 1 element as (2)
# (4) a sequence with N elements, all with 2 values (N is the size of x)
# (5) a sequence with 2 elements, both sequences with N values
# (6) a 2-D array, with shape N x 2 or 2 x N
# Unspecified bounds can be represented by None or (-)np.inf.
# All formats are converted into a N x 2 np.array with (-)np.inf where bounds are unspecified.
# Strings in input result in a ValueError
clean_bounds = np.zeros((n_x,2))
bounds_valid = False
# Determine shape of provided bounds
# np.shape returns a tuple, but only if sizes are consistent
bsh = np.shape(bounds)
# 1. Check if bounds can be interpreted as n_x pairs (n_x is the number of variables)
# Bounds can have sizes n_x x 2 and 2 x n_x
if len(bsh) == 2:
if bsh[0] == n_x and bsh[1] == 2:
for i in range(n_x):
bi = bounds[i] # no need to check if length == 2, np.shape did that
clean_bounds[i,:] = bi
bounds_valid = True
elif bsh[0] == 2 and bsh[1] == n_x:
for i in range(2):
bi = bounds[i] # no need to check length, np.shape did that
clean_bounds[:,i] = bi
bounds_valid = True
# 2. Check if bounds can be interpreted as a single pair
# Bounds can have sizes 1 x 2, 2 x 1 or 2
# Raises TypeError if elements are not scalars
if not bounds_valid:
if len(bsh) == 2:
if bsh[0] == 2 and bsh[1] == 1:
clean_bounds[:,0] = bounds[0][0]
clean_bounds[:,1] = bounds[1][0]
bounds_valid = True
elif bsh[0] == 1 and bsh[1] == 2:
clean_bounds[:,0] = bounds[0][0]
clean_bounds[:,1] = bounds[0][1]
bounds_valid = True
elif len(bsh) == 1:
if bsh[0] == 2:
clean_bounds[:,0] = bounds[0]
clean_bounds[:,1] = bounds[1]
bounds_valid = True
# 3. Check remaining possibility
if bounds is None:
clean_bounds[:,1] = np.inf
bounds_valid = True
bounds_valid = True
# 4. If none of the formats were found, raise a ValueError
if not bounds_valid:
raise ValueError("Invalid input for linprog: unable to interpret bounds.")
# The process above creates nan-s where the input specified None
# Convert the nan-s in the 1st column to -np.inf and in the 2nd column to np.inf
i_none = np.isnan(clean_bounds[:,0])
clean_bounds[i_none,0] = -np.inf;
i_none = np.isnan(clean_bounds[:,1])
clean_bounds[i_none,1] = np.inf;
return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, clean_bounds, x0)
|
def _clean_inputs(lp):
"""
Given user inputs for a linear programming problem, return the
objective vector, upper bound constraints, equality constraints,
and simple bounds in a preferred format.
Parameters
----------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : various valid formats, optional
The bounds of ``x``, as ``min`` and ``max`` pairs.
If bounds are specified for all N variables separately, valid formats are:
* a 2D array (2 x N or N x 2);
* a sequence of N sequences, each with 2 values.
If all variables have the same bounds, a single pair of values can
be specified. Valid formats are:
* a sequence with 2 scalar values;
* a sequence with a single element containing 2 scalar values.
If all variables have a lower bound of 0 and no upper bound, the bounds
parameter can be omitted (or given as None).
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
Returns
-------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : 2D array
The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
elements of ``x``. The N x 2 array contains lower bounds in the first
column and upper bounds in the 2nd. Unbounded variables have lower
bound -np.inf and/or upper bound np.inf.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
"""
c, A_ub, b_ub, A_eq, b_eq, bounds, x0 = lp
if c is None:
raise TypeError
try:
c = np.array(c, dtype=np.float, copy=True).squeeze()
except ValueError:
raise TypeError(
"Invalid input for linprog: c must be a 1-D array of numerical "
"coefficients")
else:
# If c is a single value, convert it to a 1-D array.
if c.size == 1:
c = c.reshape((-1))
n_x = len(c)
if n_x == 0 or len(c.shape) != 1:
raise ValueError(
"Invalid input for linprog: c must be a 1-D array and must "
"not have more than one non-singleton dimension")
if not(np.isfinite(c).all()):
raise ValueError(
"Invalid input for linprog: c must not contain values "
"inf, nan, or None")
sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)
try:
A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)
except ValueError:
raise TypeError(
"Invalid input for linprog: A_ub must be a 2-D array "
"of numerical values")
else:
n_ub = A_ub.shape[0]
if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:
raise ValueError(
"Invalid input for linprog: A_ub must have exactly two "
"dimensions, and the number of columns in A_ub must be "
"equal to the size of c")
if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()
or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):
raise ValueError(
"Invalid input for linprog: A_ub must not contain values "
"inf, nan, or None")
try:
b_ub = _format_b_constraints(b_ub)
except ValueError:
raise TypeError(
"Invalid input for linprog: b_ub must be a 1-D array of "
"numerical values, each representing the upper bound of an "
"inequality constraint (row) in A_ub")
else:
if b_ub.shape != (n_ub,):
raise ValueError(
"Invalid input for linprog: b_ub must be a 1-D array; b_ub "
"must not have more than one non-singleton dimension and "
"the number of rows in A_ub must equal the number of values "
"in b_ub")
if not(np.isfinite(b_ub).all()):
raise ValueError(
"Invalid input for linprog: b_ub must not contain values "
"inf, nan, or None")
try:
A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)
except ValueError:
raise TypeError(
"Invalid input for linprog: A_eq must be a 2-D array "
"of numerical values")
else:
n_eq = A_eq.shape[0]
if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:
raise ValueError(
"Invalid input for linprog: A_eq must have exactly two "
"dimensions, and the number of columns in A_eq must be "
"equal to the size of c")
if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()
or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):
raise ValueError(
"Invalid input for linprog: A_eq must not contain values "
"inf, nan, or None")
try:
b_eq = _format_b_constraints(b_eq)
except ValueError:
raise TypeError(
"Invalid input for linprog: b_eq must be a 1-D array of "
"numerical values, each representing the upper bound of an "
"inequality constraint (row) in A_eq")
else:
if b_eq.shape != (n_eq,):
raise ValueError(
"Invalid input for linprog: b_eq must be a 1-D array; b_eq "
"must not have more than one non-singleton dimension and "
"the number of rows in A_eq must equal the number of values "
"in b_eq")
if not(np.isfinite(b_eq).all()):
raise ValueError(
"Invalid input for linprog: b_eq must not contain values "
"inf, nan, or None")
# x0 gives an (optional) starting solution to the solver. If x0 is None,
# skip the checks. Initial solution will be generated automatically.
if x0 is not None:
try:
x0 = np.array(x0, dtype=float, copy=True).squeeze()
except ValueError:
raise TypeError(
"Invalid input for linprog: x0 must be a 1-D array of "
"numerical coefficients")
if x0.ndim == 0:
x0 = x0.reshape((-1))
if len(x0) == 0 or x0.ndim != 1:
raise ValueError(
"Invalid input for linprog: x0 should be a 1-D array; it "
"must not have more than one non-singleton dimension")
if not x0.size == c.size:
raise ValueError(
"Invalid input for linprog: x0 and c should contain the "
"same number of elements")
if not np.isfinite(x0).all():
raise ValueError(
"Invalid input for linprog: x0 must not contain values "
"inf, nan, or None")
# Bounds can be one of these formats:
# (1) None
# (2) a sequence with 2 scalars
# (3) a sequence with 1 element as (2)
# (4) a sequence with N elements, all with 2 values (N is the size of x)
# (5) a sequence with 2 elements, both sequences with N values
# (6) a 2-D array, with shape N x 2 or 2 x N
# Unspecified bounds can be represented by None or (-)np.inf.
# All formats are converted into a N x 2 np.array with (-)np.inf where bounds are unspecified.
# Strings in input result in a ValueError
clean_bounds = np.zeros((n_x, 2))
bounds_valid = False
# Determine shape of provided bounds
# np.shape returns a tuple, but only if sizes are consistent
bsh = np.shape(bounds)
# 1. Check if bounds can be interpreted as n_x pairs (n_x is the number of variables)
# Bounds can have sizes n_x x 2 and 2 x n_x
if len(bsh) == 2:
if bsh[0] == n_x and bsh[1] == 2:
for i in range(n_x):
bi = bounds[i] # no need to check if length == 2, np.shape did that
clean_bounds[i,:] = bi
bounds_valid = True
elif bsh[0] == 2 and bsh[1] == n_x:
for i in range(2):
bi = bounds[i] # no need to check length, np.shape did that
clean_bounds[:,i] = bi
bounds_valid = True
# 2. Check if bounds can be interpreted as a single pair
# Bounds can have sizes 1 x 2, 2 x 1 or 2
# Raises TypeError if elements are not scalars
if not bounds_valid:
if len(bsh) == 2:
if bsh[0] == 2 and bsh[1] == 1:
clean_bounds[:,0] = bounds[0][0]
clean_bounds[:,1] = bounds[1][0]
bounds_valid = True
elif bsh[0] == 1 and bsh[1] == 2:
clean_bounds[:,0] = bounds[0][0]
clean_bounds[:,1] = bounds[0][1]
bounds_valid = True
elif len(bsh) == 1:
if bsh[0] == 2:
clean_bounds[:,0] = bounds[0]
clean_bounds[:,1] = bounds[1]
bounds_valid = True
# 3. Check remaining possibility
if bounds is None:
clean_bounds[:,1] = np.inf
bounds_valid = True
bounds_valid = True
# 4. If none of the formats were found, raise a ValueError
if not bounds_valid:
raise ValueError("Invalid input for linprog: unable to interpret bounds.")
# The process above creates nan-s where the input specified None
# Convert the nan-s in the 1st column to -np.inf and in the 2nd column to np.inf
i_none = np.isnan(clean_bounds[:,0])
clean_bounds[i_none,0] = -np.inf;
i_none = np.isnan(clean_bounds[:,1])
clean_bounds[i_none,1] = np.inf;
return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, clean_bounds, x0)
|
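The bounds handling above accepts several input shapes and always produces an N x 2 array; a compact, illustrative re-statement of that normalization (covering only the most common shapes, names are not from SciPy) is:

import numpy as np

# Minimal sketch: accept None, one (lb, ub) pair, or one pair per variable, and
# return an (n_x, 2) array with -inf/inf where bounds are unspecified.
def normalize_bounds(bounds, n_x):
    out = np.zeros((n_x, 2))
    if bounds is None:
        out[:, 1] = np.inf
        return out
    arr = np.array(bounds, dtype=float)  # None entries become nan
    if arr.shape in ((2,), (1, 2)):
        out[:] = arr.reshape(2)
    elif arr.shape == (n_x, 2):
        out[:] = arr
    elif arr.shape == (2, n_x):
        out[:] = arr.T
    else:
        raise ValueError("unable to interpret bounds")
    out[np.isnan(out[:, 0]), 0] = -np.inf
    out[np.isnan(out[:, 1]), 1] = np.inf
    return out

print(normalize_bounds((0, None), 3))  # lower bound 0, no upper bound, for all 3 variables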
42,100 |
def _make_scatter_object(
n_targets: int,
axis_order: Sequence[int],
include_dominated_trials: bool,
trials_with_values: Optional[Sequence[Tuple[FrozenTrial, Sequence[float]]]],
hovertemplate: str,
infeasible: bool = False,
dominated_trials: bool = False,
) -> Union["go.Scatter", "go.Scatter3d"]:
trials_with_values = trials_with_values or []
marker = _make_marker(
[trial for trial, _ in trials_with_values],
include_dominated_trials,
dominated_trials=dominated_trials,
infeasible=infeasible,
)
if n_targets == 2:
return go.Scatter(
x=[values[axis_order[0]] for _, values in trials_with_values],
y=[values[axis_order[1]] for _, values in trials_with_values],
text=[_make_hovertext(trial) for trial, _ in trials_with_values],
mode="markers",
hovertemplate=hovertemplate,
marker=marker,
showlegend=False,
)
elif n_targets == 3:
return go.Scatter3d(
x=[values[axis_order[0]] for _, values in trials_with_values],
y=[values[axis_order[1]] for _, values in trials_with_values],
z=[values[axis_order[2]] for _, values in trials_with_values],
text=[_make_hovertext(trial) for trial, _ in trials_with_values],
mode="markers",
hovertemplate=hovertemplate,
marker=marker,
showlegend=False,
)
else:
raise ValueError(
"`plot_pareto_front` function only supports 2 or 3 targets."
" you used {} targets now.".format(n_targets)
)
|
def _make_scatter_object(
n_targets: int,
axis_order: Sequence[int],
include_dominated_trials: bool,
trials_with_values: Optional[Sequence[Tuple[FrozenTrial, Sequence[float]]]],
hovertemplate: str,
infeasible: bool = False,
dominated_trials: bool = False,
) -> Union["go.Scatter", "go.Scatter3d"]:
trials_with_values = trials_with_values or []
marker = _make_marker(
[trial for trial, _ in trials_with_values],
include_dominated_trials,
dominated_trials=dominated_trials,
infeasible=infeasible,
)
if n_targets == 2:
return go.Scatter(
x=[values[axis_order[0]] for _, values in trials_with_values],
y=[values[axis_order[1]] for _, values in trials_with_values],
text=[_make_hovertext(trial) for trial, _ in trials_with_values],
mode="markers",
hovertemplate=hovertemplate,
marker=marker,
showlegend=False,
)
elif n_targets == 3:
return go.Scatter3d(
x=[values[axis_order[0]] for _, values in trials_with_values],
y=[values[axis_order[1]] for _, values in trials_with_values],
z=[values[axis_order[2]] for _, values in trials_with_values],
text=[_make_hovertext(trial) for trial, _ in trials_with_values],
mode="markers",
hovertemplate=hovertemplate,
marker=marker,
showlegend=False,
)
assert False, "Must not reach here"
|
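The modified version ends the function with `assert False, "Must not reach here"` instead of raising, on the grounds that `n_targets` is already validated by the caller. A tiny dependency-free illustration of that pattern and its caveat:

# After exhaustively handling the valid cases, the tail is unreachable, so an
# assertion documents that fact. Caveat: assert statements are stripped when
# Python runs with -O, so this relies on callers validating n_targets first.
def scatter_kind(n_targets: int) -> str:
    if n_targets == 2:
        return "2-D scatter"
    elif n_targets == 3:
        return "3-D scatter"
    assert False, "Must not reach here"

print(scatter_kind(2), "/", scatter_kind(3))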
35,149 |
def get_dtype_simd_width(dtype: str) -> int:
"""Takes a dtype, and returns how many of that dtype fit into a single microcontroller word.
>>> get_dtype_simd_width("int8")
4
>>> get_dtype_simd_width("int16")
2
"""
assert dtype[0:3] == "int"
dtype_width = int(dtype[3:])
return MICRO_WORD_LENGTH // dtype_width
|
def get_dtype_simd_width(dtype: str) -> int:
"""Takes a dtype, and returns how many of that dtype fit into a single microcontroller word.
>>> get_dtype_simd_width("int8")
4
>>> get_dtype_simd_width("int16")
2
"""
assert dtype.startswith("int")
dtype_width = int(dtype[3:])
return MICRO_WORD_LENGTH // dtype_width
|
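The docstring above carries doctest-style examples; they can be executed with the standard doctest module. A self-contained sketch (MICRO_WORD_LENGTH = 32 is assumed here purely so the examples pass; the real constant lives elsewhere in that project):

import doctest

MICRO_WORD_LENGTH = 32  # assumed value, chosen only so the doctests below hold

def get_dtype_simd_width(dtype: str) -> int:
    """
    >>> get_dtype_simd_width("int8")
    4
    >>> get_dtype_simd_width("int16")
    2
    """
    assert dtype.startswith("int")
    return MICRO_WORD_LENGTH // int(dtype[3:])

print(doctest.testmod())  # e.g. TestResults(failed=0, attempted=2)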
41,212 |
def assert_optimizes(before: cirq.Circuit, expected: cirq.Circuit, **kwargs):
actual = cirq.Circuit(before)
opt = cirq.MergeInteractionsToSqrtIswap(**kwargs)
opt.optimize_circuit(actual)
# Ignore differences that would be caught by follow-up optimizations.
followup_optimizations: List[Callable[[cirq.Circuit], None]] = [
cirq.merge_single_qubit_gates_into_phased_x_z,
cirq.EjectPhasedPaulis().optimize_circuit,
cirq.EjectZ().optimize_circuit,
cirq.DropNegligible().optimize_circuit,
cirq.DropEmptyMoments().optimize_circuit,
]
for post in followup_optimizations:
post(actual)
post(expected)
assert actual == expected, f'ACTUAL {actual} : EXPECTED {expected}'
|
def assert_optimizes(before: cirq.Circuit, expected: cirq.Circuit, **kwargs):
actual = before.copy()
opt = cirq.MergeInteractionsToSqrtIswap(**kwargs)
opt.optimize_circuit(actual)
# Ignore differences that would be caught by follow-up optimizations.
followup_optimizations: List[Callable[[cirq.Circuit], None]] = [
cirq.merge_single_qubit_gates_into_phased_x_z,
cirq.EjectPhasedPaulis().optimize_circuit,
cirq.EjectZ().optimize_circuit,
cirq.DropNegligible().optimize_circuit,
cirq.DropEmptyMoments().optimize_circuit,
]
for post in followup_optimizations:
post(actual)
post(expected)
assert actual == expected, f'ACTUAL {actual} : EXPECTED {expected}'
|
26,402 |
def runSetup():
"""
Calls setup(). This function exists so that the setup() invocation precedes more internal
functionality. The `version` module is imported dynamically by importVersion() below.
"""
boto = 'boto==2.48.0'
boto3 = 'boto3>=1.7.50, <2.0'
futures = 'futures==3.1.1'
pycryptodome = 'pycryptodome==3.5.1'
pymesos = 'pymesos==0.3.15'
psutil = 'psutil >= 3.0.1, <6'
pynacl = 'pynacl==1.3.0'
gcs = 'google-cloud-storage==1.6.0'
gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'
apacheLibcloud = 'apache-libcloud==2.2.1'
cwltool = 'cwltool>=3.0.20201116114821'
galaxyToolUtil = 'galaxy-tool-util'
htcondor = 'htcondor>=8.6.0'
kubernetes = 'kubernetes>=10, <11'
idna = 'idna>=2'
pytz = 'pytz>=2012'
dill = 'dill>=0.3.2, <0.4'
six = 'six>=1.10.0'
future = 'future'
requests = 'requests>=2, <3'
docker = 'docker==4.3.1'
dateutil = 'python-dateutil'
addict = 'addict>=2.2.1, <2.3'
enlighten = 'enlighten>=1.5.2, <2'
core_reqs = [
dill,
six,
future,
requests,
docker,
dateutil,
psutil,
addict,
pytz,
enlighten]
aws_reqs = [
boto,
boto3,
futures,
pycryptodome]
cwl_reqs = [
cwltool,
galaxyToolUtil]
encryption_reqs = [
pynacl]
google_reqs = [
gcs_oauth2_boto_plugin, # is this being used??
apacheLibcloud,
gcs]
htcondor_reqs = [
htcondor]
kubernetes_reqs = [
kubernetes,
idna] # Kubernetes's urllib3 can manage to use idna without really depending on it.
mesos_reqs = [
pymesos,
psutil]
wdl_reqs = []
# htcondor is not supported by apple
# this is tricky to conditionally support in 'all' due
# to how wheels work, so it is not included in all and
# must be explicitly installed as an extra
all_reqs = \
aws_reqs + \
cwl_reqs + \
encryption_reqs + \
google_reqs + \
kubernetes_reqs + \
mesos_reqs
setup(
name='toil',
version=version.distVersion,
description='Pipeline management software for clusters.',
author='Benedict Paten',
author_email='benedict@soe.usc.edu',
url="https://github.com/DataBiosphere/toil",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Healthcare Industry',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Topic :: System :: Distributed Computing',
'Topic :: Utilities'],
license="Apache License v2.0",
python_requires=">=3.6",
install_requires=core_reqs,
extras_require={
'aws': aws_reqs,
'cwl': cwl_reqs,
'encryption': encryption_reqs,
'google': google_reqs,
'htcondor:sys_platform!="darwin"': htcondor_reqs,
'kubernetes': kubernetes_reqs,
'mesos': mesos_reqs,
'wdl': wdl_reqs,
'all': all_reqs},
package_dir={'': 'src'},
packages=find_packages(where='src',
# Note that we intentionally include the top-level `test` package for
# functionality like the @experimental and @integrative decorators:
exclude=['*.test.*']),
package_data = {
'': ['*.yml', 'cloud-config'],
},
# Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so
# you can't just change them here. Luckily, most of them are pretty unique strings, and thus
# easy to search for.
entry_points={
'console_scripts': [
'toil = toil.utils.toilMain:main',
'_toil_worker = toil.worker:main',
'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',
'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',
'toil-wdl-runner = toil.wdl.toilwdl:main',
'_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',
'_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})
|
def runSetup():
"""
Calls setup(). This function exists so that the setup() invocation precedes more internal
functionality. The `version` module is imported dynamically by importVersion() below.
"""
boto = 'boto==2.48.0'
boto3 = 'boto3>=1.7.50, <2.0'
futures = 'futures==3.1.1'
pycryptodome = 'pycryptodome==3.5.1'
pymesos = 'pymesos==0.3.15'
psutil = 'psutil >= 3.0.1, <6'
pynacl = 'pynacl==1.3.0'
gcs = 'google-cloud-storage==1.6.0'
gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'
apacheLibcloud = 'apache-libcloud==2.2.1'
cwltool = 'cwltool==3.0.20201121085451'
galaxyToolUtil = 'galaxy-tool-util'
htcondor = 'htcondor>=8.6.0'
kubernetes = 'kubernetes>=10, <11'
idna = 'idna>=2'
pytz = 'pytz>=2012'
dill = 'dill>=0.3.2, <0.4'
six = 'six>=1.10.0'
future = 'future'
requests = 'requests>=2, <3'
docker = 'docker==4.3.1'
dateutil = 'python-dateutil'
addict = 'addict>=2.2.1, <2.3'
enlighten = 'enlighten>=1.5.2, <2'
core_reqs = [
dill,
six,
future,
requests,
docker,
dateutil,
psutil,
addict,
pytz,
enlighten]
aws_reqs = [
boto,
boto3,
futures,
pycryptodome]
cwl_reqs = [
cwltool,
galaxyToolUtil]
encryption_reqs = [
pynacl]
google_reqs = [
gcs_oauth2_boto_plugin, # is this being used??
apacheLibcloud,
gcs]
htcondor_reqs = [
htcondor]
kubernetes_reqs = [
kubernetes,
idna] # Kubernetes's urllib3 can manage to use idna without really depending on it.
mesos_reqs = [
pymesos,
psutil]
wdl_reqs = []
# htcondor is not supported by apple
# this is tricky to conditionally support in 'all' due
# to how wheels work, so it is not included in all and
# must be explicitly installed as an extra
all_reqs = \
aws_reqs + \
cwl_reqs + \
encryption_reqs + \
google_reqs + \
kubernetes_reqs + \
mesos_reqs
setup(
name='toil',
version=version.distVersion,
description='Pipeline management software for clusters.',
author='Benedict Paten',
author_email='benedict@soe.usc.edu',
url="https://github.com/DataBiosphere/toil",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Healthcare Industry',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Topic :: System :: Distributed Computing',
'Topic :: Utilities'],
license="Apache License v2.0",
python_requires=">=3.6",
install_requires=core_reqs,
extras_require={
'aws': aws_reqs,
'cwl': cwl_reqs,
'encryption': encryption_reqs,
'google': google_reqs,
'htcondor:sys_platform!="darwin"': htcondor_reqs,
'kubernetes': kubernetes_reqs,
'mesos': mesos_reqs,
'wdl': wdl_reqs,
'all': all_reqs},
package_dir={'': 'src'},
packages=find_packages(where='src',
# Note that we intentionally include the top-level `test` package for
# functionality like the @experimental and @integrative decorators:
exclude=['*.test.*']),
package_data = {
'': ['*.yml', 'cloud-config'],
},
# Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so
# you can't just change them here. Luckily, most of them are pretty unique strings, and thus
# easy to search for.
entry_points={
'console_scripts': [
'toil = toil.utils.toilMain:main',
'_toil_worker = toil.worker:main',
'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',
'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',
'toil-wdl-runner = toil.wdl.toilwdl:main',
'_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',
'_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})
|
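The only change in this pair is the cwltool requirement moving from a `>=` range to an exact `==` pin. How such specifiers match can be checked with the third-party packaging library (assumed available; it is not imported by the snippet above):

from packaging.specifiers import SpecifierSet

ranged = SpecifierSet(">=3.0.20201116114821")
pinned = SpecifierSet("==3.0.20201121085451")

print(ranged.contains("3.0.20201121085451"))  # True: the newer release satisfies the range
print(pinned.contains("3.0.20201116114821"))  # False: only the exact pinned version matches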
46,058 |
def flops_to_string(flops: float,
units: Optional[str] = 'GFLOPs',
precision: int = 2) -> str:
"""Convert FLOPs number into a string.
Note that here we count a multiply-add as one FLOP.
Args:
flops (float): FLOPs number to be converted.
units (str | None): Converted FLOPs units. Options are None, 'GFLOPs',
'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically
choose the most suitable unit for FLOPs. Default: 'GFLOPs'.
precision (int): Digit number after the decimal point. Default: 2.
Returns:
str: The converted FLOPs number with units.
Examples:
>>> flops_to_string(1e9)
'1.0 GFLOPs'
>>> flops_to_string(2e5, 'MFLOPs')
'0.2 MFLOPs'
>>> flops_to_string(3e-9, None)
'3e-09 FLOPs'
"""
if units is None:
if flops // 10**9 > 0:
return str(round(flops / 10.**9, precision)) + ' GFLOPs'
elif flops // 10**6 > 0:
return str(round(flops / 10.**6, precision)) + ' MFLOPs'
elif flops // 10**3 > 0:
return str(round(flops / 10.**3, precision)) + ' KFLOPs'
else:
return str(flops) + ' FLOPs'
else:
if units == 'GFLOPs':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MFLOPs':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KFLOPs':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' FLOPs'
|
def flops_to_string(flops: float,
units: str = 'GFLOPs',
precision: int = 2) -> str:
"""Convert FLOPs number into a string.
Note that here we count a multiply-add as one FLOP.
Args:
flops (float): FLOPs number to be converted.
units (str | None): Converted FLOPs units. Options are None, 'GFLOPs',
'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically
choose the most suitable unit for FLOPs. Default: 'GFLOPs'.
precision (int): Digit number after the decimal point. Default: 2.
Returns:
str: The converted FLOPs number with units.
Examples:
>>> flops_to_string(1e9)
'1.0 GFLOPs'
>>> flops_to_string(2e5, 'MFLOPs')
'0.2 MFLOPs'
>>> flops_to_string(3e-9, None)
'3e-09 FLOPs'
"""
if units is None:
if flops // 10**9 > 0:
return str(round(flops / 10.**9, precision)) + ' GFLOPs'
elif flops // 10**6 > 0:
return str(round(flops / 10.**6, precision)) + ' MFLOPs'
elif flops // 10**3 > 0:
return str(round(flops / 10.**3, precision)) + ' KFLOPs'
else:
return str(flops) + ' FLOPs'
else:
if units == 'GFLOPs':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MFLOPs':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KFLOPs':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' FLOPs'
|
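The cascade of unit branches above can also be written as a table lookup; a small illustrative alternative (hypothetical helper, not the library's API):

# Table-driven version of the same scaling performed by flops_to_string when a
# unit is given explicitly. Purely illustrative.
_UNIT_SCALE = {"GFLOPs": 10**9, "MFLOPs": 10**6, "KFLOPs": 10**3}

def format_flops(flops: float, units: str = "GFLOPs", precision: int = 2) -> str:
    scale = _UNIT_SCALE.get(units)
    if scale is None:
        return f"{flops} FLOPs"
    return f"{round(flops / scale, precision)} {units}"

print(format_flops(1e9))            # 1.0 GFLOPs
print(format_flops(2e5, "MFLOPs"))  # 0.2 MFLOPs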
43,808 |
def _inner_net_flow_constraint_hamiltonian(
graph: nx.DiGraph, node: int
) -> Tuple[List[float], List[qml.operation.Observable]]:
r"""Calculates the squared inner portion of the Hamiltonian in :func:`net_flow_constraint`.
For a given :math:`i`, this function returns:
.. math::
\left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2}.
Args:
graph (nx.DiGraph): the graph specifying possible edges
node: a fixed node
Returns:
Tuple[List[float], List[qml.operation.Observable]]: The list of coefficients and list of
observables of the inner part of the net-flow constraint Hamiltonian.
"""
edges_to_qubits = edges_to_wires(graph)
coeffs = []
ops = []
out_edges = graph.out_edges(node)
in_edges = graph.in_edges(node)
coeffs.append(len(out_edges) - len(in_edges))
ops.append(qml.Identity(0))
for edge in out_edges:
wires = (edges_to_qubits[edge],)
coeffs.append(-1)
ops.append(qml.PauliZ(wires))
for edge in in_edges:
wires = (edges_to_qubits[edge],)
coeffs.append(1)
ops.append(qml.PauliZ(wires))
coeffs, ops = _square_hamiltonian_terms(coeffs, ops)
coeffs, ops = _collect_duplicates(coeffs, ops)
hamiltonian = qml.Hamiltonian(coeffs, ops)
return hamiltonian
|
def _inner_net_flow_constraint_hamiltonian(
graph: nx.DiGraph, node: int
) -> Tuple[List[float], List[qml.operation.Observable]]:
r"""Calculates the squared inner portion of the Hamiltonian in :func:`net_flow_constraint`.
For a given :math:`i`, this function returns:
.. math::
\left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2}.
Args:
graph (nx.DiGraph): the graph specifying possible edges
node: a fixed node
Returns:
Tuple[List[float], List[qml.operation.Observable]]: The list of coefficients and list of
observables of the inner part of the net-flow constraint Hamiltonian.
"""
edges_to_qubits = edges_to_wires(graph)
coeffs = []
ops = []
out_edges = graph.out_edges(node)
in_edges = graph.in_edges(node)
coeffs.append(len(out_edges) - len(in_edges))
ops.append(qml.Identity(0))
for edge in out_edges:
wires = (edges_to_qubits[edge],)
coeffs.append(-1)
ops.append(qml.PauliZ(wires))
for edge in in_edges:
wires = (edges_to_qubits[edge],)
coeffs.append(1)
ops.append(qml.PauliZ(wires))
coeffs, ops = _square_hamiltonian_terms(coeffs, ops)
coeffs, ops = _collect_duplicates(coeffs, ops)
return qml.Hamiltonian(coeffs, ops)
|
52,258 |
def _move_model_in_data(env, old_model, new_model, field):
renames = [
('mail_message', 'model', 'res_id'),
('ir_attachment', 'res_model', 'res_id'),
('mail_activity', 'res_model', 'res_id'),
('ir_model_data', 'model', 'res_id'),
]
for rename in renames:
query = """
UPDATE {table} t
SET {field1} = %(new_value1)s, {field2} = am.id
FROM account_move am
WHERE t.{field1} = %(old_value1)s AND am.{field} = t.{field2}"""
openupgrade.logged_query(env.cr, sql.SQL(query).format(
table=sql.Identifier(rename[0]),
field1=sql.Identifier(rename[1]),
field2=sql.Identifier(rename[2]),
field=sql.Identifier(field)
), {
"old_value1": old_model,
"new_value1": new_model,
})
openupgrade.logged_query(env.cr, sql.SQL("""
UPDATE mail_followers mf
SET res_model = %(new_value1)s, res_id = am.id
FROM account_move am
JOIN mail_followers mf1
ON am.{field} = mf1.res_id AND mf1.res_model = %(old_value1)s
LEFT JOIN mail_followers mf2
ON am.id = mf2.res_id
AND mf2.res_model = 'account.move'
AND mf2.partner_id = mf1.partner_id
WHERE mf.id = mf1.id AND mf2.id IS NULL
""").format(
field=sql.Identifier(field)
), {
"old_value1": old_model,
"new_value1": new_model,
})
|
def _move_model_in_data(env, old_model, new_model, field):
renames = [
('mail_message', 'model', 'res_id'),
('ir_attachment', 'res_model', 'res_id'),
('mail_activity', 'res_model', 'res_id'),
('ir_model_data', 'model', 'res_id'),
]
for rename in renames:
query = """
UPDATE {table} t
SET {field1} = %(new_value1)s, {field2} = am.id
FROM account_move am
WHERE t.{field1} = %(old_value1)s AND am.{field} = t.{field2}"""
openupgrade.logged_query(env.cr, sql.SQL(query).format(
table=sql.Identifier(rename[0]),
field1=sql.Identifier(rename[1]),
field2=sql.Identifier(rename[2]),
field=sql.Identifier(field)
), {
"old_value1": old_model,
"new_value1": new_model,
})
openupgrade.logged_query(env.cr, sql.SQL("""
UPDATE mail_followers mf
SET res_model = %(new_value1)s, res_id = am.id
FROM account_move am
JOIN mail_followers mf1
ON (am.{field} = mf1.res_id AND mf1.res_model = %(old_value1)s)
LEFT JOIN mail_followers mf2
ON (am.id = mf2.res_id
AND mf2.res_model = 'account.move'
AND mf2.partner_id = mf1.partner_id)
WHERE mf.id = mf1.id AND mf2.id IS NULL
""").format(
field=sql.Identifier(field)
), {
"old_value1": old_model,
"new_value1": new_model,
})
|
16,370 |
def _schema_with_defaults(
username="", host="", port=80, path="/", ssl=False, verifiy_ssl=True
):
return vol.Schema(
{
vol.Required(CONF_USERNAME, default=username): str,
vol.Required(CONF_HOST, default=host): str,
vol.Required(CONF_PORT, default=port): cv.port,
vol.Required(CONF_PATH, default=path): str,
vol.Required(CONF_SSL, default=ssl): bool,
vol.Required(CONF_VERIFY_SSL, default=verifiy_ssl): bool,
},
extra=vol.ALLOW_EXTRA,
)
|
def _schema_with_defaults(
username="", host="", port=80, path="/", ssl=False, verifiy_ssl=True
):
return vol.Schema(
{
vol.Required(CONF_USERNAME, default=username): str,
vol.Required(CONF_HOST, default=host): str,
vol.Required(CONF_PORT, default=port): cv.port,
vol.Required(CONF_PATH, default=path): str,
vol.Required(CONF_SSL, default=ssl): bool,
vol.Required(CONF_VERIFY_SSL, default=verify_ssl): bool,
},
extra=vol.ALLOW_EXTRA,
)
|
53,139 |
def validate_config_legacy(check, check_display_queue, files_failed, files_warned, file_counter):
config_files = get_config_files(check)
for config_file in config_files:
file_counter.append(None)
file_display_queue = []
file_name = basepath(config_file)
try:
file_data = read_file(config_file)
config_data = yaml.safe_load(file_data)
except Exception as e:
files_failed[config_file] = True
# We must convert to text here to free Exception object before it goes out of scope
error = str(e)
check_display_queue.append(lambda: echo_info(f'{file_name}:', indent=True))
check_display_queue.append(lambda: echo_failure('Invalid YAML -', indent=FILE_INDENT))
check_display_queue.append(lambda: echo_info(error, indent=FILE_INDENT * 2))
continue
if check not in LOGS_ONLY_INTEGRATIONS:
# TODO: Validate logs configuration
errors = validate_config(file_data)
for err in errors:
err_msg = str(err)
if err.severity == SEVERITY_ERROR:
file_display_queue.append(lambda x=err_msg: echo_failure(x, indent=FILE_INDENT))
files_failed[config_file] = True
elif err.severity == SEVERITY_WARNING:
file_display_queue.append(lambda x=err_msg: echo_warning(x, indent=FILE_INDENT))
files_warned[config_file] = True
else:
file_display_queue.append(lambda x=err_msg: echo_info(x, indent=FILE_INDENT))
# Verify there is an `instances` section
if 'instances' not in config_data:
files_failed[config_file] = True
file_display_queue.append(lambda: echo_failure('Missing `instances` section', indent=FILE_INDENT))
# Verify there is a default instance
else:
instances = config_data['instances']
if check not in IGNORE_DEFAULT_INSTANCE and not isinstance(instances, list):
files_failed[config_file] = True
file_display_queue.append(lambda: echo_failure('No default instance', indent=FILE_INDENT))
if file_display_queue:
check_display_queue.append(lambda x=file_name: echo_info(f'{x}:', indent=True))
check_display_queue.extend(file_display_queue)
|
def validate_config_legacy(check, check_display_queue, files_failed, files_warned, file_counter):
config_files = get_config_files(check)
for config_file in config_files:
file_counter.append(None)
file_display_queue = []
file_name = basepath(config_file)
try:
file_data = read_file(config_file)
config_data = yaml.safe_load(file_data)
except Exception as e:
files_failed[config_file] = True
# We must convert to text here to free Exception object before it goes out of scope
error = str(e)
check_display_queue.append(lambda: echo_info(f'{file_name}:', indent=True))
check_display_queue.append(lambda: echo_failure('Invalid YAML -', indent=FILE_INDENT))
check_display_queue.append(lambda: echo_info(error, indent=FILE_INDENT * 2))
continue
if check in LOGS_ONLY_INTEGRATIONS:
# It doesn't make sense for logs-only integrations to have `init_config`
# and `instances` entries anyway, so let's not validate them.
continue
# TODO: Validate logs configuration
errors = validate_config(file_data)
for err in errors:
err_msg = str(err)
if err.severity == SEVERITY_ERROR:
file_display_queue.append(lambda x=err_msg: echo_failure(x, indent=FILE_INDENT))
files_failed[config_file] = True
elif err.severity == SEVERITY_WARNING:
file_display_queue.append(lambda x=err_msg: echo_warning(x, indent=FILE_INDENT))
files_warned[config_file] = True
else:
file_display_queue.append(lambda x=err_msg: echo_info(x, indent=FILE_INDENT))
# Verify there is an `instances` section
if 'instances' not in config_data:
files_failed[config_file] = True
file_display_queue.append(lambda: echo_failure('Missing `instances` section', indent=FILE_INDENT))
# Verify there is a default instance
else:
instances = config_data['instances']
if check not in IGNORE_DEFAULT_INSTANCE and not isinstance(instances, list):
files_failed[config_file] = True
file_display_queue.append(lambda: echo_failure('No default instance', indent=FILE_INDENT))
if file_display_queue:
check_display_queue.append(lambda x=file_name: echo_info(f'{x}:', indent=True))
check_display_queue.extend(file_display_queue)
|
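Both versions bind the current message into each queued callback with a default argument (`lambda x=err_msg: ...`). The reason is Python's late-binding closures; a short demonstration:

# Closures capture the variable, not its value: without the default-argument
# trick every queued lambda would report the last message in the loop.
late = [lambda: msg for msg in ("first", "second", "third")]
bound = [lambda m=msg: m for msg in ("first", "second", "third")]

print([f() for f in late])   # ['third', 'third', 'third']
print([f() for f in bound])  # ['first', 'second', 'third']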
29,028 |
def process_ssh_key(keypair: Mapping, credentials: Credentials):
if len(credentials.identities) != 1:
raise SSHKeyProcessingError(
f"SSH credentials have {len(credentials.identities)}" f" users associated with " f"it!"
)
if not _contains_both_keys(keypair):
raise SSHKeyProcessingError("Private or public key missing!")
# TODO SSH key should be associated with IP that monkey exploited
ip = Monkey.get_single_monkey_by_guid(credentials.monkey_guid).ip_addresses[0]
username = credentials.identities[0]["username"]
encrypted_keys = _encrypt_ssh_keys(keypair)
ConfigService.ssh_add_keys(
user=username,
public_key=encrypted_keys["public_key"],
private_key=encrypted_keys["private_key"],
ip=ip,
)
|
def process_ssh_key(keypair: Mapping, credentials: Credentials):
if len(credentials.identities) != 1:
raise SSHKeyProcessingError(
f"SSH credentials have {len(credentials.identities)} users associated with them"
)
if not _contains_both_keys(keypair):
raise SSHKeyProcessingError("Private or public key missing!")
# TODO SSH key should be associated with IP that monkey exploited
ip = Monkey.get_single_monkey_by_guid(credentials.monkey_guid).ip_addresses[0]
username = credentials.identities[0]["username"]
encrypted_keys = _encrypt_ssh_keys(keypair)
ConfigService.ssh_add_keys(
user=username,
public_key=encrypted_keys["public_key"],
private_key=encrypted_keys["private_key"],
ip=ip,
)
|
27,477 |
def test_search_all_iam_policies(capsys):
scope = "projects/{}".format(PROJECT)
query = "policy:roles/owner"
quickstart_searchalliampolicies.search_all_iam_policies(scope, query)
out, _ = capsys.readouterr()
assert "roles/owner" in out
|
def test_search_all_iam_policies(capsys):
scope = "projects/{}".format(PROJECT)
query = "policy:roles/owner"
quickstart_searchalliampolicies.search_all_iam_policies(scope, query=query)
out, _ = capsys.readouterr()
assert "roles/owner" in out
|
57,977 |
def main():
args = demisto.args()
values = args.get('value')
keys = args.get('key')
if not isinstance(values, list):
try:
values = json.loads(values)
except TypeError:
pass
if not isinstance(values, list):
try:
values = values.split(",")
except AttributeError:
pass
if not isinstance(values, list):
return_error("Canoot convert the input values into a list")
if not isinstance(keys, list):
try:
keys = json.loads(keys)
except TypeError:
pass
if not isinstance(keys, list):
try:
keys = keys.split(",")
except AttributeError:
pass
if not isinstance(keys, list):
return_error("Canoot convert the input key into a list")
if len(values) != len(keys):
return_error("The length of the value and key are not the same")
try:
return_data = dict()
for index in range(0, len(values)):
return_data[keys[index]] = values[index]
except Exception as err:
return_error(f"There was an error - {err}")
return_results(return_data)
|
def main():
args = demisto.args()
values = argToList(args.get('value'))
keys = argToList(args.get('key'))
if not isinstance(values, list):
try:
values = json.loads(values)
except TypeError:
pass
if not isinstance(values, list):
try:
values = values.split(",")
except AttributeError:
pass
if not isinstance(values, list):
return_error("Canoot convert the input values into a list")
if not isinstance(keys, list):
try:
keys = json.loads(keys)
except TypeError:
pass
if not isinstance(keys, list):
try:
keys = keys.split(",")
except AttributeError:
pass
if not isinstance(keys, list):
return_error("Canoot convert the input key into a list")
if len(values) != len(keys):
return_error("The length of the value and key are not the same")
try:
return_data = dict()
for index in range(0, len(values)):
return_data[keys[index]] = values[index]
except Exception as err:
return_error(f"There was an error - {err}")
return_results(return_data)
|
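The modified version routes both inputs through `argToList` before the existing fallbacks. As an illustration of what such a helper typically does, here is a hypothetical re-implementation (not the platform's actual code):

import json

# Hypothetical argToList-style helper: accept a real list, a JSON array string,
# or a comma-separated string, and always return a list.
def arg_to_list(value, separator=","):
    if value is None:
        return []
    if isinstance(value, list):
        return value
    if isinstance(value, str):
        stripped = value.strip()
        if stripped.startswith("["):
            try:
                return json.loads(stripped)
            except json.JSONDecodeError:
                pass
        return [item.strip() for item in stripped.split(separator)]
    return [value]

print(arg_to_list("a, b, c"))     # ['a', 'b', 'c']
print(arg_to_list('["a", "b"]'))  # ['a', 'b']
print(arg_to_list(["x", "y"]))    # ['x', 'y']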
40,909 |
def blacklist_filter_bam(bam, blacklist, out_dir):
prefix = os.path.join(out_dir,
os.path.basename(strip_ext_bam(bam)))
filtered = '{}.bfilt.bam'.format(prefix)
if blacklist=='' or get_num_lines(blacklist)==0:
cmd = 'zcat -f {} | gzip -nc > {}'.format(bam, filtered)
run_shell_cmd(cmd)
else:
# due to bedtools bug when .gz is given for -a and -b
tmp2 = gunzip(blacklist, 'tmp2', out_dir)
cmd = 'bedtools intersect -nonamecheck -v -abam {} -b {} > {}'
cmd = cmd.format(
bam,
tmp2, # blacklist
filtered)
run_shell_cmd(cmd)
rm_f([tmp2])
return filtered
|
def blacklist_filter_bam(bam, blacklist, out_dir):
prefix = os.path.join(out_dir,
os.path.basename(strip_ext_bam(bam)))
filtered = '{}.bfilt.bam'.format(prefix)
if not blacklist or not get_num_lines(blacklist):
cmd = 'zcat -f {} | gzip -nc > {}'.format(bam, filtered)
run_shell_cmd(cmd)
else:
# due to bedtools bug when .gz is given for -a and -b
tmp2 = gunzip(blacklist, 'tmp2', out_dir)
cmd = 'bedtools intersect -nonamecheck -v -abam {} -b {} > {}'
cmd = cmd.format(
bam,
tmp2, # blacklist
filtered)
run_shell_cmd(cmd)
rm_f([tmp2])
return filtered
|
28,595 |
def plot_pair(
data,
group="posterior",
var_names: Optional[List[str]] = None,
filter_vars: Optional[str] = None,
coords=None,
marginals=False,
figsize=None,
textsize=None,
kind: Union[str, List[str]] = "scatter",
gridsize="auto",
contour: Optional[bool] = None,
plot_kwargs=None,
fill_last=False,
divergences=False,
colorbar=False,
labeller=None,
ax=None,
divergences_kwargs=None,
scatter_kwargs=None,
kde_kwargs=None,
hexbin_kwargs=None,
backend=None,
backend_kwargs=None,
marginal_kwargs=None,
point_estimate=None,
point_estimate_kwargs=None,
point_estimate_marker_kwargs=None,
reference_values=None,
reference_values_kwargs=None,
show=None,
):
"""
Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Parameters
----------
data: obj
Any object that can be converted to an :class:`az.InferenceData` object
refer to documentation of :func:`az.convert_to_dataset` for details
group: str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names: list of variable names, optional
Variables to be plotted, if None all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
marginals: bool, optional
If True pairplot will include marginal distributions for every variable
figsize: figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str or List[str]
Type of plot to display (scatter, kde and/or hexbin)
gridsize: int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool, optional, deprecated, Defaults to True.
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
**Note:** this default is implemented in the body of the code, not in argument processing.
fill_last : bool
        If True fill the last contour of the 2D KDE plot. Defaults to False.
divergences: Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar: bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs: dicts, optional
Additional keywords passed to ``ax.scatter`` for divergences
scatter_kwargs:
Additional keywords passed to ``ax.plot`` when using scatter kind
kde_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_kde` when using kde kind
hexbin_kwargs: dict, optional
Additional keywords passed to ``ax.hexbin`` when using hexbin kind
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
marginal_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions
plotted in the diagonal.
point_estimate: str, optional
Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be
plotted using a scatter marker and vertical/horizontal lines.
point_estimate_kwargs: dict, optional
Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh)
point_estimate_marker_kwargs: dict, optional
Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh
reference_values: dict, optional
Reference values for the plotted variables. The Reference values will be plotted
using a scatter marker
reference_values_kwargs: dict, optional
Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences and select variables with regular expressions
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['^t', 'mu'],
... filter_vars="regex",
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
kind_boolean: Union[bool, List[bool]]
if isinstance(kind, str):
kind_boolean = kind in valid_kinds
else:
kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
if not np.all(kind_boolean):
raise ValueError((f"Plot type {kind} not recognized." "Plot type must be in {valid_kinds}"))
if fill_last or contour:
warnings.warn(
"fill_last and contour will be deprecated. Please use kde_kwargs",
UserWarning,
)
if plot_kwargs:
warnings.warn(
"plot_kwargs will be deprecated."
" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
UserWarning,
)
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
# Get posterior draws and combine chains
dataset = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, dataset, filter_vars)
plotters = list(
xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)
)
flat_var_names = [
labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters
]
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
UserWarning,
)
if gridsize == "auto":
gridsize = int(dataset.dims["draw"] ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise ValueError("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
plotters=plotters,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
scatter_kwargs=scatter_kwargs,
kde_kwargs=kde_kwargs,
hexbin_kwargs=hexbin_kwargs,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
marginal_kwargs=marginal_kwargs,
show=show,
marginals=marginals,
point_estimate=point_estimate,
point_estimate_kwargs=point_estimate_kwargs,
point_estimate_marker_kwargs=point_estimate_marker_kwargs,
reference_values=reference_values,
reference_values_kwargs=reference_values_kwargs,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax
|
def plot_pair(
data,
group="posterior",
var_names: Optional[List[str]] = None,
filter_vars: Optional[str] = None,
coords=None,
marginals=False,
figsize=None,
textsize=None,
kind: Union[str, List[str]] = "scatter",
gridsize="auto",
contour: Optional[bool] = None,
plot_kwargs=None,
fill_last=False,
divergences=False,
colorbar=False,
labeller=None,
ax=None,
divergences_kwargs=None,
scatter_kwargs=None,
kde_kwargs=None,
hexbin_kwargs=None,
backend=None,
backend_kwargs=None,
marginal_kwargs=None,
point_estimate=None,
point_estimate_kwargs=None,
point_estimate_marker_kwargs=None,
reference_values=None,
reference_values_kwargs=None,
show=None,
):
"""
Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Parameters
----------
data: obj
Any object that can be converted to an :class:`az.InferenceData` object
refer to documentation of :func:`arviz.convert_to_dataset` for details
group: str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names: list of variable names, optional
        Variables to be plotted. If None, all variables are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
marginals: bool, optional
If True pairplot will include marginal distributions for every variable
figsize: figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str or List[str]
Type of plot to display (scatter, kde and/or hexbin)
gridsize: int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool, optional, deprecated, Defaults to True.
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
**Note:** this default is implemented in the body of the code, not in argument processing.
fill_last : bool
        If True fill the last contour of the 2D KDE plot. Defaults to False.
divergences: Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar: bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs: dicts, optional
Additional keywords passed to ``ax.scatter`` for divergences
scatter_kwargs:
Additional keywords passed to ``ax.plot`` when using scatter kind
kde_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_kde` when using kde kind
hexbin_kwargs: dict, optional
Additional keywords passed to ``ax.hexbin`` when using hexbin kind
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
marginal_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions
plotted in the diagonal.
point_estimate: str, optional
Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be
plotted using a scatter marker and vertical/horizontal lines.
point_estimate_kwargs: dict, optional
Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh)
point_estimate_marker_kwargs: dict, optional
Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh
reference_values: dict, optional
Reference values for the plotted variables. The Reference values will be plotted
using a scatter marker
reference_values_kwargs: dict, optional
Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences and select variables with regular expressions
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['^t', 'mu'],
... filter_vars="regex",
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
kind_boolean: Union[bool, List[bool]]
if isinstance(kind, str):
kind_boolean = kind in valid_kinds
else:
kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
if not np.all(kind_boolean):
raise ValueError((f"Plot type {kind} not recognized." "Plot type must be in {valid_kinds}"))
if fill_last or contour:
warnings.warn(
"fill_last and contour will be deprecated. Please use kde_kwargs",
UserWarning,
)
if plot_kwargs:
warnings.warn(
"plot_kwargs will be deprecated."
" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
UserWarning,
)
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
# Get posterior draws and combine chains
dataset = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, dataset, filter_vars)
plotters = list(
xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)
)
flat_var_names = [
labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters
]
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
UserWarning,
)
if gridsize == "auto":
gridsize = int(dataset.dims["draw"] ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise ValueError("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
plotters=plotters,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
scatter_kwargs=scatter_kwargs,
kde_kwargs=kde_kwargs,
hexbin_kwargs=hexbin_kwargs,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
marginal_kwargs=marginal_kwargs,
show=show,
marginals=marginals,
point_estimate=point_estimate,
point_estimate_kwargs=point_estimate_kwargs,
point_estimate_marker_kwargs=point_estimate_marker_kwargs,
reference_values=reference_values,
reference_values_kwargs=reference_values_kwargs,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax
|
35,773 |
def generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, *, filter_branch=None):
d = {
"name": f"{base_workflow_name}_upload",
"context": "org-member",
"requires": [base_workflow_name],
}
if btype == "wheel":
d["subfolder"] = "" if (os_type == "macos" or os_type == "macos_arm64") else cu_version + "/"
if filter_branch is not None:
d["filters"] = {
"branches": {"only": filter_branch},
"tags": {
# Using a raw string here to avoid having to escape
# anything
"only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
},
}
return {f"binary_{btype}_upload": d}
|
def generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, *, filter_branch=None):
d = {
"name": f"{base_workflow_name}_upload",
"context": "org-member",
"requires": [base_workflow_name],
}
if btype == "wheel":
d["subfolder"] = "" if os_type.startswith("macos") else cu_version + "/"
if filter_branch is not None:
d["filters"] = {
"branches": {"only": filter_branch},
"tags": {
# Using a raw string here to avoid having to escape
# anything
"only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
},
}
return {f"binary_{btype}_upload": d}
|
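A quick illustration of the dictionary generate_upload_workflow produces; the workflow name, OS type, and CUDA version below are invented for the example.

# Hypothetical inputs; only the shape of the output matters here.
job = generate_upload_workflow(
    "binary_linux_wheel_py3.9_cu113", "linux", "wheel", "cu113",
    filter_branch="release",
)
# job == {
#     "binary_wheel_upload": {
#         "name": "binary_linux_wheel_py3.9_cu113_upload",
#         "context": "org-member",
#         "requires": ["binary_linux_wheel_py3.9_cu113"],
#         "subfolder": "cu113/",
#         "filters": {
#             "branches": {"only": "release"},
#             "tags": {"only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"},
#         },
#     }
# }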
59,301 |
def assemble(asm, mode=CS_MODE_ARM):
if asm in assembly_cache[mode]:
return binascii.unhexlify(assembly_cache[mode][asm])
return binascii.unhexlify(_ks_assemble(asm, mode=mode))
|
def assemble(asm: str, mode=CS_MODE_ARM) -> bytes:
"""
Assemble the given string.
An assembly cache is first checked, and if there is no entry there, then Keystone is used.
"""
if asm in assembly_cache[mode]:
return binascii.unhexlify(assembly_cache[mode][asm])
return binascii.unhexlify(_ks_assemble(asm, mode=mode))
|
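A small, self-contained sketch of the cache layout assemble() consults; CS_MODE_ARM is stubbed with a placeholder value and the cached hex string is illustrative, not verified Keystone output.

import binascii

CS_MODE_ARM = 0  # placeholder for capstone.CS_MODE_ARM
assembly_cache = {CS_MODE_ARM: {"mov r0, r1": "0100a0e1"}}  # hex value illustrative

asm = "mov r0, r1"
if asm in assembly_cache[CS_MODE_ARM]:
    code = binascii.unhexlify(assembly_cache[CS_MODE_ARM][asm])  # bytes served from the cache
# On a cache miss, assemble() falls back to _ks_assemble(), i.e. Keystone.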
8,366 |
def _isophote_list_to_table(isophote_list, key_properties=['main']):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or \
`~photutils.isophote.IsophoteList` instance
A list of isophotes.
    key_properties : list of str
        A list of properties to export from the isophote_list.
        If key_properties = ['all'] or ['main'], all properties or only
        the main ones will be exported, respectively.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the selected or all isophote parameters.
"""
properties = OrderedDict()
isotable = QTable()
# main_properties: `List`
# A list of main parameters matching the original names of
# the isophote_list parameters
def __rename_properties(properties,
orig_names = ['int_err', 'eps', 'ellip_err',
'grad_r_error', 'nflag'],
new_names = ['intens_err', 'ellipticity',
'ellipticity_err', 'grad_rerror',
'nflag']
):
'''
Simple renaming for some of the isophote_list parameters.
Parameters
----------
properties: `OrderedDict`
An OrderedDict with the list of the isophote_list parameters
orig_names: `List`
A list of original names in the isophote_list parameters
to be renamed
new_names: `List`
A list of new names matching in length of the orig_names
Returns
-------
properties: `OrderedDict`
An OrderedDict with the list of the renamed isophote_list
parameters
'''
main_properties = ['sma', 'intens', 'int_err', 'eps', 'ellip_err',
'pa', 'pa_err', 'grad', 'grad_error',
'grad_r_error', 'x0', 'x0_err', 'y0', 'y0_err',
'ndata', 'nflag', 'niter', 'stop_code']
for an_item in main_properties:
if an_item in orig_names:
properties[an_item] = new_names[orig_names.index(an_item)]
else:
properties[an_item] = an_item
return properties
if 'all' in key_properties:
properties = _get_properties(isophote_list)
properties = __rename_properties(properties)
elif 'main' in key_properties:
properties = __rename_properties(properties)
else:
for an_item in key_properties:
properties[an_item] = an_item
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable
|
def _isophote_list_to_table(isophote_list, key_properties=['main']):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or \
`~photutils.isophote.IsophoteList` instance
A list of isophotes.
    key_properties : list of str
        A list of properties to export from the isophote_list.
        If key_properties = ['all'] or ['main'], all properties or only
        the main ones will be exported, respectively.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the selected or all isophote parameters.
"""
properties = OrderedDict()
isotable = QTable()
# main_properties: `List`
# A list of main parameters matching the original names of
# the isophote_list parameters
def __rename_properties(properties,
orig_names = ['int_err', 'eps', 'ellip_err',
'grad_r_error', 'nflag'],
new_names = ['intens_err', 'ellipticity',
'ellipticity_err', 'grad_rerror',
'nflag']
):
'''
Simple renaming for some of the isophote_list parameters.
Parameters
----------
properties: `OrderedDict`
An OrderedDict with the list of the isophote_list parameters
orig_names: `List`
A list of original names in the isophote_list parameters
to be renamed
new_names: `List`
A list of new names matching in length of the orig_names
Returns
-------
properties: `OrderedDict`
An OrderedDict with the list of the renamed isophote_list
parameters
'''
main_properties = ['sma', 'intens', 'int_err', 'eps', 'ellip_err',
'pa', 'pa_err', 'grad', 'grad_error',
'grad_r_error', 'x0', 'x0_err', 'y0', 'y0_err',
'ndata', 'nflag', 'niter', 'stop_code']
for an_item in main_properties:
if an_item in orig_names:
properties[an_item] = new_names[orig_names.index(an_item)]
else:
properties[an_item] = an_item
return properties
    if 'all' in key_properties:
properties = _get_properties(isophote_list)
properties = __rename_properties(properties)
elif 'main' in key_properties:
properties = __rename_properties(properties)
else:
for an_item in key_properties:
properties[an_item] = an_item
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable
|
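A tiny self-contained check of the position-angle conversion applied in the final loop of _isophote_list_to_table (radians scaled to degrees and tagged with an astropy unit); the sample values are arbitrary.

import numpy as np
import astropy.units as u

pa_rad = np.array([0.0, np.pi / 2, np.pi])   # position angles in radians
pa_deg = pa_rad * 180. / np.pi * u.deg       # same conversion as in the table builder
# pa_deg -> <Quantity [  0.,  90., 180.] deg>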
42,623 |
def get_airdrop_data(name: str, data_dir: Path) -> Tuple[Iterator, TextIO]:
airdrops_dir = data_dir / 'airdrops'
airdrops_dir.mkdir(parents=True, exist_ok=True)
filename = airdrops_dir / f'{name}.csv'
if not filename.is_file():
# if not cached, get it from the gist
try:
request = requests.get(url=AIRDROPS[name][0], timeout=DEFAULT_TIMEOUT_TUPLE)
except requests.exceptions.RequestException as e:
raise RemoteError(f'Airdrops Gist request failed due to {str(e)}') from e
try:
content = request.content.decode('utf-8')
# Here 20900 is the size of the smallest CSV file we track
if not csv.Sniffer().has_header(content) or len(request.content) < 20900:
raise csv.Error
with open(filename, 'w') as f:
f.write(content)
except OSError as e:
raise WriteError(f'Failed to save {filename} to disk') from e
except csv.Error as e:
log.debug(f'airdrop file {filename} contains invalid data {content}')
raise InvalidData(f'File {filename} contains invalid information. Check logs.') from e
# Verify the CSV file
csvfile = open(filename, 'r')
iterator = csv.reader(csvfile)
next(iterator) # skip header
return iterator, csvfile
|
def get_airdrop_data(name: str, data_dir: Path) -> Tuple[Iterator, TextIO]:
airdrops_dir = data_dir / 'airdrops'
airdrops_dir.mkdir(parents=True, exist_ok=True)
filename = airdrops_dir / f'{name}.csv'
if not filename.is_file():
# if not cached, get it from the gist
try:
request = requests.get(url=AIRDROPS[name][0], timeout=DEFAULT_TIMEOUT_TUPLE)
except requests.exceptions.RequestException as e:
raise RemoteError(f'Airdrops Gist request failed due to {str(e)}') from e
try:
content = request.content.decode('utf-8')
# Here 20900 is the size of the smallest CSV file we track
if not csv.Sniffer().has_header(content) or len(request.content) < 20900:
raise csv.Error
with open(filename, 'w') as f:
f.write(content)
except OSError as e:
raise WriteError(f'Failed to save {filename} to disk') from e
except csv.Error as e:
log.debug(f'airdrop file {filename} contains invalid data {content}')
raise InvalidData(f'File {filename} contains invalid data. Check logs.') from e
# Verify the CSV file
csvfile = open(filename, 'r')
iterator = csv.reader(csvfile)
next(iterator) # skip header
return iterator, csvfile
|
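A small standard-library illustration of the header check used in get_airdrop_data; the sample CSV content is invented, and csv.Sniffer is a heuristic, so the result is not guaranteed for every file.

import csv

sample = "address,amount\n0xabc,12.5\n0xdef,3.0\n"   # made-up airdrop-style CSV
print(csv.Sniffer().has_header(sample))              # heuristic check; True for header-like data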
57,664 |
def dehashed_search_command(client: Client, args: dict) -> tuple:
"""
    this command returns data regarding compromised assets given as arguments
    :param client: Demisto client
    :param args:
    - asset_type: email, ip_address, username, hashed_password, name, vin, address, phone, all_fields.
    - value: value to search for.
    - operation: the type of search to perform.
    - results_page_number: number of the next result page.
    - results_from: sets the results' start range
    - results_to: sets the results' end range
:return: Demisto outputs
"""
asset_type = args.get("asset_type")
operation = args.get("operation")
value = argToList(args.get("value"))
results_page_number = args.get("page")
results_from = args.get("results_from")
results_to = args.get("results_to")
result = client.dehashed_search(asset_type, value, operation, results_page_number)
if not isinstance(result, dict):
raise DemistoException(f"Got unexpected output from api: {result}")
query_data = result.get("entries")
if not query_data:
return "No matching results found", None, None
else:
filtered_results, results_from, results_to = filter_results(
query_data, results_from, results_to
)
if not results_page_number:
results_page_number = "1"
query_entries = createContext(
filtered_results, keyTransform=underscoreToCamelCase
)
headers = [key.replace("_", " ") for key in [*filtered_results[0].keys()]]
last_query = {
"ResultsFrom": results_from,
"ResultsTo": results_to,
"DisplayedResults": len(filtered_results),
"TotalResults": result.get("total"),
}
return (
tableToMarkdown(
f'DeHashed Search - Got {result.get("total")} results. Display only:'
f" {len(filtered_results)} Page number:{results_page_number}.",
filtered_results,
headers=headers,
removeNull=True,
headerTransform=pascalToSpace,
),
{
f"{INTEGRATION_CONTEXT_BRAND}.Search(val.Id==obj.Id)": query_entries,
f"{INTEGRATION_CONTEXT_BRAND}.LastQuery(true)": last_query,
},
filtered_results,
)
|
def dehashed_search_command(client: Client, args: dict) -> tuple:
"""
    this command returns data regarding compromised assets given as arguments
    :param client: Demisto client
    :param args:
    - asset_type: email, ip_address, username, hashed_password, name, vin, address, phone, all_fields.
    - value: value to search for.
    - operation: the type of search to perform.
    - results_page_number: number of the next result page.
    - results_from: sets the results' start range
    - results_to: sets the results' end range
:return: Demisto outputs
"""
asset_type = args.get("asset_type")
operation = args.get("operation")
value = argToList(args.get("value"))
results_page_number = args.get("page")
results_from = args.get("results_from")
results_to = args.get("results_to")
result = client.dehashed_search(asset_type, value, operation, results_page_number)
if not isinstance(result, dict):
raise DemistoException(f"Got unexpected output from api: {result}")
query_data = result.get("entries")
if not query_data:
return "No matching results found", None, None
else:
filtered_results, results_from, results_to = filter_results(
query_data, results_from, results_to
)
if not results_page_number:
results_page_number = "1"
query_entries = createContext(
filtered_results, keyTransform=underscoreToCamelCase
)
headers = [key.replace("_", " ") for key in [*filtered_results[0].keys()]]
last_query = {
"ResultsFrom": results_from,
"ResultsTo": results_to,
"DisplayedResults": len(filtered_results),
"TotalResults": result.get("total"),
}
return (
tableToMarkdown(
f'DeHashed Search - Got {result.get("total")} results. Display only:'
f" {len(filtered_results)} Page number:{results_page_number}.",
filtered_results,
headers=headers,
removeNull=True,
headerTransform=pascalToSpace,
),
{
f"{INTEGRATION_CONTEXT_BRAND}.Search(val.Id==obj.Id)": query_entries,
f"{INTEGRATION_CONTEXT_BRAND}.LastQuery(true)": last_query,
},
filtered_results,
)
|
51,422 |
def _localize(var, indexes_coords):
""" Speed up for linear and nearest neighbor method.
Only consider a subspace that is needed for the interpolation
"""
indexes = {}
for dim, [x, new_x] in indexes_coords.items():
index = x.to_index()
imin = index.get_loc(np.nanmin(new_x.values), method="nearest")
imax = index.get_loc(np.nanmax(new_x.values), method="nearest")
indexes[dim] = slice(max(imin - 2, 0), imax + 2)
indexes_coords[dim] = (x[indexes[dim]], new_x)
return var.isel(**indexes), indexes_coords
|
def _localize(var, indexes_coords):
""" Speed up for linear and nearest neighbor method.
Only consider a subspace that is needed for the interpolation
"""
indexes = {}
for dim, [x, new_x] in indexes_coords.items():
index = x.to_index()
if LooseVersion(np.__version__) < LooseVersion("1.18.1") and new_x.dtype.kind in "mM":
# In older versions of NumPy min and max (incorrectly) ignore NaT values
# for time-like types -- that is they behave like nanmin and nanmax
# should -- but nanmin and nanmax raise an error.
imin = index.get_loc(np.min(new_x.values), method="nearest")
imax = index.get_loc(np.max(new_x.values), method="nearest")
else:
imin = index.get_loc(np.nanmin(new_x.values), method="nearest")
imax = index.get_loc(np.nanmax(new_x.values), method="nearest")
indexes[dim] = slice(max(imin - 2, 0), imax + 2)
indexes_coords[dim] = (x[indexes[dim]], new_x)
return var.isel(**indexes), indexes_coords
|
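A minimal sketch of what _localize buys you on a toy DataArray; it assumes an xarray/pandas combination where Index.get_loc(..., method="nearest") is still supported, as the function above requires.

import numpy as np
import xarray as xr

var = xr.DataArray(np.arange(10.0), dims="x", coords={"x": np.arange(10.0)})
new_x = xr.DataArray([3.2, 3.8], dims="points")

sub, coords = _localize(var, {"x": (var["x"], new_x)})
# sub spans roughly x=1..5: just enough neighbouring points for linear or
# nearest-neighbour interpolation onto new_x, instead of carrying the full array.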
31,820 |
def http_request(method, url_suffix, params=None, DATA=None):
res = requests.request(method, baseURL + url_suffix, params=params, headers=HEADERS, data=DATA, verify=VERIFY)
if res.status_code >= 400:
try:
json_res = res.json()
if json_res.get('errors') is None:
return_error('Error in API call to the DataDog Integration [%d] - %s' % (res.status_code, res.reason))
else:
error_code = json_res.get('errors')[0]
return_error('Error: {}'.format(error_code))
except ValueError:
return_error('Error in API call to DataDog Integration [%d] - %s' % (res.status_code, res.reason))
if res.status_code == 204:
return res
else:
        try:
            json_res = res.json()
        except Exception as e:
            return_error("Unable to parse result - " + str(e))
        return json_res
|
def http_request(method, url_suffix, params=None, DATA=None):
res = requests.request(method, urljoin(baseURL, url_suffix), params=params, headers=HEADERS, data=DATA, verify=VERIFY)
if res.status_code >= 400:
try:
json_res = res.json()
if json_res.get('errors') is None:
return_error('Error in API call to the DataDog Integration [%d] - %s' % (res.status_code, res.reason))
else:
error_code = json_res.get('errors')[0]
return_error('Error: {}'.format(error_code))
except ValueError:
return_error('Error in API call to DataDog Integration [%d] - %s' % (res.status_code, res.reason))
if res.status_code == 204:
return res
else:
        try:
            json_res = res.json()
        except Exception as e:
            return_error("Unable to parse result - " + str(e))
        return json_res
|
28,617 |
def psens(data, *, component, var_names=None, alpha=0.5, delta=0.01, dask_kwargs=None):
"""Compute power-scaling sensitivity diagnostic.
Power-scales the prior or likelihood and calculates how much the posterior is affected.
Parameters
----------
data : obj
Any object that can be converted to an :class:`arviz.InferenceData` object.
Refer to documentation of :func:`arviz.convert_to_dataset` for details.
For ndarray: shape = (chain, draw).
For n-dimensional ndarray transform first to dataset with ``az.convert_to_dataset``.
var_names : list
        Names of variables to include in the power-scaling sensitivity diagnostic
component : str
        Select component to power-scale. Valid components are:
- "prior"
- "likelihood"
dask_kwargs : dict, optional
Dask related kwargs passed to :func:`~arviz.wrap_xarray_ufunc`.
Returns
-------
xarray.Dataset
Returns dataset of power-scaling sensitivity diagnostic values.
Notes
-----
The diagnostic is computed by power-scaling the specified component (prior or likelihood)
and determining the degree to which the posterior changes. It uses Pareto-smoothed
importance sampling to avoid refitting the model.
References
----------
* Kallioinen et al. (2022) see https://arxiv.org/abs/2107.14054
"""
dataset = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names, dataset)
dataset = dataset if var_names is None else dataset[var_names]
if (component == "likelihood"):
component_draws = np.sum(data["log_likelihood"].stack(draws=("chain", "draw")).obs.values, axis=0)
elif (component == "prior"):
component_draws = data["log_prior"].stack(draws = ("chain", "draw")).values
lower_w = np.exp(_powerscale_lw(component_draws=component_draws, alpha=alpha))
lower_w = lower_w/np.sum(lower_w)
upper_w = np.exp(_powerscale_lw(component_draws=component_draws, alpha=alpha))
upper_w = upper_w/np.sum(upper_w)
ufunc_kwargs = {"ravel": False}
func_kwargs = {
"lower_weights": lower_w,
"upper_weights": upper_w,
"delta": delta
}
return _wrap_xarray_ufunc(
_powerscale_sens,
dataset,
ufunc_kwargs=ufunc_kwargs,
func_kwargs=func_kwargs,
dask_kwargs=dask_kwargs,
)
|
def psens(data, *, component, var_names=None, alpha=0.5, delta=0.01, dask_kwargs=None):
"""Compute power-scaling sensitivity diagnostic.
Power-scales the prior or likelihood and calculates how much the posterior is affected.
Parameters
----------
data : obj
Any object that can be converted to an :class:`arviz.InferenceData` object.
Refer to documentation of :func:`arviz.convert_to_dataset` for details.
For ndarray: shape = (chain, draw).
For n-dimensional ndarray transform first to dataset with ``az.convert_to_dataset``.
var_names : list
        Names of variables to include in the power-scaling sensitivity diagnostic
component : str
        Select component to power-scale. Valid components are:
- "prior"
- "likelihood"
dask_kwargs : dict, optional
Dask related kwargs passed to :func:`~arviz.wrap_xarray_ufunc`.
Returns
-------
xarray.Dataset
Returns dataset of power-scaling sensitivity diagnostic values.
Notes
-----
The diagnostic is computed by power-scaling the specified component (prior or likelihood)
and determining the degree to which the posterior changes. It uses Pareto-smoothed
importance sampling to avoid refitting the model.
References
----------
* Kallioinen et al. (2022) see https://arxiv.org/abs/2107.14054
"""
dataset = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names, dataset)
dataset = dataset if var_names is None else dataset[var_names]
    component_draws = data["log_prior" if component == "prior" else "log_likelihood"]
lower_w = np.exp(_powerscale_lw(component_draws=component_draws, alpha=alpha))
lower_w = lower_w/np.sum(lower_w)
upper_w = np.exp(_powerscale_lw(component_draws=component_draws, alpha=alpha))
upper_w = upper_w/np.sum(upper_w)
ufunc_kwargs = {"ravel": False}
func_kwargs = {
"lower_weights": lower_w,
"upper_weights": upper_w,
"delta": delta
}
return _wrap_xarray_ufunc(
_powerscale_sens,
dataset,
ufunc_kwargs=ufunc_kwargs,
func_kwargs=func_kwargs,
dask_kwargs=dask_kwargs,
)
|
13,885 |
def pytest_generate_tests(metafunc):
"""generate a list of all available integration tests."""
is_windows = platform.system() == "Windows"
global skip_clean
skip_clean = metafunc.config.getoption("skip_clean")
generate_reference = metafunc.config.getoption("generate_reference")
update_reference = metafunc.config.getoption("update_reference")
archive_differences = metafunc.config.getoption("archive_differences")
collected_params = []
if archive_differences: # pragma: no cover
diffs_zip = os.path.join(basedir, "diff.zip")
# Create an empty ZIP
zipfile.ZipFile(diffs_zip, mode="w").close()
for name in findtests(basedir):
targets = parse_makefile_for_available_targets(
os.path.join(basedir, name, "Makefile")
)
# check that the "run" target lists no unknown formats
target_run = targets.get("run", set())
unknown_formats = target_run.difference(KNOWN_FORMATS)
if unknown_formats:
raise ValueError(
"{}/Makefile target 'run' references unknown format {}".format(
name, unknown_formats
)
)
# check that all "run" targets are actually available
unresolved_prereqs = target_run.difference(targets)
if unresolved_prereqs:
raise ValueError(
"{}/Makefile target 'run' has unresolved prerequisite {}".format(
name, unresolved_prereqs
)
)
# check that all available known formats are also listed in the "run" target
unreferenced_formats = (
set(KNOWN_FORMATS).intersection(targets).difference(target_run)
)
if unreferenced_formats:
raise ValueError(
"{}/Makefile target 'run' doesn't reference available target {}".format(
name, unreferenced_formats
)
)
for format in KNOWN_FORMATS:
# only test formats where the Makefile provides a target
if format not in targets:
continue
needs_symlinks = any(
[
name == "linked" and format == "html",
name == "filter-relative-lib",
name == "filter-relative-lib-from-unfiltered-tracefile",
]
)
marks = [
pytest.mark.xfail(
needs_symlinks and is_windows,
reason="have yet to figure out symlinks on Windows",
),
pytest.mark.xfail(
name == "exclude-throw-branches"
and format == "html"
and is_windows,
reason="branch coverage details seem to be platform-dependent",
),
pytest.mark.xfail(
name == "rounding" and is_windows,
reason="branch coverage seem to be platform-dependent",
),
]
collected_params.append(
pytest.param(
name,
format,
targets,
generate_reference,
update_reference,
archive_differences,
marks=marks,
id="-".join([name, format]),
)
)
metafunc.parametrize(
"name, format, available_targets, generate_reference, update_reference, archive_differences",
collected_params,
indirect=False,
scope="module",
)
|
def pytest_generate_tests(metafunc):
"""generate a list of all available integration tests."""
is_windows = platform.system() == "Windows"
global skip_clean
skip_clean = metafunc.config.getoption("skip_clean")
generate_reference = metafunc.config.getoption("generate_reference")
update_reference = metafunc.config.getoption("update_reference")
archive_differences = metafunc.config.getoption("archive_differences")
collected_params = []
if archive_differences: # pragma: no cover
diffs_zip = os.path.join(basedir, "diff.zip")
# Create an empty ZIP
zipfile.ZipFile(diffs_zip, mode="w").close()
for name in findtests(basedir):
targets = parse_makefile_for_available_targets(
os.path.join(basedir, name, "Makefile")
)
# check that the "run" target lists no unknown formats
target_run = targets.get("run", set())
unknown_formats = target_run.difference(KNOWN_FORMATS)
if unknown_formats:
raise ValueError(
"{}/Makefile target 'run' references unknown format {}".format(
name, unknown_formats
)
)
# check that all "run" targets are actually available
unresolved_prereqs = target_run.difference(targets)
if unresolved_prereqs: # pragma: no cover
raise ValueError(
"{}/Makefile target 'run' has unresolved prerequisite {}".format(
name, unresolved_prereqs
)
)
# check that all available known formats are also listed in the "run" target
unreferenced_formats = (
set(KNOWN_FORMATS).intersection(targets).difference(target_run)
)
if unreferenced_formats:
raise ValueError(
"{}/Makefile target 'run' doesn't reference available target {}".format(
name, unreferenced_formats
)
)
for format in KNOWN_FORMATS:
# only test formats where the Makefile provides a target
if format not in targets:
continue
needs_symlinks = any(
[
name == "linked" and format == "html",
name == "filter-relative-lib",
name == "filter-relative-lib-from-unfiltered-tracefile",
]
)
marks = [
pytest.mark.xfail(
needs_symlinks and is_windows,
reason="have yet to figure out symlinks on Windows",
),
pytest.mark.xfail(
name == "exclude-throw-branches"
and format == "html"
and is_windows,
reason="branch coverage details seem to be platform-dependent",
),
pytest.mark.xfail(
name == "rounding" and is_windows,
reason="branch coverage seem to be platform-dependent",
),
]
collected_params.append(
pytest.param(
name,
format,
targets,
generate_reference,
update_reference,
archive_differences,
marks=marks,
id="-".join([name, format]),
)
)
metafunc.parametrize(
"name, format, available_targets, generate_reference, update_reference, archive_differences",
collected_params,
indirect=False,
scope="module",
)
|
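For context, a compact, self-contained sketch of the parametrization pattern used in pytest_generate_tests: pytest.param entries carrying conditional xfail marks and explicit ids. The test and parameter names here are invented.

import platform
import pytest

is_windows = platform.system() == "Windows"

@pytest.mark.parametrize(
    "name, fmt",
    [
        pytest.param(
            "linked", "html",
            marks=pytest.mark.xfail(is_windows, reason="symlinks on Windows"),
            id="linked-html",
        ),
        pytest.param("rounding", "txt", id="rounding-txt"),
    ],
)
def test_example(name, fmt):
    assert name and fmt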
40,551 |
def load_arguments(self, _):
with self.argument_context('spring-cloud') as c:
c.argument('resource_group', arg_type=resource_group_name_type)
c.argument('name', options_list=[
'--name', '-n'], help='Name of Azure Spring Cloud.')
# A refactoring work item to move validators to command level to reduce the duplications.
# https://dev.azure.com/msazure/AzureDMSS/_workitems/edit/11002857/
with self.argument_context('spring-cloud create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=validate_location)
c.argument('sku', arg_type=sku_type, default='Standard')
        c.argument('reserved_cidr_range', help='Comma-separated list of IP address ranges in CIDR format. The IP ranges are reserved to host the underlying Azure Spring Cloud infrastructure. Provide at least 3 unused /16 IP ranges that must not overlap with any subnet IP ranges.', validator=validate_vnet_required_parameters)
c.argument('vnet', help='The name or ID of an existing Virtual Network into which to deploy the Spring Cloud instance.', validator=validate_vnet_required_parameters)
c.argument('app_subnet', help='The name or ID of an existing subnet in "vnet" into which to deploy the Spring Cloud app. Required when deploying into a Virtual Network. Smaller subnet sizes are supported, please refer: https://aka.ms/azure-spring-cloud-smaller-subnet-vnet-docs', validator=validate_vnet_required_parameters)
c.argument('service_runtime_subnet', options_list=['--service-runtime-subnet', '--svc-subnet'], help='The name or ID of an existing subnet in "vnet" into which to deploy the Spring Cloud service runtime. Required when deploying into a Virtual Network.', validator=validate_vnet)
c.argument('service_runtime_network_resource_group', options_list=['--service-runtime-network-resource-group', '--svc-nrg'], help='The resource group where all network resources for Azure Spring Cloud service runtime will be created in.', validator=validate_node_resource_group)
c.argument('app_network_resource_group', options_list=['--app-network-resource-group', '--app-nrg'], help='The resource group where all network resources for apps will be created in.', validator=validate_node_resource_group)
c.argument('enable_java_agent',
arg_type=get_three_state_flag(),
help="Java in process agent is now GA-ed and used by default when Application Insights enabled. "
"This parameter is no longer needed and will be removed in future release.",
validator=validate_java_agent_parameters,
deprecate_info=c.deprecate(target='--enable-java-agent', hide=True))
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_tracing_parameters_asc_create)
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_tracing_parameters_asc_create)
c.argument('sampling_rate',
type=float,
help="Sampling Rate of application insights. Minimum is 0, maximum is 100.",
validator=validate_tracing_parameters_asc_create)
c.argument('disable_app_insights',
arg_type=get_three_state_flag(),
help="Disable Application Insights, "
"if not disabled and no existing Application Insights specified with "
"--app-insights-key or --app-insights, "
"will create a new Application Insights instance in the same resource group.",
validator=validate_tracing_parameters_asc_create)
c.argument('zone_redundant',
arg_type=get_three_state_flag(),
help="Create your Azure Spring Cloud service in an Azure availability zone or not, "
"this could only be supported in several regions at the moment ",
default=False, is_preview=True)
c.argument('build_pool_size',
arg_type=get_enum_type(['S1', 'S2', 'S3', 'S4', 'S5']),
validator=validate_build_pool_size,
default='S1',
is_preview=True,
help='Only support in enterprise tier now. Size of build agent pool. See Azure Spring Cloud Doc for size info.')
with self.argument_context('spring-cloud update') as c:
c.argument('sku', arg_type=sku_type)
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --app-insights-key',
redirect='az spring-cloud app-insights update --app-insights-key',
hide=True))
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --app-insights',
redirect='az spring-cloud app-insights update --app-insights',
hide=True))
c.argument('disable_app_insights',
arg_type=get_three_state_flag(),
help="Disable Application Insights, "
"if not disabled and no existing Application Insights specified with "
"--app-insights-key or --app-insights, "
"will create a new Application Insights instance in the same resource group.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --disable-app-insights',
redirect='az spring-cloud app-insights update --disable',
hide=True))
c.argument('build_pool_size',
arg_type=get_enum_type(['S1', 'S2', 'S3', 'S4', 'S5']),
is_preview=True,
help='Only support in enterprise tier now. Size of build agent pool. See Azure Spring Cloud Doc for size info.')
for scope in ['spring-cloud create', 'spring-cloud update']:
with self.argument_context(scope) as c:
c.argument('tags', arg_type=tags_type)
with self.argument_context('spring-cloud test-endpoint renew-key') as c:
c.argument('type', type=str, arg_type=get_enum_type(
TestKeyType), help='Type of test-endpoint key')
with self.argument_context('spring-cloud app') as c:
c.argument('service', service_name_type)
c.argument('name', name_type, help='Name of app.')
with self.argument_context('spring-cloud app create') as c:
c.argument('assign_endpoint', arg_type=get_three_state_flag(),
help='If true, assign endpoint URL for direct access.', default=False,
options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)])
c.argument('assign_identity', arg_type=get_three_state_flag(),
help='If true, assign managed service identity.')
c.argument('cpu', arg_type=cpu_type, default="1")
c.argument('memory', arg_type=memort_type, default="1Gi")
c.argument('instance_count', type=int,
                   default=1, help='Number of instances.', validator=validate_instance_count)
c.argument('persistent_storage', type=str,
help='A json file path for the persistent storages to be mounted to the app')
c.argument('loaded_public_certificate_file', options_list=['--loaded-public-certificate-file', '-f'], type=str,
                   help='A json file path that indicates the certificates to be loaded to the app')
with self.argument_context('spring-cloud app update') as c:
c.argument('assign_endpoint', arg_type=get_three_state_flag(),
help='If true, assign endpoint URL for direct access.',
options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)])
c.argument('https_only', arg_type=get_three_state_flag(), help='If true, access app via https', default=False)
c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls')
c.argument('persistent_storage', type=str,
help='A json file path for the persistent storages to be mounted to the app')
c.argument('loaded_public_certificate_file', type=str, options_list=['--loaded-public-certificate-file', '-f'],
                   help='A json file path that indicates the certificates to be loaded to the app')
with self.argument_context('spring-cloud app append-persistent-storage') as c:
c.argument('storage_name', type=str,
help='Name of the storage resource you created in Azure Spring Cloud.')
        c.argument('persistent_storage_type', options_list=['--persistent-storage-type', '-t'], type=str, help='Type of the persistent storage volume.')
c.argument('share_name', type=str,
help="The name of the pre-created file share. "
"ShareName should be provided only if the type of the persistent storage volume is AzureFileVolume.")
c.argument('mount_path', type=str, help='The path for the persistent storage volume to be mounted.')
c.argument('mount_options', nargs='+', help='[optional] The mount options for the persistent storage volume.', default=None)
c.argument('read_only', arg_type=get_three_state_flag(), help='[optional] If true, the persistent storage volume will be read only.', default=False)
for scope in ['spring-cloud app update', 'spring-cloud app start', 'spring-cloud app stop', 'spring-cloud app restart', 'spring-cloud app deploy', 'spring-cloud app scale', 'spring-cloud app set-deployment', 'spring-cloud app show-deploy-log']:
with self.argument_context(scope) as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('main_entry', options_list=[
'--main-entry', '-m'], help="The path to the .NET executable relative to zip root.")
for scope in ['spring-cloud app identity', 'spring-cloud app unset-deployment']:
with self.argument_context(scope) as c:
c.argument('name', name_type, help='Name of app.', validator=active_deployment_exist)
with self.argument_context('spring-cloud app identity assign') as c:
c.argument('scope', help="The scope the managed identity has access to")
c.argument('role', help="Role name or id the managed identity will be assigned")
def prepare_logs_argument(c):
'''`app log tail` is deprecated. `app logs` is the new choice. They share the same command processor.'''
c.argument('instance', options_list=['--instance', '-i'], help='Name of an existing instance of the deployment.')
c.argument('lines', type=int, help='Number of lines to show. Maximum is 10000', validator=validate_log_lines)
c.argument('follow', options_list=['--follow ', '-f'], help='Specify if the logs should be streamed.', action='store_true')
c.argument('since', help='Only return logs newer than a relative duration like 5s, 2m, or 1h. Maximum is 1h', validator=validate_log_since)
c.argument('limit', type=int, help='Maximum kilobytes of logs to return. Ceiling number is 2048.', validator=validate_log_limit)
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('format_json', nargs='?', const='{timestamp} {level:>5} [{thread:>15.15}] {logger{39}:<40.40}: {message}\n{stackTrace}',
help='Format JSON logs if structured log is enabled')
with self.argument_context('spring-cloud app logs') as c:
prepare_logs_argument(c)
with self.argument_context('spring-cloud app log tail') as c:
prepare_logs_argument(c)
with self.argument_context('spring-cloud app set-deployment') as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app.', validator=ensure_not_active_deployment)
for scope in ['spring-cloud app create', 'spring-cloud app update']:
with self.argument_context(scope) as c:
c.argument('enable_persistent_storage', arg_type=get_three_state_flag(),
help='If true, mount a 50G (Standard Pricing tier) or 1G (Basic Pricing tier) disk with default path.')
for scope in ['spring-cloud app update', 'spring-cloud app deployment create', 'spring-cloud app deploy', 'spring-cloud app create']:
with self.argument_context(scope) as c:
c.argument('runtime_version', arg_type=get_enum_type(RuntimeVersion),
help='Runtime version of used language')
c.argument('jvm_options', type=str, validator=validate_jvm_options,
help="A string containing jvm options, use '=' instead of ' ' for this argument to avoid bash parse error, eg: --jvm-options='-Xms1024m -Xmx2048m'")
c.argument('env', env_type)
c.argument('disable_probe', arg_type=get_three_state_flag(), help='If true, disable the liveness and readiness probe.')
with self.argument_context('spring-cloud app scale') as c:
c.argument('cpu', arg_type=cpu_type)
c.argument('memory', arg_type=memort_type)
        c.argument('instance_count', type=int, help='Number of instances.', validator=validate_instance_count)
for scope in ['spring-cloud app deploy', 'spring-cloud app deployment create']:
with self.argument_context(scope) as c:
c.argument(
'artifact_path', options_list=['--artifact-path',
c.deprecate(target='--jar-path', redirect='--artifact-path', hide=True),
c.deprecate(target='-p', redirect='--artifact-path', hide=True)],
help='Deploy the specified pre-built artifact (jar or netcore zip).', validator=validate_jar)
c.argument(
'disable_validation', arg_type=get_three_state_flag(),
help='If true, disable jar validation.')
c.argument('builder', help='(Enterprise Tier Only) Build service builder used to build the executable.', default='default', is_preview=True)
c.argument(
'main_entry', options_list=[
'--main-entry', '-m'], help="A string containing the path to the .NET executable relative to zip root.")
c.argument(
'target_module', help='Child module to be deployed, required for multiple jar packages built from source code.', arg_group='Source Code deploy')
c.argument(
'version', help='Deployment version, keep unchanged if not set.')
c.argument(
'container_image', help='The container image tag.', arg_group='Custom Container')
c.argument(
'container_registry', default='docker.io', help='The registry of the container image.', arg_group='Custom Container')
c.argument(
'registry_username', help='The username of the container registry.', arg_group='Custom Container')
c.argument(
'registry_password', help='The password of the container registry.', arg_group='Custom Container')
c.argument(
'container_command', help='The command of the container image.', nargs='*', arg_group='Custom Container')
c.argument(
'container_args', help='The arguments of the container image.', nargs='*', arg_group='Custom Container')
with self.argument_context('spring-cloud app deploy') as c:
c.argument('source_path', arg_type=source_path_type, validator=validate_deloy_path)
with self.argument_context('spring-cloud app deployment create') as c:
c.argument('source_path', arg_type=source_path_type, validator=validate_deloyment_create_path)
with self.argument_context('spring-cloud app deployment create') as c:
        c.argument('skip_clone_settings', help='If set, do not copy settings from the production deployment when creating the staging deployment.',
action='store_true')
c.argument('cpu', arg_type=cpu_type)
c.argument('memory', arg_type=memort_type)
        c.argument('instance_count', type=int, help='Number of instances.', validator=validate_instance_count)
with self.argument_context('spring-cloud app deployment') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=validate_app_name)
c.argument('name', name_type, help='Name of deployment.')
for scope in ['spring-cloud app deployment generate-heap-dump', 'spring-cloud app deployment generate-thread-dump']:
with self.argument_context(scope) as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('app_instance', help='Target app instance you want to dump.')
c.argument('file_path', help='The mount file path for your dump file.')
with self.argument_context('spring-cloud app deployment start-jfr') as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('app_instance', help='Target app instance you want to dump.')
c.argument('file_path', help='The mount file path for your dump file.')
c.argument('duration', type=str, default="60s", help='Duration of JFR.')
with self.argument_context('spring-cloud app binding') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=active_deployment_exist_under_app)
c.argument('name', name_type, help='Name of service binding.')
for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding mysql add', 'spring-cloud app binding redis add']:
with self.argument_context(scope) as c:
c.argument('resource_id', validator=validate_resource_id,
help='Azure resource ID of the service to bind with.')
for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding cosmos update']:
with self.argument_context(scope) as c:
c.argument(
'database_name', help='Name of database. Required for mongo, sql, gremlin')
c.argument(
'key_space', help='Cassandra key space. Required for cassandra')
c.argument('collection_name',
help='Name of collection. Required for gremlin')
with self.argument_context('spring-cloud app binding cosmos add') as c:
c.argument('api_type', help='Type of API.', arg_type=get_enum_type(
ApiType), validator=validate_cosmos_type)
for scope in ['spring-cloud app binding mysql add', 'spring-cloud app binding mysql update']:
with self.argument_context(scope) as c:
c.argument('key', help='API key of the service.')
c.argument('username', help='Username of the database')
c.argument('database_name', help='Database name')
for scope in ['spring-cloud app binding redis add', 'spring-cloud app binding redis update']:
with self.argument_context(scope) as c:
c.argument('key', help='Api key of the service.')
c.argument('disable_ssl', arg_type=get_three_state_flag(), help='If true, disable SSL. If false, enable SSL.', default=False)
with self.argument_context('spring-cloud app append-loaded-public-certificate') as c:
c.argument('certificate_name', help='Name of the certificate to be appended')
c.argument('load_trust_store', arg_type=get_three_state_flag(), help='If true, the certificate would be loaded into trust store for Java applications', default=False)
with self.argument_context('spring-cloud config-server set') as c:
c.argument('config_file',
help='A yaml file path for the configuration of Spring Cloud config server')
for scope in ['spring-cloud config-server git set', 'spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']:
with self.argument_context(scope) as c:
c.argument('uri', help='Uri of the added config.')
c.argument('label', help='Label of the added config.')
c.argument(
'search_paths', help='search_paths of the added config, use , as delimiter for multiple paths.')
c.argument('username', help='Username of the added config.')
c.argument('password', help='Password of the added config.')
c.argument('host_key', help='Host key of the added config.')
c.argument('host_key_algorithm',
help='Host key algorithm of the added config.')
c.argument('private_key', help='Private_key of the added config.')
c.argument('strict_host_key_checking',
help='Strict_host_key_checking of the added config.')
for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update', 'spring-cloud config-server git repo remove']:
with self.argument_context(scope) as c:
c.argument('repo_name', help='Name of the repo.')
for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']:
with self.argument_context(scope) as c:
c.argument(
'pattern', help='Pattern of the repo, use , as delimiter for multiple patterns')
with self.argument_context('spring-cloud test-endpoint list') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=validate_app_name)
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=validate_deployment_name)
with self.argument_context('spring-cloud storage') as c:
c.argument('service', service_name_type)
c.argument('name', help='Name of storage.')
with self.argument_context('spring-cloud storage add') as c:
c.argument('storage_type', help='The type of the storage. e.g. StorageAccount')
c.argument('account_name', help='The name of the storage account.')
c.argument('account_key', help='The account key of the storage account.')
with self.argument_context('spring-cloud storage update') as c:
c.argument('storage_type', help='The type of the storage. e.g. StorageAccount')
c.argument('account_name', help='The name of the storage account.')
c.argument('account_key', help='The account key of the storage account.')
with self.argument_context('spring-cloud certificate') as c:
c.argument('service', service_name_type)
c.argument('name', help='Name of certificate.')
with self.argument_context('spring-cloud certificate add') as c:
c.argument('vault_uri', help='The key vault uri where the certificate is stored')
c.argument('vault_certificate_name', help='The certificate name in key vault')
c.argument('only_public_cert', arg_type=get_three_state_flag(),
help='If true, only import public certificate part from key vault.', default=False)
c.argument('public_certificate_file', options_list=['--public-certificate-file', '-f'],
help='A file path for the public certificate to be uploaded')
with self.argument_context('spring-cloud certificate list') as c:
c.argument('certificate_type', help='Type of uploaded certificate',
arg_type=get_enum_type(['KeyVaultCertificate', 'ContentCertificate']))
with self.argument_context('spring-cloud app custom-domain') as c:
c.argument('service', service_name_type)
c.argument('app', app_name_type, help='Name of app.', validator=active_deployment_exist_under_app)
c.argument('domain_name', help='Name of custom domain.')
with self.argument_context('spring-cloud app custom-domain bind') as c:
c.argument('certificate', type=str, help='Certificate name in Azure Spring Cloud.')
c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls')
with self.argument_context('spring-cloud app custom-domain update') as c:
c.argument('certificate', help='Certificate name in Azure Spring Cloud.')
c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls')
with self.argument_context('spring-cloud app-insights update') as c:
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_app_insights_parameters)
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_app_insights_parameters)
c.argument('sampling_rate',
type=float,
help="Sampling Rate of application insights. Maximum is 100.",
validator=validate_app_insights_parameters)
c.argument('disable',
arg_type=get_three_state_flag(),
help="Disable Application Insights.",
validator=validate_app_insights_parameters)
for scope in ['spring-cloud service-registry']:
with self.argument_context(scope) as c:
c.argument('service', service_name_type, validator=only_support_enterprise)
with self.argument_context('spring-cloud service-registry bind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
with self.argument_context('spring-cloud service-registry unbind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
for scope in ['spring-cloud build-service builder create',
'spring-cloud build-service builder update']:
with self.argument_context(scope) as c:
c.argument('builder_json', type=str, help="The JSON array of builder.", validator=validate_builder_resource)
c.argument('builder_file', type=str, help="The file path of JSON array of builder.", validator=validate_builder_resource)
with self.argument_context('spring-cloud build-service builder create') as c:
c.argument('name', type=str, help="The builder name.", validator=validate_builder_create)
with self.argument_context('spring-cloud build-service builder update') as c:
c.argument('name', type=str, help="The builder name.", validator=validate_builder_update)
for scope in ['spring-cloud build-service builder show',
'spring-cloud build-service builder delete']:
with self.argument_context(scope) as c:
c.argument('name', type=str, help="The builder name.")
|
def load_arguments(self, _):
with self.argument_context('spring-cloud') as c:
c.argument('resource_group', arg_type=resource_group_name_type)
c.argument('name', options_list=[
'--name', '-n'], help='Name of Azure Spring Cloud.')
# A refactoring work item to move validators to command level to reduce the duplications.
# https://dev.azure.com/msazure/AzureDMSS/_workitems/edit/11002857/
with self.argument_context('spring-cloud create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=validate_location)
c.argument('sku', arg_type=sku_type, default='Standard')
c.argument('reserved_cidr_range', help='Comma-separated list of IP address ranges in CIDR format. The IP ranges are reserved to host underlying Azure Spring Cloud infrastructure, which should be 3 at least /16 unused IP ranges, must not overlap with any Subnet IP ranges.', validator=validate_vnet_required_parameters)
c.argument('vnet', help='The name or ID of an existing Virtual Network into which to deploy the Spring Cloud instance.', validator=validate_vnet_required_parameters)
c.argument('app_subnet', help='The name or ID of an existing subnet in "vnet" into which to deploy the Spring Cloud app. Required when deploying into a Virtual Network. Smaller subnet sizes are supported, please refer: https://aka.ms/azure-spring-cloud-smaller-subnet-vnet-docs', validator=validate_vnet_required_parameters)
c.argument('service_runtime_subnet', options_list=['--service-runtime-subnet', '--svc-subnet'], help='The name or ID of an existing subnet in "vnet" into which to deploy the Spring Cloud service runtime. Required when deploying into a Virtual Network.', validator=validate_vnet)
c.argument('service_runtime_network_resource_group', options_list=['--service-runtime-network-resource-group', '--svc-nrg'], help='The resource group where all network resources for Azure Spring Cloud service runtime will be created in.', validator=validate_node_resource_group)
c.argument('app_network_resource_group', options_list=['--app-network-resource-group', '--app-nrg'], help='The resource group where all network resources for apps will be created in.', validator=validate_node_resource_group)
c.argument('enable_java_agent',
arg_type=get_three_state_flag(),
help="Java in process agent is now GA-ed and used by default when Application Insights enabled. "
"This parameter is no longer needed and will be removed in future release.",
validator=validate_java_agent_parameters,
deprecate_info=c.deprecate(target='--enable-java-agent', hide=True))
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_tracing_parameters_asc_create)
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_tracing_parameters_asc_create)
c.argument('sampling_rate',
type=float,
help="Sampling Rate of application insights. Minimum is 0, maximum is 100.",
validator=validate_tracing_parameters_asc_create)
c.argument('disable_app_insights',
arg_type=get_three_state_flag(),
help="Disable Application Insights, "
"if not disabled and no existing Application Insights specified with "
"--app-insights-key or --app-insights, "
"will create a new Application Insights instance in the same resource group.",
validator=validate_tracing_parameters_asc_create)
c.argument('zone_redundant',
arg_type=get_three_state_flag(),
help="Create your Azure Spring Cloud service in an Azure availability zone or not, "
"this could only be supported in several regions at the moment ",
default=False, is_preview=True)
c.argument('build_pool_size',
arg_type=get_enum_type(['S1', 'S2', 'S3', 'S4', 'S5']),
validator=validate_build_pool_size,
default='S1',
is_preview=True,
help='(Enterprise Tier Only) Size of build agent pool. See Azure Spring Cloud Doc for size info.')
with self.argument_context('spring-cloud update') as c:
c.argument('sku', arg_type=sku_type)
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --app-insights-key',
redirect='az spring-cloud app-insights update --app-insights-key',
hide=True))
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --app-insights',
redirect='az spring-cloud app-insights update --app-insights',
hide=True))
c.argument('disable_app_insights',
arg_type=get_three_state_flag(),
help="Disable Application Insights, "
"if not disabled and no existing Application Insights specified with "
"--app-insights-key or --app-insights, "
"will create a new Application Insights instance in the same resource group.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --disable-app-insights',
redirect='az spring-cloud app-insights update --disable',
hide=True))
c.argument('build_pool_size',
arg_type=get_enum_type(['S1', 'S2', 'S3', 'S4', 'S5']),
is_preview=True,
help='Only support in enterprise tier now. Size of build agent pool. See Azure Spring Cloud Doc for size info.')
for scope in ['spring-cloud create', 'spring-cloud update']:
with self.argument_context(scope) as c:
c.argument('tags', arg_type=tags_type)
with self.argument_context('spring-cloud test-endpoint renew-key') as c:
c.argument('type', type=str, arg_type=get_enum_type(
TestKeyType), help='Type of test-endpoint key')
with self.argument_context('spring-cloud app') as c:
c.argument('service', service_name_type)
c.argument('name', name_type, help='Name of app.')
with self.argument_context('spring-cloud app create') as c:
c.argument('assign_endpoint', arg_type=get_three_state_flag(),
help='If true, assign endpoint URL for direct access.', default=False,
options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)])
c.argument('assign_identity', arg_type=get_three_state_flag(),
help='If true, assign managed service identity.')
c.argument('cpu', arg_type=cpu_type, default="1")
c.argument('memory', arg_type=memort_type, default="1Gi")
c.argument('instance_count', type=int,
default=1, help='Number of instance.', validator=validate_instance_count)
c.argument('persistent_storage', type=str,
help='A json file path for the persistent storages to be mounted to the app')
c.argument('loaded_public_certificate_file', options_list=['--loaded-public-certificate-file', '-f'], type=str,
help='A json file path indicates the certificates which would be loaded to app')
with self.argument_context('spring-cloud app update') as c:
c.argument('assign_endpoint', arg_type=get_three_state_flag(),
help='If true, assign endpoint URL for direct access.',
options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)])
c.argument('https_only', arg_type=get_three_state_flag(), help='If true, access app via https', default=False)
c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls')
c.argument('persistent_storage', type=str,
help='A json file path for the persistent storages to be mounted to the app')
c.argument('loaded_public_certificate_file', type=str, options_list=['--loaded-public-certificate-file', '-f'],
help='A json file path indicates the certificates which would be loaded to app')
with self.argument_context('spring-cloud app append-persistent-storage') as c:
c.argument('storage_name', type=str,
help='Name of the storage resource you created in Azure Spring Cloud.')
c.argument('persistent_storage_type', options_list=['--persistent-storage-type', '-t'], type=str, help='Type of the persistent storage volumed.')
c.argument('share_name', type=str,
help="The name of the pre-created file share. "
"ShareName should be provided only if the type of the persistent storage volume is AzureFileVolume.")
c.argument('mount_path', type=str, help='The path for the persistent storage volume to be mounted.')
c.argument('mount_options', nargs='+', help='[optional] The mount options for the persistent storage volume.', default=None)
c.argument('read_only', arg_type=get_three_state_flag(), help='[optional] If true, the persistent storage volume will be read only.', default=False)
for scope in ['spring-cloud app update', 'spring-cloud app start', 'spring-cloud app stop', 'spring-cloud app restart', 'spring-cloud app deploy', 'spring-cloud app scale', 'spring-cloud app set-deployment', 'spring-cloud app show-deploy-log']:
with self.argument_context(scope) as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('main_entry', options_list=[
'--main-entry', '-m'], help="The path to the .NET executable relative to zip root.")
for scope in ['spring-cloud app identity', 'spring-cloud app unset-deployment']:
with self.argument_context(scope) as c:
c.argument('name', name_type, help='Name of app.', validator=active_deployment_exist)
with self.argument_context('spring-cloud app identity assign') as c:
c.argument('scope', help="The scope the managed identity has access to")
c.argument('role', help="Role name or id the managed identity will be assigned")
def prepare_logs_argument(c):
'''`app log tail` is deprecated. `app logs` is the new choice. They share the same command processor.'''
c.argument('instance', options_list=['--instance', '-i'], help='Name of an existing instance of the deployment.')
c.argument('lines', type=int, help='Number of lines to show. Maximum is 10000', validator=validate_log_lines)
c.argument('follow', options_list=['--follow ', '-f'], help='Specify if the logs should be streamed.', action='store_true')
c.argument('since', help='Only return logs newer than a relative duration like 5s, 2m, or 1h. Maximum is 1h', validator=validate_log_since)
c.argument('limit', type=int, help='Maximum kilobytes of logs to return. Ceiling number is 2048.', validator=validate_log_limit)
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('format_json', nargs='?', const='{timestamp} {level:>5} [{thread:>15.15}] {logger{39}:<40.40}: {message}\n{stackTrace}',
help='Format JSON logs if structured log is enabled')
with self.argument_context('spring-cloud app logs') as c:
prepare_logs_argument(c)
with self.argument_context('spring-cloud app log tail') as c:
prepare_logs_argument(c)
with self.argument_context('spring-cloud app set-deployment') as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app.', validator=ensure_not_active_deployment)
for scope in ['spring-cloud app create', 'spring-cloud app update']:
with self.argument_context(scope) as c:
c.argument('enable_persistent_storage', arg_type=get_three_state_flag(),
help='If true, mount a 50G (Standard Pricing tier) or 1G (Basic Pricing tier) disk with default path.')
for scope in ['spring-cloud app update', 'spring-cloud app deployment create', 'spring-cloud app deploy', 'spring-cloud app create']:
with self.argument_context(scope) as c:
c.argument('runtime_version', arg_type=get_enum_type(RuntimeVersion),
help='Runtime version of used language')
c.argument('jvm_options', type=str, validator=validate_jvm_options,
help="A string containing jvm options, use '=' instead of ' ' for this argument to avoid bash parse error, eg: --jvm-options='-Xms1024m -Xmx2048m'")
c.argument('env', env_type)
c.argument('disable_probe', arg_type=get_three_state_flag(), help='If true, disable the liveness and readiness probe.')
with self.argument_context('spring-cloud app scale') as c:
c.argument('cpu', arg_type=cpu_type)
c.argument('memory', arg_type=memort_type)
c.argument('instance_count', type=int, help='Number of instance.', validator=validate_instance_count)
for scope in ['spring-cloud app deploy', 'spring-cloud app deployment create']:
with self.argument_context(scope) as c:
c.argument(
'artifact_path', options_list=['--artifact-path',
c.deprecate(target='--jar-path', redirect='--artifact-path', hide=True),
c.deprecate(target='-p', redirect='--artifact-path', hide=True)],
help='Deploy the specified pre-built artifact (jar or netcore zip).', validator=validate_jar)
c.argument(
'disable_validation', arg_type=get_three_state_flag(),
help='If true, disable jar validation.')
c.argument('builder', help='(Enterprise Tier Only) Build service builder used to build the executable.', default='default', is_preview=True)
c.argument(
'main_entry', options_list=[
'--main-entry', '-m'], help="A string containing the path to the .NET executable relative to zip root.")
c.argument(
'target_module', help='Child module to be deployed, required for multiple jar packages built from source code.', arg_group='Source Code deploy')
c.argument(
'version', help='Deployment version, keep unchanged if not set.')
c.argument(
'container_image', help='The container image tag.', arg_group='Custom Container')
c.argument(
'container_registry', default='docker.io', help='The registry of the container image.', arg_group='Custom Container')
c.argument(
'registry_username', help='The username of the container registry.', arg_group='Custom Container')
c.argument(
'registry_password', help='The password of the container registry.', arg_group='Custom Container')
c.argument(
'container_command', help='The command of the container image.', nargs='*', arg_group='Custom Container')
c.argument(
'container_args', help='The arguments of the container image.', nargs='*', arg_group='Custom Container')
with self.argument_context('spring-cloud app deploy') as c:
c.argument('source_path', arg_type=source_path_type, validator=validate_deloy_path)
with self.argument_context('spring-cloud app deployment create') as c:
c.argument('source_path', arg_type=source_path_type, validator=validate_deloyment_create_path)
with self.argument_context('spring-cloud app deployment create') as c:
c.argument('skip_clone_settings', help='Create staging deployment will automatically copy settings from production deployment.',
action='store_true')
c.argument('cpu', arg_type=cpu_type)
c.argument('memory', arg_type=memort_type)
c.argument('instance_count', type=int, help='Number of instance.', validator=validate_instance_count)
with self.argument_context('spring-cloud app deployment') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=validate_app_name)
c.argument('name', name_type, help='Name of deployment.')
for scope in ['spring-cloud app deployment generate-heap-dump', 'spring-cloud app deployment generate-thread-dump']:
with self.argument_context(scope) as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('app_instance', help='Target app instance you want to dump.')
c.argument('file_path', help='The mount file path for your dump file.')
with self.argument_context('spring-cloud app deployment start-jfr') as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('app_instance', help='Target app instance you want to dump.')
c.argument('file_path', help='The mount file path for your dump file.')
c.argument('duration', type=str, default="60s", help='Duration of JFR.')
with self.argument_context('spring-cloud app binding') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=active_deployment_exist_under_app)
c.argument('name', name_type, help='Name of service binding.')
for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding mysql add', 'spring-cloud app binding redis add']:
with self.argument_context(scope) as c:
c.argument('resource_id', validator=validate_resource_id,
help='Azure resource ID of the service to bind with.')
for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding cosmos update']:
with self.argument_context(scope) as c:
c.argument(
'database_name', help='Name of database. Required for mongo, sql, gremlin')
c.argument(
'key_space', help='Cassandra key space. Required for cassandra')
c.argument('collection_name',
help='Name of collection. Required for gremlin')
with self.argument_context('spring-cloud app binding cosmos add') as c:
c.argument('api_type', help='Type of API.', arg_type=get_enum_type(
ApiType), validator=validate_cosmos_type)
for scope in ['spring-cloud app binding mysql add', 'spring-cloud app binding mysql update']:
with self.argument_context(scope) as c:
c.argument('key', help='API key of the service.')
c.argument('username', help='Username of the database')
c.argument('database_name', help='Database name')
for scope in ['spring-cloud app binding redis add', 'spring-cloud app binding redis update']:
with self.argument_context(scope) as c:
c.argument('key', help='Api key of the service.')
c.argument('disable_ssl', arg_type=get_three_state_flag(), help='If true, disable SSL. If false, enable SSL.', default=False)
with self.argument_context('spring-cloud app append-loaded-public-certificate') as c:
c.argument('certificate_name', help='Name of the certificate to be appended')
c.argument('load_trust_store', arg_type=get_three_state_flag(), help='If true, the certificate would be loaded into trust store for Java applications', default=False)
with self.argument_context('spring-cloud config-server set') as c:
c.argument('config_file',
help='A yaml file path for the configuration of Spring Cloud config server')
for scope in ['spring-cloud config-server git set', 'spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']:
with self.argument_context(scope) as c:
c.argument('uri', help='Uri of the added config.')
c.argument('label', help='Label of the added config.')
c.argument(
'search_paths', help='search_paths of the added config, use , as delimiter for multiple paths.')
c.argument('username', help='Username of the added config.')
c.argument('password', help='Password of the added config.')
c.argument('host_key', help='Host key of the added config.')
c.argument('host_key_algorithm',
help='Host key algorithm of the added config.')
c.argument('private_key', help='Private_key of the added config.')
c.argument('strict_host_key_checking',
help='Strict_host_key_checking of the added config.')
for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update', 'spring-cloud config-server git repo remove']:
with self.argument_context(scope) as c:
c.argument('repo_name', help='Name of the repo.')
for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']:
with self.argument_context(scope) as c:
c.argument(
'pattern', help='Pattern of the repo, use , as delimiter for multiple patterns')
with self.argument_context('spring-cloud test-endpoint list') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=validate_app_name)
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=validate_deployment_name)
with self.argument_context('spring-cloud storage') as c:
c.argument('service', service_name_type)
c.argument('name', help='Name of storage.')
with self.argument_context('spring-cloud storage add') as c:
c.argument('storage_type', help='The type of the storage. e.g. StorageAccount')
c.argument('account_name', help='The name of the storage account.')
c.argument('account_key', help='The account key of the storage account.')
with self.argument_context('spring-cloud storage update') as c:
c.argument('storage_type', help='The type of the storage. e.g. StorageAccount')
c.argument('account_name', help='The name of the storage account.')
c.argument('account_key', help='The account key of the storage account.')
with self.argument_context('spring-cloud certificate') as c:
c.argument('service', service_name_type)
c.argument('name', help='Name of certificate.')
with self.argument_context('spring-cloud certificate add') as c:
c.argument('vault_uri', help='The key vault uri where the certificate is stored')
c.argument('vault_certificate_name', help='The certificate name in key vault')
c.argument('only_public_cert', arg_type=get_three_state_flag(),
help='If true, only import public certificate part from key vault.', default=False)
c.argument('public_certificate_file', options_list=['--public-certificate-file', '-f'],
help='A file path for the public certificate to be uploaded')
with self.argument_context('spring-cloud certificate list') as c:
c.argument('certificate_type', help='Type of uploaded certificate',
arg_type=get_enum_type(['KeyVaultCertificate', 'ContentCertificate']))
with self.argument_context('spring-cloud app custom-domain') as c:
c.argument('service', service_name_type)
c.argument('app', app_name_type, help='Name of app.', validator=active_deployment_exist_under_app)
c.argument('domain_name', help='Name of custom domain.')
with self.argument_context('spring-cloud app custom-domain bind') as c:
c.argument('certificate', type=str, help='Certificate name in Azure Spring Cloud.')
c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls')
with self.argument_context('spring-cloud app custom-domain update') as c:
c.argument('certificate', help='Certificate name in Azure Spring Cloud.')
c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls')
with self.argument_context('spring-cloud app-insights update') as c:
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_app_insights_parameters)
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_app_insights_parameters)
c.argument('sampling_rate',
type=float,
help="Sampling Rate of application insights. Maximum is 100.",
validator=validate_app_insights_parameters)
c.argument('disable',
arg_type=get_three_state_flag(),
help="Disable Application Insights.",
validator=validate_app_insights_parameters)
for scope in ['spring-cloud service-registry']:
with self.argument_context(scope) as c:
c.argument('service', service_name_type, validator=only_support_enterprise)
with self.argument_context('spring-cloud service-registry bind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
with self.argument_context('spring-cloud service-registry unbind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
for scope in ['spring-cloud build-service builder create',
'spring-cloud build-service builder update']:
with self.argument_context(scope) as c:
c.argument('builder_json', type=str, help="The JSON array of builder.", validator=validate_builder_resource)
c.argument('builder_file', type=str, help="The file path of JSON array of builder.", validator=validate_builder_resource)
with self.argument_context('spring-cloud build-service builder create') as c:
c.argument('name', type=str, help="The builder name.", validator=validate_builder_create)
with self.argument_context('spring-cloud build-service builder update') as c:
c.argument('name', type=str, help="The builder name.", validator=validate_builder_update)
for scope in ['spring-cloud build-service builder show',
'spring-cloud build-service builder delete']:
with self.argument_context(scope) as c:
c.argument('name', type=str, help="The builder name.")
|
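These registrations only declare the CLI flags and their help text; the real checks live in the named validator callbacks (validate_instance_count, validate_app_name, and so on), which are defined elsewhere in the extension. As a rough illustration only, a count validator in the knack framework could look like the sketch below; the exact rules shown are an assumption, not the extension's actual implementation.

from knack.util import CLIError

def validate_instance_count(namespace):
    # Hypothetical sketch: reject non-positive values before any request is sent.
    # The real validator in the spring-cloud extension may enforce different limits.
    if namespace.instance_count is not None and namespace.instance_count < 1:
        raise CLIError('--instance-count must be greater than 0.')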
30,296 |
def parse_base64(text):
if re.match("^=?.*?=$", text):
res = re.search('=\?.*?\?[A-Z]{1}\?(.*?)\?=', text, re.IGNORECASE)
if res:
res = res.group(1)
return base64.b64decode(res) # type: ignore
return text
|
def parse_base64(text):
if re.match("^=?.*?=$", text):
res = re.search('=\?.*?\?[A-Z]\?(.*?)\?=', text, re.IGNORECASE)
if res:
res = res.group(1)
return base64.b64decode(res) # type: ignore
return text
|
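The helper above targets RFC 2047 style encoded words such as '=?utf-8?B?aGVsbG8=?='. A quick standalone illustration of the capture-and-decode step it performs, using only the standard library:

import base64
import re

text = '=?utf-8?B?aGVsbG8=?='
match = re.search(r'=\?.*?\?[A-Z]\?(.*?)\?=', text, re.IGNORECASE)
print(match.group(1))                    # 'aGVsbG8=' -- the base64 payload
print(base64.b64decode(match.group(1)))  # b'hello'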
2,834 |
def rbf_kernel(X, Y=None, gamma=None):
"""Compute the rbf (gaussian) kernel between X and Y.
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
Slope. If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
The RBF kernel.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
|
def rbf_kernel(X, Y=None, gamma=None):
"""Compute the rbf (gaussian) kernel between X and Y.
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
The RBF kernel.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
|
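A quick numerical sanity check of the formula in the docstring, using the public scikit-learn entry point rather than the private helpers (assuming NumPy and scikit-learn are installed):

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

X = np.array([[0.0, 0.0], [1.0, 0.0]])
Y = np.array([[0.0, 1.0]])
gamma = 0.5
K = rbf_kernel(X, Y, gamma=gamma)
# Each entry should equal exp(-gamma * ||x - y||^2)
expected = np.exp(-gamma * ((X[:, None, :] - Y[None, :, :]) ** 2).sum(axis=-1))
print(np.allclose(K, expected))  # True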
42,001 |
def _find_coordinates_where_empty(
zmap: Dict[complex, Union[int, float]], contour_point_num: int
) -> List[complex]:
# this function implements missing value discovery and sorting
# algorithm used in Plotly to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/find_empties.js
# it works by repeatedly iterating over the coordinate map in search of patches of
# missing values with existing or previously discovered neighbors
# when discovered, such patches are added to the iteration queue (list of coordinates)
# sorted by number of neighbors, marking iteration order for interpolation algorithm
# search ends when all missing patches have been discovered
# it's like playing minesweeper in reverse
iter_queue: List[complex] = []
zcopy = zmap.copy()
discovered = 0
n_missing = (contour_point_num ** 2) - len(zmap)
coordinates = [
complex(xaxis, yaxis)
for yaxis in range(contour_point_num)
for xaxis in range(contour_point_num)
]
while discovered != n_missing:
patchmap: Dict[complex, Union[int, float]] = {}
for coord in coordinates:
value = zcopy.get(coord, None)
if value is not None:
# trial value or already discovered
continue
n_neighbors = 0
for offset in NEIGHBOR_OFFSETS:
neighbor = zcopy.get(coord + offset, None)
if neighbor is not None:
n_neighbors += 1
if n_neighbors > 0:
patchmap[coord] = n_neighbors
zcopy.update(patchmap)
patch = [k for k, _ in sorted(patchmap.items(), key=lambda i: i[1], reverse=True)]
iter_queue.extend(patch)
discovered += len(patch)
return iter_queue
|
def _find_coordinates_where_empty(
zmap: Dict[complex, float], contour_point_num: int
) -> List[complex]:
# this function implements missing value discovery and sorting
# algorithm used in Plotly to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/find_empties.js
# it works by repeatedly iterating over the coordinate map in search of patches of
# missing values with existing or previously discovered neighbors
# when discovered, such patches are added to the iteration queue (list of coordinates)
# sorted by number of neighbors, marking iteration order for interpolation algorithm
# search ends when all missing patches have been discovered
# it's like playing minesweeper in reverse
iter_queue: List[complex] = []
zcopy = zmap.copy()
discovered = 0
n_missing = (contour_point_num ** 2) - len(zmap)
coordinates = [
complex(xaxis, yaxis)
for yaxis in range(contour_point_num)
for xaxis in range(contour_point_num)
]
while discovered != n_missing:
patchmap: Dict[complex, Union[int, float]] = {}
for coord in coordinates:
value = zcopy.get(coord, None)
if value is not None:
# trial value or already discovered
continue
n_neighbors = 0
for offset in NEIGHBOR_OFFSETS:
neighbor = zcopy.get(coord + offset, None)
if neighbor is not None:
n_neighbors += 1
if n_neighbors > 0:
patchmap[coord] = n_neighbors
zcopy.update(patchmap)
patch = [k for k, _ in sorted(patchmap.items(), key=lambda i: i[1], reverse=True)]
iter_queue.extend(patch)
discovered += len(patch)
return iter_queue
|
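The discovery order depends on the module-level NEIGHBOR_OFFSETS constant, which is not shown here; assuming it holds the four axis-aligned offsets in the complex plane, a toy run on a 2x2 grid (with the function above in scope) behaves like this:

NEIGHBOR_OFFSETS = (1, -1, 1j, -1j)  # assumption: right, left, up, down neighbours

# 2x2 grid with a single known value at (0, 0). The two cells adjacent to it are
# discovered in the first pass; the far corner has no known neighbours yet and is
# picked up in a second pass once its neighbours have been discovered.
zmap = {0j: 1.0}
order = _find_coordinates_where_empty(zmap, 2)
print(order)  # [(1+0j), 1j, (1+1j)]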
31,122 |
def main():
# If an arg supplying an orgId is provided, will override the one found in params
args = {**demisto.params(), **demisto.args()}
base_url = 'https://ja3er.com/'
api_key = base64.b64encode(f'{demisto.getParam("apiKey")}:{demisto.getParam("apiSecret")}'.encode("ascii"))
verify = args.get('insecure')
client = Client(
base_url,
verify=verify
)
commands = {
'test-module': test_module,
'ja3-search': ja3_search
}
command = demisto.command()
if command in commands:
return_results(commands[command](client, **args))
else:
return_error(f'Command {command} is not available in this integration')
|
def main():
# If an arg supplying an orgId is provided, will override the one found in params
args = {**demisto.params(), **demisto.args()}
base_url = 'https://ja3er.com/'
api_key = base64.b64encode(f'{demisto.getParam("apiKey")}:{demisto.getParam("apiSecret")}'.encode("ascii"))
verify = not args.get('insecure', False)
client = Client(
base_url,
verify=verify
)
commands = {
'test-module': test_module,
'ja3-search': ja3_search
}
command = demisto.command()
if command in commands:
return_results(commands[command](client, **args))
else:
return_error(f'Command {command} is not available in this integration')
|
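The only behavioural difference between the two versions is the verify flag: the modified code negates the integration's insecure parameter, so certificate verification stays on unless it is explicitly disabled. A minimal illustration with a plain dict standing in for the merged params/args:

args = {'insecure': True}
print(not args.get('insecure', False))  # False -> requests sent without certificate verification

args = {}
print(not args.get('insecure', False))  # True -> verification enabled when the flag is absent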
56,601 |
def plot_pair(
data,
group="posterior",
var_names: Optional[List[str]] = None,
filter_vars: Optional[str] = None,
coords=None,
marginals=False,
figsize=None,
textsize=None,
kind: Union[str, List[str]] = "scatter",
gridsize="auto",
contour: Optional[bool] = None,
plot_kwargs=None,
fill_last=False,
divergences=False,
colorbar=False,
labeller=None,
ax=None,
divergences_kwargs=None,
scatter_kwargs=None,
kde_kwargs=None,
hexbin_kwargs=None,
backend=None,
backend_kwargs=None,
marginal_kwargs=None,
point_estimate=None,
point_estimate_kwargs=None,
point_estimate_marker_kwargs=None,
reference_values=None,
reference_values_kwargs=None,
show=None,
):
"""
Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Parameters
----------
data: obj
Any object that can be converted to an :class:`az.InferenceData` object
refer to documentation of :func:`az.convert_to_dataset` for details
group: str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names: list of variable names, optional
Variables to be plotted, if None all variables are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
marginals: bool, optional
If True pairplot will include marginal distributions for every variable
figsize: figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str or List[str]
Type of plot to display (scatter, kde and/or hexbin)
gridsize: int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool, optional, deprecated, Defaults to True.
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
**Note:** this default is implemented in the body of the code, not in argument processing.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
divergences: Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar: bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs: dicts, optional
Additional keywords passed to ``ax.scatter`` for divergences
scatter_kwargs:
Additional keywords passed to ``ax.plot`` when using scatter kind
kde_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_kde` when using kde kind
hexbin_kwargs: dict, optional
Additional keywords passed to ``ax.hexbin`` when using hexbin kind
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
marginal_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions
plotted in the diagonal.
point_estimate: str, optional
Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be
plotted using a scatter marker and vertical/horizontal lines.
point_estimate_kwargs: dict, optional
Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh)
point_estimate_marker_kwargs: dict, optional
Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh
reference_values: dict, optional
Reference values for the plotted variables. The Reference values will be plotted
using a scatter marker
reference_values_kwargs: dict, optional
Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences and select variables with regular expressions
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['^t', 'mu'],
... filter_vars="regex",
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
kind_boolean: Union[bool, List[bool]]
if isinstance(kind, str):
kind_boolean = kind in valid_kinds
else:
kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
if not np.all(kind_boolean):
raise ValueError((f"Plot type {kind} not recognized." "Plot type must be in {valid_kinds}"))
if fill_last or contour:
warnings.warn(
"fill_last and contour will be deprecated. Please use kde_kwargs",
UserWarning,
)
if plot_kwargs:
warnings.warn(
"plot_kwargs will be deprecated."
" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
UserWarning,
)
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
# Get posterior draws and combine chains
dataset = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, dataset, filter_vars)
plotters = list(
xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)
)
flat_var_names = [
labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters
]
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
UserWarning,
)
if gridsize == "auto":
gridsize = int(dataset.dims["draw"] ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise ValueError("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
plotters=plotters,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
scatter_kwargs=scatter_kwargs,
kde_kwargs=kde_kwargs,
hexbin_kwargs=hexbin_kwargs,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
marginal_kwargs=marginal_kwargs,
show=show,
marginals=marginals,
point_estimate=point_estimate,
point_estimate_kwargs=point_estimate_kwargs,
point_estimate_marker_kwargs=point_estimate_marker_kwargs,
reference_values=reference_values,
reference_values_kwargs=reference_values_kwargs,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax
|
def plot_pair(
data,
group="posterior",
var_names: Optional[List[str]] = None,
filter_vars: Optional[str] = None,
coords=None,
marginals=False,
figsize=None,
textsize=None,
kind: Union[str, List[str]] = "scatter",
gridsize="auto",
contour: Optional[bool] = None,
plot_kwargs=None,
fill_last=False,
divergences=False,
colorbar=False,
labeller=None,
ax=None,
divergences_kwargs=None,
scatter_kwargs=None,
kde_kwargs=None,
hexbin_kwargs=None,
backend=None,
backend_kwargs=None,
marginal_kwargs=None,
point_estimate=None,
point_estimate_kwargs=None,
point_estimate_marker_kwargs=None,
reference_values=None,
reference_values_kwargs=None,
show=None,
):
"""
Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Parameters
----------
data: obj
Any object that can be converted to an :class:`az.InferenceData` object
refer to documentation of :func:`az.convert_to_dataset` for details
group: str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names: list of variable names, optional
Variables to be plotted, if None all variables are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
marginals: bool, optional
If True pairplot will include marginal distributions for every variable
figsize: figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str or List[str]
Type of plot to display (scatter, kde and/or hexbin)
gridsize: int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool, optional, deprecated, Defaults to True.
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
**Note:** this default is implemented in the body of the code, not in argument processing.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
divergences: Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar: bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs: dicts, optional
Additional keywords passed to ``ax.scatter`` for divergences
scatter_kwargs:
Additional keywords passed to ``ax.plot`` when using scatter kind
kde_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_kde` when using kde kind
hexbin_kwargs: dict, optional
Additional keywords passed to ``ax.hexbin`` when using hexbin kind
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`.
check the plotting method of the backend.
marginal_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions
plotted in the diagonal.
point_estimate: str, optional
Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be
plotted using a scatter marker and vertical/horizontal lines.
point_estimate_kwargs: dict, optional
Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh)
point_estimate_marker_kwargs: dict, optional
Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh
reference_values: dict, optional
Reference values for the plotted variables. The Reference values will be plotted
using a scatter marker
reference_values_kwargs: dict, optional
Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences and select variables with regular expressions
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['^t', 'mu'],
... filter_vars="regex",
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
kind_boolean: Union[bool, List[bool]]
if isinstance(kind, str):
kind_boolean = kind in valid_kinds
else:
kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
if not np.all(kind_boolean):
raise ValueError((f"Plot type {kind} not recognized." "Plot type must be in {valid_kinds}"))
if fill_last or contour:
warnings.warn(
"fill_last and contour will be deprecated. Please use kde_kwargs",
UserWarning,
)
if plot_kwargs:
warnings.warn(
"plot_kwargs will be deprecated."
" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
UserWarning,
)
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
# Get posterior draws and combine chains
dataset = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, dataset, filter_vars)
plotters = list(
xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)
)
flat_var_names = [
labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters
]
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
UserWarning,
)
if gridsize == "auto":
gridsize = int(dataset.dims["draw"] ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise ValueError("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
plotters=plotters,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
scatter_kwargs=scatter_kwargs,
kde_kwargs=kde_kwargs,
hexbin_kwargs=hexbin_kwargs,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
marginal_kwargs=marginal_kwargs,
show=show,
marginals=marginals,
point_estimate=point_estimate,
point_estimate_kwargs=point_estimate_kwargs,
point_estimate_marker_kwargs=point_estimate_marker_kwargs,
reference_values=reference_values,
reference_values_kwargs=reference_values_kwargs,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax
|
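Beyond the docstring examples, the marginal and point-estimate options can be combined in one call; a short sketch reusing the same centered_eight example data (assuming ArviZ can load or fetch it):

import arviz as az

centered = az.load_arviz_data('centered_eight')
az.plot_pair(
    centered,
    var_names=['mu', 'tau'],
    kind=['scatter', 'kde'],   # a list of kinds layers the plot types
    marginals=True,            # draw marginal distributions on the diagonal
    point_estimate='median',   # mark the median with a marker and reference lines
)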
44,162 |
def get_jax_interface_name(tapes):
"""Check all parameters in each tape and output the name of the suitable
JAX interface.
This function checks each tape and determines if any of the gate parameters
was transformed by a JAX transform such as ``jax.jit``. If so, it outputs
the name of the JAX interface with jit support.
Note that determining if jit support should be turned on is done by
checking if parameters are abstract. Parameters can be abstract not just
for ``jax.jit``, but for other JAX transforms (vmap, pmap, etc.) too. The
reason is that JAX doesn't have a public API for checking whether or not
the execution is within the jit transform.
Args:
tapes (Sequence[.QuantumTape]): batch of tapes to execute
Returns:
str: name of JAX interface that fits the tape parameters, "jax" or
"jax-jit"
"""
for t in tapes:
for op in t:
# Unwrap the observable from a MeasurementProcess
op = op.obs if hasattr(op, "obs") else op
if op is not None:
# Some MeasurementProcess objects have obs=None
for param in op.data:
if qml.math.is_abstract(param):
return "jax-jit"
return "jax"
|
def get_jax_interface_name(tapes):
"""Check all parameters in each tape and output the name of the suitable
JAX interface.
This function checks each tape and determines if any of the gate parameters
was transformed by a JAX transform such as ``jax.jit``. If so, it outputs
the name of the JAX interface with jit support.
Note that determining if jit support should be turned on is done by
checking if parameters are abstract. Parameters can be abstract not just
for ``jax.jit``, but for other JAX transforms (vmap, pmap, etc.) too. The
reason is that JAX doesn't have a public API for checking whether or not
the execution is within the jit transform.
Args:
tapes (Sequence[.QuantumTape]): batch of tapes to execute
Returns:
str: name of JAX interface that fits the tape parameters, "jax" or
"jax-jit"
"""
for t in tapes:
for op in t:
# Unwrap the observable from a MeasurementProcess
op = op.obs if hasattr(op, "obs") else op
if op is not None:
# Some MeasurementProcess objects have op.obs=None
for param in op.data:
if qml.math.is_abstract(param):
return "jax-jit"
return "jax"
|
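The jit detection hinges on qml.math.is_abstract, which reports whether a value is a JAX tracer rather than a concrete number. A small sketch of that behaviour (assuming JAX and PennyLane are installed; the print runs at trace time):

import jax
import pennylane as qml

def check(x):
    print("abstract parameter?", qml.math.is_abstract(x))
    return x * 2

check(0.5)            # abstract parameter? False -- plain Python float
jax.jit(check)(0.5)   # abstract parameter? True  -- x is a tracer while jitting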
12,363 |
def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
"""Run dhclient on the interface without scripts or filesystem artifacts.
@param dhclient_cmd_path: Full path to the dhclient used.
@param interface: Name of the network interface on which to dhclient.
@param cleandir: The directory from which to run dhclient as well as store
dhcp leases.
@return: A list of dicts representing the dhcp leases parsed from the
dhcp.leases file or empty list.
"""
LOG.debug('Performing a dhcp discovery on %s', interface)
# XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict
# app armor profiles which disallow running dhclient -sf <our-script-file>.
# We want to avoid running /sbin/dhclient-script because of side-effects in
# /etc/resolv.conf and any other vendor specific scripts in
# /etc/dhcp/dhclient*hooks.d.
sandbox_dhclient_cmd = os.path.join(cleandir, 'dhclient')
util.copy(dhclient_cmd_path, sandbox_dhclient_cmd)
pid_file = os.path.join(cleandir, 'dhclient.pid')
lease_file = os.path.join(cleandir, 'dhcp.leases')
# ISC dhclient needs the interface up to send initial discovery packets.
# Generally dhclient relies on dhclient-script PREINIT action to bring the
# link up before attempting discovery. Since we are using -sf /bin/true,
# we need to do that "link up" ourselves first.
util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
'-pf', pid_file, interface, '-sf', '/bin/true']
pout = util.subp(cmd, capture=True)
# Wait for pid file and lease file to appear, and for the process
# named by the pid file to daemonize (have pid 1 as its parent). If we
# try to read the lease file before daemonization happens, we might try
# to read it before the dhclient has actually written it. We also have
# to wait until the dhclient has become a daemon so we can be sure to
# kill the correct process, thus freeing cleandir to be deleted back
# up the callstack.
missing = util.wait_for_files(
[pid_file, lease_file], maxwait=5, naplen=0.01)
if missing:
LOG.warning("dhclient did not produce expected files: %s",
', '.join(os.path.basename(f) for f in missing))
return []
ppid = 'unknown'
for _ in range(0, 1000):
pid_content = util.load_file(pid_file).strip()
try:
pid = int(pid_content)
except ValueError:
pass
else:
ppid = util.get_proc_ppid(pid)
if ppid == 1:
if len(pout) == 2:
LOG.debug('dhclient error stream: %s', pout[1])
LOG.debug('killing dhclient with pid=%s', pid)
os.kill(pid, signal.SIGKILL)
return parse_dhcp_lease_file(lease_file)
time.sleep(0.01)
LOG.error(
'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds',
pid_content, ppid, 0.01 * 1000
)
return parse_dhcp_lease_file(lease_file)
|
def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
"""Run dhclient on the interface without scripts or filesystem artifacts.
@param dhclient_cmd_path: Full path to the dhclient used.
    @param interface: Name of the network interface on which to dhclient.
@param cleandir: The directory from which to run dhclient as well as store
dhcp leases.
    @return: A list of dicts representing the dhcp leases parsed from the
dhcp.leases file or empty list.
"""
LOG.debug('Performing a dhcp discovery on %s', interface)
# XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict
# app armor profiles which disallow running dhclient -sf <our-script-file>.
# We want to avoid running /sbin/dhclient-script because of side-effects in
    # /etc/resolv.conf and any other vendor specific scripts in
# /etc/dhcp/dhclient*hooks.d.
sandbox_dhclient_cmd = os.path.join(cleandir, 'dhclient')
util.copy(dhclient_cmd_path, sandbox_dhclient_cmd)
pid_file = os.path.join(cleandir, 'dhclient.pid')
lease_file = os.path.join(cleandir, 'dhcp.leases')
# ISC dhclient needs the interface up to send initial discovery packets.
# Generally dhclient relies on dhclient-script PREINIT action to bring the
# link up before attempting discovery. Since we are using -sf /bin/true,
# we need to do that "link up" ourselves first.
util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
'-pf', pid_file, interface, '-sf', '/bin/true']
pout = util.subp(cmd, capture=True)
# Wait for pid file and lease file to appear, and for the process
# named by the pid file to daemonize (have pid 1 as its parent). If we
# try to read the lease file before daemonization happens, we might try
# to read it before the dhclient has actually written it. We also have
# to wait until the dhclient has become a daemon so we can be sure to
# kill the correct process, thus freeing cleandir to be deleted back
# up the callstack.
missing = util.wait_for_files(
[pid_file, lease_file], maxwait=5, naplen=0.01)
if missing:
LOG.warning("dhclient did not produce expected files: %s",
', '.join(os.path.basename(f) for f in missing))
return []
ppid = 'unknown'
for _ in range(0, 1000):
pid_content = util.load_file(pid_file).strip()
try:
pid = int(pid_content)
except ValueError:
pass
else:
ppid = util.get_proc_ppid(pid)
if ppid == 1:
LOG.debug('dhclient error stream: %s', stderr)
LOG.debug('killing dhclient with pid=%s', pid)
os.kill(pid, signal.SIGKILL)
return parse_dhcp_lease_file(lease_file)
time.sleep(0.01)
LOG.error(
'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds',
pid_content, ppid, 0.01 * 1000
)
return parse_dhcp_lease_file(lease_file)
|
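The polling loop above amounts to "wait until the child has been reparented to init". A stand-alone sketch of that idea, reading /proc directly instead of going through util.get_proc_ppid (Linux-only and simplified; the helper name here is this sketch's own):
import time

def wait_for_daemonize(pid, timeout=10.0, naplen=0.01):
    # Field 4 of /proc/<pid>/stat is the parent pid; dhclient's comm field has
    # no spaces, so a plain split() is good enough for this illustration.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with open('/proc/%d/stat' % pid) as f:
                ppid = int(f.read().split()[3])
        except (OSError, ValueError):
            return False          # process already gone or stat unreadable
        if ppid == 1:
            return True           # reparented to init, i.e. daemonized
        time.sleep(naplen)
    return False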
5,705 |
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles.
If given and `fit` is True, plots the least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
quantiles = dist.ppf(val), for
0.5**(1/n), for i = n
val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1
1 - 0.5**(1/n), for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample//2,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
if fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if fit:
# perform a linear least squares fit.
slope, intercept, r, prob, _ = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo')
if fit:
plot.plot(osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Theoretical quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
if rvalue:
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
|
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles.
If given and `fit` is True, also plots the least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
quantiles = dist.ppf(val), for
0.5**(1/n), for i = n
val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1
1 - 0.5**(1/n), for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample//2,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
if fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if fit:
# perform a linear least squares fit.
slope, intercept, r, prob, _ = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo')
if fit:
plot.plot(osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Theoretical quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
if rvalue:
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
|
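The Filliben estimate quoted in the docstring is easy to verify numerically; a small re-implementation of the order-statistic medians (the role _calc_uniform_order_statistic_medians plays above):
import numpy as np

def filliben_medians(n):
    i = np.arange(1, n + 1, dtype=float)
    val = (i - 0.3175) / (n + 0.365)   # middle order statistics
    val[0] = 1 - 0.5 ** (1.0 / n)      # i = 1
    val[-1] = 0.5 ** (1.0 / n)         # i = n
    return val

print(filliben_medians(5))
# Passing these through dist.ppf (e.g. stats.norm.ppf) gives the x-axis of the plot.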
21,922 |
def set_time_passes(enable):
"""Enable or disable pass timers.
Parameters
----------
enable : bool
Set to True to enable pass timers.
Set to False to disable pass timers.
"""
ffi.lib.LLVMPY_SetTimePasses(c_int(enable))
|
def set_time_passes(enable):
"""Enable or disable the pass timers.
Parameters
----------
enable : bool
Set to True to enable pass timers.
Set to False to disable pass timers.
"""
ffi.lib.LLVMPY_SetTimePasses(c_int(enable))
|
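For context, the toggle is normally wrapped around a compilation run. A hedged usage sketch, assuming the function above is exposed as llvmlite.binding.set_time_passes (the LLVMPY_ ffi symbol suggests llvmlite, but treat the module path as an assumption):
import llvmlite.binding as llvm   # assumed home of set_time_passes

llvm.set_time_passes(True)    # start collecting per-pass timing
# ... run optimization / code generation passes here ...
llvm.set_time_passes(False)   # stop collecting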
3,888 |
def lazy_importorskip(modname, minversion=None, reason=None):
module = pytest.importorskip(modname, minversion, reason)
if isinstance(module, DelayedImportErrorModule):
if reason is None:
reason = "Could not import {modname!r}. Lazy import delayed reporting."
raise pytest.skip(reason, allow_module_level=True)
return module
|
def lazy_importerskip(modname, minversion=None, reason=None):
module = pytest.importorskip(modname, minversion, reason)
if isinstance(module, DelayedImportErrorModule):
if reason is None:
reason = "Could not import {modname!r}. Lazy import delayed reporting."
raise pytest.skip(reason, allow_module_level=True)
return module
|
5,459 |
def init(opts):
"""
Opens the connection with the network device.
"""
try:
napalm_device = salt.utils.napalm.get_device(opts)
__context__["napalm_device"] = {
"opts": opts,
"id": opts["id"],
"network_device": salt.utils.napalm.get_device(opts),
"details": {"initialized": True},
}
return True
except Exception as error: # pylint: disable=broad-except
log.warn("Unable to run init.")
return False
|
def init(opts):
"""
Opens the connection with the network device.
"""
try:
napalm_device = salt.utils.napalm.get_device(opts)
__context__["napalm_device"] = {
"opts": opts,
"id": opts["id"],
"network_device": salt.utils.napalm.get_device(opts),
"details": {"initialized": True},
}
return True
except Exception as error: # pylint: disable=broad-except
log.warn("Unable to run init: %s", error)
return False
|
11,775 |
def as_bunch(dct):
"""Custom decoder to deserialize Ginga's Bunch.Bunch class.
Usage
-----
json.loads(buf, )
"""
if '__bunch__' in dct:
d = dct.copy()
del d['__bunch__']
return Bunch.Bunch(d)
return dct
|
def as_bunch(dct):
"""Custom decoder to deserialize Ginga's Bunch.Bunch class.
Usage
-----
json.loads(buf, )
"""
if '__bunch__' in dct:
d = dct.copy()
del d['__bunch__']
return Bunch.Bunch(d)
return dct
|
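The dangling json.loads(buf, ) in the docstring presumably passes this function as the decoder hook. A hedged usage sketch, assuming as_bunch and Ginga's Bunch are importable; the payload string is made up for illustration:
import json

buf = '{"__bunch__": true, "x": 1, "y": 2}'           # hypothetical payload with the marker key
restored = json.loads(buf, object_hook=as_bunch)      # -> Bunch.Bunch with keys x and y
plain = json.loads('{"x": 1}', object_hook=as_bunch)  # unmarked dicts pass through unchanged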
13,909 |
def _gather_coverage_from_line(
state: _ParserState,
line: _Line,
*,
coverage: FileCoverage,
line_is_excluded: Callable[[int], bool],
branch_is_excluded: Callable[[int], bool],
context: _Context,
) -> _ParserState:
"""
Interpret a Line, updating the FileCoverage, and transitioning ParserState.
The function handles all possible Line variants, and dies otherwise:
>>> _gather_coverage_from_line(_ParserState(), "illegal line type",
... coverage=..., line_is_excluded=..., branch_is_excluded=..., context=...)
Traceback (most recent call last):
AssertionError: Unexpected variant: 'illegal line type'
"""
# pylint: disable=too-many-return-statements,too-many-branches
# pylint: disable=no-else-return # make life easier for type checkers
if isinstance(line, _SourceLine):
lineno = line.lineno
is_excluded = line_is_excluded(lineno)
noncode, count = _line_noncode_and_count(
line,
flags=context.flags,
is_excluded=is_excluded,
is_function=bool(state.deferred_functions),
)
# FIXME this can't yet use the merge() functions
# due to inconsistency in handling of the noncode flag
if noncode:
get_or_create_line_coverage(coverage, lineno).noncode = True
if count is not None:
get_or_create_line_coverage(coverage, lineno).count += count
# handle deferred functions
for function in state.deferred_functions:
_add_coverage_for_function(coverage, line.lineno, function, context)
return _ParserState(
lineno=line.lineno,
line_contents=line.source_code,
is_excluded=is_excluded,
)
elif state.is_recovering:
return state # skip until the next _SourceLine
elif isinstance(line, _FunctionLine):
# Defer handling of the function tag until the next source line.
# This is important to get correct line number information.
return state._replace(deferred_functions=[*state.deferred_functions, line])
elif isinstance(line, _BranchLine):
branchno, count, annotation = line
exclusion_reason = _branch_can_be_excluded(line, state, context.flags)
if exclusion_reason:
logger.debug(
f"Excluding unreachable branch on line {state.lineno} in file {context.filename}: {exclusion_reason}"
)
return state
line_cov = coverage.lines[state.lineno] # must already exist
is_excluded = branch_is_excluded(state.lineno)
if not is_excluded:
insert_branch_coverage(
line_cov,
branchno,
BranchCoverage(
count=count,
fallthrough=(annotation == "fallthrough"),
throw=(annotation == "throw"),
),
)
return state
# ignore metadata in this phase
elif isinstance(line, _MetadataLine):
return state
# currently, the parser just ignores specialization sections
elif isinstance(line, (_SpecializationMarkerLine, _SpecializationNameLine)):
return state
# ignore unused line types, such as specialization sections
elif isinstance(line, (_CallLine, _UnconditionalLine, _BlockLine)):
return state
else:
return _assert_never(line)
|
def _gather_coverage_from_line(
state: _ParserState,
line: _Line,
*,
coverage: FileCoverage,
line_is_excluded: Callable[[int], bool],
branch_is_excluded: Callable[[int], bool],
context: _Context,
) -> _ParserState:
"""
Interpret a Line, updating the FileCoverage, and transitioning ParserState.
The function handles all possible Line variants, and dies otherwise:
>>> _gather_coverage_from_line(_ParserState(), "illegal line type",
... coverage=..., line_is_excluded=..., branch_is_excluded=..., context=...)
Traceback (most recent call last):
AssertionError: Unexpected variant: 'illegal line type'
"""
# pylint: disable=too-many-return-statements,too-many-branches
# pylint: disable=no-else-return # make life easier for type checkers
if isinstance(line, _SourceLine):
lineno = line.lineno
is_excluded = line_is_excluded(lineno)
noncode, count = _line_noncode_and_count(
line,
flags=context.flags,
is_excluded=is_excluded,
is_function=bool(state.deferred_functions),
)
# FIXME this can't yet use the merge() functions
# due to inconsistency in handling of the noncode flag
if noncode:
get_or_create_line_coverage(coverage, lineno).noncode = True
if count is not None:
get_or_create_line_coverage(coverage, lineno).count += count
# handle deferred functions
for function in state.deferred_functions:
_add_coverage_for_function(coverage, line.lineno, function, context)
return _ParserState(
lineno=line.lineno,
line_contents=line.source_code,
is_excluded=is_excluded,
)
elif state.is_recovering:
return state # skip until the next _SourceLine
elif isinstance(line, _FunctionLine):
# Defer handling of the function tag until the next source line.
# This is important to get correct line number information.
return state._replace(deferred_functions=[*state.deferred_functions, line])
elif isinstance(line, _BranchLine):
branchno, count, annotation = line
exclusion_reason = _branch_can_be_excluded(line, state, context.flags)
if exclusion_reason:
logger.debug(
f"Excluding unreachable branch on line {state.lineno} in file {context.filename}: {exclusion_reason}"
)
return state
line_cov = coverage.lines[state.lineno] # must already exist
if not branch_is_excluded(state.lineno):
insert_branch_coverage(
line_cov,
branchno,
BranchCoverage(
count=count,
fallthrough=(annotation == "fallthrough"),
throw=(annotation == "throw"),
),
)
return state
# ignore metadata in this phase
elif isinstance(line, _MetadataLine):
return state
# currently, the parser just ignores specialization sections
elif isinstance(line, (_SpecializationMarkerLine, _SpecializationNameLine)):
return state
# ignore unused line types, such as specialization sections
elif isinstance(line, (_CallLine, _UnconditionalLine, _BlockLine)):
return state
else:
return _assert_never(line)
|
1,331 |
def test_type_of_target():
for group, group_examples in EXAMPLES.items():
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = r'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
y = pd.Series([1, 0, 0, 1, 0]).to_sparse()
msg = "y cannot be class 'SparseSeries'."
assert_raises_regex(ValueError, msg, type_of_target, y)
|
def test_type_of_target():
for group, group_examples in EXAMPLES.items():
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = r'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
y = pd.Series(pd.SparseArray([1, 0, 0, 1, 0]))
msg = "y cannot be class 'SparseSeries'."
assert_raises_regex(ValueError, msg, type_of_target, y)
|
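For readers unfamiliar with the helper under test, type_of_target simply classifies a label array; a few representative calls (the outputs shown are what scikit-learn documents for these inputs):
from sklearn.utils.multiclass import type_of_target

print(type_of_target([0, 1, 1, 0]))        # 'binary'
print(type_of_target([0.5, 1.2, 3.4]))     # 'continuous'
print(type_of_target([[1, 0], [0, 1]]))    # 'multilabel-indicator'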
32,859 |
def get_trace_url_timeout():
# type: () -> float
return float(os.environ.get("DD_TRACE_URL_TIMEOUT", default=DEFAULT_TIMEOUT))
|
def get_trace_url_timeout():
# type: () -> float
return float(get_env("trace", "url", "timeout", default=DEFAULT_TIMEOUT))
|
1,080 |
def bids_gen_info(bids_event_files,
condition_column='trial_type',
amplitude_column=None,
time_repetition=False,
):
"""Generate subject_info structure from a list of BIDS .tsv event files.
Parameters
----------
bids_event_files : list of str
Filenames of BIDS .tsv event files containing columns including:
'onset', 'duration', and 'trial_type' or the `condition_column` value.
condition_column : str
Column of files in `bids_event_files` based on the values of which
events will be sorted into different regressors
amplitude_column : str
Column of files in `bids_event_files` based on the values of which
to apply amplitudes to events. If unspecified, all events will be
represented with an amplitude of 1.
Returns
-------
list of Bunch
"""
info = []
for bids_event_file in bids_event_files:
with open(bids_event_file) as f:
f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t')
events = [{k: v for k, v in row.items()} for row in f_events]
conditions = list(set([i[condition_column] for i in events]))
runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[])
for condition in conditions:
selected_events = [i for i in events if i[condition_column]==condition]
onsets = [float(i['onset']) for i in selected_events]
durations = [float(i['duration']) for i in selected_events]
if time_repetition:
decimals = math.ceil(-math.log10(time_repetition))
onsets = [round(i,decimals) for i in onsets]
durations = [round(i,decimals) for i in durations]
if condition:
runinfo.conditions.append(condition)
else:
runinfo.conditions.append('e0')
runinfo.onsets.append(onsets)
runinfo.durations.append(durations)
try:
amplitudes = [float(i[amplitude_column]) for i in selected_events]
runinfo.amplitudes.append(amplitudes)
except KeyError:
runinfo.amplitudes.append([1]*len(onsets))
info.append(runinfo)
return info
|
def bids_gen_info(bids_event_files,
condition_column='trial_type',
amplitude_column=None,
time_repetition=False,
):
"""Generate subject_info structure from a list of BIDS .tsv event files.
Parameters
----------
bids_event_files : list of str
Filenames of BIDS .tsv event files containing columns including:
'onset', 'duration', and 'trial_type' or the `condition_column` value.
condition_column : str
Column of files in `bids_event_files` based on the values of which
events will be sorted into different regressors
amplitude_column : str
Column of files in `bids_event_files` based on the values of which
to apply amplitudes to events. If unspecified, all events will be
represented with an amplitude of 1.
Returns
-------
list of Bunch
"""
info = []
for bids_event_file in bids_event_files:
with open(bids_event_file) as f:
f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t')
events = [{k: v for k, v in row.items()} for row in f_events]
conditions = list(set([i[condition_column] for i in events]))
runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[])
for condition in conditions:
selected_events = [i for i in events if i[condition_column]==condition]
onsets = [float(i['onset']) for i in selected_events]
durations = [float(i['duration']) for i in selected_events]
if time_repetition:
decimals = math.ceil(-math.log10(time_repetition))
onsets = [round(i, decimals) for i in onsets]
durations = [round(i,decimals) for i in durations]
if condition:
runinfo.conditions.append(condition)
else:
runinfo.conditions.append('e0')
runinfo.onsets.append(onsets)
runinfo.durations.append(durations)
try:
amplitudes = [float(i[amplitude_column]) for i in selected_events]
runinfo.amplitudes.append(amplitudes)
except KeyError:
runinfo.amplitudes.append([1]*len(onsets))
info.append(runinfo)
return info
|
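One detail worth spelling out is the rounding rule: when time_repetition is given, onsets and durations are rounded to ceil(-log10(TR)) decimal places. A quick check of what that evaluates to for a few TR values:
import math

for tr in (2.0, 0.72, 0.05):
    decimals = math.ceil(-math.log10(tr))
    print(tr, decimals, round(3.14159, decimals))
# 2.0 -> 0 decimals, 0.72 -> 1 decimal, 0.05 -> 2 decimals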
8,383 |
def air_to_vac(wavelength, scheme='inversion', method='Griesen2006', co2=None,
precision=1e-12, maxiter=30):
"""
Converts air to vacuum wavelengths using different methods.
Parameters
----------
wavelength : `Quantity` object (number or sequence)
Air wavelengths with an astropy.unit.
scheme : str, optional
        How to convert from vacuum to air wavelengths. Options are:
'inversion' (default) - result is simply the inversion (1 / n) of the
refraction index of air. Griesen et al. (2006) report that the error
in naively inverting is less than 10^-9.
        'Piskunov' - uses an analytical solution derived by Nikolai Piskunov
and used by the Vienna Atomic Line Database (VALD).
'iteration' - uses an iterative scheme to invert the index of refraction.
method : str, optional
Only used if scheme is 'inversion' or 'iteration'. One of the methods
in refraction_index().
co2 : number, optional
        Atmospheric CO2 concentration in ppm. Only used if scheme='inversion' and
method='Ciddor1996'. If not given, a default concentration of 450 ppm is used.
precision : float
Maximum fractional in refraction conversion beyond which iteration will
be stopped. Only used if scheme='iteration'.
maxiter : integer
Maximum number of iterations to run. Only used if scheme='iteration'.
Returns
-------
vac_wavelength : `Quantity` object (number or sequence)
Vacuum wavelengths with the same unit as wavelength.
"""
VALID_SCHEMES = ['inversion', 'iteration', 'piskunov']
assert isinstance(scheme, str), 'scheme must be a string'
scheme = scheme.lower()
if scheme == 'inversion':
refr = refraction_index(wavelength, method=method, co2=co2)
#return wavelength * refr
elif scheme == 'piskunov':
wlum = wavelength.to(u.angstrom).value
sigma2 = (1e4 / wlum)**2
refr = (8.336624212083e-5 + 2.408926869968e-2 / (130.1065924522 - sigma2) +
1.599740894897e-4 / (38.92568793293 - sigma2)) + 1
#return wavelength * refr
elif scheme == 'iteration':
# Refraction index is a function of vacuum wavelengths.
# Iterate to get index of refraction that gives air wavelength that
# is consistent with the reverse transformation.
counter = 0
result = wavelength.copy()
refr = refraction_index(wavelength, method=method, co2=co2)
while True:
counter += 1
diff = wavelength * refr - result
if abs(diff.max().value) < precision:
break
#return wavelength * conv
if counter > maxiter:
raise RuntimeError("Reached maximum number of iterations "
"without reaching desired precision level.")
result += diff
refr = refraction_index(result, method=method, co2=co2)
else:
raise ValueError("Method must be one of " + ", ".join(VALID_SCHEMES))
return wavelength * refr
|
def air_to_vac(wavelength, scheme='inversion', method='Griesen2006', co2=None,
precision=1e-12, maxiter=30):
"""
Converts air to vacuum wavelengths using different methods.
Parameters
----------
wavelength : `Quantity` object (number or sequence)
Air wavelengths with an astropy.unit.
scheme : str, optional
        How to convert from vacuum to air wavelengths. Options are:
'inversion' (default) - result is simply the inversion (1 / n) of the
refraction index of air. Griesen et al. (2006) report that the error
in naively inverting is less than 10^-9.
        'Piskunov' - uses an analytical solution derived by Nikolai Piskunov
and used by the Vienna Atomic Line Database (VALD).
'iteration' - uses an iterative scheme to invert the index of refraction.
method : str, optional
Only used if scheme is 'inversion' or 'iteration'. One of the methods
in refraction_index().
co2 : number, optional
        Atmospheric CO2 concentration in ppm. Only used if scheme='inversion' and
method='Ciddor1996'. If not given, a default concentration of 450 ppm is used.
precision : float
Maximum fractional value in refraction conversion beyond at which iteration will
be stopped. Only used if scheme='iteration'.
maxiter : integer
Maximum number of iterations to run. Only used if scheme='iteration'.
Returns
-------
vac_wavelength : `Quantity` object (number or sequence)
Vacuum wavelengths with the same unit as wavelength.
"""
VALID_SCHEMES = ['inversion', 'iteration', 'piskunov']
assert isinstance(scheme, str), 'scheme must be a string'
scheme = scheme.lower()
if scheme == 'inversion':
refr = refraction_index(wavelength, method=method, co2=co2)
#return wavelength * refr
elif scheme == 'piskunov':
wlum = wavelength.to(u.angstrom).value
sigma2 = (1e4 / wlum)**2
refr = (8.336624212083e-5 + 2.408926869968e-2 / (130.1065924522 - sigma2) +
1.599740894897e-4 / (38.92568793293 - sigma2)) + 1
#return wavelength * refr
elif scheme == 'iteration':
# Refraction index is a function of vacuum wavelengths.
# Iterate to get index of refraction that gives air wavelength that
# is consistent with the reverse transformation.
counter = 0
result = wavelength.copy()
refr = refraction_index(wavelength, method=method, co2=co2)
while True:
counter += 1
diff = wavelength * refr - result
if abs(diff.max().value) < precision:
break
#return wavelength * conv
if counter > maxiter:
raise RuntimeError("Reached maximum number of iterations "
"without reaching desired precision level.")
result += diff
refr = refraction_index(result, method=method, co2=co2)
else:
raise ValueError("Method must be one of " + ", ".join(VALID_SCHEMES))
return wavelength * refr
|
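The 'iteration' branch is a fixed-point inversion of the refraction index. A self-contained sketch of the same loop, with a toy constant index standing in for refraction_index (the refr_of callable is this sketch's own):
import numpy as np

def invert_by_iteration(air_wl, refr_of, precision=1e-12, maxiter=30):
    result = np.array(air_wl, dtype=float, copy=True)
    refr = refr_of(result)
    for _ in range(maxiter):
        diff = air_wl * refr - result
        if np.max(np.abs(diff)) < precision:
            return air_wl * refr
        result += diff
        refr = refr_of(result)
    raise RuntimeError("no convergence within maxiter iterations")

# toy index of refraction: a constant 1.00028, roughly air at optical wavelengths
print(invert_by_iteration(np.array([5000.0]), lambda wl: np.full_like(wl, 1.00028)))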
56,228 |
def filter_objects(objects, iou_threshold, prob_threshold):
# Filtering overlapping boxes with respect to the --iou_threshold CLI parameter
objects = sorted(objects, key=lambda obj : obj['confidence'], reverse=True)
for i in range(len(objects)):
if objects[i]['confidence'] == 0:
continue
for j in range(i + 1, len(objects)):
# We perform IOU only on objects of same class
if(objects[i]['class_id'] != objects[j]['class_id']): continue
if intersection_over_union(objects[i], objects[j]) > iou_threshold:
objects[j]['confidence'] = 0
return tuple(obj for obj in objects if obj['confidence'] >= prob_threshold)
|
def filter_objects(objects, iou_threshold, prob_threshold):
# Filtering overlapping boxes with respect to the --iou_threshold CLI parameter
objects = sorted(objects, key=lambda obj : obj['confidence'], reverse=True)
for i in range(len(objects)):
if objects[i]['confidence'] == 0:
continue
for j in range(i + 1, len(objects)):
# We perform IOU only on objects of same class
if objects[i]['class_id'] != objects[j]['class_id']:
continue
if intersection_over_union(objects[i], objects[j]) > iou_threshold:
objects[j]['confidence'] = 0
return tuple(obj for obj in objects if obj['confidence'] >= prob_threshold)
|
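To exercise the class-aware NMS above end to end, an intersection_over_union for the detection dicts is needed. A toy axis-aligned version follows; the xmin/ymin/xmax/ymax keys are an assumption of this sketch rather than something the demo prescribes, and filter_objects is the function above:
def intersection_over_union(a, b):
    # IoU of two axis-aligned boxes given as dicts with xmin/ymin/xmax/ymax.
    ix = max(0, min(a['xmax'], b['xmax']) - max(a['xmin'], b['xmin']))
    iy = max(0, min(a['ymax'], b['ymax']) - max(a['ymin'], b['ymin']))
    inter = ix * iy
    union = ((a['xmax'] - a['xmin']) * (a['ymax'] - a['ymin'])
             + (b['xmax'] - b['xmin']) * (b['ymax'] - b['ymin']) - inter)
    return inter / union if union else 0.0

boxes = [
    dict(class_id=0, confidence=0.9, xmin=0, ymin=0, xmax=10, ymax=10),
    dict(class_id=0, confidence=0.6, xmin=1, ymin=1, xmax=10, ymax=10),  # suppressed (IoU 0.81)
    dict(class_id=1, confidence=0.7, xmin=0, ymin=0, xmax=10, ymax=10),  # different class, kept
]
print(filter_objects(boxes, iou_threshold=0.5, prob_threshold=0.5))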
30,570 |
def get_runnable_tests(tests_num, conf=None, id_set=None, server_version='0'):
"""Gets runnable tests for the server version"""
if not id_set:
with open("./Tests/id_set.json", 'r') as conf_file:
id_set = json.load(conf_file)
if not conf:
with open("./Tests/conf.json", 'r') as conf_file:
conf = json.load(conf_file)
tests = set([])
test_ids = get_test_ids(conf=conf)[0]
rand = random.Random(time.time())
while len(tests) < tests_num:
test = rand.choice(test_ids)
if is_test_runnable(test, id_set, conf, server_version):
tests.add(test)
return tests
|
def get_random_tests(tests_num, conf=None, id_set=None, server_version='0'):
"""Gets runnable tests for the server version"""
if not id_set:
with open("./Tests/id_set.json", 'r') as conf_file:
id_set = json.load(conf_file)
if not conf:
with open("./Tests/conf.json", 'r') as conf_file:
conf = json.load(conf_file)
tests = set([])
test_ids = get_test_ids(conf=conf)[0]
rand = random.Random(time.time())
while len(tests) < tests_num:
test = rand.choice(test_ids)
if is_test_runnable(test, id_set, conf, server_version):
tests.add(test)
return tests
|
34,108 |
def train_model(config):
score = config["model_id"]
# Import model libraries, etc...
# Load data and train model code here...
# Return final stats. You can also return intermediate progress
# using Tune if needed.
# To return your model, you could write it to storage and return its
# URI in this dict, or return it as a tune Checkpoint:
# https://docs.ray.io/en/latest/tune/tutorials/tune-checkpoints.html
return {"score": score, "other_data": ...}
# __step1_end__
|
def train_model(config):
score = config["model_id"]
# Import model libraries, etc...
# Load data and train model code here...
# Return final stats. You can also return intermediate progress
# using ray.air.session.report() if needed.
# To return your model, you could write it to storage and return its
# URI in this dict, or return it as a tune Checkpoint:
# https://docs.ray.io/en/latest/tune/tutorials/tune-checkpoints.html
return {"score": score, "other_data": ...}
# __step1_end__
|
14,349 |
def unpackPStrings(data, nbStrings):
strings = []
index = 0
while nbStrings > 0:
length = byteord(data[index])
strings.append(tostr(data[index+1:index+1+length], encoding="latin1"))
index = index + 1 + length
nbStrings = nbStrings - 1
dataLen = len(data)
if (index < dataLen):
log.warning("%d extra bytes in post.stringData array", dataLen - index)
return strings
|
def unpackPStrings(data, nbStrings):
strings = []
index = 0
while nbStrings > 0:
length = byteord(data[index])
strings.append(tostr(data[index+1:index+1+length], encoding="latin1"))
index = index + 1 + length
nbStrings -= 1
dataLen = len(data)
if (index < dataLen):
log.warning("%d extra bytes in post.stringData array", dataLen - index)
return strings
|
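A Pascal string here is a length byte followed by that many latin-1 bytes. A self-contained round-trip sketch of the same layout, without the fontTools helpers (byteord, tostr, log) that the function above relies on:
def pack_pstrings(strings):
    # length byte + latin-1 payload per string: the layout unpackPStrings walks
    return b"".join(bytes([len(s.encode("latin1"))]) + s.encode("latin1") for s in strings)

def unpack_pstrings(data, nb):
    out, index = [], 0
    for _ in range(nb):
        length = data[index]
        out.append(data[index + 1:index + 1 + length].decode("latin1"))
        index += 1 + length
    return out

print(unpack_pstrings(pack_pstrings(["alpha", "beta"]), 2))  # ['alpha', 'beta']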
31,591 |
def upload_core_packs_config(storage_bucket: Any, build_number: str, index_folder_path: str,
artifacts_dir: Optional[str] = None):
"""Uploads corepacks.json file configuration to bucket. Corepacks file includes core packs for server installation.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
build_number (str): circleCI build number.
index_folder_path (str): The index folder path.
        artifacts_dir: The CI artifacts directory to upload the corepacks.json to.
"""
core_packs_public_urls = []
found_core_packs = set()
for pack in os.scandir(index_folder_path):
if pack.is_dir() and pack.name in GCPConfig.CORE_PACKS_LIST:
pack_metadata_path = os.path.join(index_folder_path, pack.name, Pack.METADATA)
if not os.path.exists(pack_metadata_path):
logging.critical(f"{pack.name} pack {Pack.METADATA} is missing in {GCPConfig.INDEX_NAME}")
sys.exit(1)
with open(pack_metadata_path, 'r') as metadata_file:
metadata = json.load(metadata_file)
pack_current_version = metadata.get('currentVersion', Pack.PACK_INITIAL_VERSION)
core_pack_relative_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, pack.name,
pack_current_version, f"{pack.name}.zip")
core_pack_public_url = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name, core_pack_relative_path)
if not storage_bucket.blob(core_pack_relative_path).exists():
logging.critical(f"{pack.name} pack does not exist under {core_pack_relative_path} path")
sys.exit(1)
core_packs_public_urls.append(core_pack_public_url)
found_core_packs.add(pack.name)
if len(found_core_packs) != len(GCPConfig.CORE_PACKS_LIST):
missing_core_packs = set(GCPConfig.CORE_PACKS_LIST) ^ found_core_packs
logging.critical(f"Number of defined core packs are: {len(GCPConfig.CORE_PACKS_LIST)}")
logging.critical(f"Actual number of found core packs are: {len(found_core_packs)}")
logging.critical(f"Missing core packs are: {missing_core_packs}")
sys.exit(1)
corepacks_json_path = os.path.join(index_folder_path, f'{GCPConfig.CORE_PACK_FILE_NAME}')
with open(corepacks_json_path, 'w+') as corepacks_file:
# construct core pack data with public gcs urls
core_packs_data = {
'corePacks': core_packs_public_urls,
'buildNumber': build_number
}
json.dump(core_packs_data, corepacks_file, indent=4)
if artifacts_dir:
# Store corepacks.json in CircleCI artifacts
shutil.copyfile(
os.path.join(index_folder_path, f'{GCPConfig.CORE_PACK_FILE_NAME}'),
os.path.join(artifacts_dir, f'{GCPConfig.CORE_PACK_FILE_NAME}'),
)
logging.success(f"Finished coping {GCPConfig.CORE_PACK_FILE_NAME} to artifacts.")
|
def upload_core_packs_config(storage_bucket: Any, build_number: str, index_folder_path: str,
artifacts_dir: Optional[str] = None):
"""Uploads corepacks.json file configuration to bucket. Corepacks file includes core packs for server installation.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
build_number (str): circleCI build number.
index_folder_path (str): The index folder path.
        artifacts_dir: The CI artifacts directory to upload the corepacks.json to.
"""
core_packs_public_urls = []
found_core_packs = set()
for pack in os.scandir(index_folder_path):
if pack.is_dir() and pack.name in GCPConfig.CORE_PACKS_LIST:
pack_metadata_path = os.path.join(index_folder_path, pack.name, Pack.METADATA)
if not os.path.exists(pack_metadata_path):
logging.critical(f"{pack.name} pack {Pack.METADATA} is missing in {GCPConfig.INDEX_NAME}")
sys.exit(1)
with open(pack_metadata_path, 'r') as metadata_file:
metadata = json.load(metadata_file)
pack_current_version = metadata.get('currentVersion', Pack.PACK_INITIAL_VERSION)
core_pack_relative_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, pack.name,
pack_current_version, f"{pack.name}.zip")
core_pack_public_url = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name, core_pack_relative_path)
if not storage_bucket.blob(core_pack_relative_path).exists():
logging.critical(f"{pack.name} pack does not exist under {core_pack_relative_path} path")
sys.exit(1)
core_packs_public_urls.append(core_pack_public_url)
found_core_packs.add(pack.name)
if len(found_core_packs) != len(GCPConfig.CORE_PACKS_LIST):
missing_core_packs = set(GCPConfig.CORE_PACKS_LIST) ^ found_core_packs
logging.critical(f"Number of defined core packs are: {len(GCPConfig.CORE_PACKS_LIST)}")
logging.critical(f"Actual number of found core packs are: {len(found_core_packs)}")
logging.critical(f"Missing core packs are: {missing_core_packs}")
sys.exit(1)
corepacks_json_path = os.path.join(index_folder_path, f'{GCPConfig.CORE_PACK_FILE_NAME}')
with open(corepacks_json_path, 'w+') as corepacks_file:
# construct core pack data with public gcs urls
core_packs_data = {
'corePacks': core_packs_public_urls,
'buildNumber': build_number
}
json.dump(core_packs_data, corepacks_file, indent=4)
if artifacts_dir:
# Store corepacks.json in CircleCI artifacts
shutil.copyfile(
os.path.join(index_folder_path, f'{GCPConfig.CORE_PACK_FILE_NAME}'),
os.path.join(artifacts_dir, f'{GCPConfig.CORE_PACK_FILE_NAME}'),
)
logging.success(f"Finished copying {GCPConfig.CORE_PACK_FILE_NAME} to artifacts dir.")
|
29,724 |
def get_device_mig_mode(device):
"""Get MIG mode for a device index or UUID
Parameters
----------
device: ``int``, ``bytes`` or``str``
An ``int`` with the index of a GPU, or ``bytes`` or ``str`` with the UUID
of a CUDA (either GPU or MIG) device.
Returns
-------
out: ``list``
A ``list`` with two integers ``[current_mode, pending_mode]``.
"""
init_once()
try:
device_index = int(device)
handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
except ValueError:
uuid = device if isinstance(device, bytes) else bytes(device, "utf-8")
handle = pynvml.nvmlDeviceGetHandleByUUID(uuid)
try:
return pynvml.nvmlDeviceGetMigMode(handle)
except pynvml.NVMLError_NotSupported:
return [0, 0]
|
def get_device_mig_mode(device):
"""Get MIG mode for a device index or UUID
Parameters
----------
device : int, bytes, or str
An ``int`` with the index of a GPU, or ``bytes`` or ``str`` with the UUID
of a CUDA (either GPU or MIG) device.
Returns
-------
out: ``list``
A ``list`` with two integers ``[current_mode, pending_mode]``.
"""
init_once()
try:
device_index = int(device)
handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
except ValueError:
uuid = device if isinstance(device, bytes) else bytes(device, "utf-8")
handle = pynvml.nvmlDeviceGetHandleByUUID(uuid)
try:
return pynvml.nvmlDeviceGetMigMode(handle)
except pynvml.NVMLError_NotSupported:
return [0, 0]
|
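A hedged usage sketch: it needs an NVIDIA driver plus the pynvml package, and compares against pynvml's NVML_DEVICE_MIG_ENABLE constant (assumed available in the installed pynvml):
current, pending = get_device_mig_mode(0)   # look up GPU 0 by index
print("MIG enabled now:", current == pynvml.NVML_DEVICE_MIG_ENABLE)
print("MIG enabled after next reset:", pending == pynvml.NVML_DEVICE_MIG_ENABLE)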
48,211 |
def get_array(module):
"""Return storage array object or fail"""
global VXOS_VERSION
array = module.params['array']
user = module.params.get('user', None)
password = module.params.get('password', None)
if user and password:
system = VexataAPIProxy(array, user, password, verify_cert=False)
elif environ.get('VEXATA_USER') and environ.get('VEXATA_PASSWORD'):
user = environ.get('VEXATA_USER')
password = environ.get('VEXATA_PASSWORD')
system = VexataAPIProxy(array, user, password, verify_cert=False)
else:
module.fail_json(msg='The user/password are required to be passed in to '
'the module as arguments or by setting the '
'VEXATA_USER and VEXATA_PASSWORD environment variables.')
try:
if system.test_connection():
VXOS_VERSION = get_version(system.iocs())
return system
else:
module.fail_json(msg='Test connection to array failed.')
except Exception as e:
module.fail_json(msg='Vexata API access failed: {0}'.format(str(e)))
|
def get_array(module):
"""Return storage array object or fail"""
global VXOS_VERSION
array = module.params['array']
user = module.params.get('user', None)
password = module.params.get('password', None)
if user and password:
system = VexataAPIProxy(array, user, password, verify_cert=False)
elif environ.get('VEXATA_USER') and environ.get('VEXATA_PASSWORD'):
user = environ.get('VEXATA_USER')
password = environ.get('VEXATA_PASSWORD')
system = VexataAPIProxy(array, user, password, verify_cert=False)
else:
module.fail_json(msg='The user/password are required to be passed in to '
'the module as arguments or by setting the '
'VEXATA_USER and VEXATA_PASSWORD environment variables.')
try:
if system.test_connection():
VXOS_VERSION = get_version(system.iocs())
return system
else:
module.fail_json(msg='Test connection to array failed.')
except Exception as e:
from ansible.module_utils._text import to_native
...
module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e)))
|
14,306 |
def merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel):
vsindex_dict = {}
vsindex_by_key = {}
varDataList = []
masterSupports = []
default_charstrings = top_dicts[0].CharStrings
for gid, gname in enumerate(glyphOrder):
all_cs = [
_get_cs(td.CharStrings, gname)
for td in top_dicts]
if len([gs for gs in all_cs if gs is not None]) == 1:
continue
model, model_cs = masterModel.getSubModel(all_cs)
# create the first pass CFF2 charstring, from
# the default charstring.
default_charstring = model_cs[0]
var_pen = CFF2CharStringMergePen([], gname, num_masters, 0)
# We need to override outlineExtractor because these
# charstrings do have widths in the 'program'; we need to drop these
# values rather than post assertion error for them.
default_charstring.outlineExtractor = MergeOutlineExtractor
default_charstring.draw(var_pen)
# Add the coordinates from all the other regions to the
# blend lists in the CFF2 charstring.
region_cs = model_cs[1:]
for region_idx, region_charstring in enumerate(region_cs, start=1):
var_pen.restart(region_idx)
region_charstring.outlineExtractor = MergeOutlineExtractor
region_charstring.draw(var_pen)
# Collapse each coordinate list to a blend operator and its args.
new_cs = var_pen.getCharString(
private=default_charstring.private,
globalSubrs=default_charstring.globalSubrs,
var_model=model, optimize=True)
default_charstrings[gname] = new_cs
if (not var_pen.seen_moveto) or ('blend' not in new_cs.program):
# If this is not a marking glyph, or if there are no blend
# arguments, then we can use vsindex 0. No need to
# check if we need a new vsindex.
continue
# If the charstring required a new model, create
# a VarData table to go with, and set vsindex.
key = tuple(v is not None for v in all_cs)
try:
vsindex = vsindex_by_key[key]
except KeyError:
vsindex = _add_new_vsindex(model, key,masterSupports, vsindex_dict,
vsindex_by_key, varDataList)
# We do not need to check for an existing new_cs.private.vsindex,
# as we know it doesn't exist yet.
if vsindex != 0:
new_cs.program[:0] = [vsindex, 'vsindex']
# If there is no variation in any of the charstrings, then vsindex_dict
    # never gets built. This could still be needed if there is variation
    # in the PrivateDict, so we will build the default data for vsindex = 0.
if not vsindex_dict:
key = (True)*num_masters
_add_new_vsindex(model, key, masterSupports, vsindex_dict,
vsindex_by_key, varDataList)
cvData = CVarData(varDataList=varDataList, masterSupports=masterSupports,
vsindex_dict=vsindex_dict)
# XXX To do: optimize use of vsindex between the PrivateDicts and
# charstrings
return cvData
|
def merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel):
vsindex_dict = {}
vsindex_by_key = {}
varDataList = []
masterSupports = []
default_charstrings = top_dicts[0].CharStrings
for gid, gname in enumerate(glyphOrder):
all_cs = [
_get_cs(td.CharStrings, gname)
for td in top_dicts]
if len([gs for gs in all_cs if gs is not None]) == 1:
continue
model, model_cs = masterModel.getSubModel(all_cs)
# create the first pass CFF2 charstring, from
# the default charstring.
default_charstring = model_cs[0]
var_pen = CFF2CharStringMergePen([], gname, num_masters, 0)
# We need to override outlineExtractor because these
# charstrings do have widths in the 'program'; we need to drop these
# values rather than post assertion error for them.
default_charstring.outlineExtractor = MergeOutlineExtractor
default_charstring.draw(var_pen)
# Add the coordinates from all the other regions to the
# blend lists in the CFF2 charstring.
region_cs = model_cs[1:]
for region_idx, region_charstring in enumerate(region_cs, start=1):
var_pen.restart(region_idx)
region_charstring.outlineExtractor = MergeOutlineExtractor
region_charstring.draw(var_pen)
# Collapse each coordinate list to a blend operator and its args.
new_cs = var_pen.getCharString(
private=default_charstring.private,
globalSubrs=default_charstring.globalSubrs,
var_model=model, optimize=True)
default_charstrings[gname] = new_cs
if (not var_pen.seen_moveto) or ('blend' not in new_cs.program):
# If this is not a marking glyph, or if there are no blend
# arguments, then we can use vsindex 0. No need to
# check if we need a new vsindex.
continue
# If the charstring required a new model, create
# a VarData table to go with, and set vsindex.
key = tuple(v is not None for v in all_cs)
try:
vsindex = vsindex_by_key[key]
except KeyError:
vsindex = _add_new_vsindex(model, key,masterSupports, vsindex_dict,
vsindex_by_key, varDataList)
# We do not need to check for an existing new_cs.private.vsindex,
# as we know it doesn't exist yet.
if vsindex != 0:
new_cs.program[:0] = [vsindex, 'vsindex']
# If there is no variation in any of the charstrings, then vsindex_dict
    # never gets built. This could still be needed if there is variation
    # in the PrivateDict, so we will build the default data for vsindex = 0.
if not vsindex_dict:
key = (True,) * num_masters
_add_new_vsindex(model, key, masterSupports, vsindex_dict,
vsindex_by_key, varDataList)
cvData = CVarData(varDataList=varDataList, masterSupports=masterSupports,
vsindex_dict=vsindex_dict)
# XXX To do: optimize use of vsindex between the PrivateDicts and
# charstrings
return cvData
|
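The one-character difference in the fallback key above is easy to miss, so here is why it matters: without the trailing comma, the parenthesized True is just a bool, and multiplying it by num_masters yields an int rather than a per-master tuple of flags.
num_masters = 3
print((True) * num_masters)    # 3 -- an int, not a key of per-master flags
print((True,) * num_masters)   # (True, True, True)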
41,891 |
def test_generate_contour_plot_for_few_observations() -> None:
study = prepare_study_with_trials(less_than_two=True)
trials = study.trials
# `x_axis` has one observation.
params = ["param_a", "param_b"]
contour, scatter = _generate_contour_subplot(
trials, params[0], params[1], StudyDirection.MINIMIZE, {}
)
assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None
# `y_axis` has one observation.
params = ["param_b", "param_a"]
contour, scatter = _generate_contour_subplot(
trials, params[0], params[1], StudyDirection.MINIMIZE, {}
)
assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None
|
def test_generate_contour_plot_for_few_observations() -> None:
study = prepare_study_with_trials(less_than_two=True)
trials = study.trials
# `x_axis` has one observation.
params = ["param_a", "param_b"]
contour, scatter = _generate_contour_subplot(
trials, params[0], params[1], StudyDirection.MINIMIZE
)
assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None
# `y_axis` has one observation.
params = ["param_b", "param_a"]
contour, scatter = _generate_contour_subplot(
trials, params[0], params[1], StudyDirection.MINIMIZE, {}
)
assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None
|
6,147 |
def RPCClientSelector(*args, **kwargs): # We use same interface as RPCClient
"""
  Select the correct RPCClient, instantiate it, and return it
:param args: URL can be just "system/service" or "dips://domain:port/system/service"
"""
# We detect if we need to use a specific class for the HTTPS client
if 'httpsClient' in kwargs:
TornadoRPCClient = kwargs.pop('httpsClient')
else:
TornadoRPCClient = TornadoClient
  # We have to make URL resolution BEFORE the RPCClient or TornadoClient to determine which one we want to use
# URL is defined as first argument (called serviceName) in RPCClient
try:
serviceName = args[0]
gLogger.verbose("Trying to autodetect client for %s" % serviceName)
if not isURL(serviceName):
completeUrl = getServiceURL(serviceName)
gLogger.verbose("URL resolved: %s" % completeUrl)
else:
completeUrl = serviceName
if completeUrl.startswith("http"):
gLogger.info("Using HTTPS for service %s" % serviceName)
rpc = TornadoRPCClient(*args, **kwargs)
else:
rpc = RPCClient(*args, **kwargs)
except Exception:
# If anything went wrong in the resolution, we return default RPCClient
# So the comportement is exactly the same as before implementation of Tornado
rpc = RPCClient(*args, **kwargs)
return rpc
|
def RPCClientSelector(*args, **kwargs): # We use same interface as RPCClient
"""
  Select the correct RPCClient, instantiate it, and return it
:param args: URL can be just "system/service" or "dips://domain:port/system/service"
"""
# We detect if we need to use a specific class for the HTTPS client
if 'httpsClient' in kwargs:
TornadoRPCClient = kwargs.pop('httpsClient')
else:
TornadoRPCClient = TornadoClient
  # We have to make URL resolution BEFORE the RPCClient or TornadoClient to determine which one we want to use
# URL is defined as first argument (called serviceName) in RPCClient
try:
serviceName = args[0]
gLogger.verbose("Trying to autodetect client for %s" % serviceName)
if not isURL(serviceName):
completeUrl = getServiceURL(serviceName)
gLogger.verbose("URL resolved: %s" % completeUrl)
else:
completeUrl = serviceName
if completeUrl.startswith("http"):
gLogger.info("Using HTTPS for service %s" % serviceName)
rpc = TornadoRPCClient(*args, **kwargs)
else:
rpc = RPCClient(*args, **kwargs)
except Exception:
# If anything went wrong in the resolution, we return default RPCClient
# So the behaviour is exactly the same as before the implementation of Tornado
rpc = RPCClient(*args, **kwargs)
return rpc
|
44,141 |
def observable(fermion_ops, init_term=0, mapping="jordan_wigner", wires=None):
r"""Builds the Fermion many-body observable whose expectation value can be
measured in PennyLane.
The second-quantized operator of the Fermion many-body system can combine one-particle
and two-particle operators as in the case of electronic Hamiltonians :math:`\hat{H}`:
.. math::
\hat{H} = \sum_{\alpha, \beta} \langle \alpha \vert \hat{t}^{(1)} +
\cdots + \hat{t}^{(n)} \vert \beta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta
+ \frac{1}{2} \sum_{\alpha, \beta, \gamma, \delta}
\langle \alpha, \beta \vert \hat{v}^{(1)} + \cdots + \hat{v}^{(n)}
\vert \gamma, \delta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta^\dagger
\hat{c}_\gamma \hat{c}_\delta
In the latter equations the indices :math:`\alpha, \beta, \gamma, \delta` run over the
basis of single-particle states. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}`
are the particle creation and annihilation operators, respectively.
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` denotes the matrix element of
the single-particle operator :math:`\hat{t}` entering the observable. For example,
in electronic structure calculations, this is the case for: the kinetic energy operator,
the nuclei Coulomb potential, or any other external fields included in the Hamiltonian.
On the other hand, :math:`\langle \alpha, \beta \vert \hat{v} \vert \gamma, \delta \rangle`
denotes the matrix element of the two-particle operator :math:`\hat{v}`, for example, the
Coulomb interaction between the electrons.
- The observable is built by adding the operators
:math:`\sum_{\alpha, \beta} t_{\alpha\beta}^{(i)}
\hat{c}_\alpha^\dagger \hat{c}_\beta` and
:math:`\frac{1}{2} \sum_{\alpha, \beta, \gamma, \delta}
v_{\alpha\beta\gamma\delta}^{(i)}
\hat{c}_\alpha^\dagger \hat{c}_\beta^\dagger \hat{c}_\gamma \hat{c}_\delta`.
- Second-quantized operators contributing to the
many-body observable must be represented using the `FermionOperator
<https://github.com/quantumlib/OpenFermion/blob/master/docs/
tutorials/intro_to_openfermion.ipynb>`_ data structure as implemented in OpenFermion.
See the functions :func:`~.one_particle` and :func:`~.two_particle` to build the
FermionOperator representations of one-particle and two-particle operators.
- The function uses tools of `OpenFermion <https://github.com/quantumlib/OpenFermion>`_
to map the resulting fermionic Hamiltonian to the basis of Pauli matrices via the
Jordan-Wigner or Bravyi-Kitaev transformation. Finally, the qubit operator is converted
to a PennyLane observable by the function :func:`~.convert_observable`.
Args:
fermion_ops (list[FermionOperator]): list containing the FermionOperator data structures
representing the one-particle and/or two-particle operators entering the many-body
observable
init_term (float): Any quantity required to initialize the many-body observable. For
example, this can be used to pass the nuclear-nuclear repulsion energy :math:`V_{nn}`
which is typically included in the electronic Hamiltonian of molecules.
mapping (str): Specifies the fermion-to-qubit mapping. Input values can
be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
pennylane.Hamiltonian: the fermionic-to-qubit transformed observable
**Example**
>>> t = FermionOperator("0^ 0", 0.5) + FermionOperator("1^ 1", 0.25)
>>> v = FermionOperator("1^ 0^ 0 1", -0.15) + FermionOperator("2^ 0^ 2 0", 0.3)
>>> print(observable([t, v], mapping="jordan_wigner"))
(0.2625) [I0]
+ (-0.1375) [Z0]
+ (-0.0875) [Z1]
+ (-0.0375) [Z0 Z1]
+ (0.075) [Z2]
+ (-0.075) [Z0 Z2]
"""
if mapping.strip().lower() not in ("jordan_wigner", "bravyi_kitaev"):
raise TypeError(
f"The '{mapping}' transformation is not available. \n "
f"Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'."
)
# Initialize the FermionOperator
mb_obs = openfermion.ops.FermionOperator("") * init_term
for ops in fermion_ops:
if not isinstance(ops, openfermion.ops.FermionOperator):
raise TypeError(
f"Elements in the lists are expected to be of type 'FermionOperator'; got {type(ops)}"
)
mb_obs += ops
# Map the fermionic operator to a qubit operator
if mapping.strip().lower() == "bravyi_kitaev":
return qml.qchem.convert.import_operator(
openfermion.transforms.bravyi_kitaev(mb_obs), wires=wires
)
return qml.qchem.convert.import_operator(
openfermion.transforms.jordan_wigner(mb_obs), wires=wires
)
|
def observable(fermion_ops, init_term=0, mapping="jordan_wigner", wires=None):
r"""Builds the fermionic many-body observable whose expectation value can be
measured in PennyLane.
The second-quantized operator of the Fermion many-body system can combine one-particle
and two-particle operators as in the case of electronic Hamiltonians :math:`\hat{H}`:
.. math::
\hat{H} = \sum_{\alpha, \beta} \langle \alpha \vert \hat{t}^{(1)} +
\cdots + \hat{t}^{(n)} \vert \beta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta
+ \frac{1}{2} \sum_{\alpha, \beta, \gamma, \delta}
\langle \alpha, \beta \vert \hat{v}^{(1)} + \cdots + \hat{v}^{(n)}
\vert \gamma, \delta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta^\dagger
\hat{c}_\gamma \hat{c}_\delta
In the latter equations the indices :math:`\alpha, \beta, \gamma, \delta` run over the
basis of single-particle states. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}`
are the particle creation and annihilation operators, respectively.
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` denotes the matrix element of
the single-particle operator :math:`\hat{t}` entering the observable. For example,
in electronic structure calculations, this is the case for: the kinetic energy operator,
the nuclei Coulomb potential, or any other external fields included in the Hamiltonian.
On the other hand, :math:`\langle \alpha, \beta \vert \hat{v} \vert \gamma, \delta \rangle`
denotes the matrix element of the two-particle operator :math:`\hat{v}`, for example, the
Coulomb interaction between the electrons.
- The observable is built by adding the operators
:math:`\sum_{\alpha, \beta} t_{\alpha\beta}^{(i)}
\hat{c}_\alpha^\dagger \hat{c}_\beta` and
:math:`\frac{1}{2} \sum_{\alpha, \beta, \gamma, \delta}
v_{\alpha\beta\gamma\delta}^{(i)}
\hat{c}_\alpha^\dagger \hat{c}_\beta^\dagger \hat{c}_\gamma \hat{c}_\delta`.
- Second-quantized operators contributing to the
many-body observable must be represented using the `FermionOperator
<https://github.com/quantumlib/OpenFermion/blob/master/docs/
tutorials/intro_to_openfermion.ipynb>`_ data structure as implemented in OpenFermion.
See the functions :func:`~.one_particle` and :func:`~.two_particle` to build the
FermionOperator representations of one-particle and two-particle operators.
- The function uses tools of `OpenFermion <https://github.com/quantumlib/OpenFermion>`_
to map the resulting fermionic Hamiltonian to the basis of Pauli matrices via the
Jordan-Wigner or Bravyi-Kitaev transformation. Finally, the qubit operator is converted
to a PennyLane observable by the function :func:`~.convert_observable`.
Args:
fermion_ops (list[FermionOperator]): list containing the FermionOperator data structures
representing the one-particle and/or two-particle operators entering the many-body
observable
init_term (float): Any quantity required to initialize the many-body observable. For
example, this can be used to pass the nuclear-nuclear repulsion energy :math:`V_{nn}`
which is typically included in the electronic Hamiltonian of molecules.
mapping (str): Specifies the fermion-to-qubit mapping. Input values can
be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
pennylane.Hamiltonian: the fermionic-to-qubit transformed observable
**Example**
>>> t = FermionOperator("0^ 0", 0.5) + FermionOperator("1^ 1", 0.25)
>>> v = FermionOperator("1^ 0^ 0 1", -0.15) + FermionOperator("2^ 0^ 2 0", 0.3)
>>> print(observable([t, v], mapping="jordan_wigner"))
(0.2625) [I0]
+ (-0.1375) [Z0]
+ (-0.0875) [Z1]
+ (-0.0375) [Z0 Z1]
+ (0.075) [Z2]
+ (-0.075) [Z0 Z2]
"""
if mapping.strip().lower() not in ("jordan_wigner", "bravyi_kitaev"):
raise TypeError(
f"The '{mapping}' transformation is not available. \n "
f"Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'."
)
# Initialize the FermionOperator
mb_obs = openfermion.ops.FermionOperator("") * init_term
for ops in fermion_ops:
if not isinstance(ops, openfermion.ops.FermionOperator):
raise TypeError(
f"Elements in the lists are expected to be of type 'FermionOperator'; got {type(ops)}"
)
mb_obs += ops
# Map the fermionic operator to a qubit operator
if mapping.strip().lower() == "bravyi_kitaev":
return qml.qchem.convert.import_operator(
openfermion.transforms.bravyi_kitaev(mb_obs), wires=wires
)
return qml.qchem.convert.import_operator(
openfermion.transforms.jordan_wigner(mb_obs), wires=wires
)
|
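For orientation, a small sketch of the fermionic-to-qubit step the pair above relies on, assuming OpenFermion is installed (term ordering in the printed output may differ between versions):

from openfermion.ops import FermionOperator
from openfermion.transforms import jordan_wigner

# One- and two-particle terms, mirroring the docstring example
t = FermionOperator("0^ 0", 0.5) + FermionOperator("1^ 1", 0.25)
v = FermionOperator("1^ 0^ 0 1", -0.15)

# Accumulate into one many-body operator, optionally shifted by a constant term
mb_obs = FermionOperator("") * 0.0 + t + v

# Map to a qubit operator via the Jordan-Wigner transformation
print(jordan_wigner(mb_obs))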
5,787 |
def scale(
sample: npt.ArrayLike,
l_bounds: npt.ArrayLike,
u_bounds: npt.ArrayLike,
*,
reverse: bool = False
) -> np.ndarray:
r"""Sample scaling from unit hypercube to different bounds.
To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
with :math:`a` the lower bounds and :math:`b` the upper bounds.
The following transformation is used:
.. math::
(b - a) \cdot \text{sample} + a
Parameters
----------
sample : array_like (n, d)
Sample to scale.
l_bounds, u_bounds : array_like (d,)
Lower and upper bounds (resp. :math:`a`, :math:`b`) of transformed
data. If `reverse` is True, range of the original data to transform
to the unit hypercube.
reverse : bool, optional
Reverse the transformation from different bounds to the unit hypercube.
Default is False.
Returns
-------
sample : array_like (n, d)
Scaled sample.
Examples
--------
Transform 3 samples in the unit hypercube to bounds:
>>> from scipy.stats import qmc
>>> l_bounds = [-2, 0]
>>> u_bounds = [6, 5]
>>> sample = [[0.5 , 0.75],
... [0.5 , 0.5],
... [0.75, 0.25]]
>>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)
>>> sample_scaled
array([[2. , 3.75],
[2. , 2.5 ],
[4. , 1.25]])
And convert back to the unit hypercube:
>>> sample_ = qmc.scale(sample_scaled, l_bounds, u_bounds, reverse=True)
>>> sample_
array([[0.5 , 0.75],
[0.5 , 0.5 ],
[0.75, 0.25]])
"""
sample = np.asarray(sample)
lower = np.atleast_1d(l_bounds)
upper = np.atleast_1d(u_bounds)
# Checking bounds and sample
if not sample.ndim == 2:
raise ValueError('Sample is not a 2D array')
lower, upper = np.broadcast_arrays(lower, upper)
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent a < b')
if len(lower) != sample.shape[1]:
raise ValueError('Sample dimension is different than bounds dimension')
if not reverse:
# Checking that sample is within the hypercube
if not ((sample.max() <= 1.) and (sample.min() >= 0.)):
raise ValueError('Sample is not in unit hypercube')
return sample * (upper - lower) + lower
else:
# Checking that sample is within the bounds
if not (np.all(sample >= lower) and np.all(sample <= upper)):
raise ValueError('Sample is out of bounds')
return (sample - lower) / (upper - lower)
|
def scale(
sample: npt.ArrayLike,
l_bounds: npt.ArrayLike,
u_bounds: npt.ArrayLike,
*,
reverse: bool = False
) -> np.ndarray:
r"""Sample scaling from unit hypercube to different bounds.
To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
with :math:`a` the lower bounds and :math:`b` the upper bounds.
The following transformation is used:
.. math::
(b - a) \cdot \text{sample} + a
Parameters
----------
sample : array_like (n, d)
Sample to scale.
l_bounds, u_bounds : array_like (d,)
Lower and upper bounds (resp. :math:`a`, :math:`b`) of transformed
data. If `reverse` is True, range of the original data to transform
to the unit hypercube.
reverse : bool, optional
Reverse the transformation from different bounds to the unit hypercube.
Default is False.
Returns
-------
sample : array_like (n, d)
Scaled sample.
Examples
--------
Transform 3 samples in the unit hypercube to bounds:
>>> from scipy.stats import qmc
>>> l_bounds = [-2, 0]
>>> u_bounds = [6, 5]
>>> sample = [[0.5 , 0.75],
... [0.5 , 0.5],
... [0.75, 0.25]]
>>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)
>>> sample_scaled
array([[2. , 3.75],
[2. , 2.5 ],
[4. , 1.25]])
And convert back to the unit hypercube:
>>> sample_ = qmc.scale(sample_scaled, l_bounds, u_bounds, reverse=True)
>>> sample_
array([[0.5 , 0.75],
[0.5 , 0.5 ],
[0.75, 0.25]])
"""
sample = np.asarray(sample)
lower = np.atleast_1d(l_bounds)
upper = np.atleast_1d(u_bounds)
# Checking bounds and sample
if not sample.ndim == 2:
raise ValueError('Sample is not a 2D array')
lower, upper = np.broadcast_arrays(lower, upper)
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent a < b')
if len(lower) != sample.shape[1]:
raise ValueError('Sample dimension is different than bounds dimension')
if not reverse:
# Checking that sample is within the hypercube
if (sample.max() > 1.) or (sample.min() < 0.):
raise ValueError('Sample is not in unit hypercube')
return sample * (upper - lower) + lower
else:
# Checking that sample is within the bounds
if not (np.all(sample >= lower) and np.all(sample <= upper)):
raise ValueError('Sample is out of bounds')
return (sample - lower) / (upper - lower)
|
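The affine map in the pair above is easy to sanity-check with plain NumPy; a short sketch of the forward and reverse transform, independent of SciPy:

import numpy as np

sample = np.array([[0.5, 0.75], [0.5, 0.5], [0.75, 0.25]])
lower, upper = np.array([-2.0, 0.0]), np.array([6.0, 5.0])

scaled = sample * (upper - lower) + lower       # unit hypercube -> [a, b)
recovered = (scaled - lower) / (upper - lower)  # [a, b) -> unit hypercube

assert np.allclose(recovered, sample)
print(scaled)   # [[2.   3.75] [2.   2.5 ] [4.   1.25]]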
21,957 |
def seq_closurePhase(SLC_list, date12_list_all, ifgram_stack, ref_phase, n, box):
"""
Input parameters:
SLC_list : list of SLC dates
date12_list_all: date12 of all the interferograms stored in the ifgramstack file
ifgram_stack: stack file
refphase : reference phase
n : connection level of the closure phase
box : bounding box for the patch
Output: cp_w : stack of wrapped sequential closure phases of connection n
"""
cp_idx = []
NSLC = len(SLC_list)
for i in range(NSLC-n):
ifgram = []
flag = True
for j in range(n):
ifgram.append('{}_{}'.format(SLC_list[i+j],SLC_list[i+j+1]))
ifgram.append('{}_{}'.format(SLC_list[i],SLC_list[i+n]))
for ifgram_name in ifgram:
if ifgram_name not in date12_list_all:
flag = False # if missing an interferogram, we won't make the corresponding closure phase
if flag:
cp_idx.append([date12_list_all.index(ifgram[j]) for j in range(n+1)])
cp_idx = np.array(cp_idx, np.int16)
cp_idx = np.unique(cp_idx, axis = 0)
num_cp = len(cp_idx)
print('Number of closure measurements expected, ', len(SLC_list)-n)
print('Number of closure measurements found, ', num_cp)
if num_cp < len(SLC_list)-n:
print('Missing interferograms, abort')
raise Exception("Some interferograms are missing")
box_width = box[2] - box[0]
box_length = box[3] - box[1]
phase = readfile.read(ifgram_stack, box=box,print_msg=False)[0]
cp_w = np.zeros((num_cp, box_length, box_width), np.float32)
for i in range(num_cp):
cp0_w = np.zeros ((box_length, box_width), np.float32)
for j in range(n):
idx = cp_idx[i,j]
cp0_w = cp0_w + phase[idx,:,:] - ref_phase[idx]
idx = cp_idx[i,n]
cp0_w = cp0_w - (phase[idx,:,:]-ref_phase[idx])
cp_w[i,:,:] = np.angle(np.exp(1j*cp0_w))
return cp_w
|
def seq_closurePhase(SLC_list, date12_list_all, ifgram_stack, ref_phase, n, box):
"""
Input parameters:
SLC_list : list of SLC dates
date12_list_all: date12 of all the interferograms stored in the ifgramstack file
ifgram_stack: stack file
refphase : reference phase
n : connection level of the closure phase
box : bounding box for the patch
Output: cp_w : stack of wrapped sequential closure phases of connection n
"""
cp_idx = []
NSLC = len(SLC_list)
for i in range(NSLC-n):
ifgram = []
flag = True
for j in range(n):
ifgram.append('{}_{}'.format(SLC_list[i+j],SLC_list[i+j+1]))
ifgram.append('{}_{}'.format(SLC_list[i],SLC_list[i+n]))
for ifgram_name in ifgram:
if ifgram_name not in date12_list_all:
flag = False # if missing an interferogram, we won't make the corresponding closure phase
if flag:
cp_idx.append([date12_list_all.index(ifgram[j]) for j in range(n+1)])
cp_idx = np.array(cp_idx, np.int16)
cp_idx = np.unique(cp_idx, axis = 0)
num_cp = len(cp_idx)
print('Number of closure measurements expected, ', NSLC-n)
print('Number of closure measurements found, ', num_cp)
if num_cp < len(SLC_list)-n:
print('Missing interferograms, abort')
raise Exception("Some interferograms are missing")
box_width = box[2] - box[0]
box_length = box[3] - box[1]
phase = readfile.read(ifgram_stack, box=box,print_msg=False)[0]
cp_w = np.zeros((num_cp, box_length, box_width), np.float32)
for i in range(num_cp):
cp0_w = np.zeros ((box_length, box_width), np.float32)
for j in range(n):
idx = cp_idx[i,j]
cp0_w = cp0_w + phase[idx,:,:] - ref_phase[idx]
idx = cp_idx[i,n]
cp0_w = cp0_w - (phase[idx,:,:]-ref_phase[idx])
cp_w[i,:,:] = np.angle(np.exp(1j*cp0_w))
return cp_w
|
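A toy NumPy check of the sequential closure-phase construction for connection level n, using synthetic per-acquisition phases instead of file input (all names here are illustrative):

import numpy as np

rng = np.random.default_rng(0)
nslc, n, shape = 6, 2, (4, 5)
acq_phase = rng.uniform(-np.pi, np.pi, (nslc,) + shape)   # one phase map per date

def ifg(i, j):
    # Interferogram between dates i and j; consistent phases give zero closure
    return acq_phase[j] - acq_phase[i]

i = 0
cp = sum(ifg(i + j, i + j + 1) for j in range(n)) - ifg(i, i + n)
cp_wrapped = np.angle(np.exp(1j * cp))
print(np.abs(cp_wrapped).max())   # ~1e-15 for noise-free, consistent interferograms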
5,439 |
def test_minion_reconnection_against_one_live_master(
event_listener,
salt_mm_failover_master_1,
salt_mm_failover_master_2,
salt_mm_failover_minion_1,
salt_mm_failover_minion_2,
mm_failover_master_1_salt_cli,
mm_failover_master_2_salt_cli,
):
"""
Test that minions reconnect to a live master.
To work well with salt factories, the minions will reconnect to the master they were connected to in conftest.py.
We should keep this test directly after `test_failover_to_second_master`, to ensure all minions are initially
connected to the second master. A more thorough test.
"""
start_time = time.time()
with salt_mm_failover_minion_1.stopped(), salt_mm_failover_minion_2.stopped():
pass
event_patterns = [
(minion.id, "salt/minion/{}/start".format(minion.id))
for minion in (salt_mm_failover_minion_1, salt_mm_failover_minion_2)
]
event_listener.wait_for_events(
event_patterns,
timeout=salt_mm_failover_minion_1.config["master_alive_interval"] * 2,
after_time=start_time,
)
_run_echo_for_all_possibilities(
[mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],
[salt_mm_failover_minion_1, salt_mm_failover_minion_2],
)
# We are getting the return events associated with each minion
minion_1_pattern = "salt/job/*/ret/{}".format(salt_mm_failover_minion_1.id)
minion_2_pattern = "salt/job/*/ret/{}".format(salt_mm_failover_minion_2.id)
minion_1_ret_events = event_listener.get_events(
[
(salt_mm_failover_master_1.id, minion_1_pattern),
(salt_mm_failover_master_2.id, minion_1_pattern),
],
after_time=start_time,
)
minion_2_ret_events = event_listener.get_events(
[
(salt_mm_failover_master_1.id, minion_2_pattern),
(salt_mm_failover_master_2.id, minion_2_pattern),
],
after_time=start_time,
)
# Each minion should only return to one master
assert len(minion_1_ret_events) == 1
assert len(minion_2_ret_events) == 1
|
def test_minion_reconnection_against_one_live_master(
event_listener,
salt_mm_failover_master_1,
salt_mm_failover_master_2,
salt_mm_failover_minion_1,
salt_mm_failover_minion_2,
mm_failover_master_1_salt_cli,
mm_failover_master_2_salt_cli,
):
"""
Test that minions reconnect to a live master.
To work well with salt factories, the minions will reconnect to the master they were connected to in conftest.py.
We should keep this test directly after `test_failover_to_second_master`, to ensure all minions are initially
connected to the second master. A more thorough test.
"""
start_time = time.time()
with salt_mm_failover_minion_1.stopped(), salt_mm_failover_minion_2.stopped():
pass
event_patterns = [
(minion.id, "salt/minion/{}/start".format(minion.id))
for minion in (salt_mm_failover_minion_1, salt_mm_failover_minion_2)
]
event_listener.wait_for_events(
event_patterns,
timeout=salt_mm_failover_minion_1.config["master_alive_interval"] * 2,
after_time=start_time,
)
_run_echo_for_all_possibilities(
[mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],
[salt_mm_failover_minion_1, salt_mm_failover_minion_2],
)
# We are getting the return events associated with each minion
minion_1_pattern = "salt/job/*/ret/{}".format(salt_mm_failover_minion_1.id)
minion_2_pattern = "salt/job/*/ret/{}".format(salt_mm_failover_minion_2.id)
minion_1_ret_events = event_listener.get_events(
[
(salt_mm_failover_master_1.id, minion_1_pattern),
(salt_mm_failover_master_2.id, minion_1_pattern),
],
after_time=start_time,
)
minion_2_ret_events = event_listener.get_events(
[
(salt_mm_failover_master_1.id, minion_2_pattern),
(salt_mm_failover_master_2.id, minion_2_pattern),
],
after_time=start_time,
)
# Each minion should only return to one master
assert len(minion_1_ret_events) == 1
assert len(minion_2_ret_events) == 1
|
31,645 |
def custom_build_iterator(client: Client, feed: Dict, limit: int = 0, **kwargs) -> List:
"""
This function replaces the build_iterator function in JsonFeedApiModule in order to enable paging specific to this API.
Paging is done using the 'sort', 'count' and 'from' request parameters.
"""
url = feed.get('url', client.url)
fetch_time = feed.get('fetch_time')
start_date, end_date = parse_date_range(fetch_time, utc=True, to_timestamp=True)
integration_context = get_integration_context()
last_fetch = integration_context.get(f"{feed.get('indicator_type')}_fetch_time")
# sorting and count are used for paging purposes
params = {'lastUpdatedFrom': last_fetch if last_fetch else start_date, 'sort': 'earliest', 'count': '100'}
result: List[Dict] = []
should_continue = True
total_count = 0
request_headers = {'user-agent': USER_AGENT}
while should_continue:
r = requests.get(
url=url,
verify=client.verify,
auth=client.auth,
cert=client.cert,
headers=request_headers,
params=params,
**kwargs
)
try:
r.raise_for_status()
data = r.json()
current_result = jmespath.search(expression=feed.get('extractor'), data=data)
if current_result:
if not total_count:
total_count = limit if limit else data.get('actorTotalCount')
result = result + current_result
params['from'] = result[-1].get('activeFrom')
# gets next page reference and handles paging.
should_continue = total_count > len(result)
except ValueError as VE:
raise ValueError(f'Could not parse returned data to Json. \n\nError message: {VE}')
except requests.exceptions.ConnectTimeout as exception:
err_msg = 'Connection Timeout Error - potential reasons might be that the Server URL parameter' \
' is incorrect or that the Server is not accessible from your host.'
raise DemistoException(err_msg, exception)
except requests.exceptions.SSLError as exception:
err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
' the integration configuration.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ProxyError as exception:
err_msg = 'Proxy Error - if the \'Use system proxy\' checkbox in the integration configuration is' \
' selected, try clearing the checkbox.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ConnectionError as exception:
# Get originating Exception in Exception chain
error_class = str(exception.__class__)
err_type = '<' + error_class[error_class.find('\'') + 1: error_class.rfind('\'')] + '>'
err_msg = 'Verify that the server URL parameter' \
' is correct and that you have access to the server from your host.' \
'\nError Type: {}\nError Number: [{}]\nMessage: {}\n' \
.format(err_type, exception.errno, exception.strerror)
raise DemistoException(err_msg, exception)
set_integration_context({f"{feed.get('indicator_type')}_fetch_time": str(end_date)})
return result
|
def custom_build_iterator(client: Client, feed: Dict, limit: int = 0, **kwargs) -> List:
"""
This function replaces the build_iterator function in JsonFeedApiModule in order to enable paging specific to this API.
Paging is done using the 'sort', 'count' and 'from' request parameters.
"""
url = feed.get('url', client.url)
fetch_time = feed.get('fetch_time')
start_date, end_date = parse_date_range(fetch_time, utc=True, to_timestamp=True)
integration_context = get_integration_context()
last_fetch = integration_context.get(f"{feed.get('indicator_type')}_fetch_time")
# sorting and count are used for paging purposes
params = {'lastUpdatedFrom': last_fetch if last_fetch else start_date, 'sort': 'earliest', 'count': '100'}
result: List[Dict] = []
should_continue = True
total_count = 0
request_headers = {'user-agent': USER_AGENT}
while should_continue:
r = requests.get(
url=url,
verify=client.verify,
auth=client.auth,
cert=client.cert,
headers=client.headers,
params=params,
**kwargs
)
try:
r.raise_for_status()
data = r.json()
current_result = jmespath.search(expression=feed.get('extractor'), data=data)
if current_result:
if not total_count:
total_count = limit if limit else data.get('actorTotalCount')
result = result + current_result
params['from'] = result[-1].get('activeFrom')
# gets next page reference and handles paging.
should_continue = total_count > len(result)
except ValueError as VE:
raise ValueError(f'Could not parse returned data to Json. \n\nError message: {VE}')
except requests.exceptions.ConnectTimeout as exception:
err_msg = 'Connection Timeout Error - potential reasons might be that the Server URL parameter' \
' is incorrect or that the Server is not accessible from your host.'
raise DemistoException(err_msg, exception)
except requests.exceptions.SSLError as exception:
err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
' the integration configuration.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ProxyError as exception:
err_msg = 'Proxy Error - if the \'Use system proxy\' checkbox in the integration configuration is' \
' selected, try clearing the checkbox.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ConnectionError as exception:
# Get originating Exception in Exception chain
error_class = str(exception.__class__)
err_type = '<' + error_class[error_class.find('\'') + 1: error_class.rfind('\'')] + '>'
err_msg = 'Verify that the server URL parameter' \
' is correct and that you have access to the server from your host.' \
'\nError Type: {}\nError Number: [{}]\nMessage: {}\n' \
.format(err_type, exception.errno, exception.strerror)
raise DemistoException(err_msg, exception)
set_integration_context({f"{feed.get('indicator_type')}_fetch_time": str(end_date)})
return result
|
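The count-based paging loop in the pair above, reduced to its skeleton; fetch_page is a stand-in for the HTTP request and not part of the integration:

def fetch_page(cursor, count=100):
    # Fake endpoint: returns a slice of items plus the reported total count
    data = list(range(237))
    return data[cursor:cursor + count], len(data)

def fetch_all(limit=0, count=100):
    result, total = [], None
    while total is None or len(result) < total:
        items, reported_total = fetch_page(len(result), count)
        if not items:
            break
        if total is None:
            total = limit if limit else reported_total
        result.extend(items)
    return result

assert len(fetch_all()) == 237
assert len(fetch_all(limit=50)) >= 50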
24,424 |
def construct_pytest_options(
check,
verbose=0,
color=None,
enter_pdb=False,
debug=False,
bench=False,
latest_metrics=False,
coverage=False,
junit=False,
marker='',
test_filter='',
pytest_args='',
e2e=False,
ddtrace=False,
):
# Prevent no verbosity
pytest_options = f'--verbosity={verbose or 1}'
if not verbose:
pytest_options += ' --tb=short'
if color is not None:
pytest_options += ' --color=yes' if color else ' --color=no'
if enter_pdb:
# Drop to PDB on first failure, then end test session
pytest_options += ' --pdb -x'
if debug:
pytest_options += ' --log-level=debug -s'
if bench:
pytest_options += ' --benchmark-only --benchmark-cprofile=tottime'
else:
pytest_options += ' --benchmark-skip'
if latest_metrics:
pytest_options += ' --run-latest-metrics'
if ddtrace:
pytest_options += ' --ddtrace'
if junit:
test_group = 'e2e' if e2e else 'unit'
pytest_options += (
# junit report file must contain the env name to handle multiple envs
# $TOX_ENV_NAME is a tox injected variable
# See https://tox.readthedocs.io/en/latest/config.html#injected-environment-variables
f' --junit-xml=.junit/test-{test_group}-$TOX_ENV_NAME.xml'
# Junit test results class prefix
f' --junit-prefix={check}'
)
if coverage:
pytest_options += (
# Located at the root of each repo
' --cov-config=../.coveragerc'
# Use the same .coverage file to aggregate results
' --cov-append'
# Show no coverage report until the end
' --cov-report='
# This will be formatted to the appropriate coverage paths for each package
' {}'
)
if marker:
pytest_options += f' -m "{marker}"'
if test_filter:
pytest_options += f' -k "{test_filter}"'
if pytest_args:
pytest_options += f' {pytest_args}'
return pytest_options
|
def construct_pytest_options(
check,
verbose=0,
color=None,
enter_pdb=False,
debug=False,
bench=False,
latest_metrics=False,
coverage=False,
junit=False,
marker='',
test_filter='',
pytest_args='',
e2e=False,
ddtrace=False,
):
# Prevent no verbosity
pytest_options = f'--verbosity={verbose or 1}'
if not verbose:
pytest_options += ' --tb=short'
if color is not None:
pytest_options += ' --color=yes' if color else ' --color=no'
if enter_pdb:
# Drop to PDB on first failure, then end test session
pytest_options += ' --pdb -x'
if debug:
pytest_options += ' --log-level=debug -s'
if bench:
pytest_options += ' --benchmark-only --benchmark-cprofile=tottime'
else:
pytest_options += ' --benchmark-skip'
if latest_metrics:
pytest_options += ' --run-latest-metrics'
if ddtrace:
pytest_options += ' --ddtrace'
if junit:
test_group = 'e2e' if e2e else 'unit'
pytest_options += (
# junit report file must contain the env name to handle multiple envs
# $TOX_ENV_NAME is a tox injected variable
# See https://tox.readthedocs.io/en/latest/config.html#injected-environment-variables
f' --junit-xml=.junit/test-{test_group}-$TOX_ENV_NAME.xml'
# Junit test results class prefix
f' --junit-prefix={check}'
)
if coverage:
pytest_options += (
# Located at the root of each repo
' --cov-config=../.coveragerc'
# Use the same .coverage file to aggregate results
' --cov-append'
# Show no coverage report until the end
' --cov-report='
# This will be formatted to the appropriate coverage paths for each package
' {}'
)
if marker:
pytest_options += f' -m "{marker}"'
if test_filter:
pytest_options += f' -k "{test_filter}"'
if pytest_args:
pytest_options += f' {pytest_args}'
return pytest_options
|
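A trimmed-down sketch of the incremental option-string assembly used above, showing the kind of command line it produces (build_options is illustrative, not the real helper):

def build_options(verbose=0, color=None, marker=""):
    opts = f"--verbosity={verbose or 1}"       # prevent zero verbosity
    if not verbose:
        opts += " --tb=short"
    if color is not None:
        opts += " --color=yes" if color else " --color=no"
    if marker:
        opts += f' -m "{marker}"'
    return opts

print(build_options(verbose=2, color=True, marker="e2e"))
# --verbosity=2 --color=yes -m "e2e"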
59,672 |
def test_project_output_filename(dataframe):
"""
Run project by passing in an ASCII text file as input.
"""
with GMTTempFile() as tmpfile:
output = project(
data=dataframe,
center=[0, -1],
azimuth=45,
flat_earth=True,
outfile=tmpfile.name,
)
assert output is None # check that output is None since outfile is set
assert os.path.exists(path=tmpfile.name) # check that outfile exists at path
output = pd.read_csv(tmpfile.name, sep="\t", header=None)
assert output.shape == (1, 6)
npt.assert_allclose(
output.iloc[0],
[0.000000, 0.000000, 0.707107, 0.707107, 0.500000, -0.500000],
rtol=1e-5,
)
|
def test_project_output_filename(dataframe):
"""
Run project by passing in a pandas.DataFrame, and output to an ASCII txt
file.
"""
with GMTTempFile() as tmpfile:
output = project(
data=dataframe,
center=[0, -1],
azimuth=45,
flat_earth=True,
outfile=tmpfile.name,
)
assert output is None # check that output is None since outfile is set
assert os.path.exists(path=tmpfile.name) # check that outfile exists at path
output = pd.read_csv(tmpfile.name, sep="\t", header=None)
assert output.shape == (1, 6)
npt.assert_allclose(
output.iloc[0],
[0.000000, 0.000000, 0.707107, 0.707107, 0.500000, -0.500000],
rtol=1e-5,
)
|
35,026 |
def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True):
"""Select the best implementation from the op strategy.
If use_autotvm is True, it'll first try to find the best implementation
based on AutoTVM profile results. If no AutoTVM profile result is found,
it'll choose the implementation with highest plevel.
If use_autotvm is False, it'll directly choose the implementation with
highest plevel.
Note that this function doesn't support op with symbolic input shapes.
Parameters
----------
op : tvm.ir.Op
Relay operator.
attrs : object
The op attribute.
inputs : List[tvm.te.Tensor]
Input tensors to the op.
out_type : relay.Type
The output type.
target : tvm.target.Target
The target to compile the op.
use_autotvm : bool
Whether query AutoTVM to pick the best.
Returns
-------
ret : tuple(relay.op.OpImplementation, List[tvm.te.Tensor])
The best op implementation and the corresponding output tensors.
"""
all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)
best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
# Disable autotvm if auto_scheduler is enabled.
# (i.e., always return the implementation with the highest priority for auto-scheduler).
if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
use_autotvm = False
# If not use autotvm, always return the implementation with the highest priority
if not use_autotvm:
logger.info(
"Using %s for %s based on highest priority (%d)",
best_plevel_impl.name,
op.name,
best_plevel_impl.plevel,
)
outs = best_plevel_impl.compute(attrs, inputs, out_type)
return best_plevel_impl, outs
# Otherwise, try autotvm templates
outputs = {}
workloads = {}
best_autotvm_impl = None
best_cfg = None
dispatch_ctx = autotvm.task.DispatchContext.current
old_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
for impl in all_impls:
outs = impl.compute(attrs, inputs, out_type)
outputs[impl] = outs
workload = autotvm.task.get_workload(outs)
workloads[impl] = workload
if workload is None:
# Not an AutoTVM tunable implementation
continue
cfg = dispatch_ctx.query(target, workload)
if cfg.is_fallback:
# Skip fallback config
continue
logger.info("Implementation %s for %s has cost %.2e", impl.name, op.name, cfg.cost)
if best_cfg is None or best_cfg.cost > cfg.cost:
best_autotvm_impl = impl
best_cfg = cfg
autotvm.GLOBAL_SCOPE.silent = old_silent
if best_autotvm_impl:
# The best autotvm implementation definitely doesn't use fallback config
logger.info(
"Using %s for %s based on lowest cost (%.2e)",
best_autotvm_impl.name,
op.name,
best_cfg.cost,
)
return best_autotvm_impl, outputs[best_autotvm_impl]
# Use the implementation with highest plevel
if workloads[best_plevel_impl] is not None:
msg = (
"Cannot find config for target=%s, workload=%s. A fallback configuration "
"is used, which may bring great performance regression."
% (target, workloads[best_plevel_impl])
)
if (
not autotvm.env.GLOBAL_SCOPE.silent
and msg not in autotvm.task.DispatchContext.warning_messages
):
autotvm.task.DispatchContext.warning_messages.add(msg)
if autotvm_logger.level == logging.DEBUG: # only print if in debug mode
autotvm_logger.warning(msg)
else:
global _first_warning
if _first_warning == True:
_first_warning = False
info_msg = "One or more operators have not been tuned. Please tune your model "
"for better performance or use DEBUG logging level to see more details."
autotvm_logger.warning(info_msg)
logger.info(
"Using %s for %s based on highest priority (%s)",
best_plevel_impl.name,
op.name,
best_plevel_impl.plevel,
)
return best_plevel_impl, outputs[best_plevel_impl]
|
def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True):
"""Select the best implementation from the op strategy.
If use_autotvm is True, it'll first try to find the best implementation
based on AutoTVM profile results. If no AutoTVM profile result is found,
it'll choose the implementation with highest plevel.
If use_autotvm is False, it'll directly choose the implementation with
highest plevel.
Note that this function doesn't support op with symbolic input shapes.
Parameters
----------
op : tvm.ir.Op
Relay operator.
attrs : object
The op attribute.
inputs : List[tvm.te.Tensor]
Input tensors to the op.
out_type : relay.Type
The output type.
target : tvm.target.Target
The target to compile the op.
use_autotvm : bool
Whether query AutoTVM to pick the best.
Returns
-------
ret : tuple(relay.op.OpImplementation, List[tvm.te.Tensor])
The best op implementation and the corresponding output tensors.
"""
all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)
best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
# Disable autotvm if auto_scheduler is enabled.
# (i.e., always return the implementation with the highest priority for auto-scheduler).
if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
use_autotvm = False
# If not use autotvm, always return the implementation with the highest priority
if not use_autotvm:
logger.info(
"Using %s for %s based on highest priority (%d)",
best_plevel_impl.name,
op.name,
best_plevel_impl.plevel,
)
outs = best_plevel_impl.compute(attrs, inputs, out_type)
return best_plevel_impl, outs
# Otherwise, try autotvm templates
outputs = {}
workloads = {}
best_autotvm_impl = None
best_cfg = None
dispatch_ctx = autotvm.task.DispatchContext.current
old_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
for impl in all_impls:
outs = impl.compute(attrs, inputs, out_type)
outputs[impl] = outs
workload = autotvm.task.get_workload(outs)
workloads[impl] = workload
if workload is None:
# Not an AutoTVM tunable implementation
continue
cfg = dispatch_ctx.query(target, workload)
if cfg.is_fallback:
# Skip fallback config
continue
logger.info("Implementation %s for %s has cost %.2e", impl.name, op.name, cfg.cost)
if best_cfg is None or best_cfg.cost > cfg.cost:
best_autotvm_impl = impl
best_cfg = cfg
autotvm.GLOBAL_SCOPE.silent = old_silent
if best_autotvm_impl:
# The best autotvm implementation definitely doesn't use fallback config
logger.info(
"Using %s for %s based on lowest cost (%.2e)",
best_autotvm_impl.name,
op.name,
best_cfg.cost,
)
return best_autotvm_impl, outputs[best_autotvm_impl]
# Use the implementation with highest plevel
if workloads[best_plevel_impl] is not None:
msg = (
"Cannot find config for target=%s, workload=%s. A fallback configuration "
"is used, which may bring great performance regression."
% (target, workloads[best_plevel_impl])
)
if (
not autotvm.env.GLOBAL_SCOPE.silent
and msg not in autotvm.task.DispatchContext.warning_messages
):
autotvm.task.DispatchContext.warning_messages.add(msg)
if autotvm_logger.level == logging.DEBUG: # only print if in debug mode
autotvm_logger.warning(msg)
else:
global _first_warning
if _first_warning:
_first_warning = False
info_msg = "One or more operators have not been tuned. Please tune your model "
"for better performance or use DEBUG logging level to see more details."
autotvm_logger.warning(info_msg)
logger.info(
"Using %s for %s based on highest priority (%s)",
best_plevel_impl.name,
op.name,
best_plevel_impl.plevel,
)
return best_plevel_impl, outputs[best_plevel_impl]
|
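The selection policy itself, stripped of TVM types: prefer the lowest tuned cost when any tuning record exists, otherwise fall back to the highest priority level. A sketch over hypothetical records:

def select(impls):
    # impls: dicts with "name", "plevel" and an optional tuned "cost"
    tuned = [i for i in impls if i.get("cost") is not None]
    if tuned:
        return min(tuned, key=lambda i: i["cost"])
    return max(impls, key=lambda i: i["plevel"])

impls = [
    {"name": "generic", "plevel": 10},
    {"name": "winograd", "plevel": 5, "cost": 1.2e-3},
    {"name": "direct", "plevel": 15, "cost": 3.4e-3},
]
print(select(impls)["name"])        # winograd: lowest tuned cost
print(select([impls[0]])["name"])   # generic: no tuning records, highest plevel wins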
57,997 |
def test_azure_firewall_get_command(requests_mock):
"""
Scenario: Retrieve azure firewall information.
Given:
- User has provided valid credentials.
When:
- azure-firewall-get called.
Then:
- Ensure number of items is correct.
- Ensure outputs prefix is correct.
- Ensure a sample value from the API matches what is generated in the context.
"""
from AzureFirewall import azure_firewall_get_command
authorization_mock(requests_mock)
client = get_client_mock()
firewall_name = 'xsoar-firewall'
url = f'{BASE_URL}/azureFirewalls/{firewall_name}'
mock_response = json.loads(load_mock_response('test_data/firewall/firewall_get.json'))
requests_mock.get(url, json=mock_response)
command_arguments = dict(firewall_names=firewall_name)
result = azure_firewall_get_command(client, command_arguments)
assert len(result[0].outputs) == 1
assert result[0].outputs_prefix == 'AzureFirewall.Firewall'
assert result[0].outputs[0].get('name') == firewall_name
|
def test_azure_firewall_get_command(requests_mock):
"""
Scenario: Retrieve azure firewall information.
Given:
- User has provided valid credentials.
When:
- azure-firewall-get called.
Then:
- Ensure 1 result is returned.
- Ensure outputs prefix is correct.
- Ensure the firewall name searched is the same as in the context returned.
"""
from AzureFirewall import azure_firewall_get_command
authorization_mock(requests_mock)
client = get_client_mock()
firewall_name = 'xsoar-firewall'
url = f'{BASE_URL}/azureFirewalls/{firewall_name}'
mock_response = json.loads(load_mock_response('test_data/firewall/firewall_get.json'))
requests_mock.get(url, json=mock_response)
command_arguments = dict(firewall_names=firewall_name)
result = azure_firewall_get_command(client, command_arguments)
assert len(result[0].outputs) == 1
assert result[0].outputs_prefix == 'AzureFirewall.Firewall'
assert result[0].outputs[0].get('name') == firewall_name
|
5,446 |
def networks():
"""
Provides a list of connected networks
"""
nets = salt.utils.network.ip_networks()
return {"networks": nets}
|
def networks():
"""
Provides a list of connected networks
"""
nets = salt.utils.network.ip_networks()
return {"networks": nets}
|
17,720 |
def load_file(filename,relative_to_module=None,compound=None,
coords_only=False,rigid=False,backend=None,
infer_hierarchy=True,**kwargs):
""" Helper function to load from files
Loading and converting a topology to mb.Compound from file. The user can specify
a preferred backend; otherwise a default backend is chosen based on the file extension.
Parameters
----------
file_name : str
Name of the file from which to load atom and bond information from
relative_to_module : str, optional, default=None
Instead of looking in the current working directory,
look for the file where this module is defined. This
is typically used in Compound classes that will be
instantiated from a different directory (such as the
Compounds located in mbuild.lib).
compound : mb.Compound, optional, default=None
Existing compound to load atom and bond information from.
New structure will be added to the existing compound
as a sub compound.
coords_only : bool, optional, default=False
Only load the coordinates into an existing compound.
rigid : bool, optional, default=False
Treat the compound as a rigid body
backend : str, optional, default=None
Backend used to load structure from file. If not specified, a default
backend (extension specific) will be used.
infer_hierarchy : bool, optional, default=True
If True, infer hierarchy from chains and residues
**kwargs : keyword arguments
Keyword arguments passed to mdTraj for loading
Return
------
compound : mb.Compound
"""
# Initialize a compound if none is given
if not compound:
compound = mb.Compound()
# Need to come up with a different dict structure
default_backend = {
'.json':'internal',
'.xyz':'internal',
'.sdf':'pybel',
'.pdb':'mdtraj',
}
# Handle mbuild *.py files containing a class that wraps a structure file
# in its own folder. E.g., you build a system from ~/foo.py and it imports
# from ~/bar/baz.py where baz.py loads ~/bar/baz.pdb.
if relative_to_module:
script_path = os.path.realpath(
sys.modules[relative_to_module].__file__)
file_dir = os.path.dirname(script_path)
filename = os.path.join(file_dir, filename)
# Handle the case of a xyz and json file, which must use an internal reader
extension = os.path.splitext(filename)[-1]
if not backend:
try:
# Try matching backend based on extension
backend = default_backend[extension]
except KeyError:
# Else use default backend
backend = 'mdtraj'
# First check internal readers
if backend == 'internal':
# Handle json format
if extension == '.json':
# This doesn't seem to handle the case when compound is given
compound = compound_from_json(filename)
return compound
# Handle xyz file
if extension == '.xyz' and not 'top' in kwargs:
if coords_only:
tmp = read_xyz(filename)
if tmp.n_particles != compound.n_particles:
raise ValueError('Number of atoms in {filename} '
'does not match {compound}'.format(**locals()))
ref_and_compound = zip(tmp._particles(include_ports=False),
compound.particles(include_ports=False))
for ref_particle, particle in ref_and_compound:
particle.pos = ref_particle.pos
else:
compound = read_xyz(filename, compound=compound)
elif extension == '.xyz' and 'top' in kwargs:
backend = 'mdtraj'
# Then pybel reader
elif backend == 'pybel':
pybel = import_('pybel')
if extension == '.sdf':
pybel_mol = pybel.readfile('sdf', filename)
# pybel returns a generator, so we grab the first molecule of a
# list of len 1.
# Raise ValueError if there are more molecules
pybel_mol = [i for i in pybel_mol]
if len(pybel_mol) == 1:
compound = from_pybel(
pybel_mol=pybel_mol[0],
compound=compound,
coords_only=coords_only,
infer_hierarchy=infer_hierarchy)
else:
raise ValueError('More than one pybel molecule in file, '
'more than one pybel molecule is not supported')
# text file detected, assume it contains a SMILES string
elif extension == '.txt':
# Fail-safe measure
compound = load_smiles(filename, compound)
# Then parmed reader
elif backend == 'parmed':
warn('Using parmed reader. Bonds may be inferred '
'from inter-particle distances and standard '
'residue templates')
structure = pmd.load_file(filename, structure=True, **kwargs)
compound = from_parmed(
structure=structure,
compound=compound,
coords_only=coords_only,
infer_hierarchy=infer_hierarchy)
# Then mdtraj reader
elif backend == 'mdtraj':
traj = md.load(filename, **kwargs)
compound = from_trajectory(
traj=traj,
compound=compound,
frame=-1,
coords_only=coords_only,
infer_hierarchy=infer_hierarchy)
if rigid:
compound.label_rigid_bodies()
# Note: 'Input not supported' error will be handled
# by the corresponding backend
return compound
|
def load_file(filename,relative_to_module=None,compound=None,
coords_only=False,rigid=False,backend=None,
infer_hierarchy=True,**kwargs):
""" Helper function to load from files
Loading and converting a topology to mb.Compound from file. The user can specify
a preferred backend; otherwise a default backend is chosen based on the file extension.
Parameters
----------
file_name : str
Name of the file from which to load atom and bond information from
relative_to_module : str, optional, default=None
Instead of looking in the current working directory,
look for the file where this module is defined. This
is typically used in Compound classes that will be
instantiated from a different directory (such as the
Compounds located in mbuild.lib).
compound : mb.Compound, optional, default=None
Existing compound to load atom and bond information from.
New structure will be added to the existing compound
as a sub compound.
coords_only : bool, optional, default=False
Only load the coordinates into an existing compound.
rigid : bool, optional, default=False
Treat the compound as a rigid body
backend : str, optional, default=None
Backend used to load structure from file. If not specified, a default
backend (extension specific) will be used.
infer_hierarchy : bool, optional, default=True
If True, infer hierarchy from chains and residues
**kwargs : keyword arguments
Keyword arguments passed to mdTraj for loading
Return
------
compound : mb.Compound
"""
# Initialize a compound if none is given
if not compound:
compound = mb.Compound()
# Need to come up with a different dict structure
default_backend = {
'.json':'internal',
'.xyz':'internal',
'.sdf':'pybel',
'.pdb':'mdtraj',
}
# Handle mbuild *.py files containing a class that wraps a structure file
# in its own folder. E.g., you build a system from ~/foo.py and it imports
# from ~/bar/baz.py where baz.py loads ~/bar/baz.pdb.
if relative_to_module:
script_path = os.path.realpath(
sys.modules[relative_to_module].__file__)
file_dir = os.path.dirname(script_path)
filename = os.path.join(file_dir, filename)
# Handle the case of a xyz and json file, which must use an internal reader
extension = os.path.splitext(filename)[-1]
if not backend:
try:
# Try matching backend based on extension
backend = default_backend[extension]
except KeyError:
# Else use default backend
backend = 'mdtraj'
# First check internal readers
if backend == 'internal':
# Handle json format
if extension == '.json':
# This doesn't seem to handle the case when compound is given
compound = compound_from_json(filename)
return compound
# Handle xyz file
if extension == '.xyz' and not 'top' in kwargs:
if coords_only:
tmp = read_xyz(filename)
if tmp.n_particles != compound.n_particles:
raise ValueError('Number of atoms in {filename} '
'does not match {compound}'.format(**locals()))
ref_and_compound = zip(tmp._particles(include_ports=False),
compound.particles(include_ports=False))
for ref_particle, particle in ref_and_compound:
particle.pos = ref_particle.pos
else:
compound = read_xyz(filename, compound=compound)
elif extension == '.xyz' and 'top' in kwargs:
backend = 'mdtraj'
# Then pybel reader
elif backend == 'pybel':
pybel = import_('pybel')
if extension == '.sdf':
pybel_mol = pybel.readfile('sdf', filename)
# pybel returns a generator, so we grab the first molecule of a
# list of len 1.
# Raise ValueError if there are more molecules
pybel_mol = [i for i in pybel_mol]
if len(pybel_mol) == 1:
compound = from_pybel(
pybel_mol=pybel_mol[0],
compound=compound,
coords_only=coords_only,
infer_hierarchy=infer_hierarchy)
else:
raise ValueError('More than one pybel molecule in file, '
'more than one pybel molecule is not supported')
# text file detected, assume it contains a SMILES string
elif extension == '.txt':
# Fail-safe measure
compound = load_smiles(filename, compound)
# Then parmed reader
elif backend == 'parmed':
warn('Using parmed reader. Bonds may be inferred '
'from inter-particle distances and standard '
'residue templates')
structure = pmd.load_file(filename, structure=True, **kwargs)
compound = from_parmed(
structure=structure,
compound=compound,
coords_only=coords_only,
infer_hierarchy=infer_hierarchy)
# Then mdtraj reader
elif backend == 'mdtraj':
traj = md.load(filename, **kwargs)
compound = from_trajectory(
traj=traj,
compound=compound,
frame=-1,
coords_only=coords_only,
infer_hierarchy=infer_hierarchy)
if rigid:
compound.label_rigid_bodies()
# Note: 'Input not supported' error will be handled
# by the corresponding backend
return compound
|
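The extension-to-backend dispatch in the pair above, isolated into a few lines (pick_backend is an illustrative helper, not part of mbuild):

import os

DEFAULT_BACKEND = {".json": "internal", ".xyz": "internal", ".sdf": "pybel", ".pdb": "mdtraj"}

def pick_backend(filename, backend=None):
    # An explicit backend wins; otherwise map by extension, falling back to mdtraj
    ext = os.path.splitext(filename)[-1].lower()
    return backend or DEFAULT_BACKEND.get(ext, "mdtraj")

assert pick_backend("mol.xyz") == "internal"
assert pick_backend("mol.gro") == "mdtraj"
assert pick_backend("mol.pdb", backend="parmed") == "parmed"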
52,129 |
def _update_labels(sm, deployment):
"""
Updating the deployment's labels.
This function replaces the existing deployment's labels with the new labels
that were passed in the request.
If a new label already exists, it won't be created again.
If an existing label is not in the new labels list, it will be deleted.
"""
new_labels = _get_labels(request.json)
if new_labels is None:
return
rm = get_resource_manager()
new_labels_set = set(new_labels)
existing_labels = sm.list(
models.DeploymentLabel,
filters={'_deployment_fk': deployment._storage_id}
)
existing_labels_tup = set(
(label.key, label.value) for label in existing_labels)
labels_to_create = new_labels_set - existing_labels_tup
raw_labels_to_delete = existing_labels_tup - new_labels_set
labels_to_delete = [
label for label in existing_labels if
(label.key, label.value) in raw_labels_to_delete]
for label in labels_to_delete:
sm.delete(label)
rm.create_deployment_labels(deployment, labels_to_create)
|
def _update_labels(sm, deployment):
"""
Updating the deployment's labels.
This function replaces the existing deployment's labels with the new labels
that were passed in the request.
If a new label already exists, it won't be created again.
If an existing label is not in the new labels list, it will be deleted.
"""
new_labels = _get_labels(request.json)
if new_labels is None:
return
rm = get_resource_manager()
new_labels_set = set(new_labels)
existing_labels = sm.list(
models.DeploymentLabel,
filters={'_deployment_fk': deployment._storage_id}
)
existing_labels_tup = set(
(label.key, label.value) for label in existing_labels)
labels_to_create = new_labels_set - existing_labels_tup
for label in existing_labels:
if (label.key, label.value) not in new_labels_set:
sm.delete(label)
rm.create_deployment_labels(deployment, labels_to_create)
|
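The underlying reconciliation is a pair of set differences; a sketch with labels as (key, value) tuples, independent of the storage manager:

def reconcile(existing, desired):
    existing_set, desired_set = set(existing), set(desired)
    to_create = desired_set - existing_set   # new labels missing from storage
    to_delete = existing_set - desired_set   # stored labels no longer requested
    return to_create, to_delete

existing = [("env", "prod"), ("team", "infra")]
desired = [("env", "prod"), ("owner", "alice")]
create, delete = reconcile(existing, desired)
assert create == {("owner", "alice")}
assert delete == {("team", "infra")}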
22,663 |
def rst2md(text, heading_levels):
"""Converts the RST text from the examples docstrigs and comments
into markdown text for the Jupyter notebooks
Parameters
----------
text: str
RST input to be converted to MD
heading_levels: dict
Mapping of heading style ``(over_char, under_char)`` to heading level.
Note that ``over_char`` is `None` when only underline is present.
"""
# Characters recommended for use with headings
# https://docutils.readthedocs.io/en/sphinx-docs/user/rst/quickstart.html#sections
adornment_characters = "=`:.'\"~^_*+#<>-"
headings = re.compile(
r'(?P<pre>\A|^[ \t]*\n)' # Start of string or blank line above
r'(?:(?P<over>[{0}])(?P=over)*\n[ \t]*)?' # Over, with heading space
r'(?P<heading>\S[^\n]*)\n' # Heading itself
r'(?P<under>(?(over)(?P=over)|[{0}]))(?P=under)*$' # if over make same
r''.format(adornment_characters),
flags=re.M)
text = re.sub(
headings,
lambda match: '{1}{0} {2}'.format(
'#'*heading_levels[match.group('over', 'under')],
*match.group('pre', 'heading')),
text)
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
lambda match: r'\begin{{align}}{0}\end{{align}}'.format(
match.group(1).strip()),
text)
inline_math = re.compile(r':math:`(.+?)`', re.DOTALL)
text = re.sub(inline_math, r'$\1$', text)
directives = ('warning', 'note')
for directive in directives:
directive_re = re.compile(r'^\.\. %s::((?:.+)?(?:\n+^ .+)*)'
% directive, flags=re.M)
text = re.sub(directive_re,
partial(directive_fun, directive=directive), text)
links = re.compile(r'^ *\.\. _.*:.*$\n', flags=re.M)
text = re.sub(links, '', text)
refs = re.compile(r':ref:`')
text = re.sub(refs, '`', text)
contents = re.compile(r'^\s*\.\. contents::.*$(\n +:\S+: *$)*\n',
flags=re.M)
text = re.sub(contents, '', text)
images = re.compile(
r'^\.\. image::(.*$)(?:\n *:alt:(.*$)\n)?(?: +:\S+:.*$\n)*',
flags=re.M)
text = re.sub(
images, lambda match: '\n'.format(
match.group(1).strip(), (match.group(2) or '').strip()), text)
return text
|
def rst2md(text, heading_levels):
"""Converts the RST text from the examples docstrigs and comments
into markdown text for the Jupyter notebooks
Parameters
----------
text: str
RST input to be converted to MD
heading_levels: dict
Mapping of heading style ``(over_char, under_char)`` to heading level.
Note that ``over_char`` is `None` when only underline is present.
"""
# Characters recommended for use with headings
# https://docutils.readthedocs.io/en/sphinx-docs/user/rst/quickstart.html#sections
adornment_characters = "=`:.'\"~^_*+#<>-"
headings = re.compile(
r'(?P<pre>\A|^[ \t]*\n)' # Start of string or blank line above
r'(?:(?P<over>[{0}])(?P=over)*\n[ \t]*)?' # Over, with heading space
r'(?P<heading>\S[^\n]*)\n' # Heading itself
r'(?P<under>(?(over)(?P=over)|[{0}]))(?P=under)*$' # if over make same
r''.format(adornment_characters),
flags=re.M)
text = re.sub(
headings,
lambda match: '{1}{0} {2}'.format(
'#'*heading_levels[match.group('over', 'under')],
*match.group('pre', 'heading')),
text)
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
lambda match: r'\begin{{align}}{0}\end{{align}}'.format(
match.group(1).strip()),
text)
inline_math = re.compile(r':math:`(.+?)`', re.DOTALL)
text = re.sub(inline_math, r'$\1$', text)
directives = ('warning', 'note')
for directive in directives:
directive_re = re.compile(r'^\.\. %s::((?:.+)?(?:\n+^ .+)*)'
% directive, flags=re.M)
text = re.sub(directive_re,
partial(directive_fun, directive=directive), text)
links = re.compile(r'^ *\.\. _.*:.*$\n', flags=re.M)
text = re.sub(links, '', text)
refs = re.compile(r':ref:`')
text = re.sub(refs, '`', text)
contents = re.compile(r'^\s*\.\. contents::.*$(\n +:\S+: *$)*\n',
flags=re.M)
text = re.sub(contents, '', text)
images = re.compile(
r'^\.\. image::(.*$)(?:\n *:alt:(.*$)\n)?(?: +:\S+:.*$\n)*',
flags=re.M)
text = re.sub(
images, lambda match: '\n'.format(
match.group(1).strip(), (match.group(2) or '').strip()), text)
return text
|
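Two of the simpler substitutions from the pair above, applied to a tiny RST snippet to show the intended output (inline math and :ref: links only):

import re

rst = "See :ref:`intro` and the bound :math:`O(n \\log n)` for details."
md = re.sub(r':math:`(.+?)`', r'$\1$', rst, flags=re.DOTALL)
md = re.sub(r':ref:`', '`', md)
print(md)   # See `intro` and the bound $O(n \log n)$ for details.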
27,937 |
def generate_matrix(shape, dtype=float, **kwargs):
r"""Generates a random matrix with given singular values.
This function generates a random NumPy matrix (or a set of matrices) that
has specified singular values. It can be used to generate the inputs for a
test that can be instable when the input value behaves bad.
Notation: denote the shape of the generated array by :math:`(B..., M, N)`,
and :math:`K = min\{M, N\}`. :math:`B...` may be an empty sequence.
Args:
shape (tuple of int): Shape of the generated array, i.e.,
:math:`(B..., M, N)`.
dtype: Dtype of the generated array.
singular_values (array-like): Singular values of the generated
matrices. It must be broadcastable to shape :math:`(B..., K)`.
"""
singular_values, = argument.parse_kwargs(
kwargs, ('singular_values', None),
)
if len(shape) <= 1:
raise ValueError(
'shpae {} is invalid for matrices: too few axes'.format(shape)
)
k_shape = shape[:-2] + (min(shape[-2:]),)
# TODO(beam2d): consider supporting integer/boolean matrices
dtype = numpy.dtype(dtype)
if dtype.kind not in 'fc':
raise ValueError('dtype {} is not supported'.format(dtype))
if singular_values is None:
raise TypeError('singular_values is not given')
singular_values = numpy.asarray(singular_values)
if (singular_values < 0).any():
raise ValueError('negative singular value is given')
# Generate random matrices with given singular values. We simply generate
# orthogonal vectors using SVD on random matrices and then combine them
# with the given singular values.
a = numpy.random.randn(*shape)
if dtype.kind == 'c':
a = a + 1j * numpy.random.randn(*shape)
u, _, vh = numpy.linalg.svd(a, full_matrices=False)
a = numpy.einsum('...ik,...k,...kj->...ij', u, singular_values, vh)
return a.astype(dtype)
|
def generate_matrix(shape, dtype=float, **kwargs):
r"""Generates a random matrix with given singular values.
This function generates a random NumPy matrix (or a set of matrices) that
has specified singular values. It can be used to generate the inputs for a
test that can be instable when the input value behaves bad.
Notation: denote the shape of the generated array by :math:`(B..., M, N)`,
and :math:`K = min\{M, N\}`. :math:`B...` may be an empty sequence.
Args:
shape (tuple of int): Shape of the generated array, i.e.,
:math:`(B..., M, N)`.
dtype: Dtype of the generated array.
singular_values (array-like): Singular values of the generated
matrices. It must be broadcastable to shape :math:`(B..., K)`.
"""
singular_values, = argument.parse_kwargs(
kwargs, ('singular_values', None),
)
if len(shape) <= 1:
raise ValueError(
'shpae {} is invalid for matrices: too few axes'.format(shape)
)
k_shape = shape[:-2] + (min(shape[-2:]),)
# TODO(beam2d): consider supporting integer/boolean matrices
dtype = numpy.dtype(dtype)
if dtype.kind not in 'fc':
raise ValueError('dtype {} is not supported'.format(dtype))
if singular_values is None:
raise TypeError('singular_values is not given')
singular_values = numpy.asarray(singular_values)
if not numpy.isrealobj(singular_values) or (singular_values < 0).any():
raise ValueError('negative singular value is given')
# Generate random matrices with given singular values. We simply generate
# orthogonal vectors using SVD on random matrices and then combine them
# with the given singular values.
a = numpy.random.randn(*shape)
if dtype.kind == 'c':
a = a + 1j * numpy.random.randn(*shape)
u, _, vh = numpy.linalg.svd(a, full_matrices=False)
a = numpy.einsum('...ik,...k,...kj->...ij', u, singular_values, vh)
return a.astype(dtype)
|
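A direct NumPy check of the construction in the pair above: an SVD of a random matrix supplies orthonormal factors, and recombining them with chosen singular values reproduces exactly those values:

import numpy as np

rng = np.random.default_rng(1)
target = np.array([3.0, 1.0, 0.1])               # desired singular values, descending

a = rng.standard_normal((5, 3))
u, _, vh = np.linalg.svd(a, full_matrices=False) # orthonormal factors only
m = np.einsum('ik,k,kj->ij', u, target, vh)

np.testing.assert_allclose(np.linalg.svd(m, compute_uv=False), target, rtol=1e-10)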
27,198 |
def generate_class_string(
typename,
props,
description,
namespace,
prop_reorder_exceptions=None,
max_props=None,
):
"""Dynamically generate class strings to have nicely formatted docstrings,
keyword arguments, and repr.
Inspired by http://jameso.be/2013/08/06/namedtuple.html
Parameters
----------
typename
props
description
namespace
prop_reorder_exceptions
Returns
-------
string
"""
# TODO _prop_names, _type, _namespace, and available_properties
# can be modified by a Dash JS developer via setattr
# TODO - Tab out the repr for the repr of these components to make it
# look more like a hierarchical tree
# TODO - Include "description" "defaultValue" in the repr and docstring
#
# TODO - Handle "required"
#
# TODO - How to handle user-given `null` values? I want to include
# an expanded docstring like Dropdown(value=None, id=None)
# but by templating in those None values, I have no way of knowing
# whether a property is None because the user explicitly wanted
# it to be `null` or whether that was just the default value.
# The solution might be to deal with default values better although
# not all component authors will supply those.
c = '''class {typename}(Component):
"""{docstring}"""
_children_props = {children_props}
_base_nodes = {base_nodes}
_namespace = '{namespace}'
_type = '{typename}'
@_explicitize_args
def __init__(self, {default_argtext}):
self._prop_names = {list_of_valid_keys}
self._valid_wildcard_attributes =\
{list_of_valid_wildcard_attr_prefixes}
self.available_properties = {list_of_valid_keys}
self.available_wildcard_properties =\
{list_of_valid_wildcard_attr_prefixes}
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs and excess named props
args = {{k: _locals[k] for k in _explicit_args}}
for k in {required_props}:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
args_without_children = {{k: _locals[k] for k in _explicit_args if k != 'children'}}
super({typename}, self).__init__({argtext})
'''
filtered_props = (
filter_props(props)
if (prop_reorder_exceptions is not None and typename in prop_reorder_exceptions)
or (prop_reorder_exceptions is not None and "ALL" in prop_reorder_exceptions)
else reorder_props(filter_props(props))
)
wildcard_prefixes = repr(parse_wildcards(props))
list_of_valid_keys = repr(list(map(str, filtered_props.keys())))
docstring = create_docstring(
component_name=typename,
props=filtered_props,
description=description,
prop_reorder_exceptions=prop_reorder_exceptions,
).replace("\r\n", "\n")
prohibit_events(props)
# pylint: disable=unused-variable
prop_keys = list(props.keys())
if "children" in props:
prop_keys.remove("children")
default_argtext = "children=None, "
argtext = "children=children, **args_without_children"
else:
default_argtext = ""
argtext = "**args_without_children"
default_arglist = [
(
f"{p:s}=Component.REQUIRED"
if props[p]["required"]
else f"{p:s}=Component.UNDEFINED"
)
for p in prop_keys
if not p.endswith("-*") and p not in python_keywords and p != "setProps"
]
if max_props:
final_max_props = max_props - (1 if "children" in props else 0)
if len(default_arglist) > final_max_props:
default_arglist = default_arglist[:final_max_props]
docstring += (
"\n\n"
"Note: due to the large number of props for this component,\n"
"not all of them appear in the constructor signature, but\n"
"they may still be used as keyword arguments."
)
default_argtext += ", ".join(default_arglist + ["**kwargs"])
required_args = required_props(filtered_props)
nodes = collect_nodes({k: v for k, v in props.items() if k != "children"})
return c.format(
typename=typename,
namespace=namespace,
filtered_props=filtered_props,
list_of_valid_wildcard_attr_prefixes=wildcard_prefixes,
list_of_valid_keys=list_of_valid_keys,
docstring=docstring,
default_argtext=default_argtext,
argtext=argtext,
required_props=required_args,
children_props=nodes,
base_nodes=filter_base_nodes(nodes) + ["children"],
)
|
def generate_class_string(
typename,
props,
description,
namespace,
prop_reorder_exceptions=None,
max_props=None,
):
"""Dynamically generate class strings to have nicely formatted docstrings,
keyword arguments, and repr.
Inspired by http://jameso.be/2013/08/06/namedtuple.html
Parameters
----------
typename
props
description
namespace
prop_reorder_exceptions
Returns
-------
string
"""
# TODO _prop_names, _type, _namespace, and available_properties
# can be modified by a Dash JS developer via setattr
# TODO - Tab out the repr for the repr of these components to make it
# look more like a hierarchical tree
# TODO - Include "description" "defaultValue" in the repr and docstring
#
# TODO - Handle "required"
#
# TODO - How to handle user-given `null` values? I want to include
# an expanded docstring like Dropdown(value=None, id=None)
# but by templating in those None values, I have no way of knowing
# whether a property is None because the user explicitly wanted
# it to be `null` or whether that was just the default value.
# The solution might be to deal with default values better although
# not all component authors will supply those.
c = '''class {typename}(Component):
"""{docstring}"""
_children_props = {children_props}
_base_nodes = {base_nodes}
_namespace = '{namespace}'
_type = '{typename}'
@_explicitize_args
def __init__(self, {default_argtext}):
self._prop_names = {list_of_valid_keys}
self._valid_wildcard_attributes =\
{list_of_valid_wildcard_attr_prefixes}
self.available_properties = {list_of_valid_keys}
self.available_wildcard_properties =\
{list_of_valid_wildcard_attr_prefixes}
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs and excess named props
args = {{k: _locals[k] for k in _explicit_args}}
for k in {required_props}:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
args_without_children = {{k: _locals[k] for k in _explicit_args if k != 'children'}}
super({typename}, self).__init__({argtext})
'''
filtered_props = (
filter_props(props)
if (prop_reorder_exceptions is not None and typename in prop_reorder_exceptions)
or (prop_reorder_exceptions is not None and "ALL" in prop_reorder_exceptions)
else reorder_props(filter_props(props))
)
wildcard_prefixes = repr(parse_wildcards(props))
list_of_valid_keys = repr(list(map(str, filtered_props.keys())))
docstring = create_docstring(
component_name=typename,
props=filtered_props,
description=description,
prop_reorder_exceptions=prop_reorder_exceptions,
).replace("\r\n", "\n")
prohibit_events(props)
# pylint: disable=unused-variable
prop_keys = list(props.keys())
if "children" in props and "children" in list_of_valid_keys:
prop_keys.remove("children")
default_argtext = "children=None, "
argtext = "children=children, **args_without_children"
else:
default_argtext = ""
argtext = "**args_without_children"
default_arglist = [
(
f"{p:s}=Component.REQUIRED"
if props[p]["required"]
else f"{p:s}=Component.UNDEFINED"
)
for p in prop_keys
if not p.endswith("-*") and p not in python_keywords and p != "setProps"
]
if max_props:
final_max_props = max_props - (1 if "children" in props else 0)
if len(default_arglist) > final_max_props:
default_arglist = default_arglist[:final_max_props]
docstring += (
"\n\n"
"Note: due to the large number of props for this component,\n"
"not all of them appear in the constructor signature, but\n"
"they may still be used as keyword arguments."
)
default_argtext += ", ".join(default_arglist + ["**kwargs"])
required_args = required_props(filtered_props)
nodes = collect_nodes({k: v for k, v in props.items() if k != "children"})
return c.format(
typename=typename,
namespace=namespace,
filtered_props=filtered_props,
list_of_valid_wildcard_attr_prefixes=wildcard_prefixes,
list_of_valid_keys=list_of_valid_keys,
docstring=docstring,
default_argtext=default_argtext,
argtext=argtext,
required_props=required_args,
children_props=nodes,
base_nodes=filter_base_nodes(nodes) + ["children"],
)
|
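A rough sketch of the general pattern only, not Dash's actual pipeline or Component base class; the names below are hypothetical. It shows the core idea: build a class definition as a template string, fill it with str.format, and execute it.

# Hypothetical, simplified template; the real generator typically writes the
# formatted string out as a Python module rather than exec-ing it.
template = '''class {typename}:
    """{docstring}"""
    def __init__(self, {default_argtext}):
        self._props = {{k: v for k, v in locals().items() if k != 'self'}}
'''
source = template.format(
    typename='ExampleComponent',
    docstring='A generated component placeholder.',
    default_argtext='children=None, **kwargs',
)
scope = {}
exec(source, scope)
component = scope['ExampleComponent'](children='hello')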
59,304 |
def assemble(asm, mode=CS_MODE_ARM):
if asm in assembly_cache[mode]:
return binascii.unhexlify(assembly_cache[mode][asm])
return binascii.unhexlify(_ks_assemble(asm, mode=mode))
|
def assemble(asm: str, mode=CS_MODE_ARM) -> bytes:
"""
Assemble the given string.
An assembly cache is first checked, and if there is no entry there, then Keystone is used.
"""
if asm in assembly_cache[mode]:
return binascii.unhexlify(assembly_cache[mode][asm])
return binascii.unhexlify(_ks_assemble(asm, mode=mode))
|
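The _ks_assemble helper is not shown in either version; a plausible Keystone-based sketch (an assumption about its implementation, not the project's actual code) might look like this.

import binascii
from keystone import Ks, KS_ARCH_ARM, KS_MODE_ARM

def _ks_assemble_sketch(asm: str) -> str:
    # Assemble ARM text and return the encoding as a hex string, matching the
    # unhexlify call sites above. The mode handling is simplified here.
    ks = Ks(KS_ARCH_ARM, KS_MODE_ARM)
    encoding, _count = ks.asm(asm)   # list of byte values
    return binascii.hexlify(bytes(encoding)).decode()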
377 |
def check_no_unexpected_results(mypy_lines: Iterator[str]):
"""Compares mypy results with list of known PASSING files.
Exits the process with non-zero exit code upon unexpected results.
"""
df = mypy_to_pandas(mypy_lines)
all_files = {
str(fp).replace(str(DP_ROOT), "").strip(os.sep).replace(os.sep, "/")
for fp in DP_ROOT.glob("pymc/**/*.py")
if not "tests" in str(fp)
}
failing = set(df.reset_index().file.str.replace(os.sep, "/", regex=False))
if not failing.issubset(all_files):
raise Exception(
"Mypy should have ignored these files:\n"
+ "\n".join(sorted(map(str, failing - all_files)))
)
passing = all_files - failing
expected_passing = set(PASSING.strip().split("\n")) - {""}
unexpected_failing = expected_passing - passing
unexpected_passing = passing - expected_passing
if not unexpected_failing:
print(f"{len(passing)}/{len(all_files)} files passes as expected.")
else:
print(f"{len(unexpected_failing)} files unexpectedly failed:")
print("\n".join(sorted(map(str, unexpected_failing))))
sys.exit(1)
if unexpected_passing:
print(f"{len(unexpected_passing)} files unexpectedly passed the type checks:")
print("\n".join(sorted(map(str, unexpected_passing))))
print("This is good news! Go to scripts/run-mypy.py and add them to the list.")
sys.exit(1)
return
|
def check_no_unexpected_results(mypy_lines: Iterator[str]):
"""Compares mypy results with list of known PASSING files.
Exits the process with non-zero exit code upon unexpected results.
"""
df = mypy_to_pandas(mypy_lines)
all_files = {
str(fp).replace(str(DP_ROOT), "").strip(os.sep).replace(os.sep, "/")
for fp in DP_ROOT.glob("pymc/**/*.py")
if not "tests" in str(fp)
}
failing = set(df.reset_index().file.str.replace(os.sep, "/", regex=False))
if not failing.issubset(all_files):
raise Exception(
"Mypy should have ignored these files:\n"
+ "\n".join(sorted(map(str, failing - all_files)))
)
passing = all_files - failing
expected_passing = set(PASSING.strip().split("\n")) - {""}
unexpected_failing = expected_passing - passing
unexpected_passing = passing - expected_passing
if not unexpected_failing:
print(f"{len(passing)}/{len(all_files)} files pass as expected.")
else:
print(f"{len(unexpected_failing)} files unexpectedly failed:")
print("\n".join(sorted(map(str, unexpected_failing))))
sys.exit(1)
if unexpected_passing:
print(f"{len(unexpected_passing)} files unexpectedly passed the type checks:")
print("\n".join(sorted(map(str, unexpected_passing))))
print("This is good news! Go to scripts/run-mypy.py and add them to the list.")
sys.exit(1)
return
|
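The pass/fail comparison above is plain set algebra; a tiny worked example with hypothetical file names:

all_files = {"pymc/a.py", "pymc/b.py", "pymc/c.py"}
failing = {"pymc/b.py"}
expected_passing = {"pymc/a.py", "pymc/c.py"}
passing = all_files - failing                    # {"pymc/a.py", "pymc/c.py"}
unexpected_failing = expected_passing - passing  # set(): nothing broke
unexpected_passing = passing - expected_passing  # set(): nothing new to add to the list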
11,699 |
def parametrize(tests, arity=None):
'''Helper for parametrizing pytest tests.
Expect a list of lambdas, one per test. Each lambda must return
the parameters for its respective test.
Test identifiers will be automatically generated, from the test
number and its lambda definition line (1.10, 2.12, 3.20, ...).
If arity is None, the arguments being parametrized will be automatically
set from the function's last arguments, according to the number of
parameters for each test.
'''
ids = []
argvalues = []
for n, t in enumerate(tests):
line = inspect.getsourcelines(t)[1]
ids.append('%u:%u' % (n+1, line))
argvalues.append(t())
if arity is None:
arity = len(argvalues[0])
assert arity > 0
def decorator(fn):
argnames = list(
parameter.name
for parameter in inspect.signature(fn).parameters.values()
if parameter.default is inspect.Parameter.empty
)[-arity:]
if arity == 1:
argnames = argnames[0]
return pytest.mark.parametrize(argnames, argvalues, ids=ids)(fn)
return decorator
|
def parametrize(tests, arity=None):
'''Helper for parametrizing pytest tests.
Expects a list of lambdas, one per test. Each lambda must return
the parameters for its respective test.
Test identifiers will be automatically generated, from the test
number and its lambda definition line (1.10, 2.12, 3.20, ...).
If arity is None, the arguments being parametrized will be automatically
set from the function's last arguments, according to the number of
parameters for each test.
'''
ids = []
argvalues = []
for n, t in enumerate(tests):
line = inspect.getsourcelines(t)[1]
ids.append('%u:%u' % (n+1, line))
argvalues.append(t())
if arity is None:
arity = len(argvalues[0])
assert arity > 0
def decorator(fn):
argnames = list(
parameter.name
for parameter in inspect.signature(fn).parameters.values()
if parameter.default is inspect.Parameter.empty
)[-arity:]
if arity == 1:
argnames = argnames[0]
return pytest.mark.parametrize(argnames, argvalues, ids=ids)(fn)
return decorator
|
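A hedged usage sketch, assuming the decorator above is importable; the test and its values are hypothetical. With arity left as None, it is taken from the first lambda's tuple (3), so the last three parameters a, b and expected are parametrized.

@parametrize([
    lambda: (2, 3, 5),
    lambda: (10, -4, 6),
])
def test_addition(a, b, expected):
    assert a + b == expected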
38,808 |
def load_config(filenames=None):
ret = _SiteConfig()
getlogger().debug('Loading the generic configuration')
ret.add_config(settings.site_configuration, '<builtin>')
if filenames:
getlogger().debug(f'Loading configuration files: {filenames!r}')
for filename in filenames:
_, ext = os.path.splitext(filename)
if ext == '.py':
ret.add_python_config(filename)
elif ext == '.json':
ret.add_json_config(filename)
else:
raise ConfigError(f"unknown configuration file type: "
f"'{filename}'")
return ret
|
def load_config(*config_files):
ret = _SiteConfig()
getlogger().debug('Loading the generic configuration')
ret.add_config(settings.site_configuration, '<builtin>')
if filenames:
getlogger().debug(f'Loading configuration files: {filenames!r}')
for filename in filenames:
_, ext = os.path.splitext(filename)
if ext == '.py':
ret.add_python_config(filename)
elif ext == '.json':
ret.add_json_config(filename)
else:
raise ConfigError(f"unknown configuration file type: "
f"'{filename}'")
return ret
|
33,197 |
def parse_niftis(layout, niftis, subj, config, **kwargs):
"""
Loop through niftis in a BIDSLayout and generate the appropriate description
type for each scan. Compile all of the descriptions into a list.
Parameters
----------
layout : :obj:`bids.layout.BIDSLayout`
Layout object for a BIDS dataset.
niftis : :obj:`list` or :obj:`grabbit.core.File`
List of nifti files in layout corresponding to subject/session combo.
subj : :obj:`str`
Subject ID.
config : :obj:`dict`
Configuration info for methods generation.
"""
kwargs = {k: v for k, v in kwargs.items() if v is not None}
description_list = []
skip_task = {} # Only report each task once
for nifti_struct in niftis:
nii_file = nifti_struct.path
metadata = layout.get_metadata(nii_file)
if not metadata:
LOGGER.warning('No json file found for %s', nii_file)
else:
import nibabel as nib
img = nib.load(nii_file)
# Assume all data were acquired the same way.
if not description_list:
description_list.append(general_acquisition_info(metadata))
if nifti_struct.entities['datatype'] == 'func':
if not skip_task.get(nifti_struct.entities['task'], False):
echos = layout.get_echoes(subject=subj, extension=[".nii", ".nii.gz"],
task=nifti_struct.entities['task'],
**kwargs)
n_echos = len(echos)
if n_echos > 0:
metadata['EchoTime'] = []
for echo in sorted(echos):
echo_struct = layout.get(subject=subj, echo=echo,
extension=[".nii", ".nii.gz"],
task=nifti_struct.entities['task'],
**kwargs)[0]
echo_file = echo_struct.path
echo_meta = layout.get_metadata(echo_file)
metadata['EchoTime'].append(echo_meta['EchoTime'])
n_runs = len(layout.get_runs(subject=subj,
task=nifti_struct.entities['task'],
**kwargs))
description_list.append(func_info(nifti_struct.entities['task'],
n_runs, metadata, img,
config))
skip_task[nifti_struct.entities['task']] = True
elif nifti_struct.entities['datatype'] == 'anat':
suffix = nifti_struct.entities['suffix']
if suffix.endswith('w'):
suffix = suffix[:-1] + '-weighted'
description_list.append(anat_info(suffix, metadata, img,
config))
elif nifti_struct.entities['datatype'] == 'dwi':
bval_file = nii_file.replace('.nii', '.bval')
description_list.append(dwi_info(bval_file, metadata, img,
config))
elif nifti_struct.entities['datatype'] == 'fmap':
description_list.append(fmap_info(metadata, img, config,
layout))
return description_list
|
def parse_niftis(layout, niftis, subj, config, **kwargs):
"""
Loop through niftis in a BIDSLayout and generate the appropriate description
type for each scan. Compile all of the descriptions into a list.
Parameters
----------
layout : :obj:`bids.layout.BIDSLayout`
Layout object for a BIDS dataset.
niftis : :obj:`list` or :obj:`grabbit.core.File`
List of nifti files in layout corresponding to subject/session combo.
subj : :obj:`str`
Subject ID.
config : :obj:`dict`
Configuration info for methods generation.
"""
kwargs = {k: v for k, v in kwargs.items() if v is not None}
description_list = []
skip_task = {} # Only report each task once
for nifti_struct in niftis:
nii_file = nifti_struct.path
metadata = layout.get_metadata(nii_file)
if not metadata:
LOGGER.warning('No json file found for %s', nii_file)
else:
import nibabel as nib
img = nib.load(nii_file)
# Assume all data were acquired the same way.
if not description_list:
description_list.append(general_acquisition_info(metadata))
if nifti_struct.entities['datatype'] == 'func':
if not skip_task.get(nifti_struct.entities['task'], False):
echos = layout.get_echoes(subject=subj, extension=[".nii", ".nii.gz"],
task=nifti_struct.entities['task'],
**kwargs)
n_echos = len(echos)
if n_echos > 0:
metadata['EchoTime'] = []
for echo in sorted(echos):
echo_struct = layout.get(subject=subj, echo=echo,
extension=[".nii", ".nii.gz"],
task=nifti_struct.entities['task'],
**kwargs)[0]
echo_file = echo_struct.path
echo_meta = layout.get_metadata(echo_file)
metadata['EchoTime'].append(echo_meta['EchoTime'])
n_runs = len(layout.get_runs(subject=subj,
task=nifti_struct.entities['task'],
**kwargs))
description_list.append(func_info(nifti_struct.entities['task'],
n_runs, metadata, img,
config))
skip_task[nifti_struct.entities['task']] = True
elif nifti_struct.entities['datatype'] == 'anat':
suffix = nifti_struct.entities['suffix']
if suffix.endswith('w'):
suffix = suffix[:-1] + '-weighted'
description_list.append(anat_info(suffix, metadata, img,
config))
elif nifti_struct.entities['datatype'] == 'dwi':
bval_file = nii_file.replace('.nii', '.bval').replace('.nii.gz', '.bval')
description_list.append(dwi_info(bval_file, metadata, img,
config))
elif nifti_struct.entities['datatype'] == 'fmap':
description_list.append(fmap_info(metadata, img, config,
layout))
return description_list
|
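The change between the two versions is in the .bval path handling; a small illustration of the underlying str.replace behaviour on hypothetical file names:

"sub-01_dwi.nii".replace(".nii", ".bval")        # 'sub-01_dwi.bval'
"sub-01_dwi.nii.gz".replace(".nii.gz", ".bval")  # 'sub-01_dwi.bval'
"sub-01_dwi.nii.gz".replace(".nii", ".bval")     # 'sub-01_dwi.bval.gz', hence the extra replace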
42,112 |
def test_dominates() -> None:
def create_trial(values: List[float], state: TrialState = TrialState.COMPLETE) -> FrozenTrial:
return optuna.trial.create_trial(values=values, state=state)
directions = [StudyDirection.MINIMIZE, StudyDirection.MAXIMIZE]
def check_domination(t0: FrozenTrial, t1: FrozenTrial) -> None:
assert _dominates(t0, t1, directions)
assert not _dominates(t1, t0, directions)
def check_nondomination(t0: FrozenTrial, t1: FrozenTrial) -> None:
assert not _dominates(t0, t1, directions)
assert not _dominates(t1, t0, directions)
# The numbers of objectives for `t0` and `t1` don't match.
with pytest.raises(ValueError):
t0 = create_trial([1]) # One objective.
t1 = create_trial([1, 2]) # Two objectives.
_dominates(t0, t1, directions)
# The numbers of objectives and directions don't match.
with pytest.raises(ValueError):
t0 = create_trial([1]) # One objective.
t1 = create_trial([1]) # One objective.
_dominates(t0, t1, directions)
# `t0` dominates `t1`.
t0 = create_trial([0, 2])
t1 = create_trial([1, 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, 1])
t1 = create_trial([1, 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, 2])
t1 = create_trial([float("inf"), 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([float("inf"), 2])
t1 = create_trial([float("inf"), 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([-float("inf"), float("inf")])
t1 = create_trial([0, 1])
check_domination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([1, 1])
t1 = create_trial([1, 1])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([0, 1])
t1 = create_trial([1, 2])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([-float("inf"), 1])
t1 = create_trial([0, 2])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([float("inf"), float("inf")])
t1 = create_trial([float("inf"), float("inf")])
check_nondomination(t0, t1)
for t0_state in [TrialState.FAIL, TrialState.WAITING, TrialState.PRUNED]:
t0 = create_trial([1, 1], t0_state)
for t1_state in [
TrialState.COMPLETE,
TrialState.FAIL,
TrialState.WAITING,
TrialState.PRUNED,
]:
# If `t0` does not have the COMPLETE state, it never dominates other trials.
t1 = create_trial([0, 2], t1_state)
if t1_state == TrialState.COMPLETE:
# If `t0` isn't COMPLETE and `t1` is COMPLETE, `t1` dominates `t0`.
check_domination(t1, t0)
else:
# If `t1` isn't COMPLETE, it doesn't dominate others.
check_nondomination(t0, t1)
|
def test_dominates() -> None:
def create_trial(values: List[float], state: TrialState = TrialState.COMPLETE) -> FrozenTrial:
return optuna.trial.create_trial(values=values, state=state)
directions = [StudyDirection.MINIMIZE, StudyDirection.MAXIMIZE]
def check_domination(t0: FrozenTrial, t1: FrozenTrial) -> None:
assert _dominates(t0, t1, directions)
assert not _dominates(t1, t0, directions)
def check_nondomination(t0: FrozenTrial, t1: FrozenTrial) -> None:
assert not _dominates(t0, t1, directions)
assert not _dominates(t1, t0, directions)
# The numbers of objectives for `t0` and `t1` don't match.
with pytest.raises(ValueError):
t0 = create_trial([1]) # One objective.
t1 = create_trial([1, 2]) # Two objectives.
_dominates(t0, t1, directions)
# The numbers of objectives and directions don't match.
with pytest.raises(ValueError):
t0 = create_trial([1]) # One objective.
t1 = create_trial([1]) # One objective.
_dominates(t0, t1, directions)
# `t0` dominates `t1`.
t0 = create_trial([0, 2])
t1 = create_trial([1, 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, 1])
t1 = create_trial([1, 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, 2])
t1 = create_trial([float("inf"), 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([float("inf"), 2])
t1 = create_trial([float("inf"), 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([-float("inf"), float("inf")])
t1 = create_trial([0, 1])
check_domination(t0, t1)
# `t0` and `t1` are incomparable.
t0 = create_trial([1, 1])
t1 = create_trial([1, 1])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([0, 1])
t1 = create_trial([1, 2])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([-float("inf"), 1])
t1 = create_trial([0, 2])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([float("inf"), float("inf")])
t1 = create_trial([float("inf"), float("inf")])
check_nondomination(t0, t1)
for t0_state in [TrialState.FAIL, TrialState.WAITING, TrialState.PRUNED]:
t0 = create_trial([1, 1], t0_state)
for t1_state in [
TrialState.COMPLETE,
TrialState.FAIL,
TrialState.WAITING,
TrialState.PRUNED,
]:
# If `t0` does not have the COMPLETE state, it never dominates other trials.
t1 = create_trial([0, 2], t1_state)
if t1_state == TrialState.COMPLETE:
# If `t0` isn't COMPLETE and `t1` is COMPLETE, `t1` dominates `t0`.
check_domination(t1, t0)
else:
# If `t1` isn't COMPLETE, it doesn't dominate others.
check_nondomination(t0, t1)
|
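A minimal standalone sketch of the dominance rule the test exercises (an illustration only, not optuna's _dominates), for two objectives with directions [minimize, maximize]:

def dominates_sketch(v0, v1):
    # v0 dominates v1 iff it is no worse in both objectives and strictly
    # better in at least one; the first objective is minimized, the second
    # maximized.
    no_worse = v0[0] <= v1[0] and v0[1] >= v1[1]
    strictly_better = v0[0] < v1[0] or v0[1] > v1[1]
    return no_worse and strictly_better

assert dominates_sketch([0, 2], [1, 1])
assert not dominates_sketch([1, 1], [1, 1])
assert not dominates_sketch([0, 1], [1, 2])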
11,875 |
def open(fp, mode="r"):
"""
Opens and identifies the given image file.
This is a lazy operation; this function identifies the file, but
the file remains open and the actual image data is not read from
the file until you try to process the data (or call the
:py:meth:`~PIL.Image.Image.load` method). See
:py:func:`~PIL.Image.new`. See :ref:`file-handling`.
:param fp: A filename (string), pathlib.Path object or a file object.
The file object must implement ``file.read``,
``file.seek``, and ``file.tell`` methods,
and be opened in binary mode.
:param mode: The mode. If given, this argument must be "r".
:returns: An :py:class:`~PIL.Image.Image` object.
:exception FileNotFoundError: If the file cannot be found.
:exception PIL.UnidentifiedImageError: If the image cannot be opened and
identified.
:exception ValueError: If the ``mode`` is not "r", or if a ``StringIO``
instance is used for ``fp``.
"""
if mode != "r":
raise ValueError(f"bad mode {repr(mode)}")
elif isinstance(fp, io.StringIO):
raise ValueError(
"StringIO cannot be used to open an image. "
"Binary data must be used instead."
)
exclusive_fp = False
filename = ""
if isinstance(fp, Path):
filename = str(fp.resolve())
elif isPath(fp):
filename = fp
if filename:
fp = builtins.open(filename, "rb")
exclusive_fp = True
try:
fp.seek(0)
except (AttributeError, io.UnsupportedOperation):
fp = io.BytesIO(fp.read())
exclusive_fp = True
prefix = fp.read(16)
preinit()
accept_warnings = []
def _open_core(fp, filename, prefix):
for i in ID:
try:
factory, accept = OPEN[i]
result = not accept or accept(prefix)
if type(result) in [str, bytes]:
accept_warnings.append(result)
elif result:
fp.seek(0)
im = factory(fp, filename)
_decompression_bomb_check(im.size)
return im
except (SyntaxError, IndexError, TypeError, struct.error):
# Leave disabled by default, spams the logs with image
# opening failures that are entirely expected.
# logger.debug("", exc_info=True)
continue
except BaseException:
if exclusive_fp:
fp.close()
raise
return None
im = _open_core(fp, filename, prefix)
if im is None:
if init():
im = _open_core(fp, filename, prefix)
if im:
im._exclusive_fp = exclusive_fp
return im
if exclusive_fp:
fp.close()
for message in accept_warnings:
warnings.warn(message)
raise UnidentifiedImageError(
"cannot identify image file %r" % (filename if filename else fp)
)
|
def open(fp, mode="r"):
"""
Opens and identifies the given image file.
raise ValueError(f"bad mode {mode!r}")
This is a lazy operation; this function identifies the file, but
the file remains open and the actual image data is not read from
the file until you try to process the data (or call the
:py:meth:`~PIL.Image.Image.load` method). See
:py:func:`~PIL.Image.new`. See :ref:`file-handling`.
:param fp: A filename (string), pathlib.Path object or a file object.
The file object must implement ``file.read``,
``file.seek``, and ``file.tell`` methods,
and be opened in binary mode.
:param mode: The mode. If given, this argument must be "r".
:returns: An :py:class:`~PIL.Image.Image` object.
:exception FileNotFoundError: If the file cannot be found.
:exception PIL.UnidentifiedImageError: If the image cannot be opened and
identified.
:exception ValueError: If the ``mode`` is not "r", or if a ``StringIO``
instance is used for ``fp``.
"""
if mode != "r":
raise ValueError(f"bad mode {repr(mode)}")
elif isinstance(fp, io.StringIO):
raise ValueError(
"StringIO cannot be used to open an image. "
"Binary data must be used instead."
)
exclusive_fp = False
filename = ""
if isinstance(fp, Path):
filename = str(fp.resolve())
elif isPath(fp):
filename = fp
if filename:
fp = builtins.open(filename, "rb")
exclusive_fp = True
try:
fp.seek(0)
except (AttributeError, io.UnsupportedOperation):
fp = io.BytesIO(fp.read())
exclusive_fp = True
prefix = fp.read(16)
preinit()
accept_warnings = []
def _open_core(fp, filename, prefix):
for i in ID:
try:
factory, accept = OPEN[i]
result = not accept or accept(prefix)
if type(result) in [str, bytes]:
accept_warnings.append(result)
elif result:
fp.seek(0)
im = factory(fp, filename)
_decompression_bomb_check(im.size)
return im
except (SyntaxError, IndexError, TypeError, struct.error):
# Leave disabled by default, spams the logs with image
# opening failures that are entirely expected.
# logger.debug("", exc_info=True)
continue
except BaseException:
if exclusive_fp:
fp.close()
raise
return None
im = _open_core(fp, filename, prefix)
if im is None:
if init():
im = _open_core(fp, filename, prefix)
if im:
im._exclusive_fp = exclusive_fp
return im
if exclusive_fp:
fp.close()
for message in accept_warnings:
warnings.warn(message)
raise UnidentifiedImageError(
"cannot identify image file %r" % (filename if filename else fp)
)
|
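Typical usage of the function above; the filename is hypothetical and the printed values depend on the file:

from PIL import Image

with Image.open("photo.jpg") as im:
    print(im.format, im.size, im.mode)   # e.g. JPEG (640, 480) RGB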
13,568 |
def arnoldi(A, E, l, b):
"""Compute an Arnoldi factorization.
Computes matrices :math:`V_l` and :math:`H_l` and a vector :math:`f_l` such that
.. math::
A V_l = V_l H_l + f_l e_l^T.
Additionally it holds that :math:`V_l^T V_l` is the identity matrix and :math:`H_l`
is an upper Hessenberg matrix. If `E` is not `None` it holds
.. math::
E^{-1} A V_l = V_l H_l + f_l e_l^T.
Parameters
----------
A
The |Operator| A.
E
The |Operator| E.
l
The length of the Arnoldi factorization.
b
A |VectorArray| which is used as the initial vector for the iteration.
Returns
-------
V
A |VectorArray| whose columns span an orthogonal basis for R^l.
H
A |NumPy array| which is an upper Hessenberg matrix.
f
A |VectorArray| which represents the residual vector of the Arnoldi factorzation.
"""
v = b * (1 / b.l2_norm()[0])
H = np.zeros((l, l))
V = A.source.empty(reserve=l)
V.append(v)
for i in range(l):
v = E.apply_inverse(A.apply(v))
V.append(v)
_, R = gram_schmidt(V, return_R=True, atol=0, rtol=0, offset=len(V) - 1, copy=False)
H[:i + 2, i] = R[:l, i + 1]
v = V[-1]
return V[:l], H, v * R[l, l]
|
def arnoldi(A, E, l, b):
"""Compute an Arnoldi factorization.
Computes matrices :math:`V_l` and :math:`H_l` and a vector :math:`f_l` such that
.. math::
A V_l = V_l H_l + f_l e_l^T.
Additionally it holds that :math:`V_l^T V_l` is the identity matrix and :math:`H_l`
is an upper Hessenberg matrix. If `E` is not `None` it holds
.. math::
E^{-1} A V_l = V_l H_l + f_l e_l^T.
Parameters
----------
A
The |Operator| A.
E
The |Operator| E.
l
The length of the Arnoldi factorization.
b
A |VectorArray| which is used as the initial vector for the iteration.
Returns
-------
V
A |VectorArray| whose columns span an orthogonal basis for R^l.
H
A |NumPy array| which is an upper Hessenberg matrix.
f
A |VectorArray| which represents the residual vector of the Arnoldi factorization.
"""
v = b * (1 / b.l2_norm()[0])
H = np.zeros((l, l))
V = A.source.empty(reserve=l)
V.append(v)
for i in range(l):
v = E.apply_inverse(A.apply(v))
V.append(v)
_, R = gram_schmidt(V, return_R=True, atol=0, rtol=0, offset=len(V) - 1, copy=False)
H[:i + 2, i] = R[:l, i + 1]
v = V[-1]
return V[:l], H, v * R[l, l]
|
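A standalone NumPy analogue of the factorization (not pyMOR's VectorArray-based code), useful for checking the identity A V_l = V_l H_l + f_l e_l^T on a small dense example:

import numpy as np

def arnoldi_np(A, b, l):
    # Plain Arnoldi with modified Gram-Schmidt on dense arrays.
    n = A.shape[0]
    V = np.zeros((n, l + 1))
    H = np.zeros((l + 1, l))
    V[:, 0] = b / np.linalg.norm(b)
    for j in range(l):
        w = A @ V[:, j]
        for i in range(j + 1):
            H[i, j] = V[:, i] @ w
            w = w - H[i, j] * V[:, i]
        H[j + 1, j] = np.linalg.norm(w)
        V[:, j + 1] = w / H[j + 1, j]
    return V[:, :l], H[:l, :l], V[:, l] * H[l, l - 1]

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 6))
b = rng.standard_normal(6)
V, H, f = arnoldi_np(A, b, 3)
e_l = np.zeros(3)
e_l[-1] = 1.0
assert np.allclose(A @ V, V @ H + np.outer(f, e_l))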
29,086 |
def _check_if_island_server(server: str) -> bool:
logger.debug(f"Trying to connect to server: {server}")
try:
_ = IslandApiClient(server)
return True
except IslandAPIConnectionError as err:
logger.error(f"Unable to connect to server/relay {server}: {err}")
except IslandAPITimeoutError as err:
logger.error(f"Timed out while connecting to server/relay {server}: {err}")
except IslandAPIError as err:
logger.error(
f"Exception encountered when trying to connect to server/relay {server}: {err}"
)
return False
|
def _check_if_island_server(server: str) -> bool:
logger.debug(f"Trying to connect to server: {server}")
try:
IslandApiClient(server)
return True
except IslandAPIConnectionError as err:
logger.error(f"Unable to connect to server/relay {server}: {err}")
except IslandAPITimeoutError as err:
logger.error(f"Timed out while connecting to server/relay {server}: {err}")
except IslandAPIError as err:
logger.error(
f"Exception encountered when trying to connect to server/relay {server}: {err}"
)
return False
|
49,295 |
def parse_header_parameters(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(";" + line)
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find("=")
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1 :].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace("\\\\", "\\").replace('\\"', '"')
pdict[name] = value
return key, pdict
|
def parse_header_parameters(line):
"""
Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find("=")
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1 :].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace("\\\\", "\\").replace('\\"', '"')
pdict[name] = value
return key, pdict
|
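A usage sketch of the first version above (it relies on a _parseparam helper from the same module, assumed to be importable alongside it):

key, pdict = parse_header_parameters('text/html; charset="utf-8"')
# key == 'text/html'
# pdict == {'charset': 'utf-8'}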
33,874 |
def override_task_or_actor_runtime_env(
child_runtime_env: ParsedRuntimeEnv,
parent_runtime_env: ParsedRuntimeEnv) -> ParsedRuntimeEnv:
"""Merge the given child runtime env with the parent runtime env.
If running in a driver, the current runtime env comes from the
JobConfig. Otherwise, we are running in a worker for an actor or
task, and the current runtime env comes from the current TaskSpec.
By default, the child runtime env inherits non-specified options from the
parent. There are two exceptions to this:
- The env_vars dictionaries are merged, so environment variables
not specified by the child are still inherited from the parent.
Returns:
The resulting merged ParsedRuntimeEnv.
"""
assert child_runtime_env is not None
assert parent_runtime_env is not None
# Override environment variables.
result_env_vars = copy.deepcopy(parent_runtime_env.get("env_vars") or {})
child_env_vars = child_runtime_env.get("env_vars") or {}
result_env_vars.update(child_env_vars)
# Inherit all other non-specified options from the parent.
result = copy.deepcopy(parent_runtime_env)
result.update(child_runtime_env)
if len(result_env_vars) > 0:
result["env_vars"] = result_env_vars
# NOTE(architkulkarni): This allows worker caching code in C++ to
# check if a runtime env is empty without deserializing it.
assert all(val is not None for val in result.values())
return result
|
def override_task_or_actor_runtime_env(
child_runtime_env: ParsedRuntimeEnv,
parent_runtime_env: ParsedRuntimeEnv) -> ParsedRuntimeEnv:
"""Merge the given child runtime env with the parent runtime env.
If running in a driver, the current runtime env comes from the
JobConfig. Otherwise, we are running in a worker for an actor or
task, and the current runtime env comes from the current TaskSpec.
By default, the child runtime env inherits non-specified options from the
parent. There is one exception to this:
- The env_vars dictionaries are merged, so environment variables
not specified by the child are still inherited from the parent.
Returns:
The resulting merged ParsedRuntimeEnv.
"""
assert child_runtime_env is not None
assert parent_runtime_env is not None
# Override environment variables.
result_env_vars = copy.deepcopy(parent_runtime_env.get("env_vars") or {})
child_env_vars = child_runtime_env.get("env_vars") or {}
result_env_vars.update(child_env_vars)
# Inherit all other non-specified options from the parent.
result = copy.deepcopy(parent_runtime_env)
result.update(child_runtime_env)
if len(result_env_vars) > 0:
result["env_vars"] = result_env_vars
# NOTE(architkulkarni): This allows worker caching code in C++ to
# check if a runtime env is empty without deserializing it.
assert all(val is not None for val in result.values())
return result
|
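A minimal sketch of the merge semantics with plain dicts standing in for ParsedRuntimeEnv (an assumption; only dict-like behaviour is used here):

import copy

parent = {"pip": ["requests"], "env_vars": {"A": "1", "B": "2"}}
child = {"env_vars": {"B": "3"}}

merged_env_vars = copy.deepcopy(parent.get("env_vars") or {})
merged_env_vars.update(child.get("env_vars") or {})
merged = copy.deepcopy(parent)
merged.update(child)
merged["env_vars"] = merged_env_vars
# merged == {"pip": ["requests"], "env_vars": {"A": "1", "B": "3"}}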
9,058 |
def test_nick_command_from_callable_regex_pattern(mockbot):
@plugin.nickname_commands('do .*')
def handler(wrapped, trigger):
wrapped.reply('Hi!')
loader.clean_callable(handler, mockbot.settings)
# create rule from a cleaned callable
rule = rules.NickCommand.from_callable(mockbot.settings, handler)
# does not match on ".do anything"
line = ':Foo!foo@example.com PRIVMSG #sopel :TestBot: do anything'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert not results, 'Regex command are not allowed since Sopel 8.0'
# match on ".do .*"
line = ':Foo!foo@example.com PRIVMSG #sopel :TestBot: do .*'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, 'Exactly 1 command must match'
result = results[0]
assert result.group(0) == 'TestBot: do .*'
assert result.group(1) == 'do .*'
assert result.group(2) is None
assert result.group(3) is None
assert result.group(4) is None
assert result.group(5) is None
assert result.group(6) is None
|
def test_nick_command_from_callable_regex_pattern(mockbot):
@plugin.nickname_commands('do .*')
def handler(wrapped, trigger):
wrapped.reply('Hi!')
loader.clean_callable(handler, mockbot.settings)
# create rule from a cleaned callable
rule = rules.NickCommand.from_callable(mockbot.settings, handler)
# does not match on ".do anything"
line = ':Foo!foo@example.com PRIVMSG #sopel :TestBot: do anything'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert not results, 'Regex commands are not allowed since Sopel 8.0'
# match on ".do .*"
line = ':Foo!foo@example.com PRIVMSG #sopel :TestBot: do .*'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, 'Exactly 1 command must match'
result = results[0]
assert result.group(0) == 'TestBot: do .*'
assert result.group(1) == 'do .*'
assert result.group(2) is None
assert result.group(3) is None
assert result.group(4) is None
assert result.group(5) is None
assert result.group(6) is None
|
52,152 |
def apply_pack_owner_group(pack_path):
"""
Switch owner group of the pack / virtualenv directory to the configured
group.
NOTE: This requires sudo access.
"""
pack_group = utils.get_pack_group()
if pack_group:
LOG.debug('Changing owner group of "{}" directory to {}'.format(pack_path, pack_group))
if SUDO_BINARY:
args = ['sudo', 'chgrp', '-R', pack_group, pack_path]
else:
# Environments where sudo is not available (e.g. docker)
args = ['chgrp', '-R', pack_group, pack_path]
exit_code, _, stderr, _ = shell.run_command(args)
if exit_code != 0:
# Non fatal, but we still log it
LOG.debug('Failed to change owner group on directory "{}" to "{}":'.format(pack_path, pack_group))
LOG.debug(stderr)
return True
|
def apply_pack_owner_group(pack_path):
"""
Switch owner group of the pack / virtualenv directory to the configured
group.
NOTE: This requires sudo access.
"""
pack_group = utils.get_pack_group()
if pack_group:
LOG.debug('Changing owner group of "{}" directory to {}'.format(pack_path, pack_group))
if SUDO_BINARY:
args = ['sudo', 'chgrp', '-R', pack_group, pack_path]
else:
# Environments where sudo is not available (e.g. docker)
args = ['chgrp', '-R', pack_group, pack_path]
exit_code, _, stderr, _ = shell.run_command(args)
if exit_code != 0:
# Non fatal, but we still log it
LOG.debug('Failed to change owner group on directory "{}" to "{}": {}'
.format(pack_path, pack_group, stderr))
return True
|