id (int64, 11-59.9k) | original (string, lengths 33-150k) | modified (string, lengths 37-150k) |
---|---|---|
31,662 |
def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abnormal-security-submit-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
5,922 |
def handle_option_line(
opts, # type: Values
filename, # type: str
lineno, # type: int
finder=None, # type: Optional[PackageFinder]
options=None, # type: Optional[optparse.Values]
session=None, # type: Optional[PipSession]
):
# type: (...) -> None
# percolate hash-checking option upward
if opts.require_hashes and options:
options.require_hashes = opts.require_hashes
# set finder options
elif finder:
find_links = finder.find_links
index_urls = finder.index_urls
if opts.index_url:
index_urls = [opts.index_url]
if opts.no_index is True:
index_urls = []
if opts.extra_index_urls:
index_urls.extend(opts.extra_index_urls)
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
find_links.append(value)
search_scope = SearchScope(
find_links=find_links,
index_urls=index_urls,
)
finder.search_scope = search_scope
if opts.pre:
finder.set_allow_all_prereleases()
if opts.prefer_binary:
finder.set_prefer_binary()
if session:
for host in opts.trusted_hosts or []:
source = 'line {} of {}'.format(lineno, filename)
session.add_trusted_host(host, source=source)
|
def handle_option_line(
opts, # type: Values
filename, # type: str
lineno, # type: int
finder=None, # type: Optional[PackageFinder]
options=None, # type: Optional[optparse.Values]
session=None, # type: Optional[PipSession]
):
# type: (...) -> None
# percolate hash-checking option upward
if options and opts.require_hashes:
options.require_hashes = opts.require_hashes
# set finder options
elif finder:
find_links = finder.find_links
index_urls = finder.index_urls
if opts.index_url:
index_urls = [opts.index_url]
if opts.no_index is True:
index_urls = []
if opts.extra_index_urls:
index_urls.extend(opts.extra_index_urls)
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
find_links.append(value)
search_scope = SearchScope(
find_links=find_links,
index_urls=index_urls,
)
finder.search_scope = search_scope
if opts.pre:
finder.set_allow_all_prereleases()
if opts.prefer_binary:
finder.set_prefer_binary()
if session:
for host in opts.trusted_hosts or []:
source = 'line {} of {}'.format(lineno, filename)
session.add_trusted_host(host, source=source)
|
8,683 |
def handle_init(options):
"""Use config's wizard to initialize a new configuration file for the bot
:param options: argument parser's parsed options
.. note::
Due to how the config's wizard works, the configuration filename's
extension must be ``.cfg``.
"""
config_filename = utils.find_config(
config.DEFAULT_HOMEDIR,
getattr(options, 'config', None) or 'default')
config_name, ext = os.path.splitext(config_filename)
if ext and ext != '.cfg':
tools.stderr('Configuration wizard accepts .cfg file only')
return 1
elif not ext:
config_filename = config_name + '.cfg'
if os.path.isfile(config_filename):
tools.stderr('Configuration file %s already exists' % config_filename)
return 1
print('Starting Sopel config wizard for: %s' % config_filename)
config._wizard('all', config_name)
|
def handle_init(options):
"""Use config's wizard to initialize a new configuration file for the bot
:param options: argument parser's parsed options
.. note::
Due to how the config wizard works, the configuration filename's
extension must be ``.cfg``.
"""
config_filename = utils.find_config(
config.DEFAULT_HOMEDIR,
getattr(options, 'config', None) or 'default')
config_name, ext = os.path.splitext(config_filename)
if ext and ext != '.cfg':
tools.stderr('Configuration wizard accepts .cfg file only')
return 1
elif not ext:
config_filename = config_name + '.cfg'
if os.path.isfile(config_filename):
tools.stderr('Configuration file %s already exists' % config_filename)
return 1
print('Starting Sopel config wizard for: %s' % config_filename)
config._wizard('all', config_name)
|
40,092 |
def make_response_vulnerability_report(vulnerability_type, vulnerability_report):
os_vulns = []
non_os_vulns = []
# Convert the response from json to the model type
image_vulnerabilities = ImageVulnerabilitiesReport.from_json(vulnerability_report)
for result in image_vulnerabilities.results:
if not result.vulnerability or not result.artifact:
logger.warn(
"Missing vulnerability and artifact data in match record, skipping"
)
continue
record = {
"vuln": result.vulnerability.vulnerability_id,
"severity": result.vulnerability.severity,
"url": result.vulnerability.link,
"fix": ",".join(result.fix.versions) if result.fix.versions else None,
"package": "{}-{}".format(result.artifact.name, result.artifact.version),
"package_name": result.artifact.name,
"package_version": result.artifact.version,
"package_type": result.artifact.pkg_type,
"package_cpe": result.artifact.cpe,
"package_cpe23": result.artifact.cpe23,
"package_path": result.artifact.pkg_path,
"feed": result.vulnerability.feed,
"feed_group": result.vulnerability.feed_group,
"nvd_data": result.vulnerability.cvss_scores_nvd,
"vendor_data": result.vulnerability.cvss_scores_vendor,
}
if result.artifact.pkg_type in nonos_package_types:
non_os_vulns.append(record)
else:
os_vulns.append(record)
if vulnerability_type == "os":
return os_vulns
elif vulnerability_type == "non-os":
return non_os_vulns
else:
return os_vulns + non_os_vulns
|
def make_response_vulnerability_report(vulnerability_type, vulnerability_report):
os_vulns = []
non_os_vulns = []
# Convert the response from json to the model type
image_vulnerabilities = ImageVulnerabilitiesReport.from_json(vulnerability_report)
for result in image_vulnerabilities.results:
if not result.vulnerability or not result.artifact:
logger.warn(
"Missing vulnerability and artifact data in match record, skipping"
)
continue
record = {
"vuln": result.vulnerability.vulnerability_id,
"severity": result.vulnerability.severity,
"url": result.vulnerability.link,
"fix": ",".join(result.fix.versions) if result.fix and result.fix.versions else "None",
"package": "{}-{}".format(result.artifact.name, result.artifact.version),
"package_name": result.artifact.name,
"package_version": result.artifact.version,
"package_type": result.artifact.pkg_type,
"package_cpe": result.artifact.cpe,
"package_cpe23": result.artifact.cpe23,
"package_path": result.artifact.pkg_path,
"feed": result.vulnerability.feed,
"feed_group": result.vulnerability.feed_group,
"nvd_data": result.vulnerability.cvss_scores_nvd,
"vendor_data": result.vulnerability.cvss_scores_vendor,
}
if result.artifact.pkg_type in nonos_package_types:
non_os_vulns.append(record)
else:
os_vulns.append(record)
if vulnerability_type == "os":
return os_vulns
elif vulnerability_type == "non-os":
return non_os_vulns
else:
return os_vulns + non_os_vulns
|
14,802 |
def setup(hass, config):
"""Set up the opnsense component."""
conf = config[DOMAIN]
url = conf[CONF_URL]
api_key = conf[CONF_API_KEY]
api_secret = conf[CONF_API_SECRET]
verify_ssl = conf.get(CONF_VERIFY_SSL, False)
tracker_interfaces = conf.get(CONF_TRACKER_INTERFACE, None)
from pyopnsense import diagnostics
if tracker_interfaces:
# Verify that specified tracker interfaces are valid
netinsight_client = diagnostics.NetworkInsightClient(
api_key, api_secret, url, verify_ssl
)
interfaces = list(netinsight_client.get_interfaces().values())
for interface in tracker_interfaces:
if interface not in interfaces:
_LOGGER.error(
"Specified OPNsense tracker interface %s is not " "found", interface
)
return False
else:
tracker_interfaces = ["LAN"]
interfaces_client = diagnostics.InterfaceClient(
api_key, api_secret, url, verify_ssl
)
clients = {"interfaces": interfaces_client}
hass.data[OPNSENSE_DATA] = clients
hass.async_create_task(
async_load_platform(hass, "device_tracker", DOMAIN, tracker_interfaces, config)
)
return True
|
def setup(hass, config):
"""Set up the opnsense component."""
conf = config[DOMAIN]
url = conf[CONF_URL]
api_key = conf[CONF_API_KEY]
api_secret = conf[CONF_API_SECRET]
verify_ssl = conf.get(CONF_VERIFY_SSL, False)
tracker_interfaces = conf.get(CONF_TRACKER_INTERFACE, None)
from pyopnsense import diagnostics
if tracker_interfaces:
# Verify that specified tracker interfaces are valid
netinsight_client = diagnostics.NetworkInsightClient(
api_key, api_secret, url, verify_ssl
)
interfaces = list(netinsight_client.get_interfaces().values())
for interface in tracker_interfaces:
if interface not in interfaces:
_LOGGER.error(
"Specified OPNsense tracker interface %s is not found", interface
)
return False
else:
tracker_interfaces = ["LAN"]
interfaces_client = diagnostics.InterfaceClient(
api_key, api_secret, url, verify_ssl
)
clients = {"interfaces": interfaces_client}
hass.data[OPNSENSE_DATA] = clients
hass.async_create_task(
async_load_platform(hass, "device_tracker", DOMAIN, tracker_interfaces, config)
)
return True
|
2,073 |
def plot_det_curve(
estimator,
X,
y,
*,
sample_weight=None,
response_method="auto",
name=None,
ax=None,
pos_label=None,
**kwargs
):
"""Plot detection error tradeoff (DET) curve.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <visualizations>`.
.. versionadded:: 0.24
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name of ROC Curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
pos_label : str or int, default=None
The label of the positive class.
When `pos_label=None`, if `y_true` is in {-1, 1} or {0, 1},
`pos_label` is set to 1, otherwise an error will be raised.
Returns
-------
display : :class:`~sklearn.metrics.DetCurveDisplay`
Object that stores computed values.
See Also
--------
roc_auc_score : Compute the area under the ROC curve
roc_curve : Compute Receiver operating characteristic (ROC) curve
Examples
--------
"""
check_matplotlib_support('plot_det_curve')
y_pred, pos_label = _get_response(
X, estimator, response_method, pos_label=pos_label
)
fpr, fnr, _ = det_curve(
y, y_pred, pos_label=pos_label, sample_weight=sample_weight,
)
name = estimator.__class__.__name__ if name is None else name
viz = DetCurveDisplay(
fpr=fpr,
fnr=fnr,
estimator_name=name,
pos_label=pos_label
)
return viz.plot(ax=ax, name=name, **kwargs)
|
def plot_det_curve(
estimator,
X,
y,
*,
sample_weight=None,
response_method="auto",
name=None,
ax=None,
pos_label=None,
**kwargs
):
"""Plot detection error tradeoff (DET) curve.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <visualizations>`.
.. versionadded:: 0.24
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the predicted target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name of ROC Curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
pos_label : str or int, default=None
The label of the positive class.
When `pos_label=None`, if `y_true` is in {-1, 1} or {0, 1},
`pos_label` is set to 1, otherwise an error will be raised.
Returns
-------
display : :class:`~sklearn.metrics.DetCurveDisplay`
Object that stores computed values.
See Also
--------
roc_auc_score : Compute the area under the ROC curve
roc_curve : Compute Receiver operating characteristic (ROC) curve
Examples
--------
"""
check_matplotlib_support('plot_det_curve')
y_pred, pos_label = _get_response(
X, estimator, response_method, pos_label=pos_label
)
fpr, fnr, _ = det_curve(
y, y_pred, pos_label=pos_label, sample_weight=sample_weight,
)
name = estimator.__class__.__name__ if name is None else name
viz = DetCurveDisplay(
fpr=fpr,
fnr=fnr,
estimator_name=name,
pos_label=pos_label
)
return viz.plot(ax=ax, name=name, **kwargs)
|
17,888 |
def is_acl_allowed(nick, id, acl):
if not getattr(settings, "ACL", None):
logging.warning(
"%s was just allowed to perform actions in %s because no ACL settings exist. This can be a security risk." % (
nick,
acl,
)
)
return True
for a in acl:
acl_members = get_acl_members(a)
if id in acl_members or nick.lower() in [x.lower() for x in acl_members]:
return True
return False
|
def is_acl_allowed(id, acl):
if not getattr(settings, "ACL", None):
logging.warning(
"%s was just allowed to perform actions in %s because no ACL settings exist. This can be a security risk." % (
nick,
acl,
)
)
return True
for a in acl:
acl_members = get_acl_members(a)
if id in acl_members or nick.lower() in [x.lower() for x in acl_members]:
return True
return False
|
41,677 |
def get_numpy_benchmarks():
root = Path(__file__).resolve().parent / "benchmarks"
filenames = list(root.iterdir());
filenames.sort()
for filename in filenames:
name = filename.stem
if name in SKIP:
continue
content = parse_numpy_benchmark(filename)
content += (
"import numpy as np\n"
"_ = np.empty(())\n"
"setup = setup + '\\nfrom __main__ import {}'\n"
"from timeit import Timer\n"
"t = Timer(run, setup)\n"
"r = t.repeat(11, 40)\n"
"r.remove(min(r))\n"
"r.remove(max(r))\n"
"print(np.mean(r))\n".format(name)
)
yield name, content
|
def get_numpy_benchmarks():
root = Path(__file__).resolve().parent / "benchmarks"
for filename in sorted(root.iterdir()):
name = filename.stem
if name in SKIP:
continue
content = parse_numpy_benchmark(filename)
content += (
"import numpy as np\n"
"_ = np.empty(())\n"
"setup = setup + '\\nfrom __main__ import {}'\n"
"from timeit import Timer\n"
"t = Timer(run, setup)\n"
"r = t.repeat(11, 40)\n"
"r.remove(min(r))\n"
"r.remove(max(r))\n"
"print(np.mean(r))\n".format(name)
)
yield name, content
|
42,065 |
def test_plot_parallel_coordinate() -> None:
# Test with no trial.
study = create_study()
figure = plot_parallel_coordinate(study)
assert len(list(figure.get_figure().axes)) == 0 + 1
study = prepare_study_with_trials(with_c_d=False)
# Test with a trial.
figure = plot_parallel_coordinate(study)
assert len(list(figure.get_figure().axes)) == 3 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
assert fig.axes[3].get_ylim() == (0.0, 2.0)
assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0]
# Test with a trial to select parameter.
figure = plot_parallel_coordinate(study, params=["param_a"])
assert len(list(figure.get_figure().axes)) == 2 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
# Test with a customized target value.
with pytest.warns(UserWarning):
figure = plot_parallel_coordinate(
study, params=["param_a"], target=lambda t: t.params["param_b"]
)
assert len(list(figure.get_figure().axes)) == 2 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [2.0, 0.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
# Test with a customized target name.
figure = plot_parallel_coordinate(study, target_name="Target Name")
assert len(list(figure.get_figure().axes)) == 3 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Target Name"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
assert fig.axes[3].get_ylim() == (0.0, 2.0)
assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0]
# Test with wrong params that do not exist in trials
with pytest.raises(ValueError, match="Parameter optuna does not exist in your study."):
plot_parallel_coordinate(study, params=["optuna", "optuna"])
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study()
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_parallel_coordinate(study)
assert len(figure.get_lines()) == 0
|
def test_plot_parallel_coordinate() -> None:
# Test with no trial.
study = create_study()
figure = plot_parallel_coordinate(study)
assert len(list(figure.get_figure().axes)) == 0 + 1
study = prepare_study_with_trials(with_c_d=False)
# Test with a trial.
figure = plot_parallel_coordinate(study)
assert len(figure.get_figure().axes) == 3 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
assert fig.axes[3].get_ylim() == (0.0, 2.0)
assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0]
# Test with a trial to select parameter.
figure = plot_parallel_coordinate(study, params=["param_a"])
assert len(list(figure.get_figure().axes)) == 2 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
# Test with a customized target value.
with pytest.warns(UserWarning):
figure = plot_parallel_coordinate(
study, params=["param_a"], target=lambda t: t.params["param_b"]
)
assert len(list(figure.get_figure().axes)) == 2 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Objective Value"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [2.0, 0.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
# Test with a customized target name.
figure = plot_parallel_coordinate(study, target_name="Target Name")
assert len(list(figure.get_figure().axes)) == 3 + 1
fig = figure.get_figure()
assert fig.axes[1].get_ylabel() == "Target Name"
assert fig.axes[1].get_ylim() == (0.0, 2.0)
assert len(figure.findobj(LineCollection)) == 1
assert figure.findobj(LineCollection)[0].get_array().tolist()[:-1] == [0.0, 2.0, 1.0]
assert fig.axes[2].get_ylim() == (1.0, 2.5)
assert [
fig.axes[2].get_lines()[0].get_ydata()[0],
fig.axes[2].get_lines()[0].get_ydata()[-1],
] == [1.0, 2.5]
assert fig.axes[3].get_ylim() == (0.0, 2.0)
assert fig.axes[3].get_lines()[0].get_ydata().tolist() == [2.0, 0.0, 1.0]
# Test with wrong params that do not exist in trials
with pytest.raises(ValueError, match="Parameter optuna does not exist in your study."):
plot_parallel_coordinate(study, params=["optuna", "optuna"])
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study()
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_parallel_coordinate(study)
assert len(figure.get_lines()) == 0
|
31,667 |
def is_tag_list_valid(tag_ids):
"""checks if all the tag ids are valid"""
for tag in tag_ids:
try:
int(tag)
except ValueError:
raise DemistoException(f"Tag id has to be an integer, please change the given: '{tag}' id.")
|
def is_tag_list_valid(tag_ids):
"""Verify all the tags are valid integers."""
for tag in tag_ids:
try:
int(tag)
except ValueError:
raise DemistoException(f"Tag id has to be an integer, please change the given: '{tag}' id.")
|
17,736 |
def changeDigit_base10_to_base62_alph_num(current_digit):
'''The supplimental digits for the base10_to_base62_alph_num function,
which Converts the base 10 to base 62 '''
'''current_digit = the currenty digit for this base.
(i.e. in base10 it would be the one, ten, hundreds, or thousands places .....)'''
base62_No = 62
decimal = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61]
base62_Values = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J",
"K", "L", "M", "N", "O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d",
"e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
"o", "p", "q", "r", "s", "t", "u", "v", "w", "x",
"y", "z"]
for counter in range(int(base62_No-10)):
if current_digit == decimal[counter - 1]:
current_digit = base62_Values[counter - 1]
return current_digit
|
def changeDigit_base10_to_base62_alph_num(current_digit):
'''The supplimental digits for the base10_to_base62_alph_num function,
which Converts the base 10 to base 62 '''
'''current_digit = the current digit for this base.
(i.e. in base10 it would be the one, ten, hundreds, or thousands places .....)'''
base62_No = 62
decimal = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61]
base62_Values = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J",
"K", "L", "M", "N", "O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d",
"e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
"o", "p", "q", "r", "s", "t", "u", "v", "w", "x",
"y", "z"]
for counter in range(int(base62_No-10)):
if current_digit == decimal[counter - 1]:
current_digit = base62_Values[counter - 1]
return current_digit
|
41,485 |
def plot_results(ax, mutests, tests, test_size=0.05):
cls_obs = np.array([test[0] for test in tests]).flatten()
cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]
ax.plot(mutests, cls_obs, c='k')
for i, c in zip(range(5), ['k', 'k', 'k', 'k', 'k']):
ax.plot(mutests, cls_exp[i], c=c, linestyle='dotted' if i != 2 else 'dashed')
ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='y')
ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='g')
ax.plot(mutests, [test_size] * len(mutests), c='r')
ax.set_ylim(0, 1)
|
def plot_results(ax, mutests, tests, test_size=0.05):
cls_obs = np.array([test[0] for test in tests]).flatten()
cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]
ax.plot(mutests, cls_obs, c='k')
for i, c in zip(range(5), ['k', 'k', 'k', 'k', 'k']):
ax.plot(mutests, cls_exp[i], c=c, linestyle='dotted' if i != 2 else 'dashed')
ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='yellow')
ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='g')
ax.plot(mutests, [test_size] * len(mutests), c='r')
ax.set_ylim(0, 1)
|
5,637 |
def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
hessp=None, bounds=None, constraints=(), tol=None,
callback=None, options=None):
"""Minimization of scalar function of one or more variables.
Parameters
----------
fun : callable
The objective function to be minimized.
``fun(x, *args) -> float``
where x is an 1-D array with shape (n,) and `args`
is a tuple of the fixed parameters needed to completely
specify the function.
x0 : ndarray, shape (n,)
Initial guess. Array of real elements of size (n,),
where 'n' is the number of independent variables.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (`fun`, `jac` and `hess` functions).
method : str or callable, optional
Type of solver. Should be one of
- 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
- 'Powell' :ref:`(see here) <optimize.minimize-powell>`
- 'CG' :ref:`(see here) <optimize.minimize-cg>`
- 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
- 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
- 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
- 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
- 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
- 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
- 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>`
- 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
- 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
- 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
- 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
- 'differential-evolution' :ref:`(see here) <`scipy.optimize.differential_evolution`>`
- custom - a callable object (added in version 0.14.0),
see below for description.
If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
depending if the problem has constraints or bounds.
jac : {callable, '2-point', '3-point', 'cs', bool}, optional
Method for computing the gradient vector. Only for CG, BFGS,
Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
trust-exact and trust-constr. If it is a callable, it should be a
function that returns the gradient vector:
``jac(x, *args) -> array_like, shape (n,)``
where x is an array with shape (n,) and `args` is a tuple with
the fixed parameters. Alternatively, the keywords
{'2-point', '3-point', 'cs'} select a finite
difference scheme for numerical estimation of the gradient. Options
'3-point' and 'cs' are available only to 'trust-constr'.
If `jac` is a Boolean and is True, `fun` is assumed to return the
gradient along with the objective function. If False, the gradient
will be estimated using '2-point' finite difference estimation.
hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
trust-ncg, trust-krylov, trust-exact and trust-constr. If it is
callable, it should return the Hessian matrix:
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
where x is a (n,) ndarray and `args` is a tuple with the fixed
parameters. LinearOperator and sparse matrix returns are
allowed only for 'trust-constr' method. Alternatively, the keywords
{'2-point', '3-point', 'cs'} select a finite difference scheme
for numerical estimation. Or, objects implementing
`HessianUpdateStrategy` interface can be used to approximate
the Hessian. Available quasi-Newton methods implementing
this interface are:
- `BFGS`;
- `SR1`.
Whenever the gradient is estimated via finite-differences,
the Hessian cannot be estimated with options
{'2-point', '3-point', 'cs'} and needs to be
estimated using one of the quasi-Newton strategies.
Finite-difference options {'2-point', '3-point', 'cs'} and
`HessianUpdateStrategy` are available only for 'trust-constr' method.
hessp : callable, optional
Hessian of objective function times an arbitrary vector p. Only for
Newton-CG, trust-ncg, trust-krylov, trust-constr.
Only one of `hessp` or `hess` needs to be given. If `hess` is
provided, then `hessp` will be ignored. `hessp` must compute the
Hessian times an arbitrary vector:
``hessp(x, p, *args) -> ndarray shape (n,)``
where x is a (n,) ndarray, p is an arbitrary vector with
dimension (n,) and `args` is a tuple with the fixed
parameters.
bounds : sequence or `Bounds`, optional
Bounds on variables for L-BFGS-B, TNC, SLSQP, trust-constr and
differential-evolution methods. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
is used to specify no bound.
constraints : {Constraint, dict} or List of {Constraint, dict}, optional
Constraints definition (only for COBYLA, SLSQP, trust-constr and
differential-evolution).
Constraints for 'trust-constr' and 'differential-evolution' are defined
as a single object or a list of objects specifying constraints to the
optimization problem.
Available constraints are:
- `LinearConstraint`
- `NonlinearConstraint`
Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
Each dictionary with fields:
type : str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun : callable
The function defining the constraint.
jac : callable, optional
The Jacobian of `fun` (only for SLSQP).
args : sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be non-negative.
Note that COBYLA only supports inequality constraints.
tol : float, optional
Tolerance for termination. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform. Depending on the
method each iteration may use several function evaluations.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options()`.
callback : callable, optional
Called after each iteration. For 'trust-constr' it is a callable with
the signature:
``callback(xk, OptimizeResult state) -> bool``
where ``xk`` is the current parameter vector. and ``state``
is an `OptimizeResult` object, with the same fields
as the ones from the return. If callback returns True
the algorithm execution is terminated.
For all the other methods, the signature is:
``callback(xk)``
where ``xk`` is the current parameter vector.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
minimize_scalar : Interface to minimization algorithms for scalar
univariate functions
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *BFGS*.
**Unconstrained minimization**
Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
Simplex algorithm [1]_, [2]_. This algorithm is robust in many
applications. However, if numerical computation of derivative can be
trusted, other algorithms using the first and/or second derivatives
information might be preferred for their better performance in
general.
Method :ref:`Powell <optimize.minimize-powell>` is a modification
of Powell's method [3]_, [4]_ which is a conjugate direction
method. It performs sequential one-dimensional minimizations along
each vector of the directions set (`direc` field in `options` and
`info`), which is updated at each iteration of the main
minimization loop. The function need not be differentiable, and no
derivatives are taken.
Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
gradient algorithm by Polak and Ribiere, a variant of the
Fletcher-Reeves method described in [5]_ pp. 120-122. Only the
first derivatives are used.
Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
pp. 136. It uses the first derivatives only. BFGS has proven good
performance even for non-smooth optimizations. This method also
returns an approximation of the Hessian inverse, stored as
`hess_inv` in the OptimizeResult object.
Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
Newton method). It uses a CG method to the compute the search
direction. See also *TNC* method for a box-constrained
minimization with a similar algorithm. Suitable for large-scale
problems.
Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
trust-region algorithm [5]_ for unconstrained minimization. This
algorithm requires the gradient and Hessian; furthermore the
Hessian is required to be positive definite.
Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
Newton conjugate gradient trust-region algorithm [5]_ for
unconstrained minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector. Suitable for large-scale problems.
Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector. Suitable for large-scale problems.
On indefinite problems it requires usually less iterations than the
`trust-ncg` method and is recommended for medium and large-scale problems.
Method :ref:`trust-exact <optimize.minimize-trustexact>`
is a trust-region method for unconstrained minimization in which
quadratic subproblems are solved almost exactly [13]_. This
algorithm requires the gradient and the Hessian (which is
*not* required to be positive definite). It is, in many
situations, the Newton method to converge in fewer iteraction
and the most recommended for small and medium-size problems.
**Bound-Constrained minimization**
Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
algorithm [6]_, [7]_ for bound constrained minimization.
Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
algorithm [5]_, [8]_ to minimize a function with variables subject
to bounds. This algorithm uses gradient information; it is also
called Newton Conjugate-Gradient. It differs from the *Newton-CG*
method described above as it wraps a C implementation and allows
each variable to be given upper and lower bounds.
**Constrained Minimization**
Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
Constrained Optimization BY Linear Approximation (COBYLA) method
[9]_, [10]_, [11]_. The algorithm is based on linear
approximations to the objective function and each constraint. The
method wraps a FORTRAN implementation of the algorithm. The
constraints functions 'fun' may return either a single number
or an array or list of numbers.
Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
Least SQuares Programming to minimize a function of several
variables with any combination of bounds, equality and inequality
constraints. The method wraps the SLSQP Optimization subroutine
originally implemented by Dieter Kraft [12]_. Note that the
wrapper handles infinite values in bounds by converting them into
large floating values.
Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
trust-region algorithm for constrained optimization. It swiches
between two implementations depending on the problem definition.
It is the most versatile constrained minimization algorithm
implemented in SciPy and the most appropriate for large-scale problems.
For equality constrained problems it is an implementation of Byrd-Omojokun
Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
inequality constraints are imposed as well, it swiches to the trust-region
interior point method described in [16]_. This interior point algorithm,
in turn, solves inequality constraints by introducing slack variables
and solving a sequence of equality-constrained barrier problems
for progressively smaller values of the barrier parameter.
The previously described equality constrained SQP method is
used to solve the subproblems with increasing levels of accuracy
as the iterate gets closer to a solution.
Method :ref:`differential-evolution <scipy.optimize.differential_evolution>` uses
differential evolution for constrained optimization.
**Finite-Difference Options**
For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
the gradient and the Hessian may be approximated using
three finite-difference schemes: {'2-point', '3-point', 'cs'}.
The scheme 'cs' is, potentially, the most accurate but it
requires the function to correctly handles complex inputs and to
be differentiable in the complex plane. The scheme '3-point' is more
accurate than '2-point' but requires twice as much operations.
**Custom minimizers**
It may be useful to pass a custom minimization method, for example
when using a frontend to this method such as `scipy.optimize.basinhopping`
or a different library. You can simply pass a callable as the ``method``
parameter.
The callable is called as ``method(fun, x0, args, **kwargs, **options)``
where ``kwargs`` corresponds to any other parameters passed to `minimize`
(such as `callback`, `hess`, etc.), except the `options` dict, which has
its contents also passed as `method` parameters pair by pair. Also, if
`jac` has been passed as a bool type, `jac` and `fun` are mangled so that
`fun` returns just the function values and `jac` is converted to a function
returning the Jacobian. The method shall return an `OptimizeResult`
object.
The provided `method` callable must be able to accept (and possibly ignore)
arbitrary parameters; the set of parameters accepted by `minimize` may
expand in future versions and then these parameters will be passed to
the method. You can find an example in the scipy.optimize tutorial.
.. versionadded:: 0.11.0
References
----------
.. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
Minimization. The Computer Journal 7: 308-13.
.. [2] Wright M H. 1996. Direct search methods: Once scorned, now
respectable, in Numerical Analysis 1995: Proceedings of the 1995
Dundee Biennial Conference in Numerical Analysis (Eds. D F
Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
191-208.
.. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
a function of several variables without calculating derivatives. The
Computer Journal 7: 155-162.
.. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
Numerical Recipes (any edition), Cambridge University Press.
.. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
Springer New York.
.. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
Algorithm for Bound Constrained Optimization. SIAM Journal on
Scientific and Statistical Computing 16 (5): 1190-1208.
.. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
778: L-BFGS-B, FORTRAN routines for large scale bound constrained
optimization. ACM Transactions on Mathematical Software 23 (4):
550-560.
.. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
1984. SIAM Journal of Numerical Analysis 21: 770-778.
.. [9] Powell, M J D. A direct search optimization method that models
the objective and constraint functions by linear interpolation.
1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
.. [10] Powell M J D. Direct search algorithms for optimization
calculations. 1998. Acta Numerica 7: 287-336.
.. [11] Powell M J D. A view of algorithms for optimization without
derivatives. 2007.Cambridge University Technical Report DAMTP
2007/NA03
.. [12] Kraft, D. A software package for sequential quadratic
programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
Center -- Institute for Flight Mechanics, Koln, Germany.
.. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
Trust region methods. 2000. Siam. pp. 169-200.
.. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
implementation of the GLTR method for iterative solution of
the trust region problem", https://arxiv.org/abs/1611.04718
.. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
Trust-Region Subproblem using the Lanczos Method",
SIAM J. Optim., 9(2), 504--525, (1999).
.. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
An interior point algorithm for large-scale nonlinear programming.
SIAM Journal on Optimization 9.4: 877-900.
.. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
implementation of an algorithm for large-scale equality constrained
optimization. SIAM Journal on Optimization 8.3: 682-706.
Examples
--------
Let us consider the problem of minimizing the Rosenbrock function. This
function (and its respective derivatives) is implemented in `rosen`
(resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.
>>> from scipy.optimize import minimize, rosen, rosen_der
A simple application of the *Nelder-Mead* method is:
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
>>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
>>> res.x
array([ 1., 1., 1., 1., 1.])
Now using the *BFGS* algorithm, using the first derivative and a few
options:
>>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
... options={'gtol': 1e-6, 'disp': True})
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 26
Function evaluations: 31
Gradient evaluations: 31
>>> res.x
array([ 1., 1., 1., 1., 1.])
>>> print(res.message)
Optimization terminated successfully.
>>> res.hess_inv
array([[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary
[ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269],
[ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151],
[ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ],
[ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]])
Next, consider a minimization problem with several constraints (namely
Example 16.4 from [5]_). The objective function is:
>>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
There are three constraints defined as:
>>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
And variables must be positive, hence the following bounds:
>>> bnds = ((0, None), (0, None))
The optimization problem is solved using the SLSQP method as:
>>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
... constraints=cons)
It should converge to the theoretical solution (1.4 ,1.7).
"""
x0 = np.asarray(x0)
if x0.dtype.kind in np.typecodes["AllInteger"]:
x0 = np.asarray(x0, dtype=float)
if not isinstance(args, tuple):
args = (args,)
if method is None:
# Select automatically
if constraints:
method = 'SLSQP'
elif bounds is not None:
method = 'L-BFGS-B'
else:
method = 'BFGS'
if callable(method):
meth = "_custom"
else:
meth = method.lower()
if options is None:
options = {}
# check if optional parameters are supported by the selected method
# - jac
if meth in ('nelder-mead', 'powell', 'cobyla',
'differential-evolution') and bool(jac):
warn('Method %s does not use gradient information (jac).' % method,
RuntimeWarning)
# - hess
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
'trust-krylov', 'trust-exact', '_custom') and hess is not None:
warn('Method %s does not use Hessian information (hess).' % method,
RuntimeWarning)
# - hessp
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
'trust-krylov', '_custom') \
and hessp is not None:
warn('Method %s does not use Hessian-vector product '
'information (hessp).' % method, RuntimeWarning)
# - constraints or bounds
if (meth in ('nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'dogleg',
'trust-ncg') and (bounds is not None or np.any(constraints))):
warn('Method %s cannot handle constraints nor bounds.' % method,
RuntimeWarning)
if meth in ('l-bfgs-b', 'tnc') and np.any(constraints):
warn('Method %s cannot handle constraints.' % method,
RuntimeWarning)
if meth == 'cobyla' and bounds is not None:
warn('Method %s cannot handle bounds.' % method,
RuntimeWarning)
if meth == 'differential-evolution' and bounds is None:
warn('Method %s requires bounds.' % method,
RuntimeWarning)
# - callback
if (meth in ('cobyla',) and callback is not None):
warn('Method %s does not support callback.' % method, RuntimeWarning)
# - return_all
if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp',
'differential-evolution') and
options.get('return_all', False)):
warn('Method %s does not support the return_all option.' % method,
RuntimeWarning)
# check gradient vector
if meth == 'trust-constr':
if type(jac) is bool:
if jac:
fun = MemoizeJac(fun)
jac = fun.derivative
else:
jac = '2-point'
elif jac is None:
jac = '2-point'
elif not callable(jac) and jac not in ('2-point', '3-point', 'cs'):
raise ValueError("Unsupported jac definition.")
else:
if jac in ('2-point', '3-point', 'cs'):
if jac in ('3-point', 'cs'):
warn("Only 'trust-constr' method accept %s "
"options for 'jac'. Using '2-point' instead." % jac)
jac = None
elif not callable(jac):
if bool(jac):
fun = MemoizeJac(fun)
jac = fun.derivative
else:
jac = None
# set default tolerances
if tol is not None:
options = dict(options)
if meth == 'nelder-mead':
options.setdefault('xatol', tol)
options.setdefault('fatol', tol)
if meth in ('newton-cg', 'powell', 'tnc'):
options.setdefault('xtol', tol)
if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
options.setdefault('ftol', tol)
if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
'trust-ncg', 'trust-exact', 'trust-krylov'):
options.setdefault('gtol', tol)
if meth in ('cobyla', '_custom', 'differential-evolution'):
options.setdefault('tol', tol)
if meth == 'trust-constr':
options.setdefault('xtol', tol)
options.setdefault('gtol', tol)
options.setdefault('barrier_tol', tol)
if bounds is not None:
bounds = standardize_bounds(bounds, x0, meth)
if constraints is not None:
constraints = standardize_constraints(constraints, x0, meth)
if meth == '_custom':
return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
bounds=bounds, constraints=constraints,
callback=callback, **options)
elif meth == 'nelder-mead':
return _minimize_neldermead(fun, x0, args, callback, **options)
elif meth == 'powell':
return _minimize_powell(fun, x0, args, callback, **options)
elif meth == 'cg':
return _minimize_cg(fun, x0, args, jac, callback, **options)
elif meth == 'bfgs':
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
elif meth == 'newton-cg':
return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
**options)
elif meth == 'l-bfgs-b':
return _minimize_lbfgsb(fun, x0, args, jac, bounds,
callback=callback, **options)
elif meth == 'tnc':
return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
**options)
elif meth == 'cobyla':
return _minimize_cobyla(fun, x0, args, constraints, **options)
elif meth == 'slsqp':
return _minimize_slsqp(fun, x0, args, jac, bounds,
constraints, callback=callback, **options)
elif meth == 'trust-constr':
return _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
bounds, constraints,
callback=callback, **options)
elif meth == 'dogleg':
return _minimize_dogleg(fun, x0, args, jac, hess,
callback=callback, **options)
elif meth == 'trust-ncg':
return _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
elif meth == 'trust-krylov':
return _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
elif meth == 'trust-exact':
return _minimize_trustregion_exact(fun, x0, args, jac, hess,
callback=callback, **options)
elif meth == 'differential-evolution':
return differential_evolution(fun, bounds, args=args, constraints=constraints,
callback=callback, **options)
else:
raise ValueError('Unknown solver %s' % method)
|
def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
hessp=None, bounds=None, constraints=(), tol=None,
callback=None, options=None):
"""Minimization of scalar function of one or more variables.
Parameters
----------
fun : callable
The objective function to be minimized.
``fun(x, *args) -> float``
where x is an 1-D array with shape (n,) and `args`
is a tuple of the fixed parameters needed to completely
specify the function.
x0 : ndarray, shape (n,)
Initial guess. Array of real elements of size (n,),
where 'n' is the number of independent variables.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (`fun`, `jac` and `hess` functions).
method : str or callable, optional
Type of solver. Should be one of
- 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
- 'Powell' :ref:`(see here) <optimize.minimize-powell>`
- 'CG' :ref:`(see here) <optimize.minimize-cg>`
- 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
- 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
- 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
- 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
- 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
- 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
- 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>`
- 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
- 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
- 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
- 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
- 'differential-evolution' :ref:`(see here) <`scipy.optimize.differential_evolution`>`
- custom - a callable object (added in version 0.14.0),
see below for description.
- 'differential-evolution' :ref:`(see here) <optimize.differential_evolution>`
If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
depending if the problem has constraints or bounds.
jac : {callable, '2-point', '3-point', 'cs', bool}, optional
Method for computing the gradient vector. Only for CG, BFGS,
Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
trust-exact and trust-constr. If it is a callable, it should be a
function that returns the gradient vector:
``jac(x, *args) -> array_like, shape (n,)``
where x is an array with shape (n,) and `args` is a tuple with
the fixed parameters. Alternatively, the keywords
{'2-point', '3-point', 'cs'} select a finite
difference scheme for numerical estimation of the gradient. Options
'3-point' and 'cs' are available only to 'trust-constr'.
If `jac` is a Boolean and is True, `fun` is assumed to return the
gradient along with the objective function. If False, the gradient
will be estimated using '2-point' finite difference estimation.
hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
trust-ncg, trust-krylov, trust-exact and trust-constr. If it is
callable, it should return the Hessian matrix:
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
where x is a (n,) ndarray and `args` is a tuple with the fixed
parameters. LinearOperator and sparse matrix returns are
allowed only for 'trust-constr' method. Alternatively, the keywords
{'2-point', '3-point', 'cs'} select a finite difference scheme
for numerical estimation. Or, objects implementing
`HessianUpdateStrategy` interface can be used to approximate
the Hessian. Available quasi-Newton methods implementing
this interface are:
- `BFGS`;
- `SR1`.
Whenever the gradient is estimated via finite-differences,
the Hessian cannot be estimated with options
{'2-point', '3-point', 'cs'} and needs to be
estimated using one of the quasi-Newton strategies.
Finite-difference options {'2-point', '3-point', 'cs'} and
`HessianUpdateStrategy` are available only for 'trust-constr' method.
hessp : callable, optional
Hessian of objective function times an arbitrary vector p. Only for
Newton-CG, trust-ncg, trust-krylov, trust-constr.
Only one of `hessp` or `hess` needs to be given. If `hess` is
provided, then `hessp` will be ignored. `hessp` must compute the
Hessian times an arbitrary vector:
``hessp(x, p, *args) -> ndarray shape (n,)``
where x is a (n,) ndarray, p is an arbitrary vector with
dimension (n,) and `args` is a tuple with the fixed
parameters.
bounds : sequence or `Bounds`, optional
Bounds on variables for L-BFGS-B, TNC, SLSQP, trust-constr and
differential-evolution methods. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
is used to specify no bound.
constraints : {Constraint, dict} or List of {Constraint, dict}, optional
Constraints definition (only for COBYLA, SLSQP, trust-constr and
differential-evolution).
Constraints for 'trust-constr' and 'differential-evolution' are defined
as a single object or a list of objects specifying constraints to the
optimization problem.
Available constraints are:
- `LinearConstraint`
- `NonlinearConstraint`
Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
Each dictionary with fields:
type : str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun : callable
The function defining the constraint.
jac : callable, optional
The Jacobian of `fun` (only for SLSQP).
args : sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be non-negative.
Note that COBYLA only supports inequality constraints.
tol : float, optional
Tolerance for termination. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform. Depending on the
method each iteration may use several function evaluations.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options()`.
callback : callable, optional
Called after each iteration. For 'trust-constr' it is a callable with
the signature:
``callback(xk, OptimizeResult state) -> bool``
        where ``xk`` is the current parameter vector and ``state``
is an `OptimizeResult` object, with the same fields
as the ones from the return. If callback returns True
the algorithm execution is terminated.
For all the other methods, the signature is:
``callback(xk)``
where ``xk`` is the current parameter vector.
Returns
-------
res : OptimizeResult
        The optimization result represented as an ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
minimize_scalar : Interface to minimization algorithms for scalar
univariate functions
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *BFGS*.
**Unconstrained minimization**
Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
Simplex algorithm [1]_, [2]_. This algorithm is robust in many
applications. However, if numerical computation of derivative can be
trusted, other algorithms using the first and/or second derivatives
information might be preferred for their better performance in
general.
Method :ref:`Powell <optimize.minimize-powell>` is a modification
of Powell's method [3]_, [4]_ which is a conjugate direction
method. It performs sequential one-dimensional minimizations along
each vector of the directions set (`direc` field in `options` and
`info`), which is updated at each iteration of the main
minimization loop. The function need not be differentiable, and no
derivatives are taken.
Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
gradient algorithm by Polak and Ribiere, a variant of the
Fletcher-Reeves method described in [5]_ pp. 120-122. Only the
first derivatives are used.
Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
pp. 136. It uses the first derivatives only. BFGS has proven good
performance even for non-smooth optimizations. This method also
returns an approximation of the Hessian inverse, stored as
`hess_inv` in the OptimizeResult object.
Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
    Newton method). It uses a CG method to compute the search
direction. See also *TNC* method for a box-constrained
minimization with a similar algorithm. Suitable for large-scale
problems.
Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
trust-region algorithm [5]_ for unconstrained minimization. This
algorithm requires the gradient and Hessian; furthermore the
Hessian is required to be positive definite.
Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
Newton conjugate gradient trust-region algorithm [5]_ for
unconstrained minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector. Suitable for large-scale problems.
Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector. Suitable for large-scale problems.
    On indefinite problems it usually requires fewer iterations than the
`trust-ncg` method and is recommended for medium and large-scale problems.
Method :ref:`trust-exact <optimize.minimize-trustexact>`
is a trust-region method for unconstrained minimization in which
quadratic subproblems are solved almost exactly [13]_. This
algorithm requires the gradient and the Hessian (which is
    *not* required to be positive definite). It is, in many
    situations, the Newton method that converges in fewer iterations
    and the most recommended for small and medium-size problems.
**Bound-Constrained minimization**
Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
algorithm [6]_, [7]_ for bound constrained minimization.
Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
algorithm [5]_, [8]_ to minimize a function with variables subject
to bounds. This algorithm uses gradient information; it is also
called Newton Conjugate-Gradient. It differs from the *Newton-CG*
method described above as it wraps a C implementation and allows
each variable to be given upper and lower bounds.
**Constrained Minimization**
Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
Constrained Optimization BY Linear Approximation (COBYLA) method
[9]_, [10]_, [11]_. The algorithm is based on linear
approximations to the objective function and each constraint. The
method wraps a FORTRAN implementation of the algorithm. The
constraints functions 'fun' may return either a single number
or an array or list of numbers.
Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
Least SQuares Programming to minimize a function of several
variables with any combination of bounds, equality and inequality
constraints. The method wraps the SLSQP Optimization subroutine
originally implemented by Dieter Kraft [12]_. Note that the
wrapper handles infinite values in bounds by converting them into
large floating values.
Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
    trust-region algorithm for constrained optimization. It switches
between two implementations depending on the problem definition.
It is the most versatile constrained minimization algorithm
implemented in SciPy and the most appropriate for large-scale problems.
For equality constrained problems it is an implementation of Byrd-Omojokun
Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
    inequality constraints are imposed as well, it switches to the trust-region
interior point method described in [16]_. This interior point algorithm,
in turn, solves inequality constraints by introducing slack variables
and solving a sequence of equality-constrained barrier problems
for progressively smaller values of the barrier parameter.
The previously described equality constrained SQP method is
used to solve the subproblems with increasing levels of accuracy
as the iterate gets closer to a solution.
Method :ref:`differential-evolution <scipy.optimize.differential_evolution>` uses
differential evolution for constrained optimization.
**Finite-Difference Options**
For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
the gradient and the Hessian may be approximated using
three finite-difference schemes: {'2-point', '3-point', 'cs'}.
The scheme 'cs' is, potentially, the most accurate but it
    requires the function to correctly handle complex inputs and to
    be differentiable in the complex plane. The scheme '3-point' is more
    accurate than '2-point' but requires twice as many operations.
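    As a brief illustrative sketch (the `BFGS` object here is the quasi-Newton
    `HessianUpdateStrategy` mentioned above, not the 'BFGS' solver), a
    finite-difference gradient is typically paired with an approximated Hessian:
    >>> from scipy.optimize import BFGS, minimize, rosen
    >>> res = minimize(rosen, [1.3, 0.7], method='trust-constr',
    ...                jac='2-point', hess=BFGS())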
**Custom minimizers**
It may be useful to pass a custom minimization method, for example
when using a frontend to this method such as `scipy.optimize.basinhopping`
or a different library. You can simply pass a callable as the ``method``
parameter.
The callable is called as ``method(fun, x0, args, **kwargs, **options)``
where ``kwargs`` corresponds to any other parameters passed to `minimize`
(such as `callback`, `hess`, etc.), except the `options` dict, which has
its contents also passed as `method` parameters pair by pair. Also, if
`jac` has been passed as a bool type, `jac` and `fun` are mangled so that
`fun` returns just the function values and `jac` is converted to a function
returning the Jacobian. The method shall return an `OptimizeResult`
object.
The provided `method` callable must be able to accept (and possibly ignore)
arbitrary parameters; the set of parameters accepted by `minimize` may
expand in future versions and then these parameters will be passed to
the method. You can find an example in the scipy.optimize tutorial.
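    As a minimal sketch (purely illustrative; ``custmin`` is a made-up name, not a
    real optimizer), a conforming callable only needs to accept the keyword
    arguments and return an `OptimizeResult`:
    >>> from scipy.optimize import OptimizeResult, minimize
    >>> def custmin(fun, x0, args=(), **options):
    ...     # no real optimization: evaluate once and report the starting point
    ...     return OptimizeResult(x=x0, fun=fun(x0, *args),
    ...                           success=True, nfev=1)
    >>> res = minimize(lambda x: (x - 2.0) ** 2, 1.0, method=custmin)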
.. versionadded:: 0.11.0
References
----------
.. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
Minimization. The Computer Journal 7: 308-13.
.. [2] Wright M H. 1996. Direct search methods: Once scorned, now
respectable, in Numerical Analysis 1995: Proceedings of the 1995
Dundee Biennial Conference in Numerical Analysis (Eds. D F
Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
191-208.
.. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
a function of several variables without calculating derivatives. The
Computer Journal 7: 155-162.
.. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
Numerical Recipes (any edition), Cambridge University Press.
.. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
Springer New York.
.. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
Algorithm for Bound Constrained Optimization. SIAM Journal on
Scientific and Statistical Computing 16 (5): 1190-1208.
.. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
778: L-BFGS-B, FORTRAN routines for large scale bound constrained
optimization. ACM Transactions on Mathematical Software 23 (4):
550-560.
.. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
1984. SIAM Journal of Numerical Analysis 21: 770-778.
.. [9] Powell, M J D. A direct search optimization method that models
the objective and constraint functions by linear interpolation.
1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
.. [10] Powell M J D. Direct search algorithms for optimization
calculations. 1998. Acta Numerica 7: 287-336.
.. [11] Powell M J D. A view of algorithms for optimization without
       derivatives. 2007. Cambridge University Technical Report DAMTP
2007/NA03
.. [12] Kraft, D. A software package for sequential quadratic
programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
Center -- Institute for Flight Mechanics, Koln, Germany.
.. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
Trust region methods. 2000. Siam. pp. 169-200.
.. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
implementation of the GLTR method for iterative solution of
the trust region problem", https://arxiv.org/abs/1611.04718
.. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
Trust-Region Subproblem using the Lanczos Method",
SIAM J. Optim., 9(2), 504--525, (1999).
.. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
An interior point algorithm for large-scale nonlinear programming.
SIAM Journal on Optimization 9.4: 877-900.
.. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
implementation of an algorithm for large-scale equality constrained
optimization. SIAM Journal on Optimization 8.3: 682-706.
Examples
--------
Let us consider the problem of minimizing the Rosenbrock function. This
function (and its respective derivatives) is implemented in `rosen`
    (resp. `rosen_der`, `rosen_hess`) in `scipy.optimize`.
>>> from scipy.optimize import minimize, rosen, rosen_der
A simple application of the *Nelder-Mead* method is:
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
>>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
>>> res.x
array([ 1., 1., 1., 1., 1.])
Now using the *BFGS* algorithm, using the first derivative and a few
options:
>>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
... options={'gtol': 1e-6, 'disp': True})
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 26
Function evaluations: 31
Gradient evaluations: 31
>>> res.x
array([ 1., 1., 1., 1., 1.])
>>> print(res.message)
Optimization terminated successfully.
>>> res.hess_inv
array([[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary
[ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269],
[ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151],
[ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ],
[ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]])
Next, consider a minimization problem with several constraints (namely
Example 16.4 from [5]_). The objective function is:
>>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
There are three constraints defined as:
>>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
And variables must be positive, hence the following bounds:
>>> bnds = ((0, None), (0, None))
The optimization problem is solved using the SLSQP method as:
>>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
... constraints=cons)
    It should converge to the theoretical solution (1.4, 1.7).
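    Because this version also dispatches to `differential_evolution`, the same
    interface can run a stochastic global search over finite box bounds (a sketch;
    the seed passed through `options` is only needed for reproducible runs, and
    without the inequality constraints the box-constrained optimum is simply
    ``(1, 2.5)``):
    >>> res = minimize(fun, (2, 0), method='differential-evolution',
    ...                bounds=((0, 5), (0, 5)), options={'seed': 1})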
"""
x0 = np.asarray(x0)
if x0.dtype.kind in np.typecodes["AllInteger"]:
x0 = np.asarray(x0, dtype=float)
if not isinstance(args, tuple):
args = (args,)
if method is None:
# Select automatically
if constraints:
method = 'SLSQP'
elif bounds is not None:
method = 'L-BFGS-B'
else:
method = 'BFGS'
if callable(method):
meth = "_custom"
else:
meth = method.lower()
if options is None:
options = {}
# check if optional parameters are supported by the selected method
# - jac
if meth in ('nelder-mead', 'powell', 'cobyla',
'differential-evolution') and bool(jac):
warn('Method %s does not use gradient information (jac).' % method,
RuntimeWarning)
# - hess
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
'trust-krylov', 'trust-exact', '_custom') and hess is not None:
warn('Method %s does not use Hessian information (hess).' % method,
RuntimeWarning)
# - hessp
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
'trust-krylov', '_custom') \
and hessp is not None:
warn('Method %s does not use Hessian-vector product '
'information (hessp).' % method, RuntimeWarning)
# - constraints or bounds
if (meth in ('nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'dogleg',
'trust-ncg') and (bounds is not None or np.any(constraints))):
warn('Method %s cannot handle constraints nor bounds.' % method,
RuntimeWarning)
if meth in ('l-bfgs-b', 'tnc') and np.any(constraints):
warn('Method %s cannot handle constraints.' % method,
RuntimeWarning)
if meth == 'cobyla' and bounds is not None:
warn('Method %s cannot handle bounds.' % method,
RuntimeWarning)
if meth == 'differential-evolution' and bounds is None:
warn('Method %s requires bounds.' % method,
RuntimeWarning)
# - callback
if (meth in ('cobyla',) and callback is not None):
warn('Method %s does not support callback.' % method, RuntimeWarning)
# - return_all
if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp',
'differential-evolution') and
options.get('return_all', False)):
warn('Method %s does not support the return_all option.' % method,
RuntimeWarning)
# check gradient vector
if meth == 'trust-constr':
if type(jac) is bool:
if jac:
fun = MemoizeJac(fun)
jac = fun.derivative
else:
jac = '2-point'
elif jac is None:
jac = '2-point'
elif not callable(jac) and jac not in ('2-point', '3-point', 'cs'):
raise ValueError("Unsupported jac definition.")
else:
if jac in ('2-point', '3-point', 'cs'):
if jac in ('3-point', 'cs'):
warn("Only 'trust-constr' method accept %s "
"options for 'jac'. Using '2-point' instead." % jac)
jac = None
elif not callable(jac):
if bool(jac):
fun = MemoizeJac(fun)
jac = fun.derivative
else:
jac = None
# set default tolerances
if tol is not None:
options = dict(options)
if meth == 'nelder-mead':
options.setdefault('xatol', tol)
options.setdefault('fatol', tol)
if meth in ('newton-cg', 'powell', 'tnc'):
options.setdefault('xtol', tol)
if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
options.setdefault('ftol', tol)
if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
'trust-ncg', 'trust-exact', 'trust-krylov'):
options.setdefault('gtol', tol)
if meth in ('cobyla', '_custom', 'differential-evolution'):
options.setdefault('tol', tol)
if meth == 'trust-constr':
options.setdefault('xtol', tol)
options.setdefault('gtol', tol)
options.setdefault('barrier_tol', tol)
if bounds is not None:
bounds = standardize_bounds(bounds, x0, meth)
if constraints is not None:
constraints = standardize_constraints(constraints, x0, meth)
if meth == '_custom':
return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
bounds=bounds, constraints=constraints,
callback=callback, **options)
elif meth == 'nelder-mead':
return _minimize_neldermead(fun, x0, args, callback, **options)
elif meth == 'powell':
return _minimize_powell(fun, x0, args, callback, **options)
elif meth == 'cg':
return _minimize_cg(fun, x0, args, jac, callback, **options)
elif meth == 'bfgs':
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
elif meth == 'newton-cg':
return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
**options)
elif meth == 'l-bfgs-b':
return _minimize_lbfgsb(fun, x0, args, jac, bounds,
callback=callback, **options)
elif meth == 'tnc':
return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
**options)
elif meth == 'cobyla':
return _minimize_cobyla(fun, x0, args, constraints, **options)
elif meth == 'slsqp':
return _minimize_slsqp(fun, x0, args, jac, bounds,
constraints, callback=callback, **options)
elif meth == 'trust-constr':
return _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
bounds, constraints,
callback=callback, **options)
elif meth == 'dogleg':
return _minimize_dogleg(fun, x0, args, jac, hess,
callback=callback, **options)
elif meth == 'trust-ncg':
return _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
elif meth == 'trust-krylov':
return _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
elif meth == 'trust-exact':
return _minimize_trustregion_exact(fun, x0, args, jac, hess,
callback=callback, **options)
elif meth == 'differential-evolution':
return differential_evolution(fun, bounds, args=args, constraints=constraints,
callback=callback, **options)
else:
raise ValueError('Unknown solver %s' % method)
|
30,847 |
def get_full_timeline(detection_id, per_page=100):
    ''' iterate over all timeline detections later than time t '''
page = 1
done = False
activities = [] # type:ignore
last_data = {} # type:ignore
while not done:
res = http_get('/detections/{}/timeline'.format(detection_id),
params={
'page': page,
'per_page': per_page,
})
current_data = res['data']
if len(current_data) == 0 or current_data == last_data:
current_data = {}
done = True
activities.extend(current_data)
last_data = current_data
page += 1
return activities
|
def get_full_timeline(detection_id, per_page=100):
    ''' iterate over all timeline detections later than time t '''
page = 1
done = False
activities = [] # type:ignore
last_data = {} # type:ignore
while not done:
res = http_get('/detections/{}/timeline'.format(detection_id),
params={
'page': page,
'per_page': per_page,
})
current_data = res.get('data')
if len(current_data) == 0 or current_data == last_data:
current_data = {}
done = True
activities.extend(current_data)
last_data = current_data
page += 1
return activities
|
32,109 |
def domain_details_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""
domain command: Returns domain details for a list of domains
"""
domain_string = args.get('domain')
domain_array = argToList(domain_string)
invalid_domains = []
for domain in domain_array: # Check for Valid Domain Inputs
if not REGEX_MAP['domain'].match(domain):
invalid_domains.append(domain)
if invalid_domains:
return_warning('The following Domains were found invalid: {}'.format(', '.join(invalid_domains)),
exit=len(invalid_domains) == len(domain_array))
enhanced = argToBoolean(args.get('enhanced', False))
response = client.get_domain_details(domain_array, enhanced)
domain_list = response.get("data", {}).get("results", {})
domain_map = {domain["name2"]: domain for domain in domain_list}
for domain_obj in domain_array:
if domain_obj not in domain_map:
domain_map.update({domain_obj: []})
domain_data_list = []
for domain_key, domain_data in domain_map.items():
if domain_data:
score = to_dbot_score(domain_data.get("score", 0))
dbot_score = Common.DBotScore(
indicator=domain_key,
indicator_type=DBotScoreType.DOMAIN,
integration_name='CTIX',
score=score
)
domain_standard_context = Common.Domain(
domain=domain_key,
dbot_score=dbot_score
)
domain_data_list.append(CommandResults(
readable_output=tableToMarkdown('Domain Data', domain_data, removeNull=True),
outputs_prefix='CTIX.Domain',
outputs_key_field='name2',
outputs=domain_data,
indicator=domain_standard_context
))
else:
dbot_score = Common.DBotScore(
indicator=domain_key,
indicator_type=DBotScoreType.DOMAIN,
integration_name="CTIX",
score=0,
)
domain_standard_context = Common.Domain(
domain=domain_key,
dbot_score=dbot_score
)
domain_data_list.append(CommandResults(
readable_output=f'No matches found for Domain {domain_key}',
outputs_prefix='CTIX.Domain',
outputs_key_field='name2',
outputs=domain_data,
indicator=domain_standard_context
))
return domain_data_list
|
def domain_details_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""
domain command: Returns domain details for a list of domains
"""
domain_string = args.get('domain')
domain_array = argToList(domain_string)
invalid_domains = []
for domain in domain_array: # Check for Valid Domain Inputs
if not REGEX_MAP['domain'].match(domain):
invalid_domains.append(domain)
if invalid_domains:
return_warning('The following Domains were found invalid: {}'.format(', '.join(invalid_domains)),
exit=len(invalid_domains) == len(domain_array))
enhanced = argToBoolean(args.get('enhanced', False))
response = client.get_domain_details(domain_array, enhanced)
domain_list = response.get("data", {}).get("results", {})
domain_map = {domain.get("name2"): domain for domain in domain_list}
for domain_obj in domain_array:
if domain_obj not in domain_map:
domain_map.update({domain_obj: []})
domain_data_list = []
for domain_key, domain_data in domain_map.items():
if domain_data:
score = to_dbot_score(domain_data.get("score", 0))
dbot_score = Common.DBotScore(
indicator=domain_key,
indicator_type=DBotScoreType.DOMAIN,
integration_name='CTIX',
score=score
)
domain_standard_context = Common.Domain(
domain=domain_key,
dbot_score=dbot_score
)
domain_data_list.append(CommandResults(
readable_output=tableToMarkdown('Domain Data', domain_data, removeNull=True),
outputs_prefix='CTIX.Domain',
outputs_key_field='name2',
outputs=domain_data,
indicator=domain_standard_context
))
else:
dbot_score = Common.DBotScore(
indicator=domain_key,
indicator_type=DBotScoreType.DOMAIN,
integration_name="CTIX",
score=0,
)
domain_standard_context = Common.Domain(
domain=domain_key,
dbot_score=dbot_score
)
domain_data_list.append(CommandResults(
readable_output=f'No matches found for Domain {domain_key}',
outputs_prefix='CTIX.Domain',
outputs_key_field='name2',
outputs=domain_data,
indicator=domain_standard_context
))
return domain_data_list
|
20,172 |
def password_validator(password, is_register, **kwargs):
error_list = []
# calculating the length
if len(password) < current_app.config.get('SECURITY_PASSWORD_LENGTH_MIN'):
error_list.append(_('Password too short'))
# searching for lowercase
if current_app.config.get('SECURITY_PASSWORD_REQUIREMENTS_LOWERCASE') and (re.search(r"[a-z]", password) is None):
error_list.append(_('Password must contain lowercases'))
# searching for digits
if current_app.config.get('SECURITY_PASSWORD_REQUIREMENTS_DIGITS') and (re.search(r"\d", password) is None):
error_list.append(_('Password must contain digits'))
# searching for uppercase
if current_app.config.get('SECURITY_PASSWORD_REQUIREMENTS_UPPERCASE') and (re.search(r"[A-Z]", password) is None):
error_list.append(_('Password must contain uppercases'))
# searching for symbols
if current_app.config.get('SECURITY_PASSWORD_REQUIREMENTS_SYMBOLS') and (re.search(r"[ !#$%&'()*+,-./[\\\]^_`{|}~"+r'"]', password) is None):
error_list.append(_('Password must contain symbols'))
if error_list:
return error_list
return None
|
def password_validator(password, is_register, **kwargs):
error_list = []
# calculating the length
if len(password) < current_app.config.get('SECURITY_PASSWORD_LENGTH_MIN'):
error_list.append(_('Password too short'))
# searching for lowercase
if current_app.config.get('SECURITY_PASSWORD_REQUIREMENTS_LOWERCASE') and (re.search(r"[a-z]", password) is None):
error_list.append(_('Password must contain lowercases'))
# searching for digits
if current_app.config.get('SECURITY_PASSWORD_REQUIREMENTS_DIGITS') and (re.search(r"\d", password) is None):
error_list.append(_('Password must contain digits'))
# searching for uppercase
if current_app.config.get('SECURITY_PASSWORD_REQUIREMENTS_UPPERCASE') and (re.search(r"[A-Z]", password) is None):
error_list.append(_('Password must contain uppercases'))
# searching for symbols
if current_app.config.get('SECURITY_PASSWORD_REQUIREMENTS_SYMBOLS') and (re.search(r"[ !#$%&'()*+,-./[\\\]^_`{|}~"+r'"]', password) is None):
error_list.append(_('Password must contain symbols'))
return error_list or None
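
# Configuration keys consumed above (Flask-Security style; the values shown are
# illustrative assumptions, not library defaults):
#     SECURITY_PASSWORD_LENGTH_MIN = 8
#     SECURITY_PASSWORD_REQUIREMENTS_LOWERCASE = True
#     SECURITY_PASSWORD_REQUIREMENTS_UPPERCASE = True
#     SECURITY_PASSWORD_REQUIREMENTS_DIGITS = True
#     SECURITY_PASSWORD_REQUIREMENTS_SYMBOLS = True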
|
42,896 |
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments :math:`(\pi/4, \pi/2)`, and the two phase-shifters (see :class:`~.ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
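
# Hypothetical check (a sketch; `_rectangular_symmetric_demo` is not part of the module):
# the Clements-style rectangular decomposition of an n x n unitary yields n*(n-1)/2
# T unitaries, and `rectangular_symmetric` preserves that count.
def _rectangular_symmetric_demo(n=4):
    # `unitary_group` is assumed available via SciPy for drawing a Haar-random unitary.
    from scipy.stats import unitary_group
    U = unitary_group.rvs(n)
    tlist, diag_entries = rectangular_symmetric(U)
    assert len(tlist) == n * (n - 1) // 2
    return tlist, diag_entries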
|
52,894 |
def fornav(cols, rows, area_def, data_in,
rows_per_scan=None, fill=None, out=None,
weight_count=10000, weight_min=0.01, weight_distance_max=1.0,
weight_delta_max=10.0, weight_sum_min=-1.0,
maximum_weight_mode=False):
"""Remap data in to output grid using elliptical weighted averaging.
This algorithm works under the assumption that the data is observed
one scan line at a time. However, good results can still be achieved
for non-scan based data is provided if `rows_per_scan` is set to the
number of rows in the entire swath or by setting it to `None`.
Parameters
----------
cols : numpy array
Column location for each input swath pixel (from `ll2cr`)
rows : numpy array
Row location for each input swath pixel (from `ll2cr`)
area_def : AreaDefinition
Grid definition to be mapped to
data_in : numpy array or tuple of numpy arrays
Swath data to be remapped to output grid
rows_per_scan : int or None, optional
Number of data rows for every observed scanline. If None then the
entire swath is treated as one large scanline.
fill : float/int or None, optional
If `data_in` is made of numpy arrays then this represents the fill
value used to mark invalid data pixels. This value will also be
used in the output array(s). If None, then np.nan will be used
for float arrays and -999 will be used for integer arrays.
out : numpy array or tuple of numpy arrays, optional
Specify a numpy array to be written to for each input array. This can
be used as an optimization by providing `np.memmap` arrays or other
array-like objects.
weight_count : int, optional
number of elements to create in the gaussian weight table.
Default is 10000. Must be at least 2
weight_min : float, optional
the minimum value to store in the last position of the
weight table. Default is 0.01, which, with a
`weight_distance_max` of 1.0 produces a weight of 0.01
at a grid cell distance of 1.0. Must be greater than 0.
weight_distance_max : float, optional
distance in grid cell units at which to
apply a weight of `weight_min`. Default is
1.0. Must be greater than 0.
weight_delta_max : float, optional
maximum distance in grid cells in each grid
dimension over which to distribute a single swath cell.
Default is 10.0.
weight_sum_min : float, optional
minimum weight sum value. Cells whose weight sums
are less than `weight_sum_min` are set to the grid fill value.
Default is EPSILON.
maximum_weight_mode : bool, optional
If False (default), a weighted average of
all swath cells that map to a particular grid cell is used.
If True, the swath cell having the maximum weight of all
swath cells that map to a particular grid cell is used. This
option should be used for coded/category data, i.e. snow cover.
Returns
-------
(valid grid points, output arrays): tuple of integer tuples and numpy array tuples
The valid_grid_points tuple holds the number of output grid pixels that
were written with valid data. The second element in the tuple is a tuple of
output grid numpy arrays for each input array. If there was only one input
        array provided then the returned tuple is simply the single valid-points integer
        and the single output grid array.
"""
if isinstance(data_in, (tuple, list)):
# we can only support one data type per call at this time
assert (in_arr.dtype == data_in[0].dtype for in_arr in data_in[1:])
else:
# assume they gave us a single numpy array-like object
data_in = [data_in]
# need a list for replacing these arrays later
data_in = [np.ascontiguousarray(d) for d in data_in]
# determine a fill value if they didn't tell us what they have as a
# fill value in the numpy arrays
if fill is None:
if np.issubdtype(data_in[0].dtype, np.floating):
fill = np.nan
elif np.issubdtype(data_in[0].dtype, np.integer):
fill = -999
else:
raise ValueError(
"Unsupported input data type for EWA Resampling: {}".format(data_in[0].dtype))
convert_to_masked = False
for idx, in_arr in enumerate(data_in):
if isinstance(in_arr, np.ma.MaskedArray):
convert_to_masked = True
# convert masked arrays to single numpy arrays
data_in[idx] = in_arr.filled(fill)
data_in = tuple(data_in)
if out is not None:
# the user may have provided memmapped arrays or other array-like
# objects
if isinstance(out, (tuple, list)):
out = tuple(out)
else:
out = (out,)
else:
# create a place for output data to be written
out = tuple(np.empty(area_def.shape, dtype=in_arr.dtype)
for in_arr in data_in)
# see if the user specified rows per scan
# otherwise, use the entire swath as one "scanline"
rows_per_scan = rows_per_scan or data_in[0].shape[0]
results = _fornav.fornav_wrapper(cols, rows, data_in, out,
np.nan, np.nan, rows_per_scan,
weight_count=weight_count,
weight_min=weight_min,
weight_distance_max=weight_distance_max,
weight_delta_max=weight_delta_max,
weight_sum_min=weight_sum_min,
maximum_weight_mode=maximum_weight_mode)
def _mask_helper(data, fill):
if np.isnan(fill):
return np.isnan(data)
return data == fill
if convert_to_masked:
# they gave us masked arrays so give them masked arrays back
out = [np.ma.masked_where(_mask_helper(out_arr, fill), out_arr)
for out_arr in out]
if len(out) == 1:
# they only gave us one data array as input, so give them one back
out = out[0]
results = results[0]
return results, out
|
def fornav(cols, rows, area_def, data_in,
rows_per_scan=None, fill=None, out=None,
weight_count=10000, weight_min=0.01, weight_distance_max=1.0,
weight_delta_max=10.0, weight_sum_min=-1.0,
maximum_weight_mode=False):
"""Remap data in to output grid using elliptical weighted averaging.
This algorithm works under the assumption that the data is observed
one scan line at a time. However, good results can still be achieved
for non-scan based data is provided if `rows_per_scan` is set to the
number of rows in the entire swath or by setting it to `None`.
Parameters
----------
cols : numpy array
Column location for each input swath pixel (from `ll2cr`)
rows : numpy array
Row location for each input swath pixel (from `ll2cr`)
area_def : AreaDefinition
Grid definition to be mapped to
data_in : numpy array or tuple of numpy arrays
Swath data to be remapped to output grid
rows_per_scan : int or None, optional
Number of data rows for every observed scanline. If None then the
entire swath is treated as one large scanline.
fill : float/int or None, optional
If `data_in` is made of numpy arrays then this represents the fill
value used to mark invalid data pixels. This value will also be
used in the output array(s). If None, then np.nan will be used
for float arrays and -999 will be used for integer arrays.
out : numpy array or tuple of numpy arrays, optional
Specify a numpy array to be written to for each input array. This can
be used as an optimization by providing `np.memmap` arrays or other
array-like objects.
weight_count : int, optional
number of elements to create in the gaussian weight table.
Default is 10000. Must be at least 2
weight_min : float, optional
the minimum value to store in the last position of the
weight table. Default is 0.01, which, with a
`weight_distance_max` of 1.0 produces a weight of 0.01
at a grid cell distance of 1.0. Must be greater than 0.
weight_distance_max : float, optional
distance in grid cell units at which to
apply a weight of `weight_min`. Default is
1.0. Must be greater than 0.
weight_delta_max : float, optional
maximum distance in grid cells in each grid
dimension over which to distribute a single swath cell.
Default is 10.0.
weight_sum_min : float, optional
minimum weight sum value. Cells whose weight sums
are less than `weight_sum_min` are set to the grid fill value.
Default is EPSILON.
maximum_weight_mode : bool, optional
If False (default), a weighted average of
all swath cells that map to a particular grid cell is used.
If True, the swath cell having the maximum weight of all
swath cells that map to a particular grid cell is used. This
option should be used for coded/category data, i.e. snow cover.
Returns
-------
(valid grid points, output arrays): tuple of integer tuples and numpy array tuples
The valid_grid_points tuple holds the number of output grid pixels that
were written with valid data. The second element in the tuple is a tuple of
output grid numpy arrays for each input array. If there was only one input
        array provided then the returned tuple is simply the single valid-points integer
        and the single output grid array.
"""
if isinstance(data_in, (tuple, list)):
# we can only support one data type per call at this time
for in_arr in data_in[1:]:
if in_arr.dtype != data_in[0].dtype:
raise ValueError("All input arrays must be the same dtype")
else:
# assume they gave us a single numpy array-like object
data_in = [data_in]
# need a list for replacing these arrays later
data_in = [np.ascontiguousarray(d) for d in data_in]
# determine a fill value if they didn't tell us what they have as a
# fill value in the numpy arrays
if fill is None:
if np.issubdtype(data_in[0].dtype, np.floating):
fill = np.nan
elif np.issubdtype(data_in[0].dtype, np.integer):
fill = -999
else:
raise ValueError(
"Unsupported input data type for EWA Resampling: {}".format(data_in[0].dtype))
convert_to_masked = False
for idx, in_arr in enumerate(data_in):
if isinstance(in_arr, np.ma.MaskedArray):
convert_to_masked = True
# convert masked arrays to single numpy arrays
data_in[idx] = in_arr.filled(fill)
data_in = tuple(data_in)
if out is not None:
# the user may have provided memmapped arrays or other array-like
# objects
if isinstance(out, (tuple, list)):
out = tuple(out)
else:
out = (out,)
else:
# create a place for output data to be written
out = tuple(np.empty(area_def.shape, dtype=in_arr.dtype)
for in_arr in data_in)
# see if the user specified rows per scan
# otherwise, use the entire swath as one "scanline"
rows_per_scan = rows_per_scan or data_in[0].shape[0]
results = _fornav.fornav_wrapper(cols, rows, data_in, out,
np.nan, np.nan, rows_per_scan,
weight_count=weight_count,
weight_min=weight_min,
weight_distance_max=weight_distance_max,
weight_delta_max=weight_delta_max,
weight_sum_min=weight_sum_min,
maximum_weight_mode=maximum_weight_mode)
def _mask_helper(data, fill):
if np.isnan(fill):
return np.isnan(data)
return data == fill
if convert_to_masked:
# they gave us masked arrays so give them masked arrays back
out = [np.ma.masked_where(_mask_helper(out_arr, fill), out_arr)
for out_arr in out]
if len(out) == 1:
# they only gave us one data array as input, so give them one back
out = out[0]
results = results[0]
return results, out
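
# Hypothetical usage sketch (object names are assumptions; `ll2cr` is the companion
# function referenced in the docstring above, and its exact return values may differ):
#     swath_points_in_grid, cols, rows = ll2cr(swath_def, area_def)
#     num_valid, gridded = fornav(cols, rows, area_def, swath_data, rows_per_scan=16)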
|
54,862 |
def laplacian_pe(g, k, padding=False, return_eigval=False):
r"""Laplacian Positional Encoding, as introduced in
`Benchmarking Graph Neural Networks
<https://arxiv.org/abs/2003.00982>`__
This function computes the laplacian positional encodings as the
k smallest non-trivial eigenvectors.
Parameters
----------
g : DGLGraph
The input graph. Must be homogeneous.
k : int
Number of smallest non-trivial eigenvectors to use for positional encoding.
padding : bool
If padding=='false', raise exception when k>=n.
Else return (n-1) laplacian positional encodings and (k-n+1) zero encodings
(padding) when k>=n.
n is the number of nodes in the given graph.
return_eigval : bool
        If True, return the Laplacian eigenvalues together with the eigenvectors.
        Otherwise, return the eigenvectors only.
Returns
-------
Tensor
The laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph.
Two tensors
The eigenvalues of shape :math:`N` and
the laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph.
Example
-------
>>> import dgl
>>> g = dgl.rand_graph(6, 20)
>>> dgl.laplacian_pe(g, 2)
tensor([[ 0.7251, -0.6224],
[-0.0000, 0.5390],
[-0.4065, 0.4042],
[-0.0744, 0.0519],
[-0.4694, -0.1556],
[ 0.2881, -0.3631]])
>>> dgl.laplacian_pe(g, 6, padding=True)
tensor([[-7.2513e-01, -6.2238e-01, -1.8517e-09, 1.8517e-09, 4.3006e-01, 0.0000e+00],
[ 0.0000e+00, 5.3900e-01, -0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],
[ 4.0653e-01, 4.0425e-01, 6.4145e-09, -6.4145e-09, 2.8766e-01, 0.0000e+00],
[ 7.4425e-02, 5.1865e-02, -7.0711e-01, -7.0711e-01, -6.5471e-01, 0.0000e+00],
[ 4.6942e-01, -1.5560e-01, -7.4068e-09, 7.4068e-09, 3.3216e-01, 0.0000e+00],
[-2.8814e-01, -3.6306e-01, 7.0711e-01, 7.0711e-01, -4.3968e-01, 0.0000e+00]])
>>> dgl.laplacian_pe(g, 6, padding=True, return_eigval=True)
(tensor([0.5684, 0.7500, 1.0000, 1.0000, 1.5149, nan]),
tensor([[ 7.2513e-01, -6.2238e-01, 1.8517e-09, -1.8517e-09, -4.3006e-01, 0.0000e+00],
[-0.0000e+00, 5.3900e-01, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],
[-4.0653e-01, 4.0425e-01, -6.4145e-09, 6.4145e-09, -2.8766e-01, 0.0000e+00],
[-7.4425e-02, 5.1865e-02, 7.0711e-01, 7.0711e-01, 6.5471e-01, 0.0000e+00],
[-4.6942e-01, -1.5560e-01, 7.4068e-09, -7.4068e-09, -3.3216e-01, 0.0000e+00],
[ 2.8814e-01, -3.6306e-01, -7.0711e-01, -7.0711e-01, 4.3968e-01, 0.0000e+00]]))
"""
# check for the "k < n" constraint
n = g.num_nodes()
    if not padding and n <= k:
        raise ValueError("the number of eigenvectors k must be smaller than the number "
                         f"of nodes n, {k} and {n} detected.")
# get laplacian matrix as I - D^-0.5 * A * D^-0.5
A = g.adj(scipy_fmt='csr') # adjacency matrix
N = sparse.diags(F.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) # D^-1/2
L = sparse.eye(g.num_nodes()) - N * A * N
# select eigenvectors with smaller eigenvalues O(n + klogk)
EigVal, EigVec = np.linalg.eig(L.toarray())
max_freqs = min(n-1,k)
kpartition_indices = np.argpartition(EigVal, max_freqs)[:max_freqs+1]
topk_eigvals = EigVal[kpartition_indices]
topk_indices = kpartition_indices[topk_eigvals.argsort()][1:]
topk_EigVec = np.real(EigVec[:, topk_indices])
eigvals = F.tensor(np.real(EigVal[topk_indices]), dtype=F.float32)
# get random flip signs
rand_sign = 2 * (np.random.rand(max_freqs) > 0.5) - 1.
PE = F.astype(F.tensor(rand_sign * topk_EigVec), F.float32)
# add paddings
if n <= k:
temp_EigVec = F.zeros([n, k-n+1], dtype=F.float32, ctx=F.context(PE))
PE = F.cat([PE, temp_EigVec], dim=1)
temp_EigVal = F.tensor(np.full(k-n+1, np.nan), F.float32)
eigvals = F.cat([eigvals, temp_EigVal], dim=0)
if return_eigval:
return eigvals, PE
return PE
|
def laplacian_pe(g, k, padding=False, return_eigval=False):
r"""Laplacian Positional Encoding, as introduced in
`Benchmarking Graph Neural Networks
<https://arxiv.org/abs/2003.00982>`__
This function computes the laplacian positional encodings as the
k smallest non-trivial eigenvectors.
Parameters
----------
g : DGLGraph
The input graph. Must be homogeneous.
k : int
Number of smallest non-trivial eigenvectors to use for positional encoding.
padding : bool
If False, raise an exception when k>=n.
Otherwise, add zero paddings in the end when k>=n.
Default: False.
n is the number of nodes in the given graph.
return_eigval : bool
        If True, return the Laplacian eigenvalues together with the eigenvectors.
        Otherwise, return the eigenvectors only.
Returns
-------
Tensor
The laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph.
Two tensors
The eigenvalues of shape :math:`N` and
the laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph.
Example
-------
>>> import dgl
>>> g = dgl.rand_graph(6, 20)
>>> dgl.laplacian_pe(g, 2)
tensor([[ 0.7251, -0.6224],
[-0.0000, 0.5390],
[-0.4065, 0.4042],
[-0.0744, 0.0519],
[-0.4694, -0.1556],
[ 0.2881, -0.3631]])
>>> dgl.laplacian_pe(g, 6, padding=True)
tensor([[-7.2513e-01, -6.2238e-01, -1.8517e-09, 1.8517e-09, 4.3006e-01, 0.0000e+00],
[ 0.0000e+00, 5.3900e-01, -0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],
[ 4.0653e-01, 4.0425e-01, 6.4145e-09, -6.4145e-09, 2.8766e-01, 0.0000e+00],
[ 7.4425e-02, 5.1865e-02, -7.0711e-01, -7.0711e-01, -6.5471e-01, 0.0000e+00],
[ 4.6942e-01, -1.5560e-01, -7.4068e-09, 7.4068e-09, 3.3216e-01, 0.0000e+00],
[-2.8814e-01, -3.6306e-01, 7.0711e-01, 7.0711e-01, -4.3968e-01, 0.0000e+00]])
>>> dgl.laplacian_pe(g, 6, padding=True, return_eigval=True)
(tensor([0.5684, 0.7500, 1.0000, 1.0000, 1.5149, nan]),
tensor([[ 7.2513e-01, -6.2238e-01, 1.8517e-09, -1.8517e-09, -4.3006e-01, 0.0000e+00],
[-0.0000e+00, 5.3900e-01, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],
[-4.0653e-01, 4.0425e-01, -6.4145e-09, 6.4145e-09, -2.8766e-01, 0.0000e+00],
[-7.4425e-02, 5.1865e-02, 7.0711e-01, 7.0711e-01, 6.5471e-01, 0.0000e+00],
[-4.6942e-01, -1.5560e-01, 7.4068e-09, -7.4068e-09, -3.3216e-01, 0.0000e+00],
[ 2.8814e-01, -3.6306e-01, -7.0711e-01, -7.0711e-01, 4.3968e-01, 0.0000e+00]]))
"""
# check for the "k < n" constraint
n = g.num_nodes()
    if not padding and n <= k:
        raise ValueError("the number of eigenvectors k must be smaller than the number "
                         f"of nodes n, {k} and {n} detected.")
# get laplacian matrix as I - D^-0.5 * A * D^-0.5
A = g.adj(scipy_fmt='csr') # adjacency matrix
N = sparse.diags(F.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) # D^-1/2
L = sparse.eye(g.num_nodes()) - N * A * N
# select eigenvectors with smaller eigenvalues O(n + klogk)
EigVal, EigVec = np.linalg.eig(L.toarray())
max_freqs = min(n-1,k)
kpartition_indices = np.argpartition(EigVal, max_freqs)[:max_freqs+1]
topk_eigvals = EigVal[kpartition_indices]
topk_indices = kpartition_indices[topk_eigvals.argsort()][1:]
topk_EigVec = np.real(EigVec[:, topk_indices])
eigvals = F.tensor(np.real(EigVal[topk_indices]), dtype=F.float32)
# get random flip signs
rand_sign = 2 * (np.random.rand(max_freqs) > 0.5) - 1.
PE = F.astype(F.tensor(rand_sign * topk_EigVec), F.float32)
# add paddings
if n <= k:
temp_EigVec = F.zeros([n, k-n+1], dtype=F.float32, ctx=F.context(PE))
PE = F.cat([PE, temp_EigVec], dim=1)
temp_EigVal = F.tensor(np.full(k-n+1, np.nan), F.float32)
eigvals = F.cat([eigvals, temp_EigVal], dim=0)
if return_eigval:
return eigvals, PE
return PE
|
59,846 |
def _velocity(field, data, idir, prefix=None):
"""Velocity = linear momentum / density"""
# This is meant to be used with functools.partial to produce
# functions with only 2 arguments (field, data)
# idir : int
# the direction index (1, 2 or 3)
# prefix : str
# used to generalize to dust fields
if prefix is None:
prefix = ""
moment = data["gas", "%smoment_%d" % (prefix, idir)]
rho = data["gas", f"{prefix}density"]
mask1 = rho == 0
if mask1.any():
warnings.warn(
"zeros found in %sdensity, "
"patching them to compute corresponding velocity field.",
prefix,
category=RuntimeWarning
)
mask2 = moment == 0
if not ((mask1 & mask2) == mask1).all():
raise RuntimeError
rho[mask1] = 1
return moment / rho
|
def _velocity(field, data, idir, prefix=None):
"""Velocity = linear momentum / density"""
# This is meant to be used with functools.partial to produce
# functions with only 2 arguments (field, data)
# idir : int
# the direction index (1, 2 or 3)
# prefix : str
# used to generalize to dust fields
if prefix is None:
prefix = ""
moment = data["gas", "%smoment_%d" % (prefix, idir)]
rho = data["gas", f"{prefix}density"]
mask1 = rho == 0
if mask1.any():
warnings.warn(
f"zeros found in {prefix}density"
"patching them to compute corresponding velocity field.",
category=RuntimeWarning
)
mask2 = moment == 0
if not ((mask1 & mask2) == mask1).all():
raise RuntimeError
rho[mask1] = 1
return moment / rho
|
28,614 |
def plot_kde(
values,
values2=None,
cumulative=False,
rug=False,
label=None,
bw="default",
adaptive=False,
quantiles=None,
rotated=False,
contour=True,
hdi_probs=None,
fill_last=False,
figsize=None,
textsize=None,
plot_kwargs=None,
fill_kwargs=None,
rug_kwargs=None,
contour_kwargs=None,
contourf_kwargs=None,
pcolormesh_kwargs=None,
is_circular=False,
ax=None,
legend=True,
backend=None,
backend_kwargs=None,
show=None,
return_glyph=False,
**kwargs
):
"""1D or 2D KDE plot taking into account boundary conditions.
Parameters
----------
values : array-like
Values to plot
values2 : array-like, optional
Values to plot. If present, a 2D KDE will be estimated
cumulative : bool
If true plot the estimated cumulative distribution function. Defaults to False.
Ignored for 2D KDE
rug : bool
If True adds a rugplot. Defaults to False. Ignored for 2D KDE
label : string
Text to include as part of the legend
bw: float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
one of "scott", "silverman", "isj" or "experimental" when ``is_circular`` is False
and "taylor" (for now) when ``is_circular`` is True.
Defaults to "default" which means "experimental" when variable is not circular
and "taylor" when it is.
adaptive: bool, optional.
        If True, an adaptive bandwidth is used. Only valid for 1D KDE.
Defaults to False.
quantiles : list
Quantiles in ascending order used to segment the KDE.
Use [.25, .5, .75] for quartiles. Defaults to None.
rotated : bool
Whether to rotate the 1D KDE plot 90 degrees.
contour : bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE.
Defaults to True.
hdi_probs : list
Plots highest density credibility regions for the provided probabilities for a 2D KDE.
Defaults to matplotlib chosen levels with no fixed probability associated.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to False.
figsize : tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``. Not implemented for bokeh backend.
plot_kwargs : dict
Keywords passed to the pdf line of a 1D KDE. See :meth:`mpl:matplotlib.axes.Axes.plot`
or :meth:`bokeh:bokeh.plotting.Figure.line` for a description of accepted values.
fill_kwargs : dict
Keywords passed to the fill under the line (use ``fill_kwargs={'alpha': 0}``
to disable fill). Ignored for 2D KDE. Passed to
:meth:`bokeh.plotting.Figure.patch`.
rug_kwargs : dict
Keywords passed to the rug plot. Ignored if ``rug=False`` or for 2D KDE
Use ``space`` keyword (float) to control the position of the rugplot. The larger this number
the lower the rugplot. Passed to :class:`bokeh:bokeh.models.glyphs.Scatter`.
contour_kwargs : dict
Keywords passed to :meth:`mpl:matplotlib.axes.Axes.contour`
to draw contour lines or :meth:`bokeh.plotting.Figure.patch`.
Ignored for 1D KDE.
contourf_kwargs : dict
Keywords passed to :meth:`mpl:matplotlib.axes.Axes.contourf`
to draw filled contours. Ignored for 1D KDE.
pcolormesh_kwargs : dict
Keywords passed to :meth:`mpl:matplotlib.axes.Axes.pcolormesh` or
:meth:`bokeh.plotting.Figure.image`.
Ignored for 1D KDE.
is_circular : {False, True, "radians", "degrees"}. Default False.
Select input type {"radians", "degrees"} for circular histogram or KDE plot. If True,
        default input type is "radians". When this argument is present, it interprets ``values``
        as a circular variable measured in radians and a circular KDE is used. Inputs in
"degrees" will undergo an internal conversion to radians.
ax: axes, optional
Matplotlib axes or bokeh figures.
legend : bool
Add legend to the figure. By default True.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`. For additional documentation
check the plotting method of the backend.
show : bool, optional
Call backend show function.
return_glyph : bool, optional
Internal argument to return glyphs for bokeh
Returns
-------
axes : matplotlib.Axes or bokeh.plotting.Figure
Object containing the kde plot
glyphs : list, optional
Bokeh glyphs present in plot. Only provided if ``return_glyph`` is True.
See Also
--------
kde : One dimensional density estimation.
plot_dist : Plot distribution as histogram or kernel density estimates.
Examples
--------
Plot default KDE
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered = az.load_arviz_data('non_centered_eight')
>>> mu_posterior = np.concatenate(non_centered.posterior["mu"].values)
>>> tau_posterior = np.concatenate(non_centered.posterior["tau"].values)
>>> az.plot_kde(mu_posterior)
Plot KDE with rugplot
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, rug=True)
Plot KDE with adaptive bandwidth
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, adaptive=True)
Plot KDE with a different bandwidth estimator
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, bw="scott")
Plot KDE with a bandwidth specified manually
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, bw=0.4)
Plot KDE for a circular variable
.. plot::
:context: close-figs
>>> rvs = np.random.vonmises(mu=np.pi, kappa=2, size=500)
>>> az.plot_kde(rvs, is_circular=True)
Plot a cumulative distribution
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, cumulative=True)
Rotate plot 90 degrees
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, rotated=True)
Plot 2d contour KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior)
Plot 2d contour KDE, without filling and contour lines using viridis cmap
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior,
... contour_kwargs={"colors":None, "cmap":plt.cm.viridis},
... contourf_kwargs={"alpha":0});
Plot 2d contour KDE, set the number of levels to 3.
.. plot::
:context: close-figs
>>> az.plot_kde(
... mu_posterior, values2=tau_posterior,
... contour_kwargs={"levels":3}, contourf_kwargs={"levels":3}
... );
Plot 2d contour KDE with 30%, 60% and 90% HDI contours.
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, hdi_probs=[0.3, 0.6, 0.9])
Plot 2d smooth KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, contour=False)
"""
if isinstance(values, xr.Dataset):
raise ValueError(
"Xarray dataset object detected.Use plot_posterior, plot_density "
"or plot_pair instead of plot_kde"
)
if isinstance(values, InferenceData):
raise ValueError(
" Inference Data object detected. Use plot_posterior "
"or plot_pair instead of plot_kde"
)
if values2 is None:
if bw == "default":
if is_circular:
bw = "taylor"
else:
bw = "experimental"
grid, density = kde(values, is_circular, bw=bw, adaptive=adaptive, cumulative=cumulative)
lower, upper = grid[0], grid[-1]
if cumulative:
density_q = density
else:
density_q = density.cumsum() / density.sum()
# This is just a hack placeholder for now
xmin, xmax, ymin, ymax, gridsize = [None] * 5
else:
gridsize = (128, 128) if contour else (256, 256)
density, xmin, xmax, ymin, ymax = _fast_kde_2d(values, values2, gridsize=gridsize)
if hdi_probs is not None:
# Check hdi probs are within bounds (0, 1)
if min(hdi_probs) <= 0 or max(hdi_probs) >= 1:
raise ValueError("Highest density interval probabilities must be between 0 and 1")
# Calculate contour levels and sort for matplotlib
contour_levels = _find_hdi_contours(density, hdi_probs)
contour_levels.sort()
contour_level_list = [0] + list(contour_levels) + [density.max()]
# Add keyword arguments to contour, contourf
contour_kwargs = _init_kwargs_dict(contour_kwargs)
if "levels" in contour_kwargs:
warnings.warn(
"Both 'levels' in contour_kwargs and 'hdi_probs' have been specified."
"Using 'hdi_probs' in favor of 'levels'.",
UserWarning,
)
contour_kwargs["levels"] = contour_level_list
contourf_kwargs = _init_kwargs_dict(contourf_kwargs)
if "levels" in contourf_kwargs:
warnings.warn(
"Both 'levels' in contourf_kwargs and 'hdi_probs' have been specified."
"Using 'hdi_probs' in favor of 'levels'.",
UserWarning,
)
contourf_kwargs["levels"] = contour_level_list
lower, upper, density_q = [None] * 3
kde_plot_args = dict(
# Internal API
density=density,
lower=lower,
upper=upper,
density_q=density_q,
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax,
gridsize=gridsize,
# User Facing API that can be simplified
values=values,
values2=values2,
rug=rug,
label=label,
quantiles=quantiles,
rotated=rotated,
contour=contour,
fill_last=fill_last,
figsize=figsize,
textsize=textsize,
plot_kwargs=plot_kwargs,
fill_kwargs=fill_kwargs,
rug_kwargs=rug_kwargs,
contour_kwargs=contour_kwargs,
contourf_kwargs=contourf_kwargs,
pcolormesh_kwargs=pcolormesh_kwargs,
is_circular=is_circular,
ax=ax,
legend=legend,
backend_kwargs=backend_kwargs,
show=show,
return_glyph=return_glyph,
**kwargs,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_kde", "kdeplot", backend)
ax = plot(**kde_plot_args)
return ax
|
def plot_kde(
values,
values2=None,
cumulative=False,
rug=False,
label=None,
bw="default",
adaptive=False,
quantiles=None,
rotated=False,
contour=True,
hdi_probs=None,
fill_last=False,
figsize=None,
textsize=None,
plot_kwargs=None,
fill_kwargs=None,
rug_kwargs=None,
contour_kwargs=None,
contourf_kwargs=None,
pcolormesh_kwargs=None,
is_circular=False,
ax=None,
legend=True,
backend=None,
backend_kwargs=None,
show=None,
return_glyph=False,
**kwargs
):
"""1D or 2D KDE plot taking into account boundary conditions.
Parameters
----------
values : array-like
Values to plot
values2 : array-like, optional
Values to plot. If present, a 2D KDE will be estimated
cumulative : bool
If true plot the estimated cumulative distribution function. Defaults to False.
Ignored for 2D KDE
rug : bool
If True adds a rugplot. Defaults to False. Ignored for 2D KDE
label : string
Text to include as part of the legend
bw: float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
one of "scott", "silverman", "isj" or "experimental" when ``is_circular`` is False
and "taylor" (for now) when ``is_circular`` is True.
Defaults to "default" which means "experimental" when variable is not circular
and "taylor" when it is.
adaptive: bool, optional.
If True, an adaptive bandwidth is used. Only valid for 1D KDE.
Defaults to False.
quantiles : list
Quantiles in ascending order used to segment the KDE.
Use [.25, .5, .75] for quartiles. Defaults to None.
rotated : bool
Whether to rotate the 1D KDE plot 90 degrees.
contour : bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE.
Defaults to True.
hdi_probs : list
Plots highest density credibility regions for the provided probabilities for a 2D KDE.
Defaults to matplotlib chosen levels with no fixed probability associated.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to False.
figsize : tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``. Not implemented for bokeh backend.
plot_kwargs : dict
Keywords passed to the pdf line of a 1D KDE. See :meth:`mpl:matplotlib.axes.Axes.plot`
or :meth:`bokeh:bokeh.plotting.Figure.line` for a description of accepted values.
fill_kwargs : dict
Keywords passed to the fill under the line (use ``fill_kwargs={'alpha': 0}``
to disable fill). Ignored for 2D KDE. Passed to
:meth:`bokeh.plotting.Figure.patch`.
rug_kwargs : dict
Keywords passed to the rug plot. Ignored if ``rug=False`` or for 2D KDE
Use ``space`` keyword (float) to control the position of the rugplot. The larger this number
the lower the rugplot. Passed to :class:`bokeh:bokeh.models.glyphs.Scatter`.
contour_kwargs : dict
Keywords passed to :meth:`mpl:matplotlib.axes.Axes.contour`
to draw contour lines or :meth:`bokeh.plotting.Figure.patch`.
Ignored for 1D KDE.
contourf_kwargs : dict
Keywords passed to :meth:`mpl:matplotlib.axes.Axes.contourf`
to draw filled contours. Ignored for 1D KDE.
pcolormesh_kwargs : dict
Keywords passed to :meth:`mpl:matplotlib.axes.Axes.pcolormesh` or
:meth:`bokeh.plotting.Figure.image`.
Ignored for 1D KDE.
is_circular : {False, True, "radians", "degrees"}. Default False.
Select input type {"radians", "degrees"} for circular histogram or KDE plot. If True,
default input type is "radians". When this argument is present, it interprets ``values``
as a circular variable measured in radians and a circular KDE is used. Inputs in
"degrees" will undergo an internal conversion to radians.
ax: axes, optional
Matplotlib axes or bokeh figures.
legend : bool
Add legend to the figure. By default True.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: dict, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`. For additional documentation
check the plotting method of the backend.
show : bool, optional
Call backend show function.
return_glyph : bool, optional
Internal argument to return glyphs for bokeh
Returns
-------
axes : matplotlib.Axes or bokeh.plotting.Figure
Object containing the kde plot
glyphs : list, optional
Bokeh glyphs present in plot. Only provided if ``return_glyph`` is True.
See Also
--------
kde : One dimensional density estimation.
plot_dist : Plot distribution as histogram or kernel density estimates.
Examples
--------
Plot default KDE
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered = az.load_arviz_data('non_centered_eight')
>>> mu_posterior = np.concatenate(non_centered.posterior["mu"].values)
>>> tau_posterior = np.concatenate(non_centered.posterior["tau"].values)
>>> az.plot_kde(mu_posterior)
Plot KDE with rugplot
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, rug=True)
Plot KDE with adaptive bandwidth
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, adaptive=True)
Plot KDE with a different bandwidth estimator
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, bw="scott")
Plot KDE with a bandwidth specified manually
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, bw=0.4)
Plot KDE for a circular variable
.. plot::
:context: close-figs
>>> rvs = np.random.vonmises(mu=np.pi, kappa=2, size=500)
>>> az.plot_kde(rvs, is_circular=True)
Plot a cumulative distribution
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, cumulative=True)
Rotate plot 90 degrees
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, rotated=True)
Plot 2d contour KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior)
Plot 2d contour KDE, without filling and contour lines using viridis cmap
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior,
... contour_kwargs={"colors":None, "cmap":plt.cm.viridis},
... contourf_kwargs={"alpha":0});
Plot 2d contour KDE, set the number of levels to 3.
.. plot::
:context: close-figs
>>> az.plot_kde(
... mu_posterior, values2=tau_posterior,
... contour_kwargs={"levels":3}, contourf_kwargs={"levels":3}
... );
Plot 2d contour KDE with 30%, 60% and 90% HDI contours.
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, hdi_probs=[0.3, 0.6, 0.9])
Plot 2d smooth KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, contour=False)
"""
if isinstance(values, xr.Dataset):
raise ValueError(
"Xarray dataset object detected. Use plot_posterior, plot_density "
"or plot_pair instead of plot_kde"
)
if isinstance(values, InferenceData):
raise ValueError(
" Inference Data object detected. Use plot_posterior "
"or plot_pair instead of plot_kde"
)
if values2 is None:
if bw == "default":
if is_circular:
bw = "taylor"
else:
bw = "experimental"
grid, density = kde(values, is_circular, bw=bw, adaptive=adaptive, cumulative=cumulative)
lower, upper = grid[0], grid[-1]
if cumulative:
density_q = density
else:
density_q = density.cumsum() / density.sum()
# This is just a hack placeholder for now
xmin, xmax, ymin, ymax, gridsize = [None] * 5
else:
gridsize = (128, 128) if contour else (256, 256)
density, xmin, xmax, ymin, ymax = _fast_kde_2d(values, values2, gridsize=gridsize)
if hdi_probs is not None:
# Check hdi probs are within bounds (0, 1)
if min(hdi_probs) <= 0 or max(hdi_probs) >= 1:
raise ValueError("Highest density interval probabilities must be between 0 and 1")
# Calculate contour levels and sort for matplotlib
contour_levels = _find_hdi_contours(density, hdi_probs)
contour_levels.sort()
contour_level_list = [0] + list(contour_levels) + [density.max()]
# Add keyword arguments to contour, contourf
contour_kwargs = _init_kwargs_dict(contour_kwargs)
if "levels" in contour_kwargs:
warnings.warn(
"Both 'levels' in contour_kwargs and 'hdi_probs' have been specified."
"Using 'hdi_probs' in favor of 'levels'.",
UserWarning,
)
contour_kwargs["levels"] = contour_level_list
contourf_kwargs = _init_kwargs_dict(contourf_kwargs)
if "levels" in contourf_kwargs:
warnings.warn(
"Both 'levels' in contourf_kwargs and 'hdi_probs' have been specified."
"Using 'hdi_probs' in favor of 'levels'.",
UserWarning,
)
contourf_kwargs["levels"] = contour_level_list
lower, upper, density_q = [None] * 3
kde_plot_args = dict(
# Internal API
density=density,
lower=lower,
upper=upper,
density_q=density_q,
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax,
gridsize=gridsize,
# User Facing API that can be simplified
values=values,
values2=values2,
rug=rug,
label=label,
quantiles=quantiles,
rotated=rotated,
contour=contour,
fill_last=fill_last,
figsize=figsize,
textsize=textsize,
plot_kwargs=plot_kwargs,
fill_kwargs=fill_kwargs,
rug_kwargs=rug_kwargs,
contour_kwargs=contour_kwargs,
contourf_kwargs=contourf_kwargs,
pcolormesh_kwargs=pcolormesh_kwargs,
is_circular=is_circular,
ax=ax,
legend=legend,
backend_kwargs=backend_kwargs,
show=show,
return_glyph=return_glyph,
**kwargs,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_kde", "kdeplot", backend)
ax = plot(**kde_plot_args)
return ax
|
32,425 |
def create_events_search(client: Client,
fetch_mode: str,
events_columns: str,
events_limit: int,
offense_id: int,
offense_start_time: str = None
) -> str:
additional_where = ''' AND LOGSOURCETYPENAME(devicetype) = 'Custom Rule Engine' ''' \
if fetch_mode == FetchMode.correlations_events_only.value else ''
try:
# If no start time was provided, fall back to the offense's own start time
if not offense_start_time:
offense = client.offenses_list(offense_id=offense_id)
offense_start_time = offense['start_time']
query_expression = (
f'SELECT {events_columns} FROM events WHERE INOFFENSE({offense_id}) {additional_where} limit {events_limit} '
f'START {offense_start_time}'
)
print_debug_msg(f'Creating search for offense ID: {offense_id}, '
f'query_expression: {query_expression}')
search_response = client.search_create(query_expression)
print_debug_msg(f'Created search for offense ID: {offense_id}, '
f'Start Time: {offense_start_time}, '
f'events_limit: {events_limit}, '
f'ret_value: {search_response}.')
return search_response['search_id'] if search_response['search_id'] else QueryStatus.ERROR.value
except Exception as e:
print_debug_msg(f'Search for {offense_id} failed. Error: {e}')
time.sleep(FAILURE_SLEEP)
return QueryStatus.ERROR.value
|
def create_events_search(client: Client,
fetch_mode: str,
events_columns: str,
events_limit: int,
offense_id: int,
offense_start_time: str = None,
) -> str:
additional_where = ''' AND LOGSOURCETYPENAME(devicetype) = 'Custom Rule Engine' ''' \
if fetch_mode == FetchMode.correlations_events_only.value else ''
try:
# If no start time was provided, fall back to the offense's own start time
if not offense_start_time:
offense = client.offenses_list(offense_id=offense_id)
offense_start_time = offense['start_time']
query_expression = (
f'SELECT {events_columns} FROM events WHERE INOFFENSE({offense_id}) {additional_where} limit {events_limit} '
f'START {offense_start_time}'
)
print_debug_msg(f'Creating search for offense ID: {offense_id}, '
f'query_expression: {query_expression}')
search_response = client.search_create(query_expression)
print_debug_msg(f'Created search for offense ID: {offense_id}, '
f'Start Time: {offense_start_time}, '
f'events_limit: {events_limit}, '
f'ret_value: {search_response}.')
return search_response['search_id'] if search_response['search_id'] else QueryStatus.ERROR.value
except Exception as e:
print_debug_msg(f'Search for {offense_id} failed. Error: {e}')
time.sleep(FAILURE_SLEEP)
return QueryStatus.ERROR.value
|
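For illustration only: a minimal sketch of the AQL string that create_events_search assembles, using assumed values for the offense ID, limit, columns, start time, and correlation-only WHERE clause; none of these values come from the row above.
# Hypothetical inputs -- not taken from the dataset row above.
offense_id = 123
events_limit = 100
events_columns = "QIDNAME(qid), LOGSOURCENAME(logsourceid), category"
offense_start_time = 1622505600000
additional_where = " AND LOGSOURCETYPENAME(devicetype) = 'Custom Rule Engine' "
query_expression = (
    f'SELECT {events_columns} FROM events WHERE INOFFENSE({offense_id}) {additional_where} limit {events_limit} '
    f'START {offense_start_time}'
)
print(query_expression)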
35,130 |
def _get_irmod_elemwise_add(shape: list, dtype: str, mem_scope: str) -> tvm.ir.module.IRModule:
"""
Return an IRModule containing a single primfunc, expressed as NS-TIR.
The primfunc implements elementwise-add. Its signature is (A,B,C), where
A and B are the input tensors, and C is the output tensor.
All three tensors have the specified shape, dtype, and mem_scope.
If the specified primfunc is known to be unsupported, raise an UnsupportedException.
"""
assert len(shape) == 2
# TVMScript can reference simple Python variables, but it doesn't
currently support more complex Python expressions...
(
dim0_size,
dim1_size,
) = shape
if mem_scope == "global.vtcm":
raise bu.UnsupportedException("This benchmark kernel does not yet support VTCM buffers.")
# This check is currently elided by the one above, but it should become relevant as soon
# as we add VTCM support to this kernel generator.
#
# Also: The VTCM budget is a very rough estimate, based only on experience.
# Assuming that it's even reasonable to use a hard-coded estimate AT ALL, this number
# may need tweaking.
# pylint: disable=unreachable
estimated_vtcm_budget_bytes = HVX_VECTOR_BYTES * 1024
dtype_bits = tvm._ffi.runtime_ctypes.DataType(dtype).bits
assert dtype_bits % 8 == 0
dtype_bytes = dtype_bits // 8
num_vtcm_tensors = 3
estimated_vtcm_needed_bytes = shape[0] * shape[1] * dtype_bytes * num_vtcm_tensors
if estimated_vtcm_needed_bytes > estimated_vtcm_budget_bytes:
raise bu.UnsupportedException("Expect to exceed VTCM budget.")
# pylint: enable=unreachable
@tvm.script.ir_module
class BenchmarkModule:
"""Elementwise STIR module for benchmarking"""
# pylint: disable=no-self-argument,invalid-name,missing-function-docstring
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle):
# We exchange data between functions by handles, which are similar to pointers.
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, shape, dtype=dtype)
B = T.match_buffer(b, shape, dtype=dtype)
C = T.match_buffer(c, shape, dtype=dtype)
for i in range(dim0_size):
for j in range(dim1_size):
C[i, j] = A[i, j] + B[i, j]
# pylint: enable=no-self-argument,invalid-name,missing-function-docstring
return BenchmarkModule
|
def _get_irmod_elemwise_add(shape: list, dtype: str, mem_scope: str) -> tvm.ir.module.IRModule:
"""
Return an IRModule containing a single primfunc, expressed as NS-TIR.
The primfunc implements elementwise-add. Its signature is (A,B,C), where
A and B are the input tensors, and C is the output tensor.
All three tensors have the specified shape, dtype, and mem_scope.
If the specified primfunc is known to be unsupported, raise an UnsupportedException.
"""
assert len(shape) == 2
# TVMScript can reference simple Python variables, but it doesn't
currently support more complex Python expressions...
(
dim0_size,
dim1_size,
) = shape
if mem_scope == "global.vtcm":
raise bu.UnsupportedException("This benchmark kernel does not yet support VTCM buffers.")
# This check is currently elided by the one above, but it should become relevant as soon
# as we add VTCM support to this kernel generator.
#
# Also: The VTCM budget is a very rough estimate, based only on experience.
# Assuming that it's even reasonable to use a hard-coded estimate AT ALL, this number
# may need tweaking.
# pylint: disable=unreachable
estimated_vtcm_budget_bytes = HVX_VECTOR_BYTES * 1024
dtype_bits = tvm._ffi.runtime_ctypes.DataType(dtype).bits
assert dtype_bits % 8 == 0
dtype_bytes = dtype_bits // 8
num_vtcm_tensors = 3
estimated_vtcm_needed_bytes = shape[0] * shape[1] * dtype_bytes * num_vtcm_tensors
if estimated_vtcm_needed_bytes > estimated_vtcm_budget_bytes:
raise bu.UnsupportedException("Expect to exceed VTCM budget.")
# pylint: enable=unreachable
@tvm.script.ir_module
class BenchmarkModule:
"""Elementwise STIR module for benchmarking"""
# pylint: disable=no-self-argument,invalid-name,missing-function-docstring
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle):
# We exchange data between functions by handles, which are similar to pointers.
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, shape, dtype=dtype)
B = T.match_buffer(b, shape, dtype=dtype)
C = T.match_buffer(c, shape, dtype=dtype)
for i in range(dim0_size):
for j in range(dim1_size):
C[i, j] = A[i, j] + B[i, j]
# pylint: enable=no-self-argument,invalid-name,missing-function-docstring
return BenchmarkModule
|
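A worked illustration of the VTCM budget arithmetic used in the kernel generator above; the shape, dtype, and HVX_VECTOR_BYTES value here are assumptions for the example, not values from the row.
# Assumed values for illustration only.
HVX_VECTOR_BYTES = 128
estimated_vtcm_budget_bytes = HVX_VECTOR_BYTES * 1024   # 131072 bytes (~128 KiB)
dtype_bytes = 16 // 8                                    # int16 -> 2 bytes per element
num_vtcm_tensors = 3                                     # A, B and C all resident
estimated_vtcm_needed_bytes = 128 * 64 * dtype_bytes * num_vtcm_tensors  # 49152
assert estimated_vtcm_needed_bytes <= estimated_vtcm_budget_bytes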
8,475 |
def qt_plugins_binaries(plugin_type, namespace):
"""
Return list of dynamic libraries formatted for mod.binaries.
:param plugin_type: Plugin to look for
:param namespace: Import namespace, i.e., PyQt4, PyQt5, PySide, or PySide2
:return: Plugin directory path corresponding to the given plugin_type
"""
if namespace not in ['PyQt4', 'PyQt5', 'PySide', 'PySide2']:
raise Exception('Invalid namespace: {0}'.format(namespace))
pdir = qt_plugins_dir(namespace=namespace)
files = []
for path in pdir:
files.extend(misc.dlls_in_dir(os.path.join(path, plugin_type)))
# Windows:
#
# dlls_in_dir() grabs all files ending with ``*.dll``, ``*.so`` and
# ``*.dylib`` in a certain directory. On Windows this would grab debug
# copies of Qt plugins, which then causes PyInstaller to add a dependency on
# the Debug CRT *in addition* to the release CRT.
#
# Since on Windows debug copies of Qt4 plugins end with "d4.dll" and Qt5
# plugins end with "d.dll" we filter them out of the list.
if ( is_win or is_cygwin ) and (namespace in ['PyQt4', 'PySide']):
files = [f for f in files if not f.endswith("d4.dll")]
elif ( is_win or is_cygwin ) and namespace in ['PyQt5', 'PySide2']:
logger.debug("Found plugin files %s for plugin %s", files, plugin_type)
files = [f for f in files if not f.endswith("d.dll")]
logger.debug("Found plugin files %s for plugin %s", files, plugin_type)
if namespace in ['PyQt4', 'PySide']:
plugin_dir = 'qt4_plugins'
elif namespace == 'PyQt5':
plugin_dir = os.path.join('PyQt5', 'Qt', 'plugins')
else:
plugin_dir = os.path.join('PySide2', 'plugins')
dest_dir = os.path.join(plugin_dir, plugin_type)
binaries = [(f, dest_dir) for f in files]
return binaries
|
def qt_plugins_binaries(plugin_type, namespace):
"""
Return list of dynamic libraries formatted for mod.binaries.
:param plugin_type: Plugin to look for
:param namespace: Import namespace, i.e., PyQt4, PyQt5, PySide, or PySide2
:return: Plugin directory path corresponding to the given plugin_type
"""
if namespace not in ['PyQt4', 'PyQt5', 'PySide', 'PySide2']:
raise Exception('Invalid namespace: {0}'.format(namespace))
pdir = qt_plugins_dir(namespace=namespace)
files = []
for path in pdir:
files.extend(misc.dlls_in_dir(os.path.join(path, plugin_type)))
# Windows:
#
# dlls_in_dir() grabs all files ending with ``*.dll``, ``*.so`` and
# ``*.dylib`` in a certain directory. On Windows this would grab debug
# copies of Qt plugins, which then causes PyInstaller to add a dependency on
# the Debug CRT *in addition* to the release CRT.
#
# Since on Windows debug copies of Qt4 plugins end with "d4.dll" and Qt5
# plugins end with "d.dll" we filter them out of the list.
if ( is_win or is_cygwin ) and (namespace in ['PyQt4', 'PySide']):
files = [f for f in files if not f.endswith("d4.dll")]
elif (is_win or is_cygwin) and namespace in ['PyQt5', 'PySide2']:
logger.debug("Found plugin files %s for plugin %s", files, plugin_type)
files = [f for f in files if not f.endswith("d.dll")]
logger.debug("Found plugin files %s for plugin %s", files, plugin_type)
if namespace in ['PyQt4', 'PySide']:
plugin_dir = 'qt4_plugins'
elif namespace == 'PyQt5':
plugin_dir = os.path.join('PyQt5', 'Qt', 'plugins')
else:
plugin_dir = os.path.join('PySide2', 'plugins')
dest_dir = os.path.join(plugin_dir, plugin_type)
binaries = [(f, dest_dir) for f in files]
return binaries
|
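A self-contained sketch of the debug-plugin filtering described in the comments above; the file names are made up and the suffix check mirrors the Qt5/PySide2 branch.
# Hypothetical plugin file list -- the "d.dll" entries are MSVC debug builds.
files = ["qwindows.dll", "qwindowsd.dll", "qjpeg.dll", "qjpegd.dll"]
release_only = [f for f in files if not f.endswith("d.dll")]
assert release_only == ["qwindows.dll", "qjpeg.dll"]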
30,457 |
def get_role_command():
args = demisto.args()
role_id = args['role-id']
customer = args['customer-id'] if args.get('customer-id') else GAPPS_ID
if not GAPPS_ID and not customer:
raise ValueError('Must provide Immutable GoogleApps Id')
role = get_role(role_id, customer)
return role_to_entry('Role %s details:' % (role_id,), role)
|
def get_role_command():
args = demisto.args()
role_id = args['role-id']
customer = args['customer-id'] if args.get('customer-id') else GAPPS_ID
if not customer:
raise ValueError('Must provide Immutable GoogleApps Id')
role = get_role(role_id, customer)
return role_to_entry('Role %s details:' % (role_id,), role)
|
24,963 |
def _create_checker_section(
checker: str, options: list[OptionsData], linter: PyLinter
) -> str:
checker_string = get_rst_title(f"``{checker.capitalize()}`` Checker", "^")
toml_doc = tomlkit.document()
pylint_tool_table = tomlkit.table(is_super_table=True)
toml_doc.add(tomlkit.key(["tool", "pylint"]), pylint_tool_table)
checker_table = tomlkit.table()
for option in sorted(options, key=lambda x: x):
checker_string += get_rst_title(f"--{option.name}", '"')
checker_string += f"\nDescription: *{option.optdict.get('help')}*\n\n"
checker_string += f"Default: *{option.optdict.get('default')}*\n\n\n"
# Start adding the option to the toml example
if option.optdict.get("hide_from_config_file"):
continue
# Get current value of option
value = getattr(linter.config, option.name.replace("-", "_"))
# Create a comment if the option has no value
if value is None:
checker_table.add(tomlkit.comment(f"{option.name} ="))
checker_table.add(tomlkit.nl())
continue
# Tomlkit doesn't support regular expressions
if isinstance(value, re.Pattern):
value = value.pattern
elif (
isinstance(value, (list, tuple))
and value
and isinstance(value[0], re.Pattern)
):
value = [i.pattern for i in value]
# Add to table
checker_table.add(option.name, value)
checker_table.add(tomlkit.nl())
pylint_tool_table.add(options[0].checker.name.lower(), checker_table)
toml_string = "\n".join(f" {i}" for i in tomlkit.dumps(toml_doc).split("\n"))
checker_string += f"""
.. raw:: html
<details>
<summary><a>Example configuration section</a></summary>
**Note:** Only ``pylint.tool`` is required, the section title is not. These are the default values.
.. code::
{toml_string}
.. raw:: html
</details>
"""
return checker_string
|
def _create_checker_section(
checker: str, options: list[OptionsData], linter: PyLinter
) -> str:
checker_string = get_rst_title(f"``{checker.capitalize()}`` Checker", "^")
toml_doc = tomlkit.document()
pylint_tool_table = tomlkit.table(is_super_table=True)
toml_doc.add(tomlkit.key(["tool", "pylint"]), pylint_tool_table)
checker_table = tomlkit.table()
for option in sorted(options):
checker_string += get_rst_title(f"--{option.name}", '"')
checker_string += f"\nDescription: *{option.optdict.get('help')}*\n\n"
checker_string += f"Default: *{option.optdict.get('default')}*\n\n\n"
# Start adding the option to the toml example
if option.optdict.get("hide_from_config_file"):
continue
# Get current value of option
value = getattr(linter.config, option.name.replace("-", "_"))
# Create a comment if the option has no value
if value is None:
checker_table.add(tomlkit.comment(f"{option.name} ="))
checker_table.add(tomlkit.nl())
continue
# Tomlkit doesn't support regular expressions
if isinstance(value, re.Pattern):
value = value.pattern
elif (
isinstance(value, (list, tuple))
and value
and isinstance(value[0], re.Pattern)
):
value = [i.pattern for i in value]
# Add to table
checker_table.add(option.name, value)
checker_table.add(tomlkit.nl())
pylint_tool_table.add(options[0].checker.name.lower(), checker_table)
toml_string = "\n".join(f" {i}" for i in tomlkit.dumps(toml_doc).split("\n"))
checker_string += f"""
.. raw:: html
<details>
<summary><a>Example configuration section</a></summary>
**Note:** Only ``pylint.tool`` is required, the section title is not. These are the default values.
.. code::
{toml_string}
.. raw:: html
</details>
"""
return checker_string
|
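A minimal, standalone sketch of the tomlkit pattern used in _create_checker_section: a super-table keyed ["tool", "pylint"] with a nested per-checker table renders as a single [tool.pylint.<checker>] section. The checker name and option below are assumptions, not pylint defaults.
import tomlkit
doc = tomlkit.document()
pylint_tool_table = tomlkit.table(is_super_table=True)
doc.add(tomlkit.key(["tool", "pylint"]), pylint_tool_table)
checker_table = tomlkit.table()
checker_table.add("good-names", ["i", "j", "k"])   # hypothetical option and value
checker_table.add(tomlkit.nl())
pylint_tool_table.add("basic", checker_table)      # hypothetical checker name
print(tomlkit.dumps(doc))
# Expected to print something like:
# [tool.pylint.basic]
# good-names = ["i", "j", "k"]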
31,859 |
def hackerone_report_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
Retrieves a list with detailed information about all the reports.
:type client: ``Client``
:param client: Client object to be used.
:type args: ``Dict[str, str]``
:param args: The command arguments provided by the user.
:return: Standard command result.
:rtype: ``CommandResults``
"""
params = validate_report_list_args(args)
# Sending http request
response = client.http_request(method="GET", url_suffix=URL_SUFFIX["REPORTS"],
params=params)
result = response.get("data")
# Returning if data is empty or not present
if not result:
return CommandResults(readable_output=MESSAGES["NO_RECORDS_FOUND"].format("reports"))
# Creating the Human Readable
hr_response = prepare_hr_for_reports(result)
# Creating the Context data
context_data = remove_empty_elements_from_list(result)
return CommandResults(outputs_prefix="HackerOne.Report",
outputs_key_field="id",
outputs=context_data,
readable_output=hr_response,
raw_response=response
)
|
def hackerone_report_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
"""
Retrieves a list with detailed information about all the reports.
:type client: ``Client``
:param client: Client object to be used.
:type args: ``Dict[str, str]``
:param args: The command arguments provided by the user.
:return: Standard command result.
:rtype: ``CommandResults``
"""
params = validate_report_list_args(args)
# Sending http request
response = client.report_list(params=params)
result = response.get("data")
# Returning if data is empty or not present
if not result:
return CommandResults(readable_output=MESSAGES["NO_RECORDS_FOUND"].format("reports"))
# Creating the Human Readable
hr_response = prepare_hr_for_reports(result)
# Creating the Context data
context_data = remove_empty_elements_from_list(result)
return CommandResults(outputs_prefix="HackerOne.Report",
outputs_key_field="id",
outputs=context_data,
readable_output=hr_response,
raw_response=response
)
|
37,560 |
def _write_instruction(file_obj, instruction_tuple, custom_instructions, index_map):
gate_class_name = instruction_tuple[0].__class__.__name__
if (
(
not hasattr(library, gate_class_name)
and not hasattr(circuit_mod, gate_class_name)
and not hasattr(extensions, gate_class_name)
and not hasattr(quantum_initializer, gate_class_name)
)
or gate_class_name == "Gate"
or gate_class_name == "Instruction"
or isinstance(instruction_tuple[0], (library.BlueprintCircuit))
):
if instruction_tuple[0].name not in custom_instructions:
custom_instructions[instruction_tuple[0].name] = instruction_tuple[0]
gate_class_name = instruction_tuple[0].name
elif isinstance(instruction_tuple[0], library.PauliEvolutionGate):
gate_class_name = r"###PauliEvolutionGate_" + str(uuid.uuid4())
custom_instructions[gate_class_name] = instruction_tuple[0]
has_condition = False
condition_register = b""
condition_value = 0
if instruction_tuple[0].condition:
has_condition = True
if isinstance(instruction_tuple[0].condition[0], Clbit):
bit_index = index_map["c"][instruction_tuple[0].condition[0]]
condition_register = b"\x00" + str(bit_index).encode("utf8")
condition_value = int(instruction_tuple[0].condition[1])
else:
condition_register = instruction_tuple[0].condition[0].name.encode("utf8")
condition_value = instruction_tuple[0].condition[1]
gate_class_name = gate_class_name.encode("utf8")
label = getattr(instruction_tuple[0], "label")
if label:
label_raw = label.encode("utf8")
else:
label_raw = b""
instruction_raw = struct.pack(
INSTRUCTION_PACK,
len(gate_class_name),
len(label_raw),
len(instruction_tuple[0].params),
instruction_tuple[0].num_qubits,
instruction_tuple[0].num_clbits,
has_condition,
len(condition_register),
condition_value,
)
file_obj.write(instruction_raw)
file_obj.write(gate_class_name)
file_obj.write(label_raw)
file_obj.write(condition_register)
# Encode instruction args
for qbit in instruction_tuple[1]:
instruction_arg_raw = struct.pack(INSTRUCTION_ARG_PACK, b"q", index_map["q"][qbit])
file_obj.write(instruction_arg_raw)
for clbit in instruction_tuple[2]:
instruction_arg_raw = struct.pack(INSTRUCTION_ARG_PACK, b"c", index_map["c"][clbit])
file_obj.write(instruction_arg_raw)
# Encode instruction params
for param in instruction_tuple[0].params:
container = io.BytesIO()
if isinstance(param, int):
type_key = "i"
data = struct.pack("<q", param)
size = struct.calcsize("<q")
elif isinstance(param, float):
type_key = "f"
data = struct.pack("<d", param)
size = struct.calcsize("<d")
elif isinstance(param, str):
type_key = "s"
data = param.encode("utf8")
size = len(data)
elif isinstance(param, Parameter):
type_key = "p"
_write_parameter(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif isinstance(param, ParameterExpression):
type_key = "e"
_write_parameter_expression(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif isinstance(param, complex):
type_key = "c"
data = struct.pack(COMPLEX_PACK, param.real, param.imag)
size = struct.calcsize(COMPLEX_PACK)
elif isinstance(param, (np.integer, np.floating, np.ndarray, np.complexfloating)):
type_key = "n"
np.save(container, param)
container.seek(0)
data = container.read()
size = len(data)
else:
raise TypeError(
f"Invalid parameter type {instruction_tuple[0]} for gate {type(param)},"
)
instruction_param_raw = struct.pack(INSTRUCTION_PARAM_PACK, type_key.encode("utf8"), size)
file_obj.write(instruction_param_raw)
file_obj.write(data)
container.close()
|
def _write_instruction(file_obj, instruction_tuple, custom_instructions, index_map):
gate_class_name = instruction_tuple[0].__class__.__name__
if (
(
not hasattr(library, gate_class_name)
and not hasattr(circuit_mod, gate_class_name)
and not hasattr(extensions, gate_class_name)
and not hasattr(quantum_initializer, gate_class_name)
)
or gate_class_name == "Gate"
or gate_class_name == "Instruction"
or isinstance(instruction_tuple[0], library.BlueprintCircuit)
):
if instruction_tuple[0].name not in custom_instructions:
custom_instructions[instruction_tuple[0].name] = instruction_tuple[0]
gate_class_name = instruction_tuple[0].name
elif isinstance(instruction_tuple[0], library.PauliEvolutionGate):
gate_class_name = r"###PauliEvolutionGate_" + str(uuid.uuid4())
custom_instructions[gate_class_name] = instruction_tuple[0]
has_condition = False
condition_register = b""
condition_value = 0
if instruction_tuple[0].condition:
has_condition = True
if isinstance(instruction_tuple[0].condition[0], Clbit):
bit_index = index_map["c"][instruction_tuple[0].condition[0]]
condition_register = b"\x00" + str(bit_index).encode("utf8")
condition_value = int(instruction_tuple[0].condition[1])
else:
condition_register = instruction_tuple[0].condition[0].name.encode("utf8")
condition_value = instruction_tuple[0].condition[1]
gate_class_name = gate_class_name.encode("utf8")
label = getattr(instruction_tuple[0], "label")
if label:
label_raw = label.encode("utf8")
else:
label_raw = b""
instruction_raw = struct.pack(
INSTRUCTION_PACK,
len(gate_class_name),
len(label_raw),
len(instruction_tuple[0].params),
instruction_tuple[0].num_qubits,
instruction_tuple[0].num_clbits,
has_condition,
len(condition_register),
condition_value,
)
file_obj.write(instruction_raw)
file_obj.write(gate_class_name)
file_obj.write(label_raw)
file_obj.write(condition_register)
# Encode instruction args
for qbit in instruction_tuple[1]:
instruction_arg_raw = struct.pack(INSTRUCTION_ARG_PACK, b"q", index_map["q"][qbit])
file_obj.write(instruction_arg_raw)
for clbit in instruction_tuple[2]:
instruction_arg_raw = struct.pack(INSTRUCTION_ARG_PACK, b"c", index_map["c"][clbit])
file_obj.write(instruction_arg_raw)
# Encode instruction params
for param in instruction_tuple[0].params:
container = io.BytesIO()
if isinstance(param, int):
type_key = "i"
data = struct.pack("<q", param)
size = struct.calcsize("<q")
elif isinstance(param, float):
type_key = "f"
data = struct.pack("<d", param)
size = struct.calcsize("<d")
elif isinstance(param, str):
type_key = "s"
data = param.encode("utf8")
size = len(data)
elif isinstance(param, Parameter):
type_key = "p"
_write_parameter(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif isinstance(param, ParameterExpression):
type_key = "e"
_write_parameter_expression(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif isinstance(param, complex):
type_key = "c"
data = struct.pack(COMPLEX_PACK, param.real, param.imag)
size = struct.calcsize(COMPLEX_PACK)
elif isinstance(param, (np.integer, np.floating, np.ndarray, np.complexfloating)):
type_key = "n"
np.save(container, param)
container.seek(0)
data = container.read()
size = len(data)
else:
raise TypeError(
f"Invalid parameter type {instruction_tuple[0]} for gate {type(param)},"
)
instruction_param_raw = struct.pack(INSTRUCTION_PARAM_PACK, type_key.encode("utf8"), size)
file_obj.write(instruction_param_raw)
file_obj.write(data)
container.close()
|
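A tiny worked example of the float branch in the parameter-encoding loop above; the "<d" format string comes from the code itself, while the surrounding QPY framing (INSTRUCTION_PARAM_PACK and friends) is defined elsewhere in the module and is not reproduced here.
import struct
param = 0.5                         # hypothetical gate parameter
data = struct.pack("<d", param)     # little-endian 8-byte double, as in the "f" branch
size = struct.calcsize("<d")
assert size == len(data) == 8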
1,797 |
def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of the samples.
np.random.seed(1)
n_samples = 1000
n_features = 2
n_half_samples = int(n_samples / 2)
x = np.random.normal(0.0, 0.001, (n_samples, n_features))
y = np.zeros(n_samples)
y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]
# Fitting linear regression with perfect prediction
lr = LinearRegression(fit_intercept=False)
lr.fit(x, y)
# When all samples are weighted with the same weights, the ratio of
# the two feature importances should equal 1 in expectation (when using
# mean absolute error as the loss function).
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000)
x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w_none, 2) == 1.00
# When passing a vector of ones as the sample_weight, results should be
# the same as in the case that sample_weight=None.
w = np.ones(n_samples)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000, sample_weight=w)
x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_ones == x1_x2_imp_ratio_w_none
# When the ratio between the weights of the first half of the samples and
# the second half of the samples approaches infinity, the ratio of
# the two feature importances should equal 2 in expectation (when using
# mean absolute error as the loss function).
w = np.hstack([np.repeat(10.0 ** 10, n_half_samples),
np.repeat(1.0, n_half_samples)])
lr.fit(x, y, w)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000,
sample_weight=w)
x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none, 2) == 2.00
|
def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of the samples.
np.random.seed(1)
n_samples = 1000
n_features = 2
n_half_samples = int(n_samples / 2)
x = np.random.normal(0.0, 0.001, (n_samples, n_features))
y = np.zeros(n_samples)
y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]
# Fitting linear regression with perfect prediction
lr = LinearRegression(fit_intercept=False)
lr.fit(x, y)
# When all samples are weighted with the same weights, the ratio of
# the two feature importances should equal 1 in expectation (when using
# mean absolute error as the loss function).
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000)
x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w_none, 2) == 1.00
# When passing a vector of ones as the sample_weight, results should be
# the same as in the case that sample_weight=None.
w = np.ones(n_samples)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000, sample_weight=w)
x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_ones == x1_x2_imp_ratio_w_none
# When the ratio between the weights of the first half of the samples and
# the second half of the samples approaches infinity, the ratio of
# the two feature importances should equal 2 in expectation (when using
# mean absolute error as the loss function).
w = np.hstack([np.repeat(10.0 ** 10, n_half_samples),
np.repeat(1.0, n_half_samples)])
lr.fit(x, y, w)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000,
sample_weight=w)
x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none == pytest.approx(2)
|
3,854 |
def prefix_tree(paths):
"""Creates a directed prefix tree from a list of paths.
Usually the paths are described as strings or lists of integers.
A "prefix tree" represents the prefix structure of the strings.
Each node represents a prefix of some string. The root represents
the empty prefix with children for the single letter prefixes which
in turn have children for each double letter prefixes starting with
the single letter corresponding to the parent node, and so on.
More generally the prefixes do not need to be strings. A prefix refers
to the start of a sequence. The root has children for each one element
prefix and they have children for each two element prefix that starts
with the one element sequence of the parent, and so on.
Note that this implementation uses integer nodes with an attribute.
Each node is associated with a prefix. But to save space the last
element of the prefix is stored as a node attribute named `source`.
The prefix can be constructed by traversing the tree up to the root.
All the descendants of a node have a common prefix in the sequence
associated with that node.
Parameters
----------
paths: iterable of paths
An iterable of paths which are themselves sequences.
Matching prefixes among these sequences are identified with
nodes of the prefix tree. One leaf of the tree is associated
with each path. (Identical paths are associated with the same
leaf of the tree.)
Returns
-------
tree: DiGraph
A directed graph representing an arborescence consisting of the
prefix tree generated by `paths`. Nodes are directed "downward",
from parent to child. A special "synthetic" root node is added
to be the parent of the first node in each path. A special
"synthetic" leaf node, the "nil" node `-1`, is added to be the child
of all nodes representing the last element in a path. (The
addition of this nil node technically makes this not an
arborescence but a directed acyclic graph; removing the nil node
makes it an arborescence.)
Each node has an attribute 'source' whose value is the original
element of the path to which this node corresponds. The 'source'
of the root node is None, and the 'source' of the nil node is
:data:`.NIL`.
The root node, `0`, is the only node of in-degree zero in the graph,
and the nil node is the only node of out-degree zero. For
convenience, the nil node is always `-1`.
Notes
-----
The prefix tree is also known as a *trie*.
Examples
--------
Create a prefix tree from a list of strings with common prefixes::
>>> paths = ["ab", "abs", "ad"]
>>> T = nx.prefix_tree(paths)
>>> list(T.edges)
[(0, 1), (1, 2), (1, 4), (2, -1), (2, 3), (3, -1), (4, -1)]
The leaf nodes can be obtained as predecessors of the nil node.
>>> root, NIL = 0, -1
>>> list(T.predecessors(NIL))
[2, 3, 4]
To recover the original paths that generated the prefix tree,
traverse up the tree from the node `-1` to the node `0`::
>>> recovered = []
>>> for v in T.predecessors(NIL):
... prefix = ""
... while v != root:
... prefix = str(T.nodes[v]["source"]) + prefix
... v = next(T.predecessors(v)) # only one predecessor
... recovered.append(prefix)
>>> sorted(recovered)
['ab', 'abs', 'ad']
"""
def _helper(paths, root, tree):
"""Recursively create a trie from the given list of paths.
`paths` is a list of paths, each of which is itself a list of
nodes, relative to the given `root` (but not including it). This
list of paths will be interpreted as a tree-like structure, in
which two paths that share a prefix represent two branches of
the tree with the same initial segment.
`root` is the parent of the node at index 0 in each path.
`tree` is the "accumulator", the :class:`networkx.DiGraph`
representing the branching to which the new nodes and edges will
be added.
"""
# For each path, remove the first node and make it a child of root.
# Any remaining paths then get processed recursively.
children = defaultdict(list)
for path in paths:
# If path is empty, we add an edge to the NIL node.
if not path:
tree.add_edge(root, NIL)
continue
child, *rest = path
# `child` may exist as the head of more than one path in `paths`.
children[child].append(rest)
# Add a node for each child, connect root, recurse to remaining paths
for child, remaining_paths in children.items():
# We relabel each child with an unused name.
new_name = len(tree) - 1
# The "source" node attribute stores the original node name.
tree.add_node(new_name, source=child)
tree.add_edge(root, new_name)
_helper(remaining_paths, new_name, tree)
# Initialize the prefix tree with a root node and a nil node.
tree = nx.DiGraph()
root = 0
tree.add_node(root, source=None)
NIL = -1
tree.add_node(NIL, source="NIL")
# Populate the tree.
_helper(paths, root, tree)
return tree
|
def prefix_tree(paths):
"""Creates a directed prefix tree from a list of paths.
Usually the paths are described as strings or lists of integers.
A "prefix tree" represents the prefix structure of the strings.
Each node represents a prefix of some string. The root represents
the empty prefix with children for the single letter prefixes which
in turn have children for each double letter prefix starting with
the single letter corresponding to the parent node, and so on.
More generally the prefixes do not need to be strings. A prefix refers
to the start of a sequence. The root has children for each one element
prefix and they have children for each two element prefix that starts
with the one element sequence of the parent, and so on.
Note that this implementation uses integer nodes with an attribute.
Each node is associated with a prefix. But to save space the last
element of the prefix is stored as a node attribute named `source`.
The prefix can be constructed by traversing the tree up to the root.
All the descendants of a node have a common prefix in the sequence
associated with that node.
Parameters
----------
paths: iterable of paths
An iterable of paths which are themselves sequences.
Matching prefixes among these sequences are identified with
nodes of the prefix tree. One leaf of the tree is associated
with each path. (Identical paths are associated with the same
leaf of the tree.)
Returns
-------
tree: DiGraph
A directed graph representing an arborescence consisting of the
prefix tree generated by `paths`. Nodes are directed "downward",
from parent to child. A special "synthetic" root node is added
to be the parent of the first node in each path. A special
"synthetic" leaf node, the "nil" node `-1`, is added to be the child
of all nodes representing the last element in a path. (The
addition of this nil node technically makes this not an
arborescence but a directed acyclic graph; removing the nil node
makes it an arborescence.)
Each node has an attribute 'source' whose value is the original
element of the path to which this node corresponds. The 'source'
of the root node is None, and the 'source' of the nil node is
:data:`.NIL`.
The root node, `0`, is the only node of in-degree zero in the graph,
and the nil node is the only node of out-degree zero. For
convenience, the nil node is always `-1`.
Notes
-----
The prefix tree is also known as a *trie*.
Examples
--------
Create a prefix tree from a list of strings with common prefixes::
>>> paths = ["ab", "abs", "ad"]
>>> T = nx.prefix_tree(paths)
>>> list(T.edges)
[(0, 1), (1, 2), (1, 4), (2, -1), (2, 3), (3, -1), (4, -1)]
The leaf nodes can be obtained as predecessors of the nil node.
>>> root, NIL = 0, -1
>>> list(T.predecessors(NIL))
[2, 3, 4]
To recover the original paths that generated the prefix tree,
traverse up the tree from the node `-1` to the node `0`::
>>> recovered = []
>>> for v in T.predecessors(NIL):
... prefix = ""
... while v != root:
... prefix = str(T.nodes[v]["source"]) + prefix
... v = next(T.predecessors(v)) # only one predecessor
... recovered.append(prefix)
>>> sorted(recovered)
['ab', 'abs', 'ad']
"""
def _helper(paths, root, tree):
"""Recursively create a trie from the given list of paths.
`paths` is a list of paths, each of which is itself a list of
nodes, relative to the given `root` (but not including it). This
list of paths will be interpreted as a tree-like structure, in
which two paths that share a prefix represent two branches of
the tree with the same initial segment.
`root` is the parent of the node at index 0 in each path.
`tree` is the "accumulator", the :class:`networkx.DiGraph`
representing the branching to which the new nodes and edges will
be added.
"""
# For each path, remove the first node and make it a child of root.
# Any remaining paths then get processed recursively.
children = defaultdict(list)
for path in paths:
# If path is empty, we add an edge to the NIL node.
if not path:
tree.add_edge(root, NIL)
continue
child, *rest = path
# `child` may exist as the head of more than one path in `paths`.
children[child].append(rest)
# Add a node for each child, connect root, recurse to remaining paths
for child, remaining_paths in children.items():
# We relabel each child with an unused name.
new_name = len(tree) - 1
# The "source" node attribute stores the original node name.
tree.add_node(new_name, source=child)
tree.add_edge(root, new_name)
_helper(remaining_paths, new_name, tree)
# Initialize the prefix tree with a root node and a nil node.
tree = nx.DiGraph()
root = 0
tree.add_node(root, source=None)
NIL = -1
tree.add_node(NIL, source="NIL")
# Populate the tree.
_helper(paths, root, tree)
return tree
|
5,615 |
def rank_distance(x, y, weights=None, method='spearman'):
"""
Distance measure between rankings.
Parameters
----------
x, y: array-like
1-D permutations of [1..N] vector
weights: array-like, optional
1-D array of weights. Default None equals to unit weights.
method: {'spearman', 'kendalltau'}, optional
Defines the method to find distance:
'spearman' - sum of absolute distance between same elements
in x and y.
'kendalltau' - number of inversions needed to bring x to y.
Default is 'spearman'.
Return
------
distance: float
Distance between x and y.
Example
-------
>>> from scipy.stats import rank_distance
>>> rank_distance([1,3,4,2],[2,3,1,4])
6.0
>>> rank_distance([1,3,4,2],[2,3,1,4], method='kendalltau')
4.0
"""
x = np.asarray(x)
y = np.asarray(y)
if np.unique(x).size != x.size or np.unique(y).size != y.size:
raise ValueError("x and y must have only unique elements")
if x.size != y.size:
raise ValueError("x and y have different size")
if weights is None:
weights = np.ones(x.size - 1)
else:
weights = np.asarray(weights)
if weights.size < (x.size - 1):
raise ValueError("weights vector have a small size")
if method == 'spearman':
return _spearman_footrule(x, y, weights)
elif method == 'kendalltau':
return _kendalltau_distance(x, y, weights)
else:
raise ValueError("unknown value for method parameter.")
|
def rank_distance(x, y, weights=None, method='spearman'):
"""
Distance measure between rankings.
Parameters
----------
x, y: array-like
1-D permutations of [1..N] vector
weights: array-like, optional
1-D array of weights. Default `None` is equivalent to unit weights.
method: {'spearman', 'kendalltau'}, optional
Defines the method to find distance:
'spearman' - sum of absolute distance between same elements
in x and y.
'kendalltau' - number of inversions needed to bring x to y.
Default is 'spearman'.
Return
------
distance: float
Distance between x and y.
Example
-------
>>> from scipy.stats import rank_distance
>>> rank_distance([1,3,4,2],[2,3,1,4])
6.0
>>> rank_distance([1,3,4,2],[2,3,1,4], method='kendalltau')
4.0
"""
x = np.asarray(x)
y = np.asarray(y)
if np.unique(x).size != x.size or np.unique(y).size != y.size:
raise ValueError("x and y must have only unique elements")
if x.size != y.size:
raise ValueError("x and y have different size")
if weights is None:
weights = np.ones(x.size - 1)
else:
weights = np.asarray(weights)
if weights.size < (x.size - 1):
raise ValueError("weights vector have a small size")
if method == 'spearman':
return _spearman_footrule(x, y, weights)
elif method == 'kendalltau':
return _kendalltau_distance(x, y, weights)
else:
raise ValueError("unknown value for method parameter.")
|
44,085 |
def tensor_wrapper(obj):
"""Decorator that wraps callable objects and classes so that they both accept
a ``requires_grad`` keyword argument, as well as returning a PennyLane
:class:`~.tensor`.
Only if the decorated object returns an ``ndarray`` is the
output converted to a :class:`~.tensor`; this avoids superfluous conversion
of scalars and other native-Python types.
.. note::
This wrapper does *not* enable autodifferentiation of the wrapped function,
it merely adds support for :class:`~pennylane.numpy.tensor` output.
Args:
obj: a callable object or class
**Example**
By default, the ``ones`` function provided by Autograd
constructs standard ``ndarray`` objects, and does not
permit a ``requires_grad`` argument:
>>> from autograd.numpy import ones
>>> ones([2, 2])
array([[1., 1.],
[1., 1.]])
>>> ones([2, 2], requires_grad=True)
TypeError: ones() got an unexpected keyword argument 'requires_grad'
``tensor_wrapper`` both enables construction of :class:`~pennylane.numpy.tensor`
objects, while also converting the output.
>>> from pennylane import numpy as np
>>> ones = np.tensor_wrapper(ones)
>>> ones([2, 2], requires_grad=True)
tensor([[1., 1.],
[1., 1.]], requires_grad=True)
"""
@functools.wraps(obj)
def _wrapped(*args, **kwargs):
"""Wrapped NumPy function"""
tensor_kwargs = {}
if "requires_grad" in kwargs:
tensor_kwargs["requires_grad"] = kwargs.pop("requires_grad")
else:
tensor_args = list(extract_tensors(args))
if tensor_args:
# Unless the user specifies otherwise, if all tensors in the argument
# list are non-trainable, the output is also non-trainable.
# Equivalently: if any tensor is trainable, the output is also trainable.
# NOTE: Use of Python's ``any`` results in an infinite recursion,
# and I'm not sure why. Using ``np.any`` works fine.
tensor_kwargs["requires_grad"] = _np.any([i.requires_grad for i in tensor_args])
# evaluate the original object
res = obj(*args, **kwargs)
if isinstance(res, _np.ndarray):
# only if the output of the object is a ndarray,
# then convert to a PennyLane tensor
res = tensor(res, **tensor_kwargs)
return res
return _wrapped
|
def tensor_wrapper(obj):
"""Decorator that wraps callable objects and classes so that they both accept
a ``requires_grad`` keyword argument, as well as returning a PennyLane
:class:`~.tensor`.
Only if the decorated object returns an ``ndarray`` is the
output converted to a :class:`~.tensor`; this avoids superfluous conversion
of scalars and other native-Python types.
.. note::
This wrapper does *not* enable autodifferentiation of the wrapped function,
it merely adds support for :class:`~pennylane.numpy.tensor` output.
callable classes are ignored; only functions are wrapped.
Args:
obj: a callable object or class
**Example**
By default, the ``ones`` function provided by Autograd
constructs standard ``ndarray`` objects, and does not
permit a ``requires_grad`` argument:
>>> from autograd.numpy import ones
>>> ones([2, 2])
array([[1., 1.],
[1., 1.]])
>>> ones([2, 2], requires_grad=True)
TypeError: ones() got an unexpected keyword argument 'requires_grad'
``tensor_wrapper`` both enables construction of :class:`~pennylane.numpy.tensor`
objects, while also converting the output.
>>> from pennylane import numpy as np
>>> ones = np.tensor_wrapper(ones)
>>> ones([2, 2], requires_grad=True)
tensor([[1., 1.],
[1., 1.]], requires_grad=True)
"""
@functools.wraps(obj)
def _wrapped(*args, **kwargs):
"""Wrapped NumPy function"""
tensor_kwargs = {}
if "requires_grad" in kwargs:
tensor_kwargs["requires_grad"] = kwargs.pop("requires_grad")
else:
tensor_args = list(extract_tensors(args))
if tensor_args:
# Unless the user specifies otherwise, if all tensors in the argument
# list are non-trainable, the output is also non-trainable.
# Equivalently: if any tensor is trainable, the output is also trainable.
# NOTE: Use of Python's ``any`` results in an infinite recursion,
# and I'm not sure why. Using ``np.any`` works fine.
tensor_kwargs["requires_grad"] = _np.any([i.requires_grad for i in tensor_args])
# evaluate the original object
res = obj(*args, **kwargs)
if isinstance(res, _np.ndarray):
# only if the output of the object is a ndarray,
# then convert to a PennyLane tensor
res = tensor(res, **tensor_kwargs)
return res
return _wrapped
|
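To make the trainability-propagation rule described in the docstring above concrete, here is a minimal, self-contained sketch using a hypothetical ToyTensor class in place of pennylane.numpy.tensor; it illustrates the idea only and is not the PennyLane implementation.
import functools
import numpy as np


class ToyTensor(np.ndarray):
    """Hypothetical stand-in for a tensor type that carries a requires_grad flag."""
    def __new__(cls, input_array, requires_grad=True):
        obj = np.asarray(input_array).view(cls)
        obj.requires_grad = requires_grad
        return obj


def toy_wrapper(fn):
    """Wrap fn so ndarray outputs become ToyTensors, propagating trainability."""
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        requires_grad = kwargs.pop("requires_grad", None)
        if requires_grad is None:
            tensor_args = [a for a in args if isinstance(a, ToyTensor)]
            # the output is trainable if any tensor argument is trainable
            requires_grad = any(bool(a.requires_grad) for a in tensor_args) if tensor_args else True
        res = fn(*args, **kwargs)
        return ToyTensor(res, requires_grad=requires_grad) if isinstance(res, np.ndarray) else res
    return wrapped


ones = toy_wrapper(np.ones)
t = ones((2, 2), requires_grad=False)
print(type(t).__name__, t.requires_grad)  # ToyTensor False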
25,964 |
def _handle_result(cmd, result_poller, source_registry, source_image, registry):
from msrestazure.azure_exceptions import ClientException
try:
result = LongRunningOperation(cmd.cli_ctx, 'Importing image...', 'Import has finished')(result_poller)
except CLIError as e:
try:
# if command fails, it might be because user specified registry twice in --source and --registry
if source_registry:
if not hasattr(registry, 'login_server'):
if is_valid_resource_id(source_registry):
registry, _ = get_registry_by_name(cmd.cli_ctx, parse_resource_id(source_registry)["name"])
else:
registry = get_registry_from_name_or_login_server(cmd.cli_ctx, source_registry, source_registry)
if registry.login_server.lower() in source_image.lower():
logger.warning("Import from source failed.\n\tsource image: '%s'\n"
"Attention: When source registry is specified with `--registry`, "
"`--source` is considered to be a source image name. "
"Do not prefix `--source` with the registry login server name.", "{}/{}"
.format(registry.login_server, source_image))
except (ClientException, CLIError) as unexpected_ex: # raise exception
logger.debug("Unexpected exception: %s", unexpected_ex)
raise e # regardless re-raise the CLIError as this is an error from the service
return result
|
def _handle_result(cmd, result_poller, source_registry, source_image, registry):
from msrestazure.azure_exceptions import ClientException
try:
result = LongRunningOperation(cmd.cli_ctx, 'Importing image...', 'Import succeeded')(result_poller)
except CLIError as e:
try:
# if command fails, it might be because user specified registry twice in --source and --registry
if source_registry:
if not hasattr(registry, 'login_server'):
if is_valid_resource_id(source_registry):
registry, _ = get_registry_by_name(cmd.cli_ctx, parse_resource_id(source_registry)["name"])
else:
registry = get_registry_from_name_or_login_server(cmd.cli_ctx, source_registry, source_registry)
if registry.login_server.lower() in source_image.lower():
logger.warning("Import from source failed.\n\tsource image: '%s'\n"
"Attention: When source registry is specified with `--registry`, "
"`--source` is considered to be a source image name. "
"Do not prefix `--source` with the registry login server name.", "{}/{}"
.format(registry.login_server, source_image))
except (ClientException, CLIError) as unexpected_ex: # raise exception
logger.debug("Unexpected exception: %s", unexpected_ex)
raise e # regardless re-raise the CLIError as this is an error from the service
return result
|
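The warning branch above fires only when the user appears to have prefixed --source with the registry login server. A standalone sketch of that check, with hypothetical names rather than the Azure CLI helpers:
def source_prefixed_with_login_server(login_server: str, source_image: str) -> bool:
    """Return True if the --source value already contains the registry login server."""
    return login_server.lower() in source_image.lower()


# e.g. --registry myregistry.azurecr.io --source myregistry.azurecr.io/app:v1
print(source_prefixed_with_login_server("myregistry.azurecr.io", "myregistry.azurecr.io/app:v1"))  # True
print(source_prefixed_with_login_server("myregistry.azurecr.io", "app:v1"))  # False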
56,238 |
def main():
args = build_argparser().parse_args()
if args.labels:
with open(args.labels) as f:
labels = [line.strip() for line in f]
else:
labels = None
ie = IECore()
if 'MYRIAD' in args.device:
myriad_config = {'VPU_HW_STAGES_OPTIMIZATION': 'YES'}
ie.set_config(myriad_config, 'MYRIAD')
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, 'CPU')
decoder_target_device = 'CPU'
if args.device != 'CPU':
encoder_target_device = args.device
else:
encoder_target_device = decoder_target_device
encoder_xml = args.m_encoder
encoder_bin = args.m_encoder.replace('.xml', '.bin')
model = []
model.append(IEModel(encoder_xml, encoder_bin, ie, encoder_target_device,
num_requests=(3 if args.device == 'MYRIAD' else 1)))
if args.architecture_type == 'dummy-de':
model.append(DummyDecoder(num_requests=2))
seq_size = args.decoder_seq_size
elif args.architecture_type == 'en-de':
if args.m_decoder is None:
raise RuntimeError('No decoder for encoder-decoder model type (-m_de) provided: {}'.format(args.m_decoder))
decoder_xml = args.m_decoder
decoder_bin = args.m_decoder.replace('.xml', '.bin')
model.append(IEModel(decoder_xml, decoder_bin, ie, decoder_target_device, num_requests=2))
seq_size = model[1].input_size[1]
elif args.architecture_type == 'i3d-rgb':
seq_size = model[0].input_size[2]
presenter = monitors.Presenter(args.utilization_monitors, 70)
result_presenter = ResultRenderer(no_show=args.no_show, architecture_type=args.architecture_type, presenter=presenter, output=args.output, limit=args.output_limit, labels=labels,
label_smoothing_window=args.label_smoothing)
cap = open_images_capture(args.input, args.loop)
run_pipeline(cap, args.architecture_type, model, result_presenter.render_frame, seq_size=seq_size, fps=cap.fps())
print(presenter.reportMeans())
|
def main():
args = build_argparser().parse_args()
if args.labels:
with open(args.labels) as f:
labels = [line.strip() for line in f]
else:
labels = None
ie = IECore()
if 'MYRIAD' in args.device:
myriad_config = {'VPU_HW_STAGES_OPTIMIZATION': 'YES'}
ie.set_config(myriad_config, 'MYRIAD')
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, 'CPU')
decoder_target_device = 'CPU'
if args.device != 'CPU':
encoder_target_device = args.device
else:
encoder_target_device = decoder_target_device
encoder_xml = args.m_encoder
encoder_bin = args.m_encoder.replace('.xml', '.bin')
model = [IEModel(encoder_xml, encoder_bin, ie, encoder_target_device,
num_requests=(3 if args.device == 'MYRIAD' else 1))]
if args.architecture_type == 'dummy-de':
model.append(DummyDecoder(num_requests=2))
seq_size = args.decoder_seq_size
elif args.architecture_type == 'en-de':
if args.m_decoder is None:
raise RuntimeError('No decoder for encoder-decoder model type (-m_de) provided: {}'.format(args.m_decoder))
decoder_xml = args.m_decoder
decoder_bin = args.m_decoder.replace('.xml', '.bin')
model.append(IEModel(decoder_xml, decoder_bin, ie, decoder_target_device, num_requests=2))
seq_size = model[1].input_size[1]
elif args.architecture_type == 'i3d-rgb':
seq_size = model[0].input_size[2]
presenter = monitors.Presenter(args.utilization_monitors, 70)
result_presenter = ResultRenderer(no_show=args.no_show, architecture_type=args.architecture_type, presenter=presenter, output=args.output, limit=args.output_limit, labels=labels,
label_smoothing_window=args.label_smoothing)
cap = open_images_capture(args.input, args.loop)
run_pipeline(cap, args.architecture_type, model, result_presenter.render_frame, seq_size=seq_size, fps=cap.fps())
print(presenter.reportMeans())
|
33,977 |
def convert_pandas_to_torch_tensor(
data_batch: pd.DataFrame,
columns: Optional[Union[List[str], List[List[str]]]] = None,
column_dtypes: Optional[Union[torch.dtype, List[torch.dtype]]] = None,
) -> Union[torch.Tensor, List[torch.Tensor]]:
"""Converts a Pandas dataframe to a torch Tensor or list of torch Tensors.
The format of the return type will match the format of ``columns``. If a
list of columns is provided, the return type will be a single tensor. If
    ``columns`` is a list of lists, then the return type will be a list of
tensors.
Args:
        data_batch (pandas.DataFrame): The pandas dataframe to convert to a
torch tensor.
        columns (Optional[Union[List[str], List[List[str]]]]):
The names of the columns in the dataframe to include in the
torch tensor. If this arg is a List[List[str]], then the return
type will be a List of tensors. This is useful for multi-input
models. If None, then use all columns in the ``data_batch``.
        column_dtypes (Optional[Union[torch.dtype, List[torch.dtype]]]): The
torch dtype to use for the tensor. If set to None,
then automatically
infer the dtype.
Returns:
Either a torch tensor of size (N, len(columns)) where N is the
number of rows in the ``data_batch`` Dataframe, or a list of
tensors, where the size of item i is (N, len(columns[i])).
"""
multi_input = columns and (isinstance(columns[0], (list, tuple)))
if multi_input and column_dtypes:
        if type(column_dtypes) not in [torch.dtype, list, tuple]:
raise TypeError(
"If `columns` is a list of lists, "
"`column_dtypes` must be None, `torch.dtype`,"
f" or a sequence, got {type(column_dtypes)}."
)
if (
not multi_input
and column_dtypes
and type(column_dtypes) not in [torch.dtype, list, tuple]
):
raise TypeError(
"If `columns` is a list of strings, "
"`column_dtypes` must be None or a single `torch.dtype`."
f"Got {type(column_dtypes)} instead."
)
def get_tensor_for_columns(columns, dtype):
feature_tensors = []
if columns:
batch = data_batch[columns]
else:
batch = data_batch
for col in batch.columns:
col_vals = batch[col].values
t = torch.as_tensor(col_vals, dtype=dtype)
t = t.view(-1, 1)
feature_tensors.append(t)
return torch.cat(feature_tensors, dim=1)
if multi_input:
if isinstance(column_dtypes, torch.dtype):
column_dtypes = [column_dtypes] * len(columns)
return [
get_tensor_for_columns(columns=subcolumns, dtype=dtype)
for subcolumns, dtype in zip(columns, column_dtypes)
]
else:
return get_tensor_for_columns(columns=columns, dtype=column_dtypes)
|
def convert_pandas_to_torch_tensor(
data_batch: pd.DataFrame,
columns: Optional[Union[List[str], List[List[str]]]] = None,
column_dtypes: Optional[Union[torch.dtype, List[torch.dtype]]] = None,
) -> Union[torch.Tensor, List[torch.Tensor]]:
"""Converts a Pandas dataframe to a torch Tensor or list of torch Tensors.
The format of the return type will match the format of ``columns``. If a
list of columns is provided, the return type will be a single tensor. If
    ``columns`` is a list of lists, then the return type will be a list of
tensors.
Args:
        data_batch (pandas.DataFrame): The pandas dataframe to convert to a
torch tensor.
        columns (Optional[Union[List[str], List[List[str]]]]):
The names of the columns in the dataframe to include in the
torch tensor. If this arg is a List[List[str]], then the return
type will be a List of tensors. This is useful for multi-input
models. If None, then use all columns in the ``data_batch``.
        column_dtypes (Optional[Union[torch.dtype, List[torch.dtype]]]): The
torch dtype to use for the tensor. If set to None,
then automatically infer the dtype.
Returns:
Either a torch tensor of size (N, len(columns)) where N is the
number of rows in the ``data_batch`` Dataframe, or a list of
tensors, where the size of item i is (N, len(columns[i])).
"""
multi_input = columns and (isinstance(columns[0], (list, tuple)))
if multi_input and column_dtypes:
        if type(column_dtypes) not in [torch.dtype, list, tuple]:
raise TypeError(
"If `columns` is a list of lists, "
"`column_dtypes` must be None, `torch.dtype`,"
f" or a sequence, got {type(column_dtypes)}."
)
if (
not multi_input
and column_dtypes
and type(column_dtypes) not in [torch.dtype, list, tuple]
):
raise TypeError(
"If `columns` is a list of strings, "
"`column_dtypes` must be None or a single `torch.dtype`."
f"Got {type(column_dtypes)} instead."
)
def get_tensor_for_columns(columns, dtype):
feature_tensors = []
if columns:
batch = data_batch[columns]
else:
batch = data_batch
for col in batch.columns:
col_vals = batch[col].values
t = torch.as_tensor(col_vals, dtype=dtype)
t = t.view(-1, 1)
feature_tensors.append(t)
return torch.cat(feature_tensors, dim=1)
if multi_input:
if isinstance(column_dtypes, torch.dtype):
column_dtypes = [column_dtypes] * len(columns)
return [
get_tensor_for_columns(columns=subcolumns, dtype=dtype)
for subcolumns, dtype in zip(columns, column_dtypes)
]
else:
return get_tensor_for_columns(columns=columns, dtype=column_dtypes)
|
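A usage sketch for the converter above, assuming pandas and torch are installed and convert_pandas_to_torch_tensor is importable in the current scope:
import pandas as pd
import torch

df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0], "c": [5.0, 6.0]})

# single tensor of shape (2, 2) built from two columns
t = convert_pandas_to_torch_tensor(df, columns=["a", "b"], column_dtypes=torch.float32)
print(t.shape)  # torch.Size([2, 2])

# multi-input model: a list of two tensors, shapes (2, 2) and (2, 1);
# a single dtype is broadcast across the sub-column lists
ts = convert_pandas_to_torch_tensor(df, columns=[["a", "b"], ["c"]], column_dtypes=torch.float32)
print([x.shape for x in ts])  # [torch.Size([2, 2]), torch.Size([2, 1])]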
8,488 |
def create_manifest(filename, manifest, console, uac_admin=False, uac_uiaccess=False):
"""
Create assembly manifest.
"""
if not manifest:
manifest = ManifestFromXMLFile(filename)
# /path/NAME.exe.manifest - split extension twice to get NAME.
name = os.path.basename(filename)
manifest.name = os.path.splitext(os.path.splitext(name)[0])[0]
elif isinstance(manifest, string_types) and "<" in manifest:
# Assume XML string
manifest = ManifestFromXML(manifest)
elif not isinstance(manifest, Manifest):
# Assume filename
manifest = ManifestFromXMLFile(manifest)
dep_names = set([dep.name for dep in manifest.dependentAssemblies])
if manifest.filename != filename:
# Update dependent assemblies
depmanifest = ManifestFromXMLFile(filename)
for assembly in depmanifest.dependentAssemblies:
if not assembly.name in dep_names:
manifest.dependentAssemblies.append(assembly)
dep_names.add(assembly.name)
if (not console and
not "Microsoft.Windows.Common-Controls" in dep_names):
# Add Microsoft.Windows.Common-Controls to dependent assemblies
manifest.dependentAssemblies.append(
Manifest(type_="win32",
name="Microsoft.Windows.Common-Controls",
language="*",
processorArchitecture=processor_architecture(),
version=(6, 0, 0, 0),
publicKeyToken="6595b64144ccf1df")
)
if uac_admin:
manifest.requestedExecutionLevel = 'requireAdministrator'
else:
manifest.requestedExecutionLevel = 'asInvoker'
if uac_uiaccess:
manifest.uiAccess = True
# only write a new manifest if it is different from the old
need_new = not os.path.exists(filename)
if not need_new:
old_xml = ManifestFromXMLFile(filename).toprettyxml().replace('\r','')
new_xml = manifest.toprettyxml().replace('\r','')
# this only works if PYTHONHASHSEED is set in environment
need_new = (old_xml != new_xml)
if need_new:
manifest.writeprettyxml(filename)
return manifest
|
def create_manifest(filename, manifest, console, uac_admin=False, uac_uiaccess=False):
"""
Create assembly manifest.
"""
if not manifest:
manifest = ManifestFromXMLFile(filename)
# /path/NAME.exe.manifest - split extension twice to get NAME.
name = os.path.basename(filename)
manifest.name = os.path.splitext(os.path.splitext(name)[0])[0]
elif isinstance(manifest, string_types) and "<" in manifest:
# Assume XML string
manifest = ManifestFromXML(manifest)
elif not isinstance(manifest, Manifest):
# Assume filename
manifest = ManifestFromXMLFile(manifest)
dep_names = set([dep.name for dep in manifest.dependentAssemblies])
if manifest.filename != filename:
# Update dependent assemblies
depmanifest = ManifestFromXMLFile(filename)
for assembly in depmanifest.dependentAssemblies:
if not assembly.name in dep_names:
manifest.dependentAssemblies.append(assembly)
dep_names.add(assembly.name)
if (not console and
not "Microsoft.Windows.Common-Controls" in dep_names):
# Add Microsoft.Windows.Common-Controls to dependent assemblies
manifest.dependentAssemblies.append(
Manifest(type_="win32",
name="Microsoft.Windows.Common-Controls",
language="*",
processorArchitecture=processor_architecture(),
version=(6, 0, 0, 0),
publicKeyToken="6595b64144ccf1df")
)
if uac_admin:
manifest.requestedExecutionLevel = 'requireAdministrator'
else:
manifest.requestedExecutionLevel = 'asInvoker'
if uac_uiaccess:
manifest.uiAccess = True
# only write a new manifest if it is different from the old
need_new = not os.path.exists(filename)
if not need_new:
old_xml = ManifestFromXMLFile(filename).toprettyxml().replace('\r', '')
new_xml = manifest.toprettyxml().replace('\r','')
# this only works if PYTHONHASHSEED is set in environment
need_new = (old_xml != new_xml)
if need_new:
manifest.writeprettyxml(filename)
return manifest
|
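The manifest is rewritten only when its pretty-printed XML differs from what is already on disk. A generic sketch of that compare-then-write pattern, using a hypothetical helper rather than the Manifest class:
import os


def write_if_changed(path, new_text):
    """Write new_text to path only if the file is missing or its content differs.
    Returns True if the file was (re)written."""
    if os.path.exists(path):
        with open(path, "r", encoding="utf-8") as fh:
            if fh.read() == new_text:
                return False  # identical content: leave the file untouched
    with open(path, "w", encoding="utf-8") as fh:
        fh.write(new_text)
    return True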
50,303 |
def test_delete_user(gl, wait_for_sidekiq):
new_user = gl.users.create(
{
"email": "delete-user@test.com",
"username": "delete-user",
"name": "delete-user",
"password": "delete-user-pass",
}
)
time.sleep(10)
new_user.delete()
wait_for_sidekiq()
assert new_user.id not in [user.id for user in gl.users.list()]
|
def test_delete_user(gl, wait_for_sidekiq):
new_user = gl.users.create(
{
"email": "delete-user@test.com",
"username": "delete-user",
"name": "delete-user",
"password": "delete-user-pass",
}
)
new_user.delete()
time.sleep(10)
wait_for_sidekiq()
assert new_user.id not in [user.id for user in gl.users.list()]
|
17,939 |
def read_user_configuration(default_configuration, command_line_parser):
configuration = default_configuration
args, unknown_args = command_line_parser.parse_known_args()
if 'configuration_file' in vars(args) and args.configuration_file:
file_configuration = {}
with open(args.configuration_file, "r") as file:
exec(file.read(), {}, file_configuration)
        # Configuration file overrides default configuration
        update(configuration, file_configuration)
    # Command line configuration overrides all configuration
gunicorn_parser = config.Config().parser()
configuration = update(configuration, vars(args))
configuration = update(configuration, vars(gunicorn_parser.parse_args(unknown_args)))
if configuration['args']:
command_line_parser.print_help()
log.error('Unexpected positional argument {}'.format(configuration['args']))
sys.exit(1)
return configuration
|
def read_user_configuration(default_configuration, command_line_parser):
configuration = default_configuration
args, unknown_args = command_line_parser.parse_known_args()
if args.configuration_file:
file_configuration = {}
with open(args.configuration_file, "r") as file:
exec(file.read(), {}, file_configuration)
        # Configuration file overrides default configuration
        update(configuration, file_configuration)
    # Command line configuration overrides all configuration
gunicorn_parser = config.Config().parser()
configuration = update(configuration, vars(args))
configuration = update(configuration, vars(gunicorn_parser.parse_args(unknown_args)))
if configuration['args']:
command_line_parser.print_help()
log.error('Unexpected positional argument {}'.format(configuration['args']))
sys.exit(1)
return configuration
|
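A minimal sketch of the precedence the comments above describe (defaults, then configuration file, then command line), using a hypothetical dict-merging helper rather than the project's own update function:
def merge(base, override):
    """Recursively merge override into base; later sources win, None values do not override."""
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge(base[key], value)
        elif value is not None:
            base[key] = value
    return base


defaults = {"bind": "127.0.0.1:8000", "workers": 1}
file_config = {"workers": 4}
cli_config = {"bind": "0.0.0.0:8000"}

config = merge(merge(dict(defaults), file_config), cli_config)
print(config)  # {'bind': '0.0.0.0:8000', 'workers': 4}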
7,317 |
def shape_index(image, sigma=1, mode='constant', cval=0):
"""Compute the shape index.
The shape index, as defined by Koenderink & van Doorn [1]_, is a
single valued measure of local curvature, assuming the image as a 3D plane
with intensities representing heights.
    It is derived from the eigenvalues of the Hessian, and its
    value ranges from -1 to 1 (and is undefined (=NaN) in *flat* regions),
    with the following ranges representing the following shapes:
.. table:: Ranges of the shape index and corresponding shapes.
=================== =============
Interval (s in ...) Shape
=================== =============
[ -1, -7/8) Spherical cup
    [-7/8, -5/8)        Trough
[-5/8, -3/8) Rut
[-3/8, -1/8) Saddle rut
[-1/8, +1/8) Saddle
[+1/8, +3/8) Saddle ridge
[+3/8, +5/8) Ridge
[+5/8, +7/8) Dome
[+7/8, +1] Spherical cap
=================== =============
Parameters
----------
image : 2D array
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used for
        smoothing the input data before Hessian eigenvalue calculation.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
s : ndarray
Shape index
References
----------
.. [1] Koenderink, J. J. & van Doorn, A. J.,
"Surface shape and curvature scales",
Image and Vision Computing, 1992, 10, 557-564.
:DOI:`10.1016/0262-8856(92)90076-F`
Examples
--------
>>> from skimage.feature import shape_index
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 4
>>> s = shape_index(square, sigma=0.1)
>>> s
array([[ nan, nan, -0.5, nan, nan],
[ nan, -0. , nan, -0. , nan],
[-0.5, nan, -1. , nan, -0.5],
[ nan, -0. , nan, -0. , nan],
[ nan, nan, -0.5, nan, nan]])
"""
H = hessian_matrix(image, sigma=sigma, mode=mode, cval=cval, order='rc')
l1, l2 = hessian_matrix_eigvals(H)
return (2.0 / np.pi) * np.arctan((l2 + l1) / (l2 - l1))
|
def shape_index(image, sigma=1, mode='constant', cval=0):
"""Compute the shape index.
The shape index, as defined by Koenderink & van Doorn [1]_, is a
single valued measure of local curvature, assuming the image as a 3D plane
with intensities representing heights.
    It is derived from the eigenvalues of the Hessian, and its
    value ranges from -1 to 1 (and is undefined (=NaN) in *flat* regions),
    with the following ranges representing the following shapes:
.. table:: Ranges of the shape index and corresponding shapes.
=================== =============
Interval (s in ...) Shape
=================== =============
[ -1, -7/8) Spherical cup
    [-7/8, -5/8)        Trough
[-5/8, -3/8) Rut
[-3/8, -1/8) Saddle rut
[-1/8, +1/8) Saddle
[+1/8, +3/8) Saddle ridge
[+3/8, +5/8) Ridge
[+5/8, +7/8) Dome
[+7/8, +1] Spherical cap
=================== =============
Parameters
----------
image : (M, N) ndarray
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used for
        smoothing the input data before Hessian eigenvalue calculation.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
s : ndarray
Shape index
References
----------
.. [1] Koenderink, J. J. & van Doorn, A. J.,
"Surface shape and curvature scales",
Image and Vision Computing, 1992, 10, 557-564.
:DOI:`10.1016/0262-8856(92)90076-F`
Examples
--------
>>> from skimage.feature import shape_index
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 4
>>> s = shape_index(square, sigma=0.1)
>>> s
array([[ nan, nan, -0.5, nan, nan],
[ nan, -0. , nan, -0. , nan],
[-0.5, nan, -1. , nan, -0.5],
[ nan, -0. , nan, -0. , nan],
[ nan, nan, -0.5, nan, nan]])
"""
H = hessian_matrix(image, sigma=sigma, mode=mode, cval=cval, order='rc')
l1, l2 = hessian_matrix_eigvals(H)
return (2.0 / np.pi) * np.arctan((l2 + l1) / (l2 - l1))
|
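For reference, a small helper that maps a shape-index value onto the named categories from the table above; this is an illustration only and is not part of scikit-image:
import numpy as np


def classify_shape_index(s):
    """Map a shape-index value in [-1, 1] to the shape names from the table."""
    if np.isnan(s):
        return "flat (undefined)"
    bins = [-7 / 8, -5 / 8, -3 / 8, -1 / 8, 1 / 8, 3 / 8, 5 / 8, 7 / 8]
    names = ["spherical cup", "trough", "rut", "saddle rut", "saddle",
             "saddle ridge", "ridge", "dome", "spherical cap"]
    return names[np.digitize(s, bins)]


print(classify_shape_index(-1.0))  # spherical cup
print(classify_shape_index(0.0))   # saddle
print(classify_shape_index(0.95))  # spherical cap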
5,596 |
def example_data():
"""Access sample xarray Dataset example for calculating and plotting 2D variables."""
import xarray as xr
# make data based on Matplotlib example data for wind barbs
x, y = np.meshgrid(np.linspace(-3, 3, 25), np.linspace(-3, 3, 25))
z = (1 - x / 2 + x**5 + y**3) * np.exp(-x**2 - y**2)
# make u and v out of the z equation
u = -np.diff(z[:, 1:], axis=0) * 100 + 10
v = np.diff(z[1:, :], axis=1) * 100 + 10
# make t as colder air to the north
t = (np.linspace(15, 5, 24) * np.ones((24, 24))).T
# Make lat/lon data over the mid-latitudes
lats = np.linspace(30, 40, 24)
lons = np.linspace(360 - 100, 360 - 90, 24)
# place data into an xarray dataset object
lat = xr.DataArray(lats, attrs={'standard_name': 'latitude', 'units': 'degrees_north'})
lon = xr.DataArray(lons, attrs={'standard_name': 'longitude', 'units': 'degrees_east'})
uwind = xr.DataArray(u, coords=(lat, lon), dims=['lat', 'lon'],
attrs={'standard_name': 'u-component_of_wind', 'units': 'm s-1'})
vwind = xr.DataArray(v, coords=(lat, lon), dims=['lat', 'lon'],
                         attrs={'standard_name': 'v-component_of_wind', 'units': 'm s-1'})
temperature = xr.DataArray(t, coords=(lat, lon), dims=['lat', 'lon'],
attrs={'standard_name': 'temperature', 'units': 'degC'})
return xr.Dataset({'uwind': uwind,
'vwind': vwind,
'temperature': temperature})
|
def example_data():
"""Creates a sample xarray Dataset with 2D variables."""
import xarray as xr
# make data based on Matplotlib example data for wind barbs
x, y = np.meshgrid(np.linspace(-3, 3, 25), np.linspace(-3, 3, 25))
z = (1 - x / 2 + x**5 + y**3) * np.exp(-x**2 - y**2)
# make u and v out of the z equation
u = -np.diff(z[:, 1:], axis=0) * 100 + 10
v = np.diff(z[1:, :], axis=1) * 100 + 10
# make t as colder air to the north
t = (np.linspace(15, 5, 24) * np.ones((24, 24))).T
# Make lat/lon data over the mid-latitudes
lats = np.linspace(30, 40, 24)
lons = np.linspace(360 - 100, 360 - 90, 24)
# place data into an xarray dataset object
lat = xr.DataArray(lats, attrs={'standard_name': 'latitude', 'units': 'degrees_north'})
lon = xr.DataArray(lons, attrs={'standard_name': 'longitude', 'units': 'degrees_east'})
uwind = xr.DataArray(u, coords=(lat, lon), dims=['lat', 'lon'],
attrs={'standard_name': 'u-component_of_wind', 'units': 'm s-1'})
vwind = xr.DataArray(v, coords=(lat, lon), dims=['lat', 'lon'],
                         attrs={'standard_name': 'v-component_of_wind', 'units': 'm s-1'})
temperature = xr.DataArray(t, coords=(lat, lon), dims=['lat', 'lon'],
attrs={'standard_name': 'temperature', 'units': 'degC'})
return xr.Dataset({'uwind': uwind,
'vwind': vwind,
'temperature': temperature})
|
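A brief usage sketch, assuming numpy is imported as np in the enclosing module and xarray is installed:
ds = example_data()
print(ds.temperature.shape)    # (24, 24)
print(float(ds.uwind.mean()))  # mean u-component of the synthetic wind field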
27,495 |
def spark_streaming_to_pubsublite(
project_number: int, location: str, topic_id: str
) -> None:
# [START pubsublite_spark_streaming_to_pubsublite]
from pyspark.sql import SparkSession
from pyspark.sql.types import BinaryType, StringType
# TODO(developer):
# project_number = 11223344556677
# location = "us-central1-a"
# topic_id = "your-topic-id"
spark = SparkSession.builder.appName("write-app").master("yarn").getOrCreate()
# Create a RateStreamSource that generates consecutive numbers with timestamps:
# |-- timestamp: timestamp (nullable = true)
# |-- value: long (nullable = true)
sdf = spark.readStream.format("rate").option("rowsPerSecond", 1).load()
# divisible_by_two_udf = udf(lambda z: "even" if str(z)[-1] % 2 == 0 else "odd")
sdf = (
sdf.withColumn("key", (sdf.value % 5).cast(StringType()).cast(BinaryType()))
.withColumn("event_timestamp", sdf.timestamp)
.withColumn(
"data",
sdf.value.cast(StringType()).cast(BinaryType())
# ).withColumn(
# "attributes", create_map(
# lit("prop1"), array(divisible_by_two_udf("value").cast(BinaryType()))).cast(MapType(StringType(), ArrayType(BinaryType()), True))
)
.drop("value", "timestamp")
)
sdf.printSchema()
query = (
sdf.writeStream.format("pubsublite")
.option(
"pubsublite.topic",
f"projects/{project_number}/locations/{location}/topics/{topic_id}",
)
.option("checkpointLocation", "/tmp/app")
.outputMode("append")
.trigger(processingTime="1 second")
.start()
)
query.awaitTermination(60)
query.stop()
# [END pubsublite_spark_streaming_to_pubsublite]
|
def spark_streaming_to_pubsublite(
project_number: int, region: str, topic_id: str
) -> None:
# [START pubsublite_spark_streaming_to_pubsublite]
from pyspark.sql import SparkSession
from pyspark.sql.types import BinaryType, StringType
# TODO(developer):
# project_number = 11223344556677
    # region = "us-central1-a"
# topic_id = "your-topic-id"
spark = SparkSession.builder.appName("write-app").master("yarn").getOrCreate()
# Create a RateStreamSource that generates consecutive numbers with timestamps:
# |-- timestamp: timestamp (nullable = true)
# |-- value: long (nullable = true)
sdf = spark.readStream.format("rate").option("rowsPerSecond", 1).load()
# divisible_by_two_udf = udf(lambda z: "even" if str(z)[-1] % 2 == 0 else "odd")
sdf = (
sdf.withColumn("key", (sdf.value % 5).cast(StringType()).cast(BinaryType()))
.withColumn("event_timestamp", sdf.timestamp)
.withColumn(
"data",
sdf.value.cast(StringType()).cast(BinaryType())
# ).withColumn(
# "attributes", create_map(
# lit("prop1"), array(divisible_by_two_udf("value").cast(BinaryType()))).cast(MapType(StringType(), ArrayType(BinaryType()), True))
)
.drop("value", "timestamp")
)
sdf.printSchema()
query = (
sdf.writeStream.format("pubsublite")
.option(
"pubsublite.topic",
f"projects/{project_number}/locations/{location}/topics/{topic_id}",
)
.option("checkpointLocation", "/tmp/app")
.outputMode("append")
.trigger(processingTime="1 second")
.start()
)
query.awaitTermination(60)
query.stop()
# [END pubsublite_spark_streaming_to_pubsublite]
|
23,185 |
def default_threadable_predictors():
"""Generates a new defaultdict for known threadable predictors.
The default is to predict true.
"""
# alphabetical, for what it is worth.
predictors = {
"asciinema": predict_help_ver,
"aurman": predict_false,
"awk": predict_true,
"bash": predict_shell,
"cat": predict_false,
"clear": predict_false,
"cls": predict_false,
"cmd": predict_shell,
"cryptop": predict_false,
"cryptsetup": predict_true,
"csh": predict_shell,
"curl": predict_true,
"emacsclient": predict_false,
"env": predict_env,
"ex": predict_false,
"fish": predict_shell,
"gawk": predict_true,
"git": predict_true,
"gvim": predict_help_ver,
"hg": predict_hg,
"htop": predict_help_ver,
"ipython": predict_shell,
"ksh": predict_shell,
"less": predict_help_ver,
"ls": predict_true,
"man": predict_help_ver,
"mc": predict_false,
"more": predict_help_ver,
"mutt": predict_help_ver,
"mvim": predict_help_ver,
"nano": predict_help_ver,
"nmcli": predict_true,
"nvim": predict_false,
"ponysay": predict_help_ver,
"psql": predict_false,
"push": predict_shell,
"pv": predict_false,
"python": predict_shell,
"python2": predict_shell,
"python3": predict_shell,
"ranger": predict_help_ver,
"repo": predict_help_ver,
"rview": predict_false,
"rvim": predict_false,
"rwt": predict_shell,
"scp": predict_false,
"sh": predict_shell,
"ssh": predict_false,
"startx": predict_false,
"sudo": predict_help_ver,
"sudoedit": predict_help_ver,
"systemctl": predict_true,
"tcsh": predict_shell,
"telnet": predict_false,
"top": predict_help_ver,
"tput": predict_false,
"udisksctl": predict_true,
"unzip": predict_true,
"vi": predict_false,
"view": predict_false,
"vim": predict_false,
"vimpager": predict_help_ver,
"weechat": predict_help_ver,
"wget": predict_true,
"xclip": predict_help_ver,
"xo": predict_help_ver,
"xon.sh": predict_shell,
"xonsh": predict_shell,
"yes": predict_false,
"zip": predict_true,
"zipinfo": predict_true,
"zsh": predict_shell
}
return predictors
|
def default_threadable_predictors():
"""Generates a new defaultdict for known threadable predictors.
The default is to predict true.
"""
# alphabetical, for what it is worth.
predictors = {
"asciinema": predict_help_ver,
"aurman": predict_false,
"awk": predict_true,
"bash": predict_shell,
"cat": predict_false,
"clear": predict_false,
"cls": predict_false,
"cmd": predict_shell,
"cryptop": predict_false,
"cryptsetup": predict_true,
"csh": predict_shell,
"curl": predict_true,
"emacsclient": predict_false,
"env": predict_env,
"ex": predict_false,
"fish": predict_shell,
"gawk": predict_true,
"git": predict_true,
"gvim": predict_help_ver,
"hg": predict_hg,
"htop": predict_help_ver,
"ipython": predict_shell,
"ksh": predict_shell,
"less": predict_help_ver,
"ls": predict_true,
"man": predict_help_ver,
"mc": predict_false,
"more": predict_help_ver,
"mutt": predict_help_ver,
"mvim": predict_help_ver,
"nano": predict_help_ver,
"nmcli": predict_true,
"nvim": predict_false,
"ponysay": predict_help_ver,
"psql": predict_false,
"push": predict_shell,
"pv": predict_false,
"python": predict_shell,
"python2": predict_shell,
"python3": predict_shell,
"ranger": predict_help_ver,
"repo": predict_help_ver,
"rview": predict_false,
"rvim": predict_false,
"rwt": predict_shell,
"scp": predict_false,
"sh": predict_shell,
"ssh": predict_false,
"startx": predict_false,
"sudo": predict_help_ver,
"sudoedit": predict_help_ver,
"systemctl": predict_true,
"tcsh": predict_shell,
"telnet": predict_false,
"top": predict_help_ver,
"tput": predict_false,
"udisksctl": predict_true,
"unzip": predict_true,
"vi": predict_false,
"view": predict_false,
"vim": predict_false,
"vimpager": predict_help_ver,
"weechat": predict_help_ver,
"wget": predict_true,
"xclip": predict_help_ver,
"xo": predict_help_ver,
"xon.sh": predict_shell,
"xonsh": predict_shell,
"yes": predict_false,
"zip": predict_true,
"zipinfo": predict_true,
"zsh": predict_shell,
}
return predictors
|
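The docstring says unknown commands default to a True prediction; a sketch of how a caller might wrap the plain dict above in a defaultdict with that behavior (predict_true here is a hypothetical stub, and the real predictors take more context than shown):
import collections


def predict_true(*_args, **_kwargs):
    """Hypothetical stub: unknown commands are assumed to be threadable."""
    return True


def make_predictor_table(known):
    table = collections.defaultdict(lambda: predict_true)
    table.update(known)
    return table


table = make_predictor_table({"vim": lambda *_: False})
print(table["vim"]())            # False (known command)
print(table["some-new-tool"]())  # True  (falls back to predict_true)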
6,723 |
def execfile(
filename: Union[str, bytes, os.PathLike],
globals: Dict[str, Any],
locals: Optional[Mapping[str, Any]] = None,
) -> None:
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
with open(filename, "rb") as fin:
source = fin.read()
code = compile(source, filename, "exec")
exec(code, globals, locals)
|
def execfile(
filename: Union[str, bytes, os.PathLike],
globals: Optional[Dict[str, Any]],
locals: Optional[Mapping[str, Any]] = None,
) -> None:
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
with open(filename, "rb") as fin:
source = fin.read()
code = compile(source, filename, "exec")
exec(code, globals, locals)
|
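A usage sketch for the compatibility execfile above: write a tiny script to a temporary file and execute it into a fresh namespace.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as fh:
    fh.write("result = 6 * 7\n")
    script_path = fh.name

namespace = {}
execfile(script_path, namespace)
print(namespace["result"])  # 42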
34,246 |
def validate_files(args):
"""Validate all files needed for training a model.
Fails with a non-zero exit code if there are any error in the data."""
from rasa.core.validator import Validator
from rasa.importers.rasa import RasaFileImporter
loop = asyncio.get_event_loop()
file_importer = RasaFileImporter(
domain_path=args.domain, training_data_paths=args.data
)
validator = loop.run_until_complete(Validator.from_importer(file_importer))
everything_is_alright = validator.verify_all(not args.fail_on_warnings)
sys.exit(not everything_is_alright) # fail with non zero if there are issues
|
def validate_files(args):
"""Validate all files needed for training a model.
Fails with a non-zero exit code if there are any errors in the data."""
from rasa.core.validator import Validator
from rasa.importers.rasa import RasaFileImporter
loop = asyncio.get_event_loop()
file_importer = RasaFileImporter(
domain_path=args.domain, training_data_paths=args.data
)
validator = loop.run_until_complete(Validator.from_importer(file_importer))
everything_is_alright = validator.verify_all(not args.fail_on_warnings)
sys.exit(not everything_is_alright) # fail with non zero if there are issues
|
7,307 |
def test_is_low_contrast_boolean():
image = np.zeros((8, 8))
assert exposure.is_low_contrast(image)
image[:5] = 1
assert not exposure.is_low_contrast(image)
|
def test_is_low_contrast_boolean():
image = np.zeros((8, 8), dtype=bool)
assert exposure.is_low_contrast(image)
image[:5] = 1
assert not exposure.is_low_contrast(image)
|
2,645 |
def test_davies_bouldin_score():
assert_raises_on_only_one_label(davies_bouldin_score)
assert_raises_on_all_points_same_cluster(davies_bouldin_score)
# Assert the value is 0. when all samples are equals
assert davies_bouldin_score(np.ones((10, 2)), [0] * 5 + [1] * 5) == pytest.approx(
0.0
)
# Assert the value is 0. when all the mean cluster are equal
assert davies_bouldin_score(
[[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10
) == pytest.approx(0.0)
# General case (with non numpy arrays)
X = (
[[0, 0], [1, 1]] * 5
+ [[3, 3], [4, 4]] * 5
+ [[0, 4], [1, 3]] * 5
+ [[3, 1], [4, 0]] * 5
)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
pytest.approx(davies_bouldin_score(X, labels), 2 * np.sqrt(0.5) / 3)
# Ensure divide by zero warning is not raised in general case
with warnings.catch_warnings():
warnings.simplefilter("error", UndefinedMetricWarning)
davies_bouldin_score(X, labels)
# General case - cluster have one sample
X = [[0, 0], [2, 2], [3, 3], [5, 5]]
labels = [0, 0, 1, 2]
pytest.approx(davies_bouldin_score(X, labels), (5.0 / 4) / 3)
|
def test_davies_bouldin_score():
assert_raises_on_only_one_label(davies_bouldin_score)
assert_raises_on_all_points_same_cluster(davies_bouldin_score)
# Assert the value is 0. when all samples are equals
assert davies_bouldin_score(np.ones((10, 2)), [0] * 5 + [1] * 5) == pytest.approx(
0.0
)
# Assert the value is 0. when all the mean cluster are equal
assert davies_bouldin_score(
[[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10
) == pytest.approx(0.0)
# General case (with non numpy arrays)
X = (
[[0, 0], [1, 1]] * 5
+ [[3, 3], [4, 4]] * 5
+ [[0, 4], [1, 3]] * 5
+ [[3, 1], [4, 0]] * 5
)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
pytest.approx(davies_bouldin_score(X, labels), 2 * np.sqrt(0.5) / 3)
# Ensure divide by zero warning is not raised in general case
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
davies_bouldin_score(X, labels)
# General case - cluster have one sample
X = [[0, 0], [2, 2], [3, 3], [5, 5]]
labels = [0, 0, 1, 2]
pytest.approx(davies_bouldin_score(X, labels), (5.0 / 4) / 3)
|
26,634 |
def default_task_id_to_fargate_options_function():
"""
This is a function which returns a function. The outer function takes no arguments, and returns the inner function.
    The inner function takes in an airflow CLI command and outputs a JSON payload compatible with the boto3 run_task API
linked above. In other words, if you don't like the way I call the boto3 Fargate API then call it yourself by
overriding the airflow config file.
i.e: execution_config_function = airflow.executors.fargate_executor.default_task_id_to_fargate_options_function
"""
# Absolutely mandatory configurations
cluster = configuration.conf.get('fargate', 'cluster')
# grab a few variables
task_definition = configuration.conf.get('fargate', 'task_definition')
container_name = configuration.conf.get('fargate', 'container_name')
security_groups = configuration.conf.get('fargate', 'security_groups').split(',')
launch_type = 'FARGATE'
if configuration.conf.has_option('fargate', 'launch_type'):
launch_type = configuration.conf.get('fargate', 'launch_type')
platform_version = 'LATEST'
if configuration.conf.has_option('fargate', 'platform_version'):
platform_version = configuration.conf.get('fargate', 'platform_version')
assign_public_ip = 'ENABLED'
if configuration.conf.has_option('fargate', 'assign_public_ip'):
assign_public_ip = configuration.conf.get('fargate', 'assign_public_ip')
subnets = None
if configuration.conf.has_option('fargate', 'subnets'):
subnets = configuration.conf.get('fargate', 'subnets').split(',')
# build the function based on the provided configurations
return get_default_execute_config_function(cluster, task_definition, container_name, platform_version,
launch_type, assign_public_ip, security_groups, subnets)
|
def default_task_id_to_fargate_options_function():
"""
This is a function which returns a function. The outer function takes no arguments, and returns the inner function.
    The inner function takes in an airflow CLI command and outputs a JSON payload compatible with the boto3 run_task API
linked above. In other words, if you don't like the way I call the boto3 Fargate API then call it yourself by
overriding the airflow config file.
i.e: execution_config_function = airflow.executors.fargate_executor.default_task_id_to_fargate_options_function
"""
# Absolutely mandatory configurations
cluster = configuration.conf.get('fargate', 'cluster')
# grab a few variables
task_definition = configuration.conf.get('fargate', 'task_definition')
container_name = configuration.conf.get('fargate', 'container_name')
security_groups = configuration.conf.get('fargate', 'security_groups').split(',')
launch_type = 'FARGATE'
if configuration.conf.has_option('fargate', 'launch_type'):
launch_type = configuration.conf.get('fargate', 'launch_type', fallback='FARGATE')
platform_version = 'LATEST'
if configuration.conf.has_option('fargate', 'platform_version'):
platform_version = configuration.conf.get('fargate', 'platform_version')
assign_public_ip = 'ENABLED'
if configuration.conf.has_option('fargate', 'assign_public_ip'):
assign_public_ip = configuration.conf.get('fargate', 'assign_public_ip')
subnets = None
if configuration.conf.has_option('fargate', 'subnets'):
subnets = configuration.conf.get('fargate', 'subnets').split(',')
# build the function based on the provided configurations
return get_default_execute_config_function(cluster, task_definition, container_name, platform_version,
launch_type, assign_public_ip, security_groups, subnets)
|
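For orientation, a sketch of the kind of run_task options dictionary such a builder would assemble. The field names follow boto3's ECS run_task API, but this is an assumption-laden illustration, not the module's get_default_execute_config_function:
def build_run_task_kwargs(cluster, task_definition, container_name, platform_version,
                          launch_type, assign_public_ip, security_groups, subnets,
                          airflow_command):
    """Assemble keyword arguments for boto3's ecs_client.run_task()."""
    return {
        "cluster": cluster,
        "taskDefinition": task_definition,
        "launchType": launch_type,
        "platformVersion": platform_version,
        "networkConfiguration": {
            "awsvpcConfiguration": {
                "subnets": subnets or [],
                "securityGroups": security_groups,
                "assignPublicIp": assign_public_ip,
            }
        },
        "overrides": {
            "containerOverrides": [
                {"name": container_name, "command": airflow_command}
            ]
        },
    }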
20,556 |
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
fname_in = os.path.abspath(arguments.i)
fname_seg = os.path.abspath(arguments.s)
contrast = arguments.c
path_template = os.path.abspath(arguments.t)
scale_dist = arguments.scale_dist
path_output = os.path.abspath(arguments.ofolder)
fname_disc = arguments.discfile
if fname_disc is not None:
fname_disc = os.path.abspath(fname_disc)
initz = arguments.initz
initcenter = arguments.initcenter
fname_initlabel = arguments.initlabel
if fname_initlabel is not None:
fname_initlabel = os.path.abspath(fname_initlabel)
remove_temp_files = arguments.r
clean_labels = arguments.clean_labels
path_tmp = tmp_create(basename="label_vertebrae")
# Copying input data to tmp folder
printv('\nCopying input data to tmp folder...', verbose)
Image(fname_in).save(os.path.join(path_tmp, "data.nii"))
Image(fname_seg).save(os.path.join(path_tmp, "segmentation.nii"))
    # Go to the temp folder
curdir = os.getcwd()
os.chdir(path_tmp)
# Straighten spinal cord
printv('\nStraighten spinal cord...', verbose)
# check if warp_curve2straight and warp_straight2curve already exist (i.e. no need to do it another time)
cache_sig = cache_signature(
input_files=[fname_in, fname_seg],
)
fname_cache = "straightening.cache"
if (cache_valid(os.path.join(curdir, fname_cache), cache_sig)
and os.path.isfile(os.path.join(curdir, "warp_curve2straight.nii.gz"))
and os.path.isfile(os.path.join(curdir, "warp_straight2curve.nii.gz"))
and os.path.isfile(os.path.join(curdir, "straight_ref.nii.gz"))):
# if they exist, copy them into current folder
printv('Reusing existing warping field which seems to be valid', verbose, 'warning')
copy(os.path.join(curdir, "warp_curve2straight.nii.gz"), 'warp_curve2straight.nii.gz')
copy(os.path.join(curdir, "warp_straight2curve.nii.gz"), 'warp_straight2curve.nii.gz')
copy(os.path.join(curdir, "straight_ref.nii.gz"), 'straight_ref.nii.gz')
# apply straightening
s, o = run_proc(['sct_apply_transfo', '-i', 'data.nii', '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', 'data_straight.nii'])
else:
sct_straighten_spinalcord.main(argv=[
'-i', 'data.nii',
'-s', 'segmentation.nii',
'-r', str(remove_temp_files),
'-v', '0',
])
cache_save(os.path.join(path_output, fname_cache), cache_sig)
# resample to 0.5mm isotropic to match template resolution
printv('\nResample to 0.5mm isotropic...', verbose)
s, o = run_proc(['sct_resample', '-i', 'data_straight.nii', '-mm', '0.5x0.5x0.5', '-x', 'linear', '-o', 'data_straightr.nii'], verbose=verbose)
# Apply straightening to segmentation
# N.B. Output is RPI
printv('\nApply straightening to segmentation...', verbose)
sct_apply_transfo.main(['-i', 'segmentation.nii',
'-d', 'data_straightr.nii',
'-w', 'warp_curve2straight.nii.gz',
'-o', 'segmentation_straight.nii',
'-x', 'linear',
'-v', '0'])
# Threshold segmentation at 0.5
img = Image('segmentation_straight.nii')
img.data = threshold(img.data, 0.5)
img.save()
# If disc label file is provided, label vertebrae using that file instead of automatically
if fname_disc:
# Apply straightening to disc-label
printv('\nApply straightening to disc labels...', verbose)
run_proc('sct_apply_transfo -i %s -d %s -w %s -o %s -x %s' %
(fname_disc,
'data_straightr.nii',
'warp_curve2straight.nii.gz',
'labeldisc_straight.nii.gz',
'label'),
verbose=verbose
)
label_vert('segmentation_straight.nii', 'labeldisc_straight.nii.gz', verbose=1)
else:
printv('\nCreate label to identify disc...', verbose)
fname_labelz = os.path.join(path_tmp, 'labelz.nii.gz')
if initcenter is not None:
# find z centered in FOV
nii = Image('segmentation.nii').change_orientation("RPI")
nx, ny, nz, nt, px, py, pz, pt = nii.dim
z_center = round(nz / 2)
initz = [z_center, initcenter]
if initz is not None:
im_label = create_labels_along_segmentation(Image('segmentation.nii'), [tuple(initz)])
im_label.save(fname_labelz)
elif fname_initlabel is not None:
Image(fname_initlabel).save(fname_labelz)
else:
# automatically finds C2-C3 disc
im_data = Image('data.nii')
im_seg = Image('segmentation.nii')
# because verbose is also used for keeping temp files
verbose_detect_c2c3 = 0 if remove_temp_files else 2
im_label_c2c3 = detect_c2c3(im_data, im_seg, contrast, verbose=verbose_detect_c2c3)
ind_label = np.where(im_label_c2c3.data)
if np.size(ind_label) == 0:
printv('Automatic C2-C3 detection failed. Please provide manual label with sct_label_utils', 1, 'error')
sys.exit(1)
im_label_c2c3.data[ind_label] = 3
im_label_c2c3.save(fname_labelz)
# dilate label so it is not lost when applying warping
dilate(Image(fname_labelz), 3, 'ball').save(fname_labelz)
# Apply straightening to z-label
printv('\nAnd apply straightening to label...', verbose)
sct_apply_transfo.main(['-i', 'labelz.nii.gz',
'-d', 'data_straightr.nii',
'-w', 'warp_curve2straight.nii.gz',
'-o', 'labelz_straight.nii.gz',
'-x', 'nn',
'-v', '0'])
        # get z value and disc value to initialize labeling
printv('\nGet z and disc values from straight label...', verbose)
init_disc = get_z_and_disc_values_from_label('labelz_straight.nii.gz')
printv('.. ' + str(init_disc), verbose)
# apply laplacian filtering
if arguments.laplacian:
printv('\nApply Laplacian filter...', verbose)
img = Image("data_straightr.nii")
        # set the Gaussian std dev (sigma) for each axis of the image
sigmas = [1 for i in range(len(img.data.shape))]
# adjust sigma based on voxel size
sigmas = [sigmas[i] / img.dim[i + 4] for i in range(3)]
# smooth data
img.data = laplacian(img.data, sigmas)
img.save()
# detect vertebral levels on straight spinal cord
init_disc[1] = init_disc[1] - 1
vertebral_detection('data_straightr.nii', 'segmentation_straight.nii', contrast, arguments.param, init_disc=init_disc,
verbose=verbose, path_template=path_template, path_output=path_output, scale_dist=scale_dist)
# un-straighten labeled spinal cord
printv('\nUn-straighten labeling...', verbose)
sct_apply_transfo.main(['-i', 'segmentation_straight_labeled.nii',
'-d', 'segmentation.nii',
'-w', 'warp_straight2curve.nii.gz',
'-o', 'segmentation_labeled.nii',
'-x', 'nn',
'-v', '0'])
if clean_labels == 1:
printv('\nClean labeled segmentation (remove labels outside segmentation)...', verbose)
clean_extra_labels('segmentation_labeled.nii', 'segmentation.nii')
elif clean_labels == 2:
printv('\nClean labeled segmentation (remove labels outside segmentation and fill in missing labels)...', verbose)
clean_extra_and_missing_labels('segmentation_labeled.nii', 'segmentation.nii')
# label discs
printv('\nLabel discs...', verbose)
printv('\nUn-straighten labeled discs...', verbose)
run_proc('sct_apply_transfo -i %s -d %s -w %s -o %s -x %s' %
('segmentation_straight_labeled_disc.nii',
'segmentation.nii',
'warp_straight2curve.nii.gz',
'segmentation_labeled_disc.nii',
'label'),
verbose=verbose,
is_sct_binary=True,
)
# come back
os.chdir(curdir)
# Generate output files
path_seg, file_seg, ext_seg = extract_fname(fname_seg)
fname_seg_labeled = os.path.join(path_output, file_seg + '_labeled' + ext_seg)
printv('\nGenerate output files...', verbose)
generate_output_file(os.path.join(path_tmp, "segmentation_labeled.nii"), fname_seg_labeled)
generate_output_file(os.path.join(path_tmp, "segmentation_labeled_disc.nii"), os.path.join(path_output, file_seg + '_labeled_discs' + ext_seg))
# copy straightening files in case subsequent SCT functions need them
generate_output_file(os.path.join(path_tmp, "warp_curve2straight.nii.gz"), os.path.join(path_output, "warp_curve2straight.nii.gz"), verbose=verbose)
generate_output_file(os.path.join(path_tmp, "warp_straight2curve.nii.gz"), os.path.join(path_output, "warp_straight2curve.nii.gz"), verbose=verbose)
generate_output_file(os.path.join(path_tmp, "straight_ref.nii.gz"), os.path.join(path_output, "straight_ref.nii.gz"), verbose=verbose)
# Remove temporary files
if remove_temp_files == 1:
printv('\nRemove temporary files...', verbose)
rmtree(path_tmp)
# Generate QC report
if arguments.qc is not None:
path_qc = os.path.abspath(arguments.qc)
qc_dataset = arguments.qc_dataset
qc_subject = arguments.qc_subject
labeled_seg_file = os.path.join(path_output, file_seg + '_labeled' + ext_seg)
generate_qc(fname_in, fname_seg=labeled_seg_file, args=argv, path_qc=os.path.abspath(path_qc),
dataset=qc_dataset, subject=qc_subject, process='sct_label_vertebrae')
display_viewer_syntax([fname_in, fname_seg_labeled], colormaps=['', 'subcortical'], opacities=['1', '0.5'])
|
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
fname_in = os.path.abspath(arguments.i)
fname_seg = os.path.abspath(arguments.s)
contrast = arguments.c
path_template = os.path.abspath(arguments.t)
scale_dist = arguments.scale_dist
path_output = os.path.abspath(arguments.ofolder)
fname_disc = arguments.discfile
if fname_disc is not None:
fname_disc = os.path.abspath(fname_disc)
initz = arguments.initz
initcenter = arguments.initcenter
fname_initlabel = arguments.initlabel
if fname_initlabel is not None:
fname_initlabel = os.path.abspath(fname_initlabel)
remove_temp_files = arguments.r
clean_labels = arguments.clean_labels
path_tmp = tmp_create(basename="label_vertebrae")
# Copying input data to tmp folder
printv('\nCopying input data to tmp folder...', verbose)
Image(fname_in).save(os.path.join(path_tmp, "data.nii"))
Image(fname_seg).save(os.path.join(path_tmp, "segmentation.nii"))
    # Go to the temp folder
curdir = os.getcwd()
os.chdir(path_tmp)
# Straighten spinal cord
printv('\nStraighten spinal cord...', verbose)
# check if warp_curve2straight and warp_straight2curve already exist (i.e. no need to do it another time)
cache_sig = cache_signature(
input_files=[fname_in, fname_seg],
)
fname_cache = "straightening.cache"
if (cache_valid(os.path.join(curdir, fname_cache), cache_sig)
and os.path.isfile(os.path.join(curdir, "warp_curve2straight.nii.gz"))
and os.path.isfile(os.path.join(curdir, "warp_straight2curve.nii.gz"))
and os.path.isfile(os.path.join(curdir, "straight_ref.nii.gz"))):
# if they exist, copy them into current folder
printv('Reusing existing warping field which seems to be valid', verbose, 'warning')
copy(os.path.join(curdir, "warp_curve2straight.nii.gz"), 'warp_curve2straight.nii.gz')
copy(os.path.join(curdir, "warp_straight2curve.nii.gz"), 'warp_straight2curve.nii.gz')
copy(os.path.join(curdir, "straight_ref.nii.gz"), 'straight_ref.nii.gz')
# apply straightening
s, o = run_proc(['sct_apply_transfo', '-i', 'data.nii', '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', 'data_straight.nii'])
else:
sct_straighten_spinalcord.main(argv=[
'-i', 'data.nii',
'-s', 'segmentation.nii',
'-r', str(remove_temp_files),
'-v', '0',
])
cache_save(os.path.join(path_output, fname_cache), cache_sig)
# resample to 0.5mm isotropic to match template resolution
printv('\nResample to 0.5mm isotropic...', verbose)
s, o = run_proc(['sct_resample', '-i', 'data_straight.nii', '-mm', '0.5x0.5x0.5', '-x', 'linear', '-o', 'data_straightr.nii'], verbose=verbose)
# Apply straightening to segmentation
# N.B. Output is RPI
printv('\nApply straightening to segmentation...', verbose)
sct_apply_transfo.main(['-i', 'segmentation.nii',
'-d', 'data_straightr.nii',
'-w', 'warp_curve2straight.nii.gz',
'-o', 'segmentation_straight.nii',
'-x', 'linear',
'-v', '0'])
# Threshold segmentation at 0.5
img = Image('segmentation_straight.nii')
img.data = threshold(img.data, 0.5)
img.save()
# If disc label file is provided, label vertebrae using that file instead of automatically
if fname_disc:
# Apply straightening to disc-label
printv('\nApply straightening to disc labels...', verbose)
run_proc('sct_apply_transfo -i %s -d %s -w %s -o %s -x %s' %
(fname_disc,
'data_straightr.nii',
'warp_curve2straight.nii.gz',
'labeldisc_straight.nii.gz',
'label'),
verbose=verbose
)
label_vert('segmentation_straight.nii', 'labeldisc_straight.nii.gz', verbose=1)
else:
printv('\nCreate label to identify disc...', verbose)
fname_labelz = os.path.join(path_tmp, 'labelz.nii.gz')
if initcenter is not None:
# find z centered in FOV
nii = Image('segmentation.nii').change_orientation("RPI")
nx, ny, nz, nt, px, py, pz, pt = nii.dim
z_center = round(nz / 2)
initz = [z_center, initcenter]
if initz is not None:
im_label = create_labels_along_segmentation(Image('segmentation.nii'), [tuple(initz)])
im_label.save(fname_labelz)
elif fname_initlabel is not None:
Image(fname_initlabel).save(fname_labelz)
else:
# automatically finds C2-C3 disc
im_data = Image('data.nii')
im_seg = Image('segmentation.nii')
# because verbose is also used for keeping temp files
verbose_detect_c2c3 = 0 if remove_temp_files else 2
im_label_c2c3 = detect_c2c3(im_data, im_seg, contrast, verbose=verbose_detect_c2c3)
ind_label = np.where(im_label_c2c3.data)
if np.size(ind_label) == 0:
printv('Automatic C2-C3 detection failed. Please provide manual label with sct_label_utils', 1, 'error')
sys.exit(1)
im_label_c2c3.data[ind_label] = 3
im_label_c2c3.save(fname_labelz)
# dilate label so it is not lost when applying warping
dilate(Image(fname_labelz), 3, 'ball').save(fname_labelz)
# Apply straightening to z-label
printv('\nAnd apply straightening to label...', verbose)
sct_apply_transfo.main(['-i', 'labelz.nii.gz',
'-d', 'data_straightr.nii',
'-w', 'warp_curve2straight.nii.gz',
'-o', 'labelz_straight.nii.gz',
'-x', 'nn',
'-v', '0'])
        # get z value and disc value to initialize labeling
printv('\nGet z and disc values from straight label...', verbose)
init_disc = get_z_and_disc_values_from_label('labelz_straight.nii.gz')
printv('.. ' + str(init_disc), verbose)
# apply laplacian filtering
if arguments.laplacian:
printv('\nApply Laplacian filter...', verbose)
img = Image("data_straightr.nii")
        # set the Gaussian std dev (sigma) for each axis of the image
sigmas = [1 for i in range(len(img.data.shape))]
# adjust sigma based on voxel size
sigmas = [sigmas[i] / img.dim[i + 4] for i in range(3)]
# smooth data
img.data = laplacian(img.data, sigmas)
img.save()
# detect vertebral levels on straight spinal cord
init_disc[1] = init_disc[1] - 1
vertebral_detection('data_straightr.nii', 'segmentation_straight.nii', contrast, arguments.param, init_disc=init_disc,
verbose=verbose, path_template=path_template, path_output=path_output, scale_dist=scale_dist)
# un-straighten labeled spinal cord
printv('\nUn-straighten labeling...', verbose)
sct_apply_transfo.main(['-i', 'segmentation_straight_labeled.nii',
'-d', 'segmentation.nii',
'-w', 'warp_straight2curve.nii.gz',
'-o', 'segmentation_labeled.nii',
'-x', 'nn',
'-v', '0'])
if clean_labels == 1:
printv('\nClean labeled segmentation (remove labels outside segmentation)...', verbose)
clean_extra_labels('segmentation_labeled.nii', 'segmentation.nii')
elif clean_labels == 2:
printv('\nCleaning labeled segmentation (removing labeled voxels outside segmentation and filling in missing label voxels)...', verbose)
clean_extra_and_missing_labels('segmentation_labeled.nii', 'segmentation.nii')
# label discs
printv('\nLabel discs...', verbose)
printv('\nUn-straighten labeled discs...', verbose)
run_proc('sct_apply_transfo -i %s -d %s -w %s -o %s -x %s' %
('segmentation_straight_labeled_disc.nii',
'segmentation.nii',
'warp_straight2curve.nii.gz',
'segmentation_labeled_disc.nii',
'label'),
verbose=verbose,
is_sct_binary=True,
)
# come back
os.chdir(curdir)
# Generate output files
path_seg, file_seg, ext_seg = extract_fname(fname_seg)
fname_seg_labeled = os.path.join(path_output, file_seg + '_labeled' + ext_seg)
printv('\nGenerate output files...', verbose)
generate_output_file(os.path.join(path_tmp, "segmentation_labeled.nii"), fname_seg_labeled)
generate_output_file(os.path.join(path_tmp, "segmentation_labeled_disc.nii"), os.path.join(path_output, file_seg + '_labeled_discs' + ext_seg))
# copy straightening files in case subsequent SCT functions need them
generate_output_file(os.path.join(path_tmp, "warp_curve2straight.nii.gz"), os.path.join(path_output, "warp_curve2straight.nii.gz"), verbose=verbose)
generate_output_file(os.path.join(path_tmp, "warp_straight2curve.nii.gz"), os.path.join(path_output, "warp_straight2curve.nii.gz"), verbose=verbose)
generate_output_file(os.path.join(path_tmp, "straight_ref.nii.gz"), os.path.join(path_output, "straight_ref.nii.gz"), verbose=verbose)
# Remove temporary files
if remove_temp_files == 1:
printv('\nRemove temporary files...', verbose)
rmtree(path_tmp)
# Generate QC report
if arguments.qc is not None:
path_qc = os.path.abspath(arguments.qc)
qc_dataset = arguments.qc_dataset
qc_subject = arguments.qc_subject
labeled_seg_file = os.path.join(path_output, file_seg + '_labeled' + ext_seg)
generate_qc(fname_in, fname_seg=labeled_seg_file, args=argv, path_qc=os.path.abspath(path_qc),
dataset=qc_dataset, subject=qc_subject, process='sct_label_vertebrae')
display_viewer_syntax([fname_in, fname_seg_labeled], colormaps=['', 'subcortical'], opacities=['1', '0.5'])
|
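The straightening step above is skipped when a cached signature of the input files still matches. A generic sketch of that caching pattern using file hashes; these helpers are hypothetical and not SCT's cache_signature/cache_valid/cache_save:
import hashlib
import os


def files_signature(paths):
    """Hash the contents of the given files into a single hex digest."""
    digest = hashlib.sha256()
    for path in paths:
        with open(path, "rb") as fh:
            digest.update(fh.read())
    return digest.hexdigest()


def cache_is_valid(cache_file, signature):
    """True if cache_file exists and stores exactly this signature."""
    if not os.path.isfile(cache_file):
        return False
    with open(cache_file) as fh:
        return fh.read().strip() == signature


def save_cache(cache_file, signature):
    with open(cache_file, "w") as fh:
        fh.write(signature)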
57,666 |
def Druva_Decommission(clientObj, resource_id):
response = clientObj.post_decommission(resource_id)
statusCode = response.status_code
responseJson = response.json()
if (statusCode == 200):
responseJson = response.json()
headers = ['Resource ID']
readable_output = tableToMarkdown('Device Decomission Request', str(resource_id), headers=headers)
outputs = {"Druva.decomissionedResource(val.resource_id == obj.resource_id)": str(resource_id)}
raw_response = responseJson
return (readable_output, outputs, raw_response)
else:
demisto.log('Error: ' + str(response.status_code))
return None
|
def Druva_Decommission(clientObj, resource_id):
response = clientObj.post_decommission(resource_id)
statusCode = response.status_code
if (statusCode == 200):
responseJson = response.json()
headers = ['Resource ID']
readable_output = tableToMarkdown('Device Decomission Request', str(resource_id), headers=headers)
outputs = {"Druva.decomissionedResource(val.resource_id == obj.resource_id)": str(resource_id)}
raw_response = responseJson
return (readable_output, outputs, raw_response)
else:
demisto.log('Error: ' + str(response.status_code))
return None
|
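A hedged sketch of how the helper above could be wired into an XSOAR command handler; return_outputs is the standard CommonServerPython helper, and the argument name 'resource_id' is only an assumption.
def druva_decommission_command(clientObj, args):
    resource_id = args.get('resource_id')  # hypothetical argument name
    result = Druva_Decommission(clientObj, resource_id)
    if result is not None:
        readable_output, outputs, raw_response = result
        return_outputs(readable_output, outputs, raw_response)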
31,339 |
def create_event_or_incident_output(item: Dict,
table_headers: List[str]) -> Dict[str, str]:
"""
Create the complete output dictionary for events or incidents.
Args:
item (dict): A source dictionary from the API response.
table_headers (list(str)): The table headers to be used when creating initial data.
Returns:
object_data (dict(str)): The output dictionary.
"""
alert_data = {field: item.get(field) for field in table_headers}
if 'agentId' in table_headers:
alert_data['agentId'] = dict_safe_get(item, ['asset', 'agentId'])
if 'username' in table_headers:
alert_data['occurred'] = dict_safe_get(item, ['createdBy', 'date'])
alert_data['username'] = dict_safe_get(item, ['createdBy', 'user', 'name'])
if 'profiles' in table_headers:
profiles = item.get('profiles', [])
profiles_list = []
for profile in profiles:
profiles_list.append(profile.get('name'))
alert_data['profiles'] = profiles_list
return remove_empty_elements(alert_data)
|
def create_event_or_incident_output(item: Dict,
table_headers: List[str]) -> Dict[str, str]:
"""
Create the complete output dictionary for events or incidents.
Args:
item (dict): A source dictionary from the API response.
table_headers (list(str)): The table headers to be used when creating initial data.
Returns:
object_data (dict[str, str]): The output dictionary.
"""
alert_data = {field: item.get(field) for field in table_headers}
if 'agentId' in table_headers:
alert_data['agentId'] = dict_safe_get(item, ['asset', 'agentId'])
if 'username' in table_headers:
alert_data['occurred'] = dict_safe_get(item, ['createdBy', 'date'])
alert_data['username'] = dict_safe_get(item, ['createdBy', 'user', 'name'])
if 'profiles' in table_headers:
profiles = item.get('profiles', [])
profiles_list = []
for profile in profiles:
profiles_list.append(profile.get('name'))
alert_data['profiles'] = profiles_list
return remove_empty_elements(alert_data)
|
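An illustrative call with a hand-made item (not real API output), showing how the nested asset/createdBy fields are flattened; it assumes the demisto helpers dict_safe_get and remove_empty_elements are in scope as above.
item = {
    'id': '123',
    'severity': 'High',
    'asset': {'agentId': 'agent-42'},
    'createdBy': {'date': '2021-01-01T00:00:00Z', 'user': {'name': 'analyst'}},
    'profiles': [{'name': 'endpoint'}, {'name': 'network'}],
}
headers = ['id', 'severity', 'agentId', 'username', 'profiles']
row = create_event_or_incident_output(item, headers)
# row == {'id': '123', 'severity': 'High', 'agentId': 'agent-42',
#         'occurred': '2021-01-01T00:00:00Z', 'username': 'analyst',
#         'profiles': ['endpoint', 'network']}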
12,032 |
def recombine_regions(
full_mesh_cube: Cube,
region_cubes: Iterable[Cube],
index_coord_name: AnyStr = "i_mesh_index",
) -> Cube:
"""
Put data from regional sub-meshes back onto the original full mesh.
The result is a region_cube identical to 'full_mesh_cube', but with its data
replaced by a combination of data from the provided 'region_cubes'.
The result metadata, including name and units, are also replaced by those
of the 'region_cubes' (which must all be the same).
Args:
* full_mesh_cube
Describes the full mesh and mesh-location to which the region data
refers, and acts as a template for the result.
Must have a :class:`~iris.experimental.ugrid.mesh.Mesh`.
* region_cubes
Contain data on a subset of the 'full_mesh_cube' mesh locations.
The region cubes do not need to have a mesh. There must be at least
1 of them, to determine the result phenomenon.
Their shapes and dimension-coords must all match those of
'full_mesh_cube', except in the mesh dimension, which can have
different sizes between the regions, and from the 'full_mesh_cube'.
The mesh dimension of each region cube must have a 1-D coord named by
'index_coord_name'. Although these region index coords can vary in
length, they must all have matching metadata (names, units and
attributes), and must also match the coord of that name in the
'full_mesh_cube', if there is one.
The ".points" values of the region index coords specify, for each
datapoint, its location in the original mesh -- i.e. they are indices
into the relevant mesh-location dimension.
* index_coord_name
Coord name of the index coords in each region cubes, containing the
mesh location indices.
Result:
* result_cube
An unstructured region_cube identical to 'full_mesh_cube', and with the
same mesh and location, but with its data and ".metadata" replaced by
that from the 'region_cubes'.
Where regions overlap, the result data comes from the last-listed of the
original region cubes which contain that location.
Where no region contains a datapoint, it will be masked in the result.
HINT: alternatively, values covered by no region can be taken from the
original 'full_mesh_cube' data, if 'full_mesh_cube' is *also* passed
as the first of the 'region_cubes'.
"""
if not region_cubes:
raise ValueError("'region_cubes' must be non-empty.")
mesh_dim = full_mesh_cube.mesh_dim()
if mesh_dim is None:
raise ValueError("'full_mesh_cube' has no \".mesh\".")
# Check the basic required properties of the input.
mesh_dim_coords = full_mesh_cube.coords(
dim_coords=True, dimensions=(mesh_dim,)
)
if not mesh_dim_coords:
err = (
"'full_mesh_cube' has no dim-coord on the mesh dimension, "
f"(dimension {mesh_dim})."
)
raise ValueError(err)
#
# Perform consistency checks on all the region-cubes.
#
def metadata_no_varname(cube_or_coord):
# Get a metadata object but omit any var_name.
metadata = cube_or_coord.metadata
fields = metadata._asdict()
fields["var_name"] = None
result = metadata.__class__(**fields)
return result
n_regions = len(region_cubes)
n_dims = full_mesh_cube.ndim
regioncube_metadata = None
indexcoord_metadata = None
for i_region, region_cube in enumerate(region_cubes):
reg_cube_str = (
f'Region cube #{i_region}/{n_regions}, "{region_cube.name()}"'
)
reg_ndims = region_cube.ndim
# Check dimensionality.
if reg_ndims != n_dims:
err = (
f"{reg_cube_str} has {reg_ndims} dimensions, but "
f"'full_mesh_cube' has {n_dims}."
)
raise ValueError(err)
# Get region_cube metadata, which will apply to the result..
region_cube_metadata = metadata_no_varname(region_cube)
if regioncube_metadata is None:
# Store the first region-cube metadata as a reference
regioncube_metadata = region_cube_metadata
elif region_cube_metadata != regioncube_metadata:
# Check subsequent region-cubes metadata against the first.
err = (
f"{reg_cube_str} has metadata {region_cube_metadata}, "
"which does not match that of the first region region_cube, "
f'"{region_cubes[0].name()}", '
f"which is {regioncube_metadata}."
)
raise ValueError(err)
# For each dim, check that coords match other regions, and full-cube.
for i_dim in range(full_mesh_cube.ndim):
if i_dim == mesh_dim:
# mesh dim : look for index coords (by name).
fulldim = full_mesh_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
regdim = region_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
else:
# non-mesh dims : look for dim-coords (only)
fulldim = full_mesh_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
regdim = region_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
if fulldim:
(fulldim,) = fulldim
full_dimname = fulldim.name()
fulldim_metadata = metadata_no_varname(fulldim)
if regdim:
(regdim,) = regdim
reg_dimname = regdim.name()
regdim_metadata = metadata_no_varname(regdim)
err = None
# N.B. checks for mesh- and non-mesh-dims are different.
if i_dim != mesh_dim:
# i_dim == mesh_dim : checks for non-mesh dims.
if fulldim and not regdim:
err = (
f"{reg_cube_str} has no dim-coord for dimension "
"{i_dim}, to match the 'full_mesh_cube' dimension "
f'"{full_dimname}".'
)
elif regdim and not fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, but 'full_mesh_cube' has none."
)
elif regdim != fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, which does not match that "
f"of 'full_mesh_cube', \"{full_dimname}\"."
)
else:
# i_dim == mesh_dim : different rules for this one
if not regdim:
# Must have an index coord on the mesh dimension
err = (
f'{reg_cube_str} has no "{index_coord_name}" coord on '
f"the mesh dimension (dimension {mesh_dim})."
)
elif fulldim and regdim_metadata != fulldim_metadata:
# May *not* have full-cube index, but if so it must match
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
"match that on 'full_mesh_cube' : "
f"{regdim_metadata} != {fulldim_metadata}."
)
# At this point, we know we *have* an index coord, and it does not
# conflict with the one on 'full_mesh_cube' (if any).
# Now check for matches between the region cubes.
if indexcoord_metadata is None:
# Store first occurrence (from first region-cube)
indexcoord_metadata = regdim_metadata
elif regdim_metadata != indexcoord_metadata:
# Compare subsequent occurences (from other region-cubes)
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
f"match that of the first region-cube : "
f"{regdim_metadata} != {indexcoord_metadata}."
)
if err:
raise ValueError(err)
# Use the mesh_dim to transpose inputs + outputs, if required, as it is
# simpler for all the array operations to always have the mesh dim *last*.
if mesh_dim == full_mesh_cube.ndim - 1:
        # Mesh dim is already the last one : no transposes required
untranspose_dims = None
else:
dim_range = np.arange(full_mesh_cube.ndim, dtype=int)
# Transpose all inputs to mesh-last order.
tranpose_dims = [i_dim for i_dim in dim_range if i_dim != mesh_dim] + [
mesh_dim
] # chop out mesh_dim + put it at the end.
def transposed_copy(cube, dim_order):
cube = cube.copy()
            cube.transpose(dim_order)
return cube
full_mesh_cube = transposed_copy(full_mesh_cube, tranpose_dims)
region_cubes = [
transposed_copy(region_cube, tranpose_dims)
for region_cube in region_cubes
]
# Also prepare for transforming the output back to the original order.
untranspose_dims = dim_range.copy()
# Neat trick to produce the reverse operation.
untranspose_dims[tranpose_dims] = dim_range
#
# Here's the core operation..
#
def fill_region(target, regiondata, regioninds):
if not target.flags.writeable:
# The initial input can be a section of a da.zeros(), which has no
# real array "behind" it. This means that real arrays created in
# memory are only chunk-sized, but it also means that 'target' may
# not be writeable. So take a copy to fix that, where needed.
target = target.copy()
# N.B. Indices are basically 1D, but may have leading *1 dims for
# alignment, to satisfy da.map_blocks
assert all(size == 1 for size in regioninds.shape[:-1])
inds = regioninds.flatten()
# Assign blocks with indexing on the last dim only.
target[..., inds] = regiondata
return target
# Create an initially 'empty' (all-masked) dask array matching the input.
# N.B. this does not use the full_mesh_cube.lazy_data() array, but only its
# shape and dtype, since the data itself is not used in the calculation.
# N.B. chunking matches the input cube, allowing performance control.
input_data = full_mesh_cube.lazy_data()
result_array = da.ma.masked_array(
da.zeros(
input_data.shape,
dtype=input_data.dtype,
chunks=input_data.chunksize,
),
True,
)
# Wrap this repeatedly with a lazy operation to assign each region.
# It is done this way because we couldn't get map_blocks to correctly wrap
# a function which does all regions in a single operation.
# TODO: replace with a single-stage solution: Probably better, if possible.
# Notes on resultant calculation properties:
# 1. map_blocks is chunk-mapped, so it is parallelisable and space-saving
# 2. However, fetching less than a whole chunk is not efficient
for region_cube in region_cubes:
# Lazy data array from the region cube
datarr = region_cube.lazy_data()
# Lazy indices from the mesh-dim coord.
mesh_dimcoord = region_cube.coord(
name_or_coord=index_coord_name, dimensions=region_cube.ndim - 1
)
indarr = mesh_dimcoord.lazy_points()
# Extend indarr dimensions to align it with the 'target' array dims.
assert indarr.ndim == 1
shape = (1,) * (region_cube.ndim - 1) + indarr.shape
indarr = indarr.reshape(shape)
# Apply the operation to paste from one region into the target.
# N.B. replacing 'result_array' each time around the loop.
result_array = da.map_blocks(
fill_region,
result_array,
datarr,
indarr,
dtype=result_array.dtype,
meta=np.ndarray,
)
# Construct the result cube.
result_cube = full_mesh_cube.copy()
result_cube.data = result_array
# Copy names, units + attributes from region data (N.B. but not var_name)
result_cube.metadata = regioncube_metadata
if untranspose_dims:
# Re-order dims as in the original input.
result_cube.transpose(untranspose_dims)
return result_cube
|
def recombine_regions(
full_mesh_cube: Cube,
region_cubes: Iterable[Cube],
index_coord_name: AnyStr = "i_mesh_index",
) -> Cube:
"""
Put data from regional sub-meshes back onto the original full mesh.
The result is a region_cube identical to 'full_mesh_cube', but with its data
replaced by a combination of data from the provided 'region_cubes'.
The result metadata, including name and units, are also replaced by those
of the 'region_cubes' (which must all be the same).
Args:
* full_mesh_cube
Describes the full mesh and mesh-location to which the region data
refers, and acts as a template for the result.
Must have a :class:`~iris.experimental.ugrid.mesh.Mesh`.
* region_cubes
Contain data on a subset of the 'full_mesh_cube' mesh locations.
The region cubes do not need to have a mesh. There must be at least
1 of them, to determine the result phenomenon.
Their shapes and dimension-coords must all match those of
'full_mesh_cube', except in the mesh dimension, which can have
different sizes between the regions, and from the 'full_mesh_cube'.
The mesh dimension of each region cube must have a 1-D coord named by
'index_coord_name'. Although these region index coords can vary in
length, they must all have matching metadata (names, units and
attributes), and must also match the coord of that name in the
'full_mesh_cube', if there is one.
The ".points" values of the region index coords specify, for each
datapoint, its location in the original mesh -- i.e. they are indices
into the relevant mesh-location dimension.
* index_coord_name
Coord name of the index coords in each region cubes, containing the
mesh location indices.
Result:
* result_cube
An unstructured region_cube identical to 'full_mesh_cube', and with the
same mesh and location, but with its data and ".metadata" replaced by
that from the 'region_cubes'.
Where regions overlap, the result data comes from the last-listed of the
original region cubes which contain that location.
Where no region contains a datapoint, it will be masked in the result.
HINT: alternatively, values covered by no region can be taken from the
original 'full_mesh_cube' data, if 'full_mesh_cube' is *also* passed
as the first of the 'region_cubes'.
"""
if not region_cubes:
raise ValueError("'region_cubes' must be non-empty.")
mesh_dim = full_mesh_cube.mesh_dim()
if mesh_dim is None:
raise ValueError("'full_mesh_cube' has no \".mesh\".")
# Check the basic required properties of the input.
mesh_dim_coords = full_mesh_cube.coords(
dim_coords=True, dimensions=(mesh_dim,)
)
if not mesh_dim_coords:
err = (
"'full_mesh_cube' has no dim-coord on the mesh dimension, "
f"(dimension {mesh_dim})."
)
raise ValueError(err)
#
# Perform consistency checks on all the region-cubes.
#
def metadata_no_varname(cube_or_coord):
# Get a metadata object but omit any var_name.
metadata = cube_or_coord.metadata
fields = metadata._asdict()
fields["var_name"] = None
result = metadata.__class__(**fields)
return result
n_regions = len(region_cubes)
n_dims = full_mesh_cube.ndim
regioncube_metadata = None
indexcoord_metadata = None
for i_region, region_cube in enumerate(region_cubes):
reg_cube_str = (
f'Region cube #{i_region}/{n_regions}, "{region_cube.name()}"'
)
reg_ndims = region_cube.ndim
# Check dimensionality.
if reg_ndims != n_dims:
err = (
f"{reg_cube_str} has {reg_ndims} dimensions, but "
f"'full_mesh_cube' has {n_dims}."
)
raise ValueError(err)
# Get region_cube metadata, which will apply to the result..
region_cube_metadata = metadata_no_varname(region_cube)
if regioncube_metadata is None:
# Store the first region-cube metadata as a reference
regioncube_metadata = region_cube_metadata
elif region_cube_metadata != regioncube_metadata:
# Check subsequent region-cubes metadata against the first.
err = (
f"{reg_cube_str} has metadata {region_cube_metadata}, "
"which does not match that of the first region region_cube, "
f'"{region_cubes[0].name()}", '
f"which is {regioncube_metadata}."
)
raise ValueError(err)
# For each dim, check that coords match other regions, and full-cube.
for i_dim in range(full_mesh_cube.ndim):
if i_dim == mesh_dim:
# mesh dim : look for index coords (by name).
fulldim = full_mesh_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
regdim = region_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
else:
# non-mesh dims : look for dim-coords (only)
fulldim = full_mesh_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
regdim = region_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
if fulldim:
(fulldim,) = fulldim
full_dimname = fulldim.name()
fulldim_metadata = metadata_no_varname(fulldim)
if regdim:
(regdim,) = regdim
reg_dimname = regdim.name()
regdim_metadata = metadata_no_varname(regdim)
err = None
# N.B. checks for mesh- and non-mesh-dims are different.
if i_dim != mesh_dim:
# i_dim == mesh_dim : checks for non-mesh dims.
if fulldim and not regdim:
err = (
f"{reg_cube_str} has no dim-coord for dimension "
"{i_dim}, to match the 'full_mesh_cube' dimension "
f'"{full_dimname}".'
)
elif regdim and not fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, but 'full_mesh_cube' has none."
)
elif regdim != fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, which does not match that "
f"of 'full_mesh_cube', \"{full_dimname}\"."
)
else:
# i_dim == mesh_dim : different rules for this one
if not regdim:
# Must have an index coord on the mesh dimension
err = (
f'{reg_cube_str} has no "{index_coord_name}" coord on '
f"the mesh dimension (dimension {mesh_dim})."
)
elif fulldim and regdim_metadata != fulldim_metadata:
# May *not* have full-cube index, but if so it must match
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
"match that on 'full_mesh_cube' : "
f"{regdim_metadata} != {fulldim_metadata}."
)
# At this point, we know we *have* an index coord, and it does not
# conflict with the one on 'full_mesh_cube' (if any).
# Now check for matches between the region cubes.
if indexcoord_metadata is None:
# Store first occurrence (from first region-cube)
indexcoord_metadata = regdim_metadata
elif regdim_metadata != indexcoord_metadata:
# Compare subsequent occurrences (from other region-cubes)
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
f"match that of the first region-cube : "
f"{regdim_metadata} != {indexcoord_metadata}."
)
if err:
raise ValueError(err)
# Use the mesh_dim to transpose inputs + outputs, if required, as it is
# simpler for all the array operations to always have the mesh dim *last*.
if mesh_dim == full_mesh_cube.ndim - 1:
        # Mesh dim is already the last one : no transposes required
untranspose_dims = None
else:
dim_range = np.arange(full_mesh_cube.ndim, dtype=int)
# Transpose all inputs to mesh-last order.
tranpose_dims = [i_dim for i_dim in dim_range if i_dim != mesh_dim] + [
mesh_dim
] # chop out mesh_dim + put it at the end.
def transposed_copy(cube, dim_order):
cube = cube.copy()
            cube.transpose(dim_order)
return cube
full_mesh_cube = transposed_copy(full_mesh_cube, tranpose_dims)
region_cubes = [
transposed_copy(region_cube, tranpose_dims)
for region_cube in region_cubes
]
# Also prepare for transforming the output back to the original order.
untranspose_dims = dim_range.copy()
# Neat trick to produce the reverse operation.
untranspose_dims[tranpose_dims] = dim_range
#
# Here's the core operation..
#
def fill_region(target, regiondata, regioninds):
if not target.flags.writeable:
# The initial input can be a section of a da.zeros(), which has no
# real array "behind" it. This means that real arrays created in
# memory are only chunk-sized, but it also means that 'target' may
# not be writeable. So take a copy to fix that, where needed.
target = target.copy()
# N.B. Indices are basically 1D, but may have leading *1 dims for
# alignment, to satisfy da.map_blocks
assert all(size == 1 for size in regioninds.shape[:-1])
inds = regioninds.flatten()
# Assign blocks with indexing on the last dim only.
target[..., inds] = regiondata
return target
# Create an initially 'empty' (all-masked) dask array matching the input.
# N.B. this does not use the full_mesh_cube.lazy_data() array, but only its
# shape and dtype, since the data itself is not used in the calculation.
# N.B. chunking matches the input cube, allowing performance control.
input_data = full_mesh_cube.lazy_data()
result_array = da.ma.masked_array(
da.zeros(
input_data.shape,
dtype=input_data.dtype,
chunks=input_data.chunksize,
),
True,
)
# Wrap this repeatedly with a lazy operation to assign each region.
# It is done this way because we couldn't get map_blocks to correctly wrap
# a function which does all regions in a single operation.
# TODO: replace with a single-stage solution: Probably better, if possible.
# Notes on resultant calculation properties:
# 1. map_blocks is chunk-mapped, so it is parallelisable and space-saving
# 2. However, fetching less than a whole chunk is not efficient
for region_cube in region_cubes:
# Lazy data array from the region cube
datarr = region_cube.lazy_data()
# Lazy indices from the mesh-dim coord.
mesh_dimcoord = region_cube.coord(
name_or_coord=index_coord_name, dimensions=region_cube.ndim - 1
)
indarr = mesh_dimcoord.lazy_points()
# Extend indarr dimensions to align it with the 'target' array dims.
assert indarr.ndim == 1
shape = (1,) * (region_cube.ndim - 1) + indarr.shape
indarr = indarr.reshape(shape)
# Apply the operation to paste from one region into the target.
# N.B. replacing 'result_array' each time around the loop.
result_array = da.map_blocks(
fill_region,
result_array,
datarr,
indarr,
dtype=result_array.dtype,
meta=np.ndarray,
)
# Construct the result cube.
result_cube = full_mesh_cube.copy()
result_cube.data = result_array
# Copy names, units + attributes from region data (N.B. but not var_name)
result_cube.metadata = regioncube_metadata
if untranspose_dims:
# Re-order dims as in the original input.
result_cube.transpose(untranspose_dims)
return result_cube
|
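A stand-alone sketch of the paste-by-index pattern at the heart of the function above, using plain dask arrays instead of Iris cubes; the shapes, chunking and index values are chosen purely for illustration.
import dask.array as da
import numpy as np

def fill_region(target, regiondata, regioninds):
    # Same core as above: copy if the block is read-only, then assign
    # along the last (mesh) axis at the given indices.
    if not target.flags.writeable:
        target = target.copy()
    inds = regioninds.flatten()
    target[..., inds] = regiondata
    return target

full = da.ma.masked_array(da.zeros((3, 10), chunks=(3, 10)), True)
region_data = da.ones((3, 4), chunks=(3, 4))
region_inds = da.from_array(np.array([[1, 3, 5, 7]]), chunks=(1, 4))
result = da.map_blocks(fill_region, full, region_data, region_inds,
                       dtype=full.dtype, meta=np.ndarray)
print(result.compute())  # columns 1, 3, 5, 7 filled, the rest masked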
54,135 |
def supervised_training_step(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
output_transform: Callable = lambda x, y, y_pred, loss: loss.item(),
) -> Callable:
"""Factory function for supervised training.
Args:
model (torch.nn.Module): the model to train.
optimizer (torch.optim.Optimizer): the optimizer to use.
loss_fn (torch.nn loss function): the loss function to use.
device (str, optional): device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
Returns:
Callable: update function.
Example::
model = ...
optimizer = ...
loss_fn = ...
update_fn = supervised_training_step(model, optimizer, loss_fn, 'cuda')
trainer = Engine(update_fn)
.. versionadded:: 0.5.0
"""
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.train()
optimizer.zero_grad()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
loss = loss_fn(y_pred, y)
loss.backward()
optimizer.step()
return output_transform(x, y, y_pred, loss)
return update
|
def supervised_training_step(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
output_transform: Callable = lambda x, y, y_pred, loss: loss.item(),
) -> Callable:
"""Factory function for supervised training.
Args:
model (torch.nn.Module): the model to train.
optimizer (torch.optim.Optimizer): the optimizer to use.
loss_fn (torch.nn loss function): the loss function to use.
device (str, optional): device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
Returns:
Callable: update function.
Example::
        from ignite.engine import Engine, supervised_training_step
model = ...
optimizer = ...
loss_fn = ...
update_fn = supervised_training_step(model, optimizer, loss_fn, 'cuda')
trainer = Engine(update_fn)
.. versionadded:: 0.5.0
"""
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.train()
optimizer.zero_grad()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
loss = loss_fn(y_pred, y)
loss.backward()
optimizer.step()
return output_transform(x, y, y_pred, loss)
return update
|
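A minimal end-to-end sketch of using the returned update function with a toy model and one synthetic batch; it assumes ignite is installed and the factory is importable as in the docstring example.
import torch
from ignite.engine import Engine

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = torch.nn.CrossEntropyLoss()

update_fn = supervised_training_step(model, optimizer, loss_fn, device="cpu")
trainer = Engine(update_fn)

batches = [(torch.randn(8, 4), torch.randint(0, 2, (8,)))]
state = trainer.run(batches, max_epochs=1)
print(state.output)  # loss.item() from the last processed batch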
7,555 |
def _words_group(s, width):
"""
Split a long string into parts where each part is no longer than ``strlen``
and no word is cut into two pieces. But if there are any single words
which are longer than ``strlen``, then then will be split in the middle of
the word.
"""
words = []
slen = len(s)
# appending one blank at the end always ensures that the "last" blank
# is beyond the end of the string
arr = np.frombuffer(s.encode('utf8') + b' ', dtype='S1')
# locations of the blanks
blank_loc = np.nonzero(arr == b' ')[0]
offset = 0
xoffset = 0
while True:
try:
loc = np.nonzero(blank_loc >= width + offset)[0][0]
except IndexError:
loc = len(blank_loc)
if loc > 0:
offset = blank_loc[loc - 1] + 1
else:
offset = -1
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = min(xoffset + width, slen)
# collect the pieces in a list
words.append(s[xoffset:offset])
if offset >= slen:
break
xoffset = offset
return words
|
def _words_group(s, width):
"""
    Split a long string into parts where each part is no longer than ``width``
    and no word is cut into two pieces. But if there are any single words
    which are longer than ``width``, then they will be split in the middle of
the word.
"""
words = []
slen = len(s)
# appending one blank at the end always ensures that the "last" blank
# is beyond the end of the string
arr = np.frombuffer(s.encode('utf8') + b' ', dtype='S1')
# locations of the blanks
blank_loc = np.nonzero(arr == b' ')[0]
offset = 0
xoffset = 0
while True:
try:
loc = np.nonzero(blank_loc >= width + offset)[0][0]
except IndexError:
loc = len(blank_loc)
if loc > 0:
offset = blank_loc[loc - 1] + 1
else:
offset = -1
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = min(xoffset + width, slen)
# collect the pieces in a list
words.append(s[xoffset:offset])
if offset >= slen:
break
xoffset = offset
return words
|
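A quick illustration of the wrapping behaviour, where the second argument is a width in characters.
text = "The quick brown fox jumps over the lazy dog"
print(_words_group(text, 16))
# ['The quick brown ', 'fox jumps over ', 'the lazy dog']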
4,733 |
def _check_versions():
for modname, minver in [
("cycler", "0.10"),
("dateutil", "2.1"),
("kiwisolver", "1.0.1"),
("numpy", "1.11"),
("pyparsing", "2.0.1"),
]:
try:
module = importlib.import_module(modname)
except ImportError as error:
if sys.platform == 'win32' and 'DLL' in error.msg:
msg = ('You may be missing MIcrosoft Visual C++ '
'redistributable matching your Python version. '
'Consult Kiwisolver documentation for more details')
raise ImportError(msg) from error
else:
raise
if LooseVersion(module.__version__) < minver:
raise ImportError("Matplotlib requires {}>={}; you have {}"
.format(modname, minver, module.__version__))
|
def _check_versions():
for modname, minver in [
("cycler", "0.10"),
("dateutil", "2.1"),
("kiwisolver", "1.0.1"),
("numpy", "1.11"),
("pyparsing", "2.0.1"),
]:
try:
module = importlib.import_module(modname)
except ImportError as error:
if sys.platform == 'win32' and 'DLL' in error.msg:
msg = ('You may be missing Microsoft Visual C++ '
'redistributable matching your Python version. '
'Consult Kiwisolver documentation for more details')
raise ImportError(msg) from error
else:
raise
if LooseVersion(module.__version__) < minver:
raise ImportError("Matplotlib requires {}>={}; you have {}"
.format(modname, minver, module.__version__))
|
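The same check, unrolled for a single dependency; it stays on distutils' LooseVersion to mirror the snippet, even though distutils is deprecated in recent Python.
import importlib
from distutils.version import LooseVersion

module = importlib.import_module("numpy")
if LooseVersion(module.__version__) < "1.11":
    raise ImportError("Matplotlib requires {}>={}; you have {}"
                      .format("numpy", "1.11", module.__version__))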
26,557 |
def configure_orm(disable_connection_pool=False):
log.debug("Setting up DB connection pool (PID %s)" % os.getpid())
global engine
global Session
engine_args = {}
pool_connections = conf.getboolean('core', 'SQL_ALCHEMY_POOL_ENABLED')
if disable_connection_pool or not pool_connections:
engine_args['poolclass'] = NullPool
log.debug("settings.configure_orm(): Using NullPool")
elif 'sqlite' not in SQL_ALCHEMY_CONN:
# Pool size engine args not supported by sqlite.
# If no config value is defined for the pool size, select a reasonable value.
# 0 means no limit, which could lead to exceeding the Database connection limit.
try:
pool_size = conf.getint('core', 'SQL_ALCHEMY_POOL_SIZE')
except conf.AirflowConfigException:
pool_size = 5
# The maximum overflow size of the pool.
# When the number of checked-out connections reaches the size set in pool_size,
# additional connections will be returned up to this limit.
# When those additional connections are returned to the pool, they are disconnected and discarded.
# It follows then that the total number of simultaneous connections
# the pool will allow is pool_size + max_overflow,
# and the total number of “sleeping” connections the pool will allow is pool_size.
# max_overflow can be set to -1 to indicate no overflow limit;
# no limit will be placed on the total number
# of concurrent connections. Defaults to 10.
try:
max_overflow = conf.getint('core', 'SQL_ALCHEMY_MAX_OVERFLOW')
except conf.AirflowConfigException:
max_overflow = 10
# The DB server already has a value for wait_timeout (number of seconds after
# which an idle sleeping connection should be killed). Since other DBs may
# co-exist on the same server, SQLAlchemy should set its
# pool_recycle to an equal or smaller value.
try:
pool_recycle = conf.getint('core', 'SQL_ALCHEMY_POOL_RECYCLE')
except conf.AirflowConfigException:
pool_recycle = 1800
# Check connection at the start of each connection pool checkout.
# Typically, this is a simple statement like “SELECT 1”, but may also make use
# of some DBAPI-specific method to test the connection for liveness.
# More information here:
# https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic
try:
pool_pre_ping = conf.getboolean('core', 'SQL_ALCHEMY_POOL_PRE_PING')
except conf.AirflowConfigException:
pool_pre_ping = False
log.info("settings.configure_orm(): Using pool settings. pool_size={}, max_overflow={}, "
"pool_recycle={}, pid={}".format(pool_size, max_overflow, pool_recycle, os.getpid()))
engine_args['pool_size'] = pool_size
engine_args['pool_recycle'] = pool_recycle
engine_args['pool_pre_ping'] = pool_pre_ping
engine_args['max_overflow'] = max_overflow
# Allow the user to specify an encoding for their DB otherwise default
# to utf-8 so jobs & users with non-latin1 characters can still use
# us.
engine_args['encoding'] = conf.get('core', 'SQL_ENGINE_ENCODING', fallback='utf-8')
# For Python2 we get back a newstr and need a str
engine_args['encoding'] = engine_args['encoding'].__str__()
engine = create_engine(SQL_ALCHEMY_CONN, **engine_args)
reconnect_timeout = conf.getint('core', 'SQL_ALCHEMY_RECONNECT_TIMEOUT')
setup_event_handlers(engine, reconnect_timeout)
Session = scoped_session(
sessionmaker(autocommit=False,
autoflush=False,
bind=engine,
expire_on_commit=False))
|
def configure_orm(disable_connection_pool=False):
log.debug("Setting up DB connection pool (PID %s)" % os.getpid())
global engine
global Session
engine_args = {}
pool_connections = conf.getboolean('core', 'SQL_ALCHEMY_POOL_ENABLED')
if disable_connection_pool or not pool_connections:
engine_args['poolclass'] = NullPool
log.debug("settings.configure_orm(): Using NullPool")
elif 'sqlite' not in SQL_ALCHEMY_CONN:
# Pool size engine args not supported by sqlite.
# If no config value is defined for the pool size, select a reasonable value.
# 0 means no limit, which could lead to exceeding the Database connection limit.
try:
pool_size = conf.getint('core', 'SQL_ALCHEMY_POOL_SIZE')
except conf.AirflowConfigException:
pool_size = 5
# The maximum overflow size of the pool.
# When the number of checked-out connections reaches the size set in pool_size,
# additional connections will be returned up to this limit.
# When those additional connections are returned to the pool, they are disconnected and discarded.
# It follows then that the total number of simultaneous connections
# the pool will allow is pool_size + max_overflow,
# and the total number of “sleeping” connections the pool will allow is pool_size.
# max_overflow can be set to -1 to indicate no overflow limit;
# no limit will be placed on the total number
# of concurrent connections. Defaults to 10.
try:
max_overflow = conf.getint('core', 'SQL_ALCHEMY_MAX_OVERFLOW')
except conf.AirflowConfigException:
max_overflow = 10
# The DB server already has a value for wait_timeout (number of seconds after
# which an idle sleeping connection should be killed). Since other DBs may
# co-exist on the same server, SQLAlchemy should set its
# pool_recycle to an equal or smaller value.
try:
pool_recycle = conf.getint('core', 'SQL_ALCHEMY_POOL_RECYCLE')
except conf.AirflowConfigException:
pool_recycle = 1800
# Check connection at the start of each connection pool checkout.
# Typically, this is a simple statement like “SELECT 1”, but may also make use
# of some DBAPI-specific method to test the connection for liveness.
# More information here:
# https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic
try:
pool_pre_ping = conf.getboolean('core', 'SQL_ALCHEMY_POOL_PRE_PING', fallback=False)
except conf.AirflowConfigException:
pool_pre_ping = False
log.info("settings.configure_orm(): Using pool settings. pool_size={}, max_overflow={}, "
"pool_recycle={}, pid={}".format(pool_size, max_overflow, pool_recycle, os.getpid()))
engine_args['pool_size'] = pool_size
engine_args['pool_recycle'] = pool_recycle
engine_args['pool_pre_ping'] = pool_pre_ping
engine_args['max_overflow'] = max_overflow
# Allow the user to specify an encoding for their DB otherwise default
# to utf-8 so jobs & users with non-latin1 characters can still use
# us.
engine_args['encoding'] = conf.get('core', 'SQL_ENGINE_ENCODING', fallback='utf-8')
# For Python2 we get back a newstr and need a str
engine_args['encoding'] = engine_args['encoding'].__str__()
engine = create_engine(SQL_ALCHEMY_CONN, **engine_args)
reconnect_timeout = conf.getint('core', 'SQL_ALCHEMY_RECONNECT_TIMEOUT')
setup_event_handlers(engine, reconnect_timeout)
Session = scoped_session(
sessionmaker(autocommit=False,
autoflush=False,
bind=engine,
expire_on_commit=False))
|
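For reference, a bare SQLAlchemy 1.x equivalent of the pooled configuration above, with the config lookups replaced by hard-coded illustrative values and a placeholder DSN.
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

engine = create_engine(
    "postgresql+psycopg2://user:pass@localhost/airflow",  # placeholder DSN
    pool_size=5, max_overflow=10, pool_recycle=1800, pool_pre_ping=False,
    encoding="utf-8",
)
Session = scoped_session(
    sessionmaker(autocommit=False, autoflush=False, bind=engine,
                 expire_on_commit=False))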
45,997 |
def _four_point_to_homography(corners: torch.Tensor, deltas: torch.Tensor) -> torch.Tensor:
"""Convert 4-point representation introduced in :cite:`detone2016deep` to homography.
Args:
corners: corners tensor with shape :math:`(B, 4, 2)` where B = batch size
deltas: deltas tensor with shape :math:`(B, 4, 2)` where B = batch size
Return:
the converted homography.
"""
if not isinstance(corners, torch.Tensor):
raise TypeError(f"corners type is not a torch.Tensor. Got {type(corners)}")
if not isinstance(deltas, torch.Tensor):
raise TypeError(f"deltas type is not a torch.Tensor. Got {type(deltas)}")
if not len(corners.shape) == 3 or not corners.shape[1] == 4 or not corners.shape[2] == 2:
raise ValueError(f"Invalid input shape of corners, we expect Bx4x2. Got: {corners.shape}")
if not len(deltas.shape) == 3 or not deltas.shape[1] == 4 or not deltas.shape[2] == 2:
raise ValueError(f"Invalid input shape of deltas, we expect Bx4x2. Got: {deltas.shape}")
if not corners.size(0) == deltas.size(0):
raise ValueError(f'Expected corners batch_size ({corners.size(0)}) to match deltas batch '
f'size ({deltas.size(0)}).')
corners_hat = corners + deltas
homography_inv = get_perspective_transform(corners, corners_hat)
homography = torch.inverse(homography_inv)
return homography
|
def _four_point_to_homography(corners: torch.Tensor, deltas: torch.Tensor) -> torch.Tensor:
"""Convert 4-point representation introduced in :cite:`detone2016deep` to homography.
Args:
corners: corners tensor with shape :math:`(B, 4, 2)` where B = batch size
deltas: deltas tensor with shape :math:`(B, 4, 2)` where B = batch size
Return:
the converted homography.
"""
if not isinstance(corners, torch.Tensor):
raise TypeError(f"corners type is not a torch.Tensor. Got {type(corners)}")
if not isinstance(deltas, torch.Tensor):
raise TypeError(f"deltas type is not a torch.Tensor. Got {type(deltas)}")
if not len(corners.shape) == 3 or not corners.shape[1] == 4 or not corners.shape[2] == 2:
raise ValueError(f"Invalid input shape of corners, we expect Bx4x2. Got: {corners.shape}")
if not len(deltas.shape) == 3 or not deltas.shape[1] == 4 or not deltas.shape[2] == 2:
raise ValueError(f"Invalid input shape of deltas, we expect Bx4x2. Got: {deltas.shape}")
if not corners.size(0) == deltas.size(0):
raise ValueError(f'Expected corners batch_size ({corners.size(0)}) to match deltas batch '
f'size ({deltas.size(0)}).')
corners_hat = corners + deltas
homography_inv = get_perspective_transform(corners, corners_hat)
    homography = _torch_inverse_cast(homography_inv)
return homography
|
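A toy call: with zero deltas the four corners map to themselves, so the returned homography is (numerically) the identity. This assumes get_perspective_transform and the inverse helper used above are importable in the same scope.
import torch

corners = torch.tensor([[[0., 0.], [1., 0.], [1., 1.], [0., 1.]]])  # (1, 4, 2)
deltas = torch.zeros_like(corners)
H = _four_point_to_homography(corners, deltas)
print(H.shape)  # torch.Size([1, 3, 3])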
7,524 |
def test_sigma_clip_masked_data_values():
"""
Test that the data values & type returned by sigma_clip are the same as
its input when using masked=True (rather than being upcast to float64 &
containing NaNs as in issue #10605) and also that the input data get
copied or referenced as appropriate.
"""
data = np.array([-2, 5, -5, -6, 20, 14, 1])
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True,
copy=True)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert not np.shares_memory(result.data, data)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True,
copy=False)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert np.shares_memory(result.data, data)
# (The fact that the arrays share memory probably also means they're the
# same, but doesn't strictly prove it, eg. one could be reversed.)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True,
copy=True)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert not np.shares_memory(result.data, data)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True,
copy=False)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert np.shares_memory(result.data, data)
|
def test_sigma_clip_masked_data_values():
"""
Test that the data values & type returned by sigma_clip are the same as
its input when using masked=True (rather than being upcast to float64 &
containing NaNs as in issue #10605) and also that the input data get
copied or referenced as appropriate.
"""
data = np.array([-2, 5, -5, -6, 20, 14, 1])
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True,
copy=True)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert not np.shares_memory(result.data, data)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True,
copy=False)
assert result.dtype == data.dtype
assert_equal(result.data, data)
assert np.shares_memory(result.data, data)
# (The fact that the arrays share memory probably also means they're the
# same, but doesn't strictly prove it, eg. one could be reversed.)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True,
copy=True)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert not np.shares_memory(result.data, data)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True,
copy=False)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert np.shares_memory(result.data, data)
|
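For context, a direct call showing the masked return value that the test exercises (astropy.stats.sigma_clip).
import numpy as np
from astropy.stats import sigma_clip

data = np.array([-2, 5, -5, -6, 20, 14, 1])
clipped = sigma_clip(data, sigma=1.5, maxiters=3, masked=True, copy=True)
print(clipped.dtype)  # same integer dtype as the input
print(clipped.mask)   # True where values were rejected by the clipping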
8,752 |
def rule(*patterns):
"""Decorate a function to be called when a line matches the given pattern
Each argument is a regular expression which will trigger the function.
This decorator can be used multiple times to add more rules.
If the Sopel instance is in a channel, or sent a PRIVMSG, where a string
matching this expression is said, the function will execute. Note that
captured groups here will be retrievable through the Trigger object later.
    Inside the regular expression, some special directives can be used. $nick
    will be replaced with the nick of the bot followed by , or :, and
    $nickname will be replaced with the nick of the bot alone.
.. versionchanged:: 7.0
The :func:`rule` decorator can be called with many positional
arguments, each used to add a rule. This is equivalent as decorating
the same function many times with this decorator.
"""
def add_attribute(function):
if not hasattr(function, "rule"):
function.rule = []
for value in patterns:
if value not in function.rule:
function.rule.append(value)
return function
return add_attribute
|
def rule(*patterns):
"""Decorate a function to be called when a line matches the given pattern
Each argument is a regular expression which will trigger the function.
This decorator can be used multiple times to add more rules.
If the Sopel instance is in a channel, or sent a PRIVMSG, where a string
matching this expression is said, the function will execute. Note that
captured groups here will be retrievable through the Trigger object later.
    Inside the regular expression, some special directives can be used. $nick
    will be replaced with the nick of the bot followed by , or :, and
    $nickname will be replaced with the nick of the bot alone.
.. versionchanged:: 7.0
The :func:`rule` decorator can be called with many positional
arguments, each used to add a rule. This is equivalent as decorating
the same function multiple times with this decorator.
"""
def add_attribute(function):
if not hasattr(function, "rule"):
function.rule = []
for value in patterns:
if value not in function.rule:
function.rule.append(value)
return function
return add_attribute
|
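A hedged usage sketch: attaching two patterns to one callable. Sopel itself would pick the rule attribute up at plugin load time; the callback body is illustrative only.
@rule(r'hello,? $nickname', r'hi,? $nickname')
def greet(bot, trigger):
    bot.say('hello ' + trigger.nick)

print(greet.rule)  # ['hello,? $nickname', 'hi,? $nickname']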
43,465 |
def AmplitudeEmbedding(features, wires, pad):
r"""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits.
    If the total number of features to embed is less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be padded to ``features``. To enable this, the argument ``pad`` should be set to ``True``.
The absolute square of all elements in ``features`` has to add up to one.
.. note::
AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with
devices that implement this function.
Args:
features (array): Input array of shape ``(2**n,)``
wires (Sequence[int]): sequence of qubit indices that the template acts on
pad (Boolean): controls the activation of the padding option
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if pad==True and 2**len(wires) != len(features):
features = np.pad(features, (0, 2**len(wires)-len(features)), 'constant')
if pad==False and 2**len(wires) != len(features):
raise ValueError("AmplitudeEmbedding with no padding requires a feature vector of size 2**len(wires), which is {}; "
"got {}.".format(2 ** len(wires), len(features)))
if np.linalg.norm(features,2) != 1:
raise ValueError("AmplitudeEmbedding requires a normalized feature vector.")
QubitStateVector(features, wires=wires)
|
def AmplitudeEmbedding(features, wires, pad):
r"""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits.
    If the total number of features to embed is less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be padded to ``features``. To enable this, the argument ``pad`` should be set to ``True``.
The absolute square of all elements in ``features`` has to add up to one.
.. note::
AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with
devices that implement this function.
Args:
features (array): Input array of shape ``(2**n,)``
wires (Sequence[int]): sequence of qubit indices that the template acts on
pad (Boolean): controls the activation of the padding option
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if pad==True and 2**len(wires) != len(features):
features = np.pad(features, (0, 2**len(wires)-len(features)), 'constant')
if pad==False and 2**len(wires) != len(features):
raise ValueError("AmplitudeEmbedding with no padding requires a feature vector of size 2**len(wires), which is {}; "
"got {}.".format(2 ** len(wires), len(features)))
if np.linalg.norm(features, 2) != 1:
raise ValueError("AmplitudeEmbedding requires a normalized feature vector.")
QubitStateVector(features, wires=wires)
|
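The padding branch in isolation: three features on two wires are zero-padded to the 2**2 available amplitudes, with values chosen so the vector already satisfies the norm check above.
import numpy as np

features = np.array([1.0, 0.0, 0.0])
padded = np.pad(features, (0, 2**2 - len(features)), 'constant')
print(padded)                     # [1. 0. 0. 0.]
print(np.linalg.norm(padded, 2))  # 1.0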
12,457 |
def int_neg_callback(ctx: MethodContext) -> Type:
"""Infer a more precise return type for int.__neg__.
This is mainly used to infer the return type as LiteralType
if the original underlying object is a LiteralType object
"""
if isinstance(ctx.type, Instance) and isinstance(ctx.type.last_known_value, LiteralType):
value = ctx.type.last_known_value.value
fallback = ctx.type.last_known_value.fallback
if isinstance(value, int):
return LiteralType(-value, fallback)
return ctx.default_return_type
|
def int_neg_callback(ctx: MethodContext) -> Type:
"""Infer a more precise return type for int.__neg__.
This is mainly used to infer the return type as LiteralType
if the original underlying object is a LiteralType object
"""
if isinstance(ctx.type, Instance) and ctx.type.last_known_value is not None:
value = ctx.type.last_known_value.value
fallback = ctx.type.last_known_value.fallback
if isinstance(value, int):
return LiteralType(-value, fallback)
return ctx.default_return_type
|
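A sketch of how such a callback is registered in a mypy plugin, mirroring in simplified form how mypy's own default plugin hooks builtins.int.__neg__; it assumes int_neg_callback is importable from the module that defines it.
from typing import Callable, Optional

from mypy.plugin import MethodContext, Plugin
from mypy.types import Type

class IntNegPlugin(Plugin):
    def get_method_hook(self, fullname: str
                        ) -> Optional[Callable[[MethodContext], Type]]:
        if fullname == 'builtins.int.__neg__':
            return int_neg_callback
        return None

def plugin(version: str):
    # Entry point mypy looks for when the plugin is listed in mypy.ini.
    return IntNegPlugin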
28,262 |
def update_GUIDs(conn: ConnectionPlus) -> None:
"""
Update all GUIDs in this database where either the location code or the
work_station code is zero to use the location and work_station code from
the qcodesrc.json file in home. Runs where it is not true that both codes
are zero are skipped.
"""
log.info('Commencing update of all GUIDs in database')
cfg = qc.config
location = cfg['GUID_components']['location']
work_station = cfg['GUID_components']['work_station']
if location == 0:
log.warning('The location is still set to the default (0). Can not '
'proceed. Please configure the location before updating '
'the GUIDs.')
return
if work_station == 0:
log.warning('The work_station is still set to the default (0). Can not'
                    ' proceed. Please configure the work_station before updating '
'the GUIDs.')
return
query = f"select MAX(run_id) from runs"
c = atomic_transaction(conn, query)
no_of_runs = c.fetchall()[0][0]
# now, there are four actions we can take
def _both_nonzero(run_id: int, *args: Any) -> None:
log.info(f'Run number {run_id} already has a valid GUID, skipping.')
def _location_only_zero(run_id: int, *args: Any) -> None:
log.warning(f'Run number {run_id} has a zero (default) location '
'code, but a non-zero work station code. Please manually '
'resolve this, skipping the run now.')
def _workstation_only_zero(run_id: int, *args: Any) -> None:
log.warning(f'Run number {run_id} has a zero (default) work station'
' code, but a non-zero location code. Please manually '
'resolve this, skipping the run now.')
def _both_zero(run_id: int,
conn: ConnectionPlus,
guid_comps: Dict[str, Any]) -> None:
guid_str = generate_guid(timeint=guid_comps['time'],
sampleint=guid_comps['sample'])
with atomic(conn) as conn:
sql = f"""
UPDATE runs
SET guid = ?
where run_id == {run_id}
"""
cur = conn.cursor()
cur.execute(sql, (guid_str,))
            log.info(f'Successfully updated run number {run_id}.')
actions: Dict[Tuple[bool, bool], Callable[..., Any]]
actions = {(True, True): _both_zero,
(False, True): _workstation_only_zero,
(True, False): _location_only_zero,
(False, False): _both_nonzero}
for run_id in range(1, no_of_runs+1):
guid_str = get_guid_from_run_id(conn, run_id)
guid_comps = parse_guid(guid_str)
loc = guid_comps['location']
ws = guid_comps['work_station']
log.info(f'Updating run number {run_id}...')
actions[(loc == 0, ws == 0)](run_id, conn, guid_comps)
|
def update_GUIDs(conn: ConnectionPlus) -> None:
"""
Update all GUIDs in this database where either the location code or the
work_station code is zero to use the location and work_station code from
the qcodesrc.json file in home. Runs where it is not true that both codes
are zero are skipped.
"""
log.info('Commencing update of all GUIDs in database')
cfg = qc.config
location = cfg['GUID_components']['location']
work_station = cfg['GUID_components']['work_station']
if location == 0:
log.warning('The location is still set to the default (0). Can not '
'proceed. Please configure the location before updating '
'the GUIDs.')
return
if work_station == 0:
log.warning('The work_station is still set to the default (0). Can not'
                    ' proceed. Please configure the work_station before updating '
'the GUIDs.')
return
query = f"select MAX(run_id) from runs"
c = atomic_transaction(conn, query)
no_of_runs = c.fetchall()[0][0]
# now, there are four actions we can take
def _both_nonzero(run_id: int, *args: Any) -> None:
log.info(f'Run number {run_id} already has a valid GUID, skipping.')
def _location_only_zero(run_id: int, *args: Any) -> None:
log.warning(f'Run number {run_id} has a zero (default) location '
'code, but a non-zero work station code. Please manually '
'resolve this, skipping the run now.')
def _workstation_only_zero(run_id: int, *args: Any) -> None:
log.warning(f'Run number {run_id} has a zero (default) work station'
' code, but a non-zero location code. Please manually '
'resolve this, skipping the run now.')
def _both_zero(run_id: int,
conn: ConnectionPlus,
guid_comps: Dict[str, Any]) -> None:
guid_str = generate_guid(timeint=guid_comps['time'],
sampleint=guid_comps['sample'])
with atomic(conn) as conn:
sql = f"""
UPDATE runs
SET guid = ?
where run_id == {run_id}
"""
cur = conn.cursor()
cur.execute(sql, (guid_str,))
            log.info(f'Successfully updated run number {run_id}.')
actions: Dict[Tuple[bool, bool], Callable[[int, ConnectionPlus, Dict[str, Any]], None]]
actions = {(True, True): _both_zero,
(False, True): _workstation_only_zero,
(True, False): _location_only_zero,
(False, False): _both_nonzero}
for run_id in range(1, no_of_runs+1):
guid_str = get_guid_from_run_id(conn, run_id)
guid_comps = parse_guid(guid_str)
loc = guid_comps['location']
ws = guid_comps['work_station']
log.info(f'Updating run number {run_id}...')
actions[(loc == 0, ws == 0)](run_id, conn, guid_comps)
|
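The (bool, bool) dispatch used above, shown in isolation with stub handlers so the four cases are easy to see.
def both_zero(run_id): print(f"{run_id}: rewrite GUID")
def loc_zero(run_id): print(f"{run_id}: location missing, skip")
def ws_zero(run_id): print(f"{run_id}: work station missing, skip")
def both_set(run_id): print(f"{run_id}: already valid, skip")

actions = {(True, True): both_zero, (True, False): loc_zero,
           (False, True): ws_zero, (False, False): both_set}

for run_id, (loc, ws) in enumerate([(0, 0), (0, 7), (3, 0), (3, 7)], start=1):
    actions[(loc == 0, ws == 0)](run_id)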
20,514 |
def main(args=None):
# Initialization
param = Param()
start_time = time.time()
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
fname_anat = arguments['-i']
fname_centerline = arguments['-s']
if '-smooth' in arguments:
sigma = arguments['-smooth']
if '-param' in arguments:
param.update(arguments['-param'])
if '-r' in arguments:
remove_temp_files = int(arguments['-r'])
if '-v' in arguments:
verbose = int(arguments['-v'])
# Display arguments
sct.printv('\nCheck input arguments...')
sct.printv(' Volume to smooth .................. ' + fname_anat)
sct.printv(' Centerline ........................ ' + fname_centerline)
sct.printv(' Sigma (mm) ........................ ' + str(sigma))
sct.printv(' Verbose ........................... ' + str(verbose))
# Check that input is 3D:
from spinalcordtoolbox.image import Image
nx, ny, nz, nt, px, py, pz, pt = Image(fname_anat).dim
dim = 4 # by default, will be adjusted later
if nt == 1:
dim = 3
if nz == 1:
dim = 2
if dim == 4:
sct.printv('WARNING: the input image is 4D, please split your image to 3D before smoothing spinalcord using :\n'
'sct_image -i ' + fname_anat + ' -split t -o ' + fname_anat, verbose, 'warning')
sct.printv('4D images not supported, aborting ...', verbose, 'error')
# Extract path/file/extension
path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)
path_tmp = sct.tmp_create(basename="smooth_spinalcord", verbose=verbose)
# Copying input data to tmp folder
sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose)
sct.copy(fname_anat, os.path.join(path_tmp, "anat" + ext_anat))
sct.copy(fname_centerline, os.path.join(path_tmp, "centerline" + ext_centerline))
# go to tmp folder
curdir = os.getcwd()
os.chdir(path_tmp)
# convert to nii format
convert('anat' + ext_anat, 'anat.nii')
convert('centerline' + ext_centerline, 'centerline.nii')
# Change orientation of the input image into RPI
sct.printv('\nOrient input volume to RPI orientation...')
fname_anat_rpi = msct_image.Image("anat.nii") \
.change_orientation("RPI", generate_path=True) \
.save() \
.absolutepath
# Change orientation of the input image into RPI
sct.printv('\nOrient centerline to RPI orientation...')
fname_centerline_rpi = msct_image.Image("centerline.nii") \
.change_orientation("RPI", generate_path=True) \
.save() \
.absolutepath
# Straighten the spinal cord
# straighten segmentation
sct.printv('\nStraighten the spinal cord using centerline/segmentation...', verbose)
cache_sig = sct.cache_signature(input_files=[fname_anat_rpi, fname_centerline_rpi],
input_params={"x": "spline"})
cachefile = os.path.join(curdir, "straightening.cache")
if sct.cache_valid(cachefile, cache_sig) and os.path.isfile(os.path.join(curdir, 'warp_curve2straight.nii.gz')) and os.path.isfile(os.path.join(curdir, 'warp_straight2curve.nii.gz')) and os.path.isfile(os.path.join(curdir, 'straight_ref.nii.gz')):
# if they exist, copy them into current folder
sct.printv('Reusing existing warping field which seems to be valid', verbose, 'warning')
sct.copy(os.path.join(curdir, 'warp_curve2straight.nii.gz'), 'warp_curve2straight.nii.gz')
sct.copy(os.path.join(curdir, 'warp_straight2curve.nii.gz'), 'warp_straight2curve.nii.gz')
sct.copy(os.path.join(curdir, 'straight_ref.nii.gz'), 'straight_ref.nii.gz')
# apply straightening
sct.run(['sct_apply_transfo', '-i', fname_anat_rpi, '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', 'anat_rpi_straight.nii', '-x', 'spline'], verbose)
else:
sct.run(['sct_straighten_spinalcord', '-i', fname_anat_rpi, '-o', 'anat_rpi_straight.nii', '-s', fname_centerline_rpi, '-x', 'spline', '-param', 'algo_fitting='+param.algo_fitting], verbose)
sct.cache_save(cachefile, cache_sig)
# move warping fields locally (to use caching next time)
sct.copy('warp_curve2straight.nii.gz', os.path.join(curdir, 'warp_curve2straight.nii.gz'))
sct.copy('warp_straight2curve.nii.gz', os.path.join(curdir, 'warp_straight2curve.nii.gz'))
# Smooth the straightened image along z
sct.printv('\nSmooth the straightened image...')
sct_maths.main(args=['-i', 'anat_rpi_straight.nii',
'-smooth', ",".join([str(i) for i in sigma]),
'-o', 'anat_rpi_straight_smooth.nii',
'-v', '0'])
# Apply the reversed warping field to get back the curved spinal cord
sct.printv('\nApply the reversed warping field to get back the curved spinal cord...')
sct.run(['sct_apply_transfo', '-i', 'anat_rpi_straight_smooth.nii', '-o', 'anat_rpi_straight_smooth_curved.nii', '-d', 'anat.nii', '-w', 'warp_straight2curve.nii.gz', '-x', 'spline'], verbose)
# replace zeroed voxels by original image (issue #937)
sct.printv('\nReplace zeroed voxels by original image...', verbose)
nii_smooth = Image('anat_rpi_straight_smooth_curved.nii')
data_smooth = nii_smooth.data
data_input = Image('anat.nii').data
indzero = np.where(data_smooth == 0)
data_smooth[indzero] = data_input[indzero]
nii_smooth.data = data_smooth
nii_smooth.save('anat_rpi_straight_smooth_curved_nonzero.nii')
# come back
os.chdir(curdir)
# Generate output file
sct.printv('\nGenerate output file...')
sct.generate_output_file(os.path.join(path_tmp, "anat_rpi_straight_smooth_curved_nonzero.nii"),
file_anat + '_smooth' + ext_anat)
# Remove temporary files
if remove_temp_files == 1:
sct.printv('\nRemove temporary files...')
sct.rmtree(path_tmp)
# Display elapsed time
elapsed_time = time.time() - start_time
sct.printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's\n')
sct.display_viewer_syntax([file_anat, file_anat + '_smooth'], verbose=verbose)
|
def main(args=None):
# Initialization
param = Param()
start_time = time.time()
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
fname_anat = arguments['-i']
fname_centerline = arguments['-s']
if '-smooth' in arguments:
sigma = arguments['-smooth']
if '-param' in arguments:
param.update(arguments['-param'])
if '-r' in arguments:
remove_temp_files = int(arguments['-r'])
if '-v' in arguments:
verbose = int(arguments['-v'])
# Display arguments
sct.printv('\nCheck input arguments...')
sct.printv(' Volume to smooth .................. ' + fname_anat)
sct.printv(' Centerline ........................ ' + fname_centerline)
sct.printv(' Sigma (mm) ........................ ' + str(sigma))
sct.printv(' Verbose ........................... ' + str(verbose))
# Check that input is 3D:
from spinalcordtoolbox.image import Image
nx, ny, nz, nt, px, py, pz, pt = Image(fname_anat).dim
dim = 4 # by default, will be adjusted later
if nt == 1:
dim = 3
if nz == 1:
dim = 2
if dim == 4:
sct.printv('WARNING: the input image is 4D, please split your image to 3D before smoothing spinalcord using :\n'
'sct_image -i ' + fname_anat + ' -split t -o ' + fname_anat, verbose, 'warning')
sct.printv('4D images not supported, aborting ...', verbose, 'error')
# Extract path/file/extension
path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)
path_tmp = sct.tmp_create(basename="smooth_spinalcord", verbose=verbose)
# Copying input data to tmp folder
sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose)
sct.copy(fname_anat, os.path.join(path_tmp, "anat" + ext_anat))
sct.copy(fname_centerline, os.path.join(path_tmp, "centerline" + ext_centerline))
# go to tmp folder
curdir = os.getcwd()
os.chdir(path_tmp)
# convert to nii format
convert('anat' + ext_anat, 'anat.nii')
convert('centerline' + ext_centerline, 'centerline.nii')
# Change orientation of the input image into RPI
sct.printv('\nOrient input volume to RPI orientation...')
fname_anat_rpi = msct_image.Image("anat.nii") \
.change_orientation("RPI", generate_path=True) \
.save() \
.absolutepath
# Change orientation of the input image into RPI
sct.printv('\nOrient centerline to RPI orientation...')
fname_centerline_rpi = msct_image.Image("centerline.nii") \
.change_orientation("RPI", generate_path=True) \
.save() \
.absolutepath
# Straighten the spinal cord
# straighten segmentation
sct.printv('\nStraighten the spinal cord using centerline/segmentation...', verbose)
cache_sig = sct.cache_signature(input_files=[fname_anat_rpi, fname_centerline_rpi],
input_params={"x": "spline"})
cachefile = os.path.join(curdir, "straightening.cache")
if sct.cache_valid(cachefile, cache_sig) and os.path.isfile(os.path.join(curdir, 'warp_curve2straight.nii.gz')) and os.path.isfile(os.path.join(curdir, 'warp_straight2curve.nii.gz')) and os.path.isfile(os.path.join(curdir, 'straight_ref.nii.gz')):
# if they exist, copy them into current folder
sct.printv('Reusing existing warping field which seems to be valid', verbose, 'warning')
sct.copy(os.path.join(curdir, 'warp_curve2straight.nii.gz'), 'warp_curve2straight.nii.gz')
sct.copy(os.path.join(curdir, 'warp_straight2curve.nii.gz'), 'warp_straight2curve.nii.gz')
sct.copy(os.path.join(curdir, 'straight_ref.nii.gz'), 'straight_ref.nii.gz')
# apply straightening
sct.run(['sct_apply_transfo', '-i', fname_anat_rpi, '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', 'anat_rpi_straight.nii', '-x', 'spline'], verbose)
else:
sct.run(['sct_straighten_spinalcord', '-i', fname_anat_rpi, '-o', 'anat_rpi_straight.nii', '-s', fname_centerline_rpi, '-x', 'spline', '-param', 'algo_fitting='+param.algo_fitting], verbose)
sct.cache_save(cachefile, cache_sig)
# move warping fields locally (to use caching next time)
sct.copy('warp_curve2straight.nii.gz', os.path.join(curdir, 'warp_curve2straight.nii.gz'))
sct.copy('warp_straight2curve.nii.gz', os.path.join(curdir, 'warp_straight2curve.nii.gz'))
# Smooth the straightened image along z
sct.printv('\nSmooth the straightened image...')
sct_maths.main(args=['-i', 'anat_rpi_straight.nii',
'-smooth', sigma_smooth,
'-o', 'anat_rpi_straight_smooth.nii',
'-v', '0'])
# Apply the reversed warping field to get back the curved spinal cord
sct.printv('\nApply the reversed warping field to get back the curved spinal cord...')
sct.run(['sct_apply_transfo', '-i', 'anat_rpi_straight_smooth.nii', '-o', 'anat_rpi_straight_smooth_curved.nii', '-d', 'anat.nii', '-w', 'warp_straight2curve.nii.gz', '-x', 'spline'], verbose)
# replace zeroed voxels by original image (issue #937)
sct.printv('\nReplace zeroed voxels by original image...', verbose)
nii_smooth = Image('anat_rpi_straight_smooth_curved.nii')
data_smooth = nii_smooth.data
data_input = Image('anat.nii').data
indzero = np.where(data_smooth == 0)
data_smooth[indzero] = data_input[indzero]
nii_smooth.data = data_smooth
nii_smooth.save('anat_rpi_straight_smooth_curved_nonzero.nii')
# come back
os.chdir(curdir)
# Generate output file
sct.printv('\nGenerate output file...')
sct.generate_output_file(os.path.join(path_tmp, "anat_rpi_straight_smooth_curved_nonzero.nii"),
file_anat + '_smooth' + ext_anat)
# Remove temporary files
if remove_temp_files == 1:
sct.printv('\nRemove temporary files...')
sct.rmtree(path_tmp)
# Display elapsed time
elapsed_time = time.time() - start_time
sct.printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's\n')
sct.display_viewer_syntax([file_anat, file_anat + '_smooth'], verbose=verbose)
|
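# A minimal numpy sketch of the zero-voxel masking step used in both versions above
# (issue #937): zeroed entries of the smoothed volume are refilled from the original.
# Toy arrays stand in for the NIfTI data, purely for illustration.
import numpy as np

def fill_zeros_from_original(smoothed, original):
    """Replace zero-valued entries of `smoothed` with the corresponding `original` values."""
    out = smoothed.copy()
    indzero = np.where(out == 0)   # tuple of index arrays, one per dimension
    out[indzero] = original[indzero]
    return out

orig = np.arange(1.0, 10.0).reshape(3, 3)
smooth = orig * 0.5
smooth[0, 0] = 0.0                 # pretend the warp left this voxel at zero
print(fill_zeros_from_original(smooth, orig))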
46,196 |
def test_camera(qtbot):
"""Test vispy camera model interaction."""
viewer = Viewer()
view = viewer.window.qt_viewer
qtbot.addWidget(view)
np.random.seed(0)
data = np.random.random((10, 10, 10))
viewer.add_image(data)
    # Test that default camera values are used and the vispy camera has been
# updated
assert viewer.dims.ndisplay == 2
assert viewer.camera.ndisplay == 2
assert viewer.camera.center == (5.0, 5.0)
assert viewer.camera.angles == (0, 0, 90)
assert isinstance(view.view.camera, PanZoomCamera)
assert view.view.camera.rect.center == (5.0, 5.0)
assert view.view.camera.rect.size == (11, 11)
# Change to 3D display and check vispy camera changes
viewer.dims.ndisplay = 3
assert viewer.dims.ndisplay == 3
assert viewer.camera.ndisplay == 3
assert viewer.camera.center == (5.0, 5.0, 5.0)
assert viewer.camera.angles == (0, 0, 90)
assert isinstance(view.view.camera, ArcballCamera)
assert view.view.camera.center == (5.0, 5.0, 5.0)
assert view.view.camera.scale_factor == 11
# Update camera model and check vispy camera changes in 3D
center = (20, 10, 15)
scale = 100
angles = (-20, 10, -45)
viewer.camera.update(center=center, scale=scale, angles=angles)
assert viewer.camera.ndisplay == 3
assert viewer.camera.center == center
assert viewer.camera.scale == scale
assert viewer.camera.angles == angles
assert isinstance(view.view.camera, ArcballCamera)
assert view.view.camera.center == center[::-1]
assert view.view.camera.scale_factor == 100
    # Zoom and pan vispy camera and check camera model changes in 3D
view.view.camera.center = (12, -2, 8)
view.view.camera.scale_factor = 20
view.on_draw(None)
assert viewer.camera.center == (8, -2, 12)
assert viewer.camera.scale == 20
# Change back to 2D display and check vispy camera changes
viewer.dims.ndisplay = 2
assert viewer.dims.ndisplay == 2
assert viewer.camera.ndisplay == 2
assert isinstance(view.view.camera, PanZoomCamera)
# Update camera model and check vispy camera changes in 2D
center = (20, 30)
scale = 200
angles = (-20, 10, -45)
viewer.camera.update(center=center, scale=scale, angles=angles)
assert viewer.camera.ndisplay == 2
assert viewer.camera.center == center
assert viewer.camera.scale == scale
assert viewer.camera.angles == angles
assert isinstance(view.view.camera, PanZoomCamera)
assert view.view.camera.rect.center == (30.0, 20.0)
assert view.view.camera.rect.size == (200.0, 200.0)
# Zoom and pan vispy camera and check camera model changes in 2D
view.view.camera.zoom(2)
view.on_draw(None)
assert view.view.camera.rect.size == (400.0, 400.0)
assert viewer.camera.scale == 400
view.view.camera.zoom(0.5)
view.on_draw(None)
assert view.view.camera.rect.size == (200.0, 200.0)
assert viewer.camera.scale == 200
view.view.camera.rect = (-20, -30, 40, 10)
view.on_draw(None)
assert viewer.camera.center == (-25, 0)
assert viewer.camera.scale == 40
|
def test_camera(qtbot):
"""Test vispy camera model interaction."""
viewer = Viewer()
view = viewer.window.qt_viewer.view
qtbot.addWidget(view)
np.random.seed(0)
data = np.random.random((10, 10, 10))
viewer.add_image(data)
    # Test that default camera values are used and the vispy camera has been
# updated
assert viewer.dims.ndisplay == 2
assert viewer.camera.ndisplay == 2
assert viewer.camera.center == (5.0, 5.0)
assert viewer.camera.angles == (0, 0, 90)
assert isinstance(view.view.camera, PanZoomCamera)
assert view.view.camera.rect.center == (5.0, 5.0)
assert view.view.camera.rect.size == (11, 11)
# Change to 3D display and check vispy camera changes
viewer.dims.ndisplay = 3
assert viewer.dims.ndisplay == 3
assert viewer.camera.ndisplay == 3
assert viewer.camera.center == (5.0, 5.0, 5.0)
assert viewer.camera.angles == (0, 0, 90)
assert isinstance(view.view.camera, ArcballCamera)
assert view.view.camera.center == (5.0, 5.0, 5.0)
assert view.view.camera.scale_factor == 11
# Update camera model and check vispy camera changes in 3D
center = (20, 10, 15)
scale = 100
angles = (-20, 10, -45)
viewer.camera.update(center=center, scale=scale, angles=angles)
assert viewer.camera.ndisplay == 3
assert viewer.camera.center == center
assert viewer.camera.scale == scale
assert viewer.camera.angles == angles
assert isinstance(view.view.camera, ArcballCamera)
assert view.view.camera.center == center[::-1]
assert view.view.camera.scale_factor == 100
    # Zoom and pan vispy camera and check camera model changes in 3D
view.view.camera.center = (12, -2, 8)
view.view.camera.scale_factor = 20
view.on_draw(None)
assert viewer.camera.center == (8, -2, 12)
assert viewer.camera.scale == 20
# Change back to 2D display and check vispy camera changes
viewer.dims.ndisplay = 2
assert viewer.dims.ndisplay == 2
assert viewer.camera.ndisplay == 2
assert isinstance(view.view.camera, PanZoomCamera)
# Update camera model and check vispy camera changes in 2D
center = (20, 30)
scale = 200
angles = (-20, 10, -45)
viewer.camera.update(center=center, scale=scale, angles=angles)
assert viewer.camera.ndisplay == 2
assert viewer.camera.center == center
assert viewer.camera.scale == scale
assert viewer.camera.angles == angles
assert isinstance(view.view.camera, PanZoomCamera)
assert view.view.camera.rect.center == (30.0, 20.0)
assert view.view.camera.rect.size == (200.0, 200.0)
# Zoom and pan vispy camera and check camera model changes in 2D
view.view.camera.zoom(2)
view.on_draw(None)
assert view.view.camera.rect.size == (400.0, 400.0)
assert viewer.camera.scale == 400
view.view.camera.zoom(0.5)
view.on_draw(None)
assert view.view.camera.rect.size == (200.0, 200.0)
assert viewer.camera.scale == 200
view.view.camera.rect = (-20, -30, 40, 10)
view.on_draw(None)
assert viewer.camera.center == (-25, 0)
assert viewer.camera.scale == 40
|
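# The 3D assertions above compare the model camera centre against the vispy centre in
# reversed order (center[::-1]); a tiny sketch of that tuple reversal, with illustrative values:
model_center = (20, 10, 15)
vispy_center = model_center[::-1]    # the vispy camera expects the reversed axis order in the test above
print(vispy_center)                  # (15, 10, 20)
assert vispy_center[::-1] == model_center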
42,356 |
def validate_tag_embed_footer(footer: Any) -> None:
"""Raises a ValidationError if the given footer is invalid."""
field_validators = {
'text': (
MinLengthValidator(
limit_value=1,
message="Footer text must not be empty."
),
MaxLengthValidator(limit_value=2048)
),
'icon_url': (),
'proxy_icon_url': ()
}
if not isinstance(footer, Mapping):
raise ValidationError("Embed footer must be a mapping.")
for field_name, value in footer.items():
if field_name not in field_validators:
raise ValidationError(f"Unknown embed footer field: {field_name!r}.")
for validator in field_validators[field_name]:
validator(value)
|
def validate_tag_embed_footer(footer: Dict[str, str]) -> None:
"""Raises a ValidationError if the given footer is invalid."""
field_validators = {
'text': (
MinLengthValidator(
limit_value=1,
message="Footer text must not be empty."
),
MaxLengthValidator(limit_value=2048)
),
'icon_url': (),
'proxy_icon_url': ()
}
if not isinstance(footer, Mapping):
raise ValidationError("Embed footer must be a mapping.")
for field_name, value in footer.items():
if field_name not in field_validators:
raise ValidationError(f"Unknown embed footer field: {field_name!r}.")
for validator in field_validators[field_name]:
validator(value)
|
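# A framework-free sketch of the field-validator mapping pattern used above; Django's
# ValidationError / MinLengthValidator / MaxLengthValidator are replaced here with plain
# callables and ValueError, purely for illustration.
from collections.abc import Mapping

def require_non_empty(value):
    if not value:
        raise ValueError("Footer text must not be empty.")

def max_length(limit):
    def check(value):
        if len(value) > limit:
            raise ValueError(f"Value exceeds {limit} characters.")
    return check

FIELD_VALIDATORS = {
    "text": (require_non_empty, max_length(2048)),
    "icon_url": (),
    "proxy_icon_url": (),
}

def validate_footer(footer):
    if not isinstance(footer, Mapping):
        raise ValueError("Embed footer must be a mapping.")
    for field_name, value in footer.items():
        if field_name not in FIELD_VALIDATORS:
            raise ValueError(f"Unknown embed footer field: {field_name!r}.")
        for validator in FIELD_VALIDATORS[field_name]:
            validator(value)

validate_footer({"text": "hello"})   # passes silently
try:
    validate_footer({"text": ""})
except ValueError as exc:
    print(exc)                       # Footer text must not be empty.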
31,478 |
def send_email_to_reviewers(reviewers_emails: str, refresh_token: str, pack_name: str,
pr_number: str):
""" Compose mail and send it to the reviewers_emails, to review the changes in their pack
Args:
reviewers_emails(str): reviewers of the pack to send mail to them
refresh_token(str): refresh token to send mails using gmail API
pack_name(str): pack that was modified
pr_number(str): github pr number
"""
access_token = get_access_token(refresh_token)
credentials = AccessTokenCredentials(access_token, 'Demisto Github send mails to contributors')
service = build('gmail', 'v1', credentials=credentials)
email_content = f"Changes were made to the {pack_name} content pack that you contributed. \n"
email_content += f"Please review the changes in the following files in the contribution "
email_content += f"[Pull Request](https://github.com/demisto/content/pull/{pr_number}/files)."
email_subject = f'Changes made to {pack_name} content pack'
message = MIMEText(email_content, 'plain', 'utf-8')
message['bcc'] = reviewers_emails # send mails to all contributors in bcc
message['from'] = EMAIL_FROM
message['subject'] = email_subject
message_to_send = {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}
try:
service.users().messages().send(userId=EMAIL_FROM, body=message_to_send).execute()
print(f'Email sent to {reviewers_emails} reviewers of pack {pack_name}')
except errors.HttpError as e:
print(f'An error occurred during sending emails to contributors: {str(e)}')
sys.exit(1)
|
def send_email_to_reviewers(reviewers_emails: str, refresh_token: str, pack_name: str,
pr_number: str):
""" Compose mail and send it to the reviewers_emails, to review the changes in their pack
Args:
reviewers_emails(str): reviewers of the pack to send mail to them
refresh_token(str): refresh token to send mails using gmail API
pack_name(str): pack that was modified
pr_number(str): github pr number
"""
access_token = get_access_token(refresh_token)
credentials = AccessTokenCredentials(access_token, 'Demisto Github send mails to contributors')
service = build('gmail', 'v1', credentials=credentials)
email_content = f"Changes were made to the {pack_name} content pack that you contributed. \n"
email_content += f"Please review the changes in the following files in the contribution "
email_content += f"[Pull Request](https://github.com/demisto/content/pull/{pr_number}/files)."
email_subject = f'Changes made to {pack_name} content pack'
message = MIMEText(email_content, 'plain', 'utf-8')
message['bcc'] = reviewers_emails # send mails to all contributors in bcc
message['from'] = EMAIL_FROM
message['subject'] = email_subject
message_to_send = {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}
try:
service.users().messages().send(userId=EMAIL_FROM, body=message_to_send).execute()
print(f'Email sent to {reviewers_emails} contributors of pack {pack_name}')
except errors.HttpError as e:
print(f'An error occurred during sending emails to contributors: {str(e)}')
sys.exit(1)
|
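# The message-building part of the function above can be exercised without Gmail
# credentials; a minimal sketch of composing the MIMEText message and base64url-encoding
# it into the {'raw': ...} payload shape passed to the send call (addresses are
# illustrative placeholders).
import base64
from email.mime.text import MIMEText

def build_raw_message(bcc, sender, subject, body):
    message = MIMEText(body, 'plain', 'utf-8')
    message['bcc'] = bcc
    message['from'] = sender
    message['subject'] = subject
    return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}

payload = build_raw_message(
    bcc='reviewer@example.com',
    sender='noreply@example.com',
    subject='Changes made to SomePack content pack',
    body='Please review the changes in the linked pull request.',
)
print(payload['raw'][:60] + '...')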
2,588 |
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, "uniform", "distance", _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert y_pred_so.shape == y_test.shape
assert len(y_pred_proba_so) == n_output
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert y_pred_mo.shape == y_test.shape
assert_allclose(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert len(y_pred_proba_mo) == n_output
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_allclose(proba_mo, proba_so)
|
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, "uniform", "distance", _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert y_pred_so.shape == y_test.shape
assert len(y_pred_proba_so) == n_output
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert y_pred_mo.shape == y_test.shape
assert_allclose(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert len(y_pred_proba_mo) == n_output
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_equal(proba_mo, proba_so)
|
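# The only change between the two tests above is the final probability comparison; a small
# sketch of how the two numpy assertions differ on floating-point round-off:
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal

a = np.array([0.1 + 0.2, 1.0])
b = np.array([0.3, 1.0])

assert_allclose(a, b)            # passes: the difference is within the default tolerance
try:
    assert_array_equal(a, b)     # fails: 0.1 + 0.2 is not exactly 0.3 in binary floats
except AssertionError:
    print("assert_array_equal rejects the round-off difference")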
3,910 |
def test_greedy_modularity_communities_components():
# Test for gh-5530
G = nx.Graph([(0, 1), (2, 3), (4, 5), (5, 6)])
# usual case with 3 components
assert greedy_modularity_communities(G) == [{4, 5, 6}, {0, 1}, {2, 3}]
# best_n can make the algorithm continue even when modulatiry goes down
assert greedy_modularity_communities(G, best_n=3) == [{4, 5, 6}, {0, 1}, {2, 3}]
assert greedy_modularity_communities(G, best_n=2) == [{0, 1, 4, 5, 6}, {2, 3}]
assert greedy_modularity_communities(G, best_n=1) == [{0, 1, 2, 3, 4, 5, 6}]
|
def test_greedy_modularity_communities_components():
# Test for gh-5530
G = nx.Graph([(0, 1), (2, 3), (4, 5), (5, 6)])
# usual case with 3 components
assert greedy_modularity_communities(G) == [{4, 5, 6}, {0, 1}, {2, 3}]
# best_n can make the algorithm continue even when modularity goes down
assert greedy_modularity_communities(G, best_n=3) == [{4, 5, 6}, {0, 1}, {2, 3}]
assert greedy_modularity_communities(G, best_n=2) == [{0, 1, 4, 5, 6}, {2, 3}]
assert greedy_modularity_communities(G, best_n=1) == [{0, 1, 2, 3, 4, 5, 6}]
|
29,261 |
def download_and_unzip_files(
source_url, target_parent_dir, zip_root_name, target_root_name):
"""Downloads a zip file, unzips it, and saves the result in a given dir.
The download occurs only if the target directory that the zip file unzips
to does not exist.
NB: This function assumes that the root level of the zip file has exactly
one folder.
Args:
source_url: str. The URL from which to download the zip file.
target_parent_dir: str. The directory to save the contents of the zip
file to.
zip_root_name: str. The name of the top-level folder in the zip
directory.
target_root_name: str. The name that the top-level folder should be
renamed to in the local directory.
"""
if not os.path.exists(os.path.join(target_parent_dir, target_root_name)):
print('Downloading and unzipping file %s to %s ...' % (
zip_root_name, target_parent_dir))
common.ensure_directory_exists(target_parent_dir)
urlrequest.urlretrieve(source_url, filename=TMP_UNZIP_PATH)
try:
with zipfile.ZipFile(TMP_UNZIP_PATH, 'r') as zfile:
zfile.extractall(path=target_parent_dir)
os.remove(TMP_UNZIP_PATH)
except Exception:
if os.path.exists(TMP_UNZIP_PATH):
os.remove(TMP_UNZIP_PATH)
# Some downloads (like jqueryui-themes) may require a user-agent.
req = urlrequest.Request(source_url, None, {})
req.add_header('User-agent', 'python')
# This is needed to get a seekable filestream that can be used
# by zipfile.ZipFile.
file_stream = io.StringIO(utils.url_open(req).read())
with zipfile.ZipFile(file_stream, 'r') as zfile:
zfile.extractall(path=target_parent_dir)
# Rename the target directory.
os.rename(
os.path.join(target_parent_dir, zip_root_name),
os.path.join(target_parent_dir, target_root_name))
print('Download of %s succeeded.' % zip_root_name)
|
def download_and_unzip_files(
source_url, target_parent_dir, zip_root_name, target_root_name):
"""Downloads a zip file, unzips it, and saves the result in a given dir.
The download occurs only if the target directory that the zip file unzips
to does not exist.
NB: This function assumes that the root level of the zip file has exactly
one folder.
Args:
source_url: str. The URL from which to download the zip file.
target_parent_dir: str. The directory to save the contents of the zip
file to.
zip_root_name: str. The name of the top-level folder in the zip
directory.
target_root_name: str. The name that the top-level folder should be
renamed to in the local directory.
"""
if not os.path.exists(os.path.join(target_parent_dir, target_root_name)):
print('Downloading and unzipping file %s to %s ...' % (
zip_root_name, target_parent_dir))
common.ensure_directory_exists(target_parent_dir)
urlrequest.urlretrieve(source_url, filename=TMP_UNZIP_PATH)
try:
with zipfile.ZipFile(TMP_UNZIP_PATH, 'r') as zfile:
zfile.extractall(path=target_parent_dir)
os.remove(TMP_UNZIP_PATH)
except Exception:
if os.path.exists(TMP_UNZIP_PATH):
os.remove(TMP_UNZIP_PATH)
# Some downloads (like jqueryui-themes) may require a user-agent.
req = urllib.request.Request(source_url, None, {})
req.add_header('User-agent', 'python')
# This is needed to get a seekable filestream that can be used
# by zipfile.ZipFile.
file_stream = io.StringIO(utils.url_open(req).read())
with zipfile.ZipFile(file_stream, 'r') as zfile:
zfile.extractall(path=target_parent_dir)
# Rename the target directory.
os.rename(
os.path.join(target_parent_dir, zip_root_name),
os.path.join(target_parent_dir, target_root_name))
print('Download of %s succeeded.' % zip_root_name)
|
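# Both versions above hand the fallback download to zipfile through io.StringIO; in
# Python 3, zipfile.ZipFile generally needs a seekable *binary* stream, so a bytes buffer
# is the usual choice. A self-contained sketch that round-trips a zip archive in memory:
import io
import zipfile

buffer = io.BytesIO()
with zipfile.ZipFile(buffer, 'w') as zfile:
    zfile.writestr('pkg/README.txt', 'hello from an in-memory archive\n')

buffer.seek(0)                               # rewind so ZipFile can read from the start
with zipfile.ZipFile(buffer, 'r') as zfile:
    print(zfile.namelist())                  # ['pkg/README.txt']
    print(zfile.read('pkg/README.txt').decode())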
6,832 |
def verify_checksum(param_dict, merchant_key, checksum):
# Remove checksum
if 'CHECKSUMHASH' in param_dict:
param_dict.pop('CHECKSUMHASH')
# Get salt
paytm_hash = __decode__(checksum, IV, merchant_key)
salt = paytm_hash[-4:]
calculated_checksum = generate_checksum(
param_dict, merchant_key, salt=salt)
return calculated_checksum == checksum
|
def verify_checksum(param_dict, merchant_key, checksum):
# Remove checksum
param_dict.pop('CHECKSUMHASH', None)
# Get salt
paytm_hash = __decode__(checksum, IV, merchant_key)
salt = paytm_hash[-4:]
calculated_checksum = generate_checksum(
param_dict, merchant_key, salt=salt)
return calculated_checksum == checksum
|
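# The rewritten verify_checksum above leans on dict.pop's default argument to drop a key
# that may or may not be present; a minimal sketch of that idiom (key names are illustrative):
def strip_key(params, key):
    """Return a copy of params with `key` removed, whether or not it was present."""
    cleaned = dict(params)       # avoid mutating the caller's mapping
    cleaned.pop(key, None)       # no KeyError if the key is missing
    return cleaned

print(strip_key({'ORDERID': '42', 'CHECKSUMHASH': 'abc'}, 'CHECKSUMHASH'))
print(strip_key({'ORDERID': '42'}, 'CHECKSUMHASH'))   # missing key is handled silently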
31,420 |
def analyse_sample_file_request(file_entry, should_wait, internet_access, comments='', systems=''):
data = {
'accept-tac': 1,
'internet-access': 1 if internet_access else 0,
}
if comments != '':
data['comments'] = comments
if systems != '':
data['systems[]'] = [s.strip() for s in systems.split(',')] # type: ignore
# removing backslashes from filename as the API does not like it
# if given filename such as dir\file.xlsx - the sample will end with the name file.xlsx
filename = demisto.getFilePath(file_entry)['name']
if type(filename) == unicode: # py2 way of checking if a var is of type unicode
filename = filename.encode('ascii', 'ignore')
filename.replace('\\', '/')
with open(demisto.getFilePath(file_entry)['path'], 'rb') as f:
res = http_post('v2/analysis/submit', data=data, files={'sample': (filename, f)})
if res == 'nothing_to_analyze':
return nothing_to_analyze_output
if 'errors' in res:
LOG('Error! in command sample file: file_entry=%s' % (file_entry,))
LOG('got the following errors:\n' + '\n'.join(e['message'] for e in res['errors']))
raise Exception('command failed to run.')
shutil.rmtree(demisto.getFilePath(file_entry)['name'], ignore_errors=True)
if should_wait:
return poll_webid(res['data']['webids'][0])
web_id = res['data']['webids'][0]
result = info_request(web_id)
return analysis_to_entry('Analysis #%s' % (web_id,), result['data'])
|
def analyse_sample_file_request(file_entry, should_wait, internet_access, comments='', systems=''):
data = {
'accept-tac': 1,
'internet-access': 1 if internet_access else 0,
}
if comments != '':
data['comments'] = comments
if systems != '':
data['systems[]'] = [s.strip() for s in systems.split(',')] # type: ignore
# removing backslashes from filename as the API does not like it
# if given filename such as dir\file.xlsx - the sample will end with the name file.xlsx
filename = demisto.getFilePath(file_entry)['name']
if isinstance(filename, unicode): # py2 way of checking if a var is of type unicode
filename = filename.encode('ascii', 'ignore')
filename.replace('\\', '/')
with open(demisto.getFilePath(file_entry)['path'], 'rb') as f:
res = http_post('v2/analysis/submit', data=data, files={'sample': (filename, f)})
if res == 'nothing_to_analyze':
return nothing_to_analyze_output
if 'errors' in res:
LOG('Error! in command sample file: file_entry=%s' % (file_entry,))
LOG('got the following errors:\n' + '\n'.join(e['message'] for e in res['errors']))
raise Exception('command failed to run.')
shutil.rmtree(demisto.getFilePath(file_entry)['name'], ignore_errors=True)
if should_wait:
return poll_webid(res['data']['webids'][0])
web_id = res['data']['webids'][0]
result = info_request(web_id)
return analysis_to_entry('Analysis #%s' % (web_id,), result['data'])
|
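# The second version above swaps a type() equality test for isinstance(); isinstance also
# accepts subclasses, which is usually what such checks intend. A Python 3 sketch using
# str in place of the Python 2 unicode type assumed by the snippet:
class UserText(str):
    """A str subclass standing in for any specialised text type."""

s = UserText('dir\\file.xlsx')
print(type(s) == str)        # False: an exact type comparison ignores subclassing
print(isinstance(s, str))    # True: isinstance accepts subclasses

# str.replace returns a new string rather than modifying in place, so its result
# needs to be assigned if the normalised name is to be used.
print(s.replace('\\', '/'))  # dir/file.xlsx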
51,443 |
def split_indexes(
dims_or_levels: Union[Hashable, Sequence[Hashable]],
variables: Mapping[Hashable, Variable],
coord_names: List[Hashable],
level_coords: Mapping[Hashable, Hashable],
drop: bool = False,
) -> Tuple[Dict[Hashable, Variable], List[Hashable]]:
"""Extract (multi-)indexes (levels) as variables.
Not public API. Used in Dataset and DataArray reset_index
methods.
"""
if isinstance(dims_or_levels, str) or not isinstance(dims_or_levels, Sequence):
dims_or_levels = [dims_or_levels]
dim_levels: DefaultDict[Any, List[Hashable]] = defaultdict(list)
dims = []
for k in dims_or_levels:
if k in level_coords:
dim_levels[level_coords[k]].append(k)
else:
dims.append(k)
vars_to_replace = {}
vars_to_create: Dict[Hashable, Variable] = {}
vars_to_remove = []
for d in dims:
index = variables[d].to_index()
if isinstance(index, pd.MultiIndex):
dim_levels[d] = index.names
else:
vars_to_remove.append(d)
if not drop:
vars_to_create[str(d) + "_"] = Variable(d, index, variables[d].attrs)
for d, levs in dim_levels.items():
index = variables[d].to_index()
if len(levs) == index.nlevels:
vars_to_remove.append(d)
else:
vars_to_replace[d] = IndexVariable(d, index.droplevel(levs))
if not drop:
for lev in levs:
idx = index.get_level_values(lev)
vars_to_create[idx.name] = Variable(d, idx, variables[d].attrs)
new_variables = dict(variables)
for v in set(vars_to_remove):
del new_variables[v]
new_variables.update(vars_to_replace)
new_variables.update(vars_to_create)
new_coord_names = coord_names + [x for x in vars_to_create if x not in coord_names]
new_coord_names = [x for x in new_coord_names if x not in vars_to_remove]
return new_variables, new_coord_names
|
def split_indexes(
dims_or_levels: Union[Hashable, Sequence[Hashable]],
variables: Mapping[Hashable, Variable],
coord_names: List[Hashable],
level_coords: Mapping[Hashable, Hashable],
drop: bool = False,
) -> Tuple[Dict[Hashable, Variable], List[Hashable]]:
"""Extract (multi-)indexes (levels) as variables.
Not public API. Used in Dataset and DataArray reset_index
methods.
"""
if isinstance(dims_or_levels, str) or not isinstance(dims_or_levels, Sequence):
dims_or_levels = [dims_or_levels]
dim_levels: DefaultDict[Any, List[Hashable]] = defaultdict(list)
dims = []
for k in dims_or_levels:
if k in level_coords:
dim_levels[level_coords[k]].append(k)
else:
dims.append(k)
vars_to_replace = {}
vars_to_create: Dict[Hashable, Variable] = {}
vars_to_remove = []
for d in dims:
index = variables[d].to_index()
if isinstance(index, pd.MultiIndex):
dim_levels[d] = index.names
else:
vars_to_remove.append(d)
if not drop:
vars_to_create[str(d) + "_"] = Variable(d, index, variables[d].attrs)
for d, levs in dim_levels.items():
index = variables[d].to_index()
if len(levs) == index.nlevels:
vars_to_remove.append(d)
else:
vars_to_replace[d] = IndexVariable(d, index.droplevel(levs))
if not drop:
for lev in levs:
idx = index.get_level_values(lev)
vars_to_create[idx.name] = Variable(d, idx, variables[d].attrs)
new_variables = dict(variables)
for v in set(vars_to_remove):
del new_variables[v]
new_variables.update(vars_to_replace)
new_variables.update(vars_to_create)
new_coord_names = [x for x in coord_names if x not in vars_to_remove]
new_coord_names += [
x
for x in vars_to_create
if x not in coord_names and x not in vars_to_remove
]
return new_variables, new_coord_names
|
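# The revised split_indexes above rebuilds the coordinate-name list so that removed
# variables are dropped and newly created names are added only once; a minimal sketch of
# that list-merging idiom on plain lists (names are illustrative, not xarray's API):
def merge_coord_names(coord_names, vars_to_create, vars_to_remove):
    kept = [x for x in coord_names if x not in vars_to_remove]
    added = [x for x in vars_to_create
             if x not in coord_names and x not in vars_to_remove]
    return kept + added

print(merge_coord_names(['x', 'y', 'band'], ['band_', 'y'], ['band']))
# ['x', 'y', 'band_']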
29,069 |
def flip_bits(data: bytes) -> bytes:
"""
Flip all bits in the given bytes
:param data: The bytes whose bits to flip
:return: Bytes with the bits flipped
"""
return bytes(generate_flipped_bits(data))
|
def flip_bits(data: bytes) -> bytes:
"""
Flip all bits in the given bytes
:param data: The bytes whose bits to flip
:return: Bytes with the bits flipped
"""
return bytes(map(lambda byte: 255 - byte, data))
|
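# The replacement flip_bits above computes each flipped byte as 255 - byte; XOR with 0xFF
# is the equivalent bitwise form. A short sketch showing the two agree:
def flip_bits_subtract(data: bytes) -> bytes:
    return bytes(255 - b for b in data)

def flip_bits_xor(data: bytes) -> bytes:
    return bytes(b ^ 0xFF for b in data)

sample = b'\x00\x0f\xff\x5a'
assert flip_bits_subtract(sample) == flip_bits_xor(sample) == b'\xff\xf0\x00\xa5'
print(flip_bits_xor(sample))   # b'\xff\xf0\x00\xa5'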
53,320 |
def uniform_nullpoint_find(
x_range=[0, 1],
y_range=[0, 1],
z_range=[0, 1],
func=(lambda x, y, z: [x, y, z]),
precision=[0.05, 0.05, 0.05],
MAX_ITERATIONS=500,
err=1e-10,
):
r"""
    Returns an array of NullPoint objects representing
the nullpoints of the given vector space.
Parameters
----------
x_range: array_like
A 1 by 2 array containing the range of x-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
y_range: array_like
A 1 by 2 array containing the range of y-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
z_range: array_like
A 1 by 2 array containing the range of z-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
func: <class 'function'>
        A function that takes in 3 arguments, respectively representing an x, y, and z
coordinate of a point and returns the vector value for that point in the form
of a 1 by 3 array.
precision: array_like
A 1 by 3 array containing the approximate precision values for each dimension,
in the case where uniform arrays are being used.
The default value is [0.05, 0.05, 0.05].
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
Notes
----------
This method is described by :cite:t:`haynes:2007`.
"""
vspace = _vector_space(
None,
None,
None,
x_range,
y_range,
z_range,
None,
None,
None,
func,
precision,
)
return _vspace_iterator(vspace, MAX_ITERATIONS, err)
|
def uniform_nullpoint_find(
x_range,
y_range,
z_range,
func: Callable,
precision=[0.05, 0.05, 0.05],
MAX_ITERATIONS=500,
err=1e-10,
):
r"""
    Returns an array of NullPoint objects representing
the nullpoints of the given vector space.
Parameters
----------
x_range: array_like
A 1 by 2 array containing the range of x-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
y_range: array_like
A 1 by 2 array containing the range of y-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
z_range: array_like
A 1 by 2 array containing the range of z-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
func: <class 'function'>
        A function that takes in 3 arguments, respectively representing an x, y, and z
coordinate of a point and returns the vector value for that point in the form
of a 1 by 3 array.
precision: array_like
A 1 by 3 array containing the approximate precision values for each dimension,
in the case where uniform arrays are being used.
The default value is [0.05, 0.05, 0.05].
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
Notes
----------
This method is described by :cite:t:`haynes:2007`.
"""
vspace = _vector_space(
None,
None,
None,
x_range,
y_range,
z_range,
None,
None,
None,
func,
precision,
)
return _vspace_iterator(vspace, MAX_ITERATIONS, err)
|
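# The second signature above drops the list defaults for x_range/y_range/z_range; mutable
# default arguments are evaluated once at definition time and shared across calls, a
# classic pitfall often cited when such defaults are removed. Illustrative sketch only,
# not plasmapy code:
def append_bad(value, bucket=[]):        # the default list is created once and reused
    bucket.append(value)
    return bucket

def append_good(value, bucket=None):     # create a fresh list on each call instead
    if bucket is None:
        bucket = []
    bucket.append(value)
    return bucket

print(append_bad(1), append_bad(2))      # [1, 2] [1, 2]  -- state leaks between calls
print(append_good(1), append_good(2))    # [1] [2]        -- independent lists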
33,232 |
def auto_model(layout, scan_length=None, one_vs_rest=False):
"""Create a simple default model for each of the tasks in a BIDSLayout.
Contrasts each trial type against all other trial types and trial types
at the run level and then uses dummy contrasts at each other level
present to aggregate these results up.
Parameters
----------
layout : :obj:`bids.layout.BIDSLayout`
A BIDSLayout instance
scan_length : int
Scan length for loading event variables in cases
where the scan length can not be read from the nifti.
Primarily for testing.
one_vs_rest : bool
Set to True if you would like to autogenerate
        contrasts of each trial type against every other trial type.
Returns
-------
list
list of model dictionaries for each task
"""
base_name = layout._root.name
tasks = layout.entities['task'].unique()
task_models = []
for task_name in tasks:
# Populate model meta-data
model = OrderedDict()
model["Name"] = "_".join([base_name, task_name])
model["Description"] = ("Autogenerated model for the %s task from %s" %
(task_name, base_name))
        model["BIDSModelVersion"] = "1.0.0"
model["Input"] = {"task": [task_name]}
nodes = []
# Make run level block
transformations = OrderedDict(
Transformer='pybids-transforms-v1',
Instructions=[
OrderedDict(
Name='Factor',
Input='trial_type'
)
]
)
run = OrderedDict(Level='Run',
Name='Run',
GroupBy=['run', 'subject'],
Transformations=transformations)
# Get trial types
run_nodes = load_variables(layout, task=task_name, levels=['run'],
scan_length=scan_length)
evs = []
for n in run_nodes.nodes:
evs.extend(n.variables['trial_type'].values.values)
trial_types = np.unique(evs)
trial_type_factors = ["trial_type." + tt for tt in trial_types]
run_model = OrderedDict(Type='glm',
X=trial_type_factors)
# Add HRF
run_model['HRF'] = OrderedDict(
Variables=trial_type_factors,
Model="DoubleGamma",
Parameters=OrderedDict(
PeakDelay=3,
PeakDispersion=6,
UndershootDelay=10,
UndershootDispersion=12,
PeakUndershootRatio=0.2
)
)
run["Model"] = run_model
if one_vs_rest:
# If there are multiple trial types, build contrasts
contrasts = []
for tt in trial_types:
cdict = OrderedDict()
if len(trial_types) > 1:
cdict["Name"] = "run_" + tt + "_vs_others"
else:
cdict["Name"] = "run_" + tt
cdict["ConditionList"] = trial_type_factors
# Calculate weights for contrast
weights = np.ones(len(trial_types))
try:
weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
except ZeroDivisionError:
pass
cdict["Weights"] = list(weights)
cdict["Test"] = "t"
contrasts.append(cdict)
run["Contrasts"] = contrasts
nodes.append(run)
if one_vs_rest:
# if there are multiple sessions, t-test run level contrasts at
# session level
sessions = layout.get_sessions()
if len(sessions) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Session", contrast_names))
subjects = layout.get_subjects()
if len(subjects) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Subject", contrast_names))
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Dataset", contrast_names))
model["Nodes"] = nodes
task_models.append(model)
return task_models
|
def auto_model(layout, scan_length=None, one_vs_rest=False):
"""Create a simple default model for each of the tasks in a BIDSLayout.
Contrasts each trial type against all other trial types and trial types
at the run level and then uses dummy contrasts at each other level
present to aggregate these results up.
Parameters
----------
layout : :obj:`bids.layout.BIDSLayout`
A BIDSLayout instance
scan_length : int
Scan length for loading event variables in cases
where the scan length can not be read from the nifti.
Primarily for testing.
one_vs_rest : bool
Set to True if you would like to autogenerate
        contrasts of each trial type against every other trial type.
Returns
-------
list
list of model dictionaries for each task
"""
base_name = layout._root.name
tasks = layout.entities['task'].unique()
task_models = []
for task_name in tasks:
# Populate model meta-data
model = OrderedDict()
model["Name"] = "_".join([base_name, task_name])
model["Description"] = ("Autogenerated model for the %s task from %s" %
(task_name, base_name))
        model["BIDSModelVersion"] = "1.0.0"
model["Input"] = {"task": [task_name]}
nodes = []
# Make run level block
transformations = OrderedDict(
Transformer='pybids-transforms-v1',
Instructions=[
OrderedDict(
Name='Factor',
Input='trial_type'
)
]
)
run = OrderedDict(Level='Run',
Name='Run',
GroupBy=['run', 'subject'],
Transformations=transformations)
# Get trial types
run_nodes = load_variables(layout, task=task_name, levels=['run'],
scan_length=scan_length)
evs = []
for n in run_nodes.nodes:
evs.extend(n.variables['trial_type'].values.values)
trial_types = np.unique(evs)
trial_type_factors = ["trial_type." + tt for tt in trial_types]
run_model = dict(Type='glm', X=trial_type_factors)
# Add HRF
run_model['HRF'] = OrderedDict(
Variables=trial_type_factors,
Model="DoubleGamma",
Parameters=OrderedDict(
PeakDelay=3,
PeakDispersion=6,
UndershootDelay=10,
UndershootDispersion=12,
PeakUndershootRatio=0.2
)
)
run["Model"] = run_model
if one_vs_rest:
# If there are multiple trial types, build contrasts
contrasts = []
for tt in trial_types:
cdict = OrderedDict()
if len(trial_types) > 1:
cdict["Name"] = "run_" + tt + "_vs_others"
else:
cdict["Name"] = "run_" + tt
cdict["ConditionList"] = trial_type_factors
# Calculate weights for contrast
weights = np.ones(len(trial_types))
try:
weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
except ZeroDivisionError:
pass
cdict["Weights"] = list(weights)
cdict["Test"] = "t"
contrasts.append(cdict)
run["Contrasts"] = contrasts
nodes.append(run)
if one_vs_rest:
# if there are multiple sessions, t-test run level contrasts at
# session level
sessions = layout.get_sessions()
if len(sessions) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Session", contrast_names))
subjects = layout.get_subjects()
if len(subjects) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Subject", contrast_names))
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
nodes.append(_make_passthrough_contrast(
"Dataset", contrast_names))
model["Nodes"] = nodes
task_models.append(model)
return task_models
|
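# The contrast loop above weights the target trial type +1 and spreads -1/(n-1) over the
# remaining types so the weights sum to zero; a small numpy sketch with illustrative
# condition names:
import numpy as np

def one_vs_rest_weights(trial_types, target):
    trial_types = np.asarray(trial_types)
    weights = np.ones(len(trial_types))
    if len(trial_types) > 1:
        weights[trial_types != target] = -1.0 / (len(trial_types) - 1)
    return weights

w = one_vs_rest_weights(['go', 'stop', 'rest'], 'go')
print(w, 'sum =', w.sum())   # [ 1.  -0.5 -0.5] sum = 0.0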
12,248 |
def get_parser() -> ArgumentParser:
parser = ArgumentParser(
usage="%(prog)s [options] [connection string]",
description=(
"htop like application for PostgreSQL " "server activity monitoring."
),
epilog=(
"The connection string can be in the form of a list of "
"Key/Value parameters or an URI as described in the PostgreSQL documentation. "
"The parsing is delegated to the libpq: different versions of the client library "
"may support different formats or parameters (for example, connection URIs are "
"only supported from libpq 9.2)"
),
add_help=False,
)
# Connection string
parser.add_argument(
"connection_string",
help="A valid connection string to the database, e.g.: "
"'host=HOSTNAME port=PORT user=USER dbname=DBNAME'.",
nargs="?",
)
# -U / --username
parser.add_argument(
"-U",
"--username",
dest="username",
help="Database user name",
metavar="USERNAME",
)
# -p / --port
parser.add_argument(
"-p",
"--port",
dest="port",
help="Database server port",
metavar="PORT",
)
# -h / --host
parser.add_argument(
"-h",
"--host",
dest="host",
help="Database server host or socket directory",
metavar="HOSTNAME",
)
# -d / --dbname
parser.add_argument(
"-d",
"--dbname",
dest="dbname",
help="Database name to connect to",
metavar="DBNAME",
)
# --blocksize
parser.add_argument(
"--blocksize",
dest="blocksize",
help="Filesystem blocksize (default: %(default)s)",
metavar="BLOCKSIZE",
type=int,
default=4096,
)
# --rds
parser.add_argument(
"--rds",
dest="rds",
action="store_true",
help="Enable support for AWS RDS",
default=False,
)
# --output
parser.add_argument(
"--output",
dest="output",
help="Store running queries as CSV",
metavar="FILEPATH",
default=None,
)
# --no-db-size
parser.add_argument(
"--no-db-size",
dest="nodbsize",
action="store_true",
help="Skip total size of DB",
default=False,
)
# --wrap-query
parser.add_argument(
"-w",
"--wrap-query",
dest="wrap_query",
action="store_true",
help="Wrap query column instead of truncating",
default=False,
)
# --duration-mode
parser.add_argument(
"--duration-mode",
dest="durationmode",
help="Duration mode. Values: 1-QUERY(default), 2-TRANSACTION, 3-BACKEND",
metavar="DURATION_MODE",
choices=["1", "2", "3"],
default="1",
)
# --min-duration
parser.add_argument(
"--min-duration",
dest="minduration",
help="Don't display queries with smaller than specified duration (in seconds)",
metavar="SECONDS",
type=float,
default=0,
)
# --filter
parser.add_argument(
"--filter",
dest="filters",
help=(
"Filter activities with a (case insensitive) regular expression applied on selected fields. "
"Known fields are: dbname."
),
action="append",
metavar="FIELD:REGEX",
default=[],
)
# --version
parser.add_argument(
"--version",
help="show program's version number and exit",
action="version",
version=f"%(prog)s {__version__}",
)
# --help
parser.add_argument(
"--help",
dest="help",
action="store_true",
help="Show this help message and exit",
default=False,
)
group = parser.add_argument_group(
"Display Options", "you can exclude some columns by using them."
)
# --no-pid
group.add_argument(
"--no-pid",
dest="nopid",
action="store_true",
help="Disable PID.",
default=False,
)
# --no-database
group.add_argument(
"--no-database",
dest="nodb",
action="store_true",
help="Disable DATABASE",
default=False,
)
# --no-user
group.add_argument(
"--no-user",
dest="nouser",
action="store_true",
help="Disable USER",
default=False,
)
# --no-client
group.add_argument(
"--no-client",
dest="noclient",
action="store_true",
help="Disable CLIENT",
default=False,
)
# --no-cpu
group.add_argument(
"--no-cpu",
dest="nocpu",
action="store_true",
help="Disable CPU%%",
default=False,
)
# --no-mem
group.add_argument(
"--no-mem",
dest="nomem",
action="store_true",
help="Disable MEM%%",
default=False,
)
# --no-read
group.add_argument(
"--no-read",
dest="noread",
action="store_true",
help="Disable READ/s",
default=False,
)
# --no-write
group.add_argument(
"--no-write",
dest="nowrite",
action="store_true",
help="Disable WRITE/s",
default=False,
)
# --no-time
group.add_argument(
"--no-time",
dest="notime",
action="store_true",
help="Disable TIME+",
default=False,
)
# --no-wait
group.add_argument(
"--no-wait", dest="nowait", action="store_true", help="Disable W", default=False
)
# --no-app-name
group.add_argument(
"--no-app-name",
dest="noappname",
action="store_true",
help="Disable App",
default=False,
)
# --hide-queries-in-logs
group.add_argument(
"--hide-queries-in-logs",
dest="hide_queries_in_logs",
action="store_true",
help="Disable log_min_duration_statements and log_min_duration_sample for pg_activity",
default=False,
)
# --no-inst-info
group.add_argument(
"--no-inst-info",
dest="show_instance_info_in_header",
action="store_false",
help="Display instance information in header",
default=True,
)
# --no-sys-info
group.add_argument(
"--no-sys-info",
dest="show_system_info_in_header",
action="store_false",
help="Display system information in header",
default=True,
)
# --no-proc-info
group.add_argument(
"--no-proc-info",
dest="show_worker_info_in_header",
action="store_false",
help="Display workers process information in header",
default=True,
)
return parser
|
def get_parser() -> ArgumentParser:
parser = ArgumentParser(
usage="%(prog)s [options] [connection string]",
description=(
"htop like application for PostgreSQL server activity monitoring."
),
epilog=(
"The connection string can be in the form of a list of "
"Key/Value parameters or an URI as described in the PostgreSQL documentation. "
"The parsing is delegated to the libpq: different versions of the client library "
"may support different formats or parameters (for example, connection URIs are "
"only supported from libpq 9.2)"
),
add_help=False,
)
# Connection string
parser.add_argument(
"connection_string",
help="A valid connection string to the database, e.g.: "
"'host=HOSTNAME port=PORT user=USER dbname=DBNAME'.",
nargs="?",
)
# -U / --username
parser.add_argument(
"-U",
"--username",
dest="username",
help="Database user name",
metavar="USERNAME",
)
# -p / --port
parser.add_argument(
"-p",
"--port",
dest="port",
help="Database server port",
metavar="PORT",
)
# -h / --host
parser.add_argument(
"-h",
"--host",
dest="host",
help="Database server host or socket directory",
metavar="HOSTNAME",
)
# -d / --dbname
parser.add_argument(
"-d",
"--dbname",
dest="dbname",
help="Database name to connect to",
metavar="DBNAME",
)
# --blocksize
parser.add_argument(
"--blocksize",
dest="blocksize",
help="Filesystem blocksize (default: %(default)s)",
metavar="BLOCKSIZE",
type=int,
default=4096,
)
# --rds
parser.add_argument(
"--rds",
dest="rds",
action="store_true",
help="Enable support for AWS RDS",
default=False,
)
# --output
parser.add_argument(
"--output",
dest="output",
help="Store running queries as CSV",
metavar="FILEPATH",
default=None,
)
# --no-db-size
parser.add_argument(
"--no-db-size",
dest="nodbsize",
action="store_true",
help="Skip total size of DB",
default=False,
)
# --wrap-query
parser.add_argument(
"-w",
"--wrap-query",
dest="wrap_query",
action="store_true",
help="Wrap query column instead of truncating",
default=False,
)
# --duration-mode
parser.add_argument(
"--duration-mode",
dest="durationmode",
help="Duration mode. Values: 1-QUERY(default), 2-TRANSACTION, 3-BACKEND",
metavar="DURATION_MODE",
choices=["1", "2", "3"],
default="1",
)
# --min-duration
parser.add_argument(
"--min-duration",
dest="minduration",
help="Don't display queries with smaller than specified duration (in seconds)",
metavar="SECONDS",
type=float,
default=0,
)
# --filter
parser.add_argument(
"--filter",
dest="filters",
help=(
"Filter activities with a (case insensitive) regular expression applied on selected fields. "
"Known fields are: dbname."
),
action="append",
metavar="FIELD:REGEX",
default=[],
)
# --version
parser.add_argument(
"--version",
help="show program's version number and exit",
action="version",
version=f"%(prog)s {__version__}",
)
# --help
parser.add_argument(
"--help",
dest="help",
action="store_true",
help="Show this help message and exit",
default=False,
)
group = parser.add_argument_group(
"Display Options", "you can exclude some columns by using them."
)
# --no-pid
group.add_argument(
"--no-pid",
dest="nopid",
action="store_true",
help="Disable PID.",
default=False,
)
# --no-database
group.add_argument(
"--no-database",
dest="nodb",
action="store_true",
help="Disable DATABASE",
default=False,
)
# --no-user
group.add_argument(
"--no-user",
dest="nouser",
action="store_true",
help="Disable USER",
default=False,
)
# --no-client
group.add_argument(
"--no-client",
dest="noclient",
action="store_true",
help="Disable CLIENT",
default=False,
)
# --no-cpu
group.add_argument(
"--no-cpu",
dest="nocpu",
action="store_true",
help="Disable CPU%%",
default=False,
)
# --no-mem
group.add_argument(
"--no-mem",
dest="nomem",
action="store_true",
help="Disable MEM%%",
default=False,
)
# --no-read
group.add_argument(
"--no-read",
dest="noread",
action="store_true",
help="Disable READ/s",
default=False,
)
# --no-write
group.add_argument(
"--no-write",
dest="nowrite",
action="store_true",
help="Disable WRITE/s",
default=False,
)
# --no-time
group.add_argument(
"--no-time",
dest="notime",
action="store_true",
help="Disable TIME+",
default=False,
)
# --no-wait
group.add_argument(
"--no-wait", dest="nowait", action="store_true", help="Disable W", default=False
)
# --no-app-name
group.add_argument(
"--no-app-name",
dest="noappname",
action="store_true",
help="Disable App",
default=False,
)
# --hide-queries-in-logs
group.add_argument(
"--hide-queries-in-logs",
dest="hide_queries_in_logs",
action="store_true",
help="Disable log_min_duration_statements and log_min_duration_sample for pg_activity",
default=False,
)
# --no-inst-info
group.add_argument(
"--no-inst-info",
dest="show_instance_info_in_header",
action="store_false",
help="Display instance information in header",
default=True,
)
# --no-sys-info
group.add_argument(
"--no-sys-info",
dest="show_system_info_in_header",
action="store_false",
help="Display system information in header",
default=True,
)
# --no-proc-info
group.add_argument(
"--no-proc-info",
dest="show_worker_info_in_header",
action="store_false",
help="Display workers process information in header",
default=True,
)
return parser
|
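# The parser above is built with add_help=False so that -h can mean --host rather than
# help, with --help re-added as a plain flag; a stripped-down sketch of that pattern
# (program and option names here are illustrative):
from argparse import ArgumentParser

demo = ArgumentParser(prog='demo', add_help=False)   # keep -h free for "host"
demo.add_argument('-h', '--host', dest='host', metavar='HOSTNAME',
                  help='Database server host or socket directory')
demo.add_argument('--help', dest='help', action='store_true', default=False,
                  help='Show this help message and exit')

args = demo.parse_args(['-h', 'db.internal'])
print(args.host)             # db.internal
if demo.parse_args(['--help']).help:
    demo.print_help()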
33,497 |
def _call_safe(func: Callable, args: Tuple, exception_message: str):
"""
Call the given function with the given arguments, and if it fails, log the given exception_message.
If loggin.DEBUG is set for the logger, then we also log the traceback.
:param func: function to call
:param args: arguments to pass
:param exception_message: message to log on exception
:return: whatever the func returns
"""
try:
return func(*args)
except Exception as e:
if LOG.isEnabledFor(logging.DEBUG):
LOG.exception(exception_message)
else:
LOG.error("%s: %s", exception_message, e)
|
def _call_safe(func: Callable, args: Tuple, exception_message: str):
"""
Call the given function with the given arguments, and if it fails, log the given exception_message.
If logging.DEBUG is set for the logger, then we also log the traceback.
:param func: function to call
:param args: arguments to pass
:param exception_message: message to log on exception
:return: whatever the func returns
"""
try:
return func(*args)
except Exception as e:
if LOG.isEnabledFor(logging.DEBUG):
LOG.exception(exception_message)
else:
LOG.error("%s: %s", exception_message, e)
|
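# A usage sketch for the logging helper above, re-declared here so the example is
# self-contained; the logger name and the failing function are illustrative:
import logging
from typing import Callable, Tuple

LOG = logging.getLogger('call_safe_demo')
logging.basicConfig(level=logging.INFO)

def _call_safe(func: Callable, args: Tuple, exception_message: str):
    try:
        return func(*args)
    except Exception as e:
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.exception(exception_message)              # message plus full traceback
        else:
            LOG.error('%s: %s', exception_message, e)     # compact one-line message

def divide(a, b):
    return a / b

print(_call_safe(divide, (10, 2), 'division failed'))     # 5.0
_call_safe(divide, (1, 0), 'division failed')             # logs a single error line
LOG.setLevel(logging.DEBUG)
_call_safe(divide, (1, 0), 'division failed')             # now includes the traceback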
26,439 |
def setup_matchedfltr_dax_generated_multi(workflow, science_segs, datafind_outs,
tmplt_banks, output_dir,
injection_file=None,
tags=None):
'''
    Set up matched-filter jobs that are generated as part of the workflow in
which a single job reads in and generates triggers over multiple ifos.
This
module can support any matched-filter code that is similar in principle to
pycbc_multi_inspiral or lalapps_coh_PTF_inspiral, but for new codes some
additions are needed to define Executable and Job sub-classes
(see jobutils.py).
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the coincidence jobs will be added to.
science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
The list of times that are being analysed in this workflow.
datafind_outs : pycbc.workflow.core.FileList
        A FileList of the datafind files that are needed to obtain the
data used in the analysis.
tmplt_banks : pycbc.workflow.core.FileList
        A FileList of the template bank files that will serve as input
in this stage.
output_dir : path
The directory in which output will be stored.
injection_file : pycbc.workflow.core.File, optional (default=None)
If given the file containing the simulation file to be sent to these
jobs on the command line. If not given no file will be sent.
tags : list of strings (optional, default = [])
A list of the tagging strings that will be used for all jobs created
by this call to the workflow. An example might be ['BNSINJECTIONS'] or
['NOINJECTIONANALYSIS']. This will be used in output names.
Returns
-------
inspiral_outs : pycbc.workflow.core.FileList
A list of output files written by this stage. This *will not* contain
any intermediate products produced within this stage of the workflow.
If you require access to any intermediate products produced at this
stage you can call the various sub-functions directly.
'''
if tags is None:
tags = []
# Need to get the exe to figure out what sections are analysed, what is
# discarded etc. This should *not* be hardcoded, so using a new executable
# will require a bit of effort here ....
cp = workflow.cp
ifos = sorted(science_segs.keys())
match_fltr_exe = os.path.basename(cp.get('executables','inspiral'))
# List for holding the output
inspiral_outs = FileList([])
logging.info("Setting up matched-filtering for %s." %(' '.join(ifos),))
if match_fltr_exe == 'pycbc_multi_inspiral':
exe_class = select_matchedfilter_class(match_fltr_exe)
cp.set('inspiral', 'ra',\
str(radians(float(cp.get('workflow', 'ra')))))
cp.set('inspiral', 'dec',\
str(radians(float(cp.get('workflow', 'dec')))))
# At the moment we aren't using sky grids, but when we do this code
# might be used then.
# from pycbc.workflow.grb_utils import get_sky_grid_scale
# if cp.has_option("jitter_skyloc", "apply-fermi-error"):
# cp.set('inspiral', 'sky-error',
# str(get_sky_grid_scale(float(cp.get('workflow',
# 'sky-error')))))
# else:
# cp.set('inspiral', 'sky-error',
# str(get_sky_grid_scale(float(cp.get('workflow',
# 'sky-error')),
# sigma_sys=0.0)))
# cp.set('inspiral', 'trigger-time',\
# cp.get('workflow', 'trigger-time'))
# cp.set('inspiral', 'block-duration',
# str(abs(science_segs[ifos[0]][0]) - \
# 2 * int(cp.get('inspiral', 'pad-data'))))
job_instance = exe_class(workflow.cp, 'inspiral', ifo=ifos,
out_dir=output_dir,
injection_file=injection_file,
tags=tags)
if cp.has_option("workflow", "do-long-slides") and "slide" in tags[-1]:
slide_num = int(tags[-1].replace("slide", ""))
logging.info("Setting up matched-filtering for slide {}"
.format(slide_num))
slide_shift = int(cp.get("inspiral", "segment-length"))
time_slide_dict = {ifo: (slide_num + 1) * ix * slide_shift
for ix, ifo in enumerate(ifos)}
multi_ifo_coherent_job_setup(workflow, inspiral_outs, job_instance,
science_segs, datafind_outs,
output_dir, parents=tmplt_banks,
slide_dict=time_slide_dict)
else:
multi_ifo_coherent_job_setup(workflow, inspiral_outs, job_instance,
science_segs, datafind_outs,
output_dir, parents=tmplt_banks)
else:
# Select the appropriate class
raise ValueError("Not currently supported.")
return inspiral_outs
|
def setup_matchedfltr_dax_generated_multi(workflow, science_segs, datafind_outs,
tmplt_banks, output_dir,
injection_file=None,
tags=None):
'''
    Set up matched-filter jobs that are generated as part of the workflow in
which a single job reads in and generates triggers over multiple ifos.
This
module can support any matched-filter code that is similar in principle to
pycbc_multi_inspiral or lalapps_coh_PTF_inspiral, but for new codes some
additions are needed to define Executable and Job sub-classes
(see jobutils.py).
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the coincidence jobs will be added to.
science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
The list of times that are being analysed in this workflow.
datafind_outs : pycbc.workflow.core.FileList
        A FileList of the datafind files that are needed to obtain the
data used in the analysis.
tmplt_banks : pycbc.workflow.core.FileList
        A FileList of the template bank files that will serve as input
in this stage.
output_dir : path
The directory in which output will be stored.
injection_file : pycbc.workflow.core.File, optional (default=None)
If given the file containing the simulation file to be sent to these
jobs on the command line. If not given no file will be sent.
tags : list of strings (optional, default = [])
A list of the tagging strings that will be used for all jobs created
by this call to the workflow. An example might be ['BNSINJECTIONS'] or
['NOINJECTIONANALYSIS']. This will be used in output names.
Returns
-------
inspiral_outs : pycbc.workflow.core.FileList
A list of output files written by this stage. This *will not* contain
any intermediate products produced within this stage of the workflow.
If you require access to any intermediate products produced at this
stage you can call the various sub-functions directly.
'''
if tags is None:
tags = []
# Need to get the exe to figure out what sections are analysed, what is
# discarded etc. This should *not* be hardcoded, so using a new executable
# will require a bit of effort here ....
cp = workflow.cp
ifos = sorted(science_segs.keys())
match_fltr_exe = os.path.basename(cp.get('executables','inspiral'))
# List for holding the output
inspiral_outs = FileList([])
logging.info("Setting up matched-filtering for %s." %(' '.join(ifos),))
if match_fltr_exe == 'pycbc_multi_inspiral':
exe_class = select_matchedfilter_class(match_fltr_exe)
cp.set('inspiral', 'ra',
str(radians(float(cp.get('workflow', 'ra')))))
cp.set('inspiral', 'dec',\
str(radians(float(cp.get('workflow', 'dec')))))
# At the moment we aren't using sky grids, but when we do this code
# might be used then.
# from pycbc.workflow.grb_utils import get_sky_grid_scale
# if cp.has_option("jitter_skyloc", "apply-fermi-error"):
# cp.set('inspiral', 'sky-error',
# str(get_sky_grid_scale(float(cp.get('workflow',
# 'sky-error')))))
# else:
# cp.set('inspiral', 'sky-error',
# str(get_sky_grid_scale(float(cp.get('workflow',
# 'sky-error')),
# sigma_sys=0.0)))
# cp.set('inspiral', 'trigger-time',\
# cp.get('workflow', 'trigger-time'))
# cp.set('inspiral', 'block-duration',
# str(abs(science_segs[ifos[0]][0]) - \
# 2 * int(cp.get('inspiral', 'pad-data'))))
job_instance = exe_class(workflow.cp, 'inspiral', ifo=ifos,
out_dir=output_dir,
injection_file=injection_file,
tags=tags)
if cp.has_option("workflow", "do-long-slides") and "slide" in tags[-1]:
slide_num = int(tags[-1].replace("slide", ""))
logging.info("Setting up matched-filtering for slide {}"
.format(slide_num))
slide_shift = int(cp.get("inspiral", "segment-length"))
time_slide_dict = {ifo: (slide_num + 1) * ix * slide_shift
for ix, ifo in enumerate(ifos)}
multi_ifo_coherent_job_setup(workflow, inspiral_outs, job_instance,
science_segs, datafind_outs,
output_dir, parents=tmplt_banks,
slide_dict=time_slide_dict)
else:
multi_ifo_coherent_job_setup(workflow, inspiral_outs, job_instance,
science_segs, datafind_outs,
output_dir, parents=tmplt_banks)
else:
# Select the appropriate class
raise ValueError("Not currently supported.")
return inspiral_outs
|
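The do-long-slides branch above derives per-detector time offsets from the slide tag and the inspiral segment length. A minimal standalone sketch of that construction (the ifo names, slide tag and 256 s segment length are assumed values for illustration only):

def build_time_slide_dict(ifos, slide_tag, segment_length):
    """Reproduce the slide_dict construction used when do-long-slides is set:
    each detector is shifted by (slide_num + 1) * index * segment_length."""
    slide_num = int(slide_tag.replace("slide", ""))
    return {ifo: (slide_num + 1) * ix * segment_length
            for ix, ifo in enumerate(sorted(ifos))}

print(build_time_slide_dict(["H1", "L1", "V1"], "slide2", 256))
# -> {'H1': 0, 'L1': 768, 'V1': 1536}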
35,153 |
def gels(a, b):
"""Solves over/well/under-determined linear systems.
    Computes the least-squares solution to the equation ``ax = b`` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = numpy.promote_types(a.dtype.char, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
geqrf_helper = getattr(cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(cusolver, t + 'geqrf')
trsm = getattr(cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(cusolver, t + 'unmqr')
no_trans = cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = cublas.CUBLAS_OP_T
else:
trans = cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
nrhs = b.shape[1] if b.ndim == 2 else 1
dev_info = cupy.empty(1, dtype=numpy.int32)
tau = cupy.empty(mn_min, dtype=dtype)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
a_data_ptr = a.data.ptr
b_data_ptr = b.data.ptr
a = cupy.asfortranarray(a, dtype=dtype)
b = cupy.asfortranarray(b, dtype=dtype)
if a.data.ptr == a_data_ptr:
a = a.copy()
if b.data.ptr == b_data_ptr:
b = b.copy()
if m >= n: # over/well-determined systems
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, 1, a.data.ptr, m,
b.data.ptr, m)
if b.ndim == 1:
return b[:n]
else:
return b[:n, :]
else: # under-determined systems
a = cupy.asfortranarray(a.conj().T)
if b.ndim == 1:
bb = cupy.empty((n,), dtype=dtype, order='F')
bb[:m] = b
else:
bb = cupy.empty((n, nrhs), dtype=dtype, order='F')
bb[:m, :] = b
b = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, trans,
cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, 1, a.data.ptr, n,
b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
|
def gels(a, b):
"""Solves over/well/under-determined linear systems.
    Computes the least-squares solution to the equation ``ax = b`` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = numpy.promote_types(a.dtype.char, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
geqrf_helper = getattr(cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(cusolver, t + 'geqrf')
trsm = getattr(cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(cusolver, t + 'unmqr')
no_trans = cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = cublas.CUBLAS_OP_T
else:
trans = cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
nrhs = b.shape[1] if b.ndim == 2 else 1
dev_info = cupy.empty(1, dtype=numpy.int32)
tau = cupy.empty(mn_min, dtype=dtype)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
a_data_ptr = a.data.ptr
b_data_ptr = b.data.ptr
a = cupy.asfortranarray(a, dtype=dtype)
b = cupy.asfortranarray(b, dtype=dtype)
if a.data.ptr == a_data_ptr:
a = a.copy()
if b.data.ptr == b_data_ptr:
b = b.copy()
if m >= n: # over/well-determined systems
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, 1, a.data.ptr, m,
b.data.ptr, m)
return b[:n]
else: # under-determined systems
a = cupy.asfortranarray(a.conj().T)
if b.ndim == 1:
bb = cupy.empty((n,), dtype=dtype, order='F')
bb[:m] = b
else:
bb = cupy.empty((n, nrhs), dtype=dtype, order='F')
bb[:m, :] = b
b = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, trans,
cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, 1, a.data.ptr, n,
b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
|
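A minimal usage sketch for gels(), assuming a CUDA device is available and that gels and the cusolver/cublas helpers of its module are importable (the function is shown here as a bare name); the check against numpy.linalg.lstsq is only illustrative:

import cupy
import numpy

m, n = 6, 3
a = cupy.random.rand(m, n)                 # float64, over-determined system
b = cupy.random.rand(m)
x = gels(a, b)                             # least-squares solution, shape (n,)
x_ref, *_ = numpy.linalg.lstsq(cupy.asnumpy(a), cupy.asnumpy(b), rcond=None)
assert numpy.allclose(cupy.asnumpy(x), x_ref)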
46,448 |
def concatenate(series: Sequence['TimeSeries'],
axis: Union[str, int] = 0,
ignore_time_axes: bool = False):
"""Concatenates multiple ``TimeSeries`` along a given axis.
``axis`` can be an integer in (0, 1, 2) to denote (time, component, sample) or, alternatively,
a string denoting the corresponding dimension of the underlying ``DataArray``.
Parameters
----------
series : Sequence[TimeSeries]
sequence of ``TimeSeries`` to concatenate
axis : Union[str, int]
axis along which the series will be concatenated.
ignore_time_axes : bool
Allow concatenation even when some series do not have matching time axes.
When done along component or sample dimensions, concatenation will work as long as the series
have the same lengths (in this case the resulting series will have the time axis of the first
provided series). When done along time dimension, concatenation will work even if the time axes
are not contiguous (in this case, the resulting series will have a start time matching the start time
of the first provided series). Default: False.
Returns
-------
TimeSeries
concatenated series
"""
time_dims = [ts.time_dim for ts in series]
if isinstance(axis, str):
if axis == DIMS[1]:
axis = 1
elif axis == DIMS[2]:
axis = 2
else:
raise_if_not(len(set(time_dims)) == 1 and axis == time_dims[0],
'Unrecognised `axis` name. If `axis` denotes the time axis, all provided '
'series must have the same time axis name (if that is not the case, try providing '
'`axis=0` to concatenate along time dimension).')
axis = 0
time_dim_name = time_dims[0] # At this point all series are supposed to have same time dim name
da_sequence = [ts.data_array(copy=False) for ts in series]
component_axis_equal = len(set([ts.width for ts in series])) == 1
sample_axis_equal = len(set([ts.n_samples for ts in series])) == 1
if axis == 0:
# time
raise_if((axis == 0 and not (component_axis_equal and sample_axis_equal)),
'when concatenating along time dimension, the component and sample dimensions of all '
'provided series must match.')
da_concat = xr.concat(da_sequence, dim=time_dim_name)
# check, if timeseries are consecutive
consecutive_time_axes = True
for i in range(1, len(series)):
if series[i - 1].end_time() + series[0].freq != \
series[i].start_time():
consecutive_time_axes = False
break
if not consecutive_time_axes:
raise_if_not(ignore_time_axes, "When concatenating over time axis, all series need to be contiguous"
"in the time dimension. Use `ignore_time_axis=True` to override "
"this behavior and concatenate the series by extending the time axis "
"of the first series.")
if series[0].has_datetime_index:
tindex = pd.date_range(series[0].start_time(),
freq=series[0].freq,
periods=da_concat.shape[0])
else:
tindex = pd.RangeIndex(start=series[0].start_time(),
stop=series[0].start_time() + da_concat.shape[0],
step=1)
da_concat = da_concat.assign_coords({time_dim_name: tindex})
else:
time_axes_equal = all(list(map(lambda t: t[0].has_same_time_as(t[1]), zip(series[0:-1], series[1:]))))
time_axes_ok = (time_axes_equal if not ignore_time_axes else len(set([len(ts) for ts in series])) == 1)
raise_if_not((time_axes_ok and (axis == 1 and sample_axis_equal) or (axis == 2 and component_axis_equal)),
'When concatenating along component or sample dimensions, all the series must have the same time '
'axes (unless `ignore_time_axes` is True), or time axes of same lengths (if `ignore_time_axes` is '
'True), and all series must have the same number of samples (if concatenating along component '
'dimension), or the same number of components (if concatenating along sample dimension).')
# we concatenate raw values using Numpy because not all series might have the same time axes
# and joining using xarray.concatenate() won't work in some cases
concat_vals = np.concatenate([da.values for da in da_sequence], axis=axis)
if axis == 1:
# when concatenating along component dimension, we have to re-create a component index
component_coords = []
existing_components = set()
for i, ts in enumerate(series):
for comp in ts.components:
if comp not in existing_components:
component_coords.append(comp)
existing_components.add(comp)
else:
new_comp_name = '{}_{}'.format(i, comp)
component_coords.append(new_comp_name)
existing_components.add(new_comp_name)
component_index = pd.Index(component_coords)
else:
component_index = da_sequence[0].get_index(DIMS[1])
da_concat = xr.DataArray(concat_vals,
dims=(time_dim_name,) + DIMS[-2:],
coords={time_dim_name: series[0].time_index, DIMS[1]: component_index})
return TimeSeries(da_concat)
|
def concatenate(series: Sequence['TimeSeries'],
axis: Union[str, int] = 0,
ignore_time_axes: bool = False):
"""Concatenates multiple ``TimeSeries`` along a given axis.
``axis`` can be an integer in (0, 1, 2) to denote (time, component, sample) or, alternatively,
a string denoting the corresponding dimension of the underlying ``DataArray``.
Parameters
----------
series : Sequence[TimeSeries]
sequence of ``TimeSeries`` to concatenate
axis : Union[str, int]
axis along which the series will be concatenated.
ignore_time_axes : bool
Allow concatenation even when some series do not have matching time axes.
When done along component or sample dimensions, concatenation will work as long as the series
have the same lengths (in this case the resulting series will have the time axis of the first
provided series). When done along time dimension, concatenation will work even if the time axes
are not contiguous (in this case, the resulting series will have a start time matching the start time
of the first provided series). Default: False.
Returns
-------
TimeSeries
concatenated series
"""
time_dims = [ts.time_dim for ts in series]
if isinstance(axis, str):
if axis == DIMS[1]:
axis = 1
elif axis == DIMS[2]:
axis = 2
else:
raise_if_not(len(set(time_dims)) == 1 and axis == time_dims[0],
'Unrecognised `axis` name. If `axis` denotes the time axis, all provided '
'series must have the same time axis name (if that is not the case, try providing '
'`axis=0` to concatenate along time dimension).')
axis = 0
time_dim_name = time_dims[0] # At this point all series are supposed to have same time dim name
da_sequence = [ts.data_array(copy=False) for ts in series]
component_axis_equal = len(set([ts.width for ts in series])) == 1
sample_axis_equal = len(set([ts.n_samples for ts in series])) == 1
if axis == 0:
# time
raise_if((axis == 0 and not (component_axis_equal and sample_axis_equal)),
'when concatenating along time dimension, the component and sample dimensions of all '
'provided series must match.')
da_concat = xr.concat(da_sequence, dim=time_dim_name)
# check, if timeseries are consecutive
consecutive_time_axes = True
for i in range(1, len(series)):
if series[i - 1].end_time() + series[0].freq != series[i].start_time():
consecutive_time_axes = False
break
if not consecutive_time_axes:
raise_if_not(ignore_time_axes, "When concatenating over time axis, all series need to be contiguous"
"in the time dimension. Use `ignore_time_axis=True` to override "
"this behavior and concatenate the series by extending the time axis "
"of the first series.")
if series[0].has_datetime_index:
tindex = pd.date_range(series[0].start_time(),
freq=series[0].freq,
periods=da_concat.shape[0])
else:
tindex = pd.RangeIndex(start=series[0].start_time(),
stop=series[0].start_time() + da_concat.shape[0],
step=1)
da_concat = da_concat.assign_coords({time_dim_name: tindex})
else:
time_axes_equal = all(list(map(lambda t: t[0].has_same_time_as(t[1]), zip(series[0:-1], series[1:]))))
time_axes_ok = (time_axes_equal if not ignore_time_axes else len(set([len(ts) for ts in series])) == 1)
raise_if_not((time_axes_ok and (axis == 1 and sample_axis_equal) or (axis == 2 and component_axis_equal)),
'When concatenating along component or sample dimensions, all the series must have the same time '
'axes (unless `ignore_time_axes` is True), or time axes of same lengths (if `ignore_time_axes` is '
'True), and all series must have the same number of samples (if concatenating along component '
'dimension), or the same number of components (if concatenating along sample dimension).')
# we concatenate raw values using Numpy because not all series might have the same time axes
# and joining using xarray.concatenate() won't work in some cases
concat_vals = np.concatenate([da.values for da in da_sequence], axis=axis)
if axis == 1:
# when concatenating along component dimension, we have to re-create a component index
component_coords = []
existing_components = set()
for i, ts in enumerate(series):
for comp in ts.components:
if comp not in existing_components:
component_coords.append(comp)
existing_components.add(comp)
else:
new_comp_name = '{}_{}'.format(i, comp)
component_coords.append(new_comp_name)
existing_components.add(new_comp_name)
component_index = pd.Index(component_coords)
else:
component_index = da_sequence[0].get_index(DIMS[1])
da_concat = xr.DataArray(concat_vals,
dims=(time_dim_name,) + DIMS[-2:],
coords={time_dim_name: series[0].time_index, DIMS[1]: component_index})
return TimeSeries(da_concat)
|
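A short usage sketch for concatenate(); the import locations are assumptions (recent darts versions expose both names at the package root):

import numpy as np
import pandas as pd
from darts import TimeSeries, concatenate

idx1 = pd.date_range("2021-01-01", periods=5, freq="D")
idx2 = pd.date_range("2021-01-06", periods=5, freq="D")   # contiguous with idx1
s1 = TimeSeries.from_times_and_values(idx1, np.arange(5, dtype=float))
s2 = TimeSeries.from_times_and_values(idx2, np.arange(5, 10, dtype=float))

joined = concatenate([s1, s2], axis=0)               # 10 time steps, 1 component
stacked = concatenate([s1, s2], axis=1,              # 5 time steps, 2 components
                      ignore_time_axes=True)         # time axes differ but have equal length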
1,372 |
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Generally, ``y`` is in a multilabel format
if it has the following three properties
1. It has exactly two dimensions
    2. Its second dimension has at least 2 elements
3. Its data type is either bool, int or unsign int
Parameters
----------
y : an array-like object of target values. y can be a sparse matrix too.
Returns
-------
out : bool,
        Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
>>> is_multilabel(None)
False
>>> is_multilabel([])
False
>>> is_multilabel([[1, 2], [1, 1]])
True
>>> is_multilabel(np.array([[1, 2], [3, 1]]))
False
"""
if issparse(y):
if not (y.ndim == 2 and y.shape[1] > 1):
return False
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in ('b', 'i', 'u') or # bool, int, uint
_is_integral_float(np.unique(y.data))))
y = np.asarray(y)
if y.ndim != 2 or y.shape[1] < 2:
return False
labels = np.unique(y)
return len(labels) < 3 and \
(y.dtype.kind in ('b', 'i', 'u') or _is_integral_float(labels))
|
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Generally, ``y`` is in a multilabel format
if it has the following three properties
1. It has exactly two dimensions
    2. Its second dimension has at least 2 elements
3. Its data type is either bool, int or unsigned int
Parameters
----------
y : an array-like object of target values. y can be a sparse matrix too.
Returns
-------
out : bool,
        Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
>>> is_multilabel(None)
False
>>> is_multilabel([])
False
>>> is_multilabel([[1, 2], [1, 1]])
True
>>> is_multilabel(np.array([[1, 2], [3, 1]]))
False
"""
if issparse(y):
if not (y.ndim == 2 and y.shape[1] > 1):
return False
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in ('b', 'i', 'u') or # bool, int, uint
_is_integral_float(np.unique(y.data))))
y = np.asarray(y)
if y.ndim != 2 or y.shape[1] < 2:
return False
labels = np.unique(y)
return len(labels) < 3 and \
(y.dtype.kind in ('b', 'i', 'u') or _is_integral_float(labels))
|
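The doctests above only exercise dense inputs; a small sketch of the sparse branch, assuming is_multilabel is in scope alongside scipy:

import numpy as np
from scipy.sparse import csr_matrix

y = csr_matrix(np.array([[1, 0, 1], [0, 1, 0]]))
print(is_multilabel(y))      # True: 2-D, more than one column, single non-zero value {1}
y = csr_matrix(np.array([[1, 0, 2], [0, 3, 0]]))
print(is_multilabel(y))      # False: non-zero values {1, 2, 3} are not a single label value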
3,915 |
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation proceeds using the Havel-Hakimi theorem
[havel1955]_, [hakimi1962]_, [CL1996]_.
Worst-case run time is $O(s)$ where $s$ is the sum of the sequence.
Parameters
----------
deg_sequence : list
A list of integers where each element specifies the degree of a node
in a graph.
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Examples
--------
>>> G = nx.Graph()
>>> G.add_edges_from([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
>>> sequence = (v for d, v in G.degree())
>>> nx.is_valid_degree_sequence_havel_hakimi(sequence)
True
Notes
-----
The ZZ condition says that for the sequence d if
.. math::
|d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}
then d is graphical. This was shown in Theorem 6 in [1]_.
References
----------
.. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
.. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs"
Casopis Pest. Mat. 80, 477-480, 1955.
.. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as
Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962.
.. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs",
Chapman and Hall/CRC, 1996.
"""
try:
dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
return True
modstubs = [0] * (dmax + 1)
# Successively reduce degree sequence by removing the maximum degree
while n > 0:
# Retrieve the maximum degree in the sequence
while num_degs[dmax] == 0:
dmax -= 1
# If there are not enough stubs to connect to, then the sequence is
# not graphical
if dmax > n - 1:
return False
# Remove largest stub in list
num_degs[dmax], n = num_degs[dmax] - 1, n - 1
# Reduce the next dmax largest stubs
mslen = 0
k = dmax
for i in range(dmax):
while num_degs[k] == 0:
k -= 1
num_degs[k], n = num_degs[k] - 1, n - 1
if k > 1:
modstubs[mslen] = k - 1
mslen += 1
# Add back to the list any non-zero stubs that were removed
for i in range(mslen):
stub = modstubs[i]
num_degs[stub], n = num_degs[stub] + 1, n + 1
return True
|
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation proceeds using the Havel-Hakimi theorem
[havel1955]_, [hakimi1962]_, [CL1996]_.
Worst-case run time is $O(s)$ where $s$ is the sum of the sequence.
Parameters
----------
deg_sequence : list
A list of integers where each element specifies the degree of a node
in a graph.
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Examples
--------
>>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
>>> sequence = (v for d, v in G.degree())
>>> nx.is_valid_degree_sequence_havel_hakimi(sequence)
True
Notes
-----
The ZZ condition says that for the sequence d if
.. math::
|d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}
then d is graphical. This was shown in Theorem 6 in [1]_.
References
----------
.. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
.. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs"
Casopis Pest. Mat. 80, 477-480, 1955.
.. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as
Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962.
.. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs",
Chapman and Hall/CRC, 1996.
"""
try:
dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
return True
modstubs = [0] * (dmax + 1)
# Successively reduce degree sequence by removing the maximum degree
while n > 0:
# Retrieve the maximum degree in the sequence
while num_degs[dmax] == 0:
dmax -= 1
# If there are not enough stubs to connect to, then the sequence is
# not graphical
if dmax > n - 1:
return False
# Remove largest stub in list
num_degs[dmax], n = num_degs[dmax] - 1, n - 1
# Reduce the next dmax largest stubs
mslen = 0
k = dmax
for i in range(dmax):
while num_degs[k] == 0:
k -= 1
num_degs[k], n = num_degs[k] - 1, n - 1
if k > 1:
modstubs[mslen] = k - 1
mslen += 1
# Add back to the list any non-zero stubs that were removed
for i in range(mslen):
stub = modstubs[i]
num_degs[stub], n = num_degs[stub] + 1, n + 1
return True
|
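For comparison, a naive O(n^2 log n) restatement of the Havel-Hakimi reduction (not the bucket-counting routine above), which can be handy for checking small sequences by hand:

def havel_hakimi_naive(degrees):
    """Graphical iff repeatedly removing the largest degree d and decrementing
    the next d largest degrees never goes negative and ends at all zeros."""
    seq = sorted(degrees, reverse=True)
    while seq and seq[0] > 0:
        d = seq.pop(0)
        if d > len(seq):
            return False
        for i in range(d):
            seq[i] -= 1
            if seq[i] < 0:
                return False
        seq.sort(reverse=True)
    return True

print(havel_hakimi_naive([3, 3, 2, 2, 2, 2]))  # True, e.g. a 6-cycle with one chord
print(havel_hakimi_naive([4, 3, 2, 1]))        # False, max degree 4 exceeds n - 1 = 3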
46,012 |
def add_snow(image: torch.Tensor, snow_coef: torch.Tensor, brightness_coef: torch.Tensor) -> torch.Tensor:
"""Add snow to the image.
Snow is added in the form of bleach of some pixels.
"""
KORNIA_CHECK_IS_TENSOR(image)
KORNIA_CHECK_IS_COLOR(image, f"with shape {image.shape}")
snow_coef *= 0.5 # = 255 / 2
snow_coef += 0.33 # = 255 / 3
hls = rgb_to_hls(image)
hls[:, 1, :, :][hls[:, 1, :, :] < snow_coef] = hls[:, 1, :, :][hls[:, 1, :, :] < snow_coef] * brightness_coef
hls[:, 1, :, :][hls[:, 1, :, :] > 1] = 1
rgb = hls_to_rgb(hls)
return rgb
|
def add_snow(image: Tensor, snow_coef: Tensor, brightness_coef: Tensor) -> Tensor:
"""Add snow to the image.
Snow is added in the form of bleach of some pixels.
"""
KORNIA_CHECK_IS_TENSOR(image)
KORNIA_CHECK_IS_COLOR(image, f"with shape {image.shape}")
snow_coef *= 0.5 # = 255 / 2
snow_coef += 0.33 # = 255 / 3
hls = rgb_to_hls(image)
hls[:, 1, :, :][hls[:, 1, :, :] < snow_coef] = hls[:, 1, :, :][hls[:, 1, :, :] < snow_coef] * brightness_coef
hls[:, 1, :, :][hls[:, 1, :, :] > 1] = 1
rgb = hls_to_rgb(hls)
return rgb
|
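A minimal usage sketch, assuming add_snow and the kornia colour helpers it calls are importable in the current module; the coefficient values are arbitrary:

import torch

img = torch.rand(2, 3, 64, 64)           # BxCxHxW RGB batch in [0, 1]
snow_coef = torch.tensor(0.3)            # rescaled internally to 0.3 * 0.5 + 0.33 = 0.48
brightness_coef = torch.tensor(2.5)      # lightness multiplier for pixels below the threshold
out = add_snow(img, snow_coef.clone(), brightness_coef)   # clone(): snow_coef is modified in place
assert out.shape == img.shape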
49,939 |
def is_screen_black(frame: Optional[Frame] = None,
mask: MaskTypes = Region.ALL,
threshold: int = None,
region: Region = Region.ALL) -> "_IsScreenBlackResult":
"""Check for the presence of a black screen in a video frame.
:param Frame frame:
If this is specified it is used as the video frame to check; otherwise a
new frame is grabbed from the device-under-test. This is an image in
OpenCV format (for example as returned by `frames` and `get_frame`).
:param str|numpy.ndarray|Mask|Region mask:
A `Region` or a mask that specifies which parts of the image to
analyse. This accepts anything that can be converted to a Mask using
`stbt.load_mask`. See :ref:`Masks`.
:param int threshold:
Even when a video frame appears to be black, the intensity of its pixels
is not always 0. To differentiate almost-black from non-black pixels, a
binary threshold is applied to the frame. The ``threshold`` value is in
the range 0 (black) to 255 (white). The global default (20) can be
changed by setting ``threshold`` in the ``[is_screen_black]`` section of
:ref:`.stbt.conf`.
:returns:
An object that will evaluate to true if the frame was black, or false
if not black. The object has the following attributes:
* **black** (*bool*) – True if the frame was black.
* **frame** (`stbt.Frame`) – The video frame that was analysed.
Changed in v33: ``mask`` accepts anything that can be converted to a Mask
using `load_mask`. The ``region`` parameter is deprecated; pass your
`Region` to ``mask`` instead. You can't specify ``mask`` and ``region``
at the same time.
"""
if threshold is None:
threshold = get_config('is_screen_black', 'threshold', type_=int)
if frame is None:
from stbt_core import get_frame
frame = get_frame()
mask, region, frame_region = _validate_mask(frame, mask, region)
imglog = ImageLogger("is_screen_black", mask=mask, threshold=threshold)
imglog.imwrite("source", frame)
greyframe = cv2.cvtColor(crop(frame, region), cv2.COLOR_BGR2GRAY)
if mask != Region.ALL:
mask_ = mask.to_array(frame_region)
imglog.imwrite("mask", mask_)
cv2.bitwise_and(greyframe, crop(mask_, region), dst=greyframe)
maxVal = greyframe.max()
result = _IsScreenBlackResult(bool(maxVal <= threshold), frame)
debug("is_screen_black: {found} black screen using mask={mask}, "
"threshold={threshold}: {result}, maximum_intensity={maxVal}".format(
found="Found" if result.black else "Didn't find",
mask=mask,
threshold=threshold,
result=result,
maxVal=maxVal))
if imglog.enabled:
imglog.imwrite("grey", greyframe)
_, thresholded = cv2.threshold(greyframe, threshold, 255,
cv2.THRESH_BINARY)
imglog.imwrite("non_black", thresholded)
imglog.set(maxVal=maxVal,
non_black_region=pixel_bounding_box(thresholded))
_log_image_debug(imglog, result)
return result
|
def is_screen_black(frame: Optional[Frame] = None,
mask: MaskTypes = Region.ALL,
threshold: int = None,
region: Region = Region.ALL) -> "_IsScreenBlackResult":
"""Check for the presence of a black screen in a video frame.
:param Frame frame:
If this is specified it is used as the video frame to check; otherwise a
new frame is grabbed from the device-under-test. This is an image in
OpenCV format (for example as returned by `frames` and `get_frame`).
:param str|numpy.ndarray|Mask|Region mask:
A `Region` or a mask that specifies which parts of the image to
analyse. This accepts anything that can be converted to a Mask using
`stbt.load_mask`. See :ref:`Masks`.
:param int threshold:
Even when a video frame appears to be black, the intensity of its pixels
is not always 0. To differentiate almost-black from non-black pixels, a
binary threshold is applied to the frame. The ``threshold`` value is in
the range 0 (black) to 255 (white). The global default (20) can be
changed by setting ``threshold`` in the ``[is_screen_black]`` section of
:ref:`.stbt.conf`.
:returns:
An object that will evaluate to true if the frame was black, or false
if not black. The object has the following attributes:
* **black** (*bool*) – True if the frame was black.
* **frame** (`stbt.Frame`) – The video frame that was analysed.
Changed in v33: ``mask`` accepts anything that can be converted to a Mask
using `load_mask`. The ``region`` parameter is deprecated; pass your
    `Region` to ``mask`` instead. You can't specify ``mask`` and ``region``
    at the same time.
:type region: `Region`
:param region:
Synonym for `mask`. Use `mask` instead - this will be removed in a future version of `stbt`.
"""
if threshold is None:
threshold = get_config('is_screen_black', 'threshold', type_=int)
if frame is None:
from stbt_core import get_frame
frame = get_frame()
mask, region, frame_region = _validate_mask(frame, mask, region)
imglog = ImageLogger("is_screen_black", mask=mask, threshold=threshold)
imglog.imwrite("source", frame)
greyframe = cv2.cvtColor(crop(frame, region), cv2.COLOR_BGR2GRAY)
if mask != Region.ALL:
mask_ = mask.to_array(frame_region)
imglog.imwrite("mask", mask_)
cv2.bitwise_and(greyframe, crop(mask_, region), dst=greyframe)
maxVal = greyframe.max()
result = _IsScreenBlackResult(bool(maxVal <= threshold), frame)
debug("is_screen_black: {found} black screen using mask={mask}, "
"threshold={threshold}: {result}, maximum_intensity={maxVal}".format(
found="Found" if result.black else "Didn't find",
mask=mask,
threshold=threshold,
result=result,
maxVal=maxVal))
if imglog.enabled:
imglog.imwrite("grey", greyframe)
_, thresholded = cv2.threshold(greyframe, threshold, 255,
cv2.THRESH_BINARY)
imglog.imwrite("non_black", thresholded)
imglog.set(maxVal=maxVal,
non_black_region=pixel_bounding_box(thresholded))
_log_image_debug(imglog, result)
return result
|
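A short usage sketch; the mask Region is a hypothetical layout-dependent value and the threshold shown is simply the documented default:

import stbt_core as stbt

frame = stbt.get_frame()
result = stbt.is_screen_black(
    frame=frame,
    mask=stbt.Region(x=0, y=0, right=1280, bottom=620),   # hypothetical: exclude e.g. a spinner below
    threshold=20,
)
if result:                                # truthy when the frame was black
    print("Black frame detected")
print("black =", result.black)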
33,736 |
def push_readmes_and_tags():
if not _merge_build():
print("Not pushing because this is a PR build.")
return
username, password = _get_docker_creds()
environment = {"DOCKER_USE": username, "DOCKER_PASS": password}
for image, tag_line in DOCKER_HUB_DESCRIPTION.items():
cmd_string = (f"--file /myvol/docker/{image}/README.md "
f"--short {tag_line} --debug rayproject/{image}")
DOCKER_CLIENT.containers.run(
"chko/docker-pushrm:1",
command=cmd_string,
mounts=[
docker.types.Mount(target="/myvol/", source=_get_root_dir())
],
environment=environment,
remove=True,
tty=True)
|
def push_readmes():
if not _merge_build():
print("Not pushing because this is a PR build.")
return
username, password = _get_docker_creds()
environment = {"DOCKER_USE": username, "DOCKER_PASS": password}
for image, tag_line in DOCKER_HUB_DESCRIPTION.items():
cmd_string = (f"--file /myvol/docker/{image}/README.md "
f"--short {tag_line} --debug rayproject/{image}")
DOCKER_CLIENT.containers.run(
"chko/docker-pushrm:1",
command=cmd_string,
mounts=[
docker.types.Mount(target="/myvol/", source=_get_root_dir())
],
environment=environment,
remove=True,
tty=True)
|
48,660 |
def light(app):
""" Apply Light Theme to the Qt application instance.
Args:
app (QApplication): QApplication instance.
"""
lightPalette = QPalette()
# base
lightPalette.setColor(QPalette.WindowText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.Button, QColor(240, 240, 240))
lightPalette.setColor(QPalette.Light, QColor(180, 180, 180))
lightPalette.setColor(QPalette.Midlight, QColor(200, 200, 200))
lightPalette.setColor(QPalette.Dark, QColor(225, 225, 225))
lightPalette.setColor(QPalette.Text, QColor(0, 0, 0))
lightPalette.setColor(QPalette.BrightText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.ButtonText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.Base, QColor(237, 237, 237))
lightPalette.setColor(QPalette.Window, QColor(240, 240, 240))
lightPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))
lightPalette.setColor(QPalette.Highlight, QColor(76, 163, 224))
lightPalette.setColor(QPalette.HighlightedText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.Link, QColor(0, 162, 232))
lightPalette.setColor(QPalette.AlternateBase, QColor(225, 225, 225))
lightPalette.setColor(QPalette.ToolTipBase, QColor(240, 240, 240))
lightPalette.setColor(QPalette.ToolTipText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.LinkVisited, QColor(222, 222, 222))
# disabled
lightPalette.setColor(QPalette.Disabled, QPalette.WindowText,
QColor(115, 115, 115))
lightPalette.setColor(QPalette.Disabled, QPalette.Text,
QColor(115, 115, 115))
lightPalette.setColor(QPalette.Disabled, QPalette.ButtonText,
QColor(115, 115, 115))
lightPalette.setColor(QPalette.Disabled, QPalette.Highlight,
QColor(190, 190, 190))
lightPalette.setColor(QPalette.Disabled, QPalette.HighlightedText,
QColor(115, 115, 115))
app.style().unpolish(app)
app.setPalette(lightPalette)
app.setStyle('Fusion')
|
def light(app):
"""
Apply light theme to the Qt application instance.
Args:
app (QApplication): QApplication instance.
"""
lightPalette = QPalette()
# base
lightPalette.setColor(QPalette.WindowText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.Button, QColor(240, 240, 240))
lightPalette.setColor(QPalette.Light, QColor(180, 180, 180))
lightPalette.setColor(QPalette.Midlight, QColor(200, 200, 200))
lightPalette.setColor(QPalette.Dark, QColor(225, 225, 225))
lightPalette.setColor(QPalette.Text, QColor(0, 0, 0))
lightPalette.setColor(QPalette.BrightText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.ButtonText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.Base, QColor(237, 237, 237))
lightPalette.setColor(QPalette.Window, QColor(240, 240, 240))
lightPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))
lightPalette.setColor(QPalette.Highlight, QColor(76, 163, 224))
lightPalette.setColor(QPalette.HighlightedText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.Link, QColor(0, 162, 232))
lightPalette.setColor(QPalette.AlternateBase, QColor(225, 225, 225))
lightPalette.setColor(QPalette.ToolTipBase, QColor(240, 240, 240))
lightPalette.setColor(QPalette.ToolTipText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.LinkVisited, QColor(222, 222, 222))
# disabled
lightPalette.setColor(QPalette.Disabled, QPalette.WindowText,
QColor(115, 115, 115))
lightPalette.setColor(QPalette.Disabled, QPalette.Text,
QColor(115, 115, 115))
lightPalette.setColor(QPalette.Disabled, QPalette.ButtonText,
QColor(115, 115, 115))
lightPalette.setColor(QPalette.Disabled, QPalette.Highlight,
QColor(190, 190, 190))
lightPalette.setColor(QPalette.Disabled, QPalette.HighlightedText,
QColor(115, 115, 115))
app.style().unpolish(app)
app.setPalette(lightPalette)
app.setStyle('Fusion')
|
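A minimal usage sketch; the PySide2 binding is an assumption here, since the module defining light() may target another Qt binding:

import sys
from PySide2.QtWidgets import QApplication, QPushButton

app = QApplication(sys.argv)
light(app)                               # apply the light palette defined above
button = QPushButton("Light-themed button")
button.show()
sys.exit(app.exec_())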
30,331 |
def test_is_default_arguments_non_default():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"script": {
"commands": [
{
"name": "file",
"arguments": [
{
"name": "file",
"required": True,
"default": False
},
{
"name": "verbose"
}
]
}
]
}
}
validator.old_integration = None
assert validator.is_default_arguments() is False, \
"The integration validator did not find invalid arg (needed to be default)"
|
def test_is_default_arguments_non_default():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"script": {
"commands": [
{
"name": "file",
"arguments": [
{
"name": "file",
"required": True,
"default": False
},
{
"name": "verbose"
}
]
}
]
}
}
validator.old_integration = None
assert validator.is_default_arguments() is False, \
"The integration validator did not find invalid arg (needed to be default and not required)"
|
26,026 |
def load_arguments(self, _):
# pylint: disable=line-too-long
# PARAMETER REGISTRATION
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
sku_arg_type = CLIArgumentType(
help='The pricing tiers, e.g., F1(Free), D1(Shared), B1(Basic Small), B2(Basic Medium), B3(Basic Large), S1(Standard Small), P1V2(Premium V2 Small), P1V3(Premium V3 Small), P2V3(Premium V3 Medium), P3V3(Premium V3 Large), PC2 (Premium Container Small), PC3 (Premium Container Medium), PC4 (Premium Container Large), I1 (Isolated Small), I2 (Isolated Medium), I3 (Isolated Large), I1v2 (Isolated V2 Small), I2v2 (Isolated V2 Medium), I3v2 (Isolated V2 Large), WS1 (Logic Apps Workflow Standard 1), WS2 (Logic Apps Workflow Standard 2), WS3 (Logic Apps Workflow Standard 3)',
arg_type=get_enum_type(
['F1', 'FREE', 'D1', 'SHARED', 'B1', 'B2', 'B3', 'S1', 'S2', 'S3', 'P1V2', 'P2V2', 'P3V2', 'P1V3', 'P2V3', 'P3V3', 'PC2', 'PC3',
'PC4', 'I1', 'I2', 'I3', 'I1v2', 'I2v2', 'I3v2', 'WS1', 'WS2', 'WS3']))
webapp_name_arg_type = CLIArgumentType(configured_default='web', options_list=['--name', '-n'], metavar='NAME',
completer=get_resource_name_completion_list('Microsoft.Web/sites'),
id_part='name',
help="name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`",
local_context_attribute=LocalContextAttribute(name='web_name', actions=[
LocalContextAction.GET]))
functionapp_name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME',
help="name of the function app.",
local_context_attribute=LocalContextAttribute(name='functionapp_name',
actions=[
LocalContextAction.GET]))
logicapp_name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME',
help="name of the logic app.",
local_context_attribute=LocalContextAttribute(name='logicapp_name',
actions=[LocalContextAction.GET]))
name_arg_type_dict = {
'functionapp': functionapp_name_arg_type,
'logicapp': logicapp_name_arg_type
}
isolated_sku_arg_type = CLIArgumentType(
help='The Isolated pricing tiers, e.g., I1 (Isolated Small), I2 (Isolated Medium), I3 (Isolated Large)',
arg_type=get_enum_type(['I1', 'I2', 'I3']))
static_web_app_sku_arg_type = CLIArgumentType(
help='The pricing tiers for Static Web App',
arg_type=get_enum_type(['Free', 'Standard'])
)
# use this hidden arg to give a command the right instance, that functionapp commands
# work on function app and webapp ones work on web app
with self.argument_context('webapp') as c:
c.ignore('app_instance')
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('slot', options_list=['--slot', '-s'],
help="the name of the slot. Default to the productions slot if not specified")
c.argument('name', arg_type=webapp_name_arg_type)
with self.argument_context('appservice') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx))
with self.argument_context('appservice list-locations') as c:
c.argument('linux_workers_enabled', action='store_true',
help='get regions which support hosting web apps on Linux workers')
c.argument('sku', arg_type=sku_arg_type)
with self.argument_context('appservice plan') as c:
c.argument('name', arg_type=name_arg_type, help='The name of the app service plan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
configured_default='appserviceplan', id_part='name',
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('number_of_workers', help='Number of workers to be allocated.', type=int, default=1)
c.ignore('max_burst')
with self.argument_context('appservice plan create') as c:
c.argument('name', options_list=['--name', '-n'], help="Name of the new app service plan", completer=None,
validator=validate_asp_create,
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.SET],
scopes=['appservice', 'webapp', 'functionapp']))
c.argument('number_of_workers', help='Number of workers to be allocated.', type=int, default=1)
c.argument('app_service_environment', options_list=['--app-service-environment', '-e'],
help="Name or ID of the app service environment",
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
c.argument('sku', arg_type=sku_arg_type)
c.argument('is_linux', action='store_true', required=False, help='host web app on Linux worker')
c.argument('hyper_v', action='store_true', required=False, help='Host web app on Windows container')
c.argument('per_site_scaling', action='store_true', required=False, help='Enable per-app scaling at the '
'App Service plan level to allow for '
'scaling an app independently from '
'the App Service plan that hosts it.')
c.argument('zone_redundant', options_list=['--zone-redundant', '-z'], help='Enable zone redundancy for high availability. Cannot be changed after plan creation. Minimum instance count is 3.')
c.argument('tags', arg_type=tags_type)
with self.argument_context('appservice plan update') as c:
c.argument('sku', arg_type=sku_arg_type)
c.argument('elastic_scale', arg_type=get_three_state_flag(), is_preview=True, help='Enable or disable automatic scaling. Set to "true" to enable elastic scale for this plan, or "false" to disable elastic scale for this plan. The SKU must be a Premium V2 SKU (P1V2, P2V2, P3V2) or a Premium V3 SKU (P1V3, P2V3, P3V3)')
c.argument('max_elastic_worker_count', options_list=['--max-elastic-worker-count', '-m'], type=int, is_preview=True, help='Maximum number of instances that the plan can scale out to. The plan must be an elastic scale plan.')
c.argument('number_of_workers', type=int, help='Number of workers to be allocated.')
c.ignore('allow_pending_state')
with self.argument_context('appservice plan delete') as c:
c.argument('name', arg_type=name_arg_type, help='The name of the app service plan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
configured_default='appserviceplan', id_part='name', local_context_attribute=None)
with self.argument_context('webapp create') as c:
c.argument('name', options_list=['--name', '-n'], help='name of the new web app',
validator=validate_site_create,
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.SET],
scopes=['webapp', 'cupertino']))
c.argument('startup_file', help="Linux only. The web's startup file")
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-s'], help='the container registry server username')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-w'], help='The container registry server password. Required for private registries.')
c.argument('multicontainer_config_type', options_list=['--multicontainer-config-type'], help="Linux only.", arg_type=get_enum_type(MULTI_CONTAINER_TYPES))
c.argument('multicontainer_config_file', options_list=['--multicontainer-config-file'], help="Linux only. Config file for multicontainer apps. (local or remote)")
c.argument('runtime', options_list=['--runtime', '-r'], help="canonicalized web runtime in the format of Framework:Version, e.g. \"PHP:7.2\". Allowed delimiters: \"|\" or \":\". If using powershell, please use the \":\" delimiter or be sure to properly escape the \"|\" character. "
"Use `az webapp list-runtimes` for available list") # TODO ADD completer
c.argument('plan', options_list=['--plan', '-p'], configured_default='appserviceplan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
help="name or resource id of the app service plan. Use 'appservice plan create' to get one",
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('vnet', help="Name or resource ID of the regional virtual network. If there are multiple vnets of the same name across different resource groups, use vnet resource id to specify which vnet to use. If vnet name is used, by default, the vnet in the same resource group as the webapp will be used. Must be used with --subnet argument.")
c.argument('subnet', help="Name or resource ID of the pre-existing subnet to have the webapp join. The --vnet is argument also needed if specifying subnet by name.")
c.argument('https_only', help="Redirect all traffic made to an app using HTTP to HTTPS.",
arg_type=get_three_state_flag(return_label=True))
c.ignore('language')
c.ignore('using_webapp_up')
with self.argument_context('webapp show') as c:
c.argument('name', arg_type=webapp_name_arg_type)
with self.argument_context('webapp list-instances') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
        c.argument('slot', options_list=['--slot', '-s'], help='Name of the web app slot. Defaults to the production slot if not specified.')
with self.argument_context('webapp list-runtimes') as c:
c.argument('linux', action='store_true', help='list runtime stacks for linux based web apps', deprecate_info=c.deprecate(redirect="--os-type"))
c.argument('os_type', options_list=["--os", "--os-type"], help="limit the output to just windows or linux runtimes", arg_type=get_enum_type([LINUX_OS_NAME, WINDOWS_OS_NAME]))
with self.argument_context('functionapp list-runtimes') as c:
c.argument('os_type', options_list=["--os", "--os-type"], help="limit the output to just windows or linux runtimes", arg_type=get_enum_type([LINUX_OS_NAME, WINDOWS_OS_NAME]))
with self.argument_context('webapp deleted list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
c.argument('slot', options_list=['--slot', '-s'], help='Name of the deleted web app slot.')
with self.argument_context('webapp deleted restore') as c:
c.argument('deleted_id', options_list=['--deleted-id'], help='Resource ID of the deleted web app')
c.argument('name', options_list=['--name', '-n'], help='name of the web app to restore the deleted content to')
c.argument('slot', options_list=['--slot', '-s'], help='slot to restore the deleted content to')
c.argument('restore_content_only', action='store_true',
help='restore only deleted files without web app settings')
with self.argument_context('webapp traffic-routing') as c:
c.argument('distribution', options_list=['--distribution', '-d'], nargs='+',
help='space-separated slot routings in a format of `<slot-name>=<percentage>` e.g. staging=50. Unused traffic percentage will go to the Production slot')
with self.argument_context('webapp update') as c:
c.argument('client_affinity_enabled', help="Enables sending session affinity cookies.",
arg_type=get_three_state_flag(return_label=True))
c.argument('https_only', help="Redirect all traffic made to an app using HTTP to HTTPS.",
arg_type=get_three_state_flag(return_label=True))
c.argument('force_dns_registration', help="If true, web app hostname is force registered with DNS",
arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0'))
c.argument('skip_custom_domain_verification',
help="If true, custom (non *.azurewebsites.net) domains associated with web app are not verified",
arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0'))
c.argument('ttl_in_seconds', help="Time to live in seconds for web app's default domain name",
arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0'))
c.argument('skip_dns_registration', help="If true web app hostname is not registered with DNS on creation",
arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0'))
c.argument('minimum_elastic_instance_count', options_list=["--minimum-elastic-instance-count", "-i"], type=int, is_preview=True, help="Minimum number of instances. App must be in an elastic scale App Service Plan.")
c.argument('prewarmed_instance_count', options_list=["--prewarmed-instance-count", "-w"], type=int, is_preview=True, help="Number of preWarmed instances. App must be in an elastic scale App Service Plan.")
with self.argument_context('webapp browse') as c:
c.argument('logs', options_list=['--logs', '-l'], action='store_true',
help='Enable viewing the log stream immediately after launching the web app')
with self.argument_context('webapp delete') as c:
c.argument('name', arg_type=webapp_name_arg_type, local_context_attribute=None)
c.argument('keep_empty_plan', action='store_true', help='keep empty app service plan')
c.argument('keep_metrics', action='store_true', help='keep app metrics')
c.argument('keep_dns_registration', action='store_true', help='keep DNS registration',
deprecate_info=c.deprecate(expiration='3.0.0'))
with self.argument_context('webapp webjob') as c:
c.argument('webjob_name', help='The name of the webjob', options_list=['--webjob-name', '-w'])
with self.argument_context('webapp webjob continuous list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
with self.argument_context('webapp webjob triggered list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
for scope in ['webapp', 'functionapp', 'logicapp']:
with self.argument_context(scope + ' create') as c:
c.argument('deployment_container_image_name', options_list=['--deployment-container-image-name', '-i'],
help='Container image name from Docker Hub, e.g. publisher/image-name:tag')
c.argument('deployment_local_git', action='store_true', options_list=['--deployment-local-git', '-l'],
help='enable local git')
c.argument('deployment_zip', options_list=['--deployment-zip', '-z'],
help='perform deployment using zip file')
c.argument('deployment_source_url', options_list=['--deployment-source-url', '-u'],
help='Git repository URL to link with manual integration')
c.argument('deployment_source_branch', options_list=['--deployment-source-branch', '-b'],
help='the branch to deploy')
c.argument('tags', arg_type=tags_type)
for scope in ['webapp', 'functionapp']:
with self.argument_context(scope) as c:
c.argument('assign_identities', nargs='*', options_list=['--assign-identity'],
help='accept system or user assigned identities separated by spaces. Use \'[system]\' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples')
c.argument('scope', options_list=['--scope'], help="Scope that the system assigned identity can access")
c.argument('role', options_list=['--role'], help="Role name or id the system assigned identity will have")
with self.argument_context(scope + ' config ssl bind') as c:
c.argument('ssl_type', help='The ssl cert type', arg_type=get_enum_type(['SNI', 'IP']))
with self.argument_context(scope + ' config ssl upload') as c:
c.argument('certificate_password', help='The ssl cert password')
c.argument('certificate_file', type=file_type, help='The filepath for the .pfx file')
c.argument('slot', options_list=['--slot', '-s'],
                       help='The name of the slot. Defaults to the production slot if not specified')
with self.argument_context(scope + ' config ssl') as c:
c.argument('certificate_thumbprint', help='The ssl cert thumbprint')
with self.argument_context(scope + ' config appsettings') as c:
c.argument('settings', nargs='+', help="space-separated app settings in a format of `<name>=<value>`")
c.argument('setting_names', nargs='+', help="space-separated app setting names")
with self.argument_context(scope + ' config ssl import') as c:
c.argument('key_vault', help='The name or resource ID of the Key Vault')
c.argument('key_vault_certificate_name', help='The name of the certificate in Key Vault')
with self.argument_context(scope + ' config ssl create') as c:
c.argument('hostname', help='The custom domain name')
c.argument('name', options_list=['--name', '-n'], help='Name of the web app.')
c.argument('resource-group', options_list=['--resource-group', '-g'], help='Name of resource group.')
with self.argument_context(scope + ' config ssl show') as c:
c.argument('certificate_name', help='The name of the certificate')
with self.argument_context(scope + ' config hostname') as c:
c.argument('hostname', completer=get_hostname_completion_list,
help="hostname assigned to the site, such as custom domains", id_part='child_name_1')
with self.argument_context(scope + ' deployment user') as c:
c.argument('user_name', help='user name')
c.argument('password', help='password, will prompt if not specified')
with self.argument_context(scope + ' deployment source') as c:
c.argument('manual_integration', action='store_true',
help='disable automatic sync between source control and web')
c.argument('repo_url', options_list=['--repo-url', '-u'],
help='repository url to pull the latest source from, e.g. https://github.com/foo/foo-web')
c.argument('branch', help='the branch name of the repository')
c.argument('repository_type', help='repository type',
arg_type=get_enum_type(['git', 'mercurial', 'github', 'externalgit', 'localgit']))
c.argument('git_token', help='Git access token required for auto sync')
c.argument('github_action', options_list=['--github-action'], help='If using GitHub Actions, defaults to False')
with self.argument_context(scope + ' identity') as c:
c.argument('scope', help="The scope the managed identity has access to")
c.argument('role', help="Role name or id the managed identity will be assigned")
with self.argument_context(scope + ' identity assign') as c:
c.argument('assign_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
with self.argument_context(scope + ' identity remove') as c:
c.argument('remove_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
with self.argument_context(scope + ' deployment source config-zip') as c:
c.argument('src', help='a zip file path for deployment')
c.argument('build_remote', help='enable remote build during deployment',
arg_type=get_three_state_flag(return_label=True))
c.argument('timeout', type=int, options_list=['--timeout', '-t'],
help='Configurable timeout in seconds for checking the status of deployment',
validator=validate_timeout_value)
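# Illustrative zip deployment using the options registered above (path and names are placeholders):
#   az webapp deployment source config-zip -g MyResourceGroup -n MyWebApp --src ./app.zip --timeout 300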
with self.argument_context(scope + ' config appsettings list') as c:
c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type),
id_part=None)
with self.argument_context(scope + ' config hostname list') as c:
c.argument('webapp_name', arg_type=webapp_name_arg_type, id_part=None, options_list='--webapp-name')
with self.argument_context(scope + ' cors') as c:
c.argument('allowed_origins', options_list=['--allowed-origins', '-a'], nargs='*',
help='space separated origins that should be allowed to make cross-origin calls (for example: http://example.com:12345). To allow all, use "*" and remove all other origins from the list')
with self.argument_context(scope + ' config set') as c:
c.argument('number_of_workers', help='The number of workers to be allocated.', type=int)
c.argument('remote_debugging_enabled', help='enable or disable remote debugging',
arg_type=get_three_state_flag(return_label=True))
c.argument('web_sockets_enabled', help='enable or disable web sockets',
arg_type=get_three_state_flag(return_label=True))
c.argument('always_on',
help='ensure web app gets loaded all the time, rather than unloaded after being idle. Recommended when you have continuous web jobs running',
arg_type=get_three_state_flag(return_label=True))
c.argument('auto_heal_enabled', help='enable or disable auto heal',
arg_type=get_three_state_flag(return_label=True))
c.argument('use32_bit_worker_process', options_list=['--use-32bit-worker-process'],
help='use 32-bit worker process or not', arg_type=get_three_state_flag(return_label=True))
c.argument('php_version', help='The version used to run your web app if using PHP, e.g., 5.5, 5.6, 7.0')
c.argument('python_version', help='The version used to run your web app if using Python, e.g., 2.7, 3.4')
c.argument('net_framework_version', help="The version used to run your web app if using .NET Framework, e.g., 'v4.0' for .NET 4.6 and 'v3.0' for .NET 3.5")
c.argument('linux_fx_version', help="The runtime stack used for your linux-based webapp, e.g., \"RUBY|2.5.5\", \"NODE|12LTS\", \"PHP|7.2\", \"DOTNETCORE|2.1\". See https://aka.ms/linux-stacks for more info.")
c.argument('windows_fx_version', help="A docker image name used for your windows container web app, e.g., microsoft/nanoserver:ltsc2016")
if scope == 'functionapp':
c.ignore('windows_fx_version')
c.argument('pre_warmed_instance_count', options_list=['--prewarmed-instance-count'],
help="Number of pre-warmed instances a function app has")
if scope == 'webapp':
c.ignore('reserved_instance_count')
c.argument('java_version',
help="The version used to run your web app if using Java, e.g., '1.7' for Java 7, '1.8' for Java 8")
c.argument('java_container', help="The java container, e.g., Tomcat, Jetty")
c.argument('java_container_version', help="The version of the java container, e.g., '8.0.23' for Tomcat")
c.argument('min_tls_version',
help="The minimum version of TLS required for SSL requests, e.g., '1.0', '1.1', '1.2'")
c.argument('http20_enabled', help="Configure the web app to allow clients to connect over HTTP/2.0.",
arg_type=get_three_state_flag(return_label=True))
c.argument('app_command_line', options_list=['--startup-file'],
help="The startup file for linux hosted web apps, e.g. 'process.json' for Node.js web")
c.argument('ftps_state', help="Set the Ftps state value for an app. Default value is 'AllAllowed'.",
arg_type=get_enum_type(FTPS_STATE_TYPES))
c.argument('vnet_route_all_enabled', help="Configure regional VNet integration to route all traffic to the VNet.",
arg_type=get_three_state_flag(return_label=True))
c.argument('generic_configurations', nargs='+',
help='Provide site configuration list in a format of either `key=value` pair or `@<json_file>`. PowerShell and Windows Command Prompt users should use a JSON file to provide these configurations to avoid compatibility issues with escape characters.')
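# Illustrative 'config set' call combining a few of the flags above (app names are placeholders):
#   az webapp config set -g MyResourceGroup -n MyWebApp --always-on true --min-tls-version 1.2 --http20-enabled true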
with self.argument_context(scope + ' config container') as c:
c.argument('docker_registry_server_url', options_list=['--docker-registry-server-url', '-r'],
help='the container registry server url')
c.argument('docker_custom_image_name', options_list=['--docker-custom-image-name', '-c', '-i'],
help='the container custom image name and optionally the tag name (e.g., <registry-name>/<image-name>:<tag>)')
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-u'],
help='the container registry server username')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-p'],
help='the container registry server password')
c.argument('websites_enable_app_service_storage', options_list=['--enable-app-service-storage', '-t'],
help='enables platform storage (custom container only)',
arg_type=get_three_state_flag(return_label=True))
c.argument('multicontainer_config_type', options_list=['--multicontainer-config-type'], help='config type',
arg_type=get_enum_type(MULTI_CONTAINER_TYPES))
c.argument('multicontainer_config_file', options_list=['--multicontainer-config-file'],
help="config file for multicontainer apps")
c.argument('show_multicontainer_config', action='store_true',
help='shows decoded config if a multicontainer config is set')
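# Illustrative custom-container configuration (registry URL, image, and credentials are placeholders):
#   az webapp config container set -g MyResourceGroup -n MyWebApp \
#       -i myregistry.azurecr.io/myimage:latest -r https://myregistry.azurecr.io -u myuser -p mypassword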
with self.argument_context(scope + ' deployment container config') as c:
c.argument('enable', options_list=['--enable-cd', '-e'], help='enable/disable continuous deployment',
arg_type=get_three_state_flag(return_label=True))
with self.argument_context('webapp config connection-string list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
with self.argument_context('webapp config storage-account list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
with self.argument_context('webapp config hostname') as c:
c.argument('webapp_name',
help="webapp name. You can configure the default using `az configure --defaults web=<name>`",
configured_default='web',
completer=get_resource_name_completion_list('Microsoft.Web/sites'), id_part='name',
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET]))
with self.argument_context('webapp deployment list-publishing-profiles') as c:
c.argument('xml', options_list=['--xml'], required=False, help='retrieves the publishing profile details in XML format')
with self.argument_context('webapp deployment slot') as c:
c.argument('slot', help='the name of the slot')
c.argument('webapp', arg_type=name_arg_type, completer=get_resource_name_completion_list('Microsoft.Web/sites'),
help='Name of the webapp', id_part='name',
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET]))
c.argument('auto_swap_slot', help='target slot to auto swap', default='production')
c.argument('disable', help='disable auto swap', action='store_true')
c.argument('target_slot', help="target slot to swap, default to 'production'")
c.argument('preserve_vnet', help="preserve Virtual Network to the slot during swap, default to 'true'",
arg_type=get_three_state_flag(return_label=True))
with self.argument_context('webapp deployment slot create') as c:
c.argument('configuration_source',
help="source slot to clone configurations from. Use web app's name to refer to the production slot")
c.argument('deployment_container_image_name', options_list=['--deployment-container-image-name', '-i'],
help='Container image name, e.g. publisher/image-name:tag')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-w'],
help='The container registry server password')
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-u'], help='the container registry server username')
with self.argument_context('webapp deployment slot swap') as c:
c.argument('action',
help="swap types. use 'preview' to apply target slot's settings on the source slot first; use 'swap' to complete it; use 'reset' to reset the swap",
arg_type=get_enum_type(['swap', 'preview', 'reset']))
with self.argument_context('webapp deployment github-actions')as c:
c.argument('name', arg_type=webapp_name_arg_type)
c.argument('resource_group', arg_type=resource_group_name_type, options_list=['--resource-group', '-g'])
c.argument('repo', help='The GitHub repository to which the workflow file will be added. In the format: <owner>/<repository-name>')
c.argument('token', help='A Personal Access Token with write access to the specified repository. For more information: https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line')
c.argument('slot', options_list=['--slot', '-s'], help='The name of the slot. Default to the production slot if not specified.')
c.argument('branch', options_list=['--branch', '-b'], help='The branch to which the workflow file will be added. Defaults to "master" if not specified.')
c.argument('login_with_github', help='Interactively log in with GitHub to retrieve the Personal Access Token', action='store_true')
with self.argument_context('webapp deployment github-actions add')as c:
c.argument('runtime', options_list=['--runtime', '-r'], help='Canonicalized web runtime in the format of Framework|Version, e.g. "PHP|5.6". Use "az webapp list-runtimes" for available list.')
c.argument('force', options_list=['--force', '-f'], help='When true, the command will overwrite any workflow file with a conflicting name.', action='store_true')
with self.argument_context('webapp log config') as c:
c.argument('application_logging', help='configure application logging',
arg_type=get_enum_type(['filesystem', 'azureblobstorage', 'off']))
c.argument('detailed_error_messages', help='configure detailed error messages',
arg_type=get_three_state_flag(return_label=True))
c.argument('failed_request_tracing', help='configure failed request tracing',
arg_type=get_three_state_flag(return_label=True))
c.argument('level', help='logging level',
arg_type=get_enum_type(['error', 'warning', 'information', 'verbose']))
c.argument('web_server_logging', help='configure Web server logging',
arg_type=get_enum_type(['off', 'filesystem']))
c.argument('docker_container_logging', help='configure gathering STDOUT and STDERR output from container',
arg_type=get_enum_type(['off', 'filesystem']))
with self.argument_context('webapp log tail') as c:
c.argument('provider',
help="By default all live traces configured by `az webapp log config` will be shown, but you can scope to certain providers/folders, e.g. 'application', 'http', etc. For details, check out https://github.com/projectkudu/kudu/wiki/Diagnostic-Log-Stream")
with self.argument_context('webapp log download') as c:
c.argument('log_file', default='webapp_logs.zip', type=file_type, completer=FilesCompleter(),
help='the downloaded zipped log file path')
with self.argument_context('webapp log deployment show') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
c.argument('resource_group', arg_type=resource_group_name_type)
c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Default to the productions slot if not specified")
c.argument('deployment_id', options_list=['--deployment-id'], help='Deployment ID. If none specified, returns the deployment logs of the latest deployment.')
with self.argument_context('webapp log deployment list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
c.argument('resource_group', arg_type=resource_group_name_type)
c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Default to the productions slot if not specified")
with self.argument_context('functionapp log deployment show') as c:
c.argument('name', arg_type=functionapp_name_arg_type, id_part=None)
c.argument('resource_group', arg_type=resource_group_name_type)
c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Default to the productions slot if not specified")
c.argument('deployment_id', options_list=['--deployment-id'], help='Deployment ID. If none specified, returns the deployment logs of the latest deployment.')
with self.argument_context('functionapp log deployment list') as c:
c.argument('name', arg_type=functionapp_name_arg_type, id_part=None)
c.argument('resource_group', arg_type=resource_group_name_type)
c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Default to the productions slot if not specified")
for scope in ['appsettings', 'connection-string']:
with self.argument_context('webapp config ' + scope) as c:
c.argument('settings', nargs='+', help="space-separated {} in a format of `<name>=<value>`".format(scope))
c.argument('slot_settings', nargs='+',
help="space-separated slot {} in a format of either `<name>=<value>` or `@<json_file>`".format(
scope))
c.argument('setting_names', nargs='+', help="space-separated {} names".format(scope))
with self.argument_context('webapp config connection-string') as c:
c.argument('connection_string_type', options_list=['--connection-string-type', '-t'],
help='connection string type', arg_type=get_enum_type(ConnectionStringType))
c.argument('ids', options_list=['--ids'],
help="One or more resource IDs (space delimited). If provided no other 'Resource Id' arguments should be specified.",
required=True)
c.argument('resource_group', options_list=['--resource-group', '-g'],
help='Name of resource group. You can configure the default group using `az configure --defaults group=<name>`. If `--ids` is provided, this should NOT be specified.')
c.argument('name', options_list=['--name', '-n'],
help='Name of the web app. You can configure the default using `az configure --defaults web=<name>`. If `--ids` is provided this should NOT be specified.',
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET]))
with self.argument_context('webapp config storage-account') as c:
c.argument('custom_id', options_list=['--custom-id', '-i'], help='name of the share configured within the web app')
c.argument('storage_type', options_list=['--storage-type', '-t'], help='storage type',
arg_type=get_enum_type(AzureStorageType))
c.argument('account_name', options_list=['--account-name', '-a'], help='storage account name')
c.argument('share_name', options_list=['--share-name', '--sn'],
help='name of the file share as given in the storage account')
c.argument('access_key', options_list=['--access-key', '-k'], help='storage account access key')
c.argument('mount_path', options_list=['--mount-path', '-m'],
help='the path which the web app uses to read-write data ex: /share1 or /share2')
c.argument('slot', options_list=['--slot', '-s'],
help="the name of the slot. Default to the productions slot if not specified")
with self.argument_context('webapp config storage-account add') as c:
c.argument('slot_setting', options_list=['--slot-setting'], help="slot setting")
with self.argument_context('webapp config storage-account update') as c:
c.argument('slot_setting', options_list=['--slot-setting'], help="slot setting")
with self.argument_context('webapp config backup') as c:
c.argument('storage_account_url', help='URL with SAS token to the blob storage container',
options_list=['--container-url'])
c.argument('webapp_name', help='The name of the web app',
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET]))
c.argument('db_name', help='Name of the database in the backup', arg_group='Database')
c.argument('db_connection_string', help='Connection string for the database in the backup',
arg_group='Database')
c.argument('db_type', help='Type of database in the backup', arg_group='Database',
arg_type=get_enum_type(DatabaseType))
with self.argument_context('webapp config backup create') as c:
c.argument('backup_name',
help='Name of the backup. If unspecified, the backup will be named with the web app name and a timestamp',
local_context_attribute=LocalContextAttribute(name='backup_name', actions=[LocalContextAction.SET],
scopes=['webapp']))
with self.argument_context('webapp config backup update') as c:
c.argument('backup_name',
help='Name of the backup. If unspecified, the backup will be named with the web app name and a timestamp',
local_context_attribute=LocalContextAttribute(name='backup_name', actions=[LocalContextAction.GET]))
c.argument('frequency',
help='How often to back up. Use a number followed by d or h, e.g. 5d = 5 days, 2h = 2 hours')
c.argument('keep_at_least_one_backup', help='Always keep one backup, regardless of how old it is',
options_list=['--retain-one'], arg_type=get_three_state_flag(return_label=True))
c.argument('retention_period_in_days',
help='How many days to keep a backup before automatically deleting it. Set to 0 for indefinite retention',
options_list=['--retention'])
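# Illustrative backup schedule update showing the --frequency format described above (values are placeholders):
#   az webapp config backup update -g MyResourceGroup --webapp-name MyWebApp \
#       --container-url "<SAS-URL>" --frequency 1d --retain-one true --retention 10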
with self.argument_context('webapp config backup restore') as c:
c.argument('backup_name', help='Name of the backup to restore',
local_context_attribute=LocalContextAttribute(name='backup_name', actions=[LocalContextAction.GET]))
c.argument('target_name',
help='The name to use for the restored web app. If unspecified, will default to the name that was used when the backup was created')
c.argument('overwrite', help='Overwrite the source web app, if --target-name is not specified',
action='store_true')
c.argument('ignore_hostname_conflict', help='Ignores custom hostnames stored in the backup',
action='store_true')
with self.argument_context('webapp config snapshot') as c:
c.argument('name', arg_type=webapp_name_arg_type)
c.argument('slot', options_list=['--slot', '-s'], help='The name of the slot.')
with self.argument_context('webapp config snapshot list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
with self.argument_context('webapp config snapshot restore') as c:
c.argument('time', help='Timestamp of the snapshot to restore.')
c.argument('restore_content_only', help='Restore the web app files without restoring the settings.')
c.argument('source_resource_group', help='Name of the resource group to retrieve snapshot from.')
c.argument('source_name', help='Name of the web app to retrieve snapshot from.')
c.argument('source_slot', help='Name of the web app slot to retrieve snapshot from.')
with self.argument_context('webapp auth update') as c:
c.argument('enabled', arg_type=get_three_state_flag(return_label=True))
c.argument('token_store_enabled', options_list=['--token-store'],
arg_type=get_three_state_flag(return_label=True), help='use App Service Token Store')
c.argument('action', arg_type=get_enum_type(AUTH_TYPES))
c.argument('runtime_version',
help='Runtime version of the Authentication/Authorization feature in use for the current app')
c.argument('token_refresh_extension_hours', type=float, help="Hours, must be formattable into a float")
c.argument('allowed_external_redirect_urls', nargs='+', help="One or more urls (space-delimited).")
c.argument('client_id', options_list=['--aad-client-id'], arg_group='Azure Active Directory',
help='Application ID to integrate AAD organization account Sign-in into your web app')
c.argument('client_secret', options_list=['--aad-client-secret'], arg_group='Azure Active Directory',
help='AAD application secret')
c.argument('client_secret_certificate_thumbprint', options_list=['--aad-client-secret-certificate-thumbprint', '--thumbprint'], arg_group='Azure Active Directory',
help='Alternative to AAD Client Secret, thumbprint of a certificate used for signing purposes')
c.argument('allowed_audiences', nargs='+', options_list=['--aad-allowed-token-audiences'],
arg_group='Azure Active Directory', help="One or more token audiences (comma-delimited).")
c.argument('issuer', options_list=['--aad-token-issuer-url'],
help='This url can be found in the JSON output returned from your active directory endpoint using your tenantID. The endpoint can be queried from `az cloud show` at \"endpoints.activeDirectory\". '
'The tenantID can be found using `az account show`. Get the \"issuer\" from the JSON at <active directory endpoint>/<tenantId>/.well-known/openid-configuration.',
arg_group='Azure Active Directory')
c.argument('facebook_app_id', arg_group='Facebook',
help="Application ID to integrate Facebook Sign-in into your web app")
c.argument('facebook_app_secret', arg_group='Facebook', help='Facebook Application client secret')
c.argument('facebook_oauth_scopes', nargs='+',
help="One or more facebook authentication scopes (comma-delimited).", arg_group='Facebook')
c.argument('twitter_consumer_key', arg_group='Twitter',
help='Application ID to integrate Twitter Sign-in into your web app')
c.argument('twitter_consumer_secret', arg_group='Twitter', help='Twitter Application client secret')
c.argument('google_client_id', arg_group='Google',
help='Application ID to integrate Google Sign-in into your web app')
c.argument('google_client_secret', arg_group='Google', help='Google Application client secret')
c.argument('google_oauth_scopes', nargs='+', help="One or more Google authentication scopes (space-delimited).",
arg_group='Google')
c.argument('microsoft_account_client_id', arg_group='Microsoft',
help="AAD V2 Application ID to integrate Microsoft account Sign-in into your web app")
c.argument('microsoft_account_client_secret', arg_group='Microsoft', help='AAD V2 Application client secret')
c.argument('microsoft_account_oauth_scopes', nargs='+',
help="One or more Microsoft authentification scopes (comma-delimited).", arg_group='Microsoft')
with self.argument_context('webapp hybrid-connection') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
c.argument('slot', help="the name of the slot. Default to the productions slot if not specified")
c.argument('namespace', help="Hybrid connection namespace")
c.argument('hybrid_connection', help="Hybrid connection name")
with self.argument_context('functionapp hybrid-connection') as c:
c.argument('name', id_part=None, local_context_attribute=LocalContextAttribute(name='functionapp_name',
actions=[
LocalContextAction.GET]))
c.argument('slot', help="the name of the slot. Default to the productions slot if not specified")
c.argument('namespace', help="Hybrid connection namespace")
c.argument('hybrid_connection', help="Hybrid connection name")
with self.argument_context('appservice hybrid-connection set-key') as c:
c.argument('plan', help="AppService plan",
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('namespace', help="Hybrid connection namespace")
c.argument('hybrid_connection', help="Hybrid connection name")
c.argument('key_type', help="Which key (primary or secondary) should be used")
with self.argument_context('appservice vnet-integration list') as c:
c.argument('plan', help="AppService plan",
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('resource_group', arg_type=resource_group_name_type)
with self.argument_context('webapp up') as c:
c.argument('name', arg_type=webapp_name_arg_type,
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET,
LocalContextAction.SET],
scopes=['webapp', 'cupertino']))
c.argument('plan', options_list=['--plan', '-p'],
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
help="name of the app service plan associated with the webapp",
configured_default='appserviceplan')
c.argument('sku', arg_type=sku_arg_type)
c.argument('os_type', options_list=['--os-type'], arg_type=get_enum_type(OS_TYPES), help="Set the OS type for the app to be created.")
c.argument('runtime', options_list=['--runtime', '-r'], help="canonicalized web runtime in the format of Framework:Version, e.g. \"PHP:7.2\". Allowed delimiters: \"|\" or \":\". If using powershell, please use the \":\" delimiter or be sure to properly escape the \"|\" character. "
"Use `az webapp list-runtimes` for available list.")
c.argument('dryrun', help="show summary of the create and deploy operation instead of executing it",
default=False, action='store_true')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('launch_browser', help="Launch the created app using the default browser", default=False,
action='store_true', options_list=['--launch-browser', '-b'])
c.argument('logs',
help="Configure default logging required to enable viewing log stream immediately after launching the webapp",
default=False, action='store_true')
c.argument('html', help="Ignore app detection and deploy as an html app", default=False, action='store_true')
c.argument('app_service_environment', options_list=['--app-service-environment', '-e'], help='name or resource ID of the (pre-existing) App Service Environment to deploy to. Requires an Isolated V2 sku [I1v2, I2v2, I3v2]')
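# Illustrative 'webapp up' invocation using the runtime format documented above (names are placeholders):
#   az webapp up -n MyWebApp --runtime "PHP:7.2" --sku F1 --location westus2 --logs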
with self.argument_context('webapp ssh') as c:
c.argument('port', options_list=['--port', '-p'],
help='Port for the remote connection. Default: Random available port', type=int)
c.argument('timeout', options_list=['--timeout', '-t'], help='timeout in seconds. Defaults to none', type=int)
c.argument('instance', options_list=['--instance', '-i'], help='Webapp instance to connect to. Defaults to none.')
with self.argument_context('webapp create-remote-connection') as c:
c.argument('port', options_list=['--port', '-p'],
help='Port for the remote connection. Default: Random available port', type=int)
c.argument('timeout', options_list=['--timeout', '-t'], help='timeout in seconds. Defaults to none', type=int)
c.argument('instance', options_list=['--instance', '-i'], help='Webapp instance to connect to. Defaults to none.')
with self.argument_context('webapp vnet-integration') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
c.argument('slot', help="The name of the slot. Default to the productions slot if not specified.")
c.argument('vnet', help="The name or resource ID of the Vnet",
local_context_attribute=LocalContextAttribute(name='vnet_name', actions=[LocalContextAction.GET]))
c.argument('subnet', help="The name or resource ID of the subnet",
local_context_attribute=LocalContextAttribute(name='subnet_name', actions=[LocalContextAction.GET]))
c.argument('skip_delegation_check', help="Skip check if you do not have permission or the VNet is in another subscription.",
arg_type=get_three_state_flag(return_label=True))
with self.argument_context('webapp deploy') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the webapp to deploy to.')
c.argument('src_path', options_list=['--src-path'], help='Path of the artifact to be deployed. Ex: "myapp.zip" or "/myworkspace/apps/myapp.war"')
c.argument('src_url', options_list=['--src-url'], help='URL of the artifact. The webapp will pull the artifact from this URL. Ex: "http://mysite.com/files/myapp.war?key=123"')
c.argument('target_path', options_list=['--target-path'], help='Absolute path that the artifact should be deployed to. Defaults to "home/site/wwwroot/" Ex: "/home/site/deployments/tools/", "/home/site/scripts/startup-script.sh".')
c.argument('artifact_type', options_list=['--type'], help='Used to override the type of artifact being deployed.', choices=['war', 'jar', 'ear', 'lib', 'startup', 'static', 'zip'])
c.argument('is_async', options_list=['--async'], help='If true, the artifact is deployed asynchronously. (The command will exit once the artifact is pushed to the web app.)', choices=['true', 'false'])
c.argument('restart', options_list=['--restart'], help='If true, the web app will be restarted following the deployment. Set this to false if you are deploying multiple artifacts and do not want to restart the site on the earlier deployments.', choices=['true', 'false'])
c.argument('clean', options_list=['--clean'], help='If true, cleans the target directory prior to deploying the file(s). Default value is determined based on artifact type.', choices=['true', 'false'])
c.argument('ignore_stack', options_list=['--ignore-stack'], help='If true, any stack-specific defaults are ignored.', choices=['true', 'false'])
c.argument('timeout', options_list=['--timeout'], help='Timeout for the deployment operation in milliseconds.')
c.argument('slot', help="The name of the slot. Default to the productions slot if not specified.")
with self.argument_context('functionapp deploy') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the function app to deploy to.')
c.argument('src_path', options_list=['--src-path'], help='Path of the artifact to be deployed. Ex: "myapp.zip" or "/myworkspace/apps/myapp.war"')
c.argument('src_url', options_list=['--src-url'], help='URL of the artifact. The webapp will pull the artifact from this URL. Ex: "http://mysite.com/files/myapp.war?key=123"')
c.argument('target_path', options_list=['--target-path'], help='Absolute path that the artifact should be deployed to. Defaults to "home/site/wwwroot/". Ex: "/home/site/deployments/tools/", "/home/site/scripts/startup-script.sh".')
c.argument('artifact_type', options_list=['--type'], help='Used to override the type of artifact being deployed.', choices=['war', 'jar', 'ear', 'lib', 'startup', 'static', 'zip'])
c.argument('is_async', options_list=['--async'], help='Asynchronous deployment', choices=['true', 'false'])
c.argument('restart', options_list=['--restart'], help='If true, the web app will be restarted following the deployment, default value is true. Set this to false if you are deploying multiple artifacts and do not want to restart the site on the earlier deployments.', choices=['true', 'false'])
c.argument('clean', options_list=['--clean'], help='If true, cleans the target directory prior to deploying the file(s). Default value is determined based on artifact type.', choices=['true', 'false'])
c.argument('ignore_stack', options_list=['--ignore-stack'], help='If true, any stack-specific defaults are ignored.', choices=['true', 'false'])
c.argument('timeout', options_list=['--timeout'], help='Timeout for the deployment operation in milliseconds.')
c.argument('slot', help="The name of the slot. Default to the productions slot if not specified.")
with self.argument_context('functionapp create') as c:
c.argument('vnet', options_list=['--vnet'], help="Name or resource ID of the regional virtual network. If there are multiple vnets of the same name across different resource groups, use vnet resource id to specify which vnet to use. If vnet name is used, by default, the vnet in the same resource group as the webapp will be used. Must be used with --subnet argument.")
c.argument('subnet', options_list=['--subnet'], help="Name or resource ID of the pre-existing subnet to have the webapp join. The --vnet argument is also needed if specifying subnet by name.")
with self.argument_context('functionapp vnet-integration') as c:
c.argument('name', arg_type=functionapp_name_arg_type, id_part=None)
c.argument('slot', help="The name of the slot. Default to the productions slot if not specified")
c.argument('vnet', help="The name or resource ID of the Vnet",
local_context_attribute=LocalContextAttribute(name='vnet_name', actions=[LocalContextAction.GET]))
c.argument('subnet', help="The name or resource ID of the subnet",
local_context_attribute=LocalContextAttribute(name='subnet_name', actions=[LocalContextAction.GET]))
c.argument('skip_delegation_check', help="Skip check if you do not have permission or the VNet is in another subscription.",
arg_type=get_three_state_flag(return_label=True))
for scope in ['functionapp', 'logicapp']:
app_type = scope[:-3] # 'function' or 'logic'
with self.argument_context(scope) as c:
c.ignore('app_instance')
c.argument('name', arg_type=name_arg_type_dict[scope], id_part='name', help='name of the {} app'.format(app_type))
c.argument('slot', options_list=['--slot', '-s'],
help="the name of the slot. Default to the productions slot if not specified")
with self.argument_context(scope + ' create') as c:
c.argument('plan', options_list=['--plan', '-p'], configured_default='appserviceplan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
help="name or resource id of the {} app service plan. Use 'appservice plan create' to get one. If using an App Service plan from a different resource group, the full resource id must be used and not the plan name.".format(scope),
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('name', options_list=['--name', '-n'], help='name of the new {} app'.format(app_type),
local_context_attribute=LocalContextAttribute(name=scope + '_name', actions=[LocalContextAction.SET], scopes=[scope]))
c.argument('storage_account', options_list=['--storage-account', '-s'],
help='Provide a string value of a Storage Account in the provided Resource Group, or the Resource ID of a Storage Account in a different Resource Group',
local_context_attribute=LocalContextAttribute(name='storage_account_name', actions=[LocalContextAction.GET]))
c.argument('consumption_plan_location', options_list=['--consumption-plan-location', '-c'],
help="Geographic location where {} app will be hosted. Use `az {} list-consumption-locations` to view available locations.".format(app_type, scope))
c.argument('os_type', arg_type=get_enum_type(OS_TYPES), help="Set the OS type for the app to be created.")
c.argument('app_insights_key', help="Instrumentation key of App Insights to be added.")
c.argument('app_insights',
help="Name of the existing App Insights project to be added to the {} app. Must be in the ".format(app_type) +
"same resource group.")
c.argument('disable_app_insights', arg_type=get_three_state_flag(return_label=True),
help="Disable creating application insights resource during {} create. No logs will be available.".format(scope))
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-d'], help='The container registry server username.')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-w'],
help='The container registry server password. Required for private registries.')
if scope == 'functionapp':
c.argument('functions_version', help='The functions app version. NOTE: This will be required starting with the next release cycle', arg_type=get_enum_type(FUNCTIONS_VERSIONS))
c.argument('runtime', help='The functions runtime stack. Use "az functionapp list-runtimes" to check supported runtimes and versions')
c.argument('runtime_version',
help='The version of the functions runtime stack. '
'Use "az functionapp list-runtimes" to check supported runtimes and versions')
with self.argument_context('functionapp config hostname') as c:
c.argument('webapp_name', arg_type=functionapp_name_arg_type, id_part='name')
# For commands with shared impl between web app and function app and has output, we apply type validation to avoid confusions
with self.argument_context('functionapp show') as c:
c.argument('name', arg_type=functionapp_name_arg_type)
with self.argument_context('functionapp delete') as c:
c.argument('name', arg_type=functionapp_name_arg_type, local_context_attribute=None)
with self.argument_context('functionapp config appsettings') as c:
c.argument('slot_settings', nargs='+', help="space-separated slot app settings in a format of `<name>=<value>`")
with self.argument_context('logicapp show') as c:
c.argument('name', arg_type=logicapp_name_arg_type)
with self.argument_context('logicapp delete') as c:
c.argument('name', arg_type=logicapp_name_arg_type, local_context_attribute=None)
with self.argument_context('functionapp plan') as c:
c.argument('name', arg_type=name_arg_type, help='The name of the app service plan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
configured_default='appserviceplan', id_part='name',
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('is_linux', arg_type=get_three_state_flag(return_label=True), required=False,
help='host function app on Linux worker')
c.argument('number_of_workers', options_list=['--number-of-workers', '--min-instances'],
help='The number of workers for the app service plan.')
c.argument('max_burst',
help='The maximum number of elastic workers for the plan.')
c.argument('tags', arg_type=tags_type)
with self.argument_context('functionapp update') as c:
c.argument('plan', required=False, help='The name or resource id of the plan to update the functionapp with.')
c.argument('force', required=False, help='Required if attempting to migrate the function app from a Premium to a Consumption plan (specified with --plan).',
action='store_true')
with self.argument_context('functionapp plan create') as c:
c.argument('name', arg_type=name_arg_type, help='The name of the app service plan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
configured_default='appserviceplan', id_part='name',
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.SET],
scopes=['appservice', 'webapp', 'functionapp']))
c.argument('zone_redundant', options_list=['--zone-redundant', '-z'], help='Enable zone redundancy for high availability. Cannot be changed after plan creation. Minimum instance count is 3.')
c.argument('sku', required=True, help='The SKU of the app service plan. e.g., F1(Free), D1(Shared), B1(Basic Small), '
'B2(Basic Medium), B3(Basic Large), S1(Standard Small), '
'P1V2(Premium V2 Small), PC2 (Premium Container Small), PC3 '
'(Premium Container Medium), PC4 (Premium Container Large), I1 '
'(Isolated Small), I2 (Isolated Medium), I3 (Isolated Large), K1 '
'(Kubernetes).')
with self.argument_context('functionapp plan update') as c:
c.argument('sku', required=False, help='The SKU of the app service plan.')
with self.argument_context('functionapp plan delete') as c:
c.argument('name', arg_type=name_arg_type, help='The name of the app service plan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
configured_default='appserviceplan', id_part='name',
local_context_attribute=None)
with self.argument_context('functionapp deployment list-publishing-profiles') as c:
c.argument('xml', options_list=['--xml'], required=False, help='retrieves the publishing profile details in XML format')
with self.argument_context('functionapp deployment slot') as c:
c.argument('slot', help='the name of the slot')
# This is set to webapp to simply reuse webapp functions, without rewriting the same functions for function apps.
# The help will still show "-n or --name", so it should not be a problem to do it this way
c.argument('webapp', arg_type=functionapp_name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Web/sites'),
help='Name of the function app', id_part='name')
c.argument('auto_swap_slot', help='target slot to auto swap', default='production')
c.argument('disable', help='disable auto swap', action='store_true')
c.argument('target_slot', help="target slot to swap, default to 'production'")
c.argument('preserve_vnet', help="preserve Virtual Network to the slot during swap, default to 'true'",
arg_type=get_three_state_flag(return_label=True))
with self.argument_context('functionapp deployment slot create') as c:
c.argument('configuration_source',
help="source slot to clone configurations from. Use function app's name to refer to the production slot")
c.argument('deployment_container_image_name', options_list=['--deployment-container-image-name', '-i'],
help='Container image name, e.g. publisher/image-name:tag')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-d'],
help='The container registry server password')
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-u'], help='the container registry server username')
with self.argument_context('functionapp deployment slot swap') as c:
c.argument('action',
help="swap types. use 'preview' to apply target slot's settings on the source slot first; use 'swap' to complete it; use 'reset' to reset the swap",
arg_type=get_enum_type(['swap', 'preview', 'reset']))
with self.argument_context('functionapp keys', id_part=None) as c:
c.argument('resource_group_name', arg_type=resource_group_name_type,)
c.argument('name', arg_type=functionapp_name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Web/sites'),
help='Name of the function app')
c.argument('slot', options_list=['--slot', '-s'],
help="The name of the slot. Defaults to the productions slot if not specified")
with self.argument_context('functionapp keys set', id_part=None) as c:
c.argument('key_name', help="Name of the key to set.")
c.argument('key_value', help="Value of the new key. If not provided, a value will be generated.")
c.argument('key_type', help="Type of key.", arg_type=get_enum_type(['systemKey', 'functionKeys', 'masterKey']))
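# Illustrative key management call using the key types enumerated above (names are placeholders):
#   az functionapp keys set -g MyResourceGroup -n MyFunctionApp --key-type functionKeys --key-name mykey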
with self.argument_context('functionapp keys delete', id_part=None) as c:
c.argument('key_name', help="Name of the key to set.")
c.argument('key_type', help="Type of key.", arg_type=get_enum_type(['systemKey', 'functionKeys', 'masterKey']))
with self.argument_context('functionapp function', id_part=None) as c:
c.argument('resource_group_name', arg_type=resource_group_name_type,)
c.argument('name', arg_type=functionapp_name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Web/sites'),
help='Name of the function app')
c.argument('function_name', help="Name of the Function")
with self.argument_context('functionapp function keys', id_part=None) as c:
c.argument('slot', options_list=['--slot', '-s'],
help="The name of the slot. Defaults to the productions slot if not specified")
with self.argument_context('functionapp function keys set', id_part=None) as c:
c.argument('key_name', help="Name of the key to set.")
c.argument('key_value', help="Value of the new key. If not provided, a value will be generated.")
with self.argument_context('functionapp function keys delete', id_part=None) as c:
c.argument('key_name', help="Name of the key to set.")
# Access Restriction Commands
for scope in ['webapp', 'functionapp']:
with self.argument_context(scope + ' config access-restriction show') as c:
c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type))
with self.argument_context(scope + ' config access-restriction add') as c:
c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type))
c.argument('rule_name', options_list=['--rule-name', '-r'],
help='Name of the access restriction rule to add')
c.argument('priority', options_list=['--priority', '-p'],
help="Priority of the access restriction rule")
c.argument('description', help='Description of the access restriction rule')
c.argument('action', arg_type=get_enum_type(ACCESS_RESTRICTION_ACTION_TYPES),
help="Allow or deny access")
c.argument('ip_address', help="IP address or CIDR range (optional comma separated list of up to 8 ranges)",
validator=validate_ip_address)
c.argument('service_tag', help="Service Tag (optional comma separated list of up to 8 tags)",
validator=validate_service_tag)
c.argument('vnet_name', help="vNet name")
c.argument('subnet', help="Subnet name (requires vNet name) or subnet resource id")
c.argument('ignore_missing_vnet_service_endpoint',
options_list=['--ignore-missing-endpoint', '-i'],
help='Create access restriction rule without checking if the subnet has Microsoft.Web service endpoint enabled',
arg_type=get_three_state_flag(), default=False)
c.argument('scm_site', help='True if the access restriction is added to the scm site',
arg_type=get_three_state_flag())
c.argument('vnet_resource_group', help='Resource group of virtual network (default is web app resource group)')
c.argument('http_headers', nargs='+', help="space-separated http headers in a format of `<name>=<value>`")
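# Illustrative access-restriction rule additions (rule names, addresses, and service tag are placeholders):
#   az webapp config access-restriction add -g MyResourceGroup -n MyWebApp \
#       --rule-name allowOffice --action Allow --ip-address 130.220.0.0/27 --priority 200
#   az functionapp config access-restriction add -g MyResourceGroup -n MyFunctionApp \
#       --rule-name allowFrontDoor --action Allow --service-tag AzureFrontDoor.Backend --priority 300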
with self.argument_context(scope + ' config access-restriction remove') as c:
c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type))
c.argument('rule_name', options_list=['--rule-name', '-r'],
help='Name of the access restriction to remove')
c.argument('ip_address', help="IP address or CIDR range (optional comma separated list of up to 8 ranges)",
validator=validate_ip_address)
c.argument('service_tag', help="Service Tag (optional comma separated list of up to 8 tags)",
validator=validate_service_tag)
c.argument('vnet_name', help="vNet name")
c.argument('subnet', help="Subnet name (requires vNet name) or subnet resource id")
c.argument('scm_site', help='True if access restriction should be removed from scm site',
arg_type=get_three_state_flag())
c.argument('action', arg_type=get_enum_type(ACCESS_RESTRICTION_ACTION_TYPES),
help="Allow or deny access")
with self.argument_context(scope + ' config access-restriction set') as c:
c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type))
c.argument('use_same_restrictions_for_scm_site',
help="Use same access restrictions for scm site",
arg_type=get_three_state_flag())
# App Service Environment Commands
with self.argument_context('appservice ase show') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
with self.argument_context('appservice ase create') as c:
c.argument('name', options_list=['--name', '-n'], validator=validate_ase_create,
help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.SET],
scopes=['appservice']))
c.argument('kind', options_list=['--kind', '-k'], arg_type=get_enum_type(ASE_KINDS),
default='ASEv2', help="Specify App Service Environment version")
c.argument('subnet', help='Name or ID of existing subnet. To create vnet and/or subnet \
use `az network vnet [subnet] create`')
c.argument('vnet_name', help='Name of the vNet. Mandatory if only subnet name is specified.')
c.argument('virtual_ip_type', arg_type=get_enum_type(ASE_LOADBALANCER_MODES),
help="Specify if app service environment should be accessible from internet")
c.argument('ignore_subnet_size_validation', arg_type=get_three_state_flag(),
help='Do not check if subnet is sized according to recommendations.')
c.argument('ignore_route_table', arg_type=get_three_state_flag(),
help='Configure route table manually. Applies to ASEv2 only.')
c.argument('ignore_network_security_group', arg_type=get_three_state_flag(),
help='Configure network security group manually. Applies to ASEv2 only.')
c.argument('force_route_table', arg_type=get_three_state_flag(),
help='Override route table for subnet. Applies to ASEv2 only.')
c.argument('force_network_security_group', arg_type=get_three_state_flag(),
help='Override network security group for subnet. Applies to ASEv2 only.')
c.argument('front_end_scale_factor', type=int, validator=validate_front_end_scale_factor,
help='Scale of front ends to app service plan instance ratio. Applies to ASEv2 only.', default=15)
c.argument('front_end_sku', arg_type=isolated_sku_arg_type, default='I1',
help='Size of front end servers. Applies to ASEv2 only.')
c.argument('os_preference', arg_type=get_enum_type(ASE_OS_PREFERENCE_TYPES),
help='Determine if app service environment should start with Linux workers. Applies to ASEv2 only.')
c.argument('zone_redundant', arg_type=get_three_state_flag(),
help='Configure App Service Environment as Zone Redundant. Applies to ASEv3 only.')
with self.argument_context('appservice ase delete') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment')
with self.argument_context('appservice ase update') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
c.argument('front_end_scale_factor', type=int, validator=validate_front_end_scale_factor,
help='(ASEv2 only) Scale of front ends to app service plan instance ratio between 5 and 15.')
c.argument('front_end_sku', arg_type=isolated_sku_arg_type,
help='(ASEv2 only) Size of front end servers.')
c.argument('allow_new_private_endpoint_connections', arg_type=get_three_state_flag(),
options_list=['--allow-new-private-endpoint-connections', '-p'],
help='(ASEv3 only) Configure Apps in App Service Environment to allow new private endpoint connections.')
with self.argument_context('appservice ase list-addresses') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
with self.argument_context('appservice ase list-plans') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
with self.argument_context('appservice ase create-inbound-services') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
c.argument('subnet', help='Name or ID of existing subnet for DNS Zone link. \
To create vnet and/or subnet use `az network vnet [subnet] create`')
c.argument('vnet_name', help='Name of the vNet. Mandatory if only subnet name is specified.')
c.argument('skip_dns', arg_type=get_three_state_flag(),
help='Do not create Private DNS Zone and DNS records.',
deprecate_info=c.deprecate(expiration='3.0.0'))
# App Service Domain Commands
with self.argument_context('appservice domain create') as c:
c.argument('hostname', options_list=['--hostname', '-n'], help='Name of the custom domain')
c.argument('contact_info', options_list=['--contact-info', '-c'], help='The file path to a JSON object with your contact info for domain registration. '
'Please see the following link for the format of the JSON file expected: '
'https://github.com/AzureAppServiceCLI/appservice_domains_templates/blob/master/contact_info.json')
c.argument('privacy', options_list=['--privacy', '-p'], help='Enable privacy protection')
c.argument('auto_renew', options_list=['--auto-renew', '-a'], help='Enable auto-renew on the domain')
c.argument('accept_terms', options_list=['--accept-terms'], help='By using this flag, you are accepting '
'the conditions shown using the --show-hostname-purchase-terms flag. ')
c.argument('tags', arg_type=tags_type)
c.argument('dryrun', help='Show summary of the purchase and create operation instead of executing it')
c.argument('no_wait', help='Do not wait for the create to complete, and return immediately after queuing the create.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources')
with self.argument_context('appservice domain show-terms') as c:
c.argument('hostname', options_list=['--hostname', '-n'], help='Name of the custom domain')
with self.argument_context('staticwebapp', validator=validate_public_cloud) as c:
c.argument('source', options_list=['--source', '-s'], help="URL for the repository of the static site.", arg_group="Github")
c.argument('token', options_list=['--token', '-t'], arg_group="Github",
help="A user's GitHub repository token. This is used to setup the Github Actions workflow file and "
"API secrets. If you need to create a Github Personal Access Token, "
"please run with the '--login-with-github' flag or follow the steps found at the following link:\n"
"https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line")
c.argument('login_with_github', help="Interactively log in with GitHub to retrieve the Personal Access Token", arg_group="Github")
c.argument('branch', options_list=['--branch', '-b'], help="The target branch in the repository.", arg_group="Github")
c.ignore('format_output')
c.argument('name', options_list=['--name', '-n'], metavar='NAME', help="Name of the static site")
with self.argument_context('staticwebapp environment') as c:
c.argument('environment_name',
options_list=['--environment-name'], help="Name of the environment of static site")
with self.argument_context('staticwebapp hostname') as c:
c.argument('hostname',
options_list=['--hostname'],
help="custom hostname such as www.example.com. Only support sub domain in preview.")
with self.argument_context('staticwebapp hostname set') as c:
c.argument('validation_method',
options_list=['--validation-method', '-m'],
help="Validation method for the custom domain.",
arg_type=get_enum_type(["cname-delegation", "dns-txt-token"]))
with self.argument_context('staticwebapp appsettings') as c:
c.argument('setting_pairs', options_list=['--setting-names'],
help="Space-separated app settings in 'key=value' format. ",
nargs='*')
c.argument('setting_names', options_list=['--setting-names'], help="Space-separated app setting names.",
nargs='*')
with self.argument_context('staticwebapp users') as c:
c.argument('authentication_provider', options_list=['--authentication-provider'],
help="Authentication provider of the user identity such as AAD, Facebook, GitHub, Google, Twitter.")
c.argument('user_details', options_list=['--user-details'],
help="Email for AAD, Facebook, and Google. Account name (handle) for GitHub and Twitter.")
c.argument('user_id',
help="Given id of registered user.")
c.argument('domain', options_list=['--domain'],
help="A domain added to the static app in quotes.")
c.argument('roles', options_list=['--roles'],
help="Comma-separated default or user-defined role names. "
"Roles that can be assigned to a user are comma separated and case-insensitive (at most 50 "
"roles up to 25 characters each and restricted to 0-9,A-Z,a-z, and _). "
"Define roles in routes.json during root directory of your GitHub repo.")
c.argument('invitation_expiration_in_hours', options_list=['--invitation-expiration-in-hours'],
help="This value sets when the link will expire in hours. The maximum is 168 (7 days).")
with self.argument_context('staticwebapp identity') as c:
c.argument('scope', help="The scope the managed identity has access to")
c.argument('role', help="Role name or id the managed identity will be assigned")
with self.argument_context('staticwebapp identity assign') as c:
c.argument('assign_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
with self.argument_context('staticwebapp identity remove') as c:
c.argument('remove_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
with self.argument_context('staticwebapp create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('tags', arg_type=tags_type)
c.argument('sku', arg_type=static_web_app_sku_arg_type)
c.argument('app_location', options_list=['--app-location'],
help="Location of your application code. For example, '/' represents the root of your app, "
"while '/app' represents a directory called 'app'")
c.argument('api_location', options_list=['--api-location'],
help="Location of your Azure Functions code. For example, '/api' represents a folder called 'api'.")
c.argument('app_artifact_location', options_list=['--app-artifact-location'],
help="The path of your build output relative to your apps location. For example, setting a value "
"of 'build' when your app location is set to '/app' will cause the content at '/app/build' to "
"be served.",
deprecate_info=c.deprecate(expiration='2.22.1'))
c.argument('output_location', options_list=['--output-location'],
help="The path of your build output relative to your apps location. For example, setting a value "
"of 'build' when your app location is set to '/app' will cause the content at '/app/build' to "
"be served.")
with self.argument_context('staticwebapp update') as c:
c.argument('tags', arg_type=tags_type)
c.argument('sku', arg_type=static_web_app_sku_arg_type)
with self.argument_context('staticwebapp functions link') as c:
        c.argument('function_resource_id', help="Resource ID of the functionapp to link. Can be retrieved with 'az functionapp show --query id'")
c.argument('force', help="Force the function link even if the function is already linked to a static webapp. May be needed if the function was previously linked to a static webapp.")
with self.argument_context('staticwebapp enterprise-edge') as c:
c.argument("no_register", help="Don't try to register the Microsoft.CDN provider. Registration can be done manually with: az provider register --wait --namespace Microsoft.CDN. For more details, please review the documentation available at https://go.microsoft.com/fwlink/?linkid=2184995 .", default=False)
def load_arguments(self, _):
# pylint: disable=line-too-long
# PARAMETER REGISTRATION
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
sku_arg_type = CLIArgumentType(
help='The pricing tiers, e.g., F1(Free), D1(Shared), B1(Basic Small), B2(Basic Medium), B3(Basic Large), S1(Standard Small), P1V2(Premium V2 Small), P1V3(Premium V3 Small), P2V3(Premium V3 Medium), P3V3(Premium V3 Large), PC2 (Premium Container Small), PC3 (Premium Container Medium), PC4 (Premium Container Large), I1 (Isolated Small), I2 (Isolated Medium), I3 (Isolated Large), I1v2 (Isolated V2 Small), I2v2 (Isolated V2 Medium), I3v2 (Isolated V2 Large), WS1 (Logic Apps Workflow Standard 1), WS2 (Logic Apps Workflow Standard 2), WS3 (Logic Apps Workflow Standard 3)',
arg_type=get_enum_type(
['F1', 'FREE', 'D1', 'SHARED', 'B1', 'B2', 'B3', 'S1', 'S2', 'S3', 'P1V2', 'P2V2', 'P3V2', 'P1V3', 'P2V3', 'P3V3', 'PC2', 'PC3',
'PC4', 'I1', 'I2', 'I3', 'I1v2', 'I2v2', 'I3v2', 'WS1', 'WS2', 'WS3']))
webapp_name_arg_type = CLIArgumentType(configured_default='web', options_list=['--name', '-n'], metavar='NAME',
completer=get_resource_name_completion_list('Microsoft.Web/sites'),
id_part='name',
help="name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`",
local_context_attribute=LocalContextAttribute(name='web_name', actions=[
LocalContextAction.GET]))
functionapp_name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME',
help="name of the function app.",
local_context_attribute=LocalContextAttribute(name='functionapp_name',
actions=[
LocalContextAction.GET]))
logicapp_name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME',
help="name of the logic app.",
local_context_attribute=LocalContextAttribute(name='logicapp_name',
actions=[LocalContextAction.GET]))
name_arg_type_dict = {
'functionapp': functionapp_name_arg_type,
'logicapp': logicapp_name_arg_type
}
isolated_sku_arg_type = CLIArgumentType(
help='The Isolated pricing tiers, e.g., I1 (Isolated Small), I2 (Isolated Medium), I3 (Isolated Large)',
arg_type=get_enum_type(['I1', 'I2', 'I3']))
static_web_app_sku_arg_type = CLIArgumentType(
help='The pricing tiers for Static Web App',
arg_type=get_enum_type(['Free', 'Standard'])
)
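    # The CLIArgumentType templates above are reused below via arg_type=...; LocalContextAttribute wires
    # parameter persistence (roughly: SET stores a value after a command runs, GET reads it back as a
    # default) when `az config param-persist on` is enabled.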
# use this hidden arg to give a command the right instance, that functionapp commands
# work on function app and webapp ones work on web app
with self.argument_context('webapp') as c:
c.ignore('app_instance')
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('slot', options_list=['--slot', '-s'],
help="the name of the slot. Default to the productions slot if not specified")
c.argument('name', arg_type=webapp_name_arg_type)
with self.argument_context('appservice') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx))
with self.argument_context('appservice list-locations') as c:
c.argument('linux_workers_enabled', action='store_true',
help='get regions which support hosting web apps on Linux workers')
c.argument('sku', arg_type=sku_arg_type)
with self.argument_context('appservice plan') as c:
c.argument('name', arg_type=name_arg_type, help='The name of the app service plan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
configured_default='appserviceplan', id_part='name',
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('number_of_workers', help='Number of workers to be allocated.', type=int, default=1)
c.ignore('max_burst')
with self.argument_context('appservice plan create') as c:
c.argument('name', options_list=['--name', '-n'], help="Name of the new app service plan", completer=None,
validator=validate_asp_create,
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.SET],
scopes=['appservice', 'webapp', 'functionapp']))
c.argument('number_of_workers', help='Number of workers to be allocated.', type=int, default=1)
c.argument('app_service_environment', options_list=['--app-service-environment', '-e'],
help="Name or ID of the app service environment",
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
c.argument('sku', arg_type=sku_arg_type)
c.argument('is_linux', action='store_true', required=False, help='host web app on Linux worker')
c.argument('hyper_v', action='store_true', required=False, help='Host web app on Windows container')
c.argument('per_site_scaling', action='store_true', required=False, help='Enable per-app scaling at the '
'App Service plan level to allow for '
'scaling an app independently from '
'the App Service plan that hosts it.')
c.argument('zone_redundant', options_list=['--zone-redundant', '-z'], help='Enable zone redundancy for high availability. Cannot be changed after plan creation. Minimum instance count is 3.')
c.argument('tags', arg_type=tags_type)
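    # Illustrative invocation (hypothetical names) combining the plan-create arguments registered above:
    #   az appservice plan create -g <rg> -n <plan> --is-linux --sku P1V3 --number-of-workers 3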
with self.argument_context('appservice plan update') as c:
c.argument('sku', arg_type=sku_arg_type)
c.argument('elastic_scale', arg_type=get_three_state_flag(), is_preview=True, help='Enable or disable automatic scaling. Set to "true" to enable elastic scale for this plan, or "false" to disable elastic scale for this plan. The SKU must be a Premium V2 SKU (P1V2, P2V2, P3V2) or a Premium V3 SKU (P1V3, P2V3, P3V3)')
c.argument('max_elastic_worker_count', options_list=['--max-elastic-worker-count', '-m'], type=int, is_preview=True, help='Maximum number of instances that the plan can scale out to. The plan must be an elastic scale plan.')
c.argument('number_of_workers', type=int, help='Number of workers to be allocated.')
c.ignore('allow_pending_state')
with self.argument_context('appservice plan delete') as c:
c.argument('name', arg_type=name_arg_type, help='The name of the app service plan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
configured_default='appserviceplan', id_part='name', local_context_attribute=None)
with self.argument_context('webapp create') as c:
c.argument('name', options_list=['--name', '-n'], help='name of the new web app',
validator=validate_site_create,
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.SET],
scopes=['webapp', 'cupertino']))
c.argument('startup_file', help="Linux only. The web's startup file")
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-s'], help='the container registry server username')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-w'], help='The container registry server password. Required for private registries.')
c.argument('multicontainer_config_type', options_list=['--multicontainer-config-type'], help="Linux only.", arg_type=get_enum_type(MULTI_CONTAINER_TYPES))
c.argument('multicontainer_config_file', options_list=['--multicontainer-config-file'], help="Linux only. Config file for multicontainer apps. (local or remote)")
c.argument('runtime', options_list=['--runtime', '-r'], help="canonicalized web runtime in the format of Framework:Version, e.g. \"PHP:7.2\". Allowed delimiters: \"|\" or \":\". If using powershell, please use the \":\" delimiter or be sure to properly escape the \"|\" character. "
"Use `az webapp list-runtimes` for available list") # TODO ADD completer
c.argument('plan', options_list=['--plan', '-p'], configured_default='appserviceplan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
help="name or resource id of the app service plan. Use 'appservice plan create' to get one",
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('vnet', help="Name or resource ID of the regional virtual network. If there are multiple vnets of the same name across different resource groups, use vnet resource id to specify which vnet to use. If vnet name is used, by default, the vnet in the same resource group as the webapp will be used. Must be used with --subnet argument.")
c.argument('subnet', help="Name or resource ID of the pre-existing subnet to have the webapp join. The --vnet is argument also needed if specifying subnet by name.")
c.argument('https_only', help="Redirect all traffic made to an app using HTTP to HTTPS.",
arg_type=get_three_state_flag(return_label=True))
c.ignore('language')
c.ignore('using_webapp_up')
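    # Illustrative invocation (hypothetical names) of the webapp-create arguments registered above:
    #   az webapp create -g <rg> -p <plan> -n <app> --runtime "PHP:7.2" --https-only true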
with self.argument_context('webapp show') as c:
c.argument('name', arg_type=webapp_name_arg_type)
with self.argument_context('webapp list-instances') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
        c.argument('slot', options_list=['--slot', '-s'], help='Name of the web app slot. Defaults to the production slot if not specified.')
with self.argument_context('webapp list-runtimes') as c:
c.argument('linux', action='store_true', help='list runtime stacks for linux based web apps', deprecate_info=c.deprecate(redirect="--os-type"))
c.argument('os_type', options_list=["--os", "--os-type"], help="limit the output to just windows or linux runtimes", arg_type=get_enum_type([LINUX_OS_NAME, WINDOWS_OS_NAME]))
with self.argument_context('functionapp list-runtimes') as c:
c.argument('os_type', options_list=["--os", "--os-type"], help="limit the output to just windows or linux runtimes", arg_type=get_enum_type([LINUX_OS_NAME, WINDOWS_OS_NAME]))
with self.argument_context('webapp deleted list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
c.argument('slot', options_list=['--slot', '-s'], help='Name of the deleted web app slot.')
with self.argument_context('webapp deleted restore') as c:
c.argument('deleted_id', options_list=['--deleted-id'], help='Resource ID of the deleted web app')
c.argument('name', options_list=['--name', '-n'], help='name of the web app to restore the deleted content to')
c.argument('slot', options_list=['--slot', '-s'], help='slot to restore the deleted content to')
c.argument('restore_content_only', action='store_true',
help='restore only deleted files without web app settings')
with self.argument_context('webapp traffic-routing') as c:
c.argument('distribution', options_list=['--distribution', '-d'], nargs='+',
help='space-separated slot routings in a format of `<slot-name>=<percentage>` e.g. staging=50. Unused traffic percentage will go to the Production slot')
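    # Illustrative invocation (hypothetical names) of the --distribution format described above:
    #   az webapp traffic-routing set -g <rg> -n <app> --distribution staging=25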
with self.argument_context('webapp update') as c:
c.argument('client_affinity_enabled', help="Enables sending session affinity cookies.",
arg_type=get_three_state_flag(return_label=True))
c.argument('https_only', help="Redirect all traffic made to an app using HTTP to HTTPS.",
arg_type=get_three_state_flag(return_label=True))
c.argument('force_dns_registration', help="If true, web app hostname is force registered with DNS",
arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0'))
c.argument('skip_custom_domain_verification',
help="If true, custom (non *.azurewebsites.net) domains associated with web app are not verified",
arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0'))
c.argument('ttl_in_seconds', help="Time to live in seconds for web app's default domain name",
arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0'))
c.argument('skip_dns_registration', help="If true web app hostname is not registered with DNS on creation",
arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration='3.0.0'))
c.argument('minimum_elastic_instance_count', options_list=["--minimum-elastic-instance-count", "-i"], type=int, is_preview=True, help="Minimum number of instances. App must be in an elastic scale App Service Plan.")
c.argument('prewarmed_instance_count', options_list=["--prewarmed-instance-count", "-w"], type=int, is_preview=True, help="Number of preWarmed instances. App must be in an elastic scale App Service Plan.")
with self.argument_context('webapp browse') as c:
c.argument('logs', options_list=['--logs', '-l'], action='store_true',
help='Enable viewing the log stream immediately after launching the web app')
with self.argument_context('webapp delete') as c:
c.argument('name', arg_type=webapp_name_arg_type, local_context_attribute=None)
c.argument('keep_empty_plan', action='store_true', help='keep empty app service plan')
c.argument('keep_metrics', action='store_true', help='keep app metrics')
c.argument('keep_dns_registration', action='store_true', help='keep DNS registration',
deprecate_info=c.deprecate(expiration='3.0.0'))
with self.argument_context('webapp webjob') as c:
c.argument('webjob_name', help='The name of the webjob', options_list=['--webjob-name', '-w'])
with self.argument_context('webapp webjob continuous list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
with self.argument_context('webapp webjob triggered list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
for scope in ['webapp', 'functionapp', 'logicapp']:
with self.argument_context(scope + ' create') as c:
c.argument('deployment_container_image_name', options_list=['--deployment-container-image-name', '-i'],
help='Container image name from Docker Hub, e.g. publisher/image-name:tag')
c.argument('deployment_local_git', action='store_true', options_list=['--deployment-local-git', '-l'],
help='enable local git')
c.argument('deployment_zip', options_list=['--deployment-zip', '-z'],
help='perform deployment using zip file')
c.argument('deployment_source_url', options_list=['--deployment-source-url', '-u'],
help='Git repository URL to link with manual integration')
c.argument('deployment_source_branch', options_list=['--deployment-source-branch', '-b'],
help='the branch to deploy')
c.argument('tags', arg_type=tags_type)
for scope in ['webapp', 'functionapp']:
with self.argument_context(scope) as c:
c.argument('assign_identities', nargs='*', options_list=['--assign-identity'],
                       help='accept system or user assigned identities separated by spaces. Use \'[system]\' to refer to the system assigned identity, or a resource id to refer to a user assigned identity. Check out help for more examples')
c.argument('scope', options_list=['--scope'], help="Scope that the system assigned identity can access")
c.argument('role', options_list=['--role'], help="Role name or id the system assigned identity will have")
with self.argument_context(scope + ' config ssl bind') as c:
c.argument('ssl_type', help='The ssl cert type', arg_type=get_enum_type(['SNI', 'IP']))
with self.argument_context(scope + ' config ssl upload') as c:
c.argument('certificate_password', help='The ssl cert password')
c.argument('certificate_file', type=file_type, help='The filepath for the .pfx file')
c.argument('slot', options_list=['--slot', '-s'],
                       help='The name of the slot. Defaults to the production slot if not specified')
with self.argument_context(scope + ' config ssl') as c:
c.argument('certificate_thumbprint', help='The ssl cert thumbprint')
with self.argument_context(scope + ' config appsettings') as c:
c.argument('settings', nargs='+', help="space-separated app settings in a format of `<name>=<value>`")
c.argument('setting_names', nargs='+', help="space-separated app setting names")
with self.argument_context(scope + ' config ssl import') as c:
c.argument('key_vault', help='The name or resource ID of the Key Vault')
c.argument('key_vault_certificate_name', help='The name of the certificate in Key Vault')
with self.argument_context(scope + ' config ssl create') as c:
c.argument('hostname', help='The custom domain name')
c.argument('name', options_list=['--name', '-n'], help='Name of the web app.')
c.argument('resource-group', options_list=['--resource-group', '-g'], help='Name of resource group.')
with self.argument_context(scope + ' config ssl show') as c:
c.argument('certificate_name', help='The name of the certificate')
with self.argument_context(scope + ' config hostname') as c:
c.argument('hostname', completer=get_hostname_completion_list,
help="hostname assigned to the site, such as custom domains", id_part='child_name_1')
with self.argument_context(scope + ' deployment user') as c:
c.argument('user_name', help='user name')
c.argument('password', help='password, will prompt if not specified')
with self.argument_context(scope + ' deployment source') as c:
c.argument('manual_integration', action='store_true',
help='disable automatic sync between source control and web')
c.argument('repo_url', options_list=['--repo-url', '-u'],
help='repository url to pull the latest source from, e.g. https://github.com/foo/foo-web')
c.argument('branch', help='the branch name of the repository')
c.argument('repository_type', help='repository type',
arg_type=get_enum_type(['git', 'mercurial', 'github', 'externalgit', 'localgit']))
c.argument('git_token', help='Git access token required for auto sync')
            c.argument('github_action', options_list=['--github-action'], help='If using GitHub Actions, defaults to False')
with self.argument_context(scope + ' identity') as c:
c.argument('scope', help="The scope the managed identity has access to")
c.argument('role', help="Role name or id the managed identity will be assigned")
with self.argument_context(scope + ' identity assign') as c:
c.argument('assign_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
with self.argument_context(scope + ' identity remove') as c:
            c.argument('remove_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
with self.argument_context(scope + ' deployment source config-zip') as c:
c.argument('src', help='a zip file path for deployment')
c.argument('build_remote', help='enable remote build during deployment',
arg_type=get_three_state_flag(return_label=True))
c.argument('timeout', type=int, options_list=['--timeout', '-t'],
help='Configurable timeout in seconds for checking the status of deployment',
validator=validate_timeout_value)
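        # Illustrative invocation (hypothetical names) of the zip-deploy arguments registered above:
        #   az webapp deployment source config-zip -g <rg> -n <app> --src <package>.zip --timeout 300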
with self.argument_context(scope + ' config appsettings list') as c:
c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type),
id_part=None)
with self.argument_context(scope + ' config hostname list') as c:
c.argument('webapp_name', arg_type=webapp_name_arg_type, id_part=None, options_list='--webapp-name')
with self.argument_context(scope + ' cors') as c:
c.argument('allowed_origins', options_list=['--allowed-origins', '-a'], nargs='*',
help='space separated origins that should be allowed to make cross-origin calls (for example: http://example.com:12345). To allow all, use "*" and remove all other origins from the list')
with self.argument_context(scope + ' config set') as c:
c.argument('number_of_workers', help='The number of workers to be allocated.', type=int)
c.argument('remote_debugging_enabled', help='enable or disable remote debugging',
arg_type=get_three_state_flag(return_label=True))
c.argument('web_sockets_enabled', help='enable or disable web sockets',
arg_type=get_three_state_flag(return_label=True))
c.argument('always_on',
                       help='ensure the web app gets loaded all the time, rather than being unloaded after being idle. Recommended when you have continuous web jobs running',
arg_type=get_three_state_flag(return_label=True))
c.argument('auto_heal_enabled', help='enable or disable auto heal',
arg_type=get_three_state_flag(return_label=True))
c.argument('use32_bit_worker_process', options_list=['--use-32bit-worker-process'],
                       help='use 32-bit worker process or not', arg_type=get_three_state_flag(return_label=True))
c.argument('php_version', help='The version used to run your web app if using PHP, e.g., 5.5, 5.6, 7.0')
c.argument('python_version', help='The version used to run your web app if using Python, e.g., 2.7, 3.4')
c.argument('net_framework_version', help="The version used to run your web app if using .NET Framework, e.g., 'v4.0' for .NET 4.6 and 'v3.0' for .NET 3.5")
c.argument('linux_fx_version', help="The runtime stack used for your linux-based webapp, e.g., \"RUBY|2.5.5\", \"NODE|12LTS\", \"PHP|7.2\", \"DOTNETCORE|2.1\". See https://aka.ms/linux-stacks for more info.")
c.argument('windows_fx_version', help="A docker image name used for your windows container web app, e.g., microsoft/nanoserver:ltsc2016")
if scope == 'functionapp':
c.ignore('windows_fx_version')
c.argument('pre_warmed_instance_count', options_list=['--prewarmed-instance-count'],
help="Number of pre-warmed instances a function app has")
if scope == 'webapp':
c.ignore('reserved_instance_count')
c.argument('java_version',
help="The version used to run your web app if using Java, e.g., '1.7' for Java 7, '1.8' for Java 8")
c.argument('java_container', help="The java container, e.g., Tomcat, Jetty")
c.argument('java_container_version', help="The version of the java container, e.g., '8.0.23' for Tomcat")
c.argument('min_tls_version',
help="The minimum version of TLS required for SSL requests, e.g., '1.0', '1.1', '1.2'")
c.argument('http20_enabled', help="configures a web site to allow clients to connect over http2.0.",
arg_type=get_three_state_flag(return_label=True))
c.argument('app_command_line', options_list=['--startup-file'],
help="The startup file for linux hosted web apps, e.g. 'process.json' for Node.js web")
c.argument('ftps_state', help="Set the Ftps state value for an app. Default value is 'AllAllowed'.",
arg_type=get_enum_type(FTPS_STATE_TYPES))
c.argument('vnet_route_all_enabled', help="Configure regional VNet integration to route all traffic to the VNet.",
arg_type=get_three_state_flag(return_label=True))
c.argument('generic_configurations', nargs='+',
help='Provide site configuration list in a format of either `key=value` pair or `@<json_file>`. PowerShell and Windows Command Prompt users should use a JSON file to provide these configurations to avoid compatibility issues with escape characters.')
with self.argument_context(scope + ' config container') as c:
c.argument('docker_registry_server_url', options_list=['--docker-registry-server-url', '-r'],
help='the container registry server url')
c.argument('docker_custom_image_name', options_list=['--docker-custom-image-name', '-c', '-i'],
help='the container custom image name and optionally the tag name (e.g., <registry-name>/<image-name>:<tag>)')
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-u'],
help='the container registry server username')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-p'],
help='the container registry server password')
c.argument('websites_enable_app_service_storage', options_list=['--enable-app-service-storage', '-t'],
help='enables platform storage (custom container only)',
arg_type=get_three_state_flag(return_label=True))
c.argument('multicontainer_config_type', options_list=['--multicontainer-config-type'], help='config type',
arg_type=get_enum_type(MULTI_CONTAINER_TYPES))
c.argument('multicontainer_config_file', options_list=['--multicontainer-config-file'],
help="config file for multicontainer apps")
c.argument('show_multicontainer_config', action='store_true',
help='shows decoded config if a multicontainer config is set')
with self.argument_context(scope + ' deployment container config') as c:
c.argument('enable', options_list=['--enable-cd', '-e'], help='enable/disable continuous deployment',
arg_type=get_three_state_flag(return_label=True))
with self.argument_context('webapp config connection-string list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
with self.argument_context('webapp config storage-account list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
with self.argument_context('webapp config hostname') as c:
c.argument('webapp_name',
help="webapp name. You can configure the default using `az configure --defaults web=<name>`",
configured_default='web',
completer=get_resource_name_completion_list('Microsoft.Web/sites'), id_part='name',
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET]))
with self.argument_context('webapp deployment list-publishing-profiles') as c:
c.argument('xml', options_list=['--xml'], required=False, help='retrieves the publishing profile details in XML format')
with self.argument_context('webapp deployment slot') as c:
c.argument('slot', help='the name of the slot')
c.argument('webapp', arg_type=name_arg_type, completer=get_resource_name_completion_list('Microsoft.Web/sites'),
help='Name of the webapp', id_part='name',
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET]))
c.argument('auto_swap_slot', help='target slot to auto swap', default='production')
c.argument('disable', help='disable auto swap', action='store_true')
c.argument('target_slot', help="target slot to swap, default to 'production'")
c.argument('preserve_vnet', help="preserve Virtual Network to the slot during swap, default to 'true'",
arg_type=get_three_state_flag(return_label=True))
with self.argument_context('webapp deployment slot create') as c:
c.argument('configuration_source',
help="source slot to clone configurations from. Use web app's name to refer to the production slot")
c.argument('deployment_container_image_name', options_list=['--deployment-container-image-name', '-i'],
help='Container image name, e.g. publisher/image-name:tag')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-w'],
help='The container registry server password')
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-u'], help='the container registry server username')
with self.argument_context('webapp deployment slot swap') as c:
c.argument('action',
help="swap types. use 'preview' to apply target slot's settings on the source slot first; use 'swap' to complete it; use 'reset' to reset the swap",
arg_type=get_enum_type(['swap', 'preview', 'reset']))
with self.argument_context('webapp deployment github-actions')as c:
c.argument('name', arg_type=webapp_name_arg_type)
c.argument('resource_group', arg_type=resource_group_name_type, options_list=['--resource-group', '-g'])
c.argument('repo', help='The GitHub repository to which the workflow file will be added. In the format: <owner>/<repository-name>')
c.argument('token', help='A Personal Access Token with write access to the specified repository. For more information: https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line')
c.argument('slot', options_list=['--slot', '-s'], help='The name of the slot. Default to the production slot if not specified.')
c.argument('branch', options_list=['--branch', '-b'], help='The branch to which the workflow file will be added. Defaults to "master" if not specified.')
c.argument('login_with_github', help='Interactively log in with GitHub to retrieve the Personal Access Token', action='store_true')
with self.argument_context('webapp deployment github-actions add')as c:
c.argument('runtime', options_list=['--runtime', '-r'], help='Canonicalized web runtime in the format of Framework|Version, e.g. "PHP|5.6". Use "az webapp list-runtimes" for available list.')
c.argument('force', options_list=['--force', '-f'], help='When true, the command will overwrite any workflow file with a conflicting name.', action='store_true')
with self.argument_context('webapp log config') as c:
c.argument('application_logging', help='configure application logging',
arg_type=get_enum_type(['filesystem', 'azureblobstorage', 'off']))
c.argument('detailed_error_messages', help='configure detailed error messages',
arg_type=get_three_state_flag(return_label=True))
c.argument('failed_request_tracing', help='configure failed request tracing',
arg_type=get_three_state_flag(return_label=True))
c.argument('level', help='logging level',
arg_type=get_enum_type(['error', 'warning', 'information', 'verbose']))
c.argument('web_server_logging', help='configure Web server logging',
arg_type=get_enum_type(['off', 'filesystem']))
c.argument('docker_container_logging', help='configure gathering STDOUT and STDERR output from container',
arg_type=get_enum_type(['off', 'filesystem']))
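    # Illustrative invocation (hypothetical names) of the log-config arguments registered above:
    #   az webapp log config -g <rg> -n <app> --web-server-logging filesystem \
    #     --docker-container-logging filesystem --level information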
with self.argument_context('webapp log tail') as c:
c.argument('provider',
help="By default all live traces configured by `az webapp log config` will be shown, but you can scope to certain providers/folders, e.g. 'application', 'http', etc. For details, check out https://github.com/projectkudu/kudu/wiki/Diagnostic-Log-Stream")
with self.argument_context('webapp log download') as c:
c.argument('log_file', default='webapp_logs.zip', type=file_type, completer=FilesCompleter(),
help='the downloaded zipped log file path')
with self.argument_context('webapp log deployment show') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
c.argument('resource_group', arg_type=resource_group_name_type)
        c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Defaults to the production slot if not specified")
c.argument('deployment_id', options_list=['--deployment-id'], help='Deployment ID. If none specified, returns the deployment logs of the latest deployment.')
with self.argument_context('webapp log deployment list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
c.argument('resource_group', arg_type=resource_group_name_type)
        c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Defaults to the production slot if not specified")
with self.argument_context('functionapp log deployment show') as c:
c.argument('name', arg_type=functionapp_name_arg_type, id_part=None)
c.argument('resource_group', arg_type=resource_group_name_type)
        c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Defaults to the production slot if not specified")
c.argument('deployment_id', options_list=['--deployment-id'], help='Deployment ID. If none specified, returns the deployment logs of the latest deployment.')
with self.argument_context('functionapp log deployment list') as c:
c.argument('name', arg_type=functionapp_name_arg_type, id_part=None)
c.argument('resource_group', arg_type=resource_group_name_type)
        c.argument('slot', options_list=['--slot', '-s'], help="the name of the slot. Defaults to the production slot if not specified")
for scope in ['appsettings', 'connection-string']:
with self.argument_context('webapp config ' + scope) as c:
c.argument('settings', nargs='+', help="space-separated {} in a format of `<name>=<value>`".format(scope))
c.argument('slot_settings', nargs='+',
help="space-separated slot {} in a format of either `<name>=<value>` or `@<json_file>`".format(
scope))
c.argument('setting_names', nargs='+', help="space-separated {} names".format(scope))
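    # Illustrative invocation (hypothetical keys/names) of the settings format registered above:
    #   az webapp config appsettings set -g <rg> -n <app> --settings KEY1=value1 --slot-settings KEY2=value2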
with self.argument_context('webapp config connection-string') as c:
c.argument('connection_string_type', options_list=['--connection-string-type', '-t'],
help='connection string type', arg_type=get_enum_type(ConnectionStringType))
c.argument('ids', options_list=['--ids'],
help="One or more resource IDs (space delimited). If provided no other 'Resource Id' arguments should be specified.",
required=True)
c.argument('resource_group', options_list=['--resource-group', '-g'],
help='Name of resource group. You can configure the default group using `az configure --default-group=<name>`. If `--ids` is provided this should NOT be specified.')
c.argument('name', options_list=['--name', '-n'],
help='Name of the web app. You can configure the default using `az configure --defaults web=<name>`. If `--ids` is provided this should NOT be specified.',
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET]))
with self.argument_context('webapp config storage-account') as c:
c.argument('custom_id', options_list=['--custom-id', '-i'], help='name of the share configured within the web app')
c.argument('storage_type', options_list=['--storage-type', '-t'], help='storage type',
arg_type=get_enum_type(AzureStorageType))
c.argument('account_name', options_list=['--account-name', '-a'], help='storage account name')
c.argument('share_name', options_list=['--share-name', '--sn'],
help='name of the file share as given in the storage account')
c.argument('access_key', options_list=['--access-key', '-k'], help='storage account access key')
c.argument('mount_path', options_list=['--mount-path', '-m'],
                   help='the path which the web app uses to read and write data, e.g. /share1 or /share2')
c.argument('slot', options_list=['--slot', '-s'],
help="the name of the slot. Default to the productions slot if not specified")
with self.argument_context('webapp config storage-account add') as c:
c.argument('slot_setting', options_list=['--slot-setting'], help="slot setting")
with self.argument_context('webapp config storage-account update') as c:
c.argument('slot_setting', options_list=['--slot-setting'], help="slot setting")
with self.argument_context('webapp config backup') as c:
c.argument('storage_account_url', help='URL with SAS token to the blob storage container',
options_list=['--container-url'])
c.argument('webapp_name', help='The name of the web app',
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET]))
c.argument('db_name', help='Name of the database in the backup', arg_group='Database')
c.argument('db_connection_string', help='Connection string for the database in the backup',
arg_group='Database')
c.argument('db_type', help='Type of database in the backup', arg_group='Database',
arg_type=get_enum_type(DatabaseType))
with self.argument_context('webapp config backup create') as c:
c.argument('backup_name',
help='Name of the backup. If unspecified, the backup will be named with the web app name and a timestamp',
local_context_attribute=LocalContextAttribute(name='backup_name', actions=[LocalContextAction.SET],
scopes=['webapp']))
with self.argument_context('webapp config backup update') as c:
c.argument('backup_name',
help='Name of the backup. If unspecified, the backup will be named with the web app name and a timestamp',
local_context_attribute=LocalContextAttribute(name='backup_name', actions=[LocalContextAction.GET]))
c.argument('frequency',
help='How often to backup. Use a number followed by d or h, e.g. 5d = 5 days, 2h = 2 hours')
c.argument('keep_at_least_one_backup', help='Always keep one backup, regardless of how old it is',
options_list=['--retain-one'], arg_type=get_three_state_flag(return_label=True))
c.argument('retention_period_in_days',
help='How many days to keep a backup before automatically deleting it. Set to 0 for indefinite retention',
options_list=['--retention'])
with self.argument_context('webapp config backup restore') as c:
c.argument('backup_name', help='Name of the backup to restore',
local_context_attribute=LocalContextAttribute(name='backup_name', actions=[LocalContextAction.GET]))
c.argument('target_name',
help='The name to use for the restored web app. If unspecified, will default to the name that was used when the backup was created')
c.argument('overwrite', help='Overwrite the source web app, if --target-name is not specified',
action='store_true')
c.argument('ignore_hostname_conflict', help='Ignores custom hostnames stored in the backup',
action='store_true')
with self.argument_context('webapp config snapshot') as c:
c.argument('name', arg_type=webapp_name_arg_type)
c.argument('slot', options_list=['--slot', '-s'], help='The name of the slot.')
with self.argument_context('webapp config snapshot list') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
with self.argument_context('webapp config snapshot restore') as c:
c.argument('time', help='Timestamp of the snapshot to restore.')
c.argument('restore_content_only', help='Restore the web app files without restoring the settings.')
c.argument('source_resource_group', help='Name of the resource group to retrieve snapshot from.')
c.argument('source_name', help='Name of the web app to retrieve snapshot from.')
c.argument('source_slot', help='Name of the web app slot to retrieve snapshot from.')
with self.argument_context('webapp auth update') as c:
c.argument('enabled', arg_type=get_three_state_flag(return_label=True))
c.argument('token_store_enabled', options_list=['--token-store'],
arg_type=get_three_state_flag(return_label=True), help='use App Service Token Store')
c.argument('action', arg_type=get_enum_type(AUTH_TYPES))
c.argument('runtime_version',
help='Runtime version of the Authentication/Authorization feature in use for the current app')
c.argument('token_refresh_extension_hours', type=float, help="Hours, must be formattable into a float")
c.argument('allowed_external_redirect_urls', nargs='+', help="One or more urls (space-delimited).")
c.argument('client_id', options_list=['--aad-client-id'], arg_group='Azure Active Directory',
help='Application ID to integrate AAD organization account Sign-in into your web app')
c.argument('client_secret', options_list=['--aad-client-secret'], arg_group='Azure Active Directory',
help='AAD application secret')
c.argument('client_secret_certificate_thumbprint', options_list=['--aad-client-secret-certificate-thumbprint', '--thumbprint'], arg_group='Azure Active Directory',
help='Alternative to AAD Client Secret, thumbprint of a certificate used for signing purposes')
c.argument('allowed_audiences', nargs='+', options_list=['--aad-allowed-token-audiences'],
arg_group='Azure Active Directory', help="One or more token audiences (comma-delimited).")
c.argument('issuer', options_list=['--aad-token-issuer-url'],
help='This url can be found in the JSON output returned from your active directory endpoint using your tenantID. The endpoint can be queried from `az cloud show` at \"endpoints.activeDirectory\". '
'The tenantID can be found using `az account show`. Get the \"issuer\" from the JSON at <active directory endpoint>/<tenantId>/.well-known/openid-configuration.',
arg_group='Azure Active Directory')
c.argument('facebook_app_id', arg_group='Facebook',
help="Application ID to integrate Facebook Sign-in into your web app")
c.argument('facebook_app_secret', arg_group='Facebook', help='Facebook Application client secret')
c.argument('facebook_oauth_scopes', nargs='+',
help="One or more facebook authentication scopes (comma-delimited).", arg_group='Facebook')
c.argument('twitter_consumer_key', arg_group='Twitter',
help='Application ID to integrate Twitter Sign-in into your web app')
c.argument('twitter_consumer_secret', arg_group='Twitter', help='Twitter Application client secret')
c.argument('google_client_id', arg_group='Google',
help='Application ID to integrate Google Sign-in into your web app')
c.argument('google_client_secret', arg_group='Google', help='Google Application client secret')
c.argument('google_oauth_scopes', nargs='+', help="One or more Google authentication scopes (space-delimited).",
arg_group='Google')
c.argument('microsoft_account_client_id', arg_group='Microsoft',
help="AAD V2 Application ID to integrate Microsoft account Sign-in into your web app")
c.argument('microsoft_account_client_secret', arg_group='Microsoft', help='AAD V2 Application client secret')
c.argument('microsoft_account_oauth_scopes', nargs='+',
help="One or more Microsoft authentification scopes (comma-delimited).", arg_group='Microsoft')
with self.argument_context('webapp hybrid-connection') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
c.argument('slot', help="the name of the slot. Default to the productions slot if not specified")
c.argument('namespace', help="Hybrid connection namespace")
c.argument('hybrid_connection', help="Hybrid connection name")
with self.argument_context('functionapp hybrid-connection') as c:
c.argument('name', id_part=None, local_context_attribute=LocalContextAttribute(name='functionapp_name',
actions=[
LocalContextAction.GET]))
c.argument('slot', help="the name of the slot. Default to the productions slot if not specified")
c.argument('namespace', help="Hybrid connection namespace")
c.argument('hybrid_connection', help="Hybrid connection name")
with self.argument_context('appservice hybrid-connection set-key') as c:
c.argument('plan', help="AppService plan",
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('namespace', help="Hybrid connection namespace")
c.argument('hybrid_connection', help="Hybrid connection name")
c.argument('key_type', help="Which key (primary or secondary) should be used")
with self.argument_context('appservice vnet-integration list') as c:
c.argument('plan', help="AppService plan",
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('resource_group', arg_type=resource_group_name_type)
with self.argument_context('webapp up') as c:
c.argument('name', arg_type=webapp_name_arg_type,
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET,
LocalContextAction.SET],
scopes=['webapp', 'cupertino']))
c.argument('plan', options_list=['--plan', '-p'],
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
help="name of the app service plan associated with the webapp",
configured_default='appserviceplan')
c.argument('sku', arg_type=sku_arg_type)
c.argument('os_type', options_list=['--os-type'], arg_type=get_enum_type(OS_TYPES), help="Set the OS type for the app to be created.")
c.argument('runtime', options_list=['--runtime', '-r'], help="canonicalized web runtime in the format of Framework:Version, e.g. \"PHP:7.2\". Allowed delimiters: \"|\" or \":\". If using powershell, please use the \":\" delimiter or be sure to properly escape the \"|\" character. "
"Use `az webapp list-runtimes` for available list.")
c.argument('dryrun', help="show summary of the create and deploy operation instead of executing it",
default=False, action='store_true')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('launch_browser', help="Launch the created app using the default browser", default=False,
action='store_true', options_list=['--launch-browser', '-b'])
c.argument('logs',
help="Configure default logging required to enable viewing log stream immediately after launching the webapp",
default=False, action='store_true')
c.argument('html', help="Ignore app detection and deploy as an html app", default=False, action='store_true')
c.argument('app_service_environment', options_list=['--app-service-environment', '-e'], help='name or resource ID of the (pre-existing) App Service Environment to deploy to. Requires an Isolated V2 sku [I1v2, I2v2, I3v2]')
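    # Illustrative invocation (hypothetical names) of the webapp-up arguments registered above:
    #   az webapp up -n <app> --runtime "PHP:7.2" --sku F1 --logs --launch-browser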
with self.argument_context('webapp ssh') as c:
c.argument('port', options_list=['--port', '-p'],
help='Port for the remote connection. Default: Random available port', type=int)
c.argument('timeout', options_list=['--timeout', '-t'], help='timeout in seconds. Defaults to none', type=int)
c.argument('instance', options_list=['--instance', '-i'], help='Webapp instance to connect to. Defaults to none.')
with self.argument_context('webapp create-remote-connection') as c:
c.argument('port', options_list=['--port', '-p'],
help='Port for the remote connection. Default: Random available port', type=int)
c.argument('timeout', options_list=['--timeout', '-t'], help='timeout in seconds. Defaults to none', type=int)
c.argument('instance', options_list=['--instance', '-i'], help='Webapp instance to connect to. Defaults to none.')
with self.argument_context('webapp vnet-integration') as c:
c.argument('name', arg_type=webapp_name_arg_type, id_part=None)
c.argument('slot', help="The name of the slot. Default to the productions slot if not specified.")
c.argument('vnet', help="The name or resource ID of the Vnet",
local_context_attribute=LocalContextAttribute(name='vnet_name', actions=[LocalContextAction.GET]))
c.argument('subnet', help="The name or resource ID of the subnet",
local_context_attribute=LocalContextAttribute(name='subnet_name', actions=[LocalContextAction.GET]))
c.argument('skip_delegation_check', help="Skip check if you do not have permission or the VNet is in another subscription.",
arg_type=get_three_state_flag(return_label=True))
with self.argument_context('webapp deploy') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the webapp to deploy to.')
c.argument('src_path', options_list=['--src-path'], help='Path of the artifact to be deployed. Ex: "myapp.zip" or "/myworkspace/apps/myapp.war"')
c.argument('src_url', options_list=['--src-url'], help='URL of the artifact. The webapp will pull the artifact from this URL. Ex: "http://mysite.com/files/myapp.war?key=123"')
        c.argument('target_path', options_list=['--target-path'], help='Absolute path that the artifact should be deployed to. Defaults to "home/site/wwwroot/". Ex: "/home/site/deployments/tools/", "/home/site/scripts/startup-script.sh".')
c.argument('artifact_type', options_list=['--type'], help='Used to override the type of artifact being deployed.', choices=['war', 'jar', 'ear', 'lib', 'startup', 'static', 'zip'])
c.argument('is_async', options_list=['--async'], help='If true, the artifact is deployed asynchronously. (The command will exit once the artifact is pushed to the web app.)', choices=['true', 'false'])
c.argument('restart', options_list=['--restart'], help='If true, the web app will be restarted following the deployment. Set this to false if you are deploying multiple artifacts and do not want to restart the site on the earlier deployments.', choices=['true', 'false'])
c.argument('clean', options_list=['--clean'], help='If true, cleans the target directory prior to deploying the file(s). Default value is determined based on artifact type.', choices=['true', 'false'])
c.argument('ignore_stack', options_list=['--ignore-stack'], help='If true, any stack-specific defaults are ignored.', choices=['true', 'false'])
c.argument('timeout', options_list=['--timeout'], help='Timeout for the deployment operation in milliseconds.')
c.argument('slot', help="The name of the slot. Default to the productions slot if not specified.")
with self.argument_context('functionapp deploy') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the function app to deploy to.')
c.argument('src_path', options_list=['--src-path'], help='Path of the artifact to be deployed. Ex: "myapp.zip" or "/myworkspace/apps/myapp.war"')
c.argument('src_url', options_list=['--src-url'], help='URL of the artifact. The webapp will pull the artifact from this URL. Ex: "http://mysite.com/files/myapp.war?key=123"')
c.argument('target_path', options_list=['--target-path'], help='Absolute path that the artifact should be deployed to. Defaults to "home/site/wwwroot/". Ex: "/home/site/deployments/tools/", "/home/site/scripts/startup-script.sh".')
c.argument('artifact_type', options_list=['--type'], help='Used to override the type of artifact being deployed.', choices=['war', 'jar', 'ear', 'lib', 'startup', 'static', 'zip'])
c.argument('is_async', options_list=['--async'], help='Asynchronous deployment', choices=['true', 'false'])
c.argument('restart', options_list=['--restart'], help='If true, the web app will be restarted following the deployment, default value is true. Set this to false if you are deploying multiple artifacts and do not want to restart the site on the earlier deployments.', choices=['true', 'false'])
c.argument('clean', options_list=['--clean'], help='If true, cleans the target directory prior to deploying the file(s). Default value is determined based on artifact type.', choices=['true', 'false'])
c.argument('ignore_stack', options_list=['--ignore-stack'], help='If true, any stack-specific defaults are ignored.', choices=['true', 'false'])
c.argument('timeout', options_list=['--timeout'], help='Timeout for the deployment operation in milliseconds.')
c.argument('slot', help="The name of the slot. Default to the productions slot if not specified.")
with self.argument_context('functionapp create') as c:
c.argument('vnet', options_list=['--vnet'], help="Name or resource ID of the regional virtual network. If there are multiple vnets of the same name across different resource groups, use vnet resource id to specify which vnet to use. If vnet name is used, by default, the vnet in the same resource group as the webapp will be used. Must be used with --subnet argument.")
        c.argument('subnet', options_list=['--subnet'], help="Name or resource ID of the pre-existing subnet to have the webapp join. The --vnet argument is also needed if specifying the subnet by name.")
with self.argument_context('functionapp vnet-integration') as c:
c.argument('name', arg_type=functionapp_name_arg_type, id_part=None)
c.argument('slot', help="The name of the slot. Default to the productions slot if not specified")
c.argument('vnet', help="The name or resource ID of the Vnet",
local_context_attribute=LocalContextAttribute(name='vnet_name', actions=[LocalContextAction.GET]))
c.argument('subnet', help="The name or resource ID of the subnet",
local_context_attribute=LocalContextAttribute(name='subnet_name', actions=[LocalContextAction.GET]))
c.argument('skip_delegation_check', help="Skip check if you do not have permission or the VNet is in another subscription.",
arg_type=get_three_state_flag(return_label=True))
for scope in ['functionapp', 'logicapp']:
app_type = scope[:-3] # 'function' or 'logic'
with self.argument_context(scope) as c:
c.ignore('app_instance')
c.argument('name', arg_type=name_arg_type_dict[scope], id_part='name', help='name of the {} app'.format(app_type))
c.argument('slot', options_list=['--slot', '-s'],
help="the name of the slot. Default to the productions slot if not specified")
with self.argument_context(scope + ' create') as c:
c.argument('plan', options_list=['--plan', '-p'], configured_default='appserviceplan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
help="name or resource id of the {} app service plan. Use 'appservice plan create' to get one. If using an App Service plan from a different resource group, the full resource id must be used and not the plan name.".format(scope),
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('name', options_list=['--name', '-n'], help='name of the new {} app'.format(app_type),
local_context_attribute=LocalContextAttribute(name=scope + '_name', actions=[LocalContextAction.SET], scopes=[scope]))
c.argument('storage_account', options_list=['--storage-account', '-s'],
                       help='Name of a Storage Account in the provided Resource Group, or the Resource ID of a Storage Account in a different Resource Group',
local_context_attribute=LocalContextAttribute(name='storage_account_name', actions=[LocalContextAction.GET]))
c.argument('consumption_plan_location', options_list=['--consumption-plan-location', '-c'],
help="Geographic location where {} app will be hosted. Use `az {} list-consumption-locations` to view available locations.".format(app_type, scope))
c.argument('os_type', arg_type=get_enum_type(OS_TYPES), help="Set the OS type for the app to be created.")
c.argument('app_insights_key', help="Instrumentation key of App Insights to be added.")
c.argument('app_insights',
help="Name of the existing App Insights project to be added to the {} app. Must be in the ".format(app_type) +
"same resource group.")
c.argument('disable_app_insights', arg_type=get_three_state_flag(return_label=True),
help="Disable creating application insights resource during {} create. No logs will be available.".format(scope))
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-d'], help='The container registry server username.')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-w'],
help='The container registry server password. Required for private registries.')
if scope == 'functionapp':
c.argument('functions_version', help='The functions app version. NOTE: This will be required starting the next release cycle', arg_type=get_enum_type(FUNCTIONS_VERSIONS))
c.argument('runtime', help='The functions runtime stack. Use "az functionapp list-runtimes" to check supported runtimes and versions')
c.argument('runtime_version',
help='The version of the functions runtime stack. '
                                'Use "az functionapp list-runtimes" to check supported runtimes and versions')
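        # Illustrative invocation (hypothetical names/values) of the function-app create arguments registered above:
        #   az functionapp create -g <rg> -n <app> -s <storage-account> -c <region> \
        #     --functions-version 4 --runtime node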
with self.argument_context('functionapp config hostname') as c:
c.argument('webapp_name', arg_type=functionapp_name_arg_type, id_part='name')
# For commands with shared impl between web app and function app and has output, we apply type validation to avoid confusions
with self.argument_context('functionapp show') as c:
c.argument('name', arg_type=functionapp_name_arg_type)
with self.argument_context('functionapp delete') as c:
c.argument('name', arg_type=functionapp_name_arg_type, local_context_attribute=None)
with self.argument_context('functionapp config appsettings') as c:
c.argument('slot_settings', nargs='+', help="space-separated slot app settings in a format of `<name>=<value>`")
with self.argument_context('logicapp show') as c:
c.argument('name', arg_type=logicapp_name_arg_type)
with self.argument_context('logicapp delete') as c:
c.argument('name', arg_type=logicapp_name_arg_type, local_context_attribute=None)
with self.argument_context('functionapp plan') as c:
c.argument('name', arg_type=name_arg_type, help='The name of the app service plan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
configured_default='appserviceplan', id_part='name',
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.GET]))
c.argument('is_linux', arg_type=get_three_state_flag(return_label=True), required=False,
help='host function app on Linux worker')
c.argument('number_of_workers', options_list=['--number-of-workers', '--min-instances'],
help='The number of workers for the app service plan.')
c.argument('max_burst',
help='The maximum number of elastic workers for the plan.')
c.argument('tags', arg_type=tags_type)
with self.argument_context('functionapp update') as c:
c.argument('plan', required=False, help='The name or resource id of the plan to update the functionapp with.')
        c.argument('force', required=False, help='Required if attempting to migrate the function app from a Premium to a Consumption plan (via --plan).',
action='store_true')
with self.argument_context('functionapp plan create') as c:
c.argument('name', arg_type=name_arg_type, help='The name of the app service plan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
configured_default='appserviceplan', id_part='name',
local_context_attribute=LocalContextAttribute(name='plan_name', actions=[LocalContextAction.SET],
scopes=['appservice', 'webapp', 'functionapp']))
c.argument('zone_redundant', options_list=['--zone-redundant', '-z'], help='Enable zone redundancy for high availability. Cannot be changed after plan creation. Minimum instance count is 3.')
c.argument('sku', required=True, help='The SKU of the app service plan. e.g., F1(Free), D1(Shared), B1(Basic Small), '
'B2(Basic Medium), B3(Basic Large), S1(Standard Small), '
'P1V2(Premium V2 Small), PC2 (Premium Container Small), PC3 '
'(Premium Container Medium), PC4 (Premium Container Large), I1 '
'(Isolated Small), I2 (Isolated Medium), I3 (Isolated Large), K1 '
'(Kubernetes).')
with self.argument_context('functionapp plan update') as c:
c.argument('sku', required=False, help='The SKU of the app service plan.')
with self.argument_context('functionapp plan delete') as c:
c.argument('name', arg_type=name_arg_type, help='The name of the app service plan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
configured_default='appserviceplan', id_part='name',
local_context_attribute=None)
with self.argument_context('functionapp deployment list-publishing-profiles') as c:
c.argument('xml', options_list=['--xml'], required=False, help='retrieves the publishing profile details in XML format')
with self.argument_context('functionapp deployment slot') as c:
c.argument('slot', help='the name of the slot')
# This is set to webapp to simply reuse webapp functions, without rewriting same functions for function apps.
# The help will still show "-n or --name", so it should not be a problem to do it this way
c.argument('webapp', arg_type=functionapp_name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Web/sites'),
help='Name of the function app', id_part='name')
c.argument('auto_swap_slot', help='target slot to auto swap', default='production')
c.argument('disable', help='disable auto swap', action='store_true')
c.argument('target_slot', help="target slot to swap, default to 'production'")
c.argument('preserve_vnet', help="preserve Virtual Network to the slot during swap, default to 'true'",
arg_type=get_three_state_flag(return_label=True))
with self.argument_context('functionapp deployment slot create') as c:
c.argument('configuration_source',
help="source slot to clone configurations from. Use function app's name to refer to the production slot")
c.argument('deployment_container_image_name', options_list=['--deployment-container-image-name', '-i'],
help='Container image name, e.g. publisher/image-name:tag')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-d'],
help='The container registry server password')
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-u'], help='the container registry server username')
with self.argument_context('functionapp deployment slot swap') as c:
c.argument('action',
help="swap types. use 'preview' to apply target slot's settings on the source slot first; use 'swap' to complete it; use 'reset' to reset the swap",
arg_type=get_enum_type(['swap', 'preview', 'reset']))
with self.argument_context('functionapp keys', id_part=None) as c:
c.argument('resource_group_name', arg_type=resource_group_name_type,)
c.argument('name', arg_type=functionapp_name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Web/sites'),
help='Name of the function app')
c.argument('slot', options_list=['--slot', '-s'],
help="The name of the slot. Defaults to the productions slot if not specified")
with self.argument_context('functionapp keys set', id_part=None) as c:
c.argument('key_name', help="Name of the key to set.")
c.argument('key_value', help="Value of the new key. If not provided, a value will be generated.")
c.argument('key_type', help="Type of key.", arg_type=get_enum_type(['systemKey', 'functionKeys', 'masterKey']))
with self.argument_context('functionapp keys delete', id_part=None) as c:
c.argument('key_name', help="Name of the key to set.")
c.argument('key_type', help="Type of key.", arg_type=get_enum_type(['systemKey', 'functionKeys', 'masterKey']))
with self.argument_context('functionapp function', id_part=None) as c:
c.argument('resource_group_name', arg_type=resource_group_name_type,)
c.argument('name', arg_type=functionapp_name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Web/sites'),
help='Name of the function app')
c.argument('function_name', help="Name of the Function")
with self.argument_context('functionapp function keys', id_part=None) as c:
c.argument('slot', options_list=['--slot', '-s'],
help="The name of the slot. Defaults to the productions slot if not specified")
with self.argument_context('functionapp function keys set', id_part=None) as c:
c.argument('key_name', help="Name of the key to set.")
c.argument('key_value', help="Value of the new key. If not provided, a value will be generated.")
with self.argument_context('functionapp function keys delete', id_part=None) as c:
c.argument('key_name', help="Name of the key to set.")
# Access Restriction Commands
for scope in ['webapp', 'functionapp']:
with self.argument_context(scope + ' config access-restriction show') as c:
c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type))
with self.argument_context(scope + ' config access-restriction add') as c:
c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type))
c.argument('rule_name', options_list=['--rule-name', '-r'],
help='Name of the access restriction rule to add')
c.argument('priority', options_list=['--priority', '-p'],
help="Priority of the access restriction rule")
c.argument('description', help='Description of the access restriction rule')
c.argument('action', arg_type=get_enum_type(ACCESS_RESTRICTION_ACTION_TYPES),
help="Allow or deny access")
c.argument('ip_address', help="IP address or CIDR range (optional comma separated list of up to 8 ranges)",
validator=validate_ip_address)
c.argument('service_tag', help="Service Tag (optional comma separated list of up to 8 tags)",
validator=validate_service_tag)
c.argument('vnet_name', help="vNet name")
c.argument('subnet', help="Subnet name (requires vNet name) or subnet resource id")
c.argument('ignore_missing_vnet_service_endpoint',
options_list=['--ignore-missing-endpoint', '-i'],
help='Create access restriction rule without checking whether the subnet has the Microsoft.Web service endpoint enabled',
arg_type=get_three_state_flag(), default=False)
c.argument('scm_site', help='True if access restriction is added for scm site',
arg_type=get_three_state_flag())
c.argument('vnet_resource_group', help='Resource group of virtual network (default is web app resource group)')
c.argument('http_headers', nargs='+', help="space-separated http headers in a format of `<name>=<value>`")
with self.argument_context(scope + ' config access-restriction remove') as c:
c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type))
c.argument('rule_name', options_list=['--rule-name', '-r'],
help='Name of the access restriction to remove')
c.argument('ip_address', help="IP address or CIDR range (optional comma separated list of up to 8 ranges)",
validator=validate_ip_address)
c.argument('service_tag', help="Service Tag (optional comma separated list of up to 8 tags)",
validator=validate_service_tag)
c.argument('vnet_name', help="vNet name")
c.argument('subnet', help="Subnet name (requires vNet name) or subnet resource id")
c.argument('scm_site', help='True if access restriction should be removed from scm site',
arg_type=get_three_state_flag())
c.argument('action', arg_type=get_enum_type(ACCESS_RESTRICTION_ACTION_TYPES),
help="Allow or deny access")
with self.argument_context(scope + ' config access-restriction set') as c:
c.argument('name', arg_type=(webapp_name_arg_type if scope == 'webapp' else functionapp_name_arg_type))
c.argument('use_same_restrictions_for_scm_site',
help="Use same access restrictions for scm site",
arg_type=get_three_state_flag())
# App Service Environment Commands
with self.argument_context('appservice ase show') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
with self.argument_context('appservice ase create') as c:
c.argument('name', options_list=['--name', '-n'], validator=validate_ase_create,
help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.SET],
scopes=['appservice']))
c.argument('kind', options_list=['--kind', '-k'], arg_type=get_enum_type(ASE_KINDS),
default='ASEv2', help="Specify App Service Environment version")
c.argument('subnet', help='Name or ID of existing subnet. To create vnet and/or subnet \
use `az network vnet [subnet] create`')
c.argument('vnet_name', help='Name of the vNet. Mandatory if only subnet name is specified.')
c.argument('virtual_ip_type', arg_type=get_enum_type(ASE_LOADBALANCER_MODES),
help="Specify if app service environment should be accessible from internet")
c.argument('ignore_subnet_size_validation', arg_type=get_three_state_flag(),
help='Do not check if subnet is sized according to recommendations.')
c.argument('ignore_route_table', arg_type=get_three_state_flag(),
help='Configure route table manually. Applies to ASEv2 only.')
c.argument('ignore_network_security_group', arg_type=get_three_state_flag(),
help='Configure network security group manually. Applies to ASEv2 only.')
c.argument('force_route_table', arg_type=get_three_state_flag(),
help='Override route table for subnet. Applies to ASEv2 only.')
c.argument('force_network_security_group', arg_type=get_three_state_flag(),
help='Override network security group for subnet. Applies to ASEv2 only.')
c.argument('front_end_scale_factor', type=int, validator=validate_front_end_scale_factor,
help='Scale of front ends to app service plan instance ratio. Applies to ASEv2 only.', default=15)
c.argument('front_end_sku', arg_type=isolated_sku_arg_type, default='I1',
help='Size of front end servers. Applies to ASEv2 only.')
c.argument('os_preference', arg_type=get_enum_type(ASE_OS_PREFERENCE_TYPES),
help='Determine if app service environment should start with Linux workers. Applies to ASEv2 only.')
c.argument('zone_redundant', arg_type=get_three_state_flag(),
help='Configure App Service Environment as Zone Redundant. Applies to ASEv3 only.')
with self.argument_context('appservice ase delete') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment')
with self.argument_context('appservice ase update') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
c.argument('front_end_scale_factor', type=int, validator=validate_front_end_scale_factor,
help='(ASEv2 only) Scale of front ends to app service plan instance ratio between 5 and 15.')
c.argument('front_end_sku', arg_type=isolated_sku_arg_type,
help='(ASEv2 only) Size of front end servers.')
c.argument('allow_new_private_endpoint_connections', arg_type=get_three_state_flag(),
options_list=['--allow-new-private-endpoint-connections', '-p'],
help='(ASEv3 only) Configure Apps in App Service Environment to allow new private endpoint connections.')
with self.argument_context('appservice ase list-addresses') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
with self.argument_context('appservice ase list-plans') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
with self.argument_context('appservice ase create-inbound-services') as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the app service environment',
local_context_attribute=LocalContextAttribute(name='ase_name', actions=[LocalContextAction.GET]))
c.argument('subnet', help='Name or ID of existing subnet for DNS Zone link. \
To create vnet and/or subnet use `az network vnet [subnet] create`')
c.argument('vnet_name', help='Name of the vNet. Mandatory if only subnet name is specified.')
c.argument('skip_dns', arg_type=get_three_state_flag(),
help='Do not create Private DNS Zone and DNS records.',
deprecate_info=c.deprecate(expiration='3.0.0'))
# App Service Domain Commands
with self.argument_context('appservice domain create') as c:
c.argument('hostname', options_list=['--hostname', '-n'], help='Name of the custom domain')
c.argument('contact_info', options_list=['--contact-info', '-c'], help='The file path to a JSON object with your contact info for domain registration. '
'Please see the following link for the format of the JSON file expected: '
'https://github.com/AzureAppServiceCLI/appservice_domains_templates/blob/master/contact_info.json')
c.argument('privacy', options_list=['--privacy', '-p'], help='Enable privacy protection')
c.argument('auto_renew', options_list=['--auto-renew', '-a'], help='Enable auto-renew on the domain')
c.argument('accept_terms', options_list=['--accept-terms'], help='By using this flag, you are accepting '
'the conditions shown using the --show-hostname-purchase-terms flag. ')
c.argument('tags', arg_type=tags_type)
c.argument('dryrun', help='Show summary of the purchase and create operation instead of executing it')
c.argument('no_wait', help='Do not wait for the create to complete, and return immediately after queuing the create.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources')
with self.argument_context('appservice domain show-terms') as c:
c.argument('hostname', options_list=['--hostname', '-n'], help='Name of the custom domain')
with self.argument_context('staticwebapp', validator=validate_public_cloud) as c:
c.argument('source', options_list=['--source', '-s'], help="URL for the repository of the static site.", arg_group="Github")
c.argument('token', options_list=['--token', '-t'], arg_group="Github",
help="A user's GitHub repository token. This is used to setup the Github Actions workflow file and "
"API secrets. If you need to create a Github Personal Access Token, "
"please run with the '--login-with-github' flag or follow the steps found at the following link:\n"
"https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line")
c.argument('login_with_github', help="Interactively log in with Github to retrieve the Personal Access Token", arg_group="Github")
c.argument('branch', options_list=['--branch', '-b'], help="The target branch in the repository.", arg_group="Github")
c.ignore('format_output')
c.argument('name', options_list=['--name', '-n'], metavar='NAME', help="Name of the static site")
with self.argument_context('staticwebapp environment') as c:
c.argument('environment_name',
options_list=['--environment-name'], help="Name of the environment of static site")
with self.argument_context('staticwebapp hostname') as c:
c.argument('hostname',
options_list=['--hostname'],
help="custom hostname such as www.example.com. Only support sub domain in preview.")
with self.argument_context('staticwebapp hostname set') as c:
c.argument('validation_method',
options_list=['--validation-method', '-m'],
help="Validation method for the custom domain.",
arg_type=get_enum_type(["cname-delegation", "dns-txt-token"]))
with self.argument_context('staticwebapp appsettings') as c:
c.argument('setting_pairs', options_list=['--setting-names'],
help="Space-separated app settings in 'key=value' format. ",
nargs='*')
c.argument('setting_names', options_list=['--setting-names'], help="Space-separated app setting names.",
nargs='*')
with self.argument_context('staticwebapp users') as c:
c.argument('authentication_provider', options_list=['--authentication-provider'],
help="Authentication provider of the user identity such as AAD, Facebook, GitHub, Google, Twitter.")
c.argument('user_details', options_list=['--user-details'],
help="Email for AAD, Facebook, and Google. Account name (handle) for GitHub and Twitter.")
c.argument('user_id',
help="Given id of registered user.")
c.argument('domain', options_list=['--domain'],
help="A domain added to the static app in quotes.")
c.argument('roles', options_list=['--roles'],
help="Comma-separated default or user-defined role names. "
"Roles that can be assigned to a user are comma separated and case-insensitive (at most 50 "
"roles up to 25 characters each and restricted to 0-9,A-Z,a-z, and _). "
"Define roles in routes.json during root directory of your GitHub repo.")
c.argument('invitation_expiration_in_hours', options_list=['--invitation-expiration-in-hours'],
help="This value sets when the link will expire in hours. The maximum is 168 (7 days).")
with self.argument_context('staticwebapp identity') as c:
c.argument('scope', help="The scope the managed identity has access to")
c.argument('role', help="Role name or id the managed identity will be assigned")
with self.argument_context('staticwebapp identity assign') as c:
c.argument('assign_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
with self.argument_context('staticwebapp identity remove') as c:
c.argument('remove_identities', options_list=['--identities'], nargs='*', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
with self.argument_context('staticwebapp create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('tags', arg_type=tags_type)
c.argument('sku', arg_type=static_web_app_sku_arg_type)
c.argument('app_location', options_list=['--app-location'],
help="Location of your application code. For example, '/' represents the root of your app, "
"while '/app' represents a directory called 'app'")
c.argument('api_location', options_list=['--api-location'],
help="Location of your Azure Functions code. For example, '/api' represents a folder called 'api'.")
c.argument('app_artifact_location', options_list=['--app-artifact-location'],
help="The path of your build output relative to your apps location. For example, setting a value "
"of 'build' when your app location is set to '/app' will cause the content at '/app/build' to "
"be served.",
deprecate_info=c.deprecate(expiration='2.22.1'))
c.argument('output_location', options_list=['--output-location'],
help="The path of your build output relative to your apps location. For example, setting a value "
"of 'build' when your app location is set to '/app' will cause the content at '/app/build' to "
"be served.")
with self.argument_context('staticwebapp update') as c:
c.argument('tags', arg_type=tags_type)
c.argument('sku', arg_type=static_web_app_sku_arg_type)
with self.argument_context('staticwebapp functions link') as c:
c.argument('function_resource_id', help="Resource ID of the functionapp to link. Can be retrieved with 'az functionapp --query id'")
c.argument('force', help="Force the function link even if the function is already linked to a static webapp. May be needed if the function was previously linked to a static webapp.")
with self.argument_context('staticwebapp enterprise-edge') as c:
c.argument("no_register", help="Enabling enterprise-grade edge requires reregistration for the Azure Front Door Microsoft.CDN resource provider. Please register the provider with: az provider register --wait --namespace Microsoft.CDN. For more details, please review the documentation available at https://go.microsoft.com/fwlink/?linkid=2184995 .", default=False)
|
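The azure-cli parameter-loader snippet above registers each option (long/short names, help text, enum choices) against a scoped argument context. As a minimal, self-contained analogue, and not the azure-cli implementation itself, the argparse sketch below mirrors the 'functionapp keys set' registrations shown above; the command and option names are copied from that help text, everything else is illustrative.

import argparse

# Toy stand-in for the scoped registrations above (cf. 'functionapp keys set');
# plain argparse here, not azure-cli's argument_context machinery.
parser = argparse.ArgumentParser(prog='az functionapp keys set')
parser.add_argument('--name', '-n', required=True, help='Name of the function app')
parser.add_argument('--slot', '-s', help='Name of the slot. Defaults to the production slot if not specified')
parser.add_argument('--key-name', required=True, help='Name of the key to set')
parser.add_argument('--key-value', help='Value of the new key. If not provided, a value will be generated')
parser.add_argument('--key-type', choices=['systemKey', 'functionKeys', 'masterKey'], help='Type of key')

args = parser.parse_args(['--name', 'myapp', '--key-name', 'default', '--key-type', 'functionKeys'])
print(args.key_name, args.key_type)  # default functionKeys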
17,851 |
def add_zingers(tomo, f=0.01, sat=2**16):
"""Add zingers.
Zingers are caused by stray X-rays hitting the detector and causing pixels
to saturate.
The zingers are uniformly distributed across the data set with the given
frequency.
Parameters
----------
tomo : ndarray
3D tomographic data.
f : float
The fraction of measurements that are zingers.
sat : float
The pixel saturation value.
Returns
-------
ndarray
Tomographic data with zingers added.
"""
zingers = np.random.uniform(0, 1, tomo.shape)
zingers = zingers <= f # five percent of measurements are zingers
new_tomo = np.copy(tomo)
new_tomo[zingers] = sat
return new_tomo
|
def add_zingers(tomo, f=0.01, sat=2**16):
"""Add zingers.
Zingers are caused by stray X-rays hitting the detector and causing pixels
to saturate.
The zingers are uniformly distributed across the data set with the given
frequency.
Parameters
----------
tomo : ndarray
3D tomographic data.
f : float
The fraction of measurements that are zingers.
sat : float
The pixel saturation value.
Returns
-------
ndarray
Tomographic data with zingers added.
"""
zingers = np.random.uniform(0, 1, tomo.shape)
zingers = zingers <= f # f percent of measurements are zingers
new_tomo = np.copy(tomo)
new_tomo[zingers] = sat
return new_tomo
|
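Both add_zingers variants in the row above implement the same simulation; only the comment changes (the saturated fraction is f, not a fixed five percent). A small usage sketch, assuming only numpy is available:

import numpy as np

def add_zingers(tomo, f=0.01, sat=2**16):
    # Mirror of the snippet above: saturate a random fraction f of pixels.
    zingers = np.random.uniform(0, 1, tomo.shape) <= f
    new_tomo = np.copy(tomo)
    new_tomo[zingers] = sat
    return new_tomo

tomo = np.random.poisson(1000.0, size=(4, 32, 32)).astype(float)  # toy 3D projection data
noisy = add_zingers(tomo, f=0.05)
print('saturated fraction:', (noisy == 2**16).mean())  # ~0.05 on average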
26,106 |
def getLocale(languageName):
""" getLocale(languageName)
Get the QLocale object for the given language (as a string).
"""
# Try System Language if nothing defined
if languageName == "":
languageName = QLocale.system().name()
# Apply synonyms
languageName = LANGUAGE_SYNONYMS.get(languageName, languageName)
# if no language applicable, get back to default
if LANGUAGES.get(languageName, None) is None:
languageName = LANGUAGE_SYNONYMS.get("", "")
# Select language in qt terms
qtLanguage = LANGUAGES.get(languageName, None)
if qtLanguage is None:
raise ValueError('Unknown language')
# Return locale
if isinstance(qtLanguage, tuple):
return QLocale(*qtLanguage)
else:
return QLocale(qtLanguage)
|
def getLocale(languageName=""):
""" getLocale(languageName)
Get the QLocale object for the given language (as a string).
"""
# Try System Language if nothing defined
if languageName == "":
languageName = QLocale.system().name()
# Apply synonyms
languageName = LANGUAGE_SYNONYMS.get(languageName, languageName)
# if no language applicable, get back to default
if LANGUAGES.get(languageName, None) is None:
languageName = LANGUAGE_SYNONYMS.get("", "")
# Select language in qt terms
qtLanguage = LANGUAGES.get(languageName, None)
if qtLanguage is None:
raise ValueError('Unknown language')
# Return locale
if isinstance(qtLanguage, tuple):
return QLocale(*qtLanguage)
else:
return QLocale(qtLanguage)
|
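The getLocale pair above differs only in the default value of languageName; the lookup logic (apply synonyms, fall back to the default language, map to a QLocale) is unchanged. Below is a Qt-free sketch of that lookup pattern, with made-up LANGUAGES/LANGUAGE_SYNONYMS tables standing in for the real module-level dictionaries.

# Illustrative stand-ins; the real LANGUAGES maps names to Qt language enums.
LANGUAGES = {'English (US)': 'en_US', 'Nederlands': 'nl_NL'}
LANGUAGE_SYNONYMS = {'': 'English (US)', 'Dutch': 'Nederlands'}

def resolve_language(languageName=''):
    # Apply synonyms first, then fall back to the default entry if unknown.
    languageName = LANGUAGE_SYNONYMS.get(languageName, languageName)
    if LANGUAGES.get(languageName, None) is None:
        languageName = LANGUAGE_SYNONYMS.get('', '')
    code = LANGUAGES.get(languageName, None)
    if code is None:
        raise ValueError('Unknown language')
    return code

print(resolve_language())         # en_US (default)
print(resolve_language('Dutch'))  # nl_NL (via synonym)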
51,760 |
def test_ensure_only_one_temporary_storage():
"""Make sure 'gitlab-ci' section of env does not allow specification of
both 'enable-artifacts-buildcache' and 'temporary-storage-url-prefix'."""
gitlab_ci_template = """
gitlab-ci:
{0}
mappings:
- match:
- notcheckedhere
runner-attributes:
tags:
- donotcare
"""
enable_artifacts = 'enable-artifacts-buildcache: True'
temp_storage = 'temporary-storage-url-prefix: file:///temp/mirror'
specify_both = """{0}
{1}
""".format(enable_artifacts, temp_storage)
specify_neither = ''
# User can specify "enable-artifacts-buildcache" (booelan)
yaml_obj = syaml.load(gitlab_ci_template.format(enable_artifacts))
validate(yaml_obj, gitlab_ci_schema)
# User can also specify "temporary-storage-url-prefix" (string)
yaml_obj = syaml.load(gitlab_ci_template.format(temp_storage))
validate(yaml_obj, gitlab_ci_schema)
# However, specifying both should fail to validate
yaml_obj = syaml.load(gitlab_ci_template.format(specify_both))
with pytest.raises(ValidationError):
validate(yaml_obj, gitlab_ci_schema)
# Specifying neither should be fine too, as neither of these properties
# should be required
yaml_obj = syaml.load(gitlab_ci_template.format(specify_neither))
validate(yaml_obj, gitlab_ci_schema)
|
def test_ensure_only_one_temporary_storage():
"""Make sure 'gitlab-ci' section of env does not allow specification of
both 'enable-artifacts-buildcache' and 'temporary-storage-url-prefix'."""
gitlab_ci_template = """
gitlab-ci:
{0}
mappings:
- match:
- notcheckedhere
runner-attributes:
tags:
- donotcare
"""
enable_artifacts = 'enable-artifacts-buildcache: True'
temp_storage = 'temporary-storage-url-prefix: file:///temp/mirror'
specify_both = """{0}
{1}
""".format(enable_artifacts, temp_storage)
specify_neither = ''
# User can specify "enable-artifacts-buildcache" (boolean)
yaml_obj = syaml.load(gitlab_ci_template.format(enable_artifacts))
validate(yaml_obj, gitlab_ci_schema)
# User can also specify "temporary-storage-url-prefix" (string)
yaml_obj = syaml.load(gitlab_ci_template.format(temp_storage))
validate(yaml_obj, gitlab_ci_schema)
# However, specifying both should fail to validate
yaml_obj = syaml.load(gitlab_ci_template.format(specify_both))
with pytest.raises(ValidationError):
validate(yaml_obj, gitlab_ci_schema)
# Specifying neither should be fine too, as neither of these properties
# should be required
yaml_obj = syaml.load(gitlab_ci_template.format(specify_neither))
validate(yaml_obj, gitlab_ci_schema)
|
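The test above checks that the gitlab-ci schema rejects configurations that set both 'enable-artifacts-buildcache' and 'temporary-storage-url-prefix'. The sketch below reproduces that mutual-exclusion rule with a toy jsonschema schema (not spack's actual gitlab_ci_schema), assuming the jsonschema package is installed.

import jsonschema

# Toy schema capturing only the mutual-exclusion rule exercised by the test above.
schema = {
    'type': 'object',
    'properties': {
        'enable-artifacts-buildcache': {'type': 'boolean'},
        'temporary-storage-url-prefix': {'type': 'string'},
    },
    # "not required both": at least one of the two keys must be absent
    'not': {'required': ['enable-artifacts-buildcache', 'temporary-storage-url-prefix']},
}

jsonschema.validate({'enable-artifacts-buildcache': True}, schema)                     # passes
jsonschema.validate({'temporary-storage-url-prefix': 'file:///temp/mirror'}, schema)   # passes
try:
    jsonschema.validate({'enable-artifacts-buildcache': True,
                         'temporary-storage-url-prefix': 'file:///temp/mirror'}, schema)
except jsonschema.ValidationError as exc:
    print('rejected as expected:', exc.message)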
7,013 |
def encode_(message):
"""Convert the structure holding a message field from JSON to a string."""
try:
return json.dumps(message)
except Exception as exc:
return json.dumps({'data': str(exc)})
|
def encode_(message):
"""Convert the structure holding a message field from JSON to a string."""
try:
return json.dumps(message)
except Exception as exc:
return json.dumps({'errors': [{'message': str(exc)}]})
|
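The two encode_ variants above differ only in the shape of the fallback payload on serialization failure (a 'data' string versus an 'errors' list of message objects). A quick, runnable demonstration with a non-serializable message:

import json

def encode_original(message):
    try:
        return json.dumps(message)
    except Exception as exc:
        return json.dumps({'data': str(exc)})

def encode_modified(message):
    try:
        return json.dumps(message)
    except Exception as exc:
        return json.dumps({'errors': [{'message': str(exc)}]})

bad = {'payload': {1, 2, 3}}      # sets are not JSON serializable
print(encode_original(bad))       # {"data": "Object of type set is not JSON serializable"}
print(encode_modified(bad))       # {"errors": [{"message": "Object of type set is not JSON serializable"}]}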
14,127 |
def _explore(
df,
column=None,
cmap=None,
color=None,
m=None,
tiles="OpenStreetMap",
attr=None,
tooltip=True,
popup=False,
highlight=True,
categorical=False,
legend=True,
scheme=None,
k=5,
vmin=None,
vmax=None,
width="100%",
height="100%",
categories=None,
classification_kwds=None,
control_scale=True,
marker_type=None,
marker_kwds={},
style_kwds={},
highlight_kwds={},
missing_kwds={},
tooltip_kwds={},
popup_kwds={},
legend_kwds={},
map_kwds={},
**kwargs,
):
"""Interactive map based on GeoPandas and folium/leaflet.js
Generate an interactive leaflet map based on :class:`~geopandas.GeoDataFrame`
Parameters
----------
column : str, np.array, pd.Series (default None)
The name of the dataframe column, :class:`numpy.array`,
or :class:`pandas.Series` to be plotted. If :class:`numpy.array` or
:class:`pandas.Series` are used then it must have same length as dataframe.
cmap : str, matplotlib.Colormap, branca.colormap or function (default None)
The name of a colormap recognized by ``matplotlib``, a list-like of colors,
:class:`matplotlib.colors.Colormap`, a :class:`branca.colormap.ColorMap` or
function that returns a named color or hex based on the column
value, e.g.::
def my_colormap(value): # scalar value defined in 'column'
if value > 1:
return "green"
return "red"
color : str, array-like (default None)
Named color or a list-like of colors (named or hex).
m : folium.Map (default None)
Existing map instance on which to draw the plot.
tiles : str, xyzservices.TileProvider (default 'OpenStreetMap Mapnik')
Map tileset to use. Can choose from the list supported by folium, query a
:class:`xyzservices.TileProvider` by a name from ``xyzservices.providers``,
pass :class:`xyzservices.TileProvider` object or pass custom XYZ URL.
The current list of built-in providers (when ``xyzservices`` is not available):
``["OpenStreetMap", "Stamen Terrain", “Stamen Toner", “Stamen Watercolor"
"CartoDB positron", “CartoDB dark_matter"]``
You can pass a custom tileset to Folium by passing a Leaflet-style URL
to the tiles parameter: ``http://{s}.yourtiles.com/{z}/{x}/{y}.png``.
Be sure to check their terms and conditions and to provide attribution with
the ``attr`` keyword.
attr : str (default None)
Map tile attribution; only required if passing custom tile URL.
tooltip : bool, str, int, list (default True)
Display GeoDataFrame attributes when hovering over the object.
``True`` includes all columns. ``False`` removes tooltip. Pass string or list of
strings to specify a column(s). Integer specifies first n columns to be
included. Defaults to ``True``.
popup : bool, str, int, list (default False)
Input GeoDataFrame attributes for object displayed when clicking.
``True`` includes all columns. ``False`` removes popup. Pass string or list of
strings to specify a column(s). Integer specifies first n columns to be
included. Defaults to ``False``.
highlight : bool (default True)
Enable highlight functionality when hovering over a geometry.
categorical : bool (default False)
If ``False``, ``cmap`` will reflect numerical values of the
column being plotted. For non-numerical columns, this
will be set to True.
legend : bool (default True)
Plot a legend in choropleth plots.
Ignored if no ``column`` is given.
scheme : str (default None)
Name of a choropleth classification scheme (requires ``mapclassify`` >= 2.4.0).
A :func:`mapclassify.classify` will be used
under the hood. Supported are all schemes provided by ``mapclassify`` (e.g.
``'BoxPlot'``, ``'EqualInterval'``, ``'FisherJenks'``, ``'FisherJenksSampled'``,
``'HeadTailBreaks'``, ``'JenksCaspall'``, ``'JenksCaspallForced'``,
``'JenksCaspallSampled'``, ``'MaxP'``, ``'MaximumBreaks'``,
``'NaturalBreaks'``, ``'Quantiles'``, ``'Percentiles'``, ``'StdMean'``,
``'UserDefined'``). Arguments can be passed in ``classification_kwds``.
k : int (default 5)
Number of classes
vmin : None or float (default None)
Minimum value of ``cmap``. If ``None``, the minimum data value
in the column to be plotted is used.
vmax : None or float (default None)
Maximum value of ``cmap``. If ``None``, the maximum data value
in the column to be plotted is used.
width : pixel int or percentage string (default: '100%')
Width of the folium :class:`~folium.folium.Map`. If the argument
m is given explicitly, width is ignored.
height : pixel int or percentage string (default: '100%')
Height of the folium :class:`~folium.folium.Map`. If the argument
m is given explicitly, height is ignored.
categories : list-like
Ordered list-like object of categories to be used for categorical plot.
classification_kwds : dict (default None)
Keyword arguments to pass to mapclassify
control_scale : bool, (default True)
Whether to add a control scale on the map.
marker_type : str, folium.Circle, folium.CircleMarker, folium.Marker (default None)
Allowed string options are ('marker', 'circle', 'circle_marker'). Defaults to
folium.CircleMarker.
marker_kwds: dict (default {})
Additional keywords to be passed to the selected ``marker_type``, e.g.:
radius : float (default 2 for ``circle_marker`` and 50 for ``circle``)
Radius of the circle, in meters (for ``circle``) or pixels
(for ``circle_marker``).
fill : bool (default True)
Whether to fill the ``circle`` or ``circle_marker`` with color.
icon : folium.map.Icon
the :class:`folium.map.Icon` object to use to render the marker.
draggable : bool (default False)
Set to True to be able to drag the marker around the map.
style_kwds : dict (default {})
Additional style to be passed to folium ``style_function``:
stroke : bool (default True)
Whether to draw stroke along the path. Set it to ``False`` to
disable borders on polygons or circles.
color : str
Stroke color
weight : int
Stroke width in pixels
opacity : float (default 1.0)
Stroke opacity
fill : boolean (default True)
Whether to fill the path with color. Set it to ``False`` to
disable filling on polygons or circles.
fillColor : str
Fill color. Defaults to the value of the color option
fillOpacity : float (default 0.5)
Fill opacity.
Plus all supported by :func:`folium.vector_layers.path_options`. See the
documentation of :class:`folium.features.GeoJson` for details.
highlight_kwds : dict (default {})
Style to be passed to folium highlight_function. Uses the same keywords
as ``style_kwds``. When empty, defaults to ``{"fillOpacity": 0.75}``.
tooltip_kwds : dict (default {})
Additional keywords to be passed to :class:`folium.features.GeoJsonTooltip`,
e.g. ``aliases``, ``labels``, or ``sticky``.
popup_kwds : dict (default {})
Additional keywords to be passed to :class:`folium.features.GeoJsonPopup`,
e.g. ``aliases`` or ``labels``.
legend_kwds : dict (default {})
Additional keywords to be passed to the legend.
Currently supported customisation:
caption : string
Custom caption of the legend. Defaults to the column name.
Additional accepted keywords when ``scheme`` is specified:
colorbar : bool (default True)
An option to control the style of the legend. If True, continuous
colorbar will be used. If False, categorical legend will be used for bins.
scale : bool (default True)
Scale bins along the colorbar axis according to the bin edges (True)
or use the equal length for each bin (False)
fmt : string (default "{:.2f}")
A formatting specification for the bin edges of the classes in the
legend. For example, to have no decimals: ``{"fmt": "{:.0f}"}``. Applies
if ``colorbar=False``.
labels : list-like
A list of legend labels to override the auto-generated labels.
Needs to have the same number of elements as the number of
classes (`k`). Applies if ``colorbar=False``.
interval : boolean (default False)
An option to control brackets from mapclassify legend.
If True, open/closed interval brackets are shown in the legend.
Applies if ``colorbar=False``.
max_labels : int, default 10
Maximum number of colorbar tick labels (requires branca>=0.5.0)
map_kwds : dict (default {})
Additional keywords to be passed to :class:`folium.Map`,
e.g. ``dragging``, or ``scrollWheelZoom``.
**kwargs : dict
Additional options to be passed on to the folium object.
Returns
-------
m : folium.folium.Map
folium :class:`~folium.folium.Map` instance
Examples
--------
>>> df = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
>>> df.head(2) # doctest: +SKIP
pop_est continent name iso_a3 \
gdp_md_est geometry
0 920938 Oceania Fiji FJI 8374.0 MULTIPOLY\
GON (((180.00000 -16.06713, 180.00000...
1 53950935 Africa Tanzania TZA 150600.0 POLYGON (\
(33.90371 -0.95000, 34.07262 -1.05982...
>>> df.explore("pop_est", cmap="Blues") # doctest: +SKIP
"""
try:
import branca as bc
import folium
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from mapclassify import classify
except (ImportError, ModuleNotFoundError):
raise ImportError(
"The 'folium', 'matplotlib' and 'mapclassify' packages are required for "
"'explore()'. You can install them using "
"'conda install -c conda-forge folium matplotlib mapclassify' "
"or 'pip install folium matplotlib mapclassify'."
)
# xyzservices is an optional dependency
try:
import xyzservices
HAS_XYZSERVICES = True
except (ImportError, ModuleNotFoundError):
HAS_XYZSERVICES = False
gdf = df.copy()
# convert LinearRing to LineString
rings_mask = df.geom_type == "LinearRing"
if rings_mask.any():
gdf.geometry[rings_mask] = gdf.geometry[rings_mask].apply(
lambda g: LineString(g)
)
if gdf.crs is None:
kwargs["crs"] = "Simple"
tiles = None
elif not gdf.crs.equals(4326):
gdf = gdf.to_crs(4326)
# create folium.Map object
if m is None:
# Get bounds to specify location and map extent
bounds = gdf.total_bounds
location = kwargs.pop("location", None)
if location is None:
x = mean([bounds[0], bounds[2]])
y = mean([bounds[1], bounds[3]])
location = (y, x)
if "zoom_start" in kwargs.keys():
fit = False
else:
fit = True
else:
fit = False
# get a subset of kwargs to be passed to folium.Map
map_kwds = {
**map_kwds,
**{i: kwargs[i] for i in kwargs.keys() if i in _MAP_KWARGS},
}
if HAS_XYZSERVICES:
# match provider name string to xyzservices.TileProvider
if isinstance(tiles, str):
try:
tiles = xyzservices.providers.query_name(tiles)
except ValueError:
pass
if isinstance(tiles, xyzservices.TileProvider):
attr = attr if attr else tiles.html_attribution
map_kwds["min_zoom"] = tiles.get("min_zoom", 0)
map_kwds["max_zoom"] = tiles.get("max_zoom", 18)
tiles = tiles.build_url(scale_factor="{r}")
m = folium.Map(
location=location,
control_scale=control_scale,
tiles=tiles,
attr=attr,
width=width,
height=height,
**map_kwds,
)
# fit bounds to get a proper zoom level
if fit:
m.fit_bounds([[bounds[1], bounds[0]], [bounds[3], bounds[2]]])
for map_kwd in _MAP_KWARGS:
kwargs.pop(map_kwd, None)
nan_idx = None
if column is not None:
if pd.api.types.is_list_like(column):
if len(column) != gdf.shape[0]:
raise ValueError(
"The GeoDataFrame and given column have different number of rows."
)
else:
column_name = "__plottable_column"
gdf[column_name] = column
column = column_name
elif pd.api.types.is_categorical_dtype(gdf[column]):
if categories is not None:
raise ValueError(
"Cannot specify 'categories' when column has categorical dtype"
)
categorical = True
elif gdf[column].dtype is np.dtype("O") or categories:
categorical = True
nan_idx = pd.isna(gdf[column])
if categorical:
cat = pd.Categorical(gdf[column][~nan_idx], categories=categories)
N = len(cat.categories)
cmap = cmap if cmap else "tab20"
# colormap exists in matplotlib
if cmap in plt.colormaps():
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, N)(cat.codes)
)
legend_colors = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, N)(range(N))
)
# colormap is matplotlib.Colormap
elif isinstance(cmap, colors.Colormap):
color = np.apply_along_axis(colors.to_hex, 1, cmap(cat.codes))
legend_colors = np.apply_along_axis(colors.to_hex, 1, cmap(range(N)))
# custom list of colors
elif pd.api.types.is_list_like(cmap):
if N > len(cmap):
cmap = cmap * (N // len(cmap) + 1)
color = np.take(cmap, cat.codes)
legend_colors = np.take(cmap, range(N))
else:
raise ValueError(
"'cmap' is invalid. For categorical plots, pass either valid "
"named matplotlib colormap or a list-like of colors."
)
elif callable(cmap):
# List of colors based on Branca colormaps or self-defined functions
color = list(map(lambda x: cmap(x), df[column]))
else:
vmin = gdf[column].min() if vmin is None else vmin
vmax = gdf[column].max() if vmax is None else vmax
# get bins
if scheme is not None:
if classification_kwds is None:
classification_kwds = {}
if "k" not in classification_kwds:
classification_kwds["k"] = k
binning = classify(
np.asarray(gdf[column][~nan_idx]), scheme, **classification_kwds
)
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, k)(binning.yb)
)
else:
bins = np.linspace(vmin, vmax, 257)[1:]
binning = classify(
np.asarray(gdf[column][~nan_idx]), "UserDefined", bins=bins
)
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, 256)(binning.yb)
)
# set default style
if "fillOpacity" not in style_kwds:
style_kwds["fillOpacity"] = 0.5
if "weight" not in style_kwds:
style_kwds["weight"] = 2
# specify color
if color is not None:
if (
isinstance(color, str)
and isinstance(gdf, geopandas.GeoDataFrame)
and color in gdf.columns
): # use existing column
def _style_color(x):
return {
"fillColor": x["properties"][color],
**style_kwds,
}
style_function = _style_color
else: # assign new column
if isinstance(gdf, geopandas.GeoSeries):
gdf = geopandas.GeoDataFrame(geometry=gdf)
if nan_idx is not None and nan_idx.any():
nan_color = missing_kwds.pop("color", None)
gdf["__folium_color"] = nan_color
gdf.loc[~nan_idx, "__folium_color"] = color
else:
gdf["__folium_color"] = color
stroke_color = style_kwds.pop("color", None)
if not stroke_color:
def _style_column(x):
return {
"fillColor": x["properties"]["__folium_color"],
"color": x["properties"]["__folium_color"],
**style_kwds,
}
style_function = _style_column
else:
def _style_stroke(x):
return {
"fillColor": x["properties"]["__folium_color"],
"color": stroke_color,
**style_kwds,
}
style_function = _style_stroke
else: # use folium default
def _style_default(x):
return {**style_kwds}
style_function = _style_default
if highlight:
if "fillOpacity" not in highlight_kwds:
highlight_kwds["fillOpacity"] = 0.75
def _style_highlight(x):
return {**highlight_kwds}
highlight_function = _style_highlight
else:
highlight_function = None
# define default for points
if marker_type is None:
marker_type = "circle_marker"
marker = marker_type
if isinstance(marker_type, str):
if marker_type == "marker":
marker = folium.Marker(**marker_kwds)
elif marker_type == "circle":
marker = folium.Circle(**marker_kwds)
elif marker_type == "circle_marker":
marker_kwds["radius"] = marker_kwds.get("radius", 2)
marker_kwds["fill"] = marker_kwds.get("fill", True)
marker = folium.CircleMarker(**marker_kwds)
else:
raise ValueError(
"Only 'marker', 'circle', and 'circle_marker' are "
"supported as marker values"
)
# remove additional geometries
if isinstance(gdf, geopandas.GeoDataFrame):
non_active_geoms = [
name
for name, val in (gdf.dtypes == "geometry").items()
if val and name != gdf.geometry.name
]
gdf = gdf.drop(columns=non_active_geoms)
# prepare tooltip and popup
if isinstance(gdf, geopandas.GeoDataFrame):
# add named index to the tooltip
if gdf.index.name is not None:
gdf = gdf.reset_index()
# specify fields to show in the tooltip
tooltip = _tooltip_popup("tooltip", tooltip, gdf, **tooltip_kwds)
popup = _tooltip_popup("popup", popup, gdf, **popup_kwds)
else:
tooltip = None
popup = None
# add dataframe to map
folium.GeoJson(
gdf.__geo_interface__,
tooltip=tooltip,
popup=popup,
marker=marker,
style_function=style_function,
highlight_function=highlight_function,
**kwargs,
).add_to(m)
if legend:
# NOTE: overlaps will be resolved in branca #88
caption = column if not column == "__plottable_column" else ""
caption = legend_kwds.pop("caption", caption)
if categorical:
categories = cat.categories.to_list()
legend_colors = legend_colors.tolist()
if nan_idx.any() and nan_color:
categories.append(missing_kwds.pop("label", "NaN"))
legend_colors.append(nan_color)
_categorical_legend(m, caption, categories, legend_colors)
elif column is not None:
cbar = legend_kwds.pop("colorbar", True)
colormap_kwds = {}
if "max_labels" in legend_kwds:
colormap_kwds["max_labels"] = legend_kwds.pop("max_labels")
if scheme:
cb_colors = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, binning.k)(range(binning.k))
)
if cbar:
if legend_kwds.pop("scale", True):
index = [vmin] + binning.bins.tolist()
else:
index = None
colorbar = bc.colormap.StepColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
index=index,
**colormap_kwds,
)
else:
fmt = legend_kwds.pop("fmt", "{:.2f}")
if "labels" in legend_kwds:
categories = legend_kwds["labels"]
else:
categories = binning.get_legend_classes(fmt)
show_interval = legend_kwds.pop("interval", False)
if not show_interval:
categories = [c[1:-1] for c in categories]
if nan_idx.any() and nan_color:
categories.append(missing_kwds.pop("label", "NaN"))
cb_colors = np.append(cb_colors, nan_color)
_categorical_legend(m, caption, categories, cb_colors)
else:
if isinstance(cmap, bc.colormap.ColorMap):
colorbar = cmap
else:
mp_cmap = cm.get_cmap(cmap)
cb_colors = np.apply_along_axis(
colors.to_hex, 1, mp_cmap(range(mp_cmap.N))
)
# linear legend
if mp_cmap.N > 20:
colorbar = bc.colormap.LinearColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
**colormap_kwds,
)
# steps
else:
colorbar = bc.colormap.StepColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
**colormap_kwds,
)
if cbar:
if nan_idx.any() and nan_color:
_categorical_legend(
m, "", [missing_kwds.pop("label", "NaN")], [nan_color]
)
m.add_child(colorbar)
return m
|
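The _explore helper above (and the second copy that follows, where the visible difference is a wording tweak in the map_kwds docstring entry) backs GeoDataFrame.explore(). A short usage sketch for that public API, assuming geopandas, folium, matplotlib and mapclassify are installed and that the bundled naturalearth_lowres dataset is still shipped (newer geopandas releases may drop it):

import geopandas

df = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
m = df.explore(
    column='pop_est',              # choropleth on a numeric column
    scheme='Quantiles', k=5,       # mapclassify classification
    cmap='Blues',
    tooltip=['name', 'pop_est'],   # hover fields
    style_kwds={'weight': 1},      # forwarded to the folium style_function
)
m.save('population.html')          # m is a folium.Map; write a standalone HTML file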
def _explore(
df,
column=None,
cmap=None,
color=None,
m=None,
tiles="OpenStreetMap",
attr=None,
tooltip=True,
popup=False,
highlight=True,
categorical=False,
legend=True,
scheme=None,
k=5,
vmin=None,
vmax=None,
width="100%",
height="100%",
categories=None,
classification_kwds=None,
control_scale=True,
marker_type=None,
marker_kwds={},
style_kwds={},
highlight_kwds={},
missing_kwds={},
tooltip_kwds={},
popup_kwds={},
legend_kwds={},
map_kwds={},
**kwargs,
):
"""Interactive map based on GeoPandas and folium/leaflet.js
Generate an interactive leaflet map based on :class:`~geopandas.GeoDataFrame`
Parameters
----------
column : str, np.array, pd.Series (default None)
The name of the dataframe column, :class:`numpy.array`,
or :class:`pandas.Series` to be plotted. If :class:`numpy.array` or
:class:`pandas.Series` are used then it must have same length as dataframe.
cmap : str, matplotlib.Colormap, branca.colormap or function (default None)
The name of a colormap recognized by ``matplotlib``, a list-like of colors,
:class:`matplotlib.colors.Colormap`, a :class:`branca.colormap.ColorMap` or
function that returns a named color or hex based on the column
value, e.g.::
def my_colormap(value): # scalar value defined in 'column'
if value > 1:
return "green"
return "red"
color : str, array-like (default None)
Named color or a list-like of colors (named or hex).
m : folium.Map (default None)
Existing map instance on which to draw the plot.
tiles : str, xyzservices.TileProvider (default 'OpenStreetMap Mapnik')
Map tileset to use. Can choose from the list supported by folium, query a
:class:`xyzservices.TileProvider` by a name from ``xyzservices.providers``,
pass :class:`xyzservices.TileProvider` object or pass custom XYZ URL.
The current list of built-in providers (when ``xyzservices`` is not available):
``["OpenStreetMap", "Stamen Terrain", “Stamen Toner", “Stamen Watercolor"
"CartoDB positron", “CartoDB dark_matter"]``
You can pass a custom tileset to Folium by passing a Leaflet-style URL
to the tiles parameter: ``http://{s}.yourtiles.com/{z}/{x}/{y}.png``.
Be sure to check their terms and conditions and to provide attribution with
the ``attr`` keyword.
attr : str (default None)
Map tile attribution; only required if passing custom tile URL.
tooltip : bool, str, int, list (default True)
Display GeoDataFrame attributes when hovering over the object.
``True`` includes all columns. ``False`` removes tooltip. Pass string or list of
strings to specify a column(s). Integer specifies first n columns to be
included. Defaults to ``True``.
popup : bool, str, int, list (default False)
Input GeoDataFrame attributes for object displayed when clicking.
``True`` includes all columns. ``False`` removes popup. Pass string or list of
strings to specify a column(s). Integer specifies first n columns to be
included. Defaults to ``False``.
highlight : bool (default True)
Enable highlight functionality when hovering over a geometry.
categorical : bool (default False)
If ``False``, ``cmap`` will reflect numerical values of the
column being plotted. For non-numerical columns, this
will be set to True.
legend : bool (default True)
Plot a legend in choropleth plots.
Ignored if no ``column`` is given.
scheme : str (default None)
Name of a choropleth classification scheme (requires ``mapclassify`` >= 2.4.0).
A :func:`mapclassify.classify` will be used
under the hood. Supported are all schemes provided by ``mapclassify`` (e.g.
``'BoxPlot'``, ``'EqualInterval'``, ``'FisherJenks'``, ``'FisherJenksSampled'``,
``'HeadTailBreaks'``, ``'JenksCaspall'``, ``'JenksCaspallForced'``,
``'JenksCaspallSampled'``, ``'MaxP'``, ``'MaximumBreaks'``,
``'NaturalBreaks'``, ``'Quantiles'``, ``'Percentiles'``, ``'StdMean'``,
``'UserDefined'``). Arguments can be passed in ``classification_kwds``.
k : int (default 5)
Number of classes
vmin : None or float (default None)
Minimum value of ``cmap``. If ``None``, the minimum data value
in the column to be plotted is used.
vmax : None or float (default None)
Maximum value of ``cmap``. If ``None``, the maximum data value
in the column to be plotted is used.
width : pixel int or percentage string (default: '100%')
Width of the folium :class:`~folium.folium.Map`. If the argument
m is given explicitly, width is ignored.
height : pixel int or percentage string (default: '100%')
Height of the folium :class:`~folium.folium.Map`. If the argument
m is given explicitly, height is ignored.
categories : list-like
Ordered list-like object of categories to be used for categorical plot.
classification_kwds : dict (default None)
Keyword arguments to pass to mapclassify
control_scale : bool, (default True)
Whether to add a control scale on the map.
marker_type : str, folium.Circle, folium.CircleMarker, folium.Marker (default None)
Allowed string options are ('marker', 'circle', 'circle_marker'). Defaults to
folium.CircleMarker.
marker_kwds: dict (default {})
Additional keywords to be passed to the selected ``marker_type``, e.g.:
radius : float (default 2 for ``circle_marker`` and 50 for ``circle``)
Radius of the circle, in meters (for ``circle``) or pixels
(for ``circle_marker``).
fill : bool (default True)
Whether to fill the ``circle`` or ``circle_marker`` with color.
icon : folium.map.Icon
the :class:`folium.map.Icon` object to use to render the marker.
draggable : bool (default False)
Set to True to be able to drag the marker around the map.
style_kwds : dict (default {})
Additional style to be passed to folium ``style_function``:
stroke : bool (default True)
Whether to draw stroke along the path. Set it to ``False`` to
disable borders on polygons or circles.
color : str
Stroke color
weight : int
Stroke width in pixels
opacity : float (default 1.0)
Stroke opacity
fill : boolean (default True)
Whether to fill the path with color. Set it to ``False`` to
disable filling on polygons or circles.
fillColor : str
Fill color. Defaults to the value of the color option
fillOpacity : float (default 0.5)
Fill opacity.
Plus all supported by :func:`folium.vector_layers.path_options`. See the
documentation of :class:`folium.features.GeoJson` for details.
highlight_kwds : dict (default {})
Style to be passed to folium highlight_function. Uses the same keywords
as ``style_kwds``. When empty, defaults to ``{"fillOpacity": 0.75}``.
tooltip_kwds : dict (default {})
Additional keywords to be passed to :class:`folium.features.GeoJsonTooltip`,
e.g. ``aliases``, ``labels``, or ``sticky``.
popup_kwds : dict (default {})
Additional keywords to be passed to :class:`folium.features.GeoJsonPopup`,
e.g. ``aliases`` or ``labels``.
legend_kwds : dict (default {})
Additional keywords to be passed to the legend.
Currently supported customisation:
caption : string
Custom caption of the legend. Defaults to the column name.
Additional accepted keywords when ``scheme`` is specified:
colorbar : bool (default True)
An option to control the style of the legend. If True, continuous
colorbar will be used. If False, categorical legend will be used for bins.
scale : bool (default True)
Scale bins along the colorbar axis according to the bin edges (True)
or use the equal length for each bin (False)
fmt : string (default "{:.2f}")
A formatting specification for the bin edges of the classes in the
legend. For example, to have no decimals: ``{"fmt": "{:.0f}"}``. Applies
if ``colorbar=False``.
labels : list-like
A list of legend labels to override the auto-generated labels.
Needs to have the same number of elements as the number of
classes (`k`). Applies if ``colorbar=False``.
interval : boolean (default False)
An option to control brackets from mapclassify legend.
If True, open/closed interval brackets are shown in the legend.
Applies if ``colorbar=False``.
max_labels : int, default 10
Maximum number of colorbar tick labels (requires branca>=0.5.0)
map_kwds : dict (default {})
Additional keywords to be passed to folium :class:`~folium.folium.Map`,
e.g. ``dragging``, or ``scrollWheelZoom``.
**kwargs : dict
Additional options to be passed on to the folium object.
Returns
-------
m : folium.folium.Map
folium :class:`~folium.folium.Map` instance
Examples
--------
>>> df = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
>>> df.head(2) # doctest: +SKIP
pop_est continent name iso_a3 \
gdp_md_est geometry
0 920938 Oceania Fiji FJI 8374.0 MULTIPOLY\
GON (((180.00000 -16.06713, 180.00000...
1 53950935 Africa Tanzania TZA 150600.0 POLYGON (\
(33.90371 -0.95000, 34.07262 -1.05982...
>>> df.explore("pop_est", cmap="Blues") # doctest: +SKIP
"""
try:
import branca as bc
import folium
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from mapclassify import classify
except (ImportError, ModuleNotFoundError):
raise ImportError(
"The 'folium', 'matplotlib' and 'mapclassify' packages are required for "
"'explore()'. You can install them using "
"'conda install -c conda-forge folium matplotlib mapclassify' "
"or 'pip install folium matplotlib mapclassify'."
)
# xyzservices is an optional dependency
try:
import xyzservices
HAS_XYZSERVICES = True
except (ImportError, ModuleNotFoundError):
HAS_XYZSERVICES = False
gdf = df.copy()
# convert LinearRing to LineString
rings_mask = df.geom_type == "LinearRing"
if rings_mask.any():
gdf.geometry[rings_mask] = gdf.geometry[rings_mask].apply(
lambda g: LineString(g)
)
if gdf.crs is None:
kwargs["crs"] = "Simple"
tiles = None
elif not gdf.crs.equals(4326):
gdf = gdf.to_crs(4326)
# create folium.Map object
if m is None:
# Get bounds to specify location and map extent
bounds = gdf.total_bounds
location = kwargs.pop("location", None)
if location is None:
x = mean([bounds[0], bounds[2]])
y = mean([bounds[1], bounds[3]])
location = (y, x)
if "zoom_start" in kwargs.keys():
fit = False
else:
fit = True
else:
fit = False
# get a subset of kwargs to be passed to folium.Map
map_kwds = {
**map_kwds,
**{i: kwargs[i] for i in kwargs.keys() if i in _MAP_KWARGS},
}
if HAS_XYZSERVICES:
# match provider name string to xyzservices.TileProvider
if isinstance(tiles, str):
try:
tiles = xyzservices.providers.query_name(tiles)
except ValueError:
pass
if isinstance(tiles, xyzservices.TileProvider):
attr = attr if attr else tiles.html_attribution
map_kwds["min_zoom"] = tiles.get("min_zoom", 0)
map_kwds["max_zoom"] = tiles.get("max_zoom", 18)
tiles = tiles.build_url(scale_factor="{r}")
m = folium.Map(
location=location,
control_scale=control_scale,
tiles=tiles,
attr=attr,
width=width,
height=height,
**map_kwds,
)
# fit bounds to get a proper zoom level
if fit:
m.fit_bounds([[bounds[1], bounds[0]], [bounds[3], bounds[2]]])
for map_kwd in _MAP_KWARGS:
kwargs.pop(map_kwd, None)
nan_idx = None
if column is not None:
if pd.api.types.is_list_like(column):
if len(column) != gdf.shape[0]:
raise ValueError(
"The GeoDataFrame and given column have different number of rows."
)
else:
column_name = "__plottable_column"
gdf[column_name] = column
column = column_name
elif pd.api.types.is_categorical_dtype(gdf[column]):
if categories is not None:
raise ValueError(
"Cannot specify 'categories' when column has categorical dtype"
)
categorical = True
elif gdf[column].dtype is np.dtype("O") or categories:
categorical = True
nan_idx = pd.isna(gdf[column])
if categorical:
cat = pd.Categorical(gdf[column][~nan_idx], categories=categories)
N = len(cat.categories)
cmap = cmap if cmap else "tab20"
# colormap exists in matplotlib
if cmap in plt.colormaps():
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, N)(cat.codes)
)
legend_colors = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, N)(range(N))
)
# colormap is matplotlib.Colormap
elif isinstance(cmap, colors.Colormap):
color = np.apply_along_axis(colors.to_hex, 1, cmap(cat.codes))
legend_colors = np.apply_along_axis(colors.to_hex, 1, cmap(range(N)))
# custom list of colors
elif pd.api.types.is_list_like(cmap):
if N > len(cmap):
cmap = cmap * (N // len(cmap) + 1)
color = np.take(cmap, cat.codes)
legend_colors = np.take(cmap, range(N))
else:
raise ValueError(
"'cmap' is invalid. For categorical plots, pass either valid "
"named matplotlib colormap or a list-like of colors."
)
elif callable(cmap):
# List of colors based on Branca colormaps or self-defined functions
color = list(map(lambda x: cmap(x), df[column]))
else:
vmin = gdf[column].min() if vmin is None else vmin
vmax = gdf[column].max() if vmax is None else vmax
# get bins
if scheme is not None:
if classification_kwds is None:
classification_kwds = {}
if "k" not in classification_kwds:
classification_kwds["k"] = k
binning = classify(
np.asarray(gdf[column][~nan_idx]), scheme, **classification_kwds
)
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, k)(binning.yb)
)
else:
bins = np.linspace(vmin, vmax, 257)[1:]
binning = classify(
np.asarray(gdf[column][~nan_idx]), "UserDefined", bins=bins
)
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, 256)(binning.yb)
)
# set default style
if "fillOpacity" not in style_kwds:
style_kwds["fillOpacity"] = 0.5
if "weight" not in style_kwds:
style_kwds["weight"] = 2
# specify color
if color is not None:
if (
isinstance(color, str)
and isinstance(gdf, geopandas.GeoDataFrame)
and color in gdf.columns
): # use existing column
def _style_color(x):
return {
"fillColor": x["properties"][color],
**style_kwds,
}
style_function = _style_color
else: # assign new column
if isinstance(gdf, geopandas.GeoSeries):
gdf = geopandas.GeoDataFrame(geometry=gdf)
if nan_idx is not None and nan_idx.any():
nan_color = missing_kwds.pop("color", None)
gdf["__folium_color"] = nan_color
gdf.loc[~nan_idx, "__folium_color"] = color
else:
gdf["__folium_color"] = color
stroke_color = style_kwds.pop("color", None)
if not stroke_color:
def _style_column(x):
return {
"fillColor": x["properties"]["__folium_color"],
"color": x["properties"]["__folium_color"],
**style_kwds,
}
style_function = _style_column
else:
def _style_stroke(x):
return {
"fillColor": x["properties"]["__folium_color"],
"color": stroke_color,
**style_kwds,
}
style_function = _style_stroke
else: # use folium default
def _style_default(x):
return {**style_kwds}
style_function = _style_default
if highlight:
if "fillOpacity" not in highlight_kwds:
highlight_kwds["fillOpacity"] = 0.75
def _style_highlight(x):
return {**highlight_kwds}
highlight_function = _style_highlight
else:
highlight_function = None
# define default for points
if marker_type is None:
marker_type = "circle_marker"
marker = marker_type
if isinstance(marker_type, str):
if marker_type == "marker":
marker = folium.Marker(**marker_kwds)
elif marker_type == "circle":
marker = folium.Circle(**marker_kwds)
elif marker_type == "circle_marker":
marker_kwds["radius"] = marker_kwds.get("radius", 2)
marker_kwds["fill"] = marker_kwds.get("fill", True)
marker = folium.CircleMarker(**marker_kwds)
else:
raise ValueError(
"Only 'marker', 'circle', and 'circle_marker' are "
"supported as marker values"
)
# remove additional geometries
if isinstance(gdf, geopandas.GeoDataFrame):
non_active_geoms = [
name
for name, val in (gdf.dtypes == "geometry").items()
if val and name != gdf.geometry.name
]
gdf = gdf.drop(columns=non_active_geoms)
# prepare tooltip and popup
if isinstance(gdf, geopandas.GeoDataFrame):
# add named index to the tooltip
if gdf.index.name is not None:
gdf = gdf.reset_index()
# specify fields to show in the tooltip
tooltip = _tooltip_popup("tooltip", tooltip, gdf, **tooltip_kwds)
popup = _tooltip_popup("popup", popup, gdf, **popup_kwds)
else:
tooltip = None
popup = None
# add dataframe to map
folium.GeoJson(
gdf.__geo_interface__,
tooltip=tooltip,
popup=popup,
marker=marker,
style_function=style_function,
highlight_function=highlight_function,
**kwargs,
).add_to(m)
if legend:
# NOTE: overlaps will be resolved in branca #88
caption = column if not column == "__plottable_column" else ""
caption = legend_kwds.pop("caption", caption)
if categorical:
categories = cat.categories.to_list()
legend_colors = legend_colors.tolist()
if nan_idx.any() and nan_color:
categories.append(missing_kwds.pop("label", "NaN"))
legend_colors.append(nan_color)
_categorical_legend(m, caption, categories, legend_colors)
elif column is not None:
cbar = legend_kwds.pop("colorbar", True)
colormap_kwds = {}
if "max_labels" in legend_kwds:
colormap_kwds["max_labels"] = legend_kwds.pop("max_labels")
if scheme:
cb_colors = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, binning.k)(range(binning.k))
)
if cbar:
if legend_kwds.pop("scale", True):
index = [vmin] + binning.bins.tolist()
else:
index = None
colorbar = bc.colormap.StepColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
index=index,
**colormap_kwds,
)
else:
fmt = legend_kwds.pop("fmt", "{:.2f}")
if "labels" in legend_kwds:
categories = legend_kwds["labels"]
else:
categories = binning.get_legend_classes(fmt)
show_interval = legend_kwds.pop("interval", False)
if not show_interval:
categories = [c[1:-1] for c in categories]
if nan_idx.any() and nan_color:
categories.append(missing_kwds.pop("label", "NaN"))
cb_colors = np.append(cb_colors, nan_color)
_categorical_legend(m, caption, categories, cb_colors)
else:
if isinstance(cmap, bc.colormap.ColorMap):
colorbar = cmap
else:
mp_cmap = cm.get_cmap(cmap)
cb_colors = np.apply_along_axis(
colors.to_hex, 1, mp_cmap(range(mp_cmap.N))
)
# linear legend
if mp_cmap.N > 20:
colorbar = bc.colormap.LinearColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
**colormap_kwds,
)
# steps
else:
colorbar = bc.colormap.StepColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
**colormap_kwds,
)
if cbar:
if nan_idx.any() and nan_color:
_categorical_legend(
m, "", [missing_kwds.pop("label", "NaN")], [nan_color]
)
m.add_child(colorbar)
return m
|
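The record above is the tail of GeoDataFrame.explore-style interactive plotting code built on folium. A minimal usage sketch of that API, assuming geopandas >= 0.10 with folium, matplotlib, mapclassify, and branca installed; the bundled dataset name and the column are illustrative (the sample dataset was shipped with older geopandas releases — substitute any polygon layer):

import geopandas

# Illustrative polygon layer; any GeoDataFrame with a numeric column works.
gdf = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
# Choropleth with a classification scheme, mirroring the scheme/k branch above.
m = gdf.explore(column="pop_est", cmap="viridis", scheme="Quantiles", k=5, legend=True)
m.save("map.html")  # m is the folium.Map instance returned by the plotting helper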
43,792 |
def pauli_mult_with_phase(pauli_1, pauli_2, wire_map=None):
r"""Multiply two Pauli words together including the global phase.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations. The phase is computed by
looking at the number of times we have the products XY, YZ, or ZX (adds a
phase of :math:`i`), or YX, ZY, XZ (adds a phase of :math:`-i`).
Args:
pauli_1 (.Operation): A Pauli word.
pauli_2 (.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
tuple[.Operation, complex]: The product of ``pauli_1`` and ``pauli_2``, and the
global phase.
**Example**
This function works the same as ``pauli_mult`` but also returns the global
phase accumulated as a result of the Pauli product rules
:math:`\sigma_i \sigma_j = i \sigma_k`.
>>> from pennylane.pauli import pauli_mult_with_phase
>>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
>>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
>>> product, phase = pauli_mult_with_phase(pauli_1, pauli_2)
>>> product
PauliZ(wires=[0])
>>> phase
1j
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Get the product; use our earlier function
pauli_product = pauli_mult(pauli_1, pauli_2, wire_map)
pauli_1_names = [pauli_1.name] if isinstance(pauli_1.name, str) else pauli_1.name
pauli_2_names = [pauli_2.name] if isinstance(pauli_2.name, str) else pauli_2.name
pauli_1_placeholder = 0
pauli_2_placeholder = 0
phase = 1
for wire in wire_map.keys():
if wire in pauli_1.wires:
pauli_1_op_name = pauli_1_names[pauli_1_placeholder]
pauli_1_placeholder += 1
else:
pauli_1_op_name = "Identity"
if wire in pauli_2.wires:
pauli_2_op_name = pauli_2_names[pauli_2_placeholder]
pauli_2_placeholder += 1
else:
pauli_2_op_name = "Identity"
# If we have identities anywhere we don't pick up a phase
if pauli_1_op_name == "Identity" or pauli_2_op_name == "Identity":
continue
# Likewise, no additional phase if the Paulis are the same
if pauli_1_op_name == pauli_2_op_name:
continue
# Use Pauli commutation rules to determine the phase
pauli_ordering = (pauli_1_op_name, pauli_2_op_name)
pos_phases = [("PauliX", "PauliY"), ("PauliY", "PauliZ"), ("PauliZ", "PauliX")]
if pauli_ordering in pos_phases:
phase *= 1j
else:
phase *= -1j
return pauli_product, phase
|
def pauli_mult_with_phase(pauli_1, pauli_2, wire_map=None):
r"""Multiply two Pauli words together including the global phase.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations. The phase is computed by
looking at the number of times we have the products XY, YZ, or ZX (adds a
phase of :math:`i`), or YX, ZY, XZ (adds a phase of :math:`-i`).
Args:
pauli_1 (.Operation): A Pauli word.
pauli_2 (.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
tuple[.Operation, complex]: The product of ``pauli_1`` and ``pauli_2``, and the
global phase.
**Example**
This function works the same as :func:`~.pauli_mult` but also returns the global
phase accumulated as a result of the Pauli product rules
:math:`\sigma_i \sigma_j = i \sigma_k`.
>>> from pennylane.pauli import pauli_mult_with_phase
>>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
>>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
>>> product, phase = pauli_mult_with_phase(pauli_1, pauli_2)
>>> product
PauliZ(wires=[0])
>>> phase
1j
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Get the product; use our earlier function
pauli_product = pauli_mult(pauli_1, pauli_2, wire_map)
pauli_1_names = [pauli_1.name] if isinstance(pauli_1.name, str) else pauli_1.name
pauli_2_names = [pauli_2.name] if isinstance(pauli_2.name, str) else pauli_2.name
pauli_1_placeholder = 0
pauli_2_placeholder = 0
phase = 1
for wire in wire_map.keys():
if wire in pauli_1.wires:
pauli_1_op_name = pauli_1_names[pauli_1_placeholder]
pauli_1_placeholder += 1
else:
pauli_1_op_name = "Identity"
if wire in pauli_2.wires:
pauli_2_op_name = pauli_2_names[pauli_2_placeholder]
pauli_2_placeholder += 1
else:
pauli_2_op_name = "Identity"
# If we have identities anywhere we don't pick up a phase
if pauli_1_op_name == "Identity" or pauli_2_op_name == "Identity":
continue
# Likewise, no additional phase if the Paulis are the same
if pauli_1_op_name == pauli_2_op_name:
continue
# Use Pauli commutation rules to determine the phase
pauli_ordering = (pauli_1_op_name, pauli_2_op_name)
pos_phases = [("PauliX", "PauliY"), ("PauliY", "PauliZ"), ("PauliZ", "PauliX")]
if pauli_ordering in pos_phases:
phase *= 1j
else:
phase *= -1j
return pauli_product, phase
|
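A standalone sketch of the phase rule the function above applies, written without PennyLane so the cyclic convention is easy to check; all names here are illustrative:

# Cyclic order X -> Y -> Z picks up +i; the reverse order picks up -i.
POS_PHASES = {("X", "Y"), ("Y", "Z"), ("Z", "X")}

def single_pauli_phase(a, b):
    """Phase of sigma_a * sigma_b for single-qubit Paulis 'I', 'X', 'Y', 'Z'."""
    if "I" in (a, b) or a == b:
        return 1
    return 1j if (a, b) in POS_PHASES else -1j

assert single_pauli_phase("X", "Y") == 1j
assert single_pauli_phase("Y", "X") == -1j
assert single_pauli_phase("Z", "Z") == 1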
30,335 |
def get_group():
"""
retrieve a single Group
"""
group_type = demisto.args().get('group_type')
group_id = int(demisto.args().get('group_id'))
response = get_group_request(group_type, group_id)
if group_type == 'adversaries':
data = response.get('data', {}).get('adversarie', {})
if group_type == 'campaigns':
data = response.get('data', {}).get('campaign', {})
if group_type == 'documents':
data = response.get('data', {}).get('document', {})
if group_type == 'emails':
data = response.get('data', {}).get('email', {})
if group_type == 'events':
data = response.get('data', {}).get('event', {})
if group_type == 'incidents':
data = response.get('data', {}).get('incident', {})
if group_type == 'intrusionSets':
data = response.get('data', {}).get('intrusionSet', {})
if group_type == 'reports':
data = response.get('data', {}).get('report', {})
if group_type == 'signatures':
data = response.get('data', {}).get('signature', {})
if group_type == 'threats':
data = response.get('data', {}).get('threat', {})
if response.get('status') == 'Success':
contents = {
'ID': data.get('id'),
'Name': data.get('name'),
'Owner': data.get('owner'),
'DateAdded': data.get('dateAdded'),
'EventDate': data.get('eventDate'),
'Status': data.get('status')
}
else:
return_error(response.get('message'))
context = {
'TC.Group(val.ID && val.ID === obj.ID)': contents
}
return_outputs(
tableToMarkdown('Group information', contents, removeNull=True),
context
)
|
def get_group():
"""
retrieve a single Group
"""
group_type = demisto.args().get('group_type')
group_id = int(demisto.args().get('group_id'))
response = get_group_request(group_type, group_id)
if group_type == 'adversaries':
data = response.get('adversarie', {})
if group_type == 'campaigns':
data = response.get('data', {}).get('campaign', {})
if group_type == 'documents':
data = response.get('data', {}).get('document', {})
if group_type == 'emails':
data = response.get('data', {}).get('email', {})
if group_type == 'events':
data = response.get('data', {}).get('event', {})
if group_type == 'incidents':
data = response.get('data', {}).get('incident', {})
if group_type == 'intrusionSets':
data = response.get('data', {}).get('intrusionSet', {})
if group_type == 'reports':
data = response.get('data', {}).get('report', {})
if group_type == 'signatures':
data = response.get('data', {}).get('signature', {})
if group_type == 'threats':
data = response.get('data', {}).get('threat', {})
if response.get('status') == 'Success':
contents = {
'ID': data.get('id'),
'Name': data.get('name'),
'Owner': data.get('owner'),
'DateAdded': data.get('dateAdded'),
'EventDate': data.get('eventDate'),
'Status': data.get('status')
}
else:
return_error(response.get('message'))
context = {
'TC.Group(val.ID && val.ID === obj.ID)': contents
}
return_outputs(
tableToMarkdown('Group information', contents, removeNull=True),
context
)
|
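For context, the lookups in the record above imply a response shaped roughly like the dict below; only the key names are taken from the code, every value is hypothetical:

sample_response = {
    "status": "Success",
    "data": {
        "incident": {
            "id": 12345,
            "name": "Example incident",
            "owner": "Example Org",
            "dateAdded": "2022-01-01T00:00:00Z",
            "eventDate": "2022-01-02T00:00:00Z",
            "status": "Open",
        }
    },
}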
4,113 |
def anno_gen(x: 'int') -> 'float':
"""
>>> gen = anno_gen(2)
>>> next(gen)
2.0
>>> ret, arg = sorted(anno_gen.__annotations__.items())
>>> print(ret[0]); print(str(ret[1]).strip("'")) # strip makes it pass with/without PEP563
return
float
>>> print(arg[0]); print(str(arg[1]).strip("'"))
x
int
"""
yield float(x)
|
def anno_gen(x: 'int') -> 'float':
"""
>>> gen = anno_gen(2)
>>> next(gen)
2.0
>>> ret, arg = sorted(anno_gen.__annotations__.items())
>>> print(ret[0]); print(str(ret[1]).strip("'")) # strip makes it pass with/without PEP563
return
float
>>> print(arg[0]); print(str(arg[1]).strip("'"))
x
int
"""
yield float(x)
|
27,401 |
def unmount_device(device):
"""
Unmount the given device if it is mounted.
This usueally happens with automounted data tracks.
If the given device is a symlink, the target will be checked.
"""
device = os.path.realpath(device)
logger.debug('possibly unmount real path %r', device)
proc = open('/proc/mounts').read()
if device in proc:
print('Device %s is mounted, unmounting' % device)
os.system('umount %s' % device)
|
def unmount_device(device):
"""
Unmount the given device if it is mounted.
This usually happens with automounted data tracks.
If the given device is a symlink, the target will be checked.
"""
device = os.path.realpath(device)
logger.debug('possibly unmount real path %r', device)
proc = open('/proc/mounts').read()
if device in proc:
print('Device %s is mounted, unmounting' % device)
os.system('umount %s' % device)
|
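A hedged variant of the same mount check written with subprocess rather than os.system; this is an alternative sketch, not the code recorded above, and it assumes a Linux /proc/mounts layout:

import os
import subprocess

def unmount_device_subprocess(device):
    """Unmount device if /proc/mounts lists it (first whitespace-separated field)."""
    device = os.path.realpath(device)
    with open("/proc/mounts") as fh:
        mounted = any(line.split()[0] == device for line in fh)
    if mounted:
        print("Device %s is mounted, unmounting" % device)
        subprocess.run(["umount", device], check=False)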
35,695 |
def regnet_y_128gf(**kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_128GF architecture from
`"Designing Network Design Spaces" <https://arxiv.org/abs/2003.13678>`_.
NOTE: Pretrained weights are not available for this model.
"""
if "pretrained" in kwargs and kwargs.pop("pretrained"):
raise ValueError("No pretrained weights available for regnet_y_128gf.")
params = BlockParams.from_init_params(depth=27, w_0=456, w_a=160.83, w_m=2.52, group_width=264, se_ratio=0.25)
return _regnet("regnet_y_128gf", params, pretrained=False, progress=False, **kwargs)
|
def regnet_y_128gf(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_128GF architecture from
`"Designing Network Design Spaces" <https://arxiv.org/abs/2003.13678>`_.
NOTE: Pretrained weights are not available for this model.
"""
if "pretrained" in kwargs and kwargs.pop("pretrained"):
raise ValueError("No pretrained weights available for regnet_y_128gf.")
params = BlockParams.from_init_params(depth=27, w_0=456, w_a=160.83, w_m=2.52, group_width=264, se_ratio=0.25)
return _regnet("regnet_y_128gf", params, pretrained=False, progress=False, **kwargs)
|
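Usage sketch for the constructor above, assuming the torchvision packaging of regnet_y_128gf; the model is very large, so this is illustrative rather than something to run casually:

import torch
from torchvision.models import regnet_y_128gf

model = regnet_y_128gf()          # randomly initialised weights, per the docstring
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)               # expected: torch.Size([1, 1000])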
32,312 |
def main(): # pragma: no cover
try:
demisto_params = demisto.params() | demisto.args()
last_run = demisto.getLastRun()
last_object_ids = last_run.get('ids')
if 'after' not in last_run:
after = dateparser.parse(demisto_params['after'].strip())
last_run = after.timestamp()
last_run = {LogType[LogType.AUTHENTICATION]: last_run,
LogType[LogType.ADMINISTRATION]: last_run,
LogType[LogType.TELEPHONY]: last_run}
else:
last_run = last_run['after']
request_order = last_run.get('request_order',
[LogType.AUTHENTICATION, LogType.ADMINISTRATION, LogType.TELEPHONY])
demisto_params['params'] = Params(**demisto_params, mintime=last_run)
client = Client(demisto_params)
get_events = GetEvents(client, request_order)
command = demisto.command()
if command == 'test-module':
get_events.aggregated_results()
demisto.results('ok')
elif command == 'duo-get-events' or command == 'fetch-events':
events = get_events.aggregated_results(last_object_ids=last_object_ids)
demisto.setLastRun(get_events.get_last_run(events))
send_events_to_xsiam(events, 'duo', 'duo')
if command == 'duo-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('Duo Logs', events, headerTransform=pascalToSpace),
raw_response=events,
)
return_results(command_results)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main(): # pragma: no cover
try:
demisto_params = demisto.params() | demisto.args()
last_run = demisto.getLastRun()
last_object_ids = last_run.get('ids')
if 'after' not in last_run:
after = dateparser.parse(demisto_params['after'].strip())
last_run = after.timestamp()
last_run = {LogType[LogType.AUTHENTICATION]: last_run,
LogType[LogType.ADMINISTRATION]: last_run,
LogType[LogType.TELEPHONY]: last_run}
else:
last_run = last_run['after']
request_order = last_run.get('request_order',
[LogType.AUTHENTICATION, LogType.ADMINISTRATION, LogType.TELEPHONY])
demisto_params['params'] = Params(**demisto_params, mintime=last_run)
client = Client(demisto_params)
get_events = GetEvents(client, request_order)
command = demisto.command()
if command == 'test-module':
get_events.aggregated_results()
demisto.results('ok')
elif command in ('duo-get-events', 'fetch-events'):
events = get_events.aggregated_results(last_object_ids=last_object_ids)
demisto.setLastRun(get_events.get_last_run(events))
send_events_to_xsiam(events, 'duo', 'duo')
if command == 'duo-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('Duo Logs', events, headerTransform=pascalToSpace),
raw_response=events,
)
return_results(command_results)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
54,035 |
def vertex_coloring(adjacency):
"""Color the vertices of a graph such that no two colors are adjacent.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] or dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
Returns
-------
dict[hashable, int]
A dict mapping each node of the graph to a color number.
Notes
-----
This algorithms works on any data structure that can be interpreted as a graph:
a network, a mesh, a volmesh, ...
For more info, see [1]_.
References
----------
.. [1] Chu-Carroll, M. *Graph Coloring Algorithms*.
Available at: http://scienceblogs.com/goodmath/2007/06/28/graph-coloring-algorithms-1/.
Warnings
--------
This is a greedy algorithm, so it might be slow for large networks.
Examples
--------
>>> import compas
>>> from compas.datastructures import Network
>>> network = Network.from_obj(compas.get('lines.obj'))
>>> key_color = vertex_coloring(network.adjacency)
>>> key = network.get_any_node()
>>> color = key_color[key]
>>> any(key_color[nbr] == color for nbr in network.neighbors(key))
False
"""
key_to_color = {}
key_to_degree = {key: len(adjacency[key]) for key in adjacency}
vertices = sorted(adjacency.keys(), key=lambda key: key_to_degree[key])
uncolored = deque(vertices[::-1])
current_color = 0
while uncolored:
a = uncolored.popleft()
key_to_color[a] = current_color
colored_with_current = [a]
for b in uncolored:
if not any(b in adjacency[key] for key in colored_with_current):
key_to_color[b] = current_color
colored_with_current.append(b)
for key in colored_with_current[1:]:
uncolored.remove(key)
current_color += 1
return key_to_color
|
def vertex_coloring(adjacency):
"""Color the vertices of a graph such that no two colors are adjacent.
Parameters
----------
adjacency : dict[hashable, dict[hashable, None]] or dict[hashable, sequence[hashable]]
An adjacency dictionary representing the connectivity of the graph
by mapping nodes identifiers to neighbour identifiers.
Examples of valid adjacency dicts are
* ``{0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}``
* ``{0: {1: None, 2: None, 3: None, 4: None}, 1: {0: None}, 2: {0: None}, 3: {0: None}, 4: {0: None}}``
Returns
-------
dict[hashable, int]
A dict mapping each node of the graph to a color number.
Notes
-----
This algorithms works on any data structure that can be interpreted as a graph, e.g.
networks, meshes, volmeshes, etc..
For more info, see [1]_.
References
----------
.. [1] Chu-Carroll, M. *Graph Coloring Algorithms*.
Available at: http://scienceblogs.com/goodmath/2007/06/28/graph-coloring-algorithms-1/.
Warnings
--------
This is a greedy algorithm, so it might be slow for large networks.
Examples
--------
>>> import compas
>>> from compas.datastructures import Network
>>> network = Network.from_obj(compas.get('lines.obj'))
>>> key_color = vertex_coloring(network.adjacency)
>>> key = network.get_any_node()
>>> color = key_color[key]
>>> any(key_color[nbr] == color for nbr in network.neighbors(key))
False
"""
key_to_color = {}
key_to_degree = {key: len(adjacency[key]) for key in adjacency}
vertices = sorted(adjacency.keys(), key=lambda key: key_to_degree[key])
uncolored = deque(vertices[::-1])
current_color = 0
while uncolored:
a = uncolored.popleft()
key_to_color[a] = current_color
colored_with_current = [a]
for b in uncolored:
if not any(b in adjacency[key] for key in colored_with_current):
key_to_color[b] = current_color
colored_with_current.append(b)
for key in colored_with_current[1:]:
uncolored.remove(key)
current_color += 1
return key_to_color
|
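Usage sketch for the greedy coloring above, using one of the adjacency dicts from its own docstring; the call assumes vertex_coloring as defined in the record is in scope:

adjacency = {0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}
coloring = vertex_coloring(adjacency)   # e.g. {0: 0, 4: 1, 3: 1, 2: 1, 1: 1}
# No edge joins two nodes of the same color.
assert all(coloring[u] != coloring[v] for u, nbrs in adjacency.items() for v in nbrs)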
7,857 |
def test_atoms_material_cell(uo2, water):
""" Test if correct number of atoms is returned.
Also check if Cell.atoms still works after volume/material was changed
"""
c = openmc.Cell(fill=uo2)
c.volume = 2.0
expected_nucs = ['U235', 'O16']
# Precalculate the expected number of atoms
M = ((atomic_mass('U235') + 2 * atomic_mass('O16'))/3)
expected_atoms = list()
expected_atoms.append(1/3 * uo2.density/M * AVOGADRO * 2.0) # U235
expected_atoms.append(2/3 * uo2.density/M * AVOGADRO * 2.0) # O16
tuples = list(c.atoms.items())
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == t[1]
# Change volume and check if OK
c.volume = 3.0
expected_atoms = list()
expected_atoms.append(1/3 * uo2.density/M * AVOGADRO * 3.0) # U235
expected_atoms.append(2/3 * uo2.density/M * AVOGADRO * 3.0) # O16
tuples = list(c.atoms.items())
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
# Change material and check if OK
c.fill = water
expected_nucs = ['H1', 'O16']
M = ((2 * atomic_mass('H1') + atomic_mass('O16'))/3)
expected_atoms = list()
expected_atoms.append(2/3 * water.density/M * AVOGADRO * 3.0) # H1
expected_atoms.append(1/3 * water.density/M * AVOGADRO * 3.0) # O16
tuples = list(c.atoms.items())
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
|
def test_atoms_material_cell(uo2, water):
""" Test if correct number of atoms is returned.
Also check if Cell.atoms still works after volume/material was changed
"""
c = openmc.Cell(fill=uo2)
c.volume = 2.0
expected_nucs = ['U235', 'O16']
# Precalculate the expected number of atoms
M = ((atomic_mass('U235') + 2 * atomic_mass('O16'))/3)
expected_atoms = list()
expected_atoms.append(1/3 * uo2.density/M * AVOGADRO * 2.0) # U235
expected_atoms.append(2/3 * uo2.density/M * AVOGADRO * 2.0) # O16
tuples = list(c.atoms.items())
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == t[1]
# Change volume and check if OK
c.volume = 3.0
expected_atoms = list()
expected_atoms.append(1/3 * uo2.density/M * AVOGADRO * 3.0) # U235
expected_atoms.append(2/3 * uo2.density/M * AVOGADRO * 3.0) # O16
tuples = list(c.atoms.items())
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
# Change material and check if OK
c.fill = water
expected_nucs = ['H1', 'O16']
M = (2 * atomic_mass('H1') + atomic_mass('O16')) / 3
expected_atoms = list()
expected_atoms.append(2/3 * water.density/M * AVOGADRO * 3.0) # H1
expected_atoms.append(1/3 * water.density/M * AVOGADRO * 3.0) # O16
tuples = list(c.atoms.items())
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
|
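A worked sketch of the number-of-atoms formula exercised by the test above; the density and molar masses below are rough, illustrative values, not the fixture's:

AVOGADRO = 6.02214076e23          # 1/mol
density = 10.0                    # g/cm^3, illustrative UO2-like value
volume = 2.0                      # cm^3
M_mix = (235.0 + 2 * 16.0) / 3    # average molar mass per atom, g/mol
atoms_u235 = (1 / 3) * density / M_mix * AVOGADRO * volume
atoms_o16 = (2 / 3) * density / M_mix * AVOGADRO * volume
print(f"U235: {atoms_u235:.3e} atoms, O16: {atoms_o16:.3e} atoms")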