id | original | modified
---|---|---|
43,676 |
def _terms_to_qubit_operator(coeffs, ops, wires=None):
r"""Converts a 2-tuple of complex coefficients and PennyLane operations to
OpenFermion ``QubitOperator``.
This function is the inverse of ``_qubit_operator_to_terms``.
**Example usage:**
>>> coeffs = np.array([0.1, 0.2])
>>> ops = [
... qml.operation.Tensor(qml.PauliX(wires=['w0'])),
... qml.operation.Tensor(qml.PauliY(wires=['w0']), qml.PauliZ(wires=['w2']))
... ]
>>> _terms_to_qubit_operator(coeffs, ops, wires=Wires(['w0', 'w1', 'w2']))
0.1 [X0] +
0.2 [Y0 Z2]
Args:
coeffs (array[complex]):
coefficients for each observable, same length as ops
ops (Iterable[pennylane.operation.Observable]): List of PennyLane observables as
Tensor products of Pauli observables
wires (Wires, list, tuple, dict): Custom wire mapping for translating from Pennylane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only consecutive-int-valued dict (for wire-to-qubit conversion) is
accepted. If None, will use identiy map. Defaults to None.
Returns:
QubitOperator: an instance of OpenFermion's ``QubitOperator``.
"""
all_wires = Wires.all_wires([op.wires for op in ops], sort=True)
# n_all_wires = len(all_wires)
if wires is not None:
qubit_indexed_wires = _proc_wires(wires,)
if not set(all_wires).issubset(set(qubit_indexed_wires)):
raise ValueError("Supplied `wires` does not cover all wires defined in `ops`.")
else:
qubit_indexed_wires = all_wires
q_op = QubitOperator()
for coeff, op in zip(coeffs, ops):
# Pauli axis names, note s[-1] expects only 'Pauli{X,Y,Z}'
pauli_names = [s[-1] for s in op.name]
extra_obsvbs = set(op.name) - {"PauliX", "PauliY", "PauliZ", "Identity"}
if extra_obsvbs != set():
raise ValueError(
"Expected only PennyLane observables PauliX/Y/Z or Identity, "
+ "but also got {}.".format(extra_obsvbs)
)
if op.name == ["Identity"] and len(op.wires) == 1:
term_str = ""
else:
term_str = " ".join(
[
"{}{}".format(pauli, qubit_indexed_wires.index(wire))
for pauli, wire in zip(pauli_names, op.wires)
]
)
# This is how one makes QubitOperator in OpenFermion
q_op += coeff * QubitOperator(term_str)
return q_op
|
def _terms_to_qubit_operator(coeffs, ops, wires=None):
r"""Converts a 2-tuple of complex coefficients and PennyLane operations to
OpenFermion ``QubitOperator``.
This function is the inverse of ``_qubit_operator_to_terms``.
**Example usage:**
>>> coeffs = np.array([0.1, 0.2])
>>> ops = [
... qml.operation.Tensor(qml.PauliX(wires=['w0'])),
... qml.operation.Tensor(qml.PauliY(wires=['w0']), qml.PauliZ(wires=['w2']))
... ]
>>> _terms_to_qubit_operator(coeffs, ops, wires=Wires(['w0', 'w1', 'w2']))
0.1 [X0] +
0.2 [Y0 Z2]
Args:
coeffs (array[complex]):
coefficients for each observable, same length as ops
ops (Iterable[pennylane.operation.Observable]): List of PennyLane observables as
Tensor products of Pauli observables
wires (Wires, list, tuple, dict): Custom wire mapping for translating from Pennylane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only consecutive-int-valued dict (for wire-to-qubit conversion) is
accepted. If None, will use identity map.
Returns:
QubitOperator: an instance of OpenFermion's ``QubitOperator``.
"""
all_wires = Wires.all_wires([op.wires for op in ops], sort=True)
# n_all_wires = len(all_wires)
if wires is not None:
qubit_indexed_wires = _proc_wires(wires,)
if not set(all_wires).issubset(set(qubit_indexed_wires)):
raise ValueError("Supplied `wires` does not cover all wires defined in `ops`.")
else:
qubit_indexed_wires = all_wires
q_op = QubitOperator()
for coeff, op in zip(coeffs, ops):
# Pauli axis names, note s[-1] expects only 'Pauli{X,Y,Z}'
pauli_names = [s[-1] for s in op.name]
extra_obsvbs = set(op.name) - {"PauliX", "PauliY", "PauliZ", "Identity"}
if extra_obsvbs != set():
raise ValueError(
"Expected only PennyLane observables PauliX/Y/Z or Identity, "
+ "but also got {}.".format(extra_obsvbs)
)
if op.name == ["Identity"] and len(op.wires) == 1:
term_str = ""
else:
term_str = " ".join(
[
"{}{}".format(pauli, qubit_indexed_wires.index(wire))
for pauli, wire in zip(pauli_names, op.wires)
]
)
# This is how one makes QubitOperator in OpenFermion
q_op += coeff * QubitOperator(term_str)
return q_op
|
5,417 |
def _parse_settings_arp(opts, iface):
"""
Add shared settings for arp used by balance-rr, balance-xor bonding types.
"""
ret = {}
if "arp_interval" in opts:
try:
int(opts["arp_interval"])
ret.update({"arp_interval": opts["arp_interval"]})
except Exception: # pylint: disable=broad-except
_raise_error_iface(iface, "arp_interval", "integer")
# ARP targets in n.n.n.n form
valid = "list of ips (up to 16)"
if "arp_ip_target" in opts:
if isinstance(opts["arp_ip_target"], list):
if 1 <= len(opts["arp_ip_target"]) <= 16:
ret.update({"arp_ip_target": ",".join(opts["arp_ip_target"])})
else:
_raise_error_iface(iface, "arp_ip_target", valid)
else:
_raise_error_iface(iface, "arp_ip_target", valid)
else:
_raise_error_iface(iface, "arp_ip_target", valid)
return ret
|
def _parse_settings_arp(opts, iface):
"""
Add shared settings for arp used by balance-rr, balance-xor bonding types.
"""
ret = {}
if "arp_interval" in opts:
try:
int(opts["arp_interval"])
ret.update({"arp_interval": opts["arp_interval"]})
except ValueError:
_raise_error_iface(iface, "arp_interval", "integer")
# ARP targets in n.n.n.n form
valid = "list of ips (up to 16)"
if "arp_ip_target" in opts:
if isinstance(opts["arp_ip_target"], list):
if 1 <= len(opts["arp_ip_target"]) <= 16:
ret.update({"arp_ip_target": ",".join(opts["arp_ip_target"])})
else:
_raise_error_iface(iface, "arp_ip_target", valid)
else:
_raise_error_iface(iface, "arp_ip_target", valid)
else:
_raise_error_iface(iface, "arp_ip_target", valid)
return ret
|
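The only behavioral change in this pair is narrowing the broad `except Exception` around `int(opts["arp_interval"])` to `except ValueError`. A minimal standalone sketch of the distinction (plain Python, not part of the Salt module above):

```python
# int("abc") raises ValueError (bad user input), while int(None) raises
# TypeError (a programming error). The narrowed handler treats only the
# former as a validation failure and lets the latter surface.
for value in ["7", "abc", None]:
    try:
        int(value)
        print(f"{value!r}: accepted")
    except ValueError:
        print(f"{value!r}: rejected as a non-integer value")
    except TypeError:
        print(f"{value!r}: programming error, not a validation failure")
```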
53,938 |
def parse_args(args, cls=DBTArgumentParser):
p = cls(
prog='dbt',
description='''
An ELT tool for managing your SQL transformations and data models.
For more documentation on these commands, visit: docs.getdbt.com
''',
epilog='''
Specify one of these sub-commands and you can find more help from
there.
'''
)
p.add_argument(
'--version',
action='dbtversion',
help='''
Show version information
''')
p.add_argument(
'-r',
'--record-timing-info',
default=None,
type=str,
help='''
When this option is passed, dbt will output low-level timing stats to
the specified file. Example: `--record-timing-info output.profile`
'''
)
p.add_argument(
'-d',
'--debug',
action='store_true',
default=None,
help='''
Display debug logging during dbt execution. Useful for debugging and
making bug reports.
'''
)
p.add_argument(
'--log-format',
choices=['text', 'json', 'default'],
default=None,
help='''Specify the log format, overriding the command's default.'''
)
p.add_argument(
'--no-write-json',
action='store_false',
default=None,
dest='write_json',
help='''
If set, skip writing the manifest and run_results.json files to disk
'''
)
colors_flag = p.add_mutually_exclusive_group()
colors_flag.add_argument(
'--use-colors',
action='store_const',
const=True,
default=None,
dest='use_colors',
help='''
Colorize the output DBT prints to the terminal. Output is colorized by
default and may also be set in a profile or at the command line.
Mutually exclusive with --no-use-colors
'''
)
colors_flag.add_argument(
'--no-use-colors',
action='store_const',
const=False,
dest='use_colors',
help='''
Do not colorize the output DBT prints to the terminal. Output is
colorized by default and may also be set in a profile or at the
command line.
Mutually exclusive with --use-colors
'''
)
p.add_argument(
'--printer-width',
dest='printer_width',
help='''
Sets the width of terminal output
'''
)
p.add_argument(
'--warn-error',
action='store_true',
default=None,
help='''
If dbt would normally warn, instead raise an exception. Examples
include --models that selects nothing, deprecations, configurations
with no associated models, invalid test configurations, and missing
sources/refs in tests.
'''
)
p.add_argument(
'--no-version-check',
dest='version_check',
action='store_false',
default=None,
help='''
If set, skip ensuring dbt's version matches the one specified in
the dbt_project.yml file ('require-dbt-version')
'''
)
p.add_optional_argument_inverse(
'--partial-parse',
enable_help='''
Allow for partial parsing by looking for and writing to a pickle file
in the target directory. This overrides the user configuration file.
''',
disable_help='''
Disallow partial parsing. This overrides the user configuration file.
''',
)
# if set, run dbt in single-threaded mode: thread count is ignored, and
# calls go through `map` instead of the thread pool. This is useful for
# getting performance information about aspects of dbt that normally run in
# a thread, as the profiler ignores child threads. Users should really
# never use this.
p.add_argument(
'--single-threaded',
action='store_true',
help=argparse.SUPPRESS,
)
# if set, will use the latest features from the static parser instead of
# the stable static parser.
p.add_argument(
'--use-experimental-parser',
action='store_true',
default=None,
help='''
Enables experimental parsing features.
'''
)
# if set, will disable the use of the stable static parser and instead
# always rely on jinja rendering.
p.add_argument(
'--no-static-parser',
default=None,
dest='static_parser',
action='store_false',
help='''
Disables the static parser.
'''
)
p.add_argument(
'--profiles-dir',
default=None,
dest='profiles_dir',
type=str,
help='''
Which directory to look in for the profiles.yml file. Default = {}
'''.format(DEFAULT_PROFILES_DIR)
)
p.add_argument(
'--no-anonymous-usage-stats',
action='store_false',
default=None,
dest='send_anonymous_usage_stats',
help='''
Do not send anonymous usage stat to dbt Labs
'''
)
p.add_argument(
'-x',
'--fail-fast',
dest='fail_fast',
action='store_true',
default=None,
help='''
Stop execution upon a first failure.
'''
)
p.add_argument(
'--event-buffer-size',
dest='event_buffer_size',
help='''
Sets the max number of events to buffer in EVENT_HISTORY
'''
)
p.add_argument(
'-q',
'--quiet',
action='store_true',
default=None,
help='''
Supress all non-error logging during dbt execution. Output from
{{ print() }} macro are still displayed.
'''
)
subs = p.add_subparsers(title="Available sub-commands")
base_subparser = _build_base_subparser()
# make the subcommands that have their own subcommands
docs_sub = _build_docs_subparser(subs, base_subparser)
docs_subs = docs_sub.add_subparsers(title="Available sub-commands")
source_sub = _build_source_subparser(subs, base_subparser)
source_subs = source_sub.add_subparsers(title="Available sub-commands")
_build_init_subparser(subs, base_subparser)
_build_clean_subparser(subs, base_subparser)
_build_debug_subparser(subs, base_subparser)
_build_deps_subparser(subs, base_subparser)
_build_list_subparser(subs, base_subparser)
build_sub = _build_build_subparser(subs, base_subparser)
snapshot_sub = _build_snapshot_subparser(subs, base_subparser)
run_sub = _build_run_subparser(subs, base_subparser)
compile_sub = _build_compile_subparser(subs, base_subparser)
parse_sub = _build_parse_subparser(subs, base_subparser)
generate_sub = _build_docs_generate_subparser(docs_subs, base_subparser)
test_sub = _build_test_subparser(subs, base_subparser)
seed_sub = _build_seed_subparser(subs, base_subparser)
# --threads, --no-version-check
_add_common_arguments(run_sub, compile_sub, generate_sub, test_sub,
seed_sub, parse_sub, build_sub)
# --select, --exclude
# list_sub sets up its own arguments.
_add_selection_arguments(
run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub)
# --defer
_add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub)
# --full-refresh
_add_table_mutability_arguments(run_sub, compile_sub, build_sub)
_build_docs_serve_subparser(docs_subs, base_subparser)
_build_source_freshness_subparser(source_subs, base_subparser)
_build_run_operation_subparser(subs, base_subparser)
if len(args) == 0:
p.print_help()
sys.exit(1)
parsed = p.parse_args(args)
# profiles_dir is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_profiles_dir'):
if parsed.sub_profiles_dir is not None:
parsed.profiles_dir = parsed.sub_profiles_dir
delattr(parsed, 'sub_profiles_dir')
if hasattr(parsed, 'profiles_dir'):
if parsed.profiles_dir is None:
parsed.profiles_dir = flags.PROFILES_DIR
else:
parsed.profiles_dir = os.path.abspath(parsed.profiles_dir)
# needs to be set before the other flags, because it's needed to
# read the profile that contains them
flags.PROFILES_DIR = parsed.profiles_dir
# version_check is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_version_check'):
if parsed.sub_version_check is False:
parsed.version_check = False
delattr(parsed, 'sub_version_check')
# fail_fast is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_fail_fast'):
if parsed.sub_fail_fast is True:
parsed.fail_fast = True
delattr(parsed, 'sub_fail_fast')
if getattr(parsed, 'project_dir', None) is not None:
expanded_user = os.path.expanduser(parsed.project_dir)
parsed.project_dir = os.path.abspath(expanded_user)
if not hasattr(parsed, 'which'):
# the user did not provide a valid subcommand. trigger the help message
# and exit with a error
p.print_help()
p.exit(1)
return parsed
|
def parse_args(args, cls=DBTArgumentParser):
p = cls(
prog='dbt',
description='''
An ELT tool for managing your SQL transformations and data models.
For more documentation on these commands, visit: docs.getdbt.com
''',
epilog='''
Specify one of these sub-commands and you can find more help from
there.
'''
)
p.add_argument(
'--version',
action='dbtversion',
help='''
Show version information
''')
p.add_argument(
'-r',
'--record-timing-info',
default=None,
type=str,
help='''
When this option is passed, dbt will output low-level timing stats to
the specified file. Example: `--record-timing-info output.profile`
'''
)
p.add_argument(
'-d',
'--debug',
action='store_true',
default=None,
help='''
Display debug logging during dbt execution. Useful for debugging and
making bug reports.
'''
)
p.add_argument(
'--log-format',
choices=['text', 'json', 'default'],
default=None,
help='''Specify the log format, overriding the command's default.'''
)
p.add_argument(
'--no-write-json',
action='store_false',
default=None,
dest='write_json',
help='''
If set, skip writing the manifest and run_results.json files to disk
'''
)
colors_flag = p.add_mutually_exclusive_group()
colors_flag.add_argument(
'--use-colors',
action='store_const',
const=True,
default=None,
dest='use_colors',
help='''
Colorize the output DBT prints to the terminal. Output is colorized by
default and may also be set in a profile or at the command line.
Mutually exclusive with --no-use-colors
'''
)
colors_flag.add_argument(
'--no-use-colors',
action='store_const',
const=False,
dest='use_colors',
help='''
Do not colorize the output DBT prints to the terminal. Output is
colorized by default and may also be set in a profile or at the
command line.
Mutually exclusive with --use-colors
'''
)
p.add_argument(
'--printer-width',
dest='printer_width',
help='''
Sets the width of terminal output
'''
)
p.add_argument(
'--warn-error',
action='store_true',
default=None,
help='''
If dbt would normally warn, instead raise an exception. Examples
include --models that selects nothing, deprecations, configurations
with no associated models, invalid test configurations, and missing
sources/refs in tests.
'''
)
p.add_argument(
'--no-version-check',
dest='version_check',
action='store_false',
default=None,
help='''
If set, skip ensuring dbt's version matches the one specified in
the dbt_project.yml file ('require-dbt-version')
'''
)
p.add_optional_argument_inverse(
'--partial-parse',
enable_help='''
Allow for partial parsing by looking for and writing to a pickle file
in the target directory. This overrides the user configuration file.
''',
disable_help='''
Disallow partial parsing. This overrides the user configuration file.
''',
)
# if set, run dbt in single-threaded mode: thread count is ignored, and
# calls go through `map` instead of the thread pool. This is useful for
# getting performance information about aspects of dbt that normally run in
# a thread, as the profiler ignores child threads. Users should really
# never use this.
p.add_argument(
'--single-threaded',
action='store_true',
help=argparse.SUPPRESS,
)
# if set, will use the latest features from the static parser instead of
# the stable static parser.
p.add_argument(
'--use-experimental-parser',
action='store_true',
default=None,
help='''
Enables experimental parsing features.
'''
)
# if set, will disable the use of the stable static parser and instead
# always rely on jinja rendering.
p.add_argument(
'--no-static-parser',
default=None,
dest='static_parser',
action='store_false',
help='''
Disables the static parser.
'''
)
p.add_argument(
'--profiles-dir',
default=None,
dest='profiles_dir',
type=str,
help='''
Which directory to look in for the profiles.yml file. Default = {}
'''.format(DEFAULT_PROFILES_DIR)
)
p.add_argument(
'--no-anonymous-usage-stats',
action='store_false',
default=None,
dest='send_anonymous_usage_stats',
help='''
Do not send anonymous usage stat to dbt Labs
'''
)
p.add_argument(
'-x',
'--fail-fast',
dest='fail_fast',
action='store_true',
default=None,
help='''
Stop execution upon a first failure.
'''
)
p.add_argument(
'--event-buffer-size',
dest='event_buffer_size',
help='''
Sets the max number of events to buffer in EVENT_HISTORY
'''
)
p.add_argument(
'-q',
'--quiet',
action='store_true',
default=None,
help='''
Suppress all non-error logging to stdout. Does not affect
{{ print() }} macro calls.
'''
)
subs = p.add_subparsers(title="Available sub-commands")
base_subparser = _build_base_subparser()
# make the subcommands that have their own subcommands
docs_sub = _build_docs_subparser(subs, base_subparser)
docs_subs = docs_sub.add_subparsers(title="Available sub-commands")
source_sub = _build_source_subparser(subs, base_subparser)
source_subs = source_sub.add_subparsers(title="Available sub-commands")
_build_init_subparser(subs, base_subparser)
_build_clean_subparser(subs, base_subparser)
_build_debug_subparser(subs, base_subparser)
_build_deps_subparser(subs, base_subparser)
_build_list_subparser(subs, base_subparser)
build_sub = _build_build_subparser(subs, base_subparser)
snapshot_sub = _build_snapshot_subparser(subs, base_subparser)
run_sub = _build_run_subparser(subs, base_subparser)
compile_sub = _build_compile_subparser(subs, base_subparser)
parse_sub = _build_parse_subparser(subs, base_subparser)
generate_sub = _build_docs_generate_subparser(docs_subs, base_subparser)
test_sub = _build_test_subparser(subs, base_subparser)
seed_sub = _build_seed_subparser(subs, base_subparser)
# --threads, --no-version-check
_add_common_arguments(run_sub, compile_sub, generate_sub, test_sub,
seed_sub, parse_sub, build_sub)
# --select, --exclude
# list_sub sets up its own arguments.
_add_selection_arguments(
run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub)
# --defer
_add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub)
# --full-refresh
_add_table_mutability_arguments(run_sub, compile_sub, build_sub)
_build_docs_serve_subparser(docs_subs, base_subparser)
_build_source_freshness_subparser(source_subs, base_subparser)
_build_run_operation_subparser(subs, base_subparser)
if len(args) == 0:
p.print_help()
sys.exit(1)
parsed = p.parse_args(args)
# profiles_dir is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_profiles_dir'):
if parsed.sub_profiles_dir is not None:
parsed.profiles_dir = parsed.sub_profiles_dir
delattr(parsed, 'sub_profiles_dir')
if hasattr(parsed, 'profiles_dir'):
if parsed.profiles_dir is None:
parsed.profiles_dir = flags.PROFILES_DIR
else:
parsed.profiles_dir = os.path.abspath(parsed.profiles_dir)
# needs to be set before the other flags, because it's needed to
# read the profile that contains them
flags.PROFILES_DIR = parsed.profiles_dir
# version_check is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_version_check'):
if parsed.sub_version_check is False:
parsed.version_check = False
delattr(parsed, 'sub_version_check')
# fail_fast is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_fail_fast'):
if parsed.sub_fail_fast is True:
parsed.fail_fast = True
delattr(parsed, 'sub_fail_fast')
if getattr(parsed, 'project_dir', None) is not None:
expanded_user = os.path.expanduser(parsed.project_dir)
parsed.project_dir = os.path.abspath(expanded_user)
if not hasattr(parsed, 'which'):
# the user did not provide a valid subcommand. trigger the help message
# and exit with a error
p.print_help()
p.exit(1)
return parsed
|
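Both versions of `parse_args` use the same argparse idiom for the color flags: `store_const` with `default=None` keeps "explicitly enabled", "explicitly disabled", and "unset" distinguishable, so a profile can supply the default later. A self-contained sketch of just that idiom (plain argparse, no dbt imports):

```python
import argparse

p = argparse.ArgumentParser()
group = p.add_mutually_exclusive_group()
group.add_argument('--use-colors', dest='use_colors',
                   action='store_const', const=True, default=None)
group.add_argument('--no-use-colors', dest='use_colors',
                   action='store_const', const=False)

print(p.parse_args([]).use_colors)                   # None  -> defer to config
print(p.parse_args(['--use-colors']).use_colors)     # True
print(p.parse_args(['--no-use-colors']).use_colors)  # False
```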
40,973 |
def backwards_func(apps, schema_editor):
Licence = apps.get_model("courses", "Licence")
LicenceTranslation = apps.get_model("courses", "LicenceTranslation")
for object in Licence.objects.all():
translation = _get_translation(object, LicenceTranslation)
object.name_deprecated = translation.name
object.save() # Note this only calls Model.save()
|
def backwards_func(apps, schema_editor):
Licence = apps.get_model("courses", "Licence")
LicenceTranslation = apps.get_model("courses", "LicenceTranslation")
for licence in Licence.objects.all():
        translation = _get_translation(licence, LicenceTranslation)
        licence.name_deprecated = translation.name
        licence.save()  # Note this only calls Model.save()
|
25,879 |
def send_raw_request(cli_ctx, method, url, headers=None, uri_parameters=None, # pylint: disable=too-many-locals
body=None, skip_authorization_header=False, resource=None, output_file=None):
import uuid
import requests
from azure.cli.core.commands.client_factory import UA_AGENT
result = {}
for s in headers or []:
try:
temp = shell_safe_json_parse(s)
result.update(temp)
except CLIError:
key, value = s.split('=', 1)
result[key] = value
headers = result
headers.update({
'User-Agent': UA_AGENT,
'x-ms-client-request-id': str(uuid.uuid4()),
})
result = {}
for s in uri_parameters or []:
try:
temp = shell_safe_json_parse(s)
result.update(temp)
except CLIError:
key, value = s.split('=', 1)
result[key] = value
uri_parameters = result or None
if not skip_authorization_header:
from azure.cli.core._profile import Profile
if not resource:
endpoints = cli_ctx.cloud.endpoints
for p in [x for x in dir(endpoints) if not x.startswith('_')]:
value = getattr(endpoints, p)
if isinstance(value, six.string_types) and url.lower().startswith(value.lower()):
resource = value
break
profile = Profile()
token_info, _, _ = profile.get_raw_token(resource)
logger.debug('Retrievd AAD token for resource: %s', resource or 'ARM')
toke_type, token, _ = token_info
headers = headers or {}
headers['Authorization'] = '{} {}'.format(toke_type, token)
r = requests.request(method, url, params=uri_parameters, data=body, headers=headers,
verify=not should_disable_connection_verify())
if not r.ok:
reason = r.reason
if r.text:
reason += '({})'.format(r.text)
raise CLIError(reason)
if output_file:
with open(output_file, 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
return r
|
def send_raw_request(cli_ctx, method, url, headers=None, uri_parameters=None, # pylint: disable=too-many-locals
body=None, skip_authorization_header=False, resource=None, output_file=None):
import uuid
import requests
from azure.cli.core.commands.client_factory import UA_AGENT
result = {}
for s in headers or []:
try:
temp = shell_safe_json_parse(s)
result.update(temp)
except CLIError:
key, value = s.split('=', 1)
result[key] = value
headers = result
headers.update({
'User-Agent': UA_AGENT,
'x-ms-client-request-id': str(uuid.uuid4()),
})
result = {}
for s in uri_parameters or []:
try:
temp = shell_safe_json_parse(s)
result.update(temp)
except CLIError:
key, value = s.split('=', 1)
result[key] = value
uri_parameters = result or None
if not skip_authorization_header:
from azure.cli.core._profile import Profile
if not resource:
endpoints = cli_ctx.cloud.endpoints
for p in [x for x in dir(endpoints) if not x.startswith('_')]:
value = getattr(endpoints, p)
if isinstance(value, six.string_types) and url.lower().startswith(value.lower()):
resource = value
break
profile = Profile()
token_info, _, _ = profile.get_raw_token(resource)
logger.debug('Retrievd AAD token for resource: %s', resource or 'ARM')
token_type, token, _ = token_info
headers = headers or {}
        headers['Authorization'] = '{} {}'.format(token_type, token)
r = requests.request(method, url, params=uri_parameters, data=body, headers=headers,
verify=not should_disable_connection_verify())
if not r.ok:
reason = r.reason
if r.text:
reason += '({})'.format(r.text)
raise CLIError(reason)
if output_file:
with open(output_file, 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
return r
|
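The header and URI-parameter loops in `send_raw_request` accept each item either as a JSON object or as a `key=value` pair. A standalone approximation of that parsing (using the stdlib `json` module and `ValueError` in place of `shell_safe_json_parse` and `CLIError`; the helper name is illustrative):

```python
import json

def _parse_kv_or_json(items):
    """Merge items given either as JSON objects or as key=value pairs."""
    merged = {}
    for item in items or []:
        try:
            merged.update(json.loads(item))
        except ValueError:
            key, value = item.split('=', 1)
            merged[key] = value
    return merged

print(_parse_kv_or_json(['{"Accept": "application/json"}', 'x-custom=1']))
# {'Accept': 'application/json', 'x-custom': '1'}
```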
40,538 |
def load_arguments(self, _):
workspace_name_type = CLIArgumentType(options_list=['--workspace-name', '-w'], help='Name of the Quantum Workspace. You can configure the default workspace using `az quantum workspace set`.', id_part=None, required=False)
storage_account_name_type = CLIArgumentType(options_list=['--storage-account', '-a'], help='Name of the storage account to be used by a quantum workspace.')
program_args_type = CLIArgumentType(nargs='*', help='List of arguments expected by the Q# operation specified as --name=value after `--`.')
target_id_type = CLIArgumentType(options_list=['--target-id', '-t'], help='Execution engine for quantum computing jobs. When a workspace is configured with a set of provider, they each enable one or more targets.')
project_type = CLIArgumentType(help='The location of the Q# project to submit. Defaults to current folder.')
job_name_type = CLIArgumentType(help='A friendly name to give to this run of the program.')
job_id_type = CLIArgumentType(options_list=['--job-id', '-j'], help='Job unique identifier in GUID format.')
shots_type = CLIArgumentType(help='The number of times to run the Q# program on the given target.')
no_build_type = CLIArgumentType(help='If specified, the Q# program is not built before submitting.')
storage_type = CLIArgumentType(help='If specified, the ConnectionString of an Azure Storage is used to store job data and results.')
max_poll_wait_secs_type = CLIArgumentType(help='Poll time in seconds to query Azure Quantum for results of the corresponding job.')
tag_type = CLIArgumentType(help='Show only quantum workspaces that have associated the specified tag.')
skip_role_assignment_type = CLIArgumentType(help='Skips the role assignment step for the quantum workspace in the storage account.')
provider_id_type = CLIArgumentType(options_list=['--provider-id', '-p'], help='Identifier of an Azure Quantum provider.')
sku_type = CLIArgumentType(options_list=['--sku', '-k'], help='Identifies a plan or SKU offered by an Azure Quantum provider.')
with self.argument_context('quantum workspace') as c:
c.argument('workspace_name', workspace_name_type)
c.argument('storage_account', storage_account_name_type)
c.argument('tag', tag_type)
c.argument('skip_role_assignment', skip_role_assignment_type)
with self.argument_context('quantum target') as c:
c.argument('workspace_name', workspace_name_type)
c.argument('target_id', target_id_type)
with self.argument_context('quantum job') as c:
c.argument('workspace_name', workspace_name_type)
c.argument('job_id', job_id_type)
c.argument('target_id', target_id_type)
c.argument('project', project_type)
c.argument('job_name', job_name_type)
c.argument('shots', shots_type)
c.argument('storage', storage_type)
c.argument('no_build', no_build_type)
c.argument('max_poll_wait_secs', max_poll_wait_secs_type)
with self.argument_context('quantum job submit') as c:
c.positional('program_args', program_args_type)
with self.argument_context('quantum execute') as c:
c.argument('workspace_name', workspace_name_type)
c.argument('target_id', target_id_type)
c.argument('project', project_type)
c.argument('job_name', job_name_type)
c.argument('shots', shots_type)
c.argument('storage', storage_type)
c.argument('no_build', no_build_type)
c.positional('program_args', program_args_type)
with self.argument_context('quantum run') as c:
c.argument('workspace_name', workspace_name_type)
c.argument('target_id', target_id_type)
c.argument('project', project_type)
c.argument('job_name', job_name_type)
c.argument('shots', shots_type)
c.argument('storage', storage_type)
c.argument('no_build', no_build_type)
c.positional('program_args', program_args_type)
with self.argument_context('quantum offerings') as c:
c.argument('provider_id', provider_id_type)
c.argument('sku', sku_type)
|
def load_arguments(self, _):
workspace_name_type = CLIArgumentType(options_list=['--workspace-name', '-w'], help='Name of the Quantum Workspace. You can configure the default workspace using `az quantum workspace set`.', id_part=None, required=False)
storage_account_name_type = CLIArgumentType(options_list=['--storage-account', '-a'], help='Name of the storage account to be used by a quantum workspace.')
program_args_type = CLIArgumentType(nargs='*', help='List of arguments expected by the Q# operation specified as --name=value after `--`.')
target_id_type = CLIArgumentType(options_list=['--target-id', '-t'], help='Execution engine for quantum computing jobs. When a workspace is configured with a set of provider, they each enable one or more targets.')
project_type = CLIArgumentType(help='The location of the Q# project to submit. Defaults to current folder.')
job_name_type = CLIArgumentType(help='A friendly name to give to this run of the program.')
job_id_type = CLIArgumentType(options_list=['--job-id', '-j'], help='Job unique identifier in GUID format.')
shots_type = CLIArgumentType(help='The number of times to run the Q# program on the given target.')
no_build_type = CLIArgumentType(help='If specified, the Q# program is not built before submitting.')
storage_type = CLIArgumentType(help='If specified, the ConnectionString of an Azure Storage is used to store job data and results.')
max_poll_wait_secs_type = CLIArgumentType(help='Poll time in seconds to query Azure Quantum for results of the corresponding job.')
tag_type = CLIArgumentType(help='Show only quantum workspaces that have associated the specified tag.')
skip_role_assignment_type = CLIArgumentType(help='Skips the role assignment step for the quantum workspace in the storage account.')
provider_id_type = CLIArgumentType(options_list=['--provider-id', '-p'], help='Identifier of an Azure Quantum provider.')
sku_type = CLIArgumentType(options_list=['--sku', '-k'], help='Identify a plan or SKU offered by an Azure Quantum provider.')
with self.argument_context('quantum workspace') as c:
c.argument('workspace_name', workspace_name_type)
c.argument('storage_account', storage_account_name_type)
c.argument('tag', tag_type)
c.argument('skip_role_assignment', skip_role_assignment_type)
with self.argument_context('quantum target') as c:
c.argument('workspace_name', workspace_name_type)
c.argument('target_id', target_id_type)
with self.argument_context('quantum job') as c:
c.argument('workspace_name', workspace_name_type)
c.argument('job_id', job_id_type)
c.argument('target_id', target_id_type)
c.argument('project', project_type)
c.argument('job_name', job_name_type)
c.argument('shots', shots_type)
c.argument('storage', storage_type)
c.argument('no_build', no_build_type)
c.argument('max_poll_wait_secs', max_poll_wait_secs_type)
with self.argument_context('quantum job submit') as c:
c.positional('program_args', program_args_type)
with self.argument_context('quantum execute') as c:
c.argument('workspace_name', workspace_name_type)
c.argument('target_id', target_id_type)
c.argument('project', project_type)
c.argument('job_name', job_name_type)
c.argument('shots', shots_type)
c.argument('storage', storage_type)
c.argument('no_build', no_build_type)
c.positional('program_args', program_args_type)
with self.argument_context('quantum run') as c:
c.argument('workspace_name', workspace_name_type)
c.argument('target_id', target_id_type)
c.argument('project', project_type)
c.argument('job_name', job_name_type)
c.argument('shots', shots_type)
c.argument('storage', storage_type)
c.argument('no_build', no_build_type)
c.positional('program_args', program_args_type)
with self.argument_context('quantum offerings') as c:
c.argument('provider_id', provider_id_type)
c.argument('sku', sku_type)
|
30,338 |
def get_group_security_labels():
"""
Retrieve a Group’s Security Labels
"""
group_type = demisto.args().get('group_type')
group_id = int(demisto.args().get('group_id'))
contents = []
headers = ['Name', 'Description', 'DateAdded']
response = get_group_security_labels_request(group_type, group_id)
data = response.get('data', {}).get('securityLabel', [])
if response.get('status') == 'Success':
for security_label in data:
contents.append({
'Name': security_label.get('name'),
'Description': security_label.get('description'),
'DateAdded': security_label.get('dateAdded')
})
else:
return_error(response.get('message'))
context = {
'TC.Group(val.Name && val.Name === obj.Name)': contents
}
return_outputs(
tableToMarkdown('Group Security Labels', contents, headers, removeNull=True),
context
)
|
def get_group_security_labels():
"""
Retrieve a Group’s Security Labels
"""
group_type = demisto.args().get('group_type')
group_id = int(demisto.args().get('group_id'))
contents = []
headers = ['Name', 'Description', 'DateAdded']
response = get_group_security_labels_request(group_type, group_id)
data = response.get('data', {}).get('securityLabel', [])
if response.get('status') == 'Success':
for security_label in data:
contents.append({
'Name': security_label.get('name'),
'Description': security_label.get('description'),
'DateAdded': security_label.get('dateAdded')
})
else:
return_error(response.get('message'))
context = {
'TC.Group(val.Name && val.Name === obj.Name)': contents
}
return_outputs(
tableToMarkdown('ThreatConnect Group Security Labels', contents, headers, removeNull=True),
context
)
|
6,581 |
def validate_account_for_perpetual_inventory(gl_map):
if cint(erpnext.is_perpetual_inventory_enabled(gl_map[0].company)) and gl_map[0].voucher_type=="Journal Entry":
account_list = [gl_entries.account for gl_entries in gl_map]
aii_accounts = [d.name for d in frappe.get_all("Account",
filters={'account_type': 'Stock', 'is_group': 0, 'company': gl_map[0].company})]
for account in account_list:
if account not in aii_accounts:
continue
account_bal, stock_bal, warehouse_list = get_stock_and_account_balance(account,
gl_map[0].posting_date, gl_map[0].company)
# In case of Journal Entry, there are no corresponding SL entries,
# hence deducting currency amount
account_bal -= flt(gl_map[0].debit) - flt(gl_map[0].credit)
if account_bal == stock_bal:
frappe.throw(_("Account: {0} can only be updated via Stock Transactions")
.format(account), StockAccountInvalidTransaction)
|
def validate_account_for_perpetual_inventory(gl_map):
if gl_map[0].voucher_type=="Journal Entry" and cint(erpnext.is_perpetual_inventory_enabled(gl_map[0].company)):
account_list = [gl_entries.account for gl_entries in gl_map]
aii_accounts = [d.name for d in frappe.get_all("Account",
filters={'account_type': 'Stock', 'is_group': 0, 'company': gl_map[0].company})]
for account in account_list:
if account not in aii_accounts:
continue
account_bal, stock_bal, warehouse_list = get_stock_and_account_balance(account,
gl_map[0].posting_date, gl_map[0].company)
# In case of Journal Entry, there are no corresponding SL entries,
# hence deducting currency amount
account_bal -= flt(gl_map[0].debit) - flt(gl_map[0].credit)
if account_bal == stock_bal:
frappe.throw(_("Account: {0} can only be updated via Stock Transactions")
.format(account), StockAccountInvalidTransaction)
|
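The reordered `if` condition matters because `and` short-circuits: the cheap voucher-type comparison now guards the more expensive perpetual-inventory lookup. A standalone illustration (the function below is a stand-in, not ERPNext code):

```python
def is_perpetual_inventory_enabled(company):
    # stand-in for the real settings lookup
    print("company settings looked up")
    return True

voucher_type, company = "Sales Invoice", "Acme"
if voucher_type == "Journal Entry" and is_perpetual_inventory_enabled(company):
    print("validate stock accounts")
else:
    print("skipped without touching company settings")
```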
14,613 |
def check_generate_predictions_console(use_threshold=False, all_probs=False):
# create some simple classification data without feature hashing
train_fs, test_fs = make_classification_data(num_examples=1000,
num_features=5)
# save the test feature set to an NDJ file
input_file = join(_my_dir, 'test',
'test_generate_predictions.jsonlines')
writer = NDJWriter(input_file, test_fs)
writer.write()
proba = use_threshold or all_probs
# create a learner that uses an SGD classifier
learner = Learner('SGDClassifier', probability=proba)
# train the learner with grid search
learner.train(train_fs, grid_search=True)
# get the predictions on the test featureset
predictions = learner.predict(test_fs)
# if we asked for probabilities, then use the threshold
# to convert them into binary predictions
if use_threshold:
threshold = 0.6
predictions = [int(p[1] >= threshold) for p in predictions]
else:
predictions = predictions.tolist()
threshold = None
# save the learner to a file
model_file = join(_my_dir, 'output',
'test_generate_predictions_console.model')
learner.save(model_file)
# now call main() from generate_predictions.py
generate_cmd = []
if use_threshold:
generate_cmd.append('-t {}'.format(threshold))
elif all_probs:
generate_cmd.append('-a')
generate_cmd.extend([model_file, input_file])
# we need to capture stdout since that's what main() writes to
err = ''
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = mystdout = StringIO()
sys.stderr = mystderr = StringIO()
gp.main(generate_cmd)
out = mystdout.getvalue()
err = mystderr.getvalue()
output_lines = out.strip().split('\n')[1:] # Skip headers
if all_probs:
# Ignore the id (first column) in output.
predictions_after_saving = [[float(p) for p in x.split('\t')[1:]]
for x in output_lines]
else:
# Ignore the id (first column) in output.
predictions_after_saving = [int(x.split('\t')[1])
for x in output_lines]
if all_probs:
assert_array_almost_equal(predictions, predictions_after_saving)
else:
eq_(predictions, predictions_after_saving)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
print(err)
|
def check_generate_predictions_console(use_threshold=False, all_labels=False):
# create some simple classification data without feature hashing
train_fs, test_fs = make_classification_data(num_examples=1000,
num_features=5)
# save the test feature set to an NDJ file
input_file = join(_my_dir, 'test',
'test_generate_predictions.jsonlines')
writer = NDJWriter(input_file, test_fs)
writer.write()
    proba = use_threshold or all_labels
# create a learner that uses an SGD classifier
learner = Learner('SGDClassifier', probability=proba)
# train the learner with grid search
learner.train(train_fs, grid_search=True)
# get the predictions on the test featureset
predictions = learner.predict(test_fs)
# if we asked for probabilities, then use the threshold
# to convert them into binary predictions
if use_threshold:
threshold = 0.6
predictions = [int(p[1] >= threshold) for p in predictions]
else:
predictions = predictions.tolist()
threshold = None
# save the learner to a file
model_file = join(_my_dir, 'output',
'test_generate_predictions_console.model')
learner.save(model_file)
# now call main() from generate_predictions.py
generate_cmd = []
if use_threshold:
generate_cmd.append('-t {}'.format(threshold))
    elif all_labels:
generate_cmd.append('-a')
generate_cmd.extend([model_file, input_file])
# we need to capture stdout since that's what main() writes to
err = ''
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = mystdout = StringIO()
sys.stderr = mystderr = StringIO()
gp.main(generate_cmd)
out = mystdout.getvalue()
err = mystderr.getvalue()
output_lines = out.strip().split('\n')[1:] # Skip headers
        if all_labels:
# Ignore the id (first column) in output.
predictions_after_saving = [[float(p) for p in x.split('\t')[1:]]
for x in output_lines]
else:
# Ignore the id (first column) in output.
predictions_after_saving = [int(x.split('\t')[1])
for x in output_lines]
        if all_labels:
assert_array_almost_equal(predictions, predictions_after_saving)
else:
eq_(predictions, predictions_after_saving)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
print(err)
|
36,603 |
def names(root=None):
"Get names of defined fonts (as a tuple)"
if not root:
root = tkinter._get_default_root('use font.names()')
return root.tk.splitlist(root.tk.call("font", "names"))
|
def names(root=None):
"Get names of defined fonts (as a tuple)"
if not root:
        tk = tkinter._get_default_root('use font.names()').tk
    else:
        tk = root.tk
    return tk.splitlist(tk.call("font", "names"))
|
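For context, `names` here is the module-level helper from the standard library's `tkinter.font`, and it needs a live Tk interpreter. A brief usage sketch (requires a display; the printed tuple is only an example):

```python
import tkinter
from tkinter import font

root = tkinter.Tk()        # requires an available display
print(font.names(root))    # e.g. ('TkDefaultFont', 'TkTextFont', ...)
root.destroy()
```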
12,240 |
def test_condarc_search_path_override(tmpdir):
custom_condarc = os.path.join(tmpdir, "condarc")
with open(custom_condarc, "w") as f:
f.write(
dals(
"""
channels: ["uniquely_named_channel"]
"""
)
)
# $CONDARC allows us to add one more file or directory to the search path
with env_var("CONDARC", custom_condarc):
reset_context()
assert "uniquely_named_channel" in context.channels
# $CONDA_RC_SEARCH_PATH overrides the default search path, but
# but we can still include CONDARC in that path :)
with env_var("CONDA_RC_SEARCH_PATH", "$CONDARC"):
reset_context()
assert "uniquely_named_channel" in context.channels
# If we set it to the empty string, then no file will be loaded
with env_var("CONDA_RC_SEARCH_PATH", ""):
reset_context()
assert "uniquely_named_channel" not in context.channels
|
def test_condarc_search_path_override(tmpdir):
custom_condarc = os.path.join(tmpdir, "condarc")
with open(custom_condarc, "w") as f:
f.write(
dals(
"""
channels: ["uniquely_named_channel"]
"""
)
)
# $CONDARC allows us to add one more file or directory to the search path
with env_var("CONDARC", custom_condarc):
reset_context()
assert "uniquely_named_channel" in context.channels
# $CONDA_RC_SEARCH_PATH overrides the default search path,
# but we can still include CONDARC in that path :)
with env_var("CONDA_RC_SEARCH_PATH", "$CONDARC"):
reset_context()
assert "uniquely_named_channel" in context.channels
# If we set it to the empty string, then no file will be loaded
with env_var("CONDA_RC_SEARCH_PATH", ""):
reset_context()
assert "uniquely_named_channel" not in context.channels
|
45,299 |
def _read(**kwargs) -> DataFrame:
"""
General documentation in `modin.pandas.read_csv`.
Experimental feature is simultaneous reading from multiple csv files which are
defined using glob pattern. Works only for local files.
Parameters
----------
**kwargs : dict
Keyword arguments in `modin.pandas.read_csv`.
Returns
-------
Modin DataFrame.
"""
from modin.data_management.factories.dispatcher import EngineDispatcher
Engine.subscribe(_update_engine)
try:
pd_obj = EngineDispatcher.read_csv_glob(**kwargs)
except AttributeError:
raise AttributeError("read_csv_glob() is only implemented for pandas on Ray.")
# This happens when `read_csv` returns a TextFileReader object for iterating through
if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
reader = pd_obj.read
pd_obj.read = lambda *args, **kwargs: DataFrame(
query_compiler=reader(*args, **kwargs)
)
return pd_obj
return DataFrame(query_compiler=pd_obj)
|
def _read(**kwargs) -> DataFrame:
"""
General documentation is available in `modin.pandas.read_csv`.
Experimental feature is simultaneous reading from multiple csv files which are
defined using glob pattern. Works only for local files.
Parameters
----------
**kwargs : dict
Keyword arguments in `modin.pandas.read_csv`.
Returns
-------
Modin DataFrame.
"""
from modin.data_management.factories.dispatcher import EngineDispatcher
Engine.subscribe(_update_engine)
try:
pd_obj = EngineDispatcher.read_csv_glob(**kwargs)
except AttributeError:
raise AttributeError("read_csv_glob() is only implemented for pandas on Ray.")
# This happens when `read_csv` returns a TextFileReader object for iterating through
if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
reader = pd_obj.read
pd_obj.read = lambda *args, **kwargs: DataFrame(
query_compiler=reader(*args, **kwargs)
)
return pd_obj
return DataFrame(query_compiler=pd_obj)
|
23,204 |
def source_foreign(args, stdin=None, stdout=None, stderr=None):
"""Sources a file written in a foreign shell language."""
env = XSH.env
ns = _SOURCE_FOREIGN_PARSER.parse_args(args)
ns.suppress_skip_message = (
env.get("FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE")
if ns.suppress_skip_message is None
else ns.suppress_skip_message
)
files = ()
if ns.prevcmd is not None:
pass # don't change prevcmd if given explicitly
elif os.path.isfile(ns.files_or_code[0]):
# we have filenames to source
ns.prevcmd = "".join(
["{} {}\n".format(ns.sourcer, f) for f in ns.files_or_code]
)
files = tuple(ns.files_or_code)
elif ns.prevcmd is None:
ns.prevcmd = " ".join(ns.files_or_code) # code to run, no files
foreign_shell_data.cache_clear() # make sure that we don't get prev src
fsenv, fsaliases = foreign_shell_data(
shell=ns.shell,
login=ns.login,
interactive=ns.interactive,
envcmd=ns.envcmd,
aliascmd=ns.aliascmd,
extra_args=ns.extra_args,
safe=ns.safe,
prevcmd=ns.prevcmd,
postcmd=ns.postcmd,
funcscmd=ns.funcscmd,
sourcer=ns.sourcer,
use_tmpfile=ns.use_tmpfile,
seterrprevcmd=ns.seterrprevcmd,
seterrpostcmd=ns.seterrpostcmd,
show=ns.show,
dryrun=ns.dryrun,
files=files,
)
if fsenv is None:
if ns.dryrun:
return
else:
msg = "xonsh: error: Source failed: {0!r}\n".format(ns.prevcmd)
msg += "xonsh: error: Possible reasons: File not found or syntax error\n"
return (None, msg, 1)
# apply results
denv = env.detype()
for k, v in fsenv.items():
if k in denv and v == denv[k]:
continue # no change from original
env[k] = v
# Remove any env-vars that were unset by the script.
for k in denv:
if k not in fsenv:
env.pop(k, None)
# Update aliases
baliases = XSH.aliases
for k, v in fsaliases.items():
if k in baliases and v == baliases[k]:
continue # no change from original
elif ns.overwrite_aliases or k not in baliases:
baliases[k] = v
elif ns.suppress_skip_message:
pass
else:
msg = (
"Skipping application of {0!r} alias from {1!r} "
"since it shares a name with an existing xonsh alias. "
'Use "--overwrite-alias" option to apply it anyway.'
'You may prevent this message with "--suppress-skip-message" or '
'"$FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE = True".'
)
print(msg.format(k, ns.shell), file=stderr)
|
def source_foreign(args, stdin=None, stdout=None, stderr=None):
"""Sources a file written in a foreign shell language."""
env = XSH.env
ns = _SOURCE_FOREIGN_PARSER.parse_args(args)
ns.suppress_skip_message = (
env.get("FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE")
if ns.suppress_skip_message is None
else ns.suppress_skip_message
)
files = ()
if ns.prevcmd is not None:
pass # don't change prevcmd if given explicitly
elif os.path.isfile(ns.files_or_code[0]):
# we have filenames to source
ns.prevcmd = "".join(
[f"{ns.sourcer} {f}\n" for f in ns.files_or_code]
)
files = tuple(ns.files_or_code)
elif ns.prevcmd is None:
ns.prevcmd = " ".join(ns.files_or_code) # code to run, no files
foreign_shell_data.cache_clear() # make sure that we don't get prev src
fsenv, fsaliases = foreign_shell_data(
shell=ns.shell,
login=ns.login,
interactive=ns.interactive,
envcmd=ns.envcmd,
aliascmd=ns.aliascmd,
extra_args=ns.extra_args,
safe=ns.safe,
prevcmd=ns.prevcmd,
postcmd=ns.postcmd,
funcscmd=ns.funcscmd,
sourcer=ns.sourcer,
use_tmpfile=ns.use_tmpfile,
seterrprevcmd=ns.seterrprevcmd,
seterrpostcmd=ns.seterrpostcmd,
show=ns.show,
dryrun=ns.dryrun,
files=files,
)
if fsenv is None:
if ns.dryrun:
return
else:
msg = "xonsh: error: Source failed: {0!r}\n".format(ns.prevcmd)
msg += "xonsh: error: Possible reasons: File not found or syntax error\n"
return (None, msg, 1)
# apply results
denv = env.detype()
for k, v in fsenv.items():
if k in denv and v == denv[k]:
continue # no change from original
env[k] = v
# Remove any env-vars that were unset by the script.
for k in denv:
if k not in fsenv:
env.pop(k, None)
# Update aliases
baliases = XSH.aliases
for k, v in fsaliases.items():
if k in baliases and v == baliases[k]:
continue # no change from original
elif ns.overwrite_aliases or k not in baliases:
baliases[k] = v
elif ns.suppress_skip_message:
pass
else:
msg = (
"Skipping application of {0!r} alias from {1!r} "
"since it shares a name with an existing xonsh alias. "
'Use "--overwrite-alias" option to apply it anyway.'
'You may prevent this message with "--suppress-skip-message" or '
'"$FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE = True".'
)
print(msg.format(k, ns.shell), file=stderr)
|
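The only change in this pair is swapping `str.format` for an f-string inside the list comprehension; the two are equivalent, as this short check shows (variable names here are illustrative):

```python
sourcer, fname = "source", "env.sh"
assert "{} {}\n".format(sourcer, fname) == f"{sourcer} {fname}\n"
```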
8,766 |
def test_say_no_repeat_protection(bot):
# five is fine
bot.say('hello', '#sopel')
bot.say('hello', '#sopel')
bot.say('hello', '#sopel')
bot.say('hello', '#sopel')
bot.say('hello', '#sopel')
assert bot.backend.message_sent == rawlist(
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
)
# six: replaced by '...'
bot.say('hello', '#sopel')
assert bot.backend.message_sent == rawlist(
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
# the extra hello is replaced by '...'
'PRIVMSG #sopel :...',
)
# these one will add more '...'
bot.say('hello', '#sopel')
bot.say('hello', '#sopel')
assert bot.backend.message_sent == rawlist(
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :...',
# the new ones are also replaced by '...'
'PRIVMSG #sopel :...',
'PRIVMSG #sopel :...',
)
# but at some point it just stop talking
bot.say('hello', '#sopel')
assert bot.backend.message_sent == rawlist(
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
# three time, then stop
'PRIVMSG #sopel :...',
'PRIVMSG #sopel :...',
'PRIVMSG #sopel :...',
)
|
def test_say_no_repeat_protection(bot):
# five is fine
bot.say('hello', '#sopel')
bot.say('hello', '#sopel')
bot.say('hello', '#sopel')
bot.say('hello', '#sopel')
bot.say('hello', '#sopel')
assert bot.backend.message_sent == rawlist(
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
)
# six: replaced by '...'
bot.say('hello', '#sopel')
assert bot.backend.message_sent == rawlist(
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
# the extra hello is replaced by '...'
'PRIVMSG #sopel :...',
)
# these one will add more '...'
bot.say('hello', '#sopel')
bot.say('hello', '#sopel')
assert bot.backend.message_sent == rawlist(
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :...',
# the new ones are also replaced by '...'
'PRIVMSG #sopel :...',
'PRIVMSG #sopel :...',
)
# but at some point it just stops talking
bot.say('hello', '#sopel')
assert bot.backend.message_sent == rawlist(
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
'PRIVMSG #sopel :hello',
        # three times, then stop
'PRIVMSG #sopel :...',
'PRIVMSG #sopel :...',
'PRIVMSG #sopel :...',
)
|
31,960 |
def http_request(method, url_suffix, data=None, headers=HEADERS):
try:
res = requests.request(
method,
BASE_URL + '/' + url_suffix,
headers=headers,
verify=INSECURE,
data=data
)
except Exception as e:
return_error(e)
# Handle error responses gracefully
if 'application/json' not in res.headers.get('Content-Type', []) and res.status_code != 204:
LOG(f'response status code is: {res.status_code}')
return_error('invalid url or port: ' + BASE_URL)
if res.status_code == 404:
if res.json().get('message'):
return_error(res.json().get('message'))
else:
return_error('No data returned')
if res.status_code not in {200, 201, 202, 207, 204}:
return_error(
'Error in API call to {}, status code: {}, reason: {}'.format(BASE_URL + '/' + url_suffix, res.status_code,
res.json()['message']))
if res.status_code == 204 and method == 'GET':
return {}
return res.json()
|
def http_request(method, url_suffix, data=None, headers=HEADERS):
try:
res = requests.request(
method,
BASE_URL + '/' + url_suffix,
headers=headers,
verify=INSECURE,
data=data
)
except Exception as e:
return_error(e)
# Handle error responses gracefully
if 'application/json' not in res.headers.get('Content-Type', []) and res.status_code != 204:
LOG(f'response status code is: {res.status_code}')
return_error('invalid url or port: ' + BASE_URL)
if res.status_code == 404:
if res.json().get('message'):
return_error(res.json().get('message'))
else:
return_error('No data returned')
if res.status_code not in {200, 201, 202, 204, 207}:
return_error(
'Error in API call to {}, status code: {}, reason: {}'.format(BASE_URL + '/' + url_suffix, res.status_code,
res.json()['message']))
if res.status_code == 204 and method == 'GET':
return {}
return res.json()
|
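The status-code change in this pair is purely cosmetic: Python sets are unordered, so both literals accept exactly the same codes.

```python
assert {200, 201, 202, 207, 204} == {200, 201, 202, 204, 207}
assert 204 in {200, 201, 202, 204, 207}
```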
46,543 |
def zero_activation_threshold(spec):
"""
Helper method to use the default balance activation threshold for state creation for tests.
Usage: `@with_custom_state(threshold_fn=one_gwei_activation_threshold, ...)`
"""
return 0
|
def zero_activation_threshold(spec):
"""
Helper method to use 0 as the activation threshold for state creation for tests.
    Usage: `@with_custom_state(threshold_fn=zero_activation_threshold, ...)`
"""
return 0
|
49,757 |
def get_options(argv=None):
"""
Convert options into commands.
Return commands, message
"""
parser = argparse.ArgumentParser(usage="spyder [options] files")
parser.add_argument(
'--new-instance',
action='store_true',
default=False,
help="Run a new instance of Spyder, even if the single "
"instance mode has been turned on (default)"
)
parser.add_argument(
'--defaults',
dest="reset_to_defaults",
action='store_true',
default=False,
help="Reset configuration settings to defaults"
)
parser.add_argument(
'--reset',
dest="reset_config_files",
action='store_true',
default=False,
help="Remove all configuration files!"
)
parser.add_argument(
'--optimize',
action='store_true',
default=False,
help="Optimize Spyder bytecode (this may require "
"administrative privileges)"
)
parser.add_argument(
'-w', '--workdir',
dest="working_directory",
default=None,
help="Default working directory"
)
parser.add_argument(
'--hide-console',
action='store_true',
default=False,
help="Hide parent console window (Windows)"
)
parser.add_argument(
'--show-console',
action='store_true',
default=False,
help="(Deprecated) Does nothing, now the default behavior "
"is to show the console"
)
parser.add_argument(
'--multithread',
dest="multithreaded",
action='store_true',
default=False,
help="Internal console is executed in another thread "
"(separate from main application thread)"
)
parser.add_argument(
'--profile',
action='store_true',
default=False,
help="Profile mode (internal test, not related "
"with Python profiling)"
)
parser.add_argument(
'--window-title',
type=str,
default=None,
help="String to show in the main window title"
)
parser.add_argument(
'-p', '--project',
default=None,
type=str,
dest="project",
help="Path that contains an Spyder project"
)
parser.add_argument(
'--opengl',
default=None,
dest="opengl_implementation",
choices=['software', 'desktop', 'gles'],
help="OpenGL implementation to pass to Qt"
)
parser.add_argument(
'--paths',
action='store_true',
default=False,
help="Show all Spyder configuration paths"
)
parser.add_argument(
'--debug-info',
default=None,
dest="debug_info",
choices=['minimal', 'verbose'],
help=("Level of internal debugging info to give. "
"'minimal' only logs a small amount of "
"confirmation messages and 'verbose' logs a "
"lot of detailed information.")
)
parser.add_argument(
'--debug-output',
default='terminal',
dest="debug_output",
choices=['terminal', 'file'],
help=("Print internal debugging info to the terminal and a file in "
"the configuration directory or to the terminal and a file "
"called spyder-debug.log in the current working directory. "
"Default is 'terminal'.")
)
parser.add_argument(
'--filter-log',
default='',
help="Comma-separated module name hierarchies whose log "
"messages should be shown. e.g., "
"spyder.plugins.completion,spyder.plugins.editor"
)
parser.add_argument(
'--safe-mode',
dest="safe_mode",
action='store_true',
default=False,
help="Start Spyder with a clean configuration directory"
)
parser.add_argument(
'--offline',
dest="offline",
action='store_true',
default=False,
help="Disable all web engine and download functionality."
)
parser.add_argument(
'--report-segfault',
dest="report_segfault",
action='store_true',
default=False,
help="Report segmentation fault to Github."
)
parser.add_argument(
'--conf-dir',
type=str,
dest="conf_dir",
default=None,
help="Choose a configuration directory to use for Spyder."
)
parser.add_argument('files', nargs='*')
options = parser.parse_args(argv)
args = options.files
return options, args
|
def get_options(argv=None):
"""
Convert options into commands.
Return commands, message
"""
parser = argparse.ArgumentParser(usage="spyder [options] files")
parser.add_argument(
'--new-instance',
action='store_true',
default=False,
help="Run a new instance of Spyder, even if the single "
"instance mode has been turned on (default)"
)
parser.add_argument(
'--defaults',
dest="reset_to_defaults",
action='store_true',
default=False,
help="Reset configuration settings to defaults"
)
parser.add_argument(
'--reset',
dest="reset_config_files",
action='store_true',
default=False,
help="Remove all configuration files!"
)
parser.add_argument(
'--optimize',
action='store_true',
default=False,
help="Optimize Spyder bytecode (this may require "
"administrative privileges)"
)
parser.add_argument(
'-w', '--workdir',
dest="working_directory",
default=None,
help="Default working directory"
)
parser.add_argument(
'--hide-console',
action='store_true',
default=False,
help="Hide parent console window (Windows)"
)
parser.add_argument(
'--show-console',
action='store_true',
default=False,
help="(Deprecated) Does nothing, now the default behavior "
"is to show the console"
)
parser.add_argument(
'--multithread',
dest="multithreaded",
action='store_true',
default=False,
help="Internal console is executed in another thread "
"(separate from main application thread)"
)
parser.add_argument(
'--profile',
action='store_true',
default=False,
help="Profile mode (internal test, not related "
"with Python profiling)"
)
parser.add_argument(
'--window-title',
type=str,
default=None,
help="String to show in the main window title"
)
parser.add_argument(
'-p', '--project',
default=None,
type=str,
dest="project",
help="Path that contains an Spyder project"
)
parser.add_argument(
'--opengl',
default=None,
dest="opengl_implementation",
choices=['software', 'desktop', 'gles'],
help="OpenGL implementation to pass to Qt"
)
parser.add_argument(
'--paths',
action='store_true',
default=False,
help="Show all Spyder configuration paths"
)
parser.add_argument(
'--debug-info',
default=None,
dest="debug_info",
choices=['minimal', 'verbose'],
help=("Level of internal debugging info to give. "
"'minimal' only logs a small amount of "
"confirmation messages and 'verbose' logs a "
"lot of detailed information.")
)
parser.add_argument(
'--debug-output',
default='terminal',
dest="debug_output",
choices=['terminal', 'file'],
help=("Print internal debugging info to the terminal and a file in "
"the configuration directory or to the terminal and a file "
"called spyder-debug.log in the current working directory. "
"Default is 'terminal'.")
)
parser.add_argument(
'--filter-log',
default='',
help="Comma-separated module name hierarchies whose log "
"messages should be shown. e.g., "
"spyder.plugins.completion,spyder.plugins.editor"
)
parser.add_argument(
'--safe-mode',
dest="safe_mode",
action='store_true',
default=False,
help="Start Spyder with a clean configuration directory"
)
parser.add_argument(
'--no-web-widgets',
dest="no-web-widgets",
action='store_true',
default=False,
help="Disable all web engine and download functionality."
)
parser.add_argument(
'--report-segfault',
dest="report_segfault",
action='store_true',
default=False,
help="Report segmentation fault to Github."
)
parser.add_argument(
'--conf-dir',
type=str,
dest="conf_dir",
default=None,
help="Choose a configuration directory to use for Spyder."
)
parser.add_argument('files', nargs='*')
options = parser.parse_args(argv)
args = options.files
return options, args
|
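A minimal sketch (not part of the dataset rows) of how an argparse parser like the one built above behaves when handed an explicit argv list; only two of the flags are reproduced here and the argv values are invented for illustration.

import argparse

# reduced parser: a small subset of the flags defined in get_options()
parser = argparse.ArgumentParser(usage="spyder [options] files")
parser.add_argument('--new-instance', action='store_true', default=False)
parser.add_argument('-w', '--workdir', dest="working_directory", default=None)
parser.add_argument('files', nargs='*')

# parse_args accepts an explicit argv list, which is what get_options(argv) relies on
options = parser.parse_args(['--new-instance', '-w', '/tmp', 'script.py'])   # invented argv
print(options.new_instance)        # True
print(options.working_directory)   # /tmp
print(options.files)               # ['script.py']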
2,459 |
def concat_features(row):
return (row["keywords"] + " " + row["cast"] + " " +
row["genres"] + " " + row["director"])
|
def concat_features(row):
return (row["keywords"] + " " + row["cast"] + " " +
row["genres"] + " " + row["director"])
|
37,124 |
def parallel_map( # pylint: disable=dangerous-default-value
task, values, task_args=tuple(), task_kwargs={}, num_processes=CPU_COUNT):
"""
Parallel execution of a mapping of `values` to the function `task`. This
is functionally equivalent to::
result = [task(value, *task_args, **task_kwargs) for value in values]
On Windows this function defaults to a serial implementation to avoid the
overhead from spawning processes in Windows.
Args:
task (func): Function that is to be called for each value in ``values``.
values (array_like): List or array of values for which the ``task``
function is to be evaluated.
task_args (list): Optional additional arguments to the ``task`` function.
task_kwargs (dict): Optional additional keyword argument to the ``task`` function.
num_processes (int): Number of processes to spawn.
Returns:
result: The result list contains the value of
``task(value, *task_args, **task_kwargs)`` for
each value in ``values``.
Raises:
QiskitError: If user interrupts via keyboard.
Events:
terra.parallel.start: The collection of parallel tasks are about to start.
        terra.parallel.update: One of the parallel tasks has finished.
terra.parallel.finish: All the parallel tasks have finished.
"""
if len(values) == 1:
return [task(values[0], *task_args, **task_kwargs)]
Publisher().publish("terra.parallel.start", len(values))
nfinished = [0]
def _callback(_):
nfinished[0] += 1
Publisher().publish("terra.parallel.done", nfinished[0])
# Run in parallel if not Win and not in parallel already
if platform.system() != 'Windows' and num_processes > 1 \
and os.getenv('QISKIT_IN_PARALLEL') == 'FALSE':
os.environ['QISKIT_IN_PARALLEL'] = 'TRUE'
try:
if platform.system() == 'Darwin':
                # The spawn start method is changed to the fork start method
# to avoid error of python3.8. However, this seems to be considered unsafe.
# https://docs.python.org/3/library/multiprocessing.html
ctx = get_context('fork')
pool = ctx.Pool(processes=num_processes)
else:
pool = Pool(processes=num_processes)
async_res = [pool.apply_async(task, (value,) + task_args, task_kwargs,
_callback) for value in values]
while not all([item.ready() for item in async_res]):
for item in async_res:
item.wait(timeout=0.1)
pool.terminate()
pool.join()
except (KeyboardInterrupt, Exception) as error:
if isinstance(error, KeyboardInterrupt):
pool.terminate()
pool.join()
Publisher().publish("terra.parallel.finish")
os.environ['QISKIT_IN_PARALLEL'] = 'False'
raise QiskitError('Keyboard interrupt in parallel_map.')
# Otherwise just reset parallel flag and error
os.environ['QISKIT_IN_PARALLEL'] = 'False'
raise error
Publisher().publish("terra.parallel.finish")
os.environ['QISKIT_IN_PARALLEL'] = 'FALSE'
return [ar.get() for ar in async_res]
    # Cannot do parallel on Windows, if another parallel_map is running in parallel,
# or len(values) == 1.
results = []
for _, value in enumerate(values):
result = task(value, *task_args, **task_kwargs)
results.append(result)
_callback(0)
Publisher().publish("terra.parallel.finish")
return results
|
def parallel_map( # pylint: disable=dangerous-default-value
task, values, task_args=tuple(), task_kwargs={}, num_processes=CPU_COUNT):
"""
Parallel execution of a mapping of `values` to the function `task`. This
is functionally equivalent to::
result = [task(value, *task_args, **task_kwargs) for value in values]
On Windows this function defaults to a serial implementation to avoid the
overhead from spawning processes in Windows.
Args:
task (func): Function that is to be called for each value in ``values``.
values (array_like): List or array of values for which the ``task``
function is to be evaluated.
task_args (list): Optional additional arguments to the ``task`` function.
task_kwargs (dict): Optional additional keyword argument to the ``task`` function.
num_processes (int): Number of processes to spawn.
Returns:
result: The result list contains the value of
``task(value, *task_args, **task_kwargs)`` for
each value in ``values``.
Raises:
QiskitError: If user interrupts via keyboard.
Events:
terra.parallel.start: The collection of parallel tasks are about to start.
        terra.parallel.update: One of the parallel tasks has finished.
terra.parallel.finish: All the parallel tasks have finished.
"""
if len(values) == 1:
return [task(values[0], *task_args, **task_kwargs)]
Publisher().publish("terra.parallel.start", len(values))
nfinished = [0]
def _callback(_):
nfinished[0] += 1
Publisher().publish("terra.parallel.done", nfinished[0])
# Run in parallel if not Win and not in parallel already
if platform.system() != 'Windows' and num_processes > 1 \
and os.getenv('QISKIT_IN_PARALLEL') == 'FALSE':
os.environ['QISKIT_IN_PARALLEL'] = 'TRUE'
try:
if platform.system() == 'Darwin' and sys.version_info[0] == 3 and sys.version_info[1] == 8:
                # The spawn start method is changed to the fork start method
# to avoid error of python3.8. However, this seems to be considered unsafe.
# https://docs.python.org/3/library/multiprocessing.html
ctx = get_context('fork')
pool = ctx.Pool(processes=num_processes)
else:
pool = Pool(processes=num_processes)
async_res = [pool.apply_async(task, (value,) + task_args, task_kwargs,
_callback) for value in values]
while not all([item.ready() for item in async_res]):
for item in async_res:
item.wait(timeout=0.1)
pool.terminate()
pool.join()
except (KeyboardInterrupt, Exception) as error:
if isinstance(error, KeyboardInterrupt):
pool.terminate()
pool.join()
Publisher().publish("terra.parallel.finish")
os.environ['QISKIT_IN_PARALLEL'] = 'False'
raise QiskitError('Keyboard interrupt in parallel_map.')
# Otherwise just reset parallel flag and error
os.environ['QISKIT_IN_PARALLEL'] = 'False'
raise error
Publisher().publish("terra.parallel.finish")
os.environ['QISKIT_IN_PARALLEL'] = 'FALSE'
return [ar.get() for ar in async_res]
    # Cannot do parallel on Windows, if another parallel_map is running in parallel,
# or len(values) == 1.
results = []
for _, value in enumerate(values):
result = task(value, *task_args, **task_kwargs)
results.append(result)
_callback(0)
Publisher().publish("terra.parallel.finish")
return results
|
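A self-contained sketch of the apply_async-plus-callback pattern parallel_map builds on, using only the standard library; the square worker and its inputs are invented stand-ins for the real task and values.

from multiprocessing import Pool

def square(x):
    # invented stand-in for the real `task`
    return x * x

if __name__ == '__main__':
    nfinished = [0]

    def _callback(_):
        # progress counter, mirroring the callback used above
        nfinished[0] += 1

    with Pool(processes=2) as pool:
        async_res = [pool.apply_async(square, (v,), {}, _callback) for v in range(4)]
        results = [r.get() for r in async_res]
    print(results, nfinished[0])   # [0, 1, 4, 9] 4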
33,975 |
def convert_pandas_to_torch_tensor(
data_batch: pd.DataFrame,
columns: Optional[Union[List[str], List[List[str]]]] = None,
column_dtypes: Optional[Union[torch.dtype, List[torch.dtype]]] = None,
) -> Union[torch.Tensor, List[torch.Tensor]]:
"""Converts a Pandas dataframe to a torch Tensor or list of torch Tensors.
The format of the return type will match the format of ``columns``. If a
list of columns is provided, the return type will be a single tensor. If
``columns`` is a list of list, then the return type will be a list of
tensors.
Args:
data_batch (pandas.DataFrame): The pandas dataframe to conver to a
torch tensor.
columns (Optional[Union[List[str], List[List[str]]]):
The names of the columns in the dataframe to include in the
torch tensor. If this arg is a List[List[str]], then the return
type will be a List of tensors. This is useful for multi-input
models. If None, then use all columns in the ``data_batch``.
column_dtype (Optional[Union[torch.dtype, List[torch.dtype]): The
torch dtype to use for the tensor. If set to None,
then automatically
infer the dtype.
Returns:
Either a torch tensor of size (N, len(columns)) where N is the
number of rows in the ``data_batch`` Dataframe, or a list of
tensors, where the size of item i is (N, len(columns[i])).
"""
multi_input = columns and (isinstance(columns[0], (list, tuple)))
if multi_input and column_dtypes:
        if type(column_dtypes) not in [torch.dtype, list, tuple]:
raise TypeError(
"If `columns` is a list of lists, "
"`column_dtypes` must be None, `torch.dtype`,"
f" or a sequence, got {type(column_dtypes)}."
)
if (
not multi_input
and column_dtypes
and type(column_dtypes) not in [torch.dtype, list, tuple]
):
raise TypeError(
"If `columns` is a list of strings, "
"`column_dtypes` must be None or a single `torch.dtype`."
f"Got {type(column_dtypes)} instead."
)
def get_tensor_for_columns(columns, dtype):
feature_tensors = []
if columns:
batch = data_batch[columns]
else:
batch = data_batch
for col in batch.columns:
col_vals = batch[col].values
t = torch.as_tensor(col_vals, dtype=dtype)
t = t.view(-1, 1)
feature_tensors.append(t)
return torch.cat(feature_tensors, dim=1)
if multi_input:
if isinstance(column_dtypes, torch.dtype):
column_dtypes = [column_dtypes] * len(columns)
return [
get_tensor_for_columns(columns=subcolumns, dtype=dtype)
for subcolumns, dtype in zip(columns, column_dtypes)
]
else:
return get_tensor_for_columns(columns=columns, dtype=column_dtypes)
|
def convert_pandas_to_torch_tensor(
data_batch: pd.DataFrame,
columns: Optional[Union[List[str], List[List[str]]]] = None,
column_dtypes: Optional[Union[torch.dtype, List[torch.dtype]]] = None,
) -> Union[torch.Tensor, List[torch.Tensor]]:
"""Converts a Pandas dataframe to a torch Tensor or list of torch Tensors.
The format of the return type will match the format of ``columns``. If a
list of columns is provided, the return type will be a single tensor. If
``columns`` is a list of list, then the return type will be a list of
tensors.
Args:
data_batch (pandas.DataFrame): The pandas dataframe to convert to a
torch tensor.
columns (Optional[Union[List[str], List[List[str]]]):
The names of the columns in the dataframe to include in the
torch tensor. If this arg is a List[List[str]], then the return
type will be a List of tensors. This is useful for multi-input
models. If None, then use all columns in the ``data_batch``.
column_dtype (Optional[Union[torch.dtype, List[torch.dtype]): The
torch dtype to use for the tensor. If set to None,
then automatically
infer the dtype.
Returns:
Either a torch tensor of size (N, len(columns)) where N is the
number of rows in the ``data_batch`` Dataframe, or a list of
tensors, where the size of item i is (N, len(columns[i])).
"""
multi_input = columns and (isinstance(columns[0], (list, tuple)))
if multi_input and column_dtypes:
        if type(column_dtypes) not in [torch.dtype, list, tuple]:
raise TypeError(
"If `columns` is a list of lists, "
"`column_dtypes` must be None, `torch.dtype`,"
f" or a sequence, got {type(column_dtypes)}."
)
if (
not multi_input
and column_dtypes
and type(column_dtypes) not in [torch.dtype, list, tuple]
):
raise TypeError(
"If `columns` is a list of strings, "
"`column_dtypes` must be None or a single `torch.dtype`."
f"Got {type(column_dtypes)} instead."
)
def get_tensor_for_columns(columns, dtype):
feature_tensors = []
if columns:
batch = data_batch[columns]
else:
batch = data_batch
for col in batch.columns:
col_vals = batch[col].values
t = torch.as_tensor(col_vals, dtype=dtype)
t = t.view(-1, 1)
feature_tensors.append(t)
return torch.cat(feature_tensors, dim=1)
if multi_input:
if isinstance(column_dtypes, torch.dtype):
column_dtypes = [column_dtypes] * len(columns)
return [
get_tensor_for_columns(columns=subcolumns, dtype=dtype)
for subcolumns, dtype in zip(columns, column_dtypes)
]
else:
return get_tensor_for_columns(columns=columns, dtype=column_dtypes)
|
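A short sketch, assuming pandas and torch are installed, of the per-column as_tensor/view/cat pipeline the helper above applies; the DataFrame contents are invented.

import pandas as pd
import torch

df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})   # invented data
# one (N, 1) tensor per column, then concatenate along dim=1
column_tensors = [torch.as_tensor(df[c].values, dtype=torch.float32).view(-1, 1) for c in ["a", "b"]]
features = torch.cat(column_tensors, dim=1)
print(features.shape)   # torch.Size([3, 2])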
521 |
def ignore_couch_changes_for_sql_domains(change):
if not change.metadata or not change.metadata.domain:
return False
if change.metadata.data_source_type == SOURCE_COUCH and should_use_sql_backend(change.metadata.domain):
return True
|
def ignore_couch_changes_for_sql_domains(change):
if not change.metadata or not change.metadata.domain:
return False
return change.metadata.data_source_type == SOURCE_COUCH and should_use_sql_backend(change.metadata.domain)
|
43,324 |
def plot_history(history, individual_figsize=(7, 4), return_figure=False, **kwargs):
"""
Plot the training history of one or more models.
This creates a column of plots, with one plot for each metric recorded during training, with the
plot showing the metric vs. epoch. If multiple models have been trained (that is, a list of
histories is passed in), each metric plot includes multiple train and validation series.
Validation data is optional (it is detected by metrics with names starting with ``val_``).
Args:
history: the training history, as returned by :meth:`tf.keras.Model.fit`
individual_figsize (tuple of numbers): the size of the plot for each metric
return_figure (bool): if True, then the figure object with the plots is returned, None otherwise.
kwargs: additional arguments to pass to :meth:`matplotlib.pyplot.subplots`
Returns:
[matplotlib.figure.Figure]: The figure object with the plots if `return_figure=True`, None otherwise
"""
# explicit colours are needed if there's multiple train or multiple validation series, because
# each train series should have the same color. This uses the global matplotlib defaults that
# would be used for a single train and validation series.
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
color_train = colors[0]
color_validation = colors[1]
if not isinstance(history, list):
history = [history]
def remove_prefix(text, prefix):
return text[text.startswith(prefix) and len(prefix) :]
metrics = sorted({remove_prefix(m, "val_") for m in history[0].history.keys()})
width, height = individual_figsize
overall_figsize = (width, len(metrics) * height)
# plot each metric in a column, so that epochs are aligned (squeeze=False, so we don't have to
# special case len(metrics) == 1 in the zip)
fig, all_axes = plt.subplots(
len(metrics), 1, squeeze=False, sharex="col", figsize=overall_figsize, **kwargs
)
has_validation = False
for ax, m in zip(all_axes[:, 0], metrics):
for h in history:
# summarize history for metric m
ax.plot(h.history[m], c=color_train)
try:
val = h.history["val_" + m]
except KeyError:
# no validation data for this metric
pass
else:
ax.plot(val, c=color_validation)
has_validation = True
ax.set_ylabel(m, fontsize="x-large")
# don't be redundant: only include legend on the top plot
labels = ["train"]
if has_validation:
labels.append("validation")
all_axes[0, 0].legend(labels, loc="best", fontsize="x-large")
# ... and only label "epoch" on the bottom
all_axes[-1, 0].set_xlabel("epoch", fontsize="x-large")
# minimise whitespace
fig.tight_layout()
if return_figure:
return fig
|
def plot_history(history, individual_figsize=(7, 4), return_figure=False, **kwargs):
"""
Plot the training history of one or more models.
This creates a column of plots, with one plot for each metric recorded during training, with the
plot showing the metric vs. epoch. If multiple models have been trained (that is, a list of
histories is passed in), each metric plot includes multiple train and validation series.
Validation data is optional (it is detected by metrics with names starting with ``val_``).
Args:
history: the training history, as returned by :meth:`tf.keras.Model.fit`
individual_figsize (tuple of numbers): the size of the plot for each metric
return_figure (bool): if True, then the figure object with the plots is returned, None otherwise.
kwargs: additional arguments to pass to :meth:`matplotlib.pyplot.subplots`
Returns:
:class:`matplotlib.figure.Figure`: The figure object with the plots if ``return_figure=True``, None otherwise
"""
# explicit colours are needed if there's multiple train or multiple validation series, because
# each train series should have the same color. This uses the global matplotlib defaults that
# would be used for a single train and validation series.
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
color_train = colors[0]
color_validation = colors[1]
if not isinstance(history, list):
history = [history]
def remove_prefix(text, prefix):
return text[text.startswith(prefix) and len(prefix) :]
metrics = sorted({remove_prefix(m, "val_") for m in history[0].history.keys()})
width, height = individual_figsize
overall_figsize = (width, len(metrics) * height)
# plot each metric in a column, so that epochs are aligned (squeeze=False, so we don't have to
# special case len(metrics) == 1 in the zip)
fig, all_axes = plt.subplots(
len(metrics), 1, squeeze=False, sharex="col", figsize=overall_figsize, **kwargs
)
has_validation = False
for ax, m in zip(all_axes[:, 0], metrics):
for h in history:
# summarize history for metric m
ax.plot(h.history[m], c=color_train)
try:
val = h.history["val_" + m]
except KeyError:
# no validation data for this metric
pass
else:
ax.plot(val, c=color_validation)
has_validation = True
ax.set_ylabel(m, fontsize="x-large")
# don't be redundant: only include legend on the top plot
labels = ["train"]
if has_validation:
labels.append("validation")
all_axes[0, 0].legend(labels, loc="best", fontsize="x-large")
# ... and only label "epoch" on the bottom
all_axes[-1, 0].set_xlabel("epoch", fontsize="x-large")
# minimise whitespace
fig.tight_layout()
if return_figure:
return fig
|
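A reduced sketch of the subplot layout plot_history relies on: one row of axes per metric, squeeze=False so the axes array stays two-dimensional, and a shared x axis. The metric values are invented and the Agg backend is chosen only so the sketch runs headless.

import matplotlib
matplotlib.use("Agg")   # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt

history = {"loss": [0.9, 0.6, 0.4], "acc": [0.55, 0.70, 0.82]}   # invented metrics
fig, all_axes = plt.subplots(len(history), 1, squeeze=False, sharex="col", figsize=(7, 8))
for ax, (metric, values) in zip(all_axes[:, 0], history.items()):
    ax.plot(values)
    ax.set_ylabel(metric)
all_axes[-1, 0].set_xlabel("epoch")
fig.tight_layout()
fig.savefig("history.png")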
8,464 |
def Dependencies(lTOC, xtrapath=None, manifest=None, redirects=None):
"""
    Expand LTOC to include the full closure of binary dependencies.
`LTOC` is a logical table of contents, ie, a seq of tuples (name, path).
Return LTOC expanded by all the binary dependencies of the entries
in LTOC, except those listed in the module global EXCLUDES
`manifest` may be a winmanifest.Manifest instance for a program manifest, so
that all dependent assemblies of python.exe can be added to the built exe.
`redirects` may be a list. Any assembly redirects found via policy files will
be added to the list as BindingRedirect objects so they can later be used
to modify any manifests that reference the redirected assembly.
"""
# Extract all necessary binary modules from Python eggs to be included
# directly with PyInstaller.
lTOC = _extract_from_egg(lTOC)
# 4 processes may yield up to +40% speed on 2 CPUs
# 2 processes may yield up to +30% speed on 2 CPUs
processes = 2 * os.cpu_count()
pool = multiprocessing.Pool(processes)
if is_win:
# Search for required assemblies and add them to the TOC
paths = [path for name, path, typ in lTOC]
assemblies = pool.map(
functools.partial(getAssemblyFiles, manifest=manifest, redirects=redirects),
paths
)
# getAssemblyFiles returns a list of tuples, so assemblies is a
# list of list of tuples
for assembly in assemblies:
for ftocnm, fn in assembly:
lTOC.append((ftocnm, fn, 'BINARY'))
dataset = collections.deque([(name, path, typ) for (name, path, typ) in lTOC])
while True:
# Breakdown the dataset in chunks as big as the chosen number of processes
# instead of just feeding the whole dataset into process pool
# so that we can keep the "seen" cache in main process only
chunk = []
while (len(chunk) < processes) and len(dataset):
(name, path, typ) = dataset.pop()
if name.upper() in seen:
continue
chunk.append(path)
if not chunk:
break # From while True, no more data
imports = pool.map(
functools.partial(selectImports, xtrapath=xtrapath),
chunk
)
# selectImports returns a list of pairs, so 'imports' is
# a list of lists of pairs
for item_dependencies in imports:
for (lib, npth) in item_dependencies:
if lib.upper() in seen or npth.upper() in seen:
continue
seen.add(npth.upper())
lTOC.append((lib, npth, 'BINARY'))
return lTOC
|
def Dependencies(lTOC, xtrapath=None, manifest=None, redirects=None):
"""
    Expand LTOC to include the full closure of binary dependencies.
`LTOC` is a logical table of contents, ie, a seq of tuples (name, path).
Return LTOC expanded by all the binary dependencies of the entries
in LTOC, except those listed in the module global EXCLUDES
`manifest` may be a winmanifest.Manifest instance for a program manifest, so
that all dependent assemblies of python.exe can be added to the built exe.
`redirects` may be a list. Any assembly redirects found via policy files will
be added to the list as BindingRedirect objects so they can later be used
to modify any manifests that reference the redirected assembly.
"""
# Extract all necessary binary modules from Python eggs to be included
# directly with PyInstaller.
lTOC = _extract_from_egg(lTOC)
# 4 processes may yield up to +40% speed on 2 CPUs
# 2 processes may yield up to +30% speed on 2 CPUs
processes = 2 * os.cpu_count()
pool = multiprocessing.Pool(processes)
if is_win:
# Search for required assemblies and add them to the TOC
paths = [path for name, path, typ in lTOC]
assemblies = pool.map(
functools.partial(getAssemblyFiles, manifest=manifest, redirects=redirects),
paths
)
# getAssemblyFiles returns a list of tuples, so assemblies is a
# list of list of tuples
for assembly in assemblies:
for ftocnm, fn in assembly:
lTOC.append((ftocnm, fn, 'BINARY'))
dataset = collections.deque([(name, path, typ) for (name, path, typ) in lTOC])
while True:
# Breakdown the dataset in chunks as big as the chosen number of processes
# instead of just feeding the whole dataset into process pool
# so that we can keep the "seen" cache in main process only
chunk = []
while (len(chunk) < processes) and len(dataset):
name, path, typ = dataset.pop()
if name.upper() in seen:
continue
chunk.append(path)
if not chunk:
break # From while True, no more data
imports = pool.map(
functools.partial(selectImports, xtrapath=xtrapath),
chunk
)
# selectImports returns a list of pairs, so 'imports' is
# a list of lists of pairs
for item_dependencies in imports:
for (lib, npth) in item_dependencies:
if lib.upper() in seen or npth.upper() in seen:
continue
seen.add(npth.upper())
lTOC.append((lib, npth, 'BINARY'))
return lTOC
|
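A standalone sketch of the deque-draining loop used above, which builds work chunks in the main process so the `seen` cache never has to cross process boundaries; the item names and chunk size are invented, and the dedup bookkeeping is simplified to keep the sketch self-contained.

import collections

dataset = collections.deque(["libA", "libB", "libA", "libC", "libD"])   # invented items
seen = set()
processes = 2
while True:
    chunk = []
    while (len(chunk) < processes) and len(dataset):
        name = dataset.pop()
        if name.upper() in seen:
            continue
        seen.add(name.upper())   # simplified: the real code records discovered results, not inputs
        chunk.append(name)
    if not chunk:
        break   # no more unseen work
    print("dispatching", chunk)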
13,414 |
def test_11_get_logs_before_moving_the_system_dataset_to_the_second_pool(logs_data):
cmd = "cat /var/log/middlewared.log"
middlewared_log = SSH_TEST(cmd, user, password, ip)
assert middlewared_log['result'] is True, str(middlewared_log)
logs_data['middleware_log_4'] = middlewared_log['output'].splitlines()[-1]
cmd = "journalctl --no-page"
journald_log = SSH_TEST(cmd, user, password, ip)
assert journald_log['result'] is True, str(journald_log)
logs_data['journald_log_4'] = journald_log['output'].splitlines()[-1]
cmd = "cat /var/log/syslog"
syslog = SSH_TEST(cmd, user, password, ip)
assert syslog['result'] is True, str(syslog)
logs_data['syslog_4'] = syslog['output'].splitlines()[-1]
|
def test_11_get_logs_before_moving_the_sysds_to_the_second_pool(logs_data):
cmd = "cat /var/log/middlewared.log"
middlewared_log = SSH_TEST(cmd, user, password, ip)
assert middlewared_log['result'] is True, str(middlewared_log)
logs_data['middleware_log_4'] = middlewared_log['output'].splitlines()[-1]
cmd = "journalctl --no-page"
journald_log = SSH_TEST(cmd, user, password, ip)
assert journald_log['result'] is True, str(journald_log)
logs_data['journald_log_4'] = journald_log['output'].splitlines()[-1]
cmd = "cat /var/log/syslog"
syslog = SSH_TEST(cmd, user, password, ip)
assert syslog['result'] is True, str(syslog)
logs_data['syslog_4'] = syslog['output'].splitlines()[-1]
|
35,164 |
def _report_nonhermitian(M, name):
"""
Report if `M` is not a hermitian matrix given its type.
"""
md = M - M.T.conj()
nmd = linalg.norm(md, 1)
tol = 10 * cupy.finfo(M.dtype).eps
tol = max(tol, tol * linalg.norm(M, 1))
if nmd > tol:
print('matrix %s of the type %s is not sufficiently Hermitian:'
% (name, M.dtype))
print('condition: %.e < %e' % (nmd, tol))
|
def _report_nonhermitian(M, name):
"""
Report if `M` is not a hermitian matrix given its type.
"""
md = M - M.T.conj()
nmd = linalg.norm(md, 1)
tol = 10 * cupy.finfo(M.dtype).eps
tol *= max(1, float(linalg.norm(M, 1)))
if nmd > tol:
print('matrix %s of the type %s is not sufficiently Hermitian:'
% (name, M.dtype))
print('condition: %.e < %e' % (nmd, tol))
|
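The same Hermitian check sketched with NumPy in place of CuPy (both expose finfo and linalg.norm with the signatures used here); the test matrix is invented.

import numpy as np
from numpy import linalg

M = np.array([[1.0, 2.0 + 1.0j], [2.0 - 1.0j + 1e-3, 1.0]])   # invented, slightly non-Hermitian
md = M - M.T.conj()
nmd = linalg.norm(md, 1)                   # distance from Hermitian in the 1-norm
tol = 10 * np.finfo(M.dtype).eps
tol *= max(1, float(linalg.norm(M, 1)))    # relative tolerance, as in the modified version
print(nmd > tol)                           # True: this M is not numerically Hermitian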
12,709 |
def _validate_metadata(
metadata: LockfileMetadata,
request: PexRequest,
requirements: (Lockfile | LockfileContent),
python_setup: PythonSetup,
) -> None:
validation = metadata.is_valid_for(
requirements.lockfile_hex_digest,
request.interpreter_constraints,
python_setup.interpreter_universe,
)
if validation:
return
def message_parts() -> Iterable[str]:
tool_name = request.options_scope_name
uses_source_plugins = tool_name in ["mypy", "pylint"]
uses_project_interpreter_constraints = tool_name in [
"bandit",
"flake8",
"pylint",
"setuptools",
"ipython",
"pytest",
]
yield "You are using "
if isinstance(requirements, LockfileContent):
if requirements.is_default_lockfile:
yield "the `<default>` lockfile provided by Pants "
else:
yield "a lockfile that was generated programmatically "
else:
yield f"the lockfile at {requirements.file_path} "
yield (
f"to install the tool `{tool_name}`, but it is not compatible with your "
"configuration: "
"\n\n"
)
if InvalidLockfileReason.INVALIDATION_DIGEST_MISMATCH in validation.failure_reasons:
yield (
"- You have set different requirements than those used to generate the lockfile. "
f"You can fix this by not setting `[{tool_name}].version`, "
)
if uses_source_plugins:
yield f"`[{tool_name}].source_plugins`, "
yield (
f"and `[{tool_name}].extra_requirements`, or by using a new "
"custom lockfile." # TODO make this more descriptive based on the defaults.
"\n"
)
if InvalidLockfileReason.INTERPRETER_CONSTRAINTS_MISMATCH in validation.failure_reasons:
yield (
f"- You have set interpreter constraints (`{request.interpreter_constraints}`) that "
"are not compatible with those used to generate the lockfile "
f"(`{metadata.valid_for_interpreter_constraints}`). "
)
if not uses_project_interpreter_constraints:
yield (
f"You can fix this by not setting `[{tool_name}].interpreter_constraints`, "
"or by using a new custom lockfile. "
)
else:
yield (
f"`{tool_name}` determines its interpreter constraints based on your code's own "
"constraints. To fix this error, you can either change your code's constraints "
f"(see {doc_url('python-interpreter-compatibility')}) or by generating a new "
"custom lockfile. "
)
yield "\n"
yield "\n"
if not isinstance(requirements, Lockfile):
yield (
"To generate a custom lockfile based on your current configuration, set "
f"`[{tool_name}].lockfile` to where you want to create the lockfile, then run "
"`./pants generate-lockfiles`. "
)
else:
yield (
"To regenerate your lockfile based on your current configuration, run "
"`./pants generate-lockfiles`. "
)
message = "".join(message_parts()).strip()
if python_setup.invalid_lockfile_behavior == InvalidLockfileBehavior.error:
raise ValueError(message)
elif python_setup.invalid_lockfile_behavior == InvalidLockfileBehavior.warn:
logger.warning("%s", message)
|
def _validate_metadata(
metadata: LockfileMetadata,
request: PexRequest,
requirements: (Lockfile | LockfileContent),
python_setup: PythonSetup,
) -> None:
validation = metadata.is_valid_for(
requirements.lockfile_hex_digest,
request.interpreter_constraints,
python_setup.interpreter_universe,
)
if validation:
return
def message_parts() -> Iterable[str]:
tool_name = request.options_scope_name
uses_source_plugins = tool_name in ["mypy", "pylint"]
uses_project_interpreter_constraints = tool_name in [
"bandit",
"flake8",
"pylint",
"setuptools",
"ipython",
"pytest",
]
yield "You are using "
if isinstance(requirements, LockfileContent):
if requirements.is_default_lockfile:
yield "the `<default>` lockfile provided by Pants "
else:
yield "a lockfile that was generated programmatically "
else:
yield f"the lockfile at {requirements.file_path} "
yield (
f"to install the tool `{tool_name}`, but it is not compatible with your "
"configuration: "
"\n\n"
)
if InvalidLockfileReason.INVALIDATION_DIGEST_MISMATCH in validation.failure_reasons:
yield (
"- You have set different requirements than those used to generate the lockfile. "
f"You can fix this by not setting `[{tool_name}].version`, "
)
if uses_source_plugins:
yield f"`[{tool_name}].source_plugins`, "
yield (
f"and `[{tool_name}].extra_requirements`, or by using a new "
"custom lockfile." # TODO make this more descriptive based on the defaults.
"\n"
)
if InvalidLockfileReason.INTERPRETER_CONSTRAINTS_MISMATCH in validation.failure_reasons:
yield (
f"- You have set interpreter constraints (`{request.interpreter_constraints}`) that "
"are not compatible with those used to generate the lockfile "
f"(`{metadata.valid_for_interpreter_constraints}`). "
)
if not uses_project_interpreter_constraints:
yield (
f"You can fix this by not setting `[{tool_name}].interpreter_constraints`, "
"or by using a new custom lockfile. "
)
else:
yield (
f"`{tool_name}` determines its interpreter constraints based on your code's own "
"constraints. To fix this error, you can either change your code's constraints "
f"(see {doc_url('python-interpreter-compatibility')}) or by generating a new "
"custom lockfile. "
)
yield "\n"
yield "\n"
if not isinstance(requirements, Lockfile):
yield (
"To generate a custom lockfile based on your current configuration, set "
f"`[{tool_name}].lockfile` to where you want to create the lockfile, then run "
"`./pants generate-lockfiles`. "
)
else:
yield (
"To regenerate your lockfile based on your current configuration, run "
"`./pants generate-lockfiles --resolve={tool_name}`. "
)
message = "".join(message_parts()).strip()
if python_setup.invalid_lockfile_behavior == InvalidLockfileBehavior.error:
raise ValueError(message)
elif python_setup.invalid_lockfile_behavior == InvalidLockfileBehavior.warn:
logger.warning("%s", message)
|
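A minimal sketch of the generator-of-fragments pattern message_parts() uses above: yield the pieces conditionally, then join and strip them once at the end. The fragments and tool name are invented.

from typing import Iterable

def message_parts(tool_name: str, stale: bool) -> Iterable[str]:
    yield f"You are using the lockfile for `{tool_name}`, "
    yield "but it is not compatible with your configuration. "
    if stale:
        yield "Regenerate it with `./pants generate-lockfiles`. "

message = "".join(message_parts("flake8", stale=True)).strip()
print(message)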
23,074 |
def take(outname, inname, chunks, index, itemsize, axis=0):
"""Index array with an iterable of index
Handles a single index by a single list
Mimics ``np.take``
>>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], 8, axis=0)
>>> chunks
((2, 1, 1),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, (np.concatenate, [(getitem, ('x', 0), ([1, 3, 5],)),
(getitem, ('x', 2), ([7],))],
0),
(2, 0, 4, 1))}
When list is sorted we retain original block structure
>>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], 8, axis=0)
>>> chunks
((3, 1),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
('y', 2): (getitem, ('x', 2), ([7],))}
When any indexed blocks would otherwise grow larger than
dask.config.array.chunk-size, we might split them,
depending on the value of ``dask.config.slicing.split-large-chunks``.
>>> import dask
>>> with dask.config.set(**{"array.slicing.split-large-chunks": True}):
... chunks, dsk = take('y', 'x', [(1, 1, 1), (1000, 1000), (1000, 1000)],
... [0] + [1] * 6 + [2], axis=0, itemsize=8)
>>> chunks
((1, 3, 3, 1), (1000, 1000), (1000, 1000))
"""
from .core import PerformanceWarning
plan = slicing_plan(chunks[axis], index)
if len(plan) >= len(chunks[axis]) * 10:
factor = math.ceil(len(plan) / len(chunks[axis]))
warnings.warn(
"Slicing with an out-of-order index is generating %d "
"times more chunks" % factor,
PerformanceWarning,
stacklevel=6,
)
index = np.asarray(index)
# Check for chunks from the plan that would violate the user's
# configured chunk size.
nbytes = utils.parse_bytes(config.get("array.chunk-size"))
other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis]
other_numel = np.prod([sum(x) for x in other_chunks])
if math.isnan(other_numel):
warnsize = maxsize = math.inf
else:
maxsize = math.ceil(nbytes / (other_numel * itemsize))
warnsize = maxsize * 10
split = config.get("array.slicing.split-large-chunks", None)
# Warn only when the default is not specified.
warned = split is not None
for _, index_list in plan:
if not warned and len(index_list) > warnsize:
msg = (
"Slicing is producing a large chunk. To accept the large\n"
"chunk and silence this warning, set the option\n"
" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n"
" ... array[indexer]\n\n"
"To avoid creating the large chunks, set the option\n"
" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n"
" ... array[indexer]"
)
warnings.warn(msg, PerformanceWarning, stacklevel=6)
warned = True
where_index = []
index_lists = []
for where_idx, index_list in plan:
index_length = len(index_list)
if split and index_length > maxsize:
index_sublist = np.array_split(
index_list, math.ceil(index_length / maxsize)
)
index_lists.extend(index_sublist)
where_index.extend([where_idx] * len(index_sublist))
else:
index_lists.append(np.array(index_list))
where_index.append(where_idx)
dims = [range(len(bd)) for bd in chunks]
indims = list(dims)
indims[axis] = list(range(len(where_index)))
keys = list(product([outname], *indims))
outdims = list(dims)
outdims[axis] = where_index
slices = [[colon] * len(bd) for bd in chunks]
slices[axis] = index_lists
slices = list(product(*slices))
inkeys = list(product([inname], *outdims))
values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]
chunks2 = list(chunks)
chunks2[axis] = tuple(map(len, index_lists))
dsk = dict(zip(keys, values))
return tuple(chunks2), dsk
|
def take(outname, inname, chunks, index, itemsize, axis=0):
"""Index array with an iterable of index
Handles a single index by a single list
Mimics ``np.take``
>>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], 8, axis=0)
>>> chunks
((2, 1, 1),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, (np.concatenate, [(getitem, ('x', 0), ([1, 3, 5],)),
(getitem, ('x', 2), ([7],))],
0),
(2, 0, 4, 1))}
When list is sorted we retain original block structure
>>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], 8, axis=0)
>>> chunks
((3, 1),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
('y', 2): (getitem, ('x', 2), ([7],))}
When any indexed blocks would otherwise grow larger than
dask.config.array.chunk-size, we might split them,
depending on the value of ``dask.config.slicing.split-large-chunks``.
>>> import dask
>>> with dask.config.set({"array.slicing.split-large-chunks": True}):
... chunks, dsk = take('y', 'x', [(1, 1, 1), (1000, 1000), (1000, 1000)],
... [0] + [1] * 6 + [2], axis=0, itemsize=8)
>>> chunks
((1, 3, 3, 1), (1000, 1000), (1000, 1000))
"""
from .core import PerformanceWarning
plan = slicing_plan(chunks[axis], index)
if len(plan) >= len(chunks[axis]) * 10:
factor = math.ceil(len(plan) / len(chunks[axis]))
warnings.warn(
"Slicing with an out-of-order index is generating %d "
"times more chunks" % factor,
PerformanceWarning,
stacklevel=6,
)
index = np.asarray(index)
# Check for chunks from the plan that would violate the user's
# configured chunk size.
nbytes = utils.parse_bytes(config.get("array.chunk-size"))
other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis]
other_numel = np.prod([sum(x) for x in other_chunks])
if math.isnan(other_numel):
warnsize = maxsize = math.inf
else:
maxsize = math.ceil(nbytes / (other_numel * itemsize))
warnsize = maxsize * 10
split = config.get("array.slicing.split-large-chunks", None)
# Warn only when the default is not specified.
warned = split is not None
for _, index_list in plan:
if not warned and len(index_list) > warnsize:
msg = (
"Slicing is producing a large chunk. To accept the large\n"
"chunk and silence this warning, set the option\n"
" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n"
" ... array[indexer]\n\n"
"To avoid creating the large chunks, set the option\n"
" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n"
" ... array[indexer]"
)
warnings.warn(msg, PerformanceWarning, stacklevel=6)
warned = True
where_index = []
index_lists = []
for where_idx, index_list in plan:
index_length = len(index_list)
if split and index_length > maxsize:
index_sublist = np.array_split(
index_list, math.ceil(index_length / maxsize)
)
index_lists.extend(index_sublist)
where_index.extend([where_idx] * len(index_sublist))
else:
index_lists.append(np.array(index_list))
where_index.append(where_idx)
dims = [range(len(bd)) for bd in chunks]
indims = list(dims)
indims[axis] = list(range(len(where_index)))
keys = list(product([outname], *indims))
outdims = list(dims)
outdims[axis] = where_index
slices = [[colon] * len(bd) for bd in chunks]
slices[axis] = index_lists
slices = list(product(*slices))
inkeys = list(product([inname], *outdims))
values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]
chunks2 = list(chunks)
chunks2[axis] = tuple(map(len, index_lists))
dsk = dict(zip(keys, values))
return tuple(chunks2), dsk
|
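A hedged sketch, assuming dask is installed, of the configuration usage the doctest change above is about: dask.config.set accepts a plain mapping positionally, so the `**{...}` unpacking in the old doctest is unnecessary.

import dask

with dask.config.set({"array.slicing.split-large-chunks": True}):
    print(dask.config.get("array.slicing.split-large-chunks"))   # True inside the context
# outside the context manager the key reverts to whatever was configured before
print(dask.config.get("array.slicing.split-large-chunks", None))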
8,801 |
def _join_event_processing(bot):
"""Process a batch of JOIN event from the ``join_events_queue`` queue.
    Every time this function is executed, it processes at most ``throttle_join``
    JOIN events: for each, it sends a WHO request to know more about the
channel. This will prevent an excess of flood when there are too many
channels to join at once.
"""
batch_size = max(bot.settings.core.throttle_join, 1)
for _ in range(batch_size):
try:
channel = bot.memory['join_events_queue'].popleft()
except IndexError:
break
LOGGER.debug('Send WHO after JOIN channel: %s', channel)
_send_who(bot, channel)
|
def _join_event_processing(bot):
"""Process a batch of JOIN event from the ``join_events_queue`` queue.
    Every time this function is executed, it processes at most ``throttle_join``
    JOIN events: for each, it sends a WHO request to know more about the
channel. This will prevent an excess of flood when there are too many
channels to join at once.
"""
batch_size = max(bot.settings.core.throttle_join, 1)
for _ in range(batch_size):
try:
channel = bot.memory['join_events_queue'].popleft()
except IndexError:
break
LOGGER.debug('Sending WHO after JOIN channel: %s', channel)
_send_who(bot, channel)
|
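A standalone sketch of the batch-drain pattern above: pop at most batch_size items from a deque and stop early on IndexError once the queue empties. The channel names are invented.

from collections import deque

join_events_queue = deque(["#python", "#sopel", "#irc"])   # invented channels
batch_size = 2
for _ in range(batch_size):
    try:
        channel = join_events_queue.popleft()
    except IndexError:
        break   # queue drained before the batch filled up
    print("WHO", channel)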
32,007 |
def get_images_scan_list(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Get the images scan list.
Implement the command 'prisma-cloud-compute-images-scan-list'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-images-scan-list command arguments
Returns:
CommandResults: command-results object.
"""
args["limit"], args["offset"] = parse_limit_and_offset_values(
limit=args.pop("limit_record", "10"), offset=args.get("offset", "0")
)
stats_limit, _ = parse_limit_and_offset_values(limit=args.pop("limit_stats", "10"))
args["compact"] = argToBoolean(value=args.get("compact", "true"))
if images_scans := client.get_images_scan_info(params=assign_params(**args)):
for scan in images_scans:
if "vulnerabilities" in scan:
scan["vulnerabilities"] = filter_api_response(
api_response=scan.get("vulnerabilities"), limit=stats_limit
)
if vulnerabilities := scan.get("vulnerabilities"):
for vuln in vulnerabilities:
if "fixDate" in vuln:
vuln["fixDate"] = epochs_to_timestamp(epochs=vuln.get("fixDate", 0))
if "complianceIssues" in scan:
scan["complianceIssues"] = filter_api_response(
api_response=scan.get("complianceIssues"), limit=stats_limit
)
if compliances := scan.get("complianceIssues"):
for compliance in compliances:
if "fixDate" in compliance:
compliance["fixDate"] = epochs_to_timestamp(epochs=compliance.get("fixDate", 0))
image_description_table = tableToMarkdown(
name="Image description",
t=get_image_descriptions(images_scans=images_scans),
headers=["ID", "Image", "OS Distribution", "Vulnerabilities Count", "Compliance Issues Count"],
removeNull=True
)
if len(images_scans) == 1: # then there is only one image scan report
if args.get("compact", True):
                # if compact is True, the API will filter
# the response and send back only vulnerability/compliance statistics
vuln_statistics_table = tableToMarkdown(
name="Vulnerability Statistics",
t=images_scans[0].get("vulnerabilityDistribution"),
headers=["critical", "high", "medium", "low"],
removeNull=True,
headerTransform=lambda word: word[0].upper() + word[1:]
)
compliance_statistics_table = tableToMarkdown(
name="Compliance Statistics",
t=images_scans[0].get("complianceDistribution"),
headers=["critical", "high", "medium", "low"],
removeNull=True,
headerTransform=lambda word: word[0].upper() + word[1:]
)
table = image_description_table + vuln_statistics_table + compliance_statistics_table
else:
# handle the case where there is an image scan without vulnerabilities
vulnerabilities = images_scans[0].get("vulnerabilities")
if vulnerabilities is None:
vulnerabilities = []
vulnerabilities_table = tableToMarkdown(
name="Vulnerabilities",
t=vulnerabilities,
headers=["cve", "description", "severity", "packageName", "status", "fixDate"],
removeNull=True,
headerTransform=pascalToSpace,
)
# handle the case where there is an image scan without compliances
compliances = images_scans[0].get("complianceIssues")
if compliances is None:
compliances = []
compliances_table = tableToMarkdown(
name="Compliances",
t=compliances,
headers=["id", "severity", "status", "description", "packageName", "fixDate"],
removeNull=True,
headerTransform=pascalToSpace
)
table = image_description_table + vulnerabilities_table + compliances_table
else:
table = image_description_table
else:
table = "No results found"
return CommandResults(
outputs_prefix="PrismaCloudCompute.ReportsImagesScan",
outputs_key_field="id",
outputs=images_scans,
readable_output=table,
)
|
def get_images_scan_list(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Get the images scan list.
Implement the command 'prisma-cloud-compute-images-scan-list'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-images-scan-list command arguments
Returns:
CommandResults: command-results object.
"""
args["limit"], args["offset"] = parse_limit_and_offset_values(
limit=args.pop("limit_record", "10"), offset=args.get("offset", "0")
)
stats_limit, _ = parse_limit_and_offset_values(limit=args.pop("limit_stats", "10"))
args["compact"] = argToBoolean(value=args.get("compact", "true"))
if images_scans := client.get_images_scan_info(params=assign_params(**args)):
for scan in images_scans:
if "vulnerabilities" in scan:
scan["vulnerabilities"] = filter_api_response(
api_response=scan.get("vulnerabilities"), limit=stats_limit
)
if vulnerabilities := scan.get("vulnerabilities"):
for vuln in vulnerabilities:
if "fixDate" in vuln:
vuln["fixDate"] = epochs_to_timestamp(epochs=vuln.get("fixDate", 0))
if "complianceIssues" in scan:
scan["complianceIssues"] = filter_api_response(
api_response=scan.get("complianceIssues"), limit=stats_limit
)
if compliances := scan.get("complianceIssues"):
for compliance in compliances:
if "fixDate" in compliance:
compliance["fixDate"] = epochs_to_timestamp(epochs=compliance.get("fixDate", 0))
image_description_table = tableToMarkdown(
name="Image description",
t=get_image_descriptions(images_scans=images_scans),
headers=["ID", "Image", "OS Distribution", "Vulnerabilities Count", "Compliance Issues Count"],
removeNull=True
)
if len(images_scans) == 1: # then there is only one image scan report
if args.get("compact", True):
                # if compact is True, the API will filter
# the response and send back only vulnerability/compliance statistics
vuln_statistics_table = tableToMarkdown(
name="Vulnerability Statistics",
t=images_scans[0].get("vulnerabilityDistribution"),
headers=["critical", "high", "medium", "low"],
removeNull=True,
headerTransform=lambda word: word[0].upper() + word[1:]
)
compliance_statistics_table = tableToMarkdown(
name="Compliance Statistics",
t=images_scans[0].get("complianceDistribution"),
headers=["critical", "high", "medium", "low"],
removeNull=True,
headerTransform=lambda word: word[0].upper() + word[1:]
)
table = image_description_table + vuln_statistics_table + compliance_statistics_table
else:
# handle the case where there is an image scan without vulnerabilities
vulnerabilities = images_scans[0].get("vulnerabilities")
if vulnerabilities is None:
vulnerabilities = []
vulnerabilities_table = tableToMarkdown(
name="Vulnerabilities",
t=vulnerabilities,
headers=["cve", "description", "severity", "packageName", "status", "fixDate"],
removeNull=True,
headerTransform=pascalToSpace,
)
# handle the case where there is an image scan without compliances
compliances = images_scans[0].get("complianceIssues")
if compliances is None:
compliances = []
compliances_table = tableToMarkdown(
name="Compliances",
t=compliances,
headers=["id", "severity", "status", "description", "packageName", "fixDate"],
removeNull=True,
headerTransform=pascalToSpace
)
table = image_description_table + vulnerabilities_table + compliances_table
else:
table = image_description_table
else:
table = "No results found."
return CommandResults(
outputs_prefix="PrismaCloudCompute.ReportsImagesScan",
outputs_key_field="id",
outputs=images_scans,
readable_output=table,
)
|
31,192 |
def assign_incidents_command(client, args):
incident_ids = args.get('incident_ids')
new_assignee = args.get('new_assignee')
result = client.assign_incidents(incident_ids, new_assignee)
if not result.get('success'):
raise DemistoException(result['message'])
msg = result.get('message')
markdown = "### " + msg
results = CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Incidents.assign',
outputs_key_field='',
outputs=msg
)
return results
|
def assign_incidents_command(client, args):
incident_ids = argToList(args.get('incident_ids'))
new_assignee = args.get('new_assignee')
result = client.assign_incidents(incident_ids, new_assignee)
if not result.get('success'):
raise DemistoException(result['message'])
msg = result.get('message')
markdown = "### " + msg
results = CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Incidents.assign',
outputs_key_field='',
outputs=msg
)
return results
|
33,790 |
def ingress(app: Union["FastAPI", "APIRouter", Callable]):
"""Mark a ASGI application ingress for Serve.
Args:
        app (FastAPI,APIRouter,Starlette, etc): the app or router object to serve
as ingress for this backend. It can be any ASGI compatible object.
Example:
>>> app = FastAPI()
>>> @serve.deployment
@serve.ingress(app)
class App:
pass
>>> App.deploy()
"""
def decorator(cls):
if not inspect.isclass(cls):
raise ValueError("@serve.ingress must be used with a class.")
if issubclass(cls, collections.abc.Callable):
raise ValueError(
"Class passed to @serve.ingress may not have __call__ method.")
# Sometimes there are decorators on the methods. We want to fix
# the fast api routes here.
if isinstance(app, (FastAPI, APIRouter)):
make_fastapi_class_based_view(app, cls)
# Free the state of the app so subsequent modification won't affect
# this ingress deployment. We don't use copy.copy here to avoid
# recursion issue.
frozen_app = cloudpickle.loads(cloudpickle.dumps(app))
startup_hook, shutdown_hook = make_startup_shutdown_hooks(frozen_app)
class FastAPIWrapper(cls):
async def __init__(self, *args, **kwargs):
# TODO(edoakes): should the startup_hook run before or after
# the constructor?
await startup_hook()
super().__init__(*args, **kwargs)
async def __call__(self, request: Request):
sender = ASGIHTTPSender()
await frozen_app(
request.scope,
request._receive,
sender,
)
return sender.build_starlette_response()
def __del__(self):
asyncio.get_event_loop().run_until_complete(shutdown_hook())
FastAPIWrapper.__name__ = cls.__name__
return FastAPIWrapper
return decorator
|
def ingress(app: Union["FastAPI", "APIRouter", Callable]):
"""Mark an ASGI application ingress for Serve.
Args:
        app (FastAPI,APIRouter,Starlette, etc): the app or router object to serve
as ingress for this backend. It can be any ASGI compatible object.
Example:
>>> app = FastAPI()
>>> @serve.deployment
@serve.ingress(app)
class App:
pass
>>> App.deploy()
"""
def decorator(cls):
if not inspect.isclass(cls):
raise ValueError("@serve.ingress must be used with a class.")
if issubclass(cls, collections.abc.Callable):
raise ValueError(
"Class passed to @serve.ingress may not have __call__ method.")
# Sometimes there are decorators on the methods. We want to fix
# the fast api routes here.
if isinstance(app, (FastAPI, APIRouter)):
make_fastapi_class_based_view(app, cls)
# Free the state of the app so subsequent modification won't affect
# this ingress deployment. We don't use copy.copy here to avoid
# recursion issue.
frozen_app = cloudpickle.loads(cloudpickle.dumps(app))
startup_hook, shutdown_hook = make_startup_shutdown_hooks(frozen_app)
class FastAPIWrapper(cls):
async def __init__(self, *args, **kwargs):
# TODO(edoakes): should the startup_hook run before or after
# the constructor?
await startup_hook()
super().__init__(*args, **kwargs)
async def __call__(self, request: Request):
sender = ASGIHTTPSender()
await frozen_app(
request.scope,
request._receive,
sender,
)
return sender.build_starlette_response()
def __del__(self):
asyncio.get_event_loop().run_until_complete(shutdown_hook())
FastAPIWrapper.__name__ = cls.__name__
return FastAPIWrapper
return decorator
|
28,959 |
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
state: str = MISSING
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
.. versionchanged:: 2.0
``permissions``, ``guild``, ``redirect_uri``, ``scopes`` and ``state`` parameters
are now keyword-only.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot', 'applications.commands')``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
state: :class:`str`
The state to return after the authorization.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += '&scope=' + '+'.join(scopes or ('bot', 'applications.commands'))
if permissions is not MISSING:
url += f'&permissions={permissions.value}'
if guild is not MISSING:
url += f'&guild_id={guild.id}'
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += '&response_type=code&' + urlencode({'redirect_uri': redirect_uri})
if disable_guild_select:
url += '&disable_guild_select=true'
if state is not MISSING:
url += f'&state={state}'
return url
|
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
state: str = MISSING
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
.. versionchanged:: 2.0
``permissions``, ``guild``, ``redirect_uri``, and ``scopes`` parameters
are now keyword-only.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot', 'applications.commands')``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
state: :class:`str`
The state to return after the authorization.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += '&scope=' + '+'.join(scopes or ('bot', 'applications.commands'))
if permissions is not MISSING:
url += f'&permissions={permissions.value}'
if guild is not MISSING:
url += f'&guild_id={guild.id}'
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += '&response_type=code&' + urlencode({'redirect_uri': redirect_uri})
if disable_guild_select:
url += '&disable_guild_select=true'
if state is not MISSING:
url += f'&state={state}'
return url
|
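A self-contained sketch of the URL assembly performed above, with an invented client ID and redirect URI; urlencode handles the percent-escaping of the redirect target.

from urllib.parse import urlencode

client_id = 1234567890                               # invented
scopes = ('bot', 'applications.commands')
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += '&scope=' + '+'.join(scopes)
url += '&response_type=code&' + urlencode({'redirect_uri': 'https://example.com/callback'})
print(url)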
32,130 |
def parse_file_report(file_hash, reports, file_info, extended_data: bool):
udp_ip = []
udp_port = []
network_udp = []
tcp_ip = []
tcp_port = []
network_tcp = []
dns_query = []
dns_response = []
network_dns = []
evidence_md5 = []
evidence_text = []
process_list_outputs = []
process_tree_outputs = []
entry_summary = []
extract_urls_outputs = []
elf_shell_commands = []
feed_related_indicators = []
platform_report = []
software_report = []
behavior = []
relationships = []
network_url = []
# When only one report is in response, it's returned as a single json object and not a list.
if not isinstance(reports, list):
reports = [reports]
for report in reports:
if 'network' in report and report["network"]:
if 'UDP' in report["network"]:
udp_objects = report["network"]["UDP"]
if not isinstance(udp_objects, list):
udp_objects = [udp_objects]
for udp_obj in udp_objects:
if '@ip' in udp_obj and udp_obj['@ip']:
udp_ip.append(udp_obj["@ip"])
feed_related_indicators.append({'value': udp_obj["@ip"], 'type': 'IP'})
relationships.append(create_relationship('related-to', (file_hash, udp_obj["@ip"]), ('file', 'ip')))
if '@port' in udp_obj:
udp_port.append(udp_obj["@port"])
if extended_data:
if network_udp_dict := parse(report=udp_obj,
keys=[('@ip', 'IP'), ('@port', 'Port'),
('@country', 'Country'), ('@ja3', 'JA3'),
('@ja3s', 'JA3S')]):
network_udp.append(network_udp_dict)
if 'TCP' in report["network"]:
tcp_objects = report["network"]["TCP"]
if not isinstance(tcp_objects, list):
tcp_objects = [tcp_objects]
for tcp_obj in tcp_objects:
if '@ip' in tcp_obj and tcp_obj['@ip']:
tcp_ip.append(tcp_obj["@ip"])
feed_related_indicators.append({'value': tcp_obj["@ip"], 'type': 'IP'})
relationships.append(create_relationship('related-to', (file_hash, tcp_obj["@ip"]), ('file', 'ip')))
if '@port' in tcp_obj:
tcp_port.append(tcp_obj['@port'])
if extended_data:
if network_tcp_dict := parse(report=tcp_obj,
keys=[('@ip', 'IP'), ('@port', 'Port'),
('@country', 'Country'), ('@ja3', 'JA3'),
('@ja3s', 'JA3S')]):
network_tcp.append(network_tcp_dict)
if 'dns' in report["network"]:
dns_objects = report["network"]["dns"]
if not isinstance(dns_objects, list):
dns_objects = [dns_objects]
for dns_obj in dns_objects:
if '@query' in dns_obj and dns_obj['@query']:
dns_query.append(dns_obj['@query'])
if '@response' in dns_obj and dns_obj['@response']:
dns_response.append(dns_obj['@response'])
if extended_data:
if network_dns_dict := parse(report=dns_obj,
keys=[('@query', 'Query'), ('@response', 'Response'),
('@type', 'Type')]):
network_dns.append(network_dns_dict)
if 'url' in report["network"]:
url = ''
if '@host' in report["network"]["url"]:
url = report["network"]["url"]["@host"]
if '@uri' in report["network"]["url"]:
url += report["network"]["url"]["@uri"]
if url:
feed_related_indicators.append({'value': url, 'type': 'URL'})
relationships.append(create_relationship('related-to', (file_hash, url.rstrip('/')), ('file', 'url')))
if extended_data:
if network_url_dict := parse(report=report["network"]['url'],
keys=[('@host', 'Host'), ('@uri', 'URI'),
('@method', 'Method'), ('@user_agent', 'UserAgent')]):
network_url.append(network_url_dict)
if 'evidence' in report and report["evidence"]:
if 'file' in report["evidence"]:
if isinstance(report["evidence"]["file"], dict) and 'entry' in report["evidence"]["file"]:
if '@md5' in report["evidence"]["file"]["entry"]:
evidence_md5.append(report["evidence"]["file"]["entry"]["@md5"])
if '@text' in report["evidence"]["file"]["entry"]:
evidence_text.append(report["evidence"]["file"]["entry"]["@text"])
if 'elf_info' in report and report["elf_info"]:
if 'Domains' in report["elf_info"]:
if isinstance(report["elf_info"]["Domains"], dict) and 'entry' in report["elf_info"]["Domains"]:
entry = report["elf_info"]["Domains"]["entry"]
# when there is only one entry, it is returned as a single string not a list
if not isinstance(entry, list):
entry = [entry]
for domain in entry:
feed_related_indicators.append({'value': domain, 'type': 'Domain'})
relationships.append(create_relationship('related-to', (file_hash, domain), ('file', 'domain')))
if 'IP_Addresses' in report["elf_info"]:
if isinstance(report["elf_info"]["IP_Addresses"], dict) and 'entry' in \
report["elf_info"]["IP_Addresses"]:
entry = report["elf_info"]["IP_Addresses"]["entry"]
# when there is only one entry, it is returned as a single string not a list
if not isinstance(entry, list):
entry = [entry]
for ip in entry:
feed_related_indicators.append({'value': ip, 'type': 'IP'})
relationships.append(create_relationship('related-to', (file_hash, ip), ('file', 'ip')))
if 'suspicious' in report["elf_info"]:
if isinstance(report["elf_info"]["suspicious"], dict) and 'entry' in report["elf_info"]['suspicious']:
entry = report["elf_info"]["suspicious"]["entry"]
# when there is only one entry, it is returned as a single json not a list
if not isinstance(entry, list):
entry = [entry]
for entry_obj in entry:
if '#text' in entry_obj and '@description' in entry_obj:
behavior.append({'details': entry_obj['#text'], 'action': entry_obj['@description']})
if 'URLs' in report["elf_info"]:
if isinstance(report["elf_info"]["URLs"], dict) and 'entry' in report["elf_info"]['URLs']:
entry = report["elf_info"]["URLs"]["entry"]
# when there is only one entry, it is returned as a single string not a list
if not isinstance(entry, list):
entry = [entry]
for url in entry:
feed_related_indicators.append({'value': url, 'type': 'URL'})
relationships.append(create_relationship('related-to', (file_hash, url), ('file', 'url')))
if extended_data:
if 'Shell_Commands' in report['elf_info'] and 'entry' in report['elf_info']['Shell_Commands'] and \
report['elf_info']['Shell_Commands']['entry']:
elf_shell_commands.append(report['elf_info']['Shell_Commands']['entry'])
if extended_data:
if 'process_list' in report and 'process' in report['process_list'] and report['process_list']['process']:
process_list = report['process_list']['process']
if not isinstance(process_list, list):
process_list = [process_list]
for process in process_list:
if process_list_dict := parse(report=process,
keys=[("@command", "ProcessCommand"),
("@name", "ProcessName"),
("@pid", "ProcessPid"),
("file", "ProcessFile"),
("service", "Service")]):
process_list_outputs.append(process_list_dict)
if 'process_tree' in report and 'process' in report['process_tree'] and report['process_tree']['process']:
process_tree = report['process_tree']['process']
if not isinstance(process_tree, list):
process_tree = [process_tree]
for process in process_tree:
tree_outputs = {}
if process_tree_dict := parse(report=process,
keys=[('@text', "ProcessText"),
('@name', "ProcessName"),
('@pid', "ProcessPid")]):
tree_outputs = process_tree_dict
if 'child' in process and 'process' in process['child'] and process['child']['process']:
child_process = process['child']['process']
if not isinstance(child_process, list):
child_process = [child_process]
for child in child_process:
if process_tree_child_dict := parse(report=child,
keys=[('@text', "ChildText"),
('@name', "ChildName"),
('@pid', "ChildPid")]):
tree_outputs['Process'] = process_tree_child_dict
if tree_outputs:
process_tree_outputs.append(tree_outputs)
if 'summary' in report and 'entry' in report['summary'] and report['summary']['entry']:
entries = report['summary']['entry']
if not isinstance(entries, list):
entries = [entries]
for entry in entries:
if entry_summary_dict := parse(report=entry,
keys=[('#text', "Text"),
('@details', "Details"),
('@behavior', "Behavior")]):
entry_summary.append(entry_summary_dict)
if 'extracted_urls' in report and report['extracted_urls'] and 'entry' in report['extracted_urls'] \
and report['extracted_urls']['entry']:
extract_urls = report['extracted_urls']['entry']
if not isinstance(extract_urls, list):
extract_urls = [extract_urls]
for urls in extract_urls:
if extract_urls_dict := parse(report=urls,
keys=[('@url', "URL"),
('@verdict', "Verdict")]):
extract_urls_outputs.append(extract_urls_dict)
if 'platform' in report:
platform_report.append(report['platform'])
if 'software' in report:
software_report.append(report['software'])
outputs = {
'Status': 'Success',
'SHA256': file_info.get('sha256')
}
if len(udp_ip) > 0 or len(udp_port) > 0 or len(tcp_ip) > 0 or len(tcp_port) > 0 or dns_query or dns_response:
outputs["Network"] = {}
if len(udp_ip) > 0 or len(udp_port) > 0:
outputs["Network"]["UDP"] = {}
if len(udp_ip) > 0:
outputs["Network"]["UDP"]["IP"] = udp_ip
if len(udp_port) > 0:
outputs["Network"]["UDP"]["Port"] = udp_port
if len(tcp_ip) > 0 or len(tcp_port) > 0:
outputs["Network"]["TCP"] = {}
if len(tcp_ip) > 0:
outputs["Network"]["TCP"]["IP"] = tcp_ip
if len(tcp_port) > 0:
outputs["Network"]["TCP"]["Port"] = tcp_port
if len(dns_query) > 0 or len(dns_response) > 0:
outputs["Network"]["DNS"] = {}
if len(dns_query) > 0:
outputs["Network"]["DNS"]["Query"] = dns_query
if len(dns_response) > 0:
outputs["Network"]["DNS"]["Response"] = dns_response
if network_udp or network_tcp or network_dns or network_url:
outputs['NetworkInfo'] = {}
if network_udp:
outputs['NetworkInfo']['UDP'] = network_udp
if network_tcp:
outputs['NetworkInfo']['TCP'] = network_tcp
if network_dns:
outputs['NetworkInfo']['DNS'] = network_dns
if network_url:
outputs['NetworkInfo']['URL'] = network_url
if platform_report:
outputs['Platform'] = platform_report
if software_report:
outputs['Software'] = software_report
if process_list_outputs:
outputs['ProcessList'] = process_list_outputs
if process_tree_outputs:
outputs['ProcessTree'] = process_tree_outputs
if entry_summary:
outputs['Summary'] = entry_summary
if extract_urls_outputs:
outputs['ExtractedURL'] = extract_urls_outputs
if elf_shell_commands:
outputs['ELF'] = {}
outputs['ELF']['ShellCommands'] = elf_shell_commands
if len(evidence_md5) > 0 or len(evidence_text) > 0:
outputs["Evidence"] = {}
if len(evidence_md5) > 0:
outputs["Evidence"]["md5"] = evidence_md5
if len(evidence_text) > 0:
outputs["Evidence"]["Text"] = evidence_text
feed_related_indicators = create_feed_related_indicators_object(feed_related_indicators)
behavior = create_behaviors_object(behavior)
relationships = relationships if CREATE_RELATIONSHIPS else None
return outputs, feed_related_indicators, behavior, relationships
|
def parse_file_report(file_hash, reports, file_info, extended_data: bool):
udp_ip = []
udp_port = []
network_udp = []
tcp_ip = []
tcp_port = []
network_tcp = []
dns_query = []
dns_response = []
network_dns = []
evidence_md5 = []
evidence_text = []
process_list_outputs = []
process_tree_outputs = []
entry_summary = []
extract_urls_outputs = []
elf_shell_commands = []
feed_related_indicators = []
platform_report = []
software_report = []
behavior = []
relationships = []
network_url = []
# When only one report is in response, it's returned as a single json object and not a list.
if not isinstance(reports, list):
reports = [reports]
for report in reports:
if 'network' in report and report["network"]:
if 'UDP' in report["network"]:
udp_objects = report["network"]["UDP"]
if not isinstance(udp_objects, list):
udp_objects = [udp_objects]
for udp_obj in udp_objects:
if '@ip' in udp_obj and udp_obj['@ip']:
udp_ip.append(udp_obj["@ip"])
feed_related_indicators.append({'value': udp_obj["@ip"], 'type': 'IP'})
relationships.append(create_relationship('related-to', (file_hash, udp_obj["@ip"]), ('file', 'ip')))
if '@port' in udp_obj:
udp_port.append(udp_obj["@port"])
if extended_data:
if network_udp_dict := parse(report=udp_obj,
keys=[('@ip', 'IP'), ('@port', 'Port'),
('@country', 'Country'), ('@ja3', 'JA3'),
('@ja3s', 'JA3S')]):
network_udp.append(network_udp_dict)
if 'TCP' in report["network"]:
tcp_objects = report["network"]["TCP"]
if not isinstance(tcp_objects, list):
tcp_objects = [tcp_objects]
for tcp_obj in tcp_objects:
if '@ip' in tcp_obj and tcp_obj['@ip']:
tcp_ip.append(tcp_obj["@ip"])
feed_related_indicators.append({'value': tcp_obj["@ip"], 'type': 'IP'})
relationships.append(create_relationship('related-to', (file_hash, tcp_obj["@ip"]), ('file', 'ip')))
if '@port' in tcp_obj:
tcp_port.append(tcp_obj['@port'])
if extended_data:
if network_tcp_dict := parse(report=tcp_obj,
keys=[('@ip', 'IP'), ('@port', 'Port'),
('@country', 'Country'), ('@ja3', 'JA3'),
('@ja3s', 'JA3S')]):
network_tcp.append(network_tcp_dict)
if 'dns' in report["network"]:
dns_objects = report["network"]["dns"]
if not isinstance(dns_objects, list):
dns_objects = [dns_objects]
for dns_obj in dns_objects:
if '@query' in dns_obj and dns_obj['@query']:
dns_query.append(dns_obj['@query'])
if '@response' in dns_obj and dns_obj['@response']:
dns_response.append(dns_obj['@response'])
if extended_data:
if network_dns_dict := parse(report=dns_obj,
keys=[('@query', 'Query'), ('@response', 'Response'),
('@type', 'Type')]):
network_dns.append(network_dns_dict)
if 'url' in report["network"]:
url = ''
if '@host' in report["network"]["url"]:
url = report["network"]["url"]["@host"]
if '@uri' in report["network"]["url"]:
url += report["network"]["url"]["@uri"]
if url:
feed_related_indicators.append({'value': url, 'type': 'URL'})
relationships.append(create_relationship('related-to', (file_hash, url.rstrip('/')), ('file', 'url')))
if extended_data:
if network_url_dict := parse(report=report["network"]['url'],
keys=[('@host', 'Host'), ('@uri', 'URI'),
('@method', 'Method'), ('@user_agent', 'UserAgent')]):
network_url.append(network_url_dict)
if 'evidence' in report and report["evidence"]:
if 'file' in report["evidence"]:
if isinstance(report["evidence"]["file"], dict) and 'entry' in report["evidence"]["file"]:
if '@md5' in report["evidence"]["file"]["entry"]:
evidence_md5.append(report["evidence"]["file"]["entry"]["@md5"])
if '@text' in report["evidence"]["file"]["entry"]:
evidence_text.append(report["evidence"]["file"]["entry"]["@text"])
if 'elf_info' in report and report["elf_info"]:
if 'Domains' in report["elf_info"]:
if isinstance(report["elf_info"]["Domains"], dict) and 'entry' in report["elf_info"]["Domains"]:
entry = report["elf_info"]["Domains"]["entry"]
# when there is only one entry, it is returned as a single string not a list
if not isinstance(entry, list):
entry = [entry]
for domain in entry:
feed_related_indicators.append({'value': domain, 'type': 'Domain'})
relationships.append(create_relationship('related-to', (file_hash, domain), ('file', 'domain')))
if 'IP_Addresses' in report["elf_info"]:
if isinstance(report["elf_info"]["IP_Addresses"], dict) and 'entry' in \
report["elf_info"]["IP_Addresses"]:
entry = report["elf_info"]["IP_Addresses"]["entry"]
# when there is only one entry, it is returned as a single string not a list
if not isinstance(entry, list):
entry = [entry]
for ip in entry:
feed_related_indicators.append({'value': ip, 'type': 'IP'})
relationships.append(create_relationship('related-to', (file_hash, ip), ('file', 'ip')))
if 'suspicious' in report["elf_info"]:
if isinstance(report["elf_info"]["suspicious"], dict) and 'entry' in report["elf_info"]['suspicious']:
entry = report["elf_info"]["suspicious"]["entry"]
# when there is only one entry, it is returned as a single json not a list
if not isinstance(entry, list):
entry = [entry]
for entry_obj in entry:
if '#text' in entry_obj and '@description' in entry_obj:
behavior.append({'details': entry_obj['#text'], 'action': entry_obj['@description']})
if 'URLs' in report["elf_info"]:
if isinstance(report["elf_info"]["URLs"], dict) and 'entry' in report["elf_info"]['URLs']:
entry = report["elf_info"]["URLs"]["entry"]
# when there is only one entry, it is returned as a single string not a list
if not isinstance(entry, list):
entry = [entry]
for url in entry:
feed_related_indicators.append({'value': url, 'type': 'URL'})
relationships.append(create_relationship('related-to', (file_hash, url), ('file', 'url')))
if extended_data:
if 'Shell_Commands' in report['elf_info'] and 'entry' in report['elf_info']['Shell_Commands'] and \
report['elf_info']['Shell_Commands']['entry']:
elf_shell_commands.append(report['elf_info']['Shell_Commands']['entry'])
if extended_data:
if process_list := demisto.get(report, 'process_list.process'):
if not isinstance(process_list, list):
process_list = [process_list]
for process in process_list:
if process_list_dict := parse(report=process,
keys=[("@command", "ProcessCommand"),
("@name", "ProcessName"),
("@pid", "ProcessPid"),
("file", "ProcessFile"),
("service", "Service")]):
process_list_outputs.append(process_list_dict)
if 'process_tree' in report and 'process' in report['process_tree'] and report['process_tree']['process']:
process_tree = report['process_tree']['process']
if not isinstance(process_tree, list):
process_tree = [process_tree]
for process in process_tree:
tree_outputs = {}
if process_tree_dict := parse(report=process,
keys=[('@text', "ProcessText"),
('@name', "ProcessName"),
('@pid', "ProcessPid")]):
tree_outputs = process_tree_dict
if 'child' in process and 'process' in process['child'] and process['child']['process']:
child_process = process['child']['process']
if not isinstance(child_process, list):
child_process = [child_process]
for child in child_process:
if process_tree_child_dict := parse(report=child,
keys=[('@text', "ChildText"),
('@name', "ChildName"),
('@pid', "ChildPid")]):
tree_outputs['Process'] = process_tree_child_dict
if tree_outputs:
process_tree_outputs.append(tree_outputs)
if 'summary' in report and 'entry' in report['summary'] and report['summary']['entry']:
entries = report['summary']['entry']
if not isinstance(entries, list):
entries = [entries]
for entry in entries:
if entry_summary_dict := parse(report=entry,
keys=[('#text', "Text"),
('@details', "Details"),
('@behavior', "Behavior")]):
entry_summary.append(entry_summary_dict)
if 'extracted_urls' in report and report['extracted_urls'] and 'entry' in report['extracted_urls'] \
and report['extracted_urls']['entry']:
extract_urls = report['extracted_urls']['entry']
if not isinstance(extract_urls, list):
extract_urls = [extract_urls]
for urls in extract_urls:
if extract_urls_dict := parse(report=urls,
keys=[('@url', "URL"),
('@verdict', "Verdict")]):
extract_urls_outputs.append(extract_urls_dict)
if 'platform' in report:
platform_report.append(report['platform'])
if 'software' in report:
software_report.append(report['software'])
outputs = {
'Status': 'Success',
'SHA256': file_info.get('sha256')
}
if len(udp_ip) > 0 or len(udp_port) > 0 or len(tcp_ip) > 0 or len(tcp_port) > 0 or dns_query or dns_response:
outputs["Network"] = {}
if len(udp_ip) > 0 or len(udp_port) > 0:
outputs["Network"]["UDP"] = {}
if len(udp_ip) > 0:
outputs["Network"]["UDP"]["IP"] = udp_ip
if len(udp_port) > 0:
outputs["Network"]["UDP"]["Port"] = udp_port
if len(tcp_ip) > 0 or len(tcp_port) > 0:
outputs["Network"]["TCP"] = {}
if len(tcp_ip) > 0:
outputs["Network"]["TCP"]["IP"] = tcp_ip
if len(tcp_port) > 0:
outputs["Network"]["TCP"]["Port"] = tcp_port
if len(dns_query) > 0 or len(dns_response) > 0:
outputs["Network"]["DNS"] = {}
if len(dns_query) > 0:
outputs["Network"]["DNS"]["Query"] = dns_query
if len(dns_response) > 0:
outputs["Network"]["DNS"]["Response"] = dns_response
if network_udp or network_tcp or network_dns or network_url:
outputs['NetworkInfo'] = {}
if network_udp:
outputs['NetworkInfo']['UDP'] = network_udp
if network_tcp:
outputs['NetworkInfo']['TCP'] = network_tcp
if network_dns:
outputs['NetworkInfo']['DNS'] = network_dns
if network_url:
outputs['NetworkInfo']['URL'] = network_url
if platform_report:
outputs['Platform'] = platform_report
if software_report:
outputs['Software'] = software_report
if process_list_outputs:
outputs['ProcessList'] = process_list_outputs
if process_tree_outputs:
outputs['ProcessTree'] = process_tree_outputs
if entry_summary:
outputs['Summary'] = entry_summary
if extract_urls_outputs:
outputs['ExtractedURL'] = extract_urls_outputs
if elf_shell_commands:
outputs['ELF'] = {}
outputs['ELF']['ShellCommands'] = elf_shell_commands
if len(evidence_md5) > 0 or len(evidence_text) > 0:
outputs["Evidence"] = {}
if len(evidence_md5) > 0:
outputs["Evidence"]["md5"] = evidence_md5
if len(evidence_text) > 0:
outputs["Evidence"]["Text"] = evidence_text
feed_related_indicators = create_feed_related_indicators_object(feed_related_indicators)
behavior = create_behaviors_object(behavior)
relationships = relationships if CREATE_RELATIONSHIPS else None
return outputs, feed_related_indicators, behavior, relationships
|
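Two idioms recur in parse_file_report above: normalizing a value that may be a single object into a list before iterating, and (in the modified version) reading a nested key with a dotted path via demisto.get. A standalone sketch of both, with helper names of my own choosing rather than the XSOAR API:

def ensure_list(value):
    # The report's XML-to-JSON conversion yields a single object when there is
    # exactly one element and a list otherwise; normalize to a list.
    if value is None:
        return []
    return value if isinstance(value, list) else [value]

def get_nested(obj, dotted_path, default=None):
    # Walk a dict along 'a.b.c', stopping early if any step is missing.
    current = obj
    for part in dotted_path.split('.'):
        if not isinstance(current, dict) or part not in current:
            return default
        current = current[part]
    return current

report = {'process_list': {'process': {'@name': 'cmd.exe', '@pid': '42'}}}
for process in ensure_list(get_nested(report, 'process_list.process')):
    print(process['@name'], process['@pid'])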
20,437 |
def get_public_ips(protocol=4):
"""Retrieve a list (sorted by frequency) of different public IP addresses from the IPmirrors"""
ip_url_yunohost_tab = settings_get("security.ipmirrors.v"+str(protocol)).split(",")
ip_count = {} # Count the number of times an IP has appeared
# Check URLS
for url in ip_url_yunohost_tab:
logger.debug("Fetching IP from %s " % url)
try:
ip = download_text(url, timeout=30).strip()
if ip in ip_count.keys():
ip_count[ip]+=1
else:
ip_count[ip]=1
except Exception as e:
logger.debug(
"Could not get public IPv%s from %s : %s" % (str(protocol), url, str(e))
)
ip_list_with_count = [ (ip,ip_count[ip]) for ip in ip_count ]
ip_list_with_count.sort(key=lambda x: x[1]) # Sort by frequency
return [ x[0] for x in ip_list_with_count ]
|
def get_public_ips(protocol=4):
"""Retrieve a list (sorted by frequency) of different public IP addresses from the IPmirrors"""
ip_url_yunohost_tab = settings_get("security.ipmirrors.v"+str(protocol)).split(",")
ip_count = {} # Count the number of times an IP has appeared
# Check URLS
for url in ip_url_yunohost_tab:
logger.debug("Fetching IP from %s " % url)
try:
ip = download_text(url, timeout=15).strip()
if ip in ip_count.keys():
ip_count[ip]+=1
else:
ip_count[ip]=1
except Exception as e:
logger.debug(
"Could not get public IPv%s from %s : %s" % (str(protocol), url, str(e))
)
ip_list_with_count = [ (ip,ip_count[ip]) for ip in ip_count ]
ip_list_with_count.sort(key=lambda x: x[1]) # Sort by frequency
return [ x[0] for x in ip_list_with_count ]
|
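The manual dictionary counting in get_public_ips can also be written with collections.Counter; a small sketch under the assumption that each mirror reported one IP string (the addresses below are made-up documentation-range values, and most_common orders results from most to least frequent):

from collections import Counter

def rank_ips(observed_ips):
    # Count how often each non-empty IP was reported, most frequent first.
    counts = Counter(ip for ip in observed_ips if ip)
    return [ip for ip, _ in counts.most_common()]

print(rank_ips(["203.0.113.7", "203.0.113.7", "203.0.113.7", "198.51.100.9"]))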
31,389 |
def bang_ip(client: Client, score_calculator: ScoreCalculator, args: dict) -> CommandResults:
"""
1 API Call for regular
1-4 API Calls for premium subscriptions
"""
ip = args['ip']
raise_if_ip_not_valid(ip)
raw_response = client.ip(ip)
score = score_calculator.ip_score(ip, raw_response)
if score != Common.DBotScore.BAD and client.is_premium:
score = score_calculator.analyze_premium_ip_score(client, ip, score)
logs = score_calculator.get_logs()
demisto.debug(logs)
data = raw_response['data']
attributes = data['attributes']
ip_standard = {
'Address': ip,
'ASN': attributes.get('asn'),
'Geo': assign_params(
Country=attributes.get('country')
),
'Vendor': 'VirusTotal'
}
if score == Common.DBotScore.BAD:
ip_standard['Malicious'] = {
'Vendor': INTEGRATION_NAME,
'Description': logs
}
outputs = {
f'{INTEGRATION_ENTRY_CONTEXT}.IP(val.id && val.id === obj.id)': data,
**Common.DBotScore(
ip,
DBotScoreType.IP,
INTEGRATION_NAME,
score
).to_context(),
Common.IP.CONTEXT_PATH: ip_standard
}
last_analysis_stats = data['attributes']["last_analysis_stats"]
malicious = last_analysis_stats['malicious']
total = sum(last_analysis_stats.values())
return CommandResults(
readable_output=tableToMarkdown(
'IP reputation:',
{
**data,
**attributes,
'last_modified': epoch_to_timestamp(attributes['last_modification_date']),
'positives': f'{malicious}/{total}'
},
headers=['id', 'network', 'country', 'last_modified', 'reputation', 'positives']
),
outputs=outputs,
raw_response=raw_response
)
|
def ip_command(client: Client, score_calculator: ScoreCalculator, args: dict) -> CommandResults:
"""
1 API Call for regular
1-4 API Calls for premium subscriptions
"""
ip = args['ip']
raise_if_ip_not_valid(ip)
raw_response = client.ip(ip)
score = score_calculator.ip_score(ip, raw_response)
if score != Common.DBotScore.BAD and client.is_premium:
score = score_calculator.analyze_premium_ip_score(client, ip, score)
logs = score_calculator.get_logs()
demisto.debug(logs)
data = raw_response['data']
attributes = data['attributes']
ip_standard = {
'Address': ip,
'ASN': attributes.get('asn'),
'Geo': assign_params(
Country=attributes.get('country')
),
'Vendor': 'VirusTotal'
}
if score == Common.DBotScore.BAD:
ip_standard['Malicious'] = {
'Vendor': INTEGRATION_NAME,
'Description': logs
}
outputs = {
f'{INTEGRATION_ENTRY_CONTEXT}.IP(val.id && val.id === obj.id)': data,
**Common.DBotScore(
ip,
DBotScoreType.IP,
INTEGRATION_NAME,
score
).to_context(),
Common.IP.CONTEXT_PATH: ip_standard
}
last_analysis_stats = data['attributes']["last_analysis_stats"]
malicious = last_analysis_stats['malicious']
total = sum(last_analysis_stats.values())
return CommandResults(
readable_output=tableToMarkdown(
'IP reputation:',
{
**data,
**attributes,
'last_modified': epoch_to_timestamp(attributes['last_modification_date']),
'positives': f'{malicious}/{total}'
},
headers=['id', 'network', 'country', 'last_modified', 'reputation', 'positives']
),
outputs=outputs,
raw_response=raw_response
)
|
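The readable output above condenses VirusTotal's last_analysis_stats into a positives string; a minimal sketch of that calculation on a hand-written stats dictionary (the numbers are invented):

def positives_string(last_analysis_stats):
    # 'malicious' is one verdict bucket; the denominator counts every engine verdict.
    malicious = last_analysis_stats['malicious']
    total = sum(last_analysis_stats.values())
    return f'{malicious}/{total}'

stats = {'harmless': 70, 'malicious': 4, 'suspicious': 1, 'undetected': 10, 'timeout': 0}
print(positives_string(stats))  # -> 4/85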
31,217 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
try:
command = demisto.command()
demisto.debug(f'Orca Command being called is {command}')
api_key = demisto.params().get('apikey')
client = BaseClient(
base_url=ORCA_API_DNS_NAME,
verify=True,
headers={
'Authorization': f'Bearer {api_key}'
},
proxy=True)
orca_client = OrcaClient(client=client)
if command == "get-alerts":
alerts = orca_client.get_alerts_by_type(alert_type=demisto.args().get('alert_type'))
return_results(alerts)
elif command == "get-asset":
asset = orca_client.get_asset(asset_unique_id=demisto.args()['asset_unique_id'])
return_results(asset)
elif command == "fetch-incidents":
fetch_incidents(orca_client)
else:
raise NotImplementedError(f'{command} is not an existing Orca command')
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
try:
command = demisto.command()
demisto.debug(f'Orca Command being called is {command}')
api_key = demisto.params().get('apikey')
client = BaseClient(
base_url=ORCA_API_DNS_NAME,
verify=True,
headers={
'Authorization': f'Bearer {api_key}'
},
proxy=True)
orca_client = OrcaClient(client=client)
if command == "orca-get-alerts":
alerts = orca_client.get_alerts_by_type(alert_type=demisto.args().get('alert_type'))
return_results(alerts)
elif command == "get-asset":
asset = orca_client.get_asset(asset_unique_id=demisto.args()['asset_unique_id'])
return_results(asset)
elif command == "fetch-incidents":
fetch_incidents(orca_client)
else:
raise NotImplementedError(f'{command} is not an existing Orca command')
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
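The if/elif chain in main() is a straightforward command dispatch; an equivalent, framework-free sketch using a dictionary of handlers (the command names and handler bodies here are illustrative, not the Orca integration itself):

def get_alerts(args):
    return f"alerts of type {args.get('alert_type')}"

def get_asset(args):
    return f"asset {args['asset_unique_id']}"

COMMANDS = {
    "orca-get-alerts": get_alerts,
    "get-asset": get_asset,
}

def dispatch(command, args):
    handler = COMMANDS.get(command)
    if handler is None:
        # Mirrors the NotImplementedError branch above.
        raise NotImplementedError(f"{command} is not an existing command")
    return handler(args)

print(dispatch("get-asset", {"asset_unique_id": "vm-123"}))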
39,840 |
def specaugment(mel_spectrogram, frequency_masking_para=27,
time_masking_para=100, frequency_masking_num=1, time_masking_num=1):
"""Spec augmentation Calculation Function.
'SpecAugment' have 3 steps for audio data augmentation.
first step is time warping using Tensorflow's image_sparse_warp function.
Second step is frequency masking, last step is time masking.
# Arguments:
mel_spectrogram(numpy array): mel spectrogram you want to warp and mask.
time_warping_para(float): Augmentation parameter, "time warp parameter W".
If none, default = 80 for LibriSpeech.
frequency_masking_para(float): Augmentation parameter, "frequency mask parameter F".
If none, default = 27 for LibriSpeech.
time_masking_para(float): Augmentation parameter, "time mask parameter T".
If none, default = 100 for LibriSpeech.
frequency_mask_num(float): number of frequency masking lines, "m_F".
If none, default = 1 for LibriSpeech.
time_mask_num(float): number of time masking lines, "m_T".
If none, default = 1 for LibriSpeech.
# Returns
mel_spectrogram(numpy array): warped and masked mel spectrogram.
"""
tau, v = mel_spectrogram.size()
# Step 1 : Frequency masking (masks can overlap)
for i in range(frequency_masking_num):
f = np.random.uniform(low=0.0, high=frequency_masking_para)
f = int(f)
f0 = random.randint(0, v - f)
mel_spectrogram[:, f0:f0 + f] = 0
# Step 2 : Time masking (masks can overlap)
for i in range(time_masking_num):
t = np.random.uniform(low=1.0, high=min(time_masking_para, tau))
t = int(t)
t0 = random.randint(0, tau - t)
mel_spectrogram[t0:t0 + t, :] = 0
return mel_spectrogram
|
def specaugment(mel_spectrogram, frequency_masking_para=27,
time_masking_para=100, frequency_masking_num=1, time_masking_num=1):
"""Spec augmentation Calculation Function.
'SpecAugment' has 3 steps for audio data augmentation.
first step is time warping using Tensorflow's image_sparse_warp function.
Second step is frequency masking, last step is time masking.
# Arguments:
mel_spectrogram(numpy array): mel spectrogram you want to warp and mask.
time_warping_para(float): Augmentation parameter, "time warp parameter W".
If none, default = 80 for LibriSpeech.
frequency_masking_para(float): Augmentation parameter, "frequency mask parameter F".
If none, default = 27 for LibriSpeech.
time_masking_para(float): Augmentation parameter, "time mask parameter T".
If none, default = 100 for LibriSpeech.
frequency_mask_num(float): number of frequency masking lines, "m_F".
If none, default = 1 for LibriSpeech.
time_mask_num(float): number of time masking lines, "m_T".
If none, default = 1 for LibriSpeech.
# Returns
mel_spectrogram(numpy array): warped and masked mel spectrogram.
"""
tau, v = mel_spectrogram.size()
# Step 1 : Frequency masking (masks can overlap)
for i in range(frequency_masking_num):
f = np.random.uniform(low=0.0, high=frequency_masking_para)
f = int(f)
f0 = random.randint(0, v - f)
mel_spectrogram[:, f0:f0 + f] = 0
# Step 2 : Time masking (masks can overlap)
for i in range(time_masking_num):
t = np.random.uniform(low=1.0, high=min(time_masking_para, tau))
t = int(t)
t0 = random.randint(0, tau - t)
mel_spectrogram[t0:t0 + t, :] = 0
return mel_spectrogram
|
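A self-contained numpy sketch of the two masking steps that specaugment performs (the function above takes a torch tensor and uses .size(); here a plain numpy array of shape (time, mel_bins) stands in, and the names are illustrative):

import random
import numpy as np

def mask_spectrogram(mel, freq_para=27, time_para=100, n_freq_masks=1, n_time_masks=1):
    tau, v = mel.shape  # (time steps, mel bins)
    for _ in range(n_freq_masks):
        f = int(np.random.uniform(0.0, freq_para))
        f0 = random.randint(0, v - f)
        mel[:, f0:f0 + f] = 0  # zero out a band of mel bins
    for _ in range(n_time_masks):
        t = int(np.random.uniform(1.0, min(time_para, tau)))
        t0 = random.randint(0, tau - t)
        mel[t0:t0 + t, :] = 0  # zero out a span of time steps
    return mel

mel = np.random.rand(400, 80).astype(np.float32)
masked = mask_spectrogram(mel.copy())
print(masked.shape, float((masked == 0).mean()))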
25,999 |
def load_arguments(self, _):
(JsonWebKeyOperation, JsonWebKeyType, SasTokenType,
SasDefinitionAttributes, SecretAttributes, CertificateAttributes, StorageAccountAttributes) = self.get_models(
'JsonWebKeyOperation', 'JsonWebKeyType', 'SasTokenType',
'SasDefinitionAttributes', 'SecretAttributes', 'CertificateAttributes', 'StorageAccountAttributes',
resource_type=ResourceType.DATA_KEYVAULT)
KeyCurveName = self.get_sdk('KeyCurveName', resource_type=ResourceType.DATA_KEYVAULT_KEYS, mod='_enums')
EncryptionAlgorithm = self.get_sdk('EncryptionAlgorithm', resource_type=ResourceType.DATA_KEYVAULT_KEYS, mod='crypto._enums')
class CLIJsonWebKeyOperation(str, Enum):
encrypt = "encrypt"
decrypt = "decrypt"
sign = "sign"
verify = "verify"
wrap_key = "wrapKey"
unwrap_key = "unwrapKey"
import_ = "import"
export = "export"
JsonWebKeyOperation = CLIJsonWebKeyOperation # TODO: Remove this patch when new SDK is released
class CLIJsonWebKeyType(str, Enum):
ec = "EC" #: Elliptic Curve.
ec_hsm = "EC-HSM" #: Elliptic Curve with a private key which is not exportable from the HSM.
rsa = "RSA" #: RSA (https://tools.ietf.org/html/rfc3447)
rsa_hsm = "RSA-HSM" #: RSA with a private key which is not exportable from the HSM.
oct = "oct" #: Octet sequence (used to represent symmetric keys)
oct_hsm = "oct-HSM" #: Oct with a private key which is not exportable from the HSM.
JsonWebKeyType = CLIJsonWebKeyType # TODO: Remove this patch when new SDK is released
class CLIKeyTypeForBYOKImport(str, Enum):
ec = "EC" #: Elliptic Curve.
rsa = "RSA" #: RSA (https://tools.ietf.org/html/rfc3447)
oct = "oct" #: Octet sequence (used to represent symmetric keys)
class CLISecurityDomainOperation(str, Enum):
download = "download" #: Download operation
upload = "upload" #: Upload operation
(KeyPermissions, SecretPermissions, CertificatePermissions, StoragePermissions,
NetworkRuleBypassOptions, NetworkRuleAction, PublicNetworkAccess) = self.get_models(
'KeyPermissions', 'SecretPermissions', 'CertificatePermissions', 'StoragePermissions',
'NetworkRuleBypassOptions', 'NetworkRuleAction', 'PublicNetworkAccess',
resource_type=ResourceType.MGMT_KEYVAULT)
# ARGUMENT DEFINITIONS
vault_name_type = CLIArgumentType(
help='Name of the Vault.', options_list=['--vault-name'], metavar='NAME', id_part=None,
completer=get_resource_name_completion_list('Microsoft.KeyVault/vaults'))
deleted_vault_name_type = CLIArgumentType(
help='Name of the deleted Vault.', options_list=['--vault-name'], metavar='NAME', id_part=None)
hsm_name_type = CLIArgumentType(help='Name of the HSM.',
options_list=['--hsm-name'], id_part=None)
hsm_url_type = CLIArgumentType(help='Name of the HSM.', type=get_hsm_base_url_type(self.cli_ctx),
options_list=['--hsm-name'], id_part=None)
mgmt_plane_hsm_name_type = CLIArgumentType(help='Name of the HSM. (--hsm-name and --name/-n are mutually '
'exclusive, please specify just one of them)',
options_list=['--hsm-name'], id_part=None,
validator=validate_vault_name_and_hsm_name)
data_plane_hsm_name_type = CLIArgumentType(help='Name of the HSM. (--hsm-name and --vault-name are '
'mutually exclusive, please specify just one of them)',
type=get_hsm_base_url_type(self.cli_ctx),
options_list=['--hsm-name'], id_part=None,
validator=set_vault_base_url)
deleted_hsm_name_type = CLIArgumentType(help='Name of the deleted HSM. (--hsm-name and --name/-n are '
'mutually exclusive, please specify just one of them)',
options_list=['--hsm-name'], id_part=None,
validator=validate_vault_name_and_hsm_name)
# region vault (management)
with self.argument_context('keyvault') as c:
c.argument('resource_group_name', resource_group_name_type, id_part=None, required=False,
help='Proceed only if Key Vault belongs to the specified resource group.',
validator=validate_resource_group_name)
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'])
c.argument('object_id', help='a GUID that identifies the principal that will receive permissions')
c.argument('spn', help='name of a service principal that will receive permissions')
c.argument('upn', help='name of a user principal that will receive permissions')
c.argument('tags', tags_type)
c.argument('enabled_for_deployment', arg_type=get_three_state_flag(),
help='[Vault Only] Property to specify whether Azure Virtual Machines are permitted to retrieve '
'certificates stored as secrets from the key vault.')
c.argument('enabled_for_disk_encryption', arg_type=get_three_state_flag(),
help='[Vault Only] Property to specify whether Azure Disk Encryption is permitted to retrieve '
'secrets from the vault and unwrap keys.')
c.argument('enabled_for_template_deployment', arg_type=get_three_state_flag(),
help='[Vault Only] Property to specify whether Azure Resource Manager is permitted to retrieve '
'secrets from the key vault.')
c.argument('enable_rbac_authorization', arg_type=get_three_state_flag())
c.argument('enable_soft_delete', arg_type=get_three_state_flag(), deprecate_info=c.deprecate(
message_func=lambda x: 'Warning! The ability to create new key vaults with soft delete disabled will be '
'deprecated by December 2020. All key vaults will be required to have soft delete '
'enabled. Please see the following documentation for additional guidance.\n'
'https://docs.microsoft.com/azure/key-vault/general/soft-delete-change'),
help='[Vault Only] Property to specify whether the \'soft delete\' functionality is enabled for '
'this key vault. If it\'s not set to any value (true or false) when creating new key vault, it '
'will be set to true by default. Once set to true, it cannot be reverted to false.')
c.argument('enable_purge_protection', arg_type=get_three_state_flag())
c.argument('public_network_access', arg_type=get_enum_type(PublicNetworkAccess),
help="Property to specify whether the vault will accept traffic from public internet. If set to "
"'disabled' all traffic except private endpoint traffic and that originates from trusted "
"services will be blocked. This will override the set firewall rules, meaning that even if the "
"firewall rules are present we will not honor the rules.")
with self.argument_context('keyvault', arg_group='Network Rule', min_api='2018-02-14') as c:
c.argument('bypass', arg_type=get_enum_type(NetworkRuleBypassOptions),
help='Bypass traffic for space-separated uses.')
c.argument('default_action', arg_type=get_enum_type(NetworkRuleAction),
help='Default action to apply when no rule matches.')
for item in ['show', 'delete', 'create']:
with self.argument_context('keyvault {}'.format(item)) as c:
c.argument('hsm_name', mgmt_plane_hsm_name_type)
with self.argument_context('keyvault create') as c:
c.argument('resource_group_name', resource_group_name_type, required=True, completer=None, validator=None)
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'])
c.argument('administrators', nargs='+',
help='[HSM Only] Administrator role for data plane operations for Managed HSM. '
'It accepts a space separated list of OIDs that will be assigned.')
c.argument('sku', help='Required. SKU details. Allowed values for Vault: premium, standard. Default: standard.'
' Allowed values for HSM: Standard_B1, Custom_B32. Default: Standard_B1')
c.argument('no_self_perms', arg_type=get_three_state_flag(),
help='[Vault Only] Don\'t add permissions for the current user/service principal in the new vault.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('retention_days', help='Soft delete data retention days. It accepts >=7 and <=90.', default='90')
with self.argument_context('keyvault create', arg_group='Network Rule') as c:
c.argument('network_acls', type=validate_file_or_dict,
help='Network ACLs. It accepts a JSON filename or a JSON string. JSON format: '
'`{\\"ip\\":[<ip1>, <ip2>...],\\"vnet\\":[<vnet_name_1>/<subnet_name_1>,<subnet_id2>...]}`')
c.argument('network_acls_ips', nargs='*', help='Network ACLs IP rules. Space-separated list of IP addresses.')
c.argument('network_acls_vnets', nargs='*', help='Network ACLS VNet rules. Space-separated list of '
'Vnet/subnet pairs or subnet resource ids.')
with self.argument_context('keyvault update') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'])
c.argument('retention_days', help='Soft delete data retention days. It accepts >=7 and <=90.')
with self.argument_context('keyvault update-hsm') as c:
c.argument('name', hsm_name_type)
c.argument('enable_purge_protection', options_list=['--enable-purge-protection', '-e'])
c.argument('secondary_locations', nargs='+',
help='--secondary-locations extends/contracts an HSM pool to listed regions. The primary location '
'where the resource was originally created CANNOT be removed.')
with self.argument_context('keyvault wait-hsm') as c:
c.argument('hsm_name', hsm_name_type)
c.argument('resource_group_name', options_list=['--resource-group', '-g'],
help='Proceed only if HSM belongs to the specified resource group.')
with self.argument_context('keyvault recover') as c:
c.argument('vault_name', deleted_vault_name_type, options_list=['--name', '-n'],
validator=validate_deleted_vault_or_hsm_name)
c.argument('hsm_name', deleted_hsm_name_type)
c.argument('resource_group_name', resource_group_name_type, id_part=None, required=False,
help='Resource group of the deleted Vault or HSM')
c.argument('location', help='Location of the deleted Vault or HSM', required=False)
with self.argument_context('keyvault purge') as c:
c.argument('vault_name', deleted_vault_name_type, options_list=['--name', '-n'],
validator=validate_deleted_vault_or_hsm_name)
c.argument('hsm_name', deleted_hsm_name_type)
c.argument('location', help='Location of the deleted Vault or HSM', required=False)
with self.argument_context('keyvault list') as c:
c.argument('resource_group_name', resource_group_name_type, validator=None)
c.argument('resource_type', help='When --resource-type is not present the command will list all Vaults and HSMs.'
' Possible values for --resource-type are vault and hsm.')
with self.argument_context('keyvault list-deleted') as c:
c.argument('resource_type', help='When --resource-type is not present the command will list all deleted Vaults '
'and HSMs. Possible values for --resource-type are vault and hsm.')
with self.argument_context('keyvault show-deleted') as c:
c.argument('vault_name', deleted_vault_name_type, options_list=['--name', '-n'],
validator=validate_deleted_vault_or_hsm_name)
c.argument('hsm_name', deleted_hsm_name_type)
c.argument('location', help='Location of the deleted Vault or HSM', required=False)
for item in ['set-policy', 'delete-policy']:
with self.argument_context('keyvault {}'.format(item)) as c:
c.argument('object_id', validator=validate_principal)
c.argument('application_id', help='Application ID of the client making request on behalf of a principal. '
'Exposed for compound identity using on-behalf-of authentication flow.')
with self.argument_context('keyvault set-policy', arg_group='Permission') as c:
c.argument('key_permissions', arg_type=get_enum_type(KeyPermissions), metavar='PERM', nargs='*',
help='Space-separated list of key permissions to assign.', validator=validate_policy_permissions)
c.argument('secret_permissions', arg_type=get_enum_type(SecretPermissions), metavar='PERM', nargs='*',
help='Space-separated list of secret permissions to assign.')
c.argument('certificate_permissions', arg_type=get_enum_type(CertificatePermissions), metavar='PERM', nargs='*',
help='Space-separated list of certificate permissions to assign.')
c.argument('storage_permissions', arg_type=get_enum_type(StoragePermissions), metavar='PERM', nargs='*',
help='Space-separated list of storage permissions to assign.')
with self.argument_context('keyvault network-rule', min_api='2018-02-14') as c:
c.argument('ip_address', help='IPv4 address or CIDR range.')
c.argument('subnet', help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet)
with self.argument_context('keyvault network-rule add', min_api='2018-02-14') as c:
c.argument('ip_address', nargs='*', help='IPv4 address or CIDR range. Can supply a list: --ip-address ip1 [ip2]...', validator=validate_ip_address)
for item in ['approve', 'reject', 'delete', 'show', 'wait']:
with self.argument_context('keyvault private-endpoint-connection {}'.format(item), min_api='2018-02-14') as c:
c.extra('connection_id', options_list=['--id'], required=False,
help='The ID of the private endpoint connection associated with the Key Vault/HSM. '
'If specified --vault-name/--hsm-name and --name/-n, this should be omitted.')
c.argument('description', help='Comments for the {} operation.'.format(item))
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'], required=False,
help='The name of the private endpoint connection associated with the Key Vault/HSM. '
'Required if --id is not specified')
c.argument('vault_name', vault_name_type, required=False,
help='Name of the Key Vault. Required if --id is not specified')
c.argument('hsm_name', mgmt_plane_hsm_name_type, min_api='2021-04-01-preview',
help='Name of the HSM. Required if --id is not specified.'
'(--hsm-name and --vault-name are mutually exclusive, please specify just one of them)')
with self.argument_context('keyvault private-endpoint-connection list') as c:
c.argument("hsm_name", hsm_name_type)
with self.argument_context('keyvault private-link-resource', min_api='2018-02-14', max_api='2020-04-01-preview') as c:
c.argument('vault_name', vault_name_type, required=True)
with self.argument_context('keyvault private-link-resource', min_api='2021-04-01-preview') as c:
c.argument('vault_name', vault_name_type)
c.argument('hsm_name', mgmt_plane_hsm_name_type)
# endregion
# region Shared
for item in ['secret', 'certificate']:
with self.argument_context('keyvault ' + item, arg_group='Id') as c:
c.argument(item + '_name', options_list=['--name', '-n'], help='Name of the {}.'.format(item),
id_part='child_name_1', completer=get_keyvault_name_completion_list(item))
c.argument('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx), id_part=None)
c.argument(item + '_version', options_list=['--version', '-v'],
help='The {} version. If omitted, uses the latest version.'.format(item), default='',
required=False, completer=get_keyvault_version_completion_list(item))
for cmd in ['backup', 'decrypt', 'delete', 'download', 'encrypt', 'list-versions', 'set-attributes', 'show',
'list']:
with self.argument_context('keyvault {} {}'.format(item, cmd), arg_group='Id') as c:
try:
if cmd in ['list']:
c.extra('identifier', options_list=['--id'],
help='Full URI of the Vault or HSM. '
'If specified all other \'Id\' arguments should be omitted.',
validator=validate_vault_or_hsm)
else:
c.extra('identifier', options_list=['--id'],
help='Id of the {}. '
'If specified all other \'Id\' arguments should be omitted.'.format(item),
validator=validate_key_id(item))
except ValueError:
pass
c.argument(item + '_name', help='Name of the {}. Required if --id is not specified.'.format(item),
required=False)
c.argument('vault_base_url', vault_name_type, required=False,
help='Name of the Key Vault. Required if --id is not specified.')
c.argument(item + '_version', required=False)
for cmd in ['purge', 'recover', 'show-deleted']:
with self.argument_context('keyvault {} {}'.format(item, cmd), arg_group='Id') as c:
c.extra('identifier', options_list=['--id'],
help='The recovery id of the {}. '
'If specified all other \'Id\' arguments should be omitted.'.format(item),
validator=validate_key_id('deleted' + item))
c.argument(item + '_name', help='Name of the {}. Required if --id is not specified.'.format(item),
required=False)
c.argument('vault_base_url', help='Name of the Vault. Required if --id is not specified.',
required=False)
c.argument(item + '_version', required=False)
for cmd in ['list', 'list-deleted']:
with self.argument_context('keyvault {} {}'.format(item, cmd)) as c:
c.argument('include_pending', arg_type=get_three_state_flag())
with self.argument_context('keyvault {} {}'.format(item, cmd), arg_group='Id') as c:
if cmd in ['list-deleted']:
c.extra('identifier', options_list=['--id'],
help='Full URI of the Vault{}. '
'If specified all other \'Id\' arguments should be '
'omitted.'.format(' or HSM' if item == 'key' else ''),
validator=validate_vault_or_hsm)
# endregion
# region keys
# keys track1
with self.argument_context('keyvault key') as c:
c.argument('key_ops', arg_type=get_enum_type(JsonWebKeyOperation), options_list=['--ops'], nargs='*',
help='Space-separated list of permitted JSON web key operations.')
for item in ['delete', 'list', 'list-deleted', 'list-versions', 'purge', 'recover', 'show-deleted']:
with self.argument_context('keyvault key {}'.format(item), arg_group='Id') as c:
c.ignore('cls')
c.argument('key_name', options_list=['--name', '-n'], required=False, id_part='child_name_1',
completer=get_keyvault_name_completion_list('key'),
help='Name of the key. Required if --id is not specified.')
c.argument('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx),
id_part=None, required=False)
c.argument('key_version', options_list=['--version', '-v'],
help='The key version. If omitted, uses the latest version.', default='',
required=False, completer=get_keyvault_version_completion_list('key'))
c.extra('identifier', options_list=['--id'], validator=validate_key_id('key'),
help='Id of the key. If specified all other \'Id\' arguments should be omitted.')
c.extra('hsm_name', data_plane_hsm_name_type)
if item in ['list', 'list-deleted']:
c.extra('identifier', options_list=['--id'], validator=validate_vault_or_hsm,
help='Full URI of the Vault or HSM. If specified all other \'Id\' arguments should be omitted.')
elif item in ['show-deleted', 'purge', 'recover']:
c.extra('identifier', options_list=['--id'], validator=validate_key_id('deletedkey'),
help='The recovery id of the key. If specified all other \'Id\' arguments should be omitted.')
for item in ['backup', 'download']:
with self.argument_context('keyvault key {}'.format(item), arg_group='Id') as c:
c.argument('key_name', options_list=['--name', '-n'],
help='Name of the key. Required if --id is not specified.',
required=False, id_part='child_name_1', completer=get_keyvault_name_completion_list('key'))
c.argument('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx), id_part=None)
c.argument('key_version', options_list=['--version', '-v'],
help='The key version. If omitted, uses the latest version.', default='',
required=False, completer=get_keyvault_version_completion_list('key'))
c.argument('identifier', options_list=['--id'], validator=validate_key_id('key'),
help='Id of the key. If specified all other \'Id\' arguments should be omitted.')
c.argument('hsm_name', data_plane_hsm_name_type)
with self.argument_context('keyvault key backup') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local file path in which to store key backup.')
with self.argument_context('keyvault key download') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='File to receive the key contents.')
c.argument('encoding', arg_type=get_enum_type(key_format_values), options_list=['--encoding', '-e'],
help='Encoding of the key, default: PEM', default='PEM')
with self.argument_context('keyvault key restore', arg_group='Id') as c:
c.argument('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx), id_part=None)
c.argument('identifier', options_list=['--id'], validator=validate_vault_or_hsm,
help='Full URI of the Vault or HSM. If specified all other \'Id\' arguments should be omitted.')
c.argument('hsm_name', data_plane_hsm_name_type, validator=None)
with self.argument_context('keyvault key restore') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local key backup from which to restore key.')
with self.argument_context('keyvault key restore', arg_group='Storage Id') as c:
c.argument('storage_resource_uri', options_list=['--storage-resource-uri', '-u'],
help='Azure Blob storage container Uri. If specified, all '
'other \'Storage Id\' arguments should be omitted')
c.argument('storage_account_name', help='Name of Azure Storage Account.')
c.argument('blob_container_name', help='Name of Blob Container.')
with self.argument_context('keyvault key restore', arg_group='Restoring keys from storage account') as c:
c.argument('token', options_list=['--storage-container-SAS-token', '-t'],
help='The SAS token pointing to an Azure Blob storage container')
c.argument('backup_folder', help='Name of the blob container which contains the backup')
c.argument('key_name', options_list=['--name', '-n'],
help='Name of the key. (Only for restoring from storage account)')
for scope in ['list', 'list-deleted', 'list-versions']:
with self.argument_context('keyvault key {}'.format(scope)) as c:
c.argument('maxresults', options_list=['--maxresults'], type=int)
with self.argument_context('keyvault key list') as c:
c.extra('include_managed', arg_type=get_three_state_flag(), default=False,
help='Include managed keys. Default: false')
# keys track2
for scope in ['create', 'import', 'set-attributes', 'show', 'encrypt', 'decrypt',
'rotate', 'rotation-policy show', 'rotation-policy update']:
with self.argument_context('keyvault key {}'.format(scope), arg_group='Id') as c:
c.argument('name', options_list=['--name', '-n'], id_part='child_name_1',
required=False, completer=get_keyvault_name_completion_list('key'),
help='Name of the key. Required if --id is not specified.')
c.argument('version', options_list=['--version', '-v'],
help='The key version. If omitted, uses the latest version.', default='',
required=False, completer=get_keyvault_version_completion_list('key'))
c.extra('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx), id_part=None)
c.extra('hsm_name', data_plane_hsm_name_type, required=False)
c.extra('identifier', options_list=['--id'],
help='Id of the key. If specified all other \'Id\' arguments should be omitted.',
validator=validate_keyvault_resource_id('key'))
for item in ['create', 'import']:
with self.argument_context('keyvault key {}'.format(item)) as c:
c.argument('protection', arg_type=get_enum_type(['software', 'hsm']), options_list=['--protection', '-p'],
help='Specifies the type of key protection.')
c.argument('disabled', arg_type=get_three_state_flag(), help='Create key in disabled state.')
c.argument('key_size', options_list=['--size'], type=int,
help='The key size in bits. For example: 2048, 3072, or 4096 for RSA. 128, 192, or 256 for oct.')
c.argument('expires', default=None, help='Expiration UTC datetime (Y-m-d\'T\'H:M:S\'Z\').',
type=datetime_type)
c.argument('not_before', default=None, type=datetime_type,
help='Key not usable before the provided UTC datetime (Y-m-d\'T\'H:M:S\'Z\').')
c.argument('exportable', arg_type=get_three_state_flag(), is_preview=True,
help='Whether the private key can be exported. To create key with release policy, '
'"exportable" must be true and caller must have "export" permission.')
c.argument('release_policy', options_list=['--policy'], type=file_type, completer=FilesCompleter(),
validator=process_key_release_policy, is_preview=True,
help='The policy rules under which the key can be exported. '
'Policy definition as JSON, or a path to a file containing JSON policy definition.')
c.extra('immutable', arg_type=get_three_state_flag(), is_preview=True,
help='Mark a release policy as immutable. '
'An immutable release policy cannot be changed or updated after being marked immutable. '
'Release policies are mutable by default.')
with self.argument_context('keyvault key create') as c:
c.argument('kty', arg_type=get_enum_type(JsonWebKeyType), validator=validate_key_type,
help='The type of key to create. For valid values, see: https://docs.microsoft.com/en-us/rest/api/keyvault/keys/create-key/create-key#jsonwebkeytype')
c.argument('curve', arg_type=get_enum_type(KeyCurveName),
help='Elliptic curve name. For valid values, see: https://docs.microsoft.com/en-us/rest/api/keyvault/keys/create-key/create-key#jsonwebkeycurvename')
with self.argument_context('keyvault key import') as c:
c.argument('kty', arg_type=get_enum_type(CLIKeyTypeForBYOKImport), validator=validate_key_import_type,
help='The type of key to import (only for BYOK).')
c.argument('curve', arg_type=get_enum_type(KeyCurveName), validator=validate_key_import_type,
help='The curve name of the key to import (only for BYOK).')
with self.argument_context('keyvault key import', arg_group='Key Source') as c:
c.argument('pem_file', type=file_type, help='PEM file containing the key to be imported.', completer=FilesCompleter(), validator=validate_key_import_source)
c.argument('pem_string', type=file_type, help='PEM string containing the key to be imported.', validator=validate_key_import_source)
c.argument('pem_password', help='Password of PEM file.')
c.argument('byok_file', type=file_type, help='BYOK file containing the key to be imported. Must not be password protected.', completer=FilesCompleter(), validator=validate_key_import_source)
c.argument('byok_string', type=file_type, help='BYOK string containing the key to be imported. Must not be password protected.', validator=validate_key_import_source)
for scope in ['encrypt', 'decrypt']:
with self.argument_context('keyvault key {}'.format(scope)) as c:
c.argument('algorithm', options_list=['--algorithm', '-a'], arg_type=get_enum_type(EncryptionAlgorithm),
help='Algorithm identifier')
with self.argument_context('keyvault key encrypt') as c:
c.argument('value', help='The value to be encrypted. Default data type is Base64 encoded string.',
validator=validate_encryption)
c.extra('data_type', help='The type of the original data.', arg_type=get_enum_type(KeyEncryptionDataType),
default='base64')
c.argument('iv', help='Initialization vector. Required for only AES-CBC(PAD) encryption.')
c.argument('aad', help='Optional data that is authenticated but not encrypted. For use with AES-GCM encryption.')
with self.argument_context('keyvault key decrypt') as c:
c.argument('value', help='The value to be decrypted, which should be the result of "az keyvault encrypt"',
validator=validate_decryption)
c.extra('data_type', help='The type of the original data.', arg_type=get_enum_type(KeyEncryptionDataType),
default='base64')
c.argument('iv', help='The initialization vector used during encryption. Required for AES decryption.')
c.argument('aad', help='Optional data that is authenticated but not encrypted. For use with AES-GCM decryption.')
c.argument('tag', help='The authentication tag generated during encryption. Required for only AES-GCM decryption.')
with self.argument_context('keyvault key random') as c:
c.extra('hsm_name', hsm_url_type, arg_group='Id', required=False)
c.extra('identifier', options_list=['--id'], arg_group='Id',
help='Full URI of the HSM.', validator=validate_vault_or_hsm)
c.argument('count', type=int, help='The requested number of random bytes.')
with self.argument_context('keyvault key set-attributes') as c:
c.extra('enabled', help='Enable the key.', arg_type=get_three_state_flag())
c.extra('expires_on', options_list=['--expires'], default=None, type=datetime_type,
help='Expiration UTC datetime (Y-m-d\'T\'H:M:S\'Z\').')
c.extra('not_before', default=None, type=datetime_type,
help='Key not usable before the provided UTC datetime (Y-m-d\'T\'H:M:S\'Z\').')
c.extra('key_operations', arg_type=get_enum_type(JsonWebKeyOperation), options_list=['--ops'], nargs='*',
help='Space-separated list of permitted JSON web key operations.')
c.extra('release_policy', options_list=['--policy'], type=file_type, completer=FilesCompleter(),
validator=process_key_release_policy, is_preview=True,
help='The policy rules under which the key can be exported. '
'Policy definition as JSON, or a path to a file containing JSON policy definition.')
c.extra('immutable', arg_type=get_three_state_flag(), is_preview=True,
help='Mark a release policy as immutable. '
'An immutable release policy cannot be changed or updated after being marked immutable. '
'Release policies are mutable by default.')
c.extra('tags', tags_type)
with self.argument_context('keyvault key rotation-policy update') as c:
c.argument('value', type=file_type, completer=FilesCompleter(),
help='The rotation policy file definition as JSON, or a path to a file containing JSON policy definition.')
# endregion
# region KeyVault Secret
with self.argument_context('keyvault secret set') as c:
c.argument('content_type', options_list=['--description'],
help='Description of the secret contents (e.g. password, connection string, etc.).')
c.attributes_argument('secret', SecretAttributes, create=True)
with self.argument_context('keyvault secret set', arg_group='Content Source') as c:
c.argument('value', options_list=['--value'],
help="Plain text secret value. Cannot be used with '--file' or '--encoding'", required=False)
c.extra('file_path', options_list=['--file', '-f'], type=file_type,
help="Source file for secret. Use in conjunction with '--encoding'", completer=FilesCompleter())
c.extra('encoding', arg_type=get_enum_type(secret_encoding_values, default='utf-8'),
options_list=['--encoding', '-e'],
help='Source file encoding. The value is saved as a tag (`file-encoding=<val>`) '
'and used during download to automatically encode the resulting file.')
with self.argument_context('keyvault secret set-attributes') as c:
c.attributes_argument('secret', SecretAttributes)
with self.argument_context('keyvault secret download') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='File to receive the secret contents.')
c.argument('encoding', arg_type=get_enum_type(secret_encoding_values), options_list=['--encoding', '-e'],
help="Encoding of the secret. By default, will look for the 'file-encoding' tag on the secret. "
"Otherwise will assume 'utf-8'.", default=None)
for scope in ['backup', 'restore']:
with self.argument_context('keyvault secret {}'.format(scope)) as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='File to receive the secret contents.')
for scope in ['list', 'list-deleted', 'list-versions']:
with self.argument_context('keyvault secret {}'.format(scope)) as c:
c.argument('maxresults', options_list=['--maxresults'], type=int)
with self.argument_context('keyvault secret list') as c:
c.extra('include_managed', arg_type=get_three_state_flag(), default=False,
help='Include managed secrets. Default: false')
# endregion
# region keyvault security-domain
for scope in ['init-recovery', 'download', 'upload']:
with self.argument_context('keyvault security-domain {}'.format(scope), arg_group='HSM Id') as c:
c.argument('hsm_name', hsm_url_type, required=False,
help='Name of the HSM. Can be omitted if --id is specified.')
c.extra('identifier', options_list=['--id'], validator=validate_vault_or_hsm, help='Full URI of the HSM.')
c.ignore('vault_base_url')
with self.argument_context('keyvault security-domain init-recovery') as c:
c.argument('sd_exchange_key', help='Local file path to store the exported key.')
with self.argument_context('keyvault security-domain upload') as c:
c.argument('sd_file', help='File containing the security domain, encrypted using the SD Exchange file '
'downloaded by the "security-domain init-recovery" command.')
c.argument('sd_exchange_key', help='The exchange key for security domain.')
c.argument('sd_wrapping_keys', nargs='*',
help='Space-separated file paths to PEM files containing private keys.')
c.argument('passwords', nargs='*', help='Space-separated password list for --sd-wrapping-keys. '
'CLI will match them in order. Can be omitted if your keys are without '
'password protection.')
with self.argument_context('keyvault security-domain download') as c:
c.argument('sd_wrapping_keys', nargs='*',
help='Space-separated file paths to PEM files containing public keys.')
c.argument('security_domain_file',
help='Path to a file where the JSON blob returned by this command is stored.')
c.argument('sd_quorum', type=int, help='The minimum number of shares required to decrypt the security domain '
'for recovery.')
with self.argument_context('keyvault security-domain wait') as c:
c.argument('hsm_name', hsm_url_type, help='Name of the HSM. Can be omitted if --id is specified.',
required=False)
c.argument('identifier', options_list=['--id'], validator=validate_vault_or_hsm, help='Full URI of the HSM.')
c.argument('resource_group_name', options_list=['--resource-group', '-g'],
help='Proceed only if HSM belongs to the specified resource group.')
c.argument('target_operation', arg_type=get_enum_type(CLISecurityDomainOperation),
help='Target operation to wait for.')
c.ignore('vault_base_url')
# endregion
# region keyvault backup/restore
for item in ['backup', 'restore']:
for scope in ['start']: # TODO add 'status' when SDK is ready
with self.argument_context('keyvault {} {}'.format(item, scope), arg_group='HSM Id') as c:
c.argument('hsm_name', hsm_url_type, required=False,
help='Name of the HSM. Can be omitted if --id is specified.')
c.extra('identifier', options_list=['--id'], validator=validate_vault_or_hsm, help='Full URI of the HSM.')
c.ignore('cls')
with self.argument_context('keyvault backup start', arg_group='Storage Id') as c:
c.argument('storage_resource_uri', required=False,
help='Azure Blob storage container URI. If specified, all other \'Storage Id\' arguments '
'should be omitted.')
c.extra('storage_account_name', help='Name of Azure Storage Account.')
c.extra('blob_container_name', help='Name of Blob Container.')
for command_group in ['backup', 'restore']:
with self.argument_context('keyvault {} start'.format(command_group)) as c:
c.argument('token', options_list=['--storage-container-SAS-token', '-t'], required=True,
help='The SAS token pointing to an Azure Blob storage container')
with self.argument_context('keyvault restore start') as c:
c.argument('folder_to_restore', options_list=['--backup-folder'],
help='Name of the blob container which contains the backup')
with self.argument_context('keyvault restore start', arg_group='Storage Id') as c:
c.extra('storage_resource_uri', required=False,
help='Azure Blob storage container URI. If specified, all other \'Storage Id\' '
'arguments should be omitted.')
c.extra('storage_account_name', help='Name of Azure Storage Account.')
c.extra('blob_container_name', help='Name of Blob Container.')
# endregion
# region KeyVault Storage Account
with self.argument_context('keyvault storage', arg_group='Id') as c:
c.argument('storage_account_name', options_list=['--name', '-n'],
help='Name to identify the storage account in the vault.', id_part='child_name_1',
completer=get_keyvault_name_completion_list('storage_account'))
c.argument('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx), id_part=None)
for scope in ['keyvault storage add', 'keyvault storage update']:
with self.argument_context(scope) as c:
c.extra('disabled', arg_type=get_three_state_flag(), help='Add the storage account in a disabled state.',
validator=validate_storage_disabled_attribute(
'storage_account_attributes', StorageAccountAttributes))
c.ignore('storage_account_attributes')
c.argument('auto_regenerate_key', arg_type=get_three_state_flag(), required=False)
c.argument('regeneration_period', help='The key regeneration time duration specified in ISO-8601 format, '
'such as "P30D" for rotation every 30 days.')
for scope in ['backup', 'show', 'update', 'remove', 'regenerate-key']:
with self.argument_context('keyvault storage ' + scope, arg_group='Id') as c:
c.extra('identifier', options_list=['--id'],
help='Id of the storage account. If specified all other \'Id\' arguments should be omitted.',
validator=validate_storage_account_id)
c.argument('storage_account_name', required=False,
help='Name to identify the storage account in the vault. Required if --id is not specified.')
c.argument('vault_base_url', help='Name of the Key Vault. Required if --id is not specified.',
required=False)
with self.argument_context('keyvault storage backup') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local file path in which to store storage account backup.')
with self.argument_context('keyvault storage restore') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local key backup from which to restore storage account.')
with self.argument_context('keyvault storage sas-definition', arg_group='Id') as c:
c.argument('storage_account_name', options_list=['--account-name'],
help='Name to identify the storage account in the vault.', id_part='child_name_1',
completer=get_keyvault_name_completion_list('storage_account'))
c.argument('sas_definition_name', options_list=['--name', '-n'],
help='Name to identify the SAS definition in the vault.', id_part='child_name_2')
for scope in ['keyvault storage sas-definition create', 'keyvault storage sas-definition update']:
with self.argument_context(scope) as c:
c.extra('disabled', arg_type=get_three_state_flag(), help='Add the SAS definition in a disabled state.',
validator=validate_storage_disabled_attribute('sas_definition_attributes', SasDefinitionAttributes))
c.ignore('sas_definition_attributes')
c.argument('sas_type', arg_type=get_enum_type(SasTokenType))
c.argument('template_uri',
help='The SAS definition token template signed with the key 00000000. '
'For an account token this is only the SAS token itself; for service tokens, it is '
'the full service endpoint URL along with the SAS token. Tokens created according to '
'the SAS definition will have the same properties as the template.')
c.argument('validity_period',
help='The validity period of SAS tokens created according to the SAS definition in ISO-8601, '
'such as "PT12H" for 12 hour tokens.')
c.argument('auto_regenerate_key', arg_type=get_three_state_flag())
for scope in ['keyvault storage sas-definition delete', 'keyvault storage sas-definition show',
'keyvault storage sas-definition update']:
with self.argument_context(scope, arg_group='Id') as c:
c.extra('identifier', options_list=['--id'],
help='Id of the SAS definition. If specified all other \'Id\' arguments should be omitted.',
validator=validate_sas_definition_id)
c.argument('storage_account_name', required=False,
help='Name to identify the storage account in the vault. Required if --id is not specified.')
c.argument('sas_definition_name', required=False,
help='Name to identify the SAS definition in the vault. Required if --id is not specified.')
c.argument('vault_base_url', help='Name of the Key Vault. Required if --id is not specified.',
required=False)
# endregion
# KeyVault Certificate
with self.argument_context('keyvault certificate') as c:
c.argument('validity', type=int,
help='Number of months the certificate is valid for. Overrides the value specified with --policy/-p')
# TODO: Remove workaround when https://github.com/Azure/azure-rest-api-specs/issues/1153 is fixed
with self.argument_context('keyvault certificate create') as c:
c.attributes_argument('certificate', CertificateAttributes, True, ignore=['expires', 'not_before'])
with self.argument_context('keyvault certificate set-attributes') as c:
c.attributes_argument('certificate', CertificateAttributes, ignore=['expires', 'not_before'])
with self.argument_context('keyvault certificate backup') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local file path in which to store certificate backup.')
with self.argument_context('keyvault certificate restore') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local certificate backup from which to restore certificate.')
for item in ['create', 'set-attributes', 'import']:
with self.argument_context('keyvault certificate ' + item) as c:
c.argument('certificate_policy', options_list=['--policy', '-p'],
help='JSON encoded policy definition. Use @{file} to load from a file (e.g. @my_policy.json).',
type=get_json_object)
with self.argument_context('keyvault certificate import') as c:
c.argument('certificate_data', options_list=['--file', '-f'], completer=FilesCompleter(),
help='PKCS12 file or PEM file containing the certificate and private key.',
type=certificate_type)
c.argument('password', help="If the private key in certificate is encrypted, the password used for encryption.")
c.extra('disabled', arg_type=get_three_state_flag(), help='Import the certificate in disabled state.')
with self.argument_context('keyvault certificate download') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='File to receive the binary certificate contents.')
c.argument('encoding', arg_type=get_enum_type(certificate_format_values), options_list=['--encoding', '-e'],
help='Encoding of the certificate. DER will create a binary DER formatted x509 certificate, '
'and PEM will create a base64 PEM x509 certificate.')
# TODO: Fix once service side issue is fixed that there is no way to list pending certificates
with self.argument_context('keyvault certificate pending') as c:
c.argument('certificate_name', options_list=['--name', '-n'], help='Name of the pending certificate.',
id_part='child_name_1', completer=None)
with self.argument_context('keyvault certificate pending merge') as c:
c.argument('x509_certificates', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='File containing the certificate or certificate chain to merge.',
validator=validate_x509_certificate_chain)
c.attributes_argument('certificate', CertificateAttributes, True)
with self.argument_context('keyvault certificate pending cancel') as c:
c.ignore('cancellation_requested')
with self.argument_context('keyvault certificate contact') as c:
c.argument('contact_email', options_list=['--email'], help='Contact e-mail address. Must be unique.')
c.argument('contact_name', options_list=['--name'], help='Full contact name.')
c.argument('contact_phone', options_list=['--phone'], help='Contact phone number.')
with self.argument_context('keyvault certificate issuer admin') as c:
c.argument('email', help='Admin e-mail address. Must be unique within the vault.')
c.argument('name', help='Full admin name.')
c.argument('phone', help='Admin phone number.')
c.argument('first_name', help='Admin first name.')
c.argument('last_name', help='Admin last name.')
with self.argument_context('keyvault certificate issuer') as c:
c.argument('issuer_name', help='Certificate issuer name.')
c.argument('disabled', arg_type=get_three_state_flag(), help='Set issuer to disabled state.')
c.argument('enabled', arg_type=get_three_state_flag(), help='Set issuer enabled state.')
with self.argument_context('keyvault certificate issuer', arg_group='Issuer Credential') as c:
c.argument('account_id')
c.argument('password')
with self.argument_context('keyvault certificate issuer', arg_group='Organization Detail') as c:
c.argument('organization_id')
c.argument('admin_first_name')
c.argument('admin_last_name')
c.argument('admin_email')
c.argument('admin_phone')
for item in ['list', 'list-deleted', 'list-versions']:
with self.argument_context('keyvault certificate {}'.format(item)) as c:
c.argument('maxresults', options_list=['--maxresults'], type=int)
with self.argument_context('keyvault role') as c:
c.argument('scope',
help='Scope to which the role assignment or definition applies, '
'e.g., "/" or "/keys" or "/keys/{keyname}".')
with self.argument_context('keyvault role', arg_group='Id') as c:
c.argument('hsm_name', hsm_url_type)
c.argument('identifier', options_list=['--id'],
help='Full URI of the HSM. If specified all other \'Id\' arguments should be omitted.',
validator=process_hsm_name)
with self.argument_context('keyvault role assignment') as c:
c.argument('role_assignment_name', options_list=['--name', '-n'], help='Name of the role assignment.')
c.argument('assignee', help='Represent a user, group, or service principal. '
'Supported format: object ID, user sign-in name, or service principal name.')
c.argument('assignee_object_id',
help='Use this parameter instead of \'--assignee\' to bypass graph permission issues. '
'This parameter only works with object ids for users, groups, service principals, and '
'managed identities. For managed identities use the principal id. For service principals, '
'use the object id and not the app id.')
c.argument('ids', nargs='+', help='Space-separated role assignment IDs.')
c.argument('role', help='Role name or ID.')
with self.argument_context('keyvault role definition') as c:
c.argument('hsm_name', hsm_url_type)
c.argument('role_definition', help='Description of a role as JSON, or a path to a file containing a JSON description.')
c.argument('role_id', help='The role definition ID.')
c.argument('role_definition_name', options_list=['--name', '-n'], help='The role definition name. '
'This is a GUID in the "name" property of a role definition.')
with self.argument_context('keyvault role definition list') as c:
c.argument('custom_role_only', arg_type=get_three_state_flag(), help='Only show custom role definitions.')
class PrincipalType(str, Enum): # Copied from azure.mgmt.authorization v2018_09_01_preview
user = "User"
group = "Group"
service_principal = "ServicePrincipal"
unknown = "Unknown"
directory_role_template = "DirectoryRoleTemplate"
foreign_group = "ForeignGroup"
application = "Application"
msi = "MSI"
directory_object_or_group = "DirectoryObjectOrGroup"
everyone = "Everyone"
with self.argument_context('keyvault role assignment create') as c:
c.argument('assignee_principal_type', options_list=['--assignee-principal-type', '-t'],
arg_type=get_enum_type(PrincipalType), help='The principal type of assignee.')
# endregion
|
def load_arguments(self, _):
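# Argument loader for every `az keyvault ...` command group. Model classes are resolved
# through the CLI's multi-API helpers (get_models/get_sdk) first, shared CLIArgumentType
# definitions follow, and the remaining argument contexts are grouped by command area
# (vault/HSM management, keys, secrets, security-domain, backup/restore, storage,
# certificates, roles).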
(JsonWebKeyOperation, JsonWebKeyType, SasTokenType,
SasDefinitionAttributes, SecretAttributes, CertificateAttributes, StorageAccountAttributes) = self.get_models(
'JsonWebKeyOperation', 'JsonWebKeyType', 'SasTokenType',
'SasDefinitionAttributes', 'SecretAttributes', 'CertificateAttributes', 'StorageAccountAttributes',
resource_type=ResourceType.DATA_KEYVAULT)
KeyCurveName = self.get_sdk('KeyCurveName', resource_type=ResourceType.DATA_KEYVAULT_KEYS, mod='_enums')
EncryptionAlgorithm = self.get_sdk('EncryptionAlgorithm', resource_type=ResourceType.DATA_KEYVAULT_KEYS, mod='crypto._enums')
class CLIJsonWebKeyOperation(str, Enum):
encrypt = "encrypt"
decrypt = "decrypt"
sign = "sign"
verify = "verify"
wrap_key = "wrapKey"
unwrap_key = "unwrapKey"
import_ = "import"
export = "export"
JsonWebKeyOperation = CLIJsonWebKeyOperation # TODO: Remove this patch when new SDK is released
class CLIJsonWebKeyType(str, Enum):
ec = "EC" #: Elliptic Curve.
ec_hsm = "EC-HSM" #: Elliptic Curve with a private key which is not exportable from the HSM.
rsa = "RSA" #: RSA (https://tools.ietf.org/html/rfc3447)
rsa_hsm = "RSA-HSM" #: RSA with a private key which is not exportable from the HSM.
oct = "oct" #: Octet sequence (used to represent symmetric keys)
oct_hsm = "oct-HSM" #: Oct with a private key which is not exportable from the HSM.
JsonWebKeyType = CLIJsonWebKeyType # TODO: Remove this patch when new SDK is released
class CLIKeyTypeForBYOKImport(str, Enum):
ec = "EC" #: Elliptic Curve.
rsa = "RSA" #: RSA (https://tools.ietf.org/html/rfc3447)
oct = "oct" #: Octet sequence (used to represent symmetric keys)
class CLISecurityDomainOperation(str, Enum):
download = "download" #: Download operation
upload = "upload" #: Upload operation
(KeyPermissions, SecretPermissions, CertificatePermissions, StoragePermissions,
NetworkRuleBypassOptions, NetworkRuleAction, PublicNetworkAccess) = self.get_models(
'KeyPermissions', 'SecretPermissions', 'CertificatePermissions', 'StoragePermissions',
'NetworkRuleBypassOptions', 'NetworkRuleAction', 'PublicNetworkAccess',
resource_type=ResourceType.MGMT_KEYVAULT)
# ARGUMENT DEFINITIONS
vault_name_type = CLIArgumentType(
help='Name of the Vault.', options_list=['--vault-name'], metavar='NAME', id_part=None,
completer=get_resource_name_completion_list('Microsoft.KeyVault/vaults'))
deleted_vault_name_type = CLIArgumentType(
help='Name of the deleted Vault.', options_list=['--vault-name'], metavar='NAME', id_part=None)
hsm_name_type = CLIArgumentType(help='Name of the HSM.',
options_list=['--hsm-name'], id_part=None)
hsm_url_type = CLIArgumentType(help='Name of the HSM.', type=get_hsm_base_url_type(self.cli_ctx),
options_list=['--hsm-name'], id_part=None)
mgmt_plane_hsm_name_type = CLIArgumentType(help='Name of the HSM. (--hsm-name and --name/-n are mutually '
'exclusive, please specify just one of them)',
options_list=['--hsm-name'], id_part=None,
validator=validate_vault_name_and_hsm_name)
data_plane_hsm_name_type = CLIArgumentType(help='Name of the HSM. (--hsm-name and --vault-name are '
'mutually exclusive, please specify just one of them)',
type=get_hsm_base_url_type(self.cli_ctx),
options_list=['--hsm-name'], id_part=None,
validator=set_vault_base_url)
deleted_hsm_name_type = CLIArgumentType(help='Name of the deleted HSM. (--hsm-name and --name/-n are '
'mutually exclusive, please specify just one of them)',
options_list=['--hsm-name'], id_part=None,
validator=validate_vault_name_and_hsm_name)
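# The CLIArgumentType instances above are reused by the argument contexts below so that
# --vault-name/--hsm-name keep a single source of truth for help text, completers and
# validators across all keyvault commands.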
# region vault (management)
with self.argument_context('keyvault') as c:
c.argument('resource_group_name', resource_group_name_type, id_part=None, required=False,
help='Proceed only if Key Vault belongs to the specified resource group.',
validator=validate_resource_group_name)
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'])
c.argument('object_id', help='A GUID that identifies the principal that will receive permissions.')
c.argument('spn', help='Name of a service principal that will receive permissions.')
c.argument('upn', help='Name of a user principal that will receive permissions.')
c.argument('tags', tags_type)
c.argument('enabled_for_deployment', arg_type=get_three_state_flag(),
help='[Vault Only] Property to specify whether Azure Virtual Machines are permitted to retrieve '
'certificates stored as secrets from the key vault.')
c.argument('enabled_for_disk_encryption', arg_type=get_three_state_flag(),
help='[Vault Only] Property to specify whether Azure Disk Encryption is permitted to retrieve '
'secrets from the vault and unwrap keys.')
c.argument('enabled_for_template_deployment', arg_type=get_three_state_flag(),
help='[Vault Only] Property to specify whether Azure Resource Manager is permitted to retrieve '
'secrets from the key vault.')
c.argument('enable_rbac_authorization', arg_type=get_three_state_flag())
c.argument('enable_soft_delete', arg_type=get_three_state_flag(), deprecate_info=c.deprecate(
message_func=lambda x: 'Warning! The ability to create new key vaults with soft delete disabled will be '
'deprecated by December 2020. All key vaults will be required to have soft delete '
'enabled. Please see the following documentation for additional guidance.\n'
'https://docs.microsoft.com/azure/key-vault/general/soft-delete-change'),
help='[Vault Only] Property to specify whether the \'soft delete\' functionality is enabled for '
'this key vault. If it\'s not set to any value (true or false) when creating new key vault, it '
'will be set to true by default. Once set to true, it cannot be reverted to false.')
c.argument('enable_purge_protection', arg_type=get_three_state_flag())
c.argument('public_network_access', arg_type=get_enum_type(PublicNetworkAccess),
help="Property to specify whether the vault will accept traffic from public internet. If set to "
"'disabled' all traffic except private endpoint traffic and that originates from trusted "
"services will be blocked. This will override the set firewall rules, meaning that even if the "
"firewall rules are present we will not honor the rules.")
with self.argument_context('keyvault', arg_group='Network Rule', min_api='2018-02-14') as c:
c.argument('bypass', arg_type=get_enum_type(NetworkRuleBypassOptions),
help='Specify which traffic can bypass network rules.')
c.argument('default_action', arg_type=get_enum_type(NetworkRuleAction),
help='Default action to apply when no rule matches.')
for item in ['show', 'delete', 'create']:
with self.argument_context('keyvault {}'.format(item)) as c:
c.argument('hsm_name', mgmt_plane_hsm_name_type)
with self.argument_context('keyvault create') as c:
c.argument('resource_group_name', resource_group_name_type, required=True, completer=None, validator=None)
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'])
c.argument('administrators', nargs='+',
help='[HSM Only] Administrator role for data plane operations for Managed HSM. '
'It accepts a space-separated list of OIDs that will be assigned.')
c.argument('sku', help='Required. SKU details. Allowed values for Vault: premium, standard. Default: standard.'
' Allowed values for HSM: Standard_B1, Custom_B32. Default: Standard_B1')
c.argument('no_self_perms', arg_type=get_three_state_flag(),
help='[Vault Only] Don\'t add permissions for the current user/service principal in the new vault.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('retention_days', help='Soft delete data retention days. It accepts >=7 and <=90.', default='90')
with self.argument_context('keyvault create', arg_group='Network Rule') as c:
c.argument('network_acls', type=validate_file_or_dict,
help='Network ACLs. It accepts a JSON filename or a JSON string. JSON format: '
'`{\\"ip\\":[<ip1>, <ip2>...],\\"vnet\\":[<vnet_name_1>/<subnet_name_1>,<subnet_id2>...]}`')
c.argument('network_acls_ips', nargs='*', help='Network ACLs IP rules. Space-separated list of IP addresses.')
c.argument('network_acls_vnets', nargs='*', help='Network ACLs VNet rules. Space-separated list of '
'VNet/subnet pairs or subnet resource IDs.')
with self.argument_context('keyvault update') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'])
c.argument('retention_days', help='Soft delete data retention days. It accepts >=7 and <=90.')
with self.argument_context('keyvault update-hsm') as c:
c.argument('name', hsm_name_type)
c.argument('enable_purge_protection', options_list=['--enable-purge-protection', '-e'])
c.argument('secondary_locations', nargs='+',
help='--secondary-locations extends/contracts an HSM pool to listed regions. The primary location '
'where the resource was originally created CANNOT be removed.')
with self.argument_context('keyvault wait-hsm') as c:
c.argument('hsm_name', hsm_name_type)
c.argument('resource_group_name', options_list=['--resource-group', '-g'],
help='Proceed only if HSM belongs to the specified resource group.')
with self.argument_context('keyvault recover') as c:
c.argument('vault_name', deleted_vault_name_type, options_list=['--name', '-n'],
validator=validate_deleted_vault_or_hsm_name)
c.argument('hsm_name', deleted_hsm_name_type)
c.argument('resource_group_name', resource_group_name_type, id_part=None, required=False,
help='Resource group of the deleted Vault or HSM')
c.argument('location', help='Location of the deleted Vault or HSM', required=False)
with self.argument_context('keyvault purge') as c:
c.argument('vault_name', deleted_vault_name_type, options_list=['--name', '-n'],
validator=validate_deleted_vault_or_hsm_name)
c.argument('hsm_name', deleted_hsm_name_type)
c.argument('location', help='Location of the deleted Vault or HSM', required=False)
with self.argument_context('keyvault list') as c:
c.argument('resource_group_name', resource_group_name_type, validator=None)
c.argument('resource_type', help='When --resource-type is not present, the command will list all Vaults and HSMs.'
' Possible values for --resource-type are vault and hsm.')
with self.argument_context('keyvault list-deleted') as c:
c.argument('resource_type', help='When --resource-type is not present, the command will list all deleted Vaults '
'and HSMs. Possible values for --resource-type are vault and hsm.')
with self.argument_context('keyvault show-deleted') as c:
c.argument('vault_name', deleted_vault_name_type, options_list=['--name', '-n'],
validator=validate_deleted_vault_or_hsm_name)
c.argument('hsm_name', deleted_hsm_name_type)
c.argument('location', help='Location of the deleted Vault or HSM', required=False)
for item in ['set-policy', 'delete-policy']:
with self.argument_context('keyvault {}'.format(item)) as c:
c.argument('object_id', validator=validate_principal)
c.argument('application_id', help='Application ID of the client making request on behalf of a principal. '
'Exposed for compound identity using on-behalf-of authentication flow.')
with self.argument_context('keyvault set-policy', arg_group='Permission') as c:
c.argument('key_permissions', arg_type=get_enum_type(KeyPermissions), metavar='PERM', nargs='*',
help='Space-separated list of key permissions to assign.', validator=validate_policy_permissions)
c.argument('secret_permissions', arg_type=get_enum_type(SecretPermissions), metavar='PERM', nargs='*',
help='Space-separated list of secret permissions to assign.')
c.argument('certificate_permissions', arg_type=get_enum_type(CertificatePermissions), metavar='PERM', nargs='*',
help='Space-separated list of certificate permissions to assign.')
c.argument('storage_permissions', arg_type=get_enum_type(StoragePermissions), metavar='PERM', nargs='*',
help='Space-separated list of storage permissions to assign.')
with self.argument_context('keyvault network-rule', min_api='2018-02-14') as c:
c.argument('ip_address', help='IPv4 address or CIDR range.')
c.argument('subnet', help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet)
with self.argument_context('keyvault network-rule add', min_api='2018-02-14') as c:
c.argument('ip_address', nargs='*', help='IPv4 address or CIDR range. Can supply a list: --ip-address ip1 [ip2]...', validator=validate_ip_address)
for item in ['approve', 'reject', 'delete', 'show', 'wait']:
with self.argument_context('keyvault private-endpoint-connection {}'.format(item), min_api='2018-02-14') as c:
c.extra('connection_id', options_list=['--id'], required=False,
help='The ID of the private endpoint connection associated with the Key Vault/HSM. '
'If specified, --vault-name/--hsm-name and --name/-n should be omitted.')
c.argument('description', help='Comments for the {} operation.'.format(item))
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'], required=False,
help='The name of the private endpoint connection associated with the Key Vault/HSM. '
'Required if --id is not specified')
c.argument('vault_name', vault_name_type, required=False,
help='Name of the Key Vault. Required if --id is not specified')
c.argument('hsm_name', mgmt_plane_hsm_name_type, min_api='2021-04-01-preview',
help='Name of the HSM. Required if --id is not specified. '
'(--hsm-name and --vault-name are mutually exclusive, please specify just one of them)')
with self.argument_context('keyvault private-endpoint-connection list') as c:
c.argument("hsm_name", hsm_name_type)
with self.argument_context('keyvault private-link-resource', min_api='2018-02-14', max_api='2020-04-01-preview') as c:
c.argument('vault_name', vault_name_type, required=True)
with self.argument_context('keyvault private-link-resource', min_api='2021-04-01-preview') as c:
c.argument('vault_name', vault_name_type)
c.argument('hsm_name', mgmt_plane_hsm_name_type)
# endregion
# region Shared
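# Secrets and certificates share the same --id/--name/--vault-name plumbing, so their 'Id'
# argument groups are generated in one loop; the per-command branches below only decide
# whether --id means a full vault/HSM URI (list commands) or an object/recovery id.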
for item in ['secret', 'certificate']:
with self.argument_context('keyvault ' + item, arg_group='Id') as c:
c.argument(item + '_name', options_list=['--name', '-n'], help='Name of the {}.'.format(item),
id_part='child_name_1', completer=get_keyvault_name_completion_list(item))
c.argument('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx), id_part=None)
c.argument(item + '_version', options_list=['--version', '-v'],
help='The {} version. If omitted, uses the latest version.'.format(item), default='',
required=False, completer=get_keyvault_version_completion_list(item))
for cmd in ['backup', 'decrypt', 'delete', 'download', 'encrypt', 'list-versions', 'set-attributes', 'show',
'list']:
with self.argument_context('keyvault {} {}'.format(item, cmd), arg_group='Id') as c:
try:
if cmd in ['list']:
c.extra('identifier', options_list=['--id'],
help='Full URI of the Vault or HSM. '
'If specified all other \'Id\' arguments should be omitted.',
validator=validate_vault_or_hsm)
else:
c.extra('identifier', options_list=['--id'],
help='Id of the {}. '
'If specified all other \'Id\' arguments should be omitted.'.format(item),
validator=validate_key_id(item))
except ValueError:
pass
c.argument(item + '_name', help='Name of the {}. Required if --id is not specified.'.format(item),
required=False)
c.argument('vault_base_url', vault_name_type, required=False,
help='Name of the Key Vault. Required if --id is not specified.')
c.argument(item + '_version', required=False)
for cmd in ['purge', 'recover', 'show-deleted']:
with self.argument_context('keyvault {} {}'.format(item, cmd), arg_group='Id') as c:
c.extra('identifier', options_list=['--id'],
help='The recovery id of the {}. '
'If specified all other \'Id\' arguments should be omitted.'.format(item),
validator=validate_key_id('deleted' + item))
c.argument(item + '_name', help='Name of the {}. Required if --id is not specified.'.format(item),
required=False)
c.argument('vault_base_url', help='Name of the Vault. Required if --id is not specified.',
required=False)
c.argument(item + '_version', required=False)
for cmd in ['list', 'list-deleted']:
with self.argument_context('keyvault {} {}'.format(item, cmd)) as c:
c.argument('include_pending', arg_type=get_three_state_flag())
with self.argument_context('keyvault {} {}'.format(item, cmd), arg_group='Id') as c:
if cmd in ['list-deleted']:
c.extra('identifier', options_list=['--id'],
help='Full URI of the Vault{}. '
'If specified all other \'Id\' arguments should be '
'omitted.'.format(' or HSM' if item == 'key' else ''),
validator=validate_vault_or_hsm)
# endregion
# region keys
# keys track1
with self.argument_context('keyvault key') as c:
c.argument('key_ops', arg_type=get_enum_type(JsonWebKeyOperation), options_list=['--ops'], nargs='*',
help='Space-separated list of permitted JSON web key operations.')
for item in ['delete', 'list', 'list-deleted', 'list-versions', 'purge', 'recover', 'show-deleted']:
with self.argument_context('keyvault key {}'.format(item), arg_group='Id') as c:
c.ignore('cls')
c.argument('key_name', options_list=['--name', '-n'], required=False, id_part='child_name_1',
completer=get_keyvault_name_completion_list('key'),
help='Name of the key. Required if --id is not specified.')
c.argument('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx),
id_part=None, required=False)
c.argument('key_version', options_list=['--version', '-v'],
help='The key version. If omitted, uses the latest version.', default='',
required=False, completer=get_keyvault_version_completion_list('key'))
c.extra('identifier', options_list=['--id'], validator=validate_key_id('key'),
help='Id of the key. If specified all other \'Id\' arguments should be omitted.')
c.extra('hsm_name', data_plane_hsm_name_type)
if item in ['list', 'list-deleted']:
c.extra('identifier', options_list=['--id'], validator=validate_vault_or_hsm,
help='Full URI of the Vault or HSM. If specified all other \'Id\' arguments should be omitted.')
elif item in ['show-deleted', 'purge', 'recover']:
c.extra('identifier', options_list=['--id'], validator=validate_key_id('deletedkey'),
help='The recovery id of the key. If specified all other \'Id\' arguments should be omitted.')
for item in ['backup', 'download']:
with self.argument_context('keyvault key {}'.format(item), arg_group='Id') as c:
c.argument('key_name', options_list=['--name', '-n'],
help='Name of the key. Required if --id is not specified.',
required=False, id_part='child_name_1', completer=get_keyvault_name_completion_list('key'))
c.argument('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx), id_part=None)
c.argument('key_version', options_list=['--version', '-v'],
help='The key version. If omitted, uses the latest version.', default='',
required=False, completer=get_keyvault_version_completion_list('key'))
c.argument('identifier', options_list=['--id'], validator=validate_key_id('key'),
help='Id of the key. If specified all other \'Id\' arguments should be omitted.')
c.argument('hsm_name', data_plane_hsm_name_type)
with self.argument_context('keyvault key backup') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local file path in which to store key backup.')
with self.argument_context('keyvault key download') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='File to receive the key contents.')
c.argument('encoding', arg_type=get_enum_type(key_format_values), options_list=['--encoding', '-e'],
help='Encoding of the key, default: PEM', default='PEM')
with self.argument_context('keyvault key restore', arg_group='Id') as c:
c.argument('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx), id_part=None)
c.argument('identifier', options_list=['--id'], validator=validate_vault_or_hsm,
help='Full URI of the Vault or HSM. If specified all other \'Id\' arguments should be omitted.')
c.argument('hsm_name', data_plane_hsm_name_type, validator=None)
with self.argument_context('keyvault key restore') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local key backup from which to restore key.')
with self.argument_context('keyvault key restore', arg_group='Storage Id') as c:
c.argument('storage_resource_uri', options_list=['--storage-resource-uri', '-u'],
help='Azure Blob storage container Uri. If specified, all '
'other \'Storage Id\' arguments should be omitted')
c.argument('storage_account_name', help='Name of Azure Storage Account.')
c.argument('blob_container_name', help='Name of Blob Container.')
with self.argument_context('keyvault key restore', arg_group='Restoring keys from storage account') as c:
c.argument('token', options_list=['--storage-container-SAS-token', '-t'],
help='The SAS token pointing to an Azure Blob storage container')
c.argument('backup_folder', help='Name of the blob container which contains the backup')
c.argument('key_name', options_list=['--name', '-n'],
help='Name of the key. (Only for restoring from storage account)')
for scope in ['list', 'list-deleted', 'list-versions']:
with self.argument_context('keyvault key {}'.format(scope)) as c:
c.argument('maxresults', options_list=['--maxresults'], type=int)
with self.argument_context('keyvault key list') as c:
c.extra('include_managed', arg_type=get_three_state_flag(), default=False,
help='Include managed keys. Default: false')
# keys track2
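# The contexts below target the track 2 data-plane SDK (azure-keyvault-keys): parameter
# names are 'name'/'version' rather than 'key_name'/'key_version', and vault_base_url /
# hsm_name are wired in via c.extra.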
for scope in ['create', 'import', 'set-attributes', 'show', 'encrypt', 'decrypt',
'rotate', 'rotation-policy show', 'rotation-policy update']:
with self.argument_context('keyvault key {}'.format(scope), arg_group='Id') as c:
c.argument('name', options_list=['--name', '-n'], id_part='child_name_1',
required=False, completer=get_keyvault_name_completion_list('key'),
help='Name of the key. Required if --id is not specified.')
c.argument('version', options_list=['--version', '-v'],
help='The key version. If omitted, uses the latest version.', default='',
required=False, completer=get_keyvault_version_completion_list('key'))
c.extra('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx), id_part=None)
c.extra('hsm_name', data_plane_hsm_name_type, required=False)
c.extra('identifier', options_list=['--id'],
help='Id of the key. If specified all other \'Id\' arguments should be omitted.',
validator=validate_keyvault_resource_id('key'))
for item in ['create', 'import']:
with self.argument_context('keyvault key {}'.format(item)) as c:
c.argument('protection', arg_type=get_enum_type(['software', 'hsm']), options_list=['--protection', '-p'],
help='Specifies the type of key protection.')
c.argument('disabled', arg_type=get_three_state_flag(), help='Create key in disabled state.')
c.argument('key_size', options_list=['--size'], type=int,
help='The key size in bits. For example: 2048, 3072, or 4096 for RSA. 128, 192, or 256 for oct.')
c.argument('expires', default=None, help='Expiration UTC datetime (Y-m-d\'T\'H:M:S\'Z\').',
type=datetime_type)
c.argument('not_before', default=None, type=datetime_type,
help='Key not usable before the provided UTC datetime (Y-m-d\'T\'H:M:S\'Z\').')
c.argument('exportable', arg_type=get_three_state_flag(), is_preview=True,
help='Whether the private key can be exported. To create key with release policy, '
'"exportable" must be true and caller must have "export" permission.')
c.argument('release_policy', options_list=['--policy'], type=file_type, completer=FilesCompleter(),
validator=process_key_release_policy, is_preview=True,
help='The policy rules under which the key can be exported. '
'Policy definition as JSON, or a path to a file containing JSON policy definition.')
c.extra('immutable', arg_type=get_three_state_flag(), is_preview=True,
help='Mark a release policy as immutable. '
'An immutable release policy cannot be changed or updated after being marked immutable. '
'Release policies are mutable by default.')
with self.argument_context('keyvault key create') as c:
c.argument('kty', arg_type=get_enum_type(JsonWebKeyType), validator=validate_key_type,
help='The type of key to create. For valid values, see: https://docs.microsoft.com/rest/api/keyvault/keys/create-key/create-key#jsonwebkeytype')
c.argument('curve', arg_type=get_enum_type(KeyCurveName),
help='Elliptic curve name. For valid values, see: https://docs.microsoft.com/en-us/rest/api/keyvault/keys/create-key/create-key#jsonwebkeycurvename')
with self.argument_context('keyvault key import') as c:
c.argument('kty', arg_type=get_enum_type(CLIKeyTypeForBYOKImport), validator=validate_key_import_type,
help='The type of key to import (only for BYOK).')
c.argument('curve', arg_type=get_enum_type(KeyCurveName), validator=validate_key_import_type,
help='The curve name of the key to import (only for BYOK).')
with self.argument_context('keyvault key import', arg_group='Key Source') as c:
c.argument('pem_file', type=file_type, help='PEM file containing the key to be imported.', completer=FilesCompleter(), validator=validate_key_import_source)
c.argument('pem_string', type=file_type, help='PEM string containing the key to be imported.', validator=validate_key_import_source)
c.argument('pem_password', help='Password of PEM file.')
c.argument('byok_file', type=file_type, help='BYOK file containing the key to be imported. Must not be password protected.', completer=FilesCompleter(), validator=validate_key_import_source)
c.argument('byok_string', type=file_type, help='BYOK string containing the key to be imported. Must not be password protected.', validator=validate_key_import_source)
for scope in ['encrypt', 'decrypt']:
with self.argument_context('keyvault key {}'.format(scope)) as c:
c.argument('algorithm', options_list=['--algorithm', '-a'], arg_type=get_enum_type(EncryptionAlgorithm),
help='Algorithm identifier')
with self.argument_context('keyvault key encrypt') as c:
c.argument('value', help='The value to be encrypted. Default data type is Base64 encoded string.',
validator=validate_encryption)
c.extra('data_type', help='The type of the original data.', arg_type=get_enum_type(KeyEncryptionDataType),
default='base64')
c.argument('iv', help='Initialization vector. Required only for AES-CBC(PAD) encryption.')
c.argument('aad', help='Optional data that is authenticated but not encrypted. For use with AES-GCM encryption.')
with self.argument_context('keyvault key decrypt') as c:
c.argument('value', help='The value to be decrypted, which should be the result of "az keyvault key encrypt"',
validator=validate_decryption)
c.extra('data_type', help='The type of the original data.', arg_type=get_enum_type(KeyEncryptionDataType),
default='base64')
c.argument('iv', help='The initialization vector used during encryption. Required for AES decryption.')
c.argument('aad', help='Optional data that is authenticated but not encrypted. For use with AES-GCM decryption.')
c.argument('tag', help='The authentication tag generated during encryption. Required only for AES-GCM decryption.')
with self.argument_context('keyvault key random') as c:
c.extra('hsm_name', hsm_url_type, arg_group='Id', required=False)
c.extra('identifier', options_list=['--id'], arg_group='Id',
help='Full URI of the HSM.', validator=validate_vault_or_hsm)
c.argument('count', type=int, help='The requested number of random bytes.')
with self.argument_context('keyvault key set-attributes') as c:
c.extra('enabled', help='Enable the key.', arg_type=get_three_state_flag())
c.extra('expires_on', options_list=['--expires'], default=None, type=datetime_type,
help='Expiration UTC datetime (Y-m-d\'T\'H:M:S\'Z\').')
c.extra('not_before', default=None, type=datetime_type,
help='Key not usable before the provided UTC datetime (Y-m-d\'T\'H:M:S\'Z\').')
c.extra('key_operations', arg_type=get_enum_type(JsonWebKeyOperation), options_list=['--ops'], nargs='*',
help='Space-separated list of permitted JSON web key operations.')
c.extra('release_policy', options_list=['--policy'], type=file_type, completer=FilesCompleter(),
validator=process_key_release_policy, is_preview=True,
help='The policy rules under which the key can be exported. '
'Policy definition as JSON, or a path to a file containing JSON policy definition.')
c.extra('immutable', arg_type=get_three_state_flag(), is_preview=True,
help='Mark a release policy as immutable. '
'An immutable release policy cannot be changed or updated after being marked immutable. '
'Release policies are mutable by default.')
c.extra('tags', tags_type)
with self.argument_context('keyvault key rotation-policy update') as c:
c.argument('value', type=file_type, completer=FilesCompleter(),
help='The rotation policy file definition as JSON, or a path to a file containing JSON policy definition.')
# endregion
# region KeyVault Secret
with self.argument_context('keyvault secret set') as c:
c.argument('content_type', options_list=['--description'],
help='Description of the secret contents (e.g. password, connection string, etc.).')
c.attributes_argument('secret', SecretAttributes, create=True)
with self.argument_context('keyvault secret set', arg_group='Content Source') as c:
c.argument('value', options_list=['--value'],
help="Plain text secret value. Cannot be used with '--file' or '--encoding'", required=False)
c.extra('file_path', options_list=['--file', '-f'], type=file_type,
help="Source file for secret. Use in conjunction with '--encoding'", completer=FilesCompleter())
c.extra('encoding', arg_type=get_enum_type(secret_encoding_values, default='utf-8'),
options_list=['--encoding', '-e'],
help='Source file encoding. The value is saved as a tag (`file-encoding=<val>`) '
'and used during download to automatically encode the resulting file.')
with self.argument_context('keyvault secret set-attributes') as c:
c.attributes_argument('secret', SecretAttributes)
with self.argument_context('keyvault secret download') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='File to receive the secret contents.')
c.argument('encoding', arg_type=get_enum_type(secret_encoding_values), options_list=['--encoding', '-e'],
help="Encoding of the secret. By default, will look for the 'file-encoding' tag on the secret. "
"Otherwise will assume 'utf-8'.", default=None)
for scope in ['backup', 'restore']:
with self.argument_context('keyvault secret {}'.format(scope)) as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='File to receive the secret contents.')
for scope in ['list', 'list-deleted', 'list-versions']:
with self.argument_context('keyvault secret {}'.format(scope)) as c:
c.argument('maxresults', options_list=['--maxresults'], type=int)
with self.argument_context('keyvault secret list') as c:
c.extra('include_managed', arg_type=get_three_state_flag(), default=False,
help='Include managed secrets. Default: false')
# endregion
# region keyvault security-domain
for scope in ['init-recovery', 'download', 'upload']:
with self.argument_context('keyvault security-domain {}'.format(scope), arg_group='HSM Id') as c:
c.argument('hsm_name', hsm_url_type, required=False,
help='Name of the HSM. Can be omitted if --id is specified.')
c.extra('identifier', options_list=['--id'], validator=validate_vault_or_hsm, help='Full URI of the HSM.')
c.ignore('vault_base_url')
with self.argument_context('keyvault security-domain init-recovery') as c:
c.argument('sd_exchange_key', help='Local file path to store the exported key.')
with self.argument_context('keyvault security-domain upload') as c:
c.argument('sd_file', help='File containing the security domain, encrypted using the SD Exchange file '
'downloaded by the "security-domain init-recovery" command.')
c.argument('sd_exchange_key', help='The exchange key for security domain.')
c.argument('sd_wrapping_keys', nargs='*',
help='Space-separated file paths to PEM files containing private keys.')
c.argument('passwords', nargs='*', help='Space-separated password list for --sd-wrapping-keys. '
'CLI will match them in order. Can be omitted if your keys are without '
'password protection.')
with self.argument_context('keyvault security-domain download') as c:
c.argument('sd_wrapping_keys', nargs='*',
help='Space-separated file paths to PEM files containing public keys.')
c.argument('security_domain_file',
help='Path to a file where the JSON blob returned by this command is stored.')
c.argument('sd_quorum', type=int, help='The minimum number of shares required to decrypt the security domain '
'for recovery.')
with self.argument_context('keyvault security-domain wait') as c:
c.argument('hsm_name', hsm_url_type, help='Name of the HSM. Can be omitted if --id is specified.',
required=False)
c.argument('identifier', options_list=['--id'], validator=validate_vault_or_hsm, help='Full URI of the HSM.')
c.argument('resource_group_name', options_list=['--resource-group', '-g'],
help='Proceed only if HSM belongs to the specified resource group.')
c.argument('target_operation', arg_type=get_enum_type(CLISecurityDomainOperation),
help='Target operation to wait for.')
c.ignore('vault_base_url')
# endregion
# region keyvault backup/restore
for item in ['backup', 'restore']:
for scope in ['start']: # TODO add 'status' when SDK is ready
with self.argument_context('keyvault {} {}'.format(item, scope), arg_group='HSM Id') as c:
c.argument('hsm_name', hsm_url_type, required=False,
help='Name of the HSM. Can be omitted if --id is specified.')
c.extra('identifier', options_list=['--id'], validator=validate_vault_or_hsm, help='Full URI of the HSM.')
c.ignore('cls')
with self.argument_context('keyvault backup start', arg_group='Storage Id') as c:
c.argument('storage_resource_uri', required=False,
help='Azure Blob storage container URI. If specified, all other \'Storage Id\' arguments '
'should be omitted.')
c.extra('storage_account_name', help='Name of Azure Storage Account.')
c.extra('blob_container_name', help='Name of Blob Container.')
for command_group in ['backup', 'restore']:
with self.argument_context('keyvault {} start'.format(command_group)) as c:
c.argument('token', options_list=['--storage-container-SAS-token', '-t'], required=True,
help='The SAS token pointing to an Azure Blob storage container')
with self.argument_context('keyvault restore start') as c:
c.argument('folder_to_restore', options_list=['--backup-folder'],
help='Name of the blob container which contains the backup')
with self.argument_context('keyvault restore start', arg_group='Storage Id') as c:
c.extra('storage_resource_uri', required=False,
help='Azure Blob storage container URI. If specified, all other \'Storage Id\' '
'arguments should be omitted.')
c.extra('storage_account_name', help='Name of Azure Storage Account.')
c.extra('blob_container_name', help='Name of Blob Container.')
# endregion
# region KeyVault Storage Account
with self.argument_context('keyvault storage', arg_group='Id') as c:
c.argument('storage_account_name', options_list=['--name', '-n'],
help='Name to identify the storage account in the vault.', id_part='child_name_1',
completer=get_keyvault_name_completion_list('storage_account'))
c.argument('vault_base_url', vault_name_type, type=get_vault_base_url_type(self.cli_ctx), id_part=None)
for scope in ['keyvault storage add', 'keyvault storage update']:
with self.argument_context(scope) as c:
c.extra('disabled', arg_type=get_three_state_flag(), help='Add the storage account in a disabled state.',
validator=validate_storage_disabled_attribute(
'storage_account_attributes', StorageAccountAttributes))
c.ignore('storage_account_attributes')
c.argument('auto_regenerate_key', arg_type=get_three_state_flag(), required=False)
c.argument('regeneration_period', help='The key regeneration time duration specified in ISO-8601 format, '
'such as "P30D" for rotation every 30 days.')
for scope in ['backup', 'show', 'update', 'remove', 'regenerate-key']:
with self.argument_context('keyvault storage ' + scope, arg_group='Id') as c:
c.extra('identifier', options_list=['--id'],
help='Id of the storage account. If specified all other \'Id\' arguments should be omitted.',
validator=validate_storage_account_id)
c.argument('storage_account_name', required=False,
help='Name to identify the storage account in the vault. Required if --id is not specified.')
c.argument('vault_base_url', help='Name of the Key Vault. Required if --id is not specified.',
required=False)
with self.argument_context('keyvault storage backup') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local file path in which to store storage account backup.')
with self.argument_context('keyvault storage restore') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local key backup from which to restore storage account.')
with self.argument_context('keyvault storage sas-definition', arg_group='Id') as c:
c.argument('storage_account_name', options_list=['--account-name'],
help='Name to identify the storage account in the vault.', id_part='child_name_1',
completer=get_keyvault_name_completion_list('storage_account'))
c.argument('sas_definition_name', options_list=['--name', '-n'],
help='Name to identify the SAS definition in the vault.', id_part='child_name_2')
for scope in ['keyvault storage sas-definition create', 'keyvault storage sas-definition update']:
with self.argument_context(scope) as c:
c.extra('disabled', arg_type=get_three_state_flag(), help='Add the SAS definition in a disabled state.',
validator=validate_storage_disabled_attribute('sas_definition_attributes', SasDefinitionAttributes))
c.ignore('sas_definition_attributes')
c.argument('sas_type', arg_type=get_enum_type(SasTokenType))
c.argument('template_uri',
help='The SAS definition token template signed with the key 00000000. '
'For an account token this is only the SAS token itself; for service tokens, it is '
'the full service endpoint URL along with the SAS token. Tokens created according to '
'the SAS definition will have the same properties as the template.')
c.argument('validity_period',
help='The validity period of SAS tokens created according to the SAS definition in ISO-8601, '
'such as "PT12H" for 12 hour tokens.')
c.argument('auto_regenerate_key', arg_type=get_three_state_flag())
for scope in ['keyvault storage sas-definition delete', 'keyvault storage sas-definition show',
'keyvault storage sas-definition update']:
with self.argument_context(scope, arg_group='Id') as c:
c.extra('identifier', options_list=['--id'],
help='Id of the SAS definition. If specified all other \'Id\' arguments should be omitted.',
validator=validate_sas_definition_id)
c.argument('storage_account_name', required=False,
help='Name to identify the storage account in the vault. Required if --id is not specified.')
c.argument('sas_definition_name', required=False,
help='Name to identify the SAS definition in the vault. Required if --id is not specified.')
c.argument('vault_base_url', help='Name of the Key Vault. Required if --id is not specified.',
required=False)
# endregion
# KeyVault Certificate
with self.argument_context('keyvault certificate') as c:
c.argument('validity', type=int,
help='Number of months the certificate is valid for. Overrides the value specified with --policy/-p')
# TODO: Remove workaround when https://github.com/Azure/azure-rest-api-specs/issues/1153 is fixed
with self.argument_context('keyvault certificate create') as c:
c.attributes_argument('certificate', CertificateAttributes, True, ignore=['expires', 'not_before'])
with self.argument_context('keyvault certificate set-attributes') as c:
c.attributes_argument('certificate', CertificateAttributes, ignore=['expires', 'not_before'])
with self.argument_context('keyvault certificate backup') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local file path in which to store certificate backup.')
with self.argument_context('keyvault certificate restore') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='Local certificate backup from which to restore certificate.')
for item in ['create', 'set-attributes', 'import']:
with self.argument_context('keyvault certificate ' + item) as c:
c.argument('certificate_policy', options_list=['--policy', '-p'],
help='JSON encoded policy definition. Use @{file} to load from a file (e.g. @my_policy.json).',
type=get_json_object)
with self.argument_context('keyvault certificate import') as c:
c.argument('certificate_data', options_list=['--file', '-f'], completer=FilesCompleter(),
help='PKCS12 file or PEM file containing the certificate and private key.',
type=certificate_type)
c.argument('password', help="If the private key in the certificate is encrypted, the password used for encryption.")
c.extra('disabled', arg_type=get_three_state_flag(), help='Import the certificate in disabled state.')
with self.argument_context('keyvault certificate download') as c:
c.argument('file_path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='File to receive the binary certificate contents.')
c.argument('encoding', arg_type=get_enum_type(certificate_format_values), options_list=['--encoding', '-e'],
help='Encoding of the certificate. DER will create a binary DER formatted x509 certificate, '
'and PEM will create a base64 PEM x509 certificate.')
# TODO: Fix once service side issue is fixed that there is no way to list pending certificates
with self.argument_context('keyvault certificate pending') as c:
c.argument('certificate_name', options_list=['--name', '-n'], help='Name of the pending certificate.',
id_part='child_name_1', completer=None)
with self.argument_context('keyvault certificate pending merge') as c:
c.argument('x509_certificates', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
help='File containing the certificate or certificate chain to merge.',
validator=validate_x509_certificate_chain)
c.attributes_argument('certificate', CertificateAttributes, True)
with self.argument_context('keyvault certificate pending cancel') as c:
c.ignore('cancellation_requested')
with self.argument_context('keyvault certificate contact') as c:
c.argument('contact_email', options_list=['--email'], help='Contact e-mail address. Must be unique.')
c.argument('contact_name', options_list=['--name'], help='Full contact name.')
c.argument('contact_phone', options_list=['--phone'], help='Contact phone number.')
with self.argument_context('keyvault certificate issuer admin') as c:
c.argument('email', help='Admin e-mail address. Must be unique within the vault.')
c.argument('name', help='Full admin name.')
c.argument('phone', help='Admin phone number.')
c.argument('first_name', help='Admin first name.')
c.argument('last_name', help='Admin last name.')
with self.argument_context('keyvault certificate issuer') as c:
c.argument('issuer_name', help='Certificate issuer name.')
c.argument('disabled', arg_type=get_three_state_flag(), help='Set issuer to disabled state.')
c.argument('enabled', arg_type=get_three_state_flag(), help='Set issuer enabled state.')
with self.argument_context('keyvault certificate issuer', arg_group='Issuer Credential') as c:
c.argument('account_id')
c.argument('password')
with self.argument_context('keyvault certificate issuer', arg_group='Organization Detail') as c:
c.argument('organization_id')
c.argument('admin_first_name')
c.argument('admin_last_name')
c.argument('admin_email')
c.argument('admin_phone')
for item in ['list', 'list-deleted', 'list-versions']:
with self.argument_context('keyvault certificate {}'.format(item)) as c:
c.argument('maxresults', options_list=['--maxresults'], type=int)
with self.argument_context('keyvault role') as c:
c.argument('scope',
help='Scope at which the role assignment or definition applies, '
'e.g., "/", "/keys", or "/keys/{keyname}"')
with self.argument_context('keyvault role', arg_group='Id') as c:
c.argument('hsm_name', hsm_url_type)
c.argument('identifier', options_list=['--id'],
help='Full URI of the HSM. If specified all other \'Id\' arguments should be omitted.',
validator=process_hsm_name)
with self.argument_context('keyvault role assignment') as c:
c.argument('role_assignment_name', options_list=['--name', '-n'], help='Name of the role assignment.')
c.argument('assignee', help='Represent a user, group, or service principal. '
'Supported format: object id, user sign-in name, or service principal name.')
c.argument('assignee_object_id',
help='Use this parameter instead of \'--assignee\' to bypass graph permission issues. '
'This parameter only works with object ids for users, groups, service principals, and '
'managed identities. For managed identities use the principal id. For service principals, '
'use the object id and not the app id.')
c.argument('ids', nargs='+', help='space-separated role assignment ids')
c.argument('role', help='role name or id')
with self.argument_context('keyvault role definition') as c:
c.argument('hsm_name', hsm_url_type)
c.argument('role_definition', help='Description of a role as JSON, or a path to a file containing a JSON description.')
c.argument('role_id', help='The role definition ID.')
c.argument('role_definition_name', options_list=['--name', '-n'], help='The role definition name. '
'This is a GUID in the "name" property of a role definition.')
with self.argument_context('keyvault role definition list') as c:
c.argument('custom_role_only', arg_type=get_three_state_flag(), help='Only show custom role definitions.')
class PrincipalType(str, Enum): # Copied from azure.mgmt.authorization v2018_09_01_preview
user = "User"
group = "Group"
service_principal = "ServicePrincipal"
unknown = "Unknown"
directory_role_template = "DirectoryRoleTemplate"
foreign_group = "ForeignGroup"
application = "Application"
msi = "MSI"
directory_object_or_group = "DirectoryObjectOrGroup"
everyone = "Everyone"
with self.argument_context('keyvault role assignment create') as c:
c.argument('assignee_principal_type', options_list=['--assignee-principal-type', '-t'],
arg_type=get_enum_type(PrincipalType), help='The principal type of assignee.')
# endregion
|
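The keyvault block above is a long run of `self.argument_context(...)` scopes with per-parameter `c.argument(...)` calls. As a rough, self-contained sketch of that registration pattern (a toy registry for illustration only, not azure-cli's actual AzArgumentContext machinery), the shape is roughly:

from contextlib import contextmanager

class ToyLoader:
    """Toy stand-in that just records (scope, dest, settings) tuples."""
    def __init__(self):
        self.registrations = []

    @contextmanager
    def argument_context(self, scope, arg_group=None):
        loader = self

        class _Ctx:
            def argument(self, dest, **settings):
                if arg_group is not None:
                    settings.setdefault('arg_group', arg_group)
                loader.registrations.append((scope, dest, settings))

        yield _Ctx()

loader = ToyLoader()
with loader.argument_context('keyvault storage backup') as c:
    c.argument('file_path', options_list=['--file', '-f'],
               help='Local file path in which to store storage account backup.')
print(loader.registrations)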
1,748 |
def top_k_accuracy_score(y_true, y_score, k=5, normalize=True):
"""Top k Accuracy classification score.
This metric computes the number of times where the correct label is among
the top ``k`` labels predicted (ranked by predicted scores). Note that
the multilabel classification case isn't handled here.
Parameters
----------
y_true : array-like of shape (n_samples,)
True labels.
y_score : array-like of shape (n_samples, n_classes)
Target scores.
k : int, optional (default=5)
Number of guesses allowed to find the correct label.
normalize : bool, optional (default=True)
If ``True``, return the fraction of correctly classified samples.
Otherwise, return the number of correctly classified samples.
Returns
-------
score : float
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score
Notes
-----
If ``k = 1``, the result will be the same as the accuracy_score (though see
note below). If ``k`` is the same as the number of classes, this score will
be perfect and meaningless.
In cases where two or more labels are assigned equal probabilities, the
result may be incorrect if one of those labels falls at the threshold, as
one class must be chosen to be the ``k``th class and the class chosen may
not be the correct one.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import top_k_accuracy_score
>>> y_true = np.array([0, 1, 2, 2])
>>> y_score = np.array([[0.5, 0.2, 0.2],
... [0.3, 0.4, 0.2],
... [0.2, 0.4, 0.3],
... [0.7, 0.2, 0.1]])
>>> top_k_accuracy_score(y_true, y_score, k=1)
0.5
>>> top_k_accuracy_score(y_true, y_score, k=2)
0.75
>>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False)
3
>>> top_k_accuracy_score(y_true, y_score, k=3)
1.0
"""
check_consistent_length(y_true, y_score)
y_type = type_of_target(y_true)
if y_type != 'multiclass':
raise ValueError(f"Target type must be 'multiclass' not {y_type}")
y_true = column_or_1d(y_true)
y_score = check_array(y_score)
classes = _encode(y_true)
if len(classes) != y_score.shape[1]:
raise ValueError(
"Number of classes in y_true not equal to the number of columns "
"in 'y_score'"
)
sorted_pred = np.argsort(-y_score, axis=1)
score = sum(y in pred[:k] for y, pred in zip(y_true, sorted_pred))
score = score / len(y_true) if normalize else score
return score
|
def top_k_accuracy_score(y_true, y_score, k=5, normalize=True):
"""Top k Accuracy classification score.
This metric computes the number of times where the correct label is among
the top ``k`` labels predicted (ranked by predicted scores). Note that
the multilabel classification case isn't handled here.
Parameters
----------
y_true : array-like of shape (n_samples,)
True labels.
y_score : array-like of shape (n_samples, n_classes)
Target scores.
k : int, default=5
Number of guesses allowed to find the correct label.
normalize : bool, optional (default=True)
If ``True``, return the fraction of correctly classified samples.
Otherwise, return the number of correctly classified samples.
Returns
-------
score : float
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score
Notes
-----
If ``k = 1``, the result will be the same as the accuracy_score (though see
note below). If ``k`` is the same as the number of classes, this score will
be perfect and meaningless.
In cases where two or more labels are assigned equal probabilities, the
result may be incorrect if one of those labels falls at the threshold, as
one class must be chosen to be the ``k``th class and the class chosen may
not be the correct one.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import top_k_accuracy_score
>>> y_true = np.array([0, 1, 2, 2])
>>> y_score = np.array([[0.5, 0.2, 0.2],
... [0.3, 0.4, 0.2],
... [0.2, 0.4, 0.3],
... [0.7, 0.2, 0.1]])
>>> top_k_accuracy_score(y_true, y_score, k=1)
0.5
>>> top_k_accuracy_score(y_true, y_score, k=2)
0.75
>>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False)
3
>>> top_k_accuracy_score(y_true, y_score, k=3)
1.0
"""
check_consistent_length(y_true, y_score)
y_type = type_of_target(y_true)
if y_type != 'multiclass':
raise ValueError(f"Target type must be 'multiclass' not {y_type}")
y_true = column_or_1d(y_true)
y_score = check_array(y_score)
classes = _encode(y_true)
if len(classes) != y_score.shape[1]:
raise ValueError(
"Number of classes in y_true not equal to the number of columns "
"in 'y_score'"
)
sorted_pred = np.argsort(-y_score, axis=1)
score = sum(y in pred[:k] for y, pred in zip(y_true, sorted_pred))
score = score / len(y_true) if normalize else score
return score
|
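The core of top_k_accuracy_score above is the np.argsort(-y_score, axis=1) ranking followed by a membership test. A minimal NumPy-only sketch that reproduces the docstring's numbers (no sklearn input validation, just the ranking step):

import numpy as np

y_true = np.array([0, 1, 2, 2])
y_score = np.array([[0.5, 0.2, 0.2],
                    [0.3, 0.4, 0.2],
                    [0.2, 0.4, 0.3],
                    [0.7, 0.2, 0.1]])

def top_k_by_hand(y_true, y_score, k):
    # Rank classes per sample by descending score and check whether the
    # true label appears among the first k columns.
    sorted_pred = np.argsort(-y_score, axis=1)
    hits = sum(y in pred[:k] for y, pred in zip(y_true, sorted_pred))
    return hits / len(y_true)

assert top_k_by_hand(y_true, y_score, k=1) == 0.5
assert top_k_by_hand(y_true, y_score, k=2) == 0.75
assert top_k_by_hand(y_true, y_score, k=3) == 1.0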
40,467 |
def train():
model.train()
total_loss = 0
for loader in train_loader:
optimizer.zero_grad()
h = model(loader)
src_nodes = loader.edge_label_index[0]
dst_nodes = loader.edge_label_index[1]
out = h[src_nodes] * h[dst_nodes]
loss = F.binary_cross_entropy_with_logits(out.sum(dim=-1),
loader.edge_label)
loss.backward()
optimizer.step()
total_loss += float(loss) * out.size(0)
return total_loss / data.num_nodes
|
def train():
model.train()
total_loss = 0
for batch in train_loader:
optimizer.zero_grad()
h = model(batch)
src_nodes = batch.edge_label_index[0]
dst_nodes = batch.edge_label_index[1]
out = h[src_nodes] * h[dst_nodes]
loss = F.binary_cross_entropy_with_logits(out.sum(dim=-1),
batch.edge_label)
loss.backward()
optimizer.step()
total_loss += float(loss) * out.size(0)
return total_loss / data.num_nodes
|
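Inside the training loop above, each supervised edge is scored by the dot product of its two endpoint embeddings and fed to a logistic loss. A standalone sketch of just that scoring step, with random embeddings standing in for the model, train_loader, and data objects (which are defined elsewhere):

import torch
import torch.nn.functional as F

# Toy stand-ins: 10 node embeddings and 4 supervised edges with binary labels.
h = torch.randn(10, 16)
edge_label_index = torch.tensor([[0, 1, 2, 3],
                                 [4, 5, 6, 7]])
edge_label = torch.tensor([1., 0., 1., 0.])

src_nodes, dst_nodes = edge_label_index
out = h[src_nodes] * h[dst_nodes]          # element-wise product per edge
logits = out.sum(dim=-1)                   # dot product of the two endpoint embeddings
loss = F.binary_cross_entropy_with_logits(logits, edge_label)
print(float(loss))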
13,543 |
def projection_shifts(A, E, Z, V, prev_shifts, shift_options):
"""Find further shift parameters for low-rank ADI iteration using
Galerkin projection on spaces spanned by LR-ADI iterates.
See [PK16]_, pp. 92-95.
Parameters
----------
A
The |Operator| A from the corresponding Lyapunov equation.
E
The |Operator| E from the corresponding Lyapunov equation.
Z
A |VectorArray| representing the currently computed low-rank
solution factor.
V
A |VectorArray| representing the currently computed iterate.
prev_shifts
A |NumPy array| containing the set of all previously used shift
parameters.
shift_options
The shift options to use (see :func:`lyap_lrcf_solver_options`).
Returns
-------
shifts
A |NumPy array| containing a set of stable shift parameters.
"""
if prev_shifts[prev_shifts.size - 1].imag != 0:
Q = gram_schmidt(cat_arrays([V.real, V.imag]), atol=0, rtol=0)
else:
Q = gram_schmidt(V, atol=0, rtol=0)
Ap = A.apply2(Q, Q)
Ep = E.apply2(Q, Q)
shifts = spla.eigvals(Ap, Ep)
shifts.imag[abs(shifts.imag) < np.finfo(float).eps] = 0
shifts = shifts[np.real(shifts) < 0]
if shifts.size == 0:
return prev_shifts
else:
if shifts[np.imag(shifts) != 0].size > 0:
shifts = np.array(sorted(shifts, key=np.abs))
else:
shifts.sort()
return shifts
|
def projection_shifts(A, E, Z, V, prev_shifts, shift_options):
"""Find further shift parameters for low-rank ADI iteration using
Galerkin projection on spaces spanned by LR-ADI iterates.
See [PK16]_, pp. 92-95.
Parameters
----------
A
The |Operator| A from the corresponding Lyapunov equation.
E
The |Operator| E from the corresponding Lyapunov equation.
Z
A |VectorArray| representing the currently computed low-rank
solution factor.
V
A |VectorArray| representing the currently computed iterate.
prev_shifts
A |NumPy array| containing the set of all previously used shift
parameters.
shift_options
The shift options to use (see :func:`lyap_lrcf_solver_options`).
Returns
-------
shifts
A |NumPy array| containing a set of stable shift parameters.
"""
if prev_shifts[-1].imag != 0:
Q = gram_schmidt(cat_arrays([V.real, V.imag]), atol=0, rtol=0)
else:
Q = gram_schmidt(V, atol=0, rtol=0)
Ap = A.apply2(Q, Q)
Ep = E.apply2(Q, Q)
shifts = spla.eigvals(Ap, Ep)
shifts.imag[abs(shifts.imag) < np.finfo(float).eps] = 0
shifts = shifts[np.real(shifts) < 0]
if shifts.size == 0:
return prev_shifts
else:
if shifts[np.imag(shifts) != 0].size > 0:
shifts = np.array(sorted(shifts, key=np.abs))
else:
shifts.sort()
return shifts
|
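The projection step above works on pyMOR operators, but the numerics reduce to a small generalized eigenvalue problem on the projected pencil. A dense NumPy/SciPy sketch of the same idea (QR standing in for gram_schmidt, a random test matrix standing in for the Lyapunov operators):

import numpy as np
import scipy.linalg as spla

rng = np.random.default_rng(0)
n, k = 50, 4
A = -np.eye(n) + 0.1 * rng.standard_normal((n, n))   # dense stand-in, (almost surely) stable
E = np.eye(n)
V = rng.standard_normal((n, k))                       # dense stand-in for the current ADI iterate

Q, _ = np.linalg.qr(V)                                # QR in place of gram_schmidt
Ap = Q.T @ A @ Q                                      # projected pencil (Ap, Ep)
Ep = Q.T @ E @ Q
shifts = spla.eigvals(Ap, Ep)
shifts.imag[np.abs(shifts.imag) < np.finfo(float).eps] = 0
shifts = shifts[shifts.real < 0]                      # keep only stable shifts
if np.any(shifts.imag != 0):
    shifts = np.array(sorted(shifts, key=np.abs))
else:
    shifts = np.sort(shifts)
print(shifts)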
54,011 |
def sum_up_lists(list_a, list_b):
'''
This function sums up the entries of two chart lists
'''
dict_a = dict(list_a)
dict_b = dict(list_b)
tmp = dict.fromkeys(dict_a.keys(), 0)
# Iterate over both dicts at the same time
for (k_a, v_a), (_, v_b) in zip(dict_a.items(), dict_b.items()):
tmp[k_a] += v_a + v_b
return _convert_dict_to_chart_list(tmp)
|
def sum_up_lists(list_a, list_b):
'''
This function sums up the entries of two chart lists
'''
tmp = {}
for key, value in chain(list_a, list_b):
tmp.setdefault(key, 0)
tmp[key] += value
return _convert_dict_to_chart_list(tmp)
|
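The rewritten sum_up_lists merges the two chart lists key-wise, so entries present in only one list are kept rather than silently mis-paired. A quick illustration of the accumulation step (_convert_dict_to_chart_list is defined elsewhere and omitted here):

from itertools import chain

list_a = [('firmware', 3), ('kernel', 1)]
list_b = [('firmware', 2), ('bootloader', 5)]

tmp = {}
for key, value in chain(list_a, list_b):
    tmp.setdefault(key, 0)
    tmp[key] += value
print(tmp)   # {'firmware': 5, 'kernel': 1, 'bootloader': 5}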
40,562 |
def load_arguments(self, _):
with self.argument_context('connectedk8s connect') as c:
c.argument('tags', tags_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('cluster_name', options_list=['--name', '-n'], help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
c.argument('https_proxy', options_list=['--proxy-https'], arg_group='Proxy', help='Https proxy URL to be used.')
c.argument('http_proxy', options_list=['--proxy-http'], arg_group='Proxy', help='Http proxy URL to be used.')
c.argument('no_proxy', options_list=['--proxy-skip-range'], arg_group='Proxy', help='List of URLs/CIDRs for which proxy should not be used.')
c.argument('proxy_cert', options_list=['--proxy-cert'], arg_group='Proxy', type=file_type, completer=FilesCompleter(), help='Path to the certificate file for proxy')
c.argument('distribution', options_list=['--distribution'], help='The Kubernetes distribution which will be running on this connected cluster.', arg_type=get_enum_type(Distribution_Enum_Values))
c.argument('infrastructure', options_list=['--infrastructure'], help='The infrastructure on which the Kubernetes cluster represented by this connected cluster will be running on.', arg_type=get_enum_type(Infrastructure_Enum_Values))
c.argument('disable_auto_upgrade', options_list=['--disable-auto-upgrade'], action='store_true', help='Flag to disable auto upgrade of arc agents.')
c.argument('cl_oid', options_list=['--custom-locations-oid'], help="OID of 'custom-locations' app")
c.argument('onboarding_timeout', options_list=['--onboarding-timeout'], arg_group='Timeout', help='Time required (in seconds) for the arc-agent pods to be installed on the kubernetes cluster. Override this value if the hardware/network constraints on your cluster requires more time for installing the arc-agent pods.')
c.argument('no_wait', options_list=['--no-wait'], arg_group='Timeout', help="Do not wait for the long-running operation to finish.")
c.argument('correlation_id', options_list=['--correlation_id'], help='A guid that is used to track the source of cluster onboarding')
with self.argument_context('connectedk8s update') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
c.argument('https_proxy', options_list=['--proxy-https'], arg_group='Proxy', help='Https proxy URL to be used.')
c.argument('http_proxy', options_list=['--proxy-http'], arg_group='Proxy', help='Http proxy URL to be used.')
c.argument('no_proxy', options_list=['--proxy-skip-range'], arg_group='Proxy', help='List of URLs/CIDRs for which proxy should not be used.')
c.argument('proxy_cert', options_list=['--proxy-cert'], arg_group='Proxy', type=file_type, completer=FilesCompleter(), help='Path to any additional certificate file (for proxy as well)')
c.argument('disable_proxy', options_list=['--disable-proxy'], arg_group='Proxy', action='store_true', help='Disables proxy settings for agents')
c.argument('auto_upgrade', options_list=['--auto-upgrade'], help='Flag to enable/disable auto upgrade of arc agents. By default, auto upgrade of agents is enabled.', arg_type=get_enum_type(["true", "false"]))
with self.argument_context('connectedk8s upgrade') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
c.argument('arc_agent_version', options_list=['--agent-version'], help='Version of agent to update the helm charts to.')
c.argument('upgrade_timeout', options_list=['--upgrade-timeout'], help='Time required (in seconds) for the arc-agent pods to be upgraded on the kubernetes cluster. Override this value if the hardware/network constraints on your cluster requires more time for upgrading the arc-agent pods.')
with self.argument_context('connectedk8s enable-features') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
c.argument('features', features_types, options_list=['--features'], help='Space-separated list of features you want to enable.')
c.argument('azrbac_client_id', options_list=['--app-id'], arg_group='Azure RBAC', help='Application ID for enabling Azure RBAC. Specify when enabling azure-rbac.')
c.argument('azrbac_client_secret', options_list=['--app-secret'], arg_group='Azure RBAC', help='Application secret for enabling Azure RBAC. Specify when enabling azure-rbac.')
c.argument('azrbac_skip_authz_check', options_list=['--skip-azure-rbac-list'], arg_group='Azure RBAC', help='Comma separated list of names of usernames/email/oid. Azure RBAC will be skipped for these users. Specify when enabling azure-rbac.')
c.argument('cl_oid', options_list=['--custom-locations-oid'], help="OID of 'custom-locations' app")
with self.argument_context('connectedk8s disable-features') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
c.argument('features', features_types, options_list=['--features'], help='Space-separated list of features you want to disable.')
c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
with self.argument_context('connectedk8s list') as c:
pass
with self.argument_context('connectedk8s show') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
with self.argument_context('connectedk8s delete') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
with self.argument_context('connectedk8s proxy') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('token', options_list=['--token'], help='Service account token to use to authenticate to the kubernetes cluster.')
c.argument('context_name', options_list=['--kube-context'], help='If specified, overwrite the default context name.')
c.argument('path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(), default=os.path.join(os.path.expanduser('~'), '.kube', 'config'), help="Kubernetes configuration file to update. If not provided, updates the file '~/.kube/config'. Use '-' to print YAML to stdout instead.")
c.argument('api_server_port', options_list=['--port'], help='Port used for accessing connected cluster.')
|
def load_arguments(self, _):
with self.argument_context('connectedk8s connect') as c:
c.argument('tags', tags_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('cluster_name', options_list=['--name', '-n'], help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
c.argument('https_proxy', options_list=['--proxy-https'], arg_group='Proxy', help='Https proxy URL to be used.')
c.argument('http_proxy', options_list=['--proxy-http'], arg_group='Proxy', help='Http proxy URL to be used.')
c.argument('no_proxy', options_list=['--proxy-skip-range'], arg_group='Proxy', help='List of URLs/CIDRs for which proxy should not be used.')
c.argument('proxy_cert', options_list=['--proxy-cert'], arg_group='Proxy', type=file_type, completer=FilesCompleter(), help='Path to the certificate file for proxy')
c.argument('distribution', options_list=['--distribution'], help='The Kubernetes distribution which will be running on this connected cluster.', arg_type=get_enum_type(Distribution_Enum_Values))
c.argument('infrastructure', options_list=['--infrastructure'], help='The infrastructure on which the Kubernetes cluster represented by this connected cluster will be running on.', arg_type=get_enum_type(Infrastructure_Enum_Values))
c.argument('disable_auto_upgrade', options_list=['--disable-auto-upgrade'], action='store_true', help='Flag to disable auto upgrade of arc agents.')
c.argument('cl_oid', options_list=['--custom-locations-oid'], help="OID of 'custom-locations' app")
c.argument('onboarding_timeout', options_list=['--onboarding-timeout'], arg_group='Timeout', help='Time required (in seconds) for the arc-agent pods to be installed on the kubernetes cluster. Override this value if the hardware/network constraints on your cluster requires more time for installing the arc-agent pods.')
c.argument('no_wait', options_list=['--no-wait'], arg_group='Timeout', help="Do not wait for the long-running operation to finish.")
c.argument('correlation_id', options_list=['--correlation_id'], help='A guid that is used to track the source of cluster onboarding', validator=override_client_request_id_header)
with self.argument_context('connectedk8s update') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
c.argument('https_proxy', options_list=['--proxy-https'], arg_group='Proxy', help='Https proxy URL to be used.')
c.argument('http_proxy', options_list=['--proxy-http'], arg_group='Proxy', help='Http proxy URL to be used.')
c.argument('no_proxy', options_list=['--proxy-skip-range'], arg_group='Proxy', help='List of URLs/CIDRs for which proxy should not be used.')
c.argument('proxy_cert', options_list=['--proxy-cert'], arg_group='Proxy', type=file_type, completer=FilesCompleter(), help='Path to any additional certificate file (for proxy as well)')
c.argument('disable_proxy', options_list=['--disable-proxy'], arg_group='Proxy', action='store_true', help='Disables proxy settings for agents')
c.argument('auto_upgrade', options_list=['--auto-upgrade'], help='Flag to enable/disable auto upgrade of arc agents. By default, auto upgrade of agents is enabled.', arg_type=get_enum_type(["true", "false"]))
with self.argument_context('connectedk8s upgrade') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
c.argument('arc_agent_version', options_list=['--agent-version'], help='Version of agent to update the helm charts to.')
c.argument('upgrade_timeout', options_list=['--upgrade-timeout'], help='Time required (in seconds) for the arc-agent pods to be upgraded on the kubernetes cluster. Override this value if the hardware/network constraints on your cluster requires more time for upgrading the arc-agent pods.')
with self.argument_context('connectedk8s enable-features') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
c.argument('features', features_types, options_list=['--features'], help='Space-separated list of features you want to enable.')
c.argument('azrbac_client_id', options_list=['--app-id'], arg_group='Azure RBAC', help='Application ID for enabling Azure RBAC. Specify when enabling azure-rbac.')
c.argument('azrbac_client_secret', options_list=['--app-secret'], arg_group='Azure RBAC', help='Application secret for enabling Azure RBAC. Specify when enabling azure-rbac.')
c.argument('azrbac_skip_authz_check', options_list=['--skip-azure-rbac-list'], arg_group='Azure RBAC', help='Comma separated list of names of usernames/email/oid. Azure RBAC will be skipped for these users. Specify when enabling azure-rbac.')
c.argument('cl_oid', options_list=['--custom-locations-oid'], help="OID of 'custom-locations' app")
with self.argument_context('connectedk8s disable-features') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
c.argument('features', features_types, options_list=['--features'], help='Space-separated list of features you want to disable.')
c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
with self.argument_context('connectedk8s list') as c:
pass
with self.argument_context('connectedk8s show') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
with self.argument_context('connectedk8s delete') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.')
c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.')
with self.argument_context('connectedk8s proxy') as c:
c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.')
c.argument('token', options_list=['--token'], help='Service account token to use to authenticate to the kubernetes cluster.')
c.argument('context_name', options_list=['--kube-context'], help='If specified, overwrite the default context name.')
c.argument('path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(), default=os.path.join(os.path.expanduser('~'), '.kube', 'config'), help="Kubernetes configuration file to update. If not provided, updates the file '~/.kube/config'. Use '-' to print YAML to stdout instead.")
c.argument('api_server_port', options_list=['--port'], help='Port used for accessing connected cluster.')
|
30,800 |
def main():
params = demisto.params()
args = demisto.args()
proxies = handle_proxy()
verify = not params.get('insecure')
base_url = params.get('url')
client = Client(base_url, verify=verify, proxy=proxies)
command = demisto.command()
demisto.info(f"Command being executed is {command}")
try:
commands = {
'spacex-get-company-info': get_company_info_command,
'spacex-get-api-info': get_api_info_command,
'spacex-get-launches': get_launches_command,
'spacex-get-upcoming-launches': get_upcoming_launches_command,
'spacex-get-launch-details': get_launch_details_command,
'spacex-get-next-launch': get_next_launch_command,
'spacex-get-launch-images': get_launch_images_command,
'spacex-get-landing-pads': get_all_landing_pads_command,
'spacex-get-landing-pad': get_landing_pad_command,
'spacex-get-roadster': get_roadster_data_command,
'spacex-get-missions': get_all_missions_command,
'spacex-get-mission': get_mission_command,
'spacex-get-launch-pads': get_launch_pads_command,
'spacex-get-launch-pad': get_launch_pad_command,
'spacex-get-ships': get_ships_command,
'spacex-get-ship': get_ship_command,
'spacex-get-capsules': get_capsules_command,
'spacex-get-capsule': get_capsule_command,
'spacex-get-upcoming-capsules': get_upcoming_capsules_command,
'spacex-get-past-capsules': get_past_capsules_command,
'spacex-get-cores': get_cores_command,
'spacex-get-core': get_core_command,
'spacex-get-upcoming-cores': get_upcoming_cores_command,
'spacex-get-past-cores': get_past_cores_command,
'spacex-get-dragons': get_dragons_command,
'spacex-get-dragon': get_dragon_command,
'spacex-get-historical-events': get_historical_events_command,
'spacex-get-historical-event': get_historical_event_command,
'spacex-get-payloads': get_payloads_command,
'spacex-get-payload': get_payload_command,
'spacex-get-rockets': get_rockets_command,
'spacex-get-rocket': get_rocket_command,
}
if command == 'fetch-incidents':
fetch_incidents_command(client, params)
elif command == 'test-module':
test_module(client, params)
elif command == 'get-mapping-fields':
demisto.results(get_mapping_fields_command(client, args, params))
elif command == 'get-remote-data':
demisto.results(get_remote_data_command(client, args, params))
elif command == 'update-remote-system':
demisto.results(update_remote_system_command(client, args, params))
elif command in commands:
commands[command](client, args)
else:
return_error(f"{command} not recognised")
except Exception as err:
return_error(str(err))
|
def main():
params = demisto.params()
args = demisto.args()
proxies = handle_proxy()
verify = not params.get('insecure')
base_url = params.get('url')
client = Client(base_url, verify=verify, proxy=proxies)
command = demisto.command()
demisto.info(f"Command being executed is {command}")
try:
commands = {
'spacex-get-company-info': get_company_info_command,
'spacex-get-api-info': get_api_info_command,
'spacex-get-launches': get_launches_command,
'spacex-get-upcoming-launches': get_upcoming_launches_command,
'spacex-get-launch-details': get_launch_details_command,
'spacex-get-next-launch': get_next_launch_command,
'spacex-get-launch-images': get_launch_images_command,
'spacex-get-landing-pads': get_all_landing_pads_command,
'spacex-get-landing-pad': get_landing_pad_command,
'spacex-get-roadster': get_roadster_data_command,
'spacex-get-missions': get_all_missions_command,
'spacex-get-mission': get_mission_command,
'spacex-get-launch-pads': get_launch_pads_command,
'spacex-get-launch-pad': get_launch_pad_command,
'spacex-get-ships': get_ships_command,
'spacex-get-ship': get_ship_command,
'spacex-get-capsules': get_capsules_command,
'spacex-get-capsule': get_capsule_command,
'spacex-get-upcoming-capsules': get_upcoming_capsules_command,
'spacex-get-past-capsules': get_past_capsules_command,
'spacex-get-cores': get_cores_command,
'spacex-get-core': get_core_command,
'spacex-get-upcoming-cores': get_upcoming_cores_command,
'spacex-get-past-cores': get_past_cores_command,
'spacex-get-dragons': get_dragons_command,
'spacex-get-dragon': get_dragon_command,
'spacex-get-historical-events': get_historical_events_command,
'spacex-get-historical-event': get_historical_event_command,
'spacex-get-payloads': get_payloads_command,
'spacex-get-payload': get_payload_command,
'spacex-get-rockets': get_rockets_command,
'spacex-get-rocket': get_rocket_command,
}
if command == 'fetch-incidents':
fetch_incidents_command(client, params)
elif command == 'test-module':
test_module(client, params)
elif command == 'get-mapping-fields':
demisto.results(get_mapping_fields_command(client, args, params))
elif command == 'get-remote-data':
demisto.results(get_remote_data_command(client, args, params))
elif command == 'update-remote-system':
demisto.results(update_remote_system_command(client, args, params))
elif command in commands:
commands[command](client, args)
else:
return_error(f"{command} not recognised")
except Exception as err:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(err)}')
|
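Stripped of the demisto/Client plumbing, main() above is a dictionary dispatch: command names map to handler functions and unknown names raise an error. A toy version of that pattern with two hypothetical handlers:

def get_company_info_command(client, args):
    return {'name': 'SpaceX'}

def get_rockets_command(client, args):
    return ['Falcon 9', 'Starship']

commands = {
    'spacex-get-company-info': get_company_info_command,
    'spacex-get-rockets': get_rockets_command,
}

def dispatch(command, client=None, args=None):
    # Look the command up in the table and call its handler, mirroring the
    # "elif command in commands" branch of main().
    if command in commands:
        return commands[command](client, args)
    raise ValueError(f"{command} not recognised")

print(dispatch('spacex-get-rockets'))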
10,325 |
def main():
argument_spec = url_argument_spec()
# setup aliases
argument_spec['url_username']['aliases'] = ['username']
argument_spec['url_password']['aliases'] = ['password']
argument_spec.update(
url=dict(type='str', required=True),
dest=dict(type='path', required=True),
backup=dict(type='bool'),
sha256sum=dict(type='str', default=''),
checksum=dict(type='str', default=''),
timeout=dict(type='int', default=10),
headers=dict(type='raw'),
tmp_dest=dict(type='path'),
)
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
mutually_exclusive=[['checksum', 'sha256sum']],
)
if module.params.get('thirsty'):
module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead', version='2.13')
url = module.params['url']
dest = module.params['dest']
backup = module.params['backup']
force = module.params['force']
sha256sum = module.params['sha256sum']
checksum = module.params['checksum']
use_proxy = module.params['use_proxy']
timeout = module.params['timeout']
tmp_dest = module.params['tmp_dest']
result = dict(
changed=False,
checksum_dest=None,
checksum_src=None,
dest=dest,
elapsed=0,
url=url,
)
# Parse headers to dict
if isinstance(module.params['headers'], dict):
headers = module.params['headers']
elif module.params['headers']:
try:
headers = dict(item.split(':', 1) for item in module.params['headers'].split(','))
module.deprecate('Supplying `headers` as a string is deprecated. Please use dict/hash format for `headers`', version='2.10')
except Exception:
module.fail_json(msg="The string representation for the `headers` parameter requires a key:value,key:value syntax to be properly parsed.", **result)
else:
headers = None
dest_is_dir = os.path.isdir(dest)
last_mod_time = None
# workaround for usage of deprecated sha256sum parameter
if sha256sum:
checksum = 'sha256:%s' % (sha256sum)
# checksum specified, parse for algorithm and checksum
if checksum:
try:
algorithm, checksum = checksum.split(':', 1)
except ValueError:
module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>", **result)
if checksum.startswith('http://') or checksum.startswith('https://') or checksum.startswith('ftp://'):
checksum_url = checksum
# download checksum file to checksum_tmpsrc
checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)
with open(checksum_tmpsrc) as f:
lines = [line.rstrip('\n') for line in f]
os.remove(checksum_tmpsrc)
checksum_map = {}
for line in lines:
parts = line.split(None, 1)
if len(parts) == 2:
checksum_map[parts[0]] = parts[1]
filename = url_filename(url)
# Look through each line in the checksum file for a hash corresponding to
# the filename in the url, returning the first hash that is found.
for cksum in (s for (s, f) in checksum_map.items() if f.strip('./') == filename):
checksum = cksum
break
else:
checksum = None
if checksum is None:
module.fail_json(msg="Unable to find a checksum for file '%s' in '%s'" % (filename, checksum_url))
# Remove any non-alphanumeric characters, including the infamous
# Unicode zero-width space
checksum = re.sub(r'\W+', '', checksum).lower()
# Ensure the checksum portion is a hexdigest
try:
int(checksum, 16)
except ValueError:
module.fail_json(msg='The checksum format is invalid', **result)
if not dest_is_dir and os.path.exists(dest):
checksum_mismatch = False
# If the download is not forced and there is a checksum, allow
# checksum match to skip the download.
if not force and checksum != '':
destination_checksum = module.digest_from_file(dest, algorithm)
if checksum != destination_checksum:
checksum_mismatch = True
# Not forcing redownload, unless checksum does not match
if not force and checksum and not checksum_mismatch:
# Not forcing redownload, unless checksum does not match
# allow file attribute changes
module.params['path'] = dest
file_args = module.load_file_common_arguments(module.params)
file_args['path'] = dest
result['changed'] = module.set_fs_attributes_if_different(file_args, False)
if result['changed']:
module.exit_json(msg="file already exists but file attributes changed", **result)
module.exit_json(msg="file already exists", **result)
# If the file already exists, prepare the last modified time for the
# request.
mtime = os.path.getmtime(dest)
last_mod_time = datetime.datetime.utcfromtimestamp(mtime)
# If the checksum does not match we have to force the download
# because last_mod_time may be newer than on remote
if checksum_mismatch:
force = True
# download to tmpsrc
start = datetime.datetime.utcnow()
tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
result['src'] = tmpsrc
# Now the request has completed, we can finally generate the final
# destination file name from the info dict.
if dest_is_dir:
filename = extract_filename_from_headers(info)
if not filename:
# Fall back to extracting the filename from the URL.
# Pluck the URL from the info, since a redirect could have changed
# it.
filename = url_filename(info['url'])
dest = os.path.join(dest, filename)
result['dest'] = dest
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
os.remove(tmpsrc)
module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], **result)
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Source %s is not readable" % (tmpsrc), **result)
result['checksum_src'] = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not writable" % (dest), **result)
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not readable" % (dest), **result)
result['checksum_dest'] = module.sha1(dest)
else:
if not os.path.exists(os.path.dirname(dest)):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s does not exist" % (os.path.dirname(dest)), **result)
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not writable" % (os.path.dirname(dest)), **result)
if module.check_mode:
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
result['changed'] = ('checksum_dest' not in result or
result['checksum_src'] != result['checksum_dest'])
module.exit_json(msg=info.get('msg', ''), **result)
# If a checksum was provided, ensure that the temporary file matches this checksum
# before moving it to the destination.
if checksum != '':
tmpsrc_checksum = module.digest_from_file(tmpsrc, algorithm)
if checksum != tmpsrc_checksum:
os.remove(tmpsrc)
module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (
tmpsrc, checksum, tmpsrc_checksum
), **result)
# Copy temporary file to destination if necessary
backup_file = None
if result['checksum_src'] != result['checksum_dest']:
try:
if backup:
if os.path.exists(dest):
backup_file = module.backup_local(dest)
module.atomic_move(tmpsrc, dest)
except Exception as e:
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)),
exception=traceback.format_exc(), **result)
result['changed'] = True
else:
result['changed'] = False
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
# allow file attribute changes
module.params['path'] = dest
file_args = module.load_file_common_arguments(module.params)
file_args['path'] = dest
result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'])
# Backwards compat only. We'll return None on FIPS enabled systems
try:
result['md5sum'] = module.md5(dest)
except ValueError:
result['md5sum'] = None
if backup_file:
result['backup_file'] = backup_file
# Mission complete
module.exit_json(msg=info.get('msg', ''), status_code=info.get('status', ''), **result)
|
def main():
argument_spec = url_argument_spec()
# setup aliases
argument_spec['url_username']['aliases'] = ['username']
argument_spec['url_password']['aliases'] = ['password']
argument_spec.update(
url=dict(type='str', required=True),
dest=dict(type='path', required=True),
backup=dict(type='bool'),
sha256sum=dict(type='str', default=''),
checksum=dict(type='str', default=''),
timeout=dict(type='int', default=10),
headers=dict(type='raw'),
tmp_dest=dict(type='path'),
)
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
mutually_exclusive=[['checksum', 'sha256sum']],
)
if module.params.get('thirsty'):
module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead', version='2.13')
url = module.params['url']
dest = module.params['dest']
backup = module.params['backup']
force = module.params['force']
sha256sum = module.params['sha256sum']
checksum = module.params['checksum']
use_proxy = module.params['use_proxy']
timeout = module.params['timeout']
tmp_dest = module.params['tmp_dest']
result = dict(
changed=False,
checksum_dest=None,
checksum_src=None,
dest=dest,
elapsed=0,
url=url,
)
# Parse headers to dict
if isinstance(module.params['headers'], dict):
headers = module.params['headers']
elif module.params['headers']:
try:
headers = dict(item.split(':', 1) for item in module.params['headers'].split(','))
module.deprecate('Supplying `headers` as a string is deprecated. Please use dict/hash format for `headers`', version='2.10')
except Exception:
module.fail_json(msg="The string representation for the `headers` parameter requires a key:value,key:value syntax to be properly parsed.", **result)
else:
headers = None
dest_is_dir = os.path.isdir(dest)
last_mod_time = None
# workaround for usage of deprecated sha256sum parameter
if sha256sum:
checksum = 'sha256:%s' % (sha256sum)
# checksum specified, parse for algorithm and checksum
if checksum:
try:
algorithm, checksum = checksum.split(':', 1)
except ValueError:
module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>", **result)
if checksum.startswith('http://') or checksum.startswith('https://') or checksum.startswith('ftp://'):
checksum_url = checksum
# download checksum file to checksum_tmpsrc
checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)
with open(checksum_tmpsrc) as f:
lines = [line.rstrip('\n') for line in f]
os.remove(checksum_tmpsrc)
checksum_map = {}
for line in lines:
parts = line.split(None, 1)
if len(parts) == 2:
checksum_map[parts[0]] = parts[1]
filename = url_filename(url)
# Look through each line in the checksum file for a hash corresponding to
# the filename in the url, returning the first hash that is found.
for cksum in (s for (s, f) in checksum_map.items() if f.strip('./') == filename):
checksum = cksum
break
else:
checksum = None
if checksum is None:
module.fail_json(msg="Unable to find a checksum for file '%s' in '%s'" % (filename, checksum_url))
# Remove any non-alphanumeric characters, including the infamous
# Unicode zero-width space
checksum = re.sub(r'\W+', '', checksum).lower()
# Ensure the checksum portion is a hexdigest
try:
int(checksum, 16)
except ValueError:
module.fail_json(msg='The checksum format is invalid', **result)
if not dest_is_dir and os.path.exists(dest):
checksum_mismatch = False
# If the download is not forced and there is a checksum, allow
# checksum match to skip the download.
if not force and checksum != '':
destination_checksum = module.digest_from_file(dest, algorithm)
if checksum != destination_checksum:
checksum_mismatch = True
# Not forcing redownload, unless checksum does not match
if not force and checksum and not checksum_mismatch:
# Not forcing redownload, unless checksum does not match
# allow file attribute changes
module.params['path'] = dest
file_args = module.load_file_common_arguments(module.params)
file_args['path'] = dest
result['changed'] = module.set_fs_attributes_if_different(file_args, False)
if result['changed']:
module.exit_json(msg="file already exists but file attributes changed", **result)
module.exit_json(msg="file already exists", **result)
# If the file already exists, prepare the last modified time for the
# request.
mtime = os.path.getmtime(dest)
last_mod_time = datetime.datetime.utcfromtimestamp(mtime)
# If the checksum does not match we have to force the download
# because last_mod_time may be newer than on remote
if checksum_mismatch:
force = True
# download to tmpsrc
start = datetime.datetime.utcnow()
tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
result['src'] = tmpsrc
# Now the request has completed, we can finally generate the final
# destination file name from the info dict.
if dest_is_dir:
filename = extract_filename_from_headers(info)
if not filename:
# Fall back to extracting the filename from the URL.
# Pluck the URL from the info, since a redirect could have changed
# it.
filename = url_filename(info['url'])
dest = os.path.join(dest, filename)
result['dest'] = dest
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
os.remove(tmpsrc)
module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], **result)
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Source %s is not readable" % (tmpsrc), **result)
result['checksum_src'] = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not writable" % (dest), **result)
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not readable" % (dest), **result)
result['checksum_dest'] = module.sha1(dest)
else:
if not os.path.exists(os.path.dirname(dest)):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s does not exist" % (os.path.dirname(dest)), **result)
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not writable" % (os.path.dirname(dest)), **result)
if module.check_mode:
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
result['changed'] = ('checksum_dest' not in result or
result['checksum_src'] != result['checksum_dest'])
module.exit_json(msg=info.get('msg', ''), **result)
# If a checksum was provided, ensure that the temporary file matches this checksum
# before moving it to the destination.
if checksum != '':
tmpsrc_checksum = module.digest_from_file(tmpsrc, algorithm)
if checksum != tmpsrc_checksum:
os.remove(tmpsrc)
module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (tmpsrc, checksum, tmpsrc_checksum), **result)
# Copy temporary file to destination if necessary
backup_file = None
if result['checksum_src'] != result['checksum_dest']:
try:
if backup:
if os.path.exists(dest):
backup_file = module.backup_local(dest)
module.atomic_move(tmpsrc, dest)
except Exception as e:
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)),
exception=traceback.format_exc(), **result)
result['changed'] = True
else:
result['changed'] = False
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
# allow file attribute changes
module.params['path'] = dest
file_args = module.load_file_common_arguments(module.params)
file_args['path'] = dest
result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'])
# Backwards compat only. We'll return None on FIPS enabled systems
try:
result['md5sum'] = module.md5(dest)
except ValueError:
result['md5sum'] = None
if backup_file:
result['backup_file'] = backup_file
# Mission complete
module.exit_json(msg=info.get('msg', ''), status_code=info.get('status', ''), **result)
|
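The checksum handling above splits a "<algorithm>:<hexdigest>" string and compares it against module.digest_from_file(...), which is Ansible's helper. A plain-hashlib sketch of the same split-and-verify idea (the helper below is a stand-in, not Ansible's implementation):

import hashlib
import os
import tempfile

def digest_from_file(path, algorithm):
    # Hash the file in chunks so large downloads do not need to fit in memory.
    h = hashlib.new(algorithm)
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(64 * 1024), b''):
            h.update(chunk)
    return h.hexdigest()

# Build a throwaway file and a matching "<algorithm>:<checksum>" string.
fd, path = tempfile.mkstemp()
os.write(fd, b'hello world\n')
os.close(fd)
checksum = 'sha256:' + digest_from_file(path, 'sha256')

algorithm, expected = checksum.split(':', 1)
assert digest_from_file(path, algorithm) == expected
os.remove(path)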
53,363 |
def _strip_code_flanked_in_backticks(line: str) -> str:
"""Alter line so code flanked in backticks is ignored.
Pyenchant automatically strips backticks when parsing tokens, so this cannot be done at the individual filter level.
"""
def replace_code_but_leave_surrounding_characters(match_obj) -> str:
return match_obj.group(1) + match_obj.group(5)
return CODE_FLANKED_IN_BACKTICK_REGEX.sub(
replace_code_but_leave_surrounding_characters, line
)
|
def _strip_code_flanked_in_backticks(line: str) -> str:
"""Alter line so code flanked in backticks is ignored.
Pyenchant automatically strips backticks when parsing tokens,
so this cannot be done at the individual filter level."""
def replace_code_but_leave_surrounding_characters(match_obj) -> str:
return match_obj.group(1) + match_obj.group(5)
return CODE_FLANKED_IN_BACKTICK_REGEX.sub(
replace_code_but_leave_surrounding_characters, line
)
|
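CODE_FLANKED_IN_BACKTICK_REGEX is defined elsewhere in that module, so its exact group layout is an assumption here; the hypothetical pattern below merely illustrates why the replacement keeps group(1) and group(5), the characters surrounding the backtick-flanked span:

import re

# Hypothetical stand-in for CODE_FLANKED_IN_BACKTICK_REGEX (the real pattern is
# defined elsewhere): groups 1 and 5 capture the characters surrounding the
# backtick-flanked code, groups 2-4 capture the backticks and the code itself.
CODE_FLANKED_IN_BACKTICK_REGEX = re.compile(
    r"(\s|^)(`+)([^`]+)(\2)(\s|$|[.,;:!?])"
)

def strip_code(line):
    return CODE_FLANKED_IN_BACKTICK_REGEX.sub(
        lambda m: m.group(1) + m.group(5), line
    )

print(strip_code("call `foo_bar()` before saving."))
# prints "call  before saving." (the code span and its backticks are gone)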
31,178 |
def create_context_from_network_artifacts(network_artifacts, ip_context):
domain_context = []
if network_artifacts:
for artifact in network_artifacts:
domain = artifact.get('network_domain')
if domain:
domain_context.append(
{
'Name': domain
}
)
network_ip_details = {
'Address': artifact.get('network_remote_ip'),
'GEO.Country': artifact.get('network_country')
}
remove_nulls_from_dictionary(network_ip_details)
if network_ip_details:
ip_context.append(network_ip_details)
return domain_context
|
def create_context_from_network_artifacts(network_artifacts, ip_context):
domain_context = []
if network_artifacts:
for artifact in network_artifacts:
domain = artifact.get('network_domain')
if domain:
domain_context.append(
{
'Name': domain,
}
)
network_ip_details = {
'Address': artifact.get('network_remote_ip'),
'GEO.Country': artifact.get('network_country')
}
remove_nulls_from_dictionary(network_ip_details)
if network_ip_details:
ip_context.append(network_ip_details)
return domain_context
|
35,588 |
def densenet169(pretrained: bool = False, progress: bool = True, **kwargs) -> DenseNet:
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool): If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress,
**kwargs)
|
def densenet169(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet:
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool): If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress,
**kwargs)
|
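A typical call to the torchvision constructor above, assuming torchvision is installed (no pretrained weights are downloaded):

import torch
from torchvision.models import densenet169

model = densenet169()          # default: random weights, nothing downloaded
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)            # torch.Size([1, 1000])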
2,657 |
def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
r"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k))
where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest
neighbors in the output space, and every sample j is its :math:`r(i, j)`-th
nearest neighbor in the input space. In other words, any unexpected nearest
neighbors in the output space are penalised in proportion to their rank in
the input space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : ndarray of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, default=5
The number of neighbors that will be considered. Should be fewer than
`n_samples / 2` to ensure the trustworthiness lies within [0, 1], as
mentioned in [1]_. A warning will be raised otherwise.
metric : str or callable, default='euclidean'
Which metric to use for computing pairwise distances between samples
from the original input space. If metric is 'precomputed', X must be a
matrix of pairwise distances or squared distances. Otherwise, for a list
of available metrics, see the documentation of argument metric in
`sklearn.metrics.pairwise_distances` and metrics listed in
`sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the
"cosine" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.
.. versionadded:: 0.20
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
References
----------
.. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood
Preservation in Nonlinear Projection Methods: An Experimental Study.
In Proceedings of the International Conference on Artificial Neural Networks
(ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.
.. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving
Local Structure. Proceedings of the Twelth International Conference on
Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.
"""
n_samples = X.shape[0]
if n_neighbors >= n_samples / 2:
warnings.warn(
f"n_neighbors ({n_neighbors}) should be less than {n_samples / 2}"
)
dist_X = pairwise_distances(X, metric=metric)
if metric == "precomputed":
dist_X = dist_X.copy()
# we set the diagonal to np.inf to exclude the points themselves from
# their own neighborhood
np.fill_diagonal(dist_X, np.inf)
ind_X = np.argsort(dist_X, axis=1)
# `ind_X[i]` is the index of sorted distances between i and other samples
ind_X_embedded = (
NearestNeighbors(n_neighbors=n_neighbors)
.fit(X_embedded)
.kneighbors(return_distance=False)
)
# We build an inverted index of neighbors in the input space: For sample i,
# we define `inverted_index[i]` as the inverted index of sorted distances:
# inverted_index[i][ind_X[i]] = np.arange(1, n_sample + 1)
inverted_index = np.zeros((n_samples, n_samples), dtype=int)
ordered_indices = np.arange(n_samples + 1)
inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]
ranks = (
inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors
)
t = np.sum(ranks[ranks > 0])
t = 1.0 - t * (
2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
)
return t
|
def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
r"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k))
where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest
neighbors in the output space, and every sample j is its :math:`r(i, j)`-th
nearest neighbor in the input space. In other words, any unexpected nearest
neighbors in the output space are penalised in proportion to their rank in
the input space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : ndarray of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, default=5
The number of neighbors that will be considered. Should be fewer than
        `n_samples / 2` to ensure the trustworthiness lies within [0, 1], as
mentioned in [1]_. A warning will be raised otherwise.
metric : str or callable, default='euclidean'
Which metric to use for computing pairwise distances between samples
from the original input space. If metric is 'precomputed', X must be a
matrix of pairwise distances or squared distances. Otherwise, for a list
of available metrics, see the documentation of argument metric in
`sklearn.pairwise.pairwise_distances` and metrics listed in
`sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the
"cosine" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.
.. versionadded:: 0.20
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
References
----------
.. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood
Preservation in Nonlinear Projection Methods: An Experimental Study.
In Proceedings of the International Conference on Artificial Neural Networks
(ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.
.. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving
Local Structure. Proceedings of the Twelth International Conference on
Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.
"""
n_samples = X.shape[0]
if n_neighbors >= n_samples / 2:
warnings.warn(
f"n_neighbors ({n_neighbors}) should be less than n_samples / 2 ({n_samples / 2})"
)
dist_X = pairwise_distances(X, metric=metric)
if metric == "precomputed":
dist_X = dist_X.copy()
# we set the diagonal to np.inf to exclude the points themselves from
# their own neighborhood
np.fill_diagonal(dist_X, np.inf)
ind_X = np.argsort(dist_X, axis=1)
# `ind_X[i]` is the index of sorted distances between i and other samples
ind_X_embedded = (
NearestNeighbors(n_neighbors=n_neighbors)
.fit(X_embedded)
.kneighbors(return_distance=False)
)
# We build an inverted index of neighbors in the input space: For sample i,
# we define `inverted_index[i]` as the inverted index of sorted distances:
# inverted_index[i][ind_X[i]] = np.arange(1, n_sample + 1)
inverted_index = np.zeros((n_samples, n_samples), dtype=int)
ordered_indices = np.arange(n_samples + 1)
inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]
ranks = (
inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors
)
t = np.sum(ranks[ranks > 0])
t = 1.0 - t * (
2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
)
return t
|
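A minimal usage sketch for the function above, assuming scikit-learn (where trustworthiness lives in sklearn.manifold):
from sklearn.datasets import load_iris
from sklearn.manifold import TSNE, trustworthiness

X = load_iris().data
X_embedded = TSNE(n_components=2, init="random", random_state=0).fit_transform(X)
# Values close to 1.0 mean the k-nearest-neighbour structure of X is preserved
# in the 2-D embedding; values near 0 indicate it is lost.
print(trustworthiness(X, X_embedded, n_neighbors=5))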
31,333 |
def prepare_hr_for_alerts(alerts: List[Dict[str, Any]], header: str) -> str:
"""
Prepare the Human readable info for alerts command.
:param alerts: The alerts data.
:param header: Header of the hr.
:return: Human readable.
"""
hr_list = []
for record in alerts:
hr_record = {
'Alert ID': record.get('alertId', ''),
'Create Time': record.get('createTime', ''),
'Update Time': record.get('updateTime', ''),
'Alert Type': record.get('type', ''),
'Source': record.get('source', ''),
'Severity': record.get('metadata', {}).get('severity', ''),
'Status': record.get('metadata', {}).get('status', '')
}
hr_list.append(hr_record)
return tableToMarkdown(header, hr_list, ['Alert ID', 'Alert Type', 'Source', 'Severity', 'Status', 'Create Time',
'Update Time'], removeNull=True)
|
def prepare_hr_for_alerts(alerts: List[Dict[str, Any]], header: str) -> str:
"""
Prepare the Human readable info for alerts command.
:param alerts: The alerts data.
:param header: Header of the hr table.
:return: Human readable.
"""
hr_list = []
for record in alerts:
hr_record = {
'Alert ID': record.get('alertId', ''),
'Create Time': record.get('createTime', ''),
'Update Time': record.get('updateTime', ''),
'Alert Type': record.get('type', ''),
'Source': record.get('source', ''),
'Severity': record.get('metadata', {}).get('severity', ''),
'Status': record.get('metadata', {}).get('status', '')
}
hr_list.append(hr_record)
return tableToMarkdown(header, hr_list, ['Alert ID', 'Alert Type', 'Source', 'Severity', 'Status', 'Create Time',
'Update Time'], removeNull=True)
|
43,662 |
def meanfield(
name, geometry, charge=0, mult=1, basis="sto-3g", package="pyscf", outpath="."
): # pylint: disable=too-many-arguments
r"""Generates a file from which the mean field electronic structure
of the molecule can be retrieved.
This function uses OpenFermion-PySCF and OpenFermion-Psi4 plugins to
perform the Hartree-Fock (HF) calculation for the polyatomic system using the quantum
chemistry packages ``PySCF`` and ``Psi4``, respectively. The mean field electronic
structure is saved in an hdf5-formatted file in the directory
``os.path.join(outpath, package, basis)``.
Args:
name (str): String used to label the molecule
geometry (list): List containing the symbol and Cartesian coordinates for each atom
charge (int): Net charge of the system
mult (int): Spin multiplicity :math:`\mathrm{mult}=N_\mathrm{unpaired} + 1` for
:math:`N_\mathrm{unpaired}` unpaired electrons occupying the HF orbitals.
Possible values for ``mult`` are :math:`1, 2, 3, \ldots`. If not specified,
a closed-shell HF state is assumed.
basis (str): Atomic basis set used to represent the HF orbitals. Basis set
availability per element can be found
`here <www.psicode.org/psi4manual/master/basissets_byelement.html#apdx-basiselement>`_
package (str): Quantum chemistry package used to solve the Hartree-Fock equations.
Either ``'pyscf'`` or ``'psi4'`` can be used.
outpath (str): Path to output directory
Returns:
str: full path to the file containing the mean field electronic structure
**Example**
>>> name = 'h2'
>>> geometry = [['H', (0.0, 0.0, -0.35)], ['H', (0.0, 0.0, 0.35)]]
>>> meanfield(name, geometry)
./pyscf/sto-3g/h2
"""
package = package.strip().lower()
if package not in ("psi4", "pyscf"):
error_message = (
"Integration with quantum chemistry package '{}' is not available. \n Please set"
" 'package' to 'pyscf' or 'psi4'.".format(package)
)
raise TypeError(error_message)
package_dir = os.path.join(outpath.strip(), package)
basis_dir = os.path.join(package_dir, basis.strip())
if not os.path.isdir(package_dir):
os.mkdir(package_dir)
os.mkdir(basis_dir)
elif not os.path.isdir(basis_dir):
os.mkdir(basis_dir)
path_to_file = os.path.join(basis_dir, name.strip())
molecule = MolecularData(geometry, basis, mult, charge, filename=path_to_file)
if package == "psi4":
run_psi4(molecule, run_scf=1, verbose=0, tolerate_error=1)
if package == "pyscf":
run_pyscf(molecule, run_scf=1, verbose=0)
return path_to_file
|
def meanfield(
name, geometry, charge=0, mult=1, basis="sto-3g", package="pyscf", outpath="."
): # pylint: disable=too-many-arguments
r"""Generates a file from which the mean field electronic structure
of the molecule can be retrieved.
This function uses OpenFermion-PySCF and OpenFermion-Psi4 plugins to
perform the Hartree-Fock (HF) calculation for the polyatomic system using the quantum
chemistry packages ``PySCF`` and ``Psi4``, respectively. The mean field electronic
structure is saved in an hdf5-formatted file in the directory
``os.path.join(outpath, package, basis)``.
Args:
name (str): molecule label
geometry (list): List containing the symbol and Cartesian coordinates for each atom
charge (int): Net charge of the system
mult (int): Spin multiplicity :math:`\mathrm{mult}=N_\mathrm{unpaired} + 1` for
:math:`N_\mathrm{unpaired}` unpaired electrons occupying the HF orbitals.
Possible values for ``mult`` are :math:`1, 2, 3, \ldots`. If not specified,
a closed-shell HF state is assumed.
basis (str): Atomic basis set used to represent the HF orbitals. Basis set
availability per element can be found
`here <www.psicode.org/psi4manual/master/basissets_byelement.html#apdx-basiselement>`_
package (str): Quantum chemistry package used to solve the Hartree-Fock equations.
Either ``'pyscf'`` or ``'psi4'`` can be used.
outpath (str): Path to output directory
Returns:
str: full path to the file containing the mean field electronic structure
**Example**
>>> name = 'h2'
>>> geometry = [['H', (0.0, 0.0, -0.35)], ['H', (0.0, 0.0, 0.35)]]
>>> meanfield(name, geometry)
./pyscf/sto-3g/h2
"""
package = package.strip().lower()
if package not in ("psi4", "pyscf"):
error_message = (
"Integration with quantum chemistry package '{}' is not available. \n Please set"
" 'package' to 'pyscf' or 'psi4'.".format(package)
)
raise TypeError(error_message)
package_dir = os.path.join(outpath.strip(), package)
basis_dir = os.path.join(package_dir, basis.strip())
if not os.path.isdir(package_dir):
os.mkdir(package_dir)
os.mkdir(basis_dir)
elif not os.path.isdir(basis_dir):
os.mkdir(basis_dir)
path_to_file = os.path.join(basis_dir, name.strip())
molecule = MolecularData(geometry, basis, mult, charge, filename=path_to_file)
if package == "psi4":
run_psi4(molecule, run_scf=1, verbose=0, tolerate_error=1)
if package == "pyscf":
run_pyscf(molecule, run_scf=1, verbose=0)
return path_to_file
|
7,547 |
def get_pkg_data_path(*path, package=None):
"""Make path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings -- for ``os.path`` intelligent path joining.
package : str, optional, keyword only
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
module = resolve_name(package)
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
module_path = os.path.dirname(module.__file__)
path = os.path.join(module_path, *path)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(path, root_dir):
raise RuntimeError(f"attempted to get a local data file outside "
f"of the {rootpkgname} tree.")
return path
|
def get_pkg_data_path(*path, package=None):
"""Make path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings -- for ``os.path`` intelligent path joining.
package : str, optional, keyword only
If specified, look for a file relative to the given package, rather
than the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
module = resolve_name(package)
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
module_path = os.path.dirname(module.__file__)
path = os.path.join(module_path, *path)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(path, root_dir):
raise RuntimeError(f"attempted to get a local data file outside "
f"of the {rootpkgname} tree.")
return path
|
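A small sketch of the path resolution above, assuming a recent astropy where get_pkg_data_path is public in astropy.utils.data; the resolved file or directory need not exist, since the function only joins the path and checks that it stays inside the package tree.
from astropy.utils.data import get_pkg_data_path

# Resolve a 'data' directory relative to an explicitly named package instead
# of the calling module's package.
print(get_pkg_data_path('data', package='astropy.units'))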
22,932 |
def summarize_file(file_path, num_head_lines, num_tail_lines, max_line_length, truncation_text):
"""
Summarizes the file at the given path, returning a string containing the
given numbers of lines from beginning and end of the file. If the file needs
to be truncated, places truncation_text at the truncation point.
Unlike other methods, this method treats everything as Unicodes string.
"""
assert num_head_lines > 0 or num_tail_lines > 0
def ensure_ends_with_newline(lines, remove_line_without_newline=False):
if lines and not lines[-1].endswith('\n'):
if remove_line_without_newline:
lines.pop()
else:
lines[-1] += '\n'
file_size = os.stat(file_path).st_size
with open(file_path) as fileobj:
if file_size > (num_head_lines + num_tail_lines) * max_line_length:
if num_head_lines > 0:
# To ensure that the last line is a whole line, we remove the
# last line if it doesn't have a newline character.
head_lines = fileobj.read(num_head_lines * max_line_length).splitlines(True)[
:num_head_lines
]
ensure_ends_with_newline(head_lines, remove_line_without_newline=True)
if num_tail_lines > 0:
# To ensure that the first line is a whole line, we read an
# extra character and always remove the first line. If the first
# character is a newline, then the first line will just be
# empty and the second line is a whole line. If the first
# character is not a new line, then the first line, had we not
# read the extra character, would not be a whole line. Thus, it
# should also be dropped.
fileobj.seek(file_size - num_tail_lines * max_line_length - 1, os.SEEK_SET)
tail_lines = fileobj.read(num_tail_lines * max_line_length).splitlines(True)[1:][
-num_tail_lines:
]
ensure_ends_with_newline(tail_lines)
if num_head_lines > 0 and num_tail_lines > 0:
lines = head_lines + [truncation_text] + tail_lines
elif num_head_lines > 0:
lines = head_lines
else:
lines = tail_lines
else:
lines = fileobj.readlines()
ensure_ends_with_newline(lines)
if len(lines) > num_head_lines + num_tail_lines:
if num_head_lines > 0 and num_tail_lines > 0:
lines = lines[:num_head_lines] + [truncation_text] + lines[-num_tail_lines:]
elif num_head_lines > 0:
lines = lines[:num_head_lines]
else:
lines = lines[-num_tail_lines:]
return ''.join(lines)
|
def summarize_file(file_path, num_head_lines, num_tail_lines, max_line_length, truncation_text):
"""
Summarizes the file at the given path, returning a string containing the
given numbers of lines from beginning and end of the file. If the file needs
to be truncated, places truncation_text at the truncation point.
Unlike other methods, this method treats everything as a Unicode string.
"""
assert num_head_lines > 0 or num_tail_lines > 0
def ensure_ends_with_newline(lines, remove_line_without_newline=False):
if lines and not lines[-1].endswith('\n'):
if remove_line_without_newline:
lines.pop()
else:
lines[-1] += '\n'
file_size = os.stat(file_path).st_size
with open(file_path) as fileobj:
if file_size > (num_head_lines + num_tail_lines) * max_line_length:
if num_head_lines > 0:
# To ensure that the last line is a whole line, we remove the
# last line if it doesn't have a newline character.
head_lines = fileobj.read(num_head_lines * max_line_length).splitlines(True)[
:num_head_lines
]
ensure_ends_with_newline(head_lines, remove_line_without_newline=True)
if num_tail_lines > 0:
# To ensure that the first line is a whole line, we read an
# extra character and always remove the first line. If the first
# character is a newline, then the first line will just be
# empty and the second line is a whole line. If the first
# character is not a new line, then the first line, had we not
# read the extra character, would not be a whole line. Thus, it
# should also be dropped.
fileobj.seek(file_size - num_tail_lines * max_line_length - 1, os.SEEK_SET)
tail_lines = fileobj.read(num_tail_lines * max_line_length).splitlines(True)[1:][
-num_tail_lines:
]
ensure_ends_with_newline(tail_lines)
if num_head_lines > 0 and num_tail_lines > 0:
lines = head_lines + [truncation_text] + tail_lines
elif num_head_lines > 0:
lines = head_lines
else:
lines = tail_lines
else:
lines = fileobj.readlines()
ensure_ends_with_newline(lines)
if len(lines) > num_head_lines + num_tail_lines:
if num_head_lines > 0 and num_tail_lines > 0:
lines = lines[:num_head_lines] + [truncation_text] + lines[-num_tail_lines:]
elif num_head_lines > 0:
lines = lines[:num_head_lines]
else:
lines = lines[-num_tail_lines:]
return ''.join(lines)
|
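A quick way to exercise summarize_file as defined above (the temp-file name is arbitrary):
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), "summarize_demo.txt")
with open(path, "w") as f:
    f.write("".join(f"line {i}\n" for i in range(1000)))

# 3 lines from the head, 2 from the tail, truncation marker in between.
print(summarize_file(path, num_head_lines=3, num_tail_lines=2,
                     max_line_length=80, truncation_text="... truncated ...\n"))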
41,197 |
def _to_cliiford_tableau(
rotation_map: Optional[Dict[Pauli, PauliTransform]] = None,
*,
x_to: Optional[PauliTransform] = None,
z_to: Optional[PauliTransform] = None,
) -> qis.CliffordTableau:
"""Transfer the rotation map to clifford tableau representation"""
if x_to is None and z_to is None and rotation_map is None:
# coverage: ignore
raise ValueError(
"The function either takes rotation_map or a combination "
' of x_to and z_to but none were given.'
)
elif rotation_map is not None:
x_to = rotation_map[pauli_gates.X]
z_to = rotation_map[pauli_gates.Z]
else:
assert x_to is not None and z_to is not None, "Both x_to and z_to have to be provided."
clifford_tableau = qis.CliffordTableau(num_qubits=1)
clifford_tableau.xs[0, 0] = x_to.to in (pauli_gates.X, pauli_gates.Y)
clifford_tableau.zs[0, 0] = x_to.to in (pauli_gates.Y, pauli_gates.Z)
clifford_tableau.xs[1, 0] = z_to.to in (pauli_gates.X, pauli_gates.Y)
clifford_tableau.zs[1, 0] = z_to.to in (pauli_gates.Y, pauli_gates.Z)
clifford_tableau.rs = (x_to.flip, z_to.flip)
return clifford_tableau
|
def _to_clifford_tableau(
rotation_map: Optional[Dict[Pauli, PauliTransform]] = None,
*,
x_to: Optional[PauliTransform] = None,
z_to: Optional[PauliTransform] = None,
) -> qis.CliffordTableau:
"""Transfer the rotation map to clifford tableau representation"""
if x_to is None and z_to is None and rotation_map is None:
# coverage: ignore
raise ValueError(
"The function either takes rotation_map or a combination "
' of x_to and z_to but none were given.'
)
elif rotation_map is not None:
x_to = rotation_map[pauli_gates.X]
z_to = rotation_map[pauli_gates.Z]
else:
assert x_to is not None and z_to is not None, "Both x_to and z_to have to be provided."
clifford_tableau = qis.CliffordTableau(num_qubits=1)
clifford_tableau.xs[0, 0] = x_to.to in (pauli_gates.X, pauli_gates.Y)
clifford_tableau.zs[0, 0] = x_to.to in (pauli_gates.Y, pauli_gates.Z)
clifford_tableau.xs[1, 0] = z_to.to in (pauli_gates.X, pauli_gates.Y)
clifford_tableau.zs[1, 0] = z_to.to in (pauli_gates.Y, pauli_gates.Z)
clifford_tableau.rs = (x_to.flip, z_to.flip)
return clifford_tableau
|
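An illustrative call, assuming the surrounding cirq module context in which PauliTransform(to, flip) and pauli_gates are already imported; it encodes the Hadamard-like map X -> Z, Z -> X with no sign flips.
# Sketch only: PauliTransform and pauli_gates come from the snippet's module.
tableau = _to_clifford_tableau(
    x_to=PauliTransform(to=pauli_gates.Z, flip=False),
    z_to=PauliTransform(to=pauli_gates.X, flip=False),
)
print(tableau.xs, tableau.zs, tableau.rs)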
36,988 |
def _server_accepts_artifact_path() -> bool:
# Note on Version:
# * On 04/05/2022 the UI was modified to accept artifact_paths in the media
# panel:
# https://github.com/wandb/core/commit/341e05a9335a75cc306f74c081ed345cd369636a
# * At the time, the max_cli_version was set to 0.12.11:
# https://github.com/wandb/core/blob/341e05a9335a75cc306f74c081ed345cd369636a/services/gorilla/deploy/gorilla/values.yaml#L59
# * On the same day (04/05/2022) we made a custom release for a customer
# (0.9.50-[CUSTOMER]):
# https://hub.docker.com/r/wandb/local-dev/tags?page=1&name=0.9.50
# * As of this writing (04/15/2022), the max_cli_version is set to 0.12.14,
# which will be set in local version >= 0.9.51:
# https://github.com/wandb/core/commit/b10b86888118249733b6eaa874b121591820a359
#
# Therefore, setting this version to 0.12.14 is the most accurate, however
# this will exclude the target customer. Therefore, setting this version to
# 0.12.11 is an acceptable compromise as it will only be incorrect for
# customers on local version 0.9.50. Customers on <=0.9.49 will correctly
# return false, and customers on >=0.9.51 will correctly return true.
#
# After the target customer re-updates local, we can bump this to 0.12.14
# safely.
target_version = "0.12.11"
max_cli_version = util._get_max_cli_version() if not util._is_offline() else None
return max_cli_version is not None and parse_version(
target_version
) <= parse_version(max_cli_version)
|
def _server_accepts_artifact_path() -> bool:
target_version = "0.12.14"
max_cli_version = util._get_max_cli_version() if not util._is_offline() else None
return max_cli_version is not None and parse_version(
target_version
) <= parse_version(max_cli_version)
|
57,957 |
def get_sendmail_instances():
"""
Get the enabled instances that has send-mail command
:rtype: ``list``
:return: list of enabled instances
"""
integration_commands = get_all_integrations_commands()
# if we only want to search enabled integrations, we must fetch that list from another API
integration_instances = get_all_instances()
integration_instances_enabled: Dict[str, list] = dict()
for integration in integration_instances:
if integration['enabled'] == 'true':
if not integration_instances_enabled.get(integration['brand']):
integration_instances_enabled[integration['brand']] = []
integration_instances_enabled[integration['brand']].append(integration['name'])
integrations_that_send_mail = []
for integration in integration_commands:
integration_name = integration['name'] # integration brand name
if 'commands' in integration:
for command in integration['commands']:
if command['name'] == 'send-mail':
if integration_name in integration_instances_enabled.keys():
integrations_that_send_mail.extend(integration_instances_enabled[integration_name])
if len(integrations_that_send_mail) == 0:
return []
else:
return integrations_that_send_mail
|
def get_sendmail_instances():
"""
    Get the enabled instances that have the send-mail command
:rtype: ``list``
:return: list of enabled instances
"""
integration_commands = get_all_integrations_commands()
# if we only want to search enabled integrations, we must fetch that list from another API
integration_instances = get_all_instances()
integration_instances_enabled: Dict[str, list] = dict()
for instance in integration_instances:
if instance['enabled'] == 'true':
if not integration_instances_enabled.get(instance['brand']):
integration_instances_enabled[instance['brand']] = []
            integration_instances_enabled[instance['brand']].append(instance['name'])
integrations_that_send_mail = []
for integration in integration_commands:
integration_name = integration['name'] # integration brand name
if 'commands' in integration:
for command in integration['commands']:
if command['name'] == 'send-mail':
if integration_name in integration_instances_enabled.keys():
integrations_that_send_mail.extend(integration_instances_enabled[integration_name])
if len(integrations_that_send_mail) == 0:
return []
else:
return integrations_that_send_mail
|
8,357 |
def _isophote_list_to_table(isophote_list, key_properties=['main']):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or \
`~photutils.isophote.IsophoteList` instance
A list of isophotes.
key_properties : A list of properties to export from the isophote_list
If key_properties = ['all'] or ['main'], it will pick all or few
of the main properties.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the selected or all isophote parameters.
"""
properties = OrderedDict()
isotable = QTable()
# main_properties: `List`
# A list of main parameters matching the original names of
# the isophote_list parameters
def __rename_properties(properties,
orig_names = ['int_err', 'eps', 'ellip_err',
'grad_r_error', 'nflag'],
new_names = ['intens_err', 'ellipticity',
'ellipticity_err', 'grad_rerror',
'nflag']
):
'''
Simple renaming for some of the isophote_list parameters.
Parameters
----------
properties: `OrderedDict`
An OrderedDict with the list of the isophote_list parameters
orig_names: `List`
A list of original names in the isophote_list parameters
to be renamed
new_names: `List`
A list of new names matching in length of the orig_names
Returns
-------
properties: `OrderedDict`
An OrderedDict with the list of the renamed isophote_list
parameters
'''
main_properties = ['sma', 'intens', 'int_err', 'eps', 'ellip_err',
'pa', 'pa_err', 'grad', 'grad_error',
'grad_r_error', 'x0', 'x0_err', 'y0', 'y0_err',
'ndata', 'nflag', 'niter', 'stop_code']
for an_item in main_properties:
if an_item in orig_names:
properties[an_item] = new_names[orig_names.index(an_item)]
else:
properties[an_item] = an_item
return properties
if 'all' in key_properties:
properties = _get_properties(isophote_list)
properties = __rename_properties(properties)
elif 'main' in key_properties:
properties = __rename_properties(properties)
else:
for an_item in key_properties:
properties[an_item] = an_item
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable
|
def _isophote_list_to_table(isophote_list, columns='main'):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or \
`~photutils.isophote.IsophoteList` instance
A list of isophotes.
    columns : str or list of str
        A list of properties to export from the isophote_list.
        If columns = 'all' or 'main', it will pick all or a few
        of the main properties.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the selected or all isophote parameters.
"""
properties = OrderedDict()
isotable = QTable()
# main_properties: `List`
# A list of main parameters matching the original names of
# the isophote_list parameters
def __rename_properties(properties,
orig_names = ['int_err', 'eps', 'ellip_err',
'grad_r_error', 'nflag'],
new_names = ['intens_err', 'ellipticity',
'ellipticity_err', 'grad_rerror',
'nflag']
):
'''
Simple renaming for some of the isophote_list parameters.
Parameters
----------
properties: `OrderedDict`
An OrderedDict with the list of the isophote_list parameters
orig_names: `List`
A list of original names in the isophote_list parameters
to be renamed
new_names: `List`
A list of new names matching in length of the orig_names
Returns
-------
properties: `OrderedDict`
An OrderedDict with the list of the renamed isophote_list
parameters
'''
main_properties = ['sma', 'intens', 'int_err', 'eps', 'ellip_err',
'pa', 'pa_err', 'grad', 'grad_error',
'grad_r_error', 'x0', 'x0_err', 'y0', 'y0_err',
'ndata', 'nflag', 'niter', 'stop_code']
for an_item in main_properties:
if an_item in orig_names:
properties[an_item] = new_names[orig_names.index(an_item)]
else:
properties[an_item] = an_item
return properties
    if 'all' in columns:
        properties = _get_properties(isophote_list)
        properties = __rename_properties(properties)
    elif 'main' in columns:
        properties = __rename_properties(properties)
    else:
        for an_item in columns:
properties[an_item] = an_item
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable
|
20,596 |
def init_envvars() -> None:
"""Initialize environment variables which need to be set early."""
if objects.backend == usertypes.Backend.QtWebEngine:
software_rendering = config.val.qt.force_software_rendering
if software_rendering == 'software-opengl':
os.environ['QT_XCB_FORCE_SOFTWARE_OPENGL'] = '1'
elif software_rendering == 'qt-quick':
os.environ['QT_QUICK_BACKEND'] = 'software'
elif software_rendering == 'chromium':
os.environ['QT_WEBENGINE_DISABLE_NOUVEAU_WORKAROUND'] = '1'
else:
assert objects.backend == usertypes.Backend.QtWebKit, objects.backend
if config.val.qt.force_platform is not None:
os.environ['QT_QPA_PLATFORM'] = config.val.qt.force_platform
if config.val.qt.force_platformtheme is not None:
os.environ['QT_QPA_PLATFORMTHEME'] = config.val.qt.force_platformtheme
if config.val.window.hide_decoration:
os.environ['QT_WAYLAND_DISABLE_WINDOWDECORATION'] = '1'
if config.val.qt.highdpi:
env_var = ('QT_ENABLE_HIGHDPI_SCALING'
if qtutils.version_check('5.14', compiled=False)
else 'QT_AUTO_SCREEN_SCALE_FACTOR')
os.environ[env_var] = '1'
for var in config.val.qt.environ:
val = config.val.qt.environ[var]
if val == 'None':
os.environ[var] = ''
else:
os.environ[var] = val
|
def init_envvars() -> None:
"""Initialize environment variables which need to be set early."""
if objects.backend == usertypes.Backend.QtWebEngine:
software_rendering = config.val.qt.force_software_rendering
if software_rendering == 'software-opengl':
os.environ['QT_XCB_FORCE_SOFTWARE_OPENGL'] = '1'
elif software_rendering == 'qt-quick':
os.environ['QT_QUICK_BACKEND'] = 'software'
elif software_rendering == 'chromium':
os.environ['QT_WEBENGINE_DISABLE_NOUVEAU_WORKAROUND'] = '1'
else:
assert objects.backend == usertypes.Backend.QtWebKit, objects.backend
if config.val.qt.force_platform is not None:
os.environ['QT_QPA_PLATFORM'] = config.val.qt.force_platform
if config.val.qt.force_platformtheme is not None:
os.environ['QT_QPA_PLATFORMTHEME'] = config.val.qt.force_platformtheme
if config.val.window.hide_decoration:
os.environ['QT_WAYLAND_DISABLE_WINDOWDECORATION'] = '1'
if config.val.qt.highdpi:
env_var = ('QT_ENABLE_HIGHDPI_SCALING'
if qtutils.version_check('5.14', compiled=False)
else 'QT_AUTO_SCREEN_SCALE_FACTOR')
os.environ[env_var] = '1'
for var in config.val.qt.environ:
val = config.val.qt.environ[var]
if val is None:
os.environ[var] = ''
else:
os.environ[var] = val
|
42,900 |
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
.. code-block:: python
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
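A numerical sketch, assuming the Strawberry Fields decompositions module (where rectangular_symmetric and clements_phase_end live); any unitary, e.g. from a QR factorisation, works as input.
import numpy as np
from strawberryfields.decompositions import rectangular_symmetric

# Random 3-mode unitary from the QR factorisation of a complex Gaussian matrix.
A = np.random.randn(3, 3) + 1j * np.random.randn(3, 3)
U, _ = np.linalg.qr(A)

tlist, diag = rectangular_symmetric(U)
print(len(tlist))    # number of symmetric-beamsplitter T blocks
print(np.abs(diag))  # residual diagonal phases all have unit modulus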
13,518 |
def test_define_update_ingored_non_enum():
def_str = "INT 0 100"
define = canmatrix.canmatrix.Define(def_str)
define.update()
assert define.definition == def_str
|
def test_define_update_ingored_non_enum():
def_str = "INT 0 100"
define = canmatrix.canmatrix.Define(def_str)
define.update()
assert define.definition == def_str
|
32,239 |
def install_software(topology: Topology, version: str,
device_filter_string: str = None, sync: bool = False) -> InstallSoftwareCommandResult:
"""
Install the given software version onto the device. Download the software first with
pan-os-platform-download-software
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only install to specific devices or serial numbers
:param version: software version to upgrade to, ex. 9.1.2
:param sync: If provided, runs the download synchronously - make sure 'execution-timeout' is increased.
"""
if sync == "false":
sync = False
result: InstallSoftwareCommandResult = UniversalCommand.install_software(topology, version,
device_filter_str=device_filter_string,
sync=sync)
return result
|
def install_software(topology: Topology, version: str,
device_filter_string: str = None, sync: bool = False) -> InstallSoftwareCommandResult:
"""
Install the given software version onto the device. Download the software first with
pan-os-platform-download-software
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only install to specific devices or serial numbers
:param version: software version to upgrade to, ex. 9.1.2
:param sync: If provided, runs the download synchronously - make sure 'execution-timeout' is increased.
"""
_sync = argToBoolean(sync)
    result: InstallSoftwareCommandResult = UniversalCommand.install_software(topology, version,
                                                                             device_filter_str=device_filter_string,
                                                                             sync=_sync)
return result
|
31,586 |
def get_whois_history_command(client, args):
hostname = args.get('hostname')
page = int(args.get('page', 1))
res = client.get_whois_history(hostname=hostname, page=page)
readable_output = tableToMarkdown(f"WHOIS history for {hostname}:", res.get('items'))
command_results = CommandResults(
outputs_prefix=f"SecurityTrails.Domain",
outputs_key_field="name",
outputs={
"name": hostname,
f"whois_history": res.get('items', []),
"whois_history_count": res.get('count', 0)
},
readable_output=readable_output
)
return_results(command_results)
domain_data = {
"Name": hostname
}
contacts = res.get('items', [])[0].get('contact') if res.get('items', None) else []
admin_contact = [x for x in contacts if x.get('type', None) == "administrativeContact"]
admin_contact = admin_contact[0] if admin_contact else None
registrant_contact = [x for x in contacts if x.get('type', None) == "registrant"]
registrant_contact = registrant_contact[0] if registrant_contact else None
registrar_contact = admin_contact if admin_contact else None
whois_objects = list()
for x in res.get('items', []):
whois_object = {
"DomainStatus": ", ".join(x.get('status', [])),
"NameServers": ", ".join(x.get('nameServers', [])),
"CreationDate": datetime.fromtimestamp((x.get('createdDate') / 1000)).strftime("%Y-%m-%dT%H:%M:%SZ") if x.get('createdDate', None) else None,
"UpdatedDate": datetime.fromtimestamp((x.get('updatedDate') / 1000)).strftime("%Y-%m-%dT%H:%M:%SZ") if x.get('updatedDate', None) else None,
"ExpirationDate": datetime.fromtimestamp((x.get('expiresDate') / 1000)).strftime("%Y-%m-%dT%H:%M:%SZ") if x.get('expiresDate', None) else None
}
if admin_contact:
whois_object['Admin'] = {
"Name": admin_contact.get('name'),
"Email": admin_contact.get('email'),
"Phone": admin_contact.get('telephone')
}
if registrant_contact:
whois_object['Registrant'] = {
"Name": registrant_contact.get('name'),
"Email": registrant_contact.get('email'),
"Phone": registrant_contact.get('telephone')
}
if registrar_contact:
whois_object['Registrar'] = {
"Name": registrar_contact.get('name'),
"Email": registrar_contact.get('email'),
"Phone": registrar_contact.get('telephone')
}
whois_objects.append(whois_object)
if len(whois_objects) > 0:
domain_data['WHOIS/History'] = whois_objects
create_standard_domain_context(domain_data=domain_data)
|
def get_whois_history_command(client, args):
hostname = args.get('hostname')
page = int(args.get('page', 1))
res = client.get_whois_history(hostname=hostname, page=page)
readable_output = tableToMarkdown(f"WHOIS history for {hostname}:", res.get('items'))
command_results = CommandResults(
outputs_prefix="SecurityTrails.Domain",
outputs_key_field="name",
outputs={
"name": hostname,
f"whois_history": res.get('items', []),
"whois_history_count": res.get('count', 0)
},
readable_output=readable_output
)
return_results(command_results)
domain_data = {
"Name": hostname
}
contacts = res.get('items', [])[0].get('contact') if res.get('items', None) else []
admin_contact = [x for x in contacts if x.get('type', None) == "administrativeContact"]
admin_contact = admin_contact[0] if admin_contact else None
registrant_contact = [x for x in contacts if x.get('type', None) == "registrant"]
registrant_contact = registrant_contact[0] if registrant_contact else None
registrar_contact = admin_contact if admin_contact else None
whois_objects = list()
for x in res.get('items', []):
whois_object = {
"DomainStatus": ", ".join(x.get('status', [])),
"NameServers": ", ".join(x.get('nameServers', [])),
"CreationDate": datetime.fromtimestamp((x.get('createdDate') / 1000)).strftime("%Y-%m-%dT%H:%M:%SZ") if x.get('createdDate', None) else None,
"UpdatedDate": datetime.fromtimestamp((x.get('updatedDate') / 1000)).strftime("%Y-%m-%dT%H:%M:%SZ") if x.get('updatedDate', None) else None,
"ExpirationDate": datetime.fromtimestamp((x.get('expiresDate') / 1000)).strftime("%Y-%m-%dT%H:%M:%SZ") if x.get('expiresDate', None) else None
}
if admin_contact:
whois_object['Admin'] = {
"Name": admin_contact.get('name'),
"Email": admin_contact.get('email'),
"Phone": admin_contact.get('telephone')
}
if registrant_contact:
whois_object['Registrant'] = {
"Name": registrant_contact.get('name'),
"Email": registrant_contact.get('email'),
"Phone": registrant_contact.get('telephone')
}
if registrar_contact:
whois_object['Registrar'] = {
"Name": registrar_contact.get('name'),
"Email": registrar_contact.get('email'),
"Phone": registrar_contact.get('telephone')
}
whois_objects.append(whois_object)
if len(whois_objects) > 0:
domain_data['WHOIS/History'] = whois_objects
create_standard_domain_context(domain_data=domain_data)
|
26,039 |
def load_command_table(self, _):
security_secure_scores_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SecureScoresOperations.{}',
client_factory=cf_security_secure_scores
)
security_secure_score_controls_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SecureScoreControlsOperations.{}',
client_factory=cf_security_secure_score_controls
)
security_secure_score_control_definitions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SecureScoreControlDefinitionsOperations.{}',
client_factory=cf_security_secure_score_control_definitions
)
security_regulatory_compliance_standards_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceStandardsOperations.{}',
client_factory=cf_security_regulatory_compliance_standards
)
security_regulatory_compliance_controls_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceControlsOperations.{}',
client_factory=cf_security_regulatory_compliance_control
)
security_regulatory_compliance_assessment_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceAssessmentsOperations.{}',
client_factory=cf_security_regulatory_compliance_assessment
)
security_tasks_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#TasksOperations.{}',
client_factory=cf_security_tasks,
operation_group='security_tasks'
)
security_alerts_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AlertsOperations.{}',
client_factory=cf_security_alerts,
operation_group='security_alerts'
)
security_alerts_suppression_rule_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AlertsSuppressionRulesOperations.{}',
client_factory=cf_security_alerts_suppression_rule,
operation_group='security_alerts_suppression_rule'
)
security_settings_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SettingsOperations.{}',
client_factory=cf_security_settings,
operation_group='security_settings'
)
security_contacts_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SecurityContactsOperations.{}',
client_factory=cf_security_contacts,
operation_group='security_contacts'
)
security_auto_provisioning_settings_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AutoProvisioningSettingsOperations.{}',
client_factory=cf_security_auto_provisioning_settings,
operation_group='security_auto_provisioning_settings'
)
security_discovered_security_solutions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#DiscoveredSecuritySolutionsOperations.{}',
client_factory=cf_security_discovered_security_solutions,
operation_group='security_discovered_security_solutions'
)
security_external_security_solutions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#ExternalSecuritySolutionsOperations.{}',
client_factory=cf_security_external_security_solutions,
operation_group='security_external_security_solutions'
)
security_jit_network_access_policies_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#JitNetworkAccessPoliciesOperations.{}',
client_factory=cf_security_jit_network_access_policies,
operation_group='security_jit_network_access_policies'
)
security_locations_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#LocationsOperations.{}',
client_factory=cf_security_locations,
operation_group='security_locations'
)
security_pricings_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#PricingsOperations.{}',
client_factory=cf_security_pricings,
operation_group='security_pricings'
)
security_topology_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#TopologyOperations.{}',
client_factory=cf_security_topology,
operation_group='security_topology'
)
security_workspace_settings_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#WorkspaceSettingsOperations.{}',
client_factory=cf_security_workspace_settings,
operation_group='security_workspace_settings'
)
security_advanced_threat_protection_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AdvancedThreatProtectionOperations.{}',
client_factory=cf_security_advanced_threat_protection
)
security_sql_vulnerability_assessment_scans_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentScansOperations.{}',
client_factory=cf_sql_vulnerability_assessment_scans
)
security_sql_vulnerability_assessment_results_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentScanResultsOperations.{}',
client_factory=cf_sql_vulnerability_assessment_results
)
security_sql_vulnerability_assessment_baseline_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentBaselineRulesOperations.{}',
client_factory=cf_sql_vulnerability_assessment_baseline
)
security_assessment_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AssessmentsOperations.{}',
client_factory=cf_security_assessment
)
security_assessment_metadata_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AssessmentMetadataOperations.{}',
client_factory=cf_security_assessment_metadata
)
security_sub_assessment_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SubAssessmentsOperations.{}',
client_factory=cf_security_sub_assessment
)
security_adaptive_application_controls_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AdaptiveApplicationControlsOperations.{}',
client_factory=cf_security_adaptive_application_controls,
operation_group='cf_security_adaptive_application_controls'
)
security_adaptive_network_hardenings_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AdaptiveNetworkhardeningsOperations.{}',
client_factory=cf_security_adaptive_network_hardenings,
operation_group='security_adaptive_network_hardenings'
)
security_allowed_connections_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AllowedConnectionsOperations.{}',
client_factory=cf_security_allowed_connections,
operation_group='security_allowed_connections'
)
security_iot_solution_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#IotSolutionOperations.{}',
client_factory=cf_security_iot_solution
)
security_iot_analytics_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#IotAnalyticsOperations.{}',
client_factory=cf_security_iot_analytics
)
security_iot_alerts_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#IotAlertsOperations.{}',
client_factory=cf_security_iot_alerts
)
security_iot_recommendations_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#IotRecommendationsOperations.{}',
client_factory=cf_security_iot_recommendations
)
security_automations_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AutomationsOperations.{}',
client_factory=cf_security_automations,
operation_group='security_automations'
)
security_solutions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SecuritySolutionsOperations.{}',
client_factory=cf_security_security_solutions
)
with self.command_group('security secure-scores',
security_secure_scores_sdk,
client_factory=cf_security_secure_scores) as g:
g.custom_command('list', 'list_secure_scores')
g.custom_show_command('show', 'get_secure_score')
with self.command_group('security secure-score-controls',
security_secure_score_controls_sdk,
client_factory=cf_security_secure_score_controls) as g:
g.custom_command('list', 'list_secure_score_controls')
g.custom_show_command('list_by_score', 'list_by_score')
with self.command_group('security secure-score-control-definitions',
security_secure_score_control_definitions_sdk,
client_factory=cf_security_secure_score_control_definitions) as g:
g.custom_command('list', 'list_secure_score_control_definitions')
with self.command_group('security regulatory-compliance-standards',
security_regulatory_compliance_standards_sdk,
client_factory=cf_security_regulatory_compliance_standards) as g:
g.custom_command('list', 'list_regulatory_compliance_standards')
g.custom_show_command('show', 'get_regulatory_compliance_standard')
with self.command_group('security regulatory-compliance-controls',
security_regulatory_compliance_controls_sdk,
client_factory=cf_security_regulatory_compliance_control) as g:
g.custom_command('list', 'list_regulatory_compliance_controls')
g.custom_show_command('show', 'get_regulatory_compliance_control')
with self.command_group('security regulatory-compliance-assessments',
security_regulatory_compliance_assessment_sdk,
client_factory=cf_security_regulatory_compliance_assessment) as g:
g.custom_command('list', 'list_regulatory_compliance_assessments')
g.custom_show_command('show', 'get_regulatory_compliance_assessment')
with self.command_group('security task',
security_tasks_sdk,
client_factory=cf_security_tasks) as g:
g.custom_command('list', 'list_security_tasks')
g.custom_show_command('show', 'get_security_task')
with self.command_group('security alerts-suppression-rule',
security_alerts_suppression_rule_sdk,
client_factory=cf_security_alerts_suppression_rule) as g:
g.custom_command('list', 'list_security_alerts_suppression_rule')
g.custom_show_command('show', 'show_security_alerts_suppression_rule')
g.custom_command('delete', 'delete_security_alerts_suppression_rule')
g.custom_command('update', 'update_security_alerts_suppression_rule')
g.custom_command('upsert_scope', 'upsert_security_alerts_suppression_rule_scope')
g.custom_command('delete_scope', 'delete_security_alerts_suppression_rule_scope')
for scope in ['storage', 'cosmosdb']:
with self.command_group(f"security atp {scope}",
security_advanced_threat_protection_sdk,
client_factory=cf_security_advanced_threat_protection) as g:
g.custom_show_command('show', f"get_{scope}_atp_setting")
g.custom_command('update', f"update_{scope}_atp_setting")
with self.command_group('security va sql scans',
security_sql_vulnerability_assessment_scans_sdk,
client_factory=cf_sql_vulnerability_assessment_scans) as g:
g.custom_show_command('show', 'get_va_sql_scan')
g.custom_command('list', 'list_va_sql_scans')
with self.command_group('security va sql results',
security_sql_vulnerability_assessment_results_sdk,
client_factory=cf_sql_vulnerability_assessment_results) as g:
g.custom_show_command('show', 'get_va_sql_result')
g.custom_command('list', 'list_va_sql_results')
with self.command_group('security va sql baseline',
security_sql_vulnerability_assessment_baseline_sdk,
client_factory=cf_sql_vulnerability_assessment_baseline) as g:
g.custom_show_command('show', 'get_va_sql_baseline')
g.custom_command('list', 'list_va_sql_baseline')
g.custom_command('delete', 'delete_va_sql_baseline')
g.custom_command('update', 'update_va_sql_baseline')
g.custom_command('set', 'set_va_sql_baseline')
with self.command_group('security alert',
security_alerts_sdk,
client_factory=cf_security_alerts) as g:
g.custom_command('list', 'list_security_alerts')
g.custom_show_command('show', 'get_security_alert')
g.custom_command('update', 'update_security_alert')
with self.command_group('security setting',
security_settings_sdk,
client_factory=cf_security_settings) as g:
g.custom_command('list', 'list_security_settings')
g.custom_show_command('show', 'get_security_setting')
g.custom_command('update', 'update_security_setting')
with self.command_group('security contact',
security_contacts_sdk,
client_factory=cf_security_contacts) as g:
g.custom_command('list', 'list_security_contacts')
g.custom_show_command('show', 'get_security_contact')
g.custom_command('create', 'create_security_contact')
g.custom_command('delete', 'delete_security_contact')
with self.command_group('security auto-provisioning-setting',
security_auto_provisioning_settings_sdk,
client_factory=cf_security_auto_provisioning_settings) as g:
g.custom_command('list', 'list_security_auto_provisioning_settings')
g.custom_show_command('show', 'get_security_auto_provisioning_setting')
g.custom_command('update', 'update_security_auto_provisioning_setting')
with self.command_group('security discovered-security-solution',
security_discovered_security_solutions_sdk,
client_factory=cf_security_discovered_security_solutions) as g:
g.custom_command('list', 'list_security_discovered_security_solutions')
g.custom_show_command('show', 'get_security_discovered_security_solution')
with self.command_group('security external-security-solution',
security_external_security_solutions_sdk,
client_factory=cf_security_external_security_solutions) as g:
g.custom_command('list', 'list_security_external_security_solutions')
g.custom_show_command('show', 'get_security_external_security_solution')
with self.command_group('security jit-policy',
security_jit_network_access_policies_sdk,
client_factory=cf_security_jit_network_access_policies) as g:
g.custom_command('list', 'list_security_jit_network_access_policies')
g.custom_show_command('show', 'get_security_jit_network_access_policy')
with self.command_group('security location',
security_locations_sdk,
client_factory=cf_security_locations) as g:
g.custom_command('list', 'list_security_locations')
g.custom_show_command('show', 'get_security_location')
with self.command_group('security pricing',
security_pricings_sdk,
client_factory=cf_security_pricings) as g:
g.custom_command('list', 'list_security_pricings')
g.custom_show_command('show', 'get_security_pricing')
g.custom_command('create', 'create_security_pricing')
with self.command_group('security topology',
security_topology_sdk,
client_factory=cf_security_topology) as g:
g.custom_command('list', 'list_security_topology')
g.custom_show_command('show', 'get_security_topology')
with self.command_group('security workspace-setting',
security_workspace_settings_sdk,
client_factory=cf_security_workspace_settings) as g:
g.custom_command('list', 'list_security_workspace_settings')
g.custom_show_command('show', 'get_security_workspace_setting')
g.custom_command('create', 'create_security_workspace_setting')
g.custom_command('delete', 'delete_security_workspace_setting')
with self.command_group('security assessment',
security_assessment_sdk,
client_factory=cf_security_assessment) as g:
g.custom_command('list', 'list_security_assessments')
g.custom_show_command('show', 'get_security_assessment')
g.custom_command('create', 'create_security_assessment')
g.custom_command('delete', 'delete_security_assessment')
with self.command_group('security assessment-metadata',
security_assessment_metadata_sdk,
client_factory=cf_security_assessment_metadata) as g:
g.custom_command('list', 'list_security_assessment_metadata')
g.custom_show_command('show', 'get_security_assessment_metadata')
g.custom_command('create', 'create_security_assessment_metadata')
g.custom_command('delete', 'delete_security_assessment_metadata')
with self.command_group('security sub-assessment',
security_sub_assessment_sdk,
client_factory=cf_security_sub_assessment) as g:
g.custom_command('list', 'list_security_sub_assessments')
g.custom_show_command('show', 'get_security_sub_assessment')
with self.command_group('security adaptive-application-controls',
security_adaptive_application_controls_sdk,
client_factory=cf_security_adaptive_application_controls) as g:
g.custom_command('list', 'list_security_adaptive_application_controls')
g.custom_show_command('show', 'get_security_adaptive_application_controls')
with self.command_group('security adaptive_network_hardenings',
security_adaptive_network_hardenings_sdk,
client_factory=cf_security_adaptive_network_hardenings) as g:
g.custom_show_command('show', 'get_security_adaptive_network_hardenings')
g.custom_command('list', 'list_security_adaptive_network_hardenings')
with self.command_group('security allowed_connections',
security_allowed_connections_sdk,
client_factory=cf_security_allowed_connections) as g:
g.custom_command('list', 'list_security_allowed_connections')
g.custom_show_command('show', 'get_security_allowed_connections')
with self.command_group('security iot-solution',
security_iot_solution_sdk,
client_factory=cf_security_iot_solution) as g:
g.custom_command('list', 'list_security_iot_solution')
g.custom_show_command('show', 'show_security_iot_solution')
g.custom_command('create', 'create_security_iot_solution')
g.custom_command('delete', 'delete_security_iot_solution')
g.custom_command('update', 'update_security_iot_solution')
with self.command_group('security iot-analytics',
security_iot_analytics_sdk,
client_factory=cf_security_iot_analytics) as g:
g.custom_command('list', 'list_security_iot_analytics')
g.custom_show_command('show', 'show_security_iot_analytics')
with self.command_group('security iot-alerts',
security_iot_alerts_sdk,
client_factory=cf_security_iot_alerts) as g:
g.custom_command('list', 'list_security_iot_alerts')
g.custom_show_command('show', 'show_security_iot_alerts')
g.custom_command('delete', 'dismiss_security_iot_alerts')
with self.command_group('security iot-recommendations',
security_iot_recommendations_sdk,
client_factory=cf_security_iot_recommendations) as g:
g.custom_command('list', 'list_security_iot_recommendations')
g.custom_show_command('show', 'show_security_iot_recommendations')
with self.command_group('security automation',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('list', 'list_security_automations')
g.custom_show_command('show', 'get_security_automation')
g.custom_command('delete', 'delete_security_automation')
g.custom_command('create_or_update', 'create_or_update_security_automation')
g.custom_command('validate', 'validate_security_automation')
with self.command_group('security automation-scope',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_scope')
with self.command_group('security automation-rule',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_rule')
with self.command_group('security automation-rule-set',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_rule_set')
with self.command_group('security automation-source',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_source')
with self.command_group('security automation-action-logic-app',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_action_logic_app')
with self.command_group('security automation-action-event-hub',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_action_event_hub')
with self.command_group('security automation-action-workspace',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_action_workspace')
with self.command_group('security security_solutions',
security_solutions_sdk,
client_factory=cf_security_security_solutions) as g:
g.custom_command('list', 'list_security_security_solutions')
with self.command_group('security'):
pass
|
def load_command_table(self, _):
security_secure_scores_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SecureScoresOperations.{}',
client_factory=cf_security_secure_scores
)
security_secure_score_controls_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SecureScoreControlsOperations.{}',
client_factory=cf_security_secure_score_controls
)
security_secure_score_control_definitions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SecureScoreControlDefinitionsOperations.{}',
client_factory=cf_security_secure_score_control_definitions
)
security_regulatory_compliance_standards_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceStandardsOperations.{}',
client_factory=cf_security_regulatory_compliance_standards
)
security_regulatory_compliance_controls_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceControlsOperations.{}',
client_factory=cf_security_regulatory_compliance_control
)
security_regulatory_compliance_assessment_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceAssessmentsOperations.{}',
client_factory=cf_security_regulatory_compliance_assessment
)
security_tasks_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#TasksOperations.{}',
client_factory=cf_security_tasks,
operation_group='security_tasks'
)
security_alerts_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AlertsOperations.{}',
client_factory=cf_security_alerts,
operation_group='security_alerts'
)
security_alerts_suppression_rule_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AlertsSuppressionRulesOperations.{}',
client_factory=cf_security_alerts_suppression_rule,
operation_group='security_alerts_suppression_rule'
)
security_settings_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SettingsOperations.{}',
client_factory=cf_security_settings,
operation_group='security_settings'
)
security_contacts_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SecurityContactsOperations.{}',
client_factory=cf_security_contacts,
operation_group='security_contacts'
)
security_auto_provisioning_settings_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AutoProvisioningSettingsOperations.{}',
client_factory=cf_security_auto_provisioning_settings,
operation_group='security_auto_provisioning_settings'
)
security_discovered_security_solutions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#DiscoveredSecuritySolutionsOperations.{}',
client_factory=cf_security_discovered_security_solutions,
operation_group='security_discovered_security_solutions'
)
security_external_security_solutions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#ExternalSecuritySolutionsOperations.{}',
client_factory=cf_security_external_security_solutions,
operation_group='security_external_security_solutions'
)
security_jit_network_access_policies_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#JitNetworkAccessPoliciesOperations.{}',
client_factory=cf_security_jit_network_access_policies,
operation_group='security_jit_network_access_policies'
)
security_locations_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#LocationsOperations.{}',
client_factory=cf_security_locations,
operation_group='security_locations'
)
security_pricings_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#PricingsOperations.{}',
client_factory=cf_security_pricings,
operation_group='security_pricings'
)
security_topology_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#TopologyOperations.{}',
client_factory=cf_security_topology,
operation_group='security_topology'
)
security_workspace_settings_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#WorkspaceSettingsOperations.{}',
client_factory=cf_security_workspace_settings,
operation_group='security_workspace_settings'
)
security_advanced_threat_protection_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AdvancedThreatProtectionOperations.{}',
client_factory=cf_security_advanced_threat_protection
)
security_sql_vulnerability_assessment_scans_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentScansOperations.{}',
client_factory=cf_sql_vulnerability_assessment_scans
)
security_sql_vulnerability_assessment_results_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentScanResultsOperations.{}',
client_factory=cf_sql_vulnerability_assessment_results
)
security_sql_vulnerability_assessment_baseline_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentBaselineRulesOperations.{}',
client_factory=cf_sql_vulnerability_assessment_baseline
)
security_assessment_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AssessmentsOperations.{}',
client_factory=cf_security_assessment
)
security_assessment_metadata_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AssessmentMetadataOperations.{}',
client_factory=cf_security_assessment_metadata
)
security_sub_assessment_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SubAssessmentsOperations.{}',
client_factory=cf_security_sub_assessment
)
security_adaptive_application_controls_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AdaptiveApplicationControlsOperations.{}',
client_factory=cf_security_adaptive_application_controls,
operation_group='cf_security_adaptive_application_controls'
)
security_adaptive_network_hardenings_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AdaptiveNetworkhardeningsOperations.{}',
client_factory=cf_security_adaptive_network_hardenings,
operation_group='security_adaptive_network_hardenings'
)
security_allowed_connections_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AllowedConnectionsOperations.{}',
client_factory=cf_security_allowed_connections,
operation_group='security_allowed_connections'
)
security_iot_solution_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#IotSolutionOperations.{}',
client_factory=cf_security_iot_solution
)
security_iot_analytics_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#IotAnalyticsOperations.{}',
client_factory=cf_security_iot_analytics
)
security_iot_alerts_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#IotAlertsOperations.{}',
client_factory=cf_security_iot_alerts
)
security_iot_recommendations_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#IotRecommendationsOperations.{}',
client_factory=cf_security_iot_recommendations
)
security_automations_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#AutomationsOperations.{}',
client_factory=cf_security_automations,
operation_group='security_automations'
)
security_solutions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.security.operations#SecuritySolutionsOperations.{}',
client_factory=cf_security_security_solutions
)
with self.command_group('security secure-scores',
security_secure_scores_sdk,
client_factory=cf_security_secure_scores) as g:
g.custom_command('list', 'list_secure_scores')
g.custom_show_command('show', 'get_secure_score')
with self.command_group('security secure-score-controls',
security_secure_score_controls_sdk,
client_factory=cf_security_secure_score_controls) as g:
g.custom_command('list', 'list_secure_score_controls')
g.custom_show_command('list_by_score', 'list_by_score')
with self.command_group('security secure-score-control-definitions',
security_secure_score_control_definitions_sdk,
client_factory=cf_security_secure_score_control_definitions) as g:
g.custom_command('list', 'list_secure_score_control_definitions')
with self.command_group('security regulatory-compliance-standards',
security_regulatory_compliance_standards_sdk,
client_factory=cf_security_regulatory_compliance_standards) as g:
g.custom_command('list', 'list_regulatory_compliance_standards')
g.custom_show_command('show', 'get_regulatory_compliance_standard')
with self.command_group('security regulatory-compliance-controls',
security_regulatory_compliance_controls_sdk,
client_factory=cf_security_regulatory_compliance_control) as g:
g.custom_command('list', 'list_regulatory_compliance_controls')
g.custom_show_command('show', 'get_regulatory_compliance_control')
with self.command_group('security regulatory-compliance-assessments',
security_regulatory_compliance_assessment_sdk,
client_factory=cf_security_regulatory_compliance_assessment) as g:
g.custom_command('list', 'list_regulatory_compliance_assessments')
g.custom_show_command('show', 'get_regulatory_compliance_assessment')
with self.command_group('security task',
security_tasks_sdk,
client_factory=cf_security_tasks) as g:
g.custom_command('list', 'list_security_tasks')
g.custom_show_command('show', 'get_security_task')
with self.command_group('security alerts-suppression-rule',
security_alerts_suppression_rule_sdk,
client_factory=cf_security_alerts_suppression_rule) as g:
g.custom_command('list', 'list_security_alerts_suppression_rule')
g.custom_show_command('show', 'show_security_alerts_suppression_rule')
g.custom_command('delete', 'delete_security_alerts_suppression_rule')
g.custom_command('update', 'update_security_alerts_suppression_rule')
g.custom_command('upsert_scope', 'upsert_security_alerts_suppression_rule_scope')
g.custom_command('delete_scope', 'delete_security_alerts_suppression_rule_scope')
for scope in ['storage', 'cosmosdb']:
with self.command_group(f"security atp {scope}",
security_advanced_threat_protection_sdk,
client_factory=cf_security_advanced_threat_protection) as g:
g.custom_show_command('show', f"get_{scope}_atp_setting")
g.custom_command('update', f"update_{scope}_atp_setting")
with self.command_group('security va sql scans',
security_sql_vulnerability_assessment_scans_sdk,
client_factory=cf_sql_vulnerability_assessment_scans) as g:
g.custom_show_command('show', 'get_va_sql_scan')
g.custom_command('list', 'list_va_sql_scans')
with self.command_group('security va sql results',
security_sql_vulnerability_assessment_results_sdk,
client_factory=cf_sql_vulnerability_assessment_results) as g:
g.custom_show_command('show', 'get_va_sql_result')
g.custom_command('list', 'list_va_sql_results')
with self.command_group('security va sql baseline',
security_sql_vulnerability_assessment_baseline_sdk,
client_factory=cf_sql_vulnerability_assessment_baseline) as g:
g.custom_show_command('show', 'get_va_sql_baseline')
g.custom_command('list', 'list_va_sql_baseline')
g.custom_command('delete', 'delete_va_sql_baseline')
g.custom_command('update', 'update_va_sql_baseline')
g.custom_command('set', 'set_va_sql_baseline')
with self.command_group('security alert',
security_alerts_sdk,
client_factory=cf_security_alerts) as g:
g.custom_command('list', 'list_security_alerts')
g.custom_show_command('show', 'get_security_alert')
g.custom_command('update', 'update_security_alert')
with self.command_group('security setting',
security_settings_sdk,
client_factory=cf_security_settings) as g:
g.custom_command('list', 'list_security_settings')
g.custom_show_command('show', 'get_security_setting')
g.custom_command('update', 'update_security_setting')
with self.command_group('security contact',
security_contacts_sdk,
client_factory=cf_security_contacts) as g:
g.custom_command('list', 'list_security_contacts')
g.custom_show_command('show', 'get_security_contact')
g.custom_command('create', 'create_security_contact')
g.custom_command('delete', 'delete_security_contact')
with self.command_group('security auto-provisioning-setting',
security_auto_provisioning_settings_sdk,
client_factory=cf_security_auto_provisioning_settings) as g:
g.custom_command('list', 'list_security_auto_provisioning_settings')
g.custom_show_command('show', 'get_security_auto_provisioning_setting')
g.custom_command('update', 'update_security_auto_provisioning_setting')
with self.command_group('security discovered-security-solution',
security_discovered_security_solutions_sdk,
client_factory=cf_security_discovered_security_solutions) as g:
g.custom_command('list', 'list_security_discovered_security_solutions')
g.custom_show_command('show', 'get_security_discovered_security_solution')
with self.command_group('security external-security-solution',
security_external_security_solutions_sdk,
client_factory=cf_security_external_security_solutions) as g:
g.custom_command('list', 'list_security_external_security_solutions')
g.custom_show_command('show', 'get_security_external_security_solution')
with self.command_group('security jit-policy',
security_jit_network_access_policies_sdk,
client_factory=cf_security_jit_network_access_policies) as g:
g.custom_command('list', 'list_security_jit_network_access_policies')
g.custom_show_command('show', 'get_security_jit_network_access_policy')
with self.command_group('security location',
security_locations_sdk,
client_factory=cf_security_locations) as g:
g.custom_command('list', 'list_security_locations')
g.custom_show_command('show', 'get_security_location')
with self.command_group('security pricing',
security_pricings_sdk,
client_factory=cf_security_pricings) as g:
g.custom_command('list', 'list_security_pricings')
g.custom_show_command('show', 'get_security_pricing')
g.custom_command('create', 'create_security_pricing')
with self.command_group('security topology',
security_topology_sdk,
client_factory=cf_security_topology) as g:
g.custom_command('list', 'list_security_topology')
g.custom_show_command('show', 'get_security_topology')
with self.command_group('security workspace-setting',
security_workspace_settings_sdk,
client_factory=cf_security_workspace_settings) as g:
g.custom_command('list', 'list_security_workspace_settings')
g.custom_show_command('show', 'get_security_workspace_setting')
g.custom_command('create', 'create_security_workspace_setting')
g.custom_command('delete', 'delete_security_workspace_setting')
with self.command_group('security assessment',
security_assessment_sdk,
client_factory=cf_security_assessment) as g:
g.custom_command('list', 'list_security_assessments')
g.custom_show_command('show', 'get_security_assessment')
g.custom_command('create', 'create_security_assessment')
g.custom_command('delete', 'delete_security_assessment')
with self.command_group('security assessment-metadata',
security_assessment_metadata_sdk,
client_factory=cf_security_assessment_metadata) as g:
g.custom_command('list', 'list_security_assessment_metadata')
g.custom_show_command('show', 'get_security_assessment_metadata')
g.custom_command('create', 'create_security_assessment_metadata')
g.custom_command('delete', 'delete_security_assessment_metadata')
with self.command_group('security sub-assessment',
security_sub_assessment_sdk,
client_factory=cf_security_sub_assessment) as g:
g.custom_command('list', 'list_security_sub_assessments')
g.custom_show_command('show', 'get_security_sub_assessment')
with self.command_group('security adaptive-application-controls',
security_adaptive_application_controls_sdk,
client_factory=cf_security_adaptive_application_controls) as g:
g.custom_command('list', 'list_security_adaptive_application_controls')
g.custom_show_command('show', 'get_security_adaptive_application_controls')
with self.command_group('security adaptive_network_hardenings',
security_adaptive_network_hardenings_sdk,
client_factory=cf_security_adaptive_network_hardenings) as g:
g.custom_show_command('show', 'get_security_adaptive_network_hardenings')
g.custom_command('list', 'list_security_adaptive_network_hardenings')
with self.command_group('security allowed_connections',
security_allowed_connections_sdk,
client_factory=cf_security_allowed_connections) as g:
g.custom_command('list', 'list_security_allowed_connections')
g.custom_show_command('show', 'get_security_allowed_connections')
with self.command_group('security iot-solution',
security_iot_solution_sdk,
client_factory=cf_security_iot_solution) as g:
g.custom_command('list', 'list_security_iot_solution')
g.custom_show_command('show', 'show_security_iot_solution')
g.custom_command('create', 'create_security_iot_solution')
g.custom_command('delete', 'delete_security_iot_solution')
g.custom_command('update', 'update_security_iot_solution')
with self.command_group('security iot-analytics',
security_iot_analytics_sdk,
client_factory=cf_security_iot_analytics) as g:
g.custom_command('list', 'list_security_iot_analytics')
g.custom_show_command('show', 'show_security_iot_analytics')
with self.command_group('security iot-alerts',
security_iot_alerts_sdk,
client_factory=cf_security_iot_alerts) as g:
g.custom_command('list', 'list_security_iot_alerts')
g.custom_show_command('show', 'show_security_iot_alerts')
g.custom_command('delete', 'dismiss_security_iot_alerts')
with self.command_group('security iot-recommendations',
security_iot_recommendations_sdk,
client_factory=cf_security_iot_recommendations) as g:
g.custom_command('list', 'list_security_iot_recommendations')
g.custom_show_command('show', 'show_security_iot_recommendations')
with self.command_group('security automation',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('list', 'list_security_automations')
g.custom_show_command('show', 'get_security_automation')
g.custom_command('delete', 'delete_security_automation')
g.custom_command('create_or_update', 'create_or_update_security_automation')
g.custom_command('validate', 'validate_security_automation')
with self.command_group('security automation-scope',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_scope')
with self.command_group('security automation-rule',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_rule')
with self.command_group('security automation-rule-set',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_rule_set')
with self.command_group('security automation-source',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_source')
with self.command_group('security automation-action-logic-app',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_action_logic_app')
with self.command_group('security automation-action-event-hub',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_action_event_hub')
with self.command_group('security automation-action-workspace',
security_automations_sdk,
client_factory=cf_security_automations) as g:
g.custom_command('create', 'create_security_automation_action_workspace')
with self.command_group('security security-solutions',
security_solutions_sdk,
client_factory=cf_security_security_solutions) as g:
g.custom_command('list', 'list_security_security_solutions')
with self.command_group('security'):
pass
|
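The row above pairs two versions of an Azure CLI security module's `load_command_table`, built on azure-cli's `CliCommandType` / `command_group` registration pattern. As a hedged illustration only (not part of the dataset), a minimal hypothetical loader using that pattern might look as follows; `DemoCommandsLoader`, `cf_demo`, `demo.custom`, `list_demo`, and `get_demo` are placeholder names:

# Hypothetical sketch of the CliCommandType / command_group pattern shown above.
# All names here are placeholders; only the azure-cli APIs themselves are real.
from azure.cli.core import AzCommandsLoader
from azure.cli.core.commands import CliCommandType


def cf_demo(cli_ctx, *_):
    """Client factory: would normally build and return an SDK client bound to cli_ctx."""
    raise NotImplementedError


demo_sdk = CliCommandType(
    operations_tmpl='azure.mgmt.security.operations#SettingsOperations.{}',
    client_factory=cf_demo,
)


class DemoCommandsLoader(AzCommandsLoader):
    def __init__(self, cli_ctx=None):
        # Custom command implementations are resolved by name from this (placeholder) module path.
        demo_custom = CliCommandType(operations_tmpl='demo.custom#{}')
        super().__init__(cli_ctx=cli_ctx, custom_command_type=demo_custom)

    def load_command_table(self, _):
        # Each command_group maps CLI verbs to custom functions referenced by name.
        with self.command_group('security demo', demo_sdk, client_factory=cf_demo) as g:
            g.custom_command('list', 'list_demo')
            g.custom_show_command('show', 'get_demo')
        return self.command_table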
37,657 |
def random_circuit(
num_qubits, depth, max_operands=3, measure=False, conditional=False, reset=False, seed=None
):
"""Generate random circuit of arbitrary size and form.
This function will generate a random circuit by randomly selecting gates
from the set of standard gates in :mod:`qiskit.extensions`. For example:
.. jupyter-execute::
from qiskit.circuit.random import random_circuit
circ = random_circuit(2, 2, measure=True)
circ.draw(output='mpl')
Args:
num_qubits (int): number of quantum wires
depth (int): layers of operations (i.e. critical path length)
max_operands (int): maximum operands of each gate (between 1 and 3)
measure (bool): if True, measure all qubits at the end
conditional (bool): if True, insert middle measurements and conditionals
reset (bool): if True, insert middle resets
seed (int): sets random seed (optional)
Returns:
QuantumCircuit: constructed circuit
Raises:
CircuitError: when invalid options given
"""
if max_operands < 1 or max_operands > 3:
raise CircuitError("max_operands must be between 1 and 3")
one_q_ops = [
IGate,
U1Gate,
U2Gate,
U3Gate,
XGate,
YGate,
ZGate,
HGate,
SGate,
SdgGate,
TGate,
TdgGate,
RXGate,
RYGate,
RZGate,
]
one_param = [U1Gate, RXGate, RYGate, RZGate, RZZGate, CU1Gate, CRZGate]
two_param = [U2Gate]
three_param = [U3Gate, CU3Gate]
two_q_ops = [CXGate, CYGate, CZGate, CHGate, CRZGate, CU1Gate, CU3Gate, SwapGate, RZZGate]
three_q_ops = [CCXGate, CSwapGate]
qr = QuantumRegister(num_qubits, "q")
qc = QuantumCircuit(num_qubits)
if measure or conditional:
cr = ClassicalRegister(num_qubits, "c")
qc.add_register(cr)
if reset:
one_q_ops += [Reset]
if seed is None:
seed = np.random.randint(0, np.iinfo(np.int32).max)
rng = np.random.default_rng(seed)
# apply arbitrary random operations at every depth
for _ in range(depth):
# choose either 1, 2, or 3 qubits for the operation
remaining_qubits = list(range(num_qubits))
rng.shuffle(remaining_qubits)
while remaining_qubits:
max_possible_operands = min(len(remaining_qubits), max_operands)
num_operands = rng.choice(range(max_possible_operands)) + 1
operands = [remaining_qubits.pop() for _ in range(num_operands)]
if num_operands == 1:
operation = rng.choice(one_q_ops)
elif num_operands == 2:
operation = rng.choice(two_q_ops)
elif num_operands == 3:
operation = rng.choice(three_q_ops)
if operation in one_param:
num_angles = 1
elif operation in two_param:
num_angles = 2
elif operation in three_param:
num_angles = 3
else:
num_angles = 0
angles = [rng.uniform(0, 2 * np.pi) for x in range(num_angles)]
register_operands = [qr[i] for i in operands]
op = operation(*angles)
# with some low probability, condition on classical bit values
if conditional and rng.choice(range(10)) == 0:
parts = rng.integers(0, 1<<16, size=4)
shift = 0
condition_int = 0
for part in parts:
ipart = (int)(part)
condition_int += ipart << shift
shift += 16
op.condition = (cr, condition_int)
qc.append(op, register_operands)
if measure:
qc.measure(qr, cr)
return qc
|
def random_circuit(
num_qubits, depth, max_operands=3, measure=False, conditional=False, reset=False, seed=None
):
"""Generate random circuit of arbitrary size and form.
This function will generate a random circuit by randomly selecting gates
from the set of standard gates in :mod:`qiskit.extensions`. For example:
.. jupyter-execute::
from qiskit.circuit.random import random_circuit
circ = random_circuit(2, 2, measure=True)
circ.draw(output='mpl')
Args:
num_qubits (int): number of quantum wires
depth (int): layers of operations (i.e. critical path length)
max_operands (int): maximum operands of each gate (between 1 and 3)
measure (bool): if True, measure all qubits at the end
conditional (bool): if True, insert middle measurements and conditionals
reset (bool): if True, insert middle resets
seed (int): sets random seed (optional)
Returns:
QuantumCircuit: constructed circuit
Raises:
CircuitError: when invalid options given
"""
if max_operands < 1 or max_operands > 3:
raise CircuitError("max_operands must be between 1 and 3")
one_q_ops = [
IGate,
U1Gate,
U2Gate,
U3Gate,
XGate,
YGate,
ZGate,
HGate,
SGate,
SdgGate,
TGate,
TdgGate,
RXGate,
RYGate,
RZGate,
]
one_param = [U1Gate, RXGate, RYGate, RZGate, RZZGate, CU1Gate, CRZGate]
two_param = [U2Gate]
three_param = [U3Gate, CU3Gate]
two_q_ops = [CXGate, CYGate, CZGate, CHGate, CRZGate, CU1Gate, CU3Gate, SwapGate, RZZGate]
three_q_ops = [CCXGate, CSwapGate]
qr = QuantumRegister(num_qubits, "q")
qc = QuantumCircuit(num_qubits)
if measure or conditional:
cr = ClassicalRegister(num_qubits, "c")
qc.add_register(cr)
if reset:
one_q_ops += [Reset]
if seed is None:
seed = np.random.randint(0, np.iinfo(np.int32).max)
rng = np.random.default_rng(seed)
# apply arbitrary random operations at every depth
for _ in range(depth):
# choose either 1, 2, or 3 qubits for the operation
remaining_qubits = list(range(num_qubits))
rng.shuffle(remaining_qubits)
while remaining_qubits:
max_possible_operands = min(len(remaining_qubits), max_operands)
num_operands = rng.choice(range(max_possible_operands)) + 1
operands = [remaining_qubits.pop() for _ in range(num_operands)]
if num_operands == 1:
operation = rng.choice(one_q_ops)
elif num_operands == 2:
operation = rng.choice(two_q_ops)
elif num_operands == 3:
operation = rng.choice(three_q_ops)
if operation in one_param:
num_angles = 1
elif operation in two_param:
num_angles = 2
elif operation in three_param:
num_angles = 3
else:
num_angles = 0
angles = [rng.uniform(0, 2 * np.pi) for x in range(num_angles)]
register_operands = [qr[i] for i in operands]
op = operation(*angles)
# with some low probability, condition on classical bit values
if conditional and rng.choice(range(10)) == 0:
parts = rng.integers(0, 1<<16, size=4)
shift = 0
condition_int = 0
for part in parts:
ipart = int(part)
condition_int += ipart << shift
shift += 16
op.condition = (cr, condition_int)
qc.append(op, register_operands)
if measure:
qc.measure(qr, cr)
return qc
|
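A brief usage sketch for `random_circuit` (illustrative only, assumes a Qiskit installation); it exercises the `max_operands`, `measure`, `conditional`, `reset`, and `seed` options described in the docstring above:

# Illustrative call of random_circuit; qiskit.circuit.random provides the same helper.
from qiskit.circuit.random import random_circuit

circ = random_circuit(num_qubits=3, depth=4, max_operands=2,
                      measure=True, conditional=True, reset=True, seed=42)
print(circ.num_qubits, circ.depth())
print(circ)  # text drawing of the randomly generated circuit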
13,561 |
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
        - `'LM'`: select eigenvalues with largest magnitude |w[i]|
        - `'SM'`: select eigenvalues with smallest magnitude |w[i]|
        - `'LR'`: select eigenvalues with largest real part Re(w[i])
        - `'SR'`: select eigenvalues with smallest real part Re(w[i])
        - `'LI'`: select eigenvalues with largest imaginary part Im(w[i])
        - `'SI'`: select eigenvalues with smallest imaginary part Im(w[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
        - `'LM'`: select eigenvalues with largest magnitude |w[i]|
        - `'SM'`: select eigenvalues with smallest magnitude |w[i]|
        - `'LR'`: select eigenvalues with largest real part Re(w[i])
        - `'SR'`: select eigenvalues with smallest real part Re(w[i])
        - `'LI'`: select eigenvalues with largest imaginary part Im(w[i])
        - `'SI'`: select eigenvalues with smallest imaginary part Im(w[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k += 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
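A hedged usage sketch for the `eigs` routine above, assuming pyMOR's `NumpyMatrixOperator` wrapper; the matrix and parameter values are arbitrary placeholders:

# Hypothetical driver for eigs() as defined above; assumes pyMOR is installed.
import numpy as np
from pymor.operators.numpy import NumpyMatrixOperator

rng = np.random.RandomState(0)
A = NumpyMatrixOperator(rng.rand(50, 50))   # a real, non-symmetric test operator

ew, ev = eigs(A, k=3, which='LM', tol=1e-10)
print(ew)                # the three eigenvalues of largest magnitude
print(len(ev), ev.dim)   # ev is a VectorArray holding the corresponding eigenvectors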
21,182 |
def test_init_config_from_python_without_optional_args() -> None:
"""
Tests calling init_config_cli() from Python with optional arguments not set. This should detect bugs related to
    Typer not automatically setting default values for arguments when decorated functions are called from Python instead
    of from the CLI. See also https://github.com/explosion/spaCy/issues/10727.
"""
with make_tempdir() as temp_dir:
init_config_cli(output_file=temp_dir / "config.cfg")
with make_tempdir() as temp_dir:
init_config_cli(
output_file=temp_dir / "config.cfg",
lang="en",
pipeline="ner",
optimize=Optimizations.efficiency,
gpu=False,
pretraining=False,
)
|
def test_init_config_from_python_without_optional_args():
"""
Tests calling init_config_cli() from Python with optional arguments not set. This should detect bugs related to
    Typer not automatically setting default values for arguments when decorated functions are called from Python instead
    of from the CLI. See also https://github.com/explosion/spaCy/issues/10727.
"""
with make_tempdir() as temp_dir:
init_config_cli(output_file=temp_dir / "config.cfg")
with make_tempdir() as temp_dir:
init_config_cli(
output_file=temp_dir / "config.cfg",
lang="en",
pipeline="ner",
optimize=Optimizations.efficiency,
gpu=False,
pretraining=False,
)
|
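The docstring above points at a Typer quirk: defaults declared via `typer.Option`/`typer.Argument` are only resolved when the decorated function is invoked through the CLI machinery, not when it is called directly from Python. A standalone sketch of that behaviour (illustrative, unrelated to spaCy's actual CLI wiring):

# Minimal illustration of the Typer behaviour the test guards against.
import typer

app = typer.Typer()


@app.command()
def greet(name: str = typer.Option("world")):
    # Via the CLI, `name` arrives as the resolved string ("world" or the user's value).
    # Called directly from Python, `name` is still the typer OptionInfo default object.
    print(type(name), name)


if __name__ == "__main__":
    greet()    # direct Python call: prints an OptionInfo, not "world"
    # app()    # CLI entry point: would resolve the default to "world"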
1,778 |
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False, random_state=0):
"""Perform Affinity Propagation Clustering of data
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
random_state : int, np.random.RandomStateInstance or None, default: None
Pseudo-random number generator to control the starting state.
See :term:`random_state`.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
<sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
When the algorithm does not converge, it returns an empty array as
    ``cluster_centers_indices`` and ``-1`` as label for each training sample.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, a single cluster center
and label ``0`` for every sample will be returned. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
preference = np.array(preference)
if (n_samples == 1 or
_equal_similarities_and_preferences(S, preference)):
# It makes no sense to run the algorithm in this case, so return 1 or
# n_samples clusters, depending on preferences
warnings.warn("All samples have mutually equal similarities. "
"Returning arbitrary cluster center(s).")
if preference.flat[0] >= S.flat[n_samples - 1]:
return ((np.arange(n_samples), np.arange(n_samples), 0)
if return_n_iter
else (np.arange(n_samples), np.arange(n_samples)))
else:
return ((np.array([0]), np.array([0] * n_samples), 0)
if return_n_iter
else (np.array([0]), np.array([0] * n_samples)))
random_state = check_random_state(random_state)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
never_converged = False
if verbose:
print("Converged after %d iterations." % it)
break
else:
never_converged = True
if verbose:
print("Did not converge")
I = np.flatnonzero(E)
K = I.size # Identify exemplars
if K > 0 and not never_converged:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
warnings.warn("Affinity propagation did not converge, this model "
"will not have any cluster centers.", ConvergenceWarning)
labels = np.array([-1] * n_samples)
cluster_centers_indices = []
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
|
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False, random_state=0):
"""Perform Affinity Propagation Clustering of data
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
random_state : int or np.random.RandomStateInstance, default: None
Pseudo-random number generator to control the starting state.
See :term:`random_state`.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
<sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
When the algorithm does not converge, it returns an empty array as
    ``cluster_centers_indices`` and ``-1`` as label for each training sample.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, a single cluster center
and label ``0`` for every sample will be returned. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
preference = np.array(preference)
if (n_samples == 1 or
_equal_similarities_and_preferences(S, preference)):
# It makes no sense to run the algorithm in this case, so return 1 or
# n_samples clusters, depending on preferences
warnings.warn("All samples have mutually equal similarities. "
"Returning arbitrary cluster center(s).")
if preference.flat[0] >= S.flat[n_samples - 1]:
return ((np.arange(n_samples), np.arange(n_samples), 0)
if return_n_iter
else (np.arange(n_samples), np.arange(n_samples)))
else:
return ((np.array([0]), np.array([0] * n_samples), 0)
if return_n_iter
else (np.array([0]), np.array([0] * n_samples)))
random_state = check_random_state(random_state)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
never_converged = False
if verbose:
print("Converged after %d iterations." % it)
break
else:
never_converged = True
if verbose:
print("Did not converge")
I = np.flatnonzero(E)
K = I.size # Identify exemplars
if K > 0 and not never_converged:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
warnings.warn("Affinity propagation did not converge, this model "
"will not have any cluster centers.", ConvergenceWarning)
labels = np.array([-1] * n_samples)
cluster_centers_indices = []
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
|
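A short usage sketch for the `affinity_propagation` function above (illustrative only). The algorithm expects a precomputed similarity matrix `S`; negative squared Euclidean distances are the usual choice:

# Illustrative call of affinity_propagation() with a precomputed similarity matrix.
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances

X = np.array([[1.0, 2.0], [1.2, 1.9], [8.0, 8.0], [8.1, 7.9]])
S = -euclidean_distances(X, squared=True)       # higher value = more similar

centers, labels, n_iter = affinity_propagation(S, random_state=0, return_n_iter=True)
print(centers)   # indices of the exemplar samples
print(labels)    # cluster label assigned to each sample
print(n_iter)    # number of iterations actually run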
31,609 |
def main():
params = demisto.params()
use_ssl = not params.get("insecure", False)
use_proxy = params.get("proxy", False)
client = Client(params["server_url"], params["api_secret"], use_ssl, use_proxy)
commands: Dict[str, Callable] = {"trendmicro-list-computers": list_computers_command,
"trendmicro-create-computer": create_computer_command,
"trendmicro-search-computers": search_computers_command,
"trendmicro-get-computer": get_computer_command,
"trendmicro-modify-computer": modify_computer_command,
"trendmicro-delete-computer": delete_computer_command,
"trendmicro-get-computer-setting": get_computer_setting_command,
"trendmicro-modify-computer-setting": modify_computer_setting_command,
"trendmicro-reset-computer-setting": reset_computer_setting_command,
"trendmicro-list-firewall-rule-ids-of-computer": list_firewall_rule_ids_of_computer_command,
"trendmicro-add-firewall-rule-ids-to-computer": add_firewall_rule_ids_to_computer_command,
"trendmicro-set-firewall-rule-ids-to-computer": set_firewall_rule_ids_to_computer_command,
"trendmicro-remove-firewall-rule-id-from-computer": remove_firewall_rule_id_from_computer_command, # noqa: E501
"trendmicro-list-computer-groups": list_computer_groups_command,
"trendmicro-create-computer-group": create_computer_group_command,
"trendmicro-search-computer-groups": search_computer_groups_command,
"trendmicro-get-computer-group": get_computer_group_command,
"trendmicro-modify-computer-group": modify_computer_group_command,
"trendmicro-delete-computer-group": delete_computer_group_command,
"trendmicro-search-firewall-rules": search_firewall_rules_command,
"trendmicro-list-firewall-rules": list_firewall_rules_command,
"trendmicro-create-firewall-rule": create_firewall_rule_command,
"trendmicro-get-firewall-rule": get_firewall_rule_command,
"trendmicro-modify-firewall-rule": modify_firewall_rule_command,
"trendmicro-delete-firewall-rule": delete_firewall_rule_command,
"trendmicro-search-policies": search_policies_command,
"trendmicro-get-policy": get_policy_command,
"trendmicro-modify-policy": modify_policy_command,
"trendmicro-delete-policy": delete_policy_command,
"trendmicro-get-default-policy-setting": get_default_policy_setting_command,
"trendmicro-modify-default-policy-setting": modify_default_policy_setting_command,
"trendmicro-reset-default-policy-setting": reset_default_policy_setting_command,
"trendmicro-list-default-policy-settings": list_default_policy_settings_command,
"trendmicro-get-policy-setting": get_policy_setting_command,
"trendmicro-modify-policy-setting": modify_policy_setting_command,
"trendmicro-reset-policy-setting": reset_policy_setting_command,
"trendmicro-list-policies": list_policies_command,
"trendmicro-create-policy": create_policy_command, "test-module": test_module}
error_message = ""
try:
command = demisto.command()
if command in commands:
command_function = commands[command]
return_results(command_function(client, **convert_args(command_function, demisto.args())))
else:
raise NotImplementedError(f"The command {command} does not exist on TrendMicro!")
except (ConnectionError, InvalidURL, InvalidSchema) as e:
demisto.error(traceback.format_exc())
error_message = f"{INVALID_URL_ERROR}\nError:\n{e}"
except HTTPError as e:
demisto.error(traceback.format_exc())
error_message = f"Error in API call [{e.response.status_code}]\n{e.response.json()['message']}"
except Exception as e:
demisto.error(traceback.format_exc())
error_message = f"Failed to execute {demisto.command()} command.\nError:\n{e}"
return_error(error_message)
|
def main():
params = demisto.params()
use_ssl = not params.get("insecure", False)
use_proxy = params.get("proxy", False)
client = Client(params.get("server_url"), params.get("api_secret"), use_ssl, use_proxy)
commands: Dict[str, Callable] = {"trendmicro-list-computers": list_computers_command,
"trendmicro-create-computer": create_computer_command,
"trendmicro-search-computers": search_computers_command,
"trendmicro-get-computer": get_computer_command,
"trendmicro-modify-computer": modify_computer_command,
"trendmicro-delete-computer": delete_computer_command,
"trendmicro-get-computer-setting": get_computer_setting_command,
"trendmicro-modify-computer-setting": modify_computer_setting_command,
"trendmicro-reset-computer-setting": reset_computer_setting_command,
"trendmicro-list-firewall-rule-ids-of-computer": list_firewall_rule_ids_of_computer_command,
"trendmicro-add-firewall-rule-ids-to-computer": add_firewall_rule_ids_to_computer_command,
"trendmicro-set-firewall-rule-ids-to-computer": set_firewall_rule_ids_to_computer_command,
"trendmicro-remove-firewall-rule-id-from-computer": remove_firewall_rule_id_from_computer_command, # noqa: E501
"trendmicro-list-computer-groups": list_computer_groups_command,
"trendmicro-create-computer-group": create_computer_group_command,
"trendmicro-search-computer-groups": search_computer_groups_command,
"trendmicro-get-computer-group": get_computer_group_command,
"trendmicro-modify-computer-group": modify_computer_group_command,
"trendmicro-delete-computer-group": delete_computer_group_command,
"trendmicro-search-firewall-rules": search_firewall_rules_command,
"trendmicro-list-firewall-rules": list_firewall_rules_command,
"trendmicro-create-firewall-rule": create_firewall_rule_command,
"trendmicro-get-firewall-rule": get_firewall_rule_command,
"trendmicro-modify-firewall-rule": modify_firewall_rule_command,
"trendmicro-delete-firewall-rule": delete_firewall_rule_command,
"trendmicro-search-policies": search_policies_command,
"trendmicro-get-policy": get_policy_command,
"trendmicro-modify-policy": modify_policy_command,
"trendmicro-delete-policy": delete_policy_command,
"trendmicro-get-default-policy-setting": get_default_policy_setting_command,
"trendmicro-modify-default-policy-setting": modify_default_policy_setting_command,
"trendmicro-reset-default-policy-setting": reset_default_policy_setting_command,
"trendmicro-list-default-policy-settings": list_default_policy_settings_command,
"trendmicro-get-policy-setting": get_policy_setting_command,
"trendmicro-modify-policy-setting": modify_policy_setting_command,
"trendmicro-reset-policy-setting": reset_policy_setting_command,
"trendmicro-list-policies": list_policies_command,
"trendmicro-create-policy": create_policy_command, "test-module": test_module}
error_message = ""
try:
command = demisto.command()
if command in commands:
command_function = commands[command]
return_results(command_function(client, **convert_args(command_function, demisto.args())))
else:
raise NotImplementedError(f"The command {command} does not exist on TrendMicro!")
except (ConnectionError, InvalidURL, InvalidSchema) as e:
demisto.error(traceback.format_exc())
error_message = f"{INVALID_URL_ERROR}\nError:\n{e}"
except HTTPError as e:
demisto.error(traceback.format_exc())
error_message = f"Error in API call [{e.response.status_code}]\n{e.response.json()['message']}"
except Exception as e:
demisto.error(traceback.format_exc())
error_message = f"Failed to execute {demisto.command()} command.\nError:\n{e}"
return_error(error_message)
|
17,440 |
def _is_numpy_datetime(times):
return times.dtype.char in ("M", "m")
|
def _is_numpy_datetime(times):
return times.dtype.kind in ("M", "m")
|
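The change above swaps dtype.char for dtype.kind; a small self-contained sketch (assuming only NumPy, with made-up sample arrays) of how that check classifies datetime64, timedelta64 and ordinary numeric arrays:

import numpy as np

def _is_numpy_datetime(times):
    return times.dtype.kind in ("M", "m")

# datetime64 arrays report dtype.kind == "M", timedelta64 arrays report "m"
stamps = np.array(["2021-01-01", "2021-01-02"], dtype="datetime64[ns]")
deltas = stamps - stamps[0]          # timedelta64 array
floats = np.array([1.0, 2.0])        # kind is "f"

print(_is_numpy_datetime(stamps))    # True
print(_is_numpy_datetime(deltas))    # True
print(_is_numpy_datetime(floats))    # False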
35,845 |
def test_msg_data_call_args(get_contract):
code = """
@external
def foo(bar: uint256) -> (Bytes[4], uint256):
data: Bytes[36] = msg.data[36]
return (slice(data, 0, 4), convert(slice(data, 4, 32), uint256))
"""
contract = get_contract(code)
a, b = contract.foo(42)
assert a.hex() == "2fbebd38" # fn sig
assert b == 42
|
def test_msg_data_call_args(get_contract):
code = """
@external
def foo(bar: uint256) -> (Bytes[4], uint256):
data: Bytes[36] = slice(msg.data, 0, 36) # Throws if `len(msg.data) < 36`
return (slice(data, 0, 4), convert(slice(data, 4, 32), uint256))
"""
contract = get_contract(code)
a, b = contract.foo(42)
assert a.hex() == "2fbebd38" # fn sig
assert b == 42
|
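As a rough companion to the rewritten Vyper test, a plain-Python sketch of the calldata layout it relies on: 4 selector bytes followed by one ABI-encoded uint256. The selector value is taken from the test's own assertion; nothing else is assumed about the contract:

# Build 36 bytes of calldata: 4-byte function selector followed by uint256(42)
selector = bytes.fromhex("2fbebd38")           # fn sig hash from the test
argument = (42).to_bytes(32, byteorder="big")  # ABI-encoded uint256 argument
calldata = selector + argument

assert len(calldata) == 36
assert calldata[:4].hex() == "2fbebd38"                       # matches `a` in the test
assert int.from_bytes(calldata[4:36], byteorder="big") == 42  # matches `b` in the test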
48,476 |
def main():
module = AnsibleModule(
argument_spec=dict(
database=dict(type='str', required=True),
key=dict(type='str', no_log=False),
service=dict(type='str'),
split=dict(type='str'),
fail_key=dict(type='bool', default=True),
),
supports_check_mode=True,
)
colon = ['passwd', 'shadow', 'group', 'gshadow']
database = module.params['database']
key = module.params.get('key')
split = module.params.get('split')
service = module.params.get('service')
fail_key = module.params.get('fail_key')
getent_bin = module.get_bin_path('getent', True)
if key is not None:
cmd = [getent_bin, database, key]
else:
cmd = [getent_bin, database]
if service is not None:
cmd.extend(['-s', service])
if split is None and database in colon:
split = ':'
try:
rc, out, err = module.run_command(cmd)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
msg = "Unexpected failure!"
dbtree = 'getent_%s' % database
results = {dbtree: {}}
if rc == 0:
seen = {}
for line in out.splitlines():
record = line.split(split)
# more than one result for same key
if record[0] in seen:
# ensure we store in a list
if seen[record[0]] == 1:
results[dbtree][record[0]] = [results[dbtree][record[0]]]
results[dbtree][record[0]].append(record[1:])
seen[record[0]] += 1
else:
# new key/value, just assign
results[dbtree][record[0]] = record[1:]
seen[record[0]] = 1
module.exit_json(ansible_facts=results)
elif rc == 1:
msg = "Missing arguments, or database unknown."
elif rc == 2:
msg = "One or more supplied key could not be found in the database."
if not fail_key:
results[dbtree][key] = None
module.exit_json(ansible_facts=results, msg=msg)
elif rc == 3:
msg = "Enumeration not supported on this database."
module.fail_json(msg=msg)
|
def main():
module = AnsibleModule(
argument_spec=dict(
database=dict(type='str', required=True),
key=dict(type='str', no_log=False),
service=dict(type='str'),
split=dict(type='str'),
fail_key=dict(type='bool', default=True),
),
supports_check_mode=True,
)
colon = ['passwd', 'shadow', 'group', 'gshadow']
database = module.params['database']
key = module.params.get('key')
split = module.params.get('split')
service = module.params.get('service')
fail_key = module.params.get('fail_key')
getent_bin = module.get_bin_path('getent', True)
if key is not None:
cmd = [getent_bin, database, key]
else:
cmd = [getent_bin, database]
if service is not None:
cmd.extend(['-s', service])
if split is None and database in colon:
split = ':'
try:
rc, out, err = module.run_command(cmd)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
msg = "Unexpected failure!"
dbtree = 'getent_%s' % database
results = {dbtree: {}}
if rc == 0:
seen = {}
for line in out.splitlines():
record = line.split(split)
if record[0] in seen:
# more than one result for same key, ensure we store in a list
if seen[record[0]] == 1:
results[dbtree][record[0]] = [results[dbtree][record[0]]]
results[dbtree][record[0]].append(record[1:])
seen[record[0]] += 1
else:
# new key/value, just assign
results[dbtree][record[0]] = record[1:]
seen[record[0]] = 1
module.exit_json(ansible_facts=results)
elif rc == 1:
msg = "Missing arguments, or database unknown."
elif rc == 2:
msg = "One or more supplied key could not be found in the database."
if not fail_key:
results[dbtree][key] = None
module.exit_json(ansible_facts=results, msg=msg)
elif rc == 3:
msg = "Enumeration not supported on this database."
module.fail_json(msg=msg)
|
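An illustrative sketch (with invented sample lines rather than real getent output) of the split-and-collect loop the module applies to colon-separated databases such as passwd:

# Two colon-separated records, as `getent passwd` would print them
out = "root:x:0:0:root:/root:/bin/bash\n" \
      "alice:x:1000:1000:Alice:/home/alice:/bin/zsh"
split = ":"

results = {"getent_passwd": {}}
seen = {}
for line in out.splitlines():
    record = line.split(split)
    if record[0] in seen:
        # repeated key: promote the stored value to a list and append
        if seen[record[0]] == 1:
            results["getent_passwd"][record[0]] = [results["getent_passwd"][record[0]]]
        results["getent_passwd"][record[0]].append(record[1:])
        seen[record[0]] += 1
    else:
        # new key/value, just assign
        results["getent_passwd"][record[0]] = record[1:]
        seen[record[0]] = 1

print(results["getent_passwd"]["alice"])  # ['x', '1000', '1000', 'Alice', '/home/alice', '/bin/zsh']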
39,664 |
def main():
module = ForemanEntityApypieAnsibleModule(
argument_spec=dict(
name=dict(required=True),
release_name=dict(),
description=dict(),
family=dict(required=True),
major=dict(required=True),
minor=dict(),
architectures=dict(type='list'),
media=dict(type='list'),
ptables=dict(type='list'),
provisioning_templates=dict(type='list'),
password_hash=dict(choices=['MD5', 'SHA256', 'SHA512']),
parameters=dict(type='list', elements='dict', options=dict(
name=dict(required=True),
value=dict(type='raw', required=True),
parameter_type=dict(default='string', choices=['string', 'boolean', 'integer', 'real', 'array', 'hash', 'yaml', 'json']),
)),
state=dict(default='present', choices=['present', 'present_with_defaults', 'absent']),
),
name_map=name_map,
)
entity_dict = module.clean_params()
module.connect()
# Try to find the Operating System to work on
# name is however not unique, but description is, as well as "<name> <major>[.<minor>]"
entity = None
# If we have a description, search for it
if 'description' in entity_dict and entity_dict['description'] != '':
entity = module.find_resource_by_title('operatingsystems', entity_dict['description'], failsafe=True)
# If we did not yet find a unique OS, search by name & version
if entity is None:
search_parts = ['{}="{}"'.format(key, entity_dict[key]) for key in ('name', 'major', 'minor') if key in entity_dict]
entity = module.find_resource('operatingsystems', ','.join(search_parts), failsafe=True)
if not entity and (module.state == 'present' or module.state == 'present_with_defaults'):
# we actually attempt to create a new one...
for param_name in ['major', 'family', 'password_hash']:
if param_name not in entity_dict.keys():
module.fail_json(msg='{} is a required parameter to create a new operating system.'.format(param_name))
if not module.desired_absent:
# Set Architectures of Operating System
if 'architectures' in entity_dict:
entity_dict['architectures'] = module.find_resources_by_name('architectures', entity_dict['architectures'], thin=True)
# Set Installation Media of Operating System
if 'media' in entity_dict:
entity_dict['media'] = module.find_resources_by_name('media', entity_dict['media'], thin=True)
# Set Partition Tables of Operating System
if 'ptables' in entity_dict:
entity_dict['ptables'] = module.find_resources_by_name('ptables', entity_dict['ptables'], thin=True)
# Set Provisioning Templates of Operating System
if 'provisioning_templates' in entity_dict:
entity_dict['provisioning_templates'] = module.find_resources_by_name('provisioning_templates', entity_dict['provisioning_templates'], thin=True)
parameters = entity_dict.get('parameters')
changed, operating_system = module.ensure_resource('operatingsystems', entity_dict, entity)
if parameters is not None:
if module.state == 'present' or (module.state == 'present_with_defaults' and entity is None):
search_params = {'operatingsystem_id': operating_system['id']}
if entity:
current_parameters = {p['name']: p for p in module.list_resource('parameters', params=search_params)}
else:
current_parameters = {}
desired_parameters = {p['name']: p for p in parameters}
for name in desired_parameters:
desired_parameter = desired_parameters[name]
desired_parameter['value'] = parameter_value_to_str(desired_parameter['value'], desired_parameter['parameter_type'])
desired_parameter['operatingsystem'] = operating_system
current_parameter = current_parameters.pop(name, None)
if current_parameter:
if 'parameter_type' not in current_parameter:
current_parameter['parameter_type'] = 'string'
current_parameter['value'] = parameter_value_to_str(current_parameter['value'], current_parameter['parameter_type'])
changed |= module.ensure_resource_state('parameters', desired_parameters[name], current_parameter, state="present", name_map=parameter_name_map)
for current_parameter in current_parameters.values():
current_parameter['operatingsystem_id'] = operating_system['id']
changed |= module.ensure_resource_state('parameters', {}, current_parameter, state="absent", name_map=parameter_name_map)
module.exit_json(changed=changed)
|
def main():
module = ForemanEntityApypieAnsibleModule(
argument_spec=dict(
name=dict(required=True),
release_name=dict(),
description=dict(),
family=dict(required=True),
major=dict(required=True),
minor=dict(),
architectures=dict(type='list'),
media=dict(type='list'),
ptables=dict(type='list'),
provisioning_templates=dict(type='list'),
password_hash=dict(choices=['MD5', 'SHA256', 'SHA512']),
parameters=dict(type='list', elements='dict', options=dict(
name=dict(required=True),
value=dict(type='raw', required=True),
parameter_type=dict(default='string', choices=['string', 'boolean', 'integer', 'real', 'array', 'hash', 'yaml', 'json']),
)),
state=dict(default='present', choices=['present', 'present_with_defaults', 'absent']),
),
name_map=name_map,
)
entity_dict = module.clean_params()
module.connect()
# Try to find the Operating System to work on
# name is however not unique, but description is, as well as "<name> <major>[.<minor>]"
entity = None
# If we have a description, search for it
if 'description' in entity_dict and entity_dict['description'] != '':
entity = module.find_resource_by_title('operatingsystems', entity_dict['description'], failsafe=True)
# If we did not yet find a unique OS, search by name & version
if entity is None:
search_parts = ['{}="{}"'.format(key, entity_dict[key]) for key in ('name', 'major', 'minor') if key in entity_dict]
entity = module.find_resource('operatingsystems', ','.join(search_parts), failsafe=True)
if not entity and (module.state == 'present' or module.state == 'present_with_defaults'):
# we actually attempt to create a new one...
for param_name in ['major', 'family', 'password_hash']:
if param_name not in entity_dict.keys():
module.fail_json(msg='{} is a required parameter to create a new operating system.'.format(param_name))
if not module.desired_absent:
# Set Architectures of Operating System
if 'architectures' in entity_dict:
entity_dict['architectures'] = module.find_resources_by_name('architectures', entity_dict['architectures'], thin=True)
# Set Installation Media of Operating System
if 'media' in entity_dict:
entity_dict['media'] = module.find_resources_by_name('media', entity_dict['media'], thin=True)
# Set Partition Tables of Operating System
if 'ptables' in entity_dict:
entity_dict['ptables'] = module.find_resources_by_name('ptables', entity_dict['ptables'], thin=True)
# Set Provisioning Templates of Operating System
if 'provisioning_templates' in entity_dict:
entity_dict['provisioning_templates'] = module.find_resources_by_name('provisioning_templates', entity_dict['provisioning_templates'], thin=True)
parameters = entity_dict.get('parameters')
changed, operating_system = module.ensure_resource('operatingsystems', entity_dict, entity)
if parameters is not None:
if module.state == 'present' or (module.state == 'present_with_defaults' and entity is None):
search_params = {'operatingsystem_id': operating_system['id']}
if entity:
current_parameters = {p['name']: p for p in module.list_resource('parameters', params=search_params)}
else:
current_parameters = {}
desired_parameters = {parameter['name']: parameter for parameter in parameters}
for name in desired_parameters:
desired_parameter = desired_parameters[name]
desired_parameter['value'] = parameter_value_to_str(desired_parameter['value'], desired_parameter['parameter_type'])
desired_parameter['operatingsystem'] = operating_system
current_parameter = current_parameters.pop(name, None)
if current_parameter:
if 'parameter_type' not in current_parameter:
current_parameter['parameter_type'] = 'string'
current_parameter['value'] = parameter_value_to_str(current_parameter['value'], current_parameter['parameter_type'])
changed |= module.ensure_resource_state('parameters', desired_parameters[name], current_parameter, state="present", name_map=parameter_name_map)
for current_parameter in current_parameters.values():
current_parameter['operatingsystem_id'] = operating_system['id']
changed |= module.ensure_resource_state('parameters', {}, current_parameter, state="absent", name_map=parameter_name_map)
module.exit_json(changed=changed)
|
30,647 |
def build_grid(context_path: str, keys: List[str], columns: List[str], skip_nested_elements: bool) -> pd.DataFrame:
""" Build new DateFrame from current context retrieved by DT.
There is 3 cases:
1. DT returns dict (list including 1 item only)- In this case we will insert it in the table as key,
value each row.
2. DT returns list - In this case each entry in the list will represent a row.
3. DT return unknown obj (str..) - return empty list.
Args:
context_path: DT context path.
keys: Keys to be included
columns: Grid columns name.
skip_nested_elements: False for unpacking nested elements, True otherwise.
Returns:
pd.DataFrame: New Table include data from Entry Context
"""
# Retrieve entry context data
entry_context_data = demisto.dt(demisto.context(), context_path)
# Validate entry context structure
data_type = validate_entry_context(entry_context_data, keys, skip_nested_elements)
# Building new Grid
if not skip_nested_elements:
table = pd.DataFrame(unpack_all_data_from_dict(entry_context_data, keys, columns))
table.rename(columns=dict(zip(table.columns, columns)), inplace=True)
elif data_type == 'list':
# Handle entry context as list of value
table = pd.DataFrame(entry_context_data)
table.rename(columns=dict(zip(table.columns, columns)), inplace=True)
elif isinstance(entry_context_data, list):
# Handle entry context as list of dicts
entry_context_data = [filter_dict(item, keys, len(columns)) for item in entry_context_data]
table = pd.DataFrame(entry_context_data)
table.rename(columns=dict(zip(table.columns, columns)), inplace=True)
elif isinstance(entry_context_data, dict):
# Handle entry context key-value option
entry_context_data = filter_dict(entry_context_data, keys).items()
table = pd.DataFrame(entry_context_data, columns=columns[:2])
else:
table = []
return table
|
def build_grid(context_path: str, keys: List[str], columns: List[str], skip_nested_elements: bool) -> pd.DataFrame:
""" Build new DateFrame from current context retrieved by DT.
There are 3 cases:
1. DT returns dict (list including 1 item only)- In this case we will insert it in the table as key,
value each row.
2. DT returns list - In this case each entry in the list will represent a row.
3. DT return unknown obj (str..) - return empty list.
Args:
context_path: DT context path.
keys: Keys to be included
columns: Grid columns name.
skip_nested_elements: False for unpacking nested elements, True otherwise.
Returns:
pd.DataFrame: New Table include data from Entry Context
"""
# Retrieve entry context data
entry_context_data = demisto.dt(demisto.context(), context_path)
# Validate entry context structure
data_type = validate_entry_context(entry_context_data, keys, skip_nested_elements)
# Building new Grid
if not skip_nested_elements:
table = pd.DataFrame(unpack_all_data_from_dict(entry_context_data, keys, columns))
table.rename(columns=dict(zip(table.columns, columns)), inplace=True)
elif data_type == 'list':
# Handle entry context as list of value
table = pd.DataFrame(entry_context_data)
table.rename(columns=dict(zip(table.columns, columns)), inplace=True)
elif isinstance(entry_context_data, list):
# Handle entry context as list of dicts
entry_context_data = [filter_dict(item, keys, len(columns)) for item in entry_context_data]
table = pd.DataFrame(entry_context_data)
table.rename(columns=dict(zip(table.columns, columns)), inplace=True)
elif isinstance(entry_context_data, dict):
# Handle entry context key-value option
entry_context_data = filter_dict(entry_context_data, keys).items()
table = pd.DataFrame(entry_context_data, columns=columns[:2])
else:
table = []
return table
|
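For the dict and list-of-dicts cases the docstring describes, a minimal pandas sketch with standalone toy data (not a real Demisto context) showing how each shape turns into a table:

import pandas as pd

columns = ["key", "value"]

# Case: DT returned a single dict -> one key/value pair per row
single = {"hostname": "srv01", "os": "linux"}
table_kv = pd.DataFrame(list(single.items()), columns=columns[:2])

# Case: DT returned a list of dicts -> one dict per row, then rename to grid columns
many = [{"key": "hostname", "value": "srv01"}, {"key": "os", "value": "linux"}]
table_rows = pd.DataFrame(many)
table_rows.rename(columns=dict(zip(table_rows.columns, columns)), inplace=True)

print(table_kv.shape)    # (2, 2)
print(table_rows.shape)  # (2, 2)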
6,812 |
def generate_report_result(report, filters=None, user=None):
status = None
if not user:
user = frappe.session.user
if not filters:
filters = []
if filters and isinstance(filters, string_types):
filters = json.loads(filters)
columns, result, message, chart, data_to_be_printed, skip_total_row = [], [], None, None, None, 0
if report.report_type == "Query Report":
if not report.query:
status = "error"
frappe.msgprint(_("Must specify a Query to run"), raise_exception=True)
if not report.query.lower().startswith("select"):
status = "error"
frappe.msgprint(_("Query must be a SELECT"), raise_exception=True)
result = [list(t) for t in frappe.db.sql(report.query, filters)]
columns = [cstr(c[0]) for c in frappe.db.get_description()]
elif report.report_type == 'Script Report':
res = report.execute_script_report(filters)
columns, result = res[0], res[1]
if len(res) > 2:
message = res[2]
if len(res) > 3:
chart = res[3]
if len(res) > 4:
data_to_be_printed = res[4]
if len(res) > 5:
skip_total_row = cint(res[5])
if report.custom_columns:
columns = json.loads(report.custom_columns)
result = add_data_to_custom_columns(columns, result)
if result:
result = get_filtered_data(report.ref_doctype, columns, result, user)
if cint(report.add_total_row) and result and (not skip_total_row):
result = add_total_row(result, columns)
return {
"result": result,
"columns": columns,
"message": message,
"chart": chart,
"data_to_be_printed": data_to_be_printed,
"skip_total_row": skip_total_row,
"status": status,
"execution_time": frappe.cache().hget('report_execution_time', report.name) or 0
}
|
def generate_report_result(report, filters=None, user=None):
status = None
if not user:
user = frappe.session.user
if not filters:
filters = []
if filters and isinstance(filters, string_types):
filters = json.loads(filters)
columns, result, message, chart, data_to_be_printed, skip_total_row = [], [], None, None, None, 0
if report.report_type == "Query Report":
if not report.query:
status = "error"
frappe.msgprint(_("Must specify a Query to run"), raise_exception=True)
if not report.query.lower().startswith("select"):
status = "error"
frappe.msgprint(_("Query must be a SELECT"), raise_exception=True)
result = [list(t) for t in frappe.db.sql(report.query, filters)]
columns = [cstr(c[0]) for c in frappe.db.get_description()]
elif report.report_type == 'Script Report':
res = report.execute_script_report(filters)
columns, result = res[0], res[1]
if len(res) > 2:
message = res[2]
if len(res) > 3:
chart = res[3]
if len(res) > 4:
data_to_be_printed = res[4]
if len(res) > 5:
skip_total_row = cint(res[5])
if report.custom_columns:
columns = json.loads(report.custom_columns)
result = add_data_to_custom_columns(columns, result)
if result:
result = get_filtered_data(report.ref_doctype, columns, result, user)
if cint(report.add_total_row) and result and not skip_total_row:
result = add_total_row(result, columns)
return {
"result": result,
"columns": columns,
"message": message,
"chart": chart,
"data_to_be_printed": data_to_be_printed,
"skip_total_row": skip_total_row,
"status": status,
"execution_time": frappe.cache().hget('report_execution_time', report.name) or 0
}
|
27,471 |
def list_database_operations(instance_id):
spanner_client = spanner.Client()
instance = spanner_client.instance(instance_id)
# List the progress of restore
filter_ = (
"(metadata.@type:type.googleapis.com/"
"google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata)"
)
operations = instance.list_database_operations(filter_=filter_)
for op in operations:
print("Database {} restored from backup is {}% optimized.".format(
op.metadata.name, op.metadata.progress.progress_percent))
# [END spanner_list_database_operations]
|
def list_database_operations(instance_id):
spanner_client = spanner.Client()
instance = spanner_client.instance(instance_id)
# List the progress of restore.
filter_ = (
"(metadata.@type:type.googleapis.com/"
"google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata)"
)
operations = instance.list_database_operations(filter_=filter_)
for op in operations:
print("Database {} restored from backup is {}% optimized.".format(
op.metadata.name, op.metadata.progress.progress_percent))
# [END spanner_list_database_operations]
|
24,504 |
def render_manifest_v2_progress():
valid_checks = sorted(i for i in get_valid_integrations())
total_checks = len(valid_checks)
checks_v2_manifest = 0
lines = ['## Manifest V2', '', None, '', '??? check "Completed"']
for check in valid_checks:
if is_manifest_v2(check):
checks_v2_manifest += 1
status = 'X'
else:
status = ' '
lines.append(f' - [{status}] {check}')
percent = checks_v2_manifest / total_checks * 100
formatted_percent = f'{percent:.2f}'
lines[2] = f'[={formatted_percent}% "{formatted_percent}%"]'
lines[4] = f'??? check "Completed {checks_v2_manifest}/{total_checks}"'
return lines
|
def render_manifest_v2_progress():
valid_checks = sorted(get_valid_integrations())
total_checks = len(valid_checks)
checks_v2_manifest = 0
lines = ['## Manifest V2', '', None, '', '??? check "Completed"']
for check in valid_checks:
if is_manifest_v2(check):
checks_v2_manifest += 1
status = 'X'
else:
status = ' '
lines.append(f' - [{status}] {check}')
percent = checks_v2_manifest / total_checks * 100
formatted_percent = f'{percent:.2f}'
lines[2] = f'[={formatted_percent}% "{formatted_percent}%"]'
lines[4] = f'??? check "Completed {checks_v2_manifest}/{total_checks}"'
return lines
|
45,700 |
def _check_inputs(vil, rainrate, velocity, timesteps, ar_order):
if len(vil.shape) != 3:
raise ValueError(
"vil.shape = %s, but a three-dimensional array expected" % str(vil.shape)
)
if rainrate is not None:
if len(rainrate.shape) != 2:
raise ValueError(
"rainrate.shape = %s, but a two-dimensional array expected"
% str(rainrate.shape)
)
if vil.shape[0] != ar_order + 2:
raise ValueError(
"vil.shape[0] = %d, but vil.shape[0] = ar_order + 2 = %d required"
% (vil.shape[0], ar_order + 2)
)
if len(velocity.shape) != 3:
raise ValueError(
"velocity.shape = %s, but a three-dimensional array expected"
% str(velocity.shape)
)
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
|
def _check_inputs(vil, rainrate, velocity, timesteps, ar_order):
if vil.ndim != 3:
raise ValueError(
"vil.shape = %s, but a three-dimensional array expected" % str(vil.shape)
)
if rainrate is not None:
if len(rainrate.shape) != 2:
raise ValueError(
"rainrate.shape = %s, but a two-dimensional array expected"
% str(rainrate.shape)
)
if vil.shape[0] != ar_order + 2:
raise ValueError(
"vil.shape[0] = %d, but vil.shape[0] = ar_order + 2 = %d required"
% (vil.shape[0], ar_order + 2)
)
if len(velocity.shape) != 3:
raise ValueError(
"velocity.shape = %s, but a three-dimensional array expected"
% str(velocity.shape)
)
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
|
40,052 |
def assert_features(original, deserialized):
for feat_1, feat_2 in zip(original, deserialized):
assert feat_1.unique_name() == feat_2.unique_name()
assert feat_1.entityset == feat_2.entityset
# IdentityFeature and DirectFeature objects do not have primitives, so
# series library does not need to be compared
if not (
isinstance(feat_1, IdentityFeature) or isinstance(feat_1, DirectFeature)
):
assert feat_1.primitive.series_library == feat_2.primitive.series_library
|
def assert_features(original, deserialized):
for feat_1, feat_2 in zip(original, deserialized):
assert feat_1.unique_name() == feat_2.unique_name()
assert feat_1.entityset == feat_2.entityset
# IdentityFeature and DirectFeature objects do not have primitives, so
# series library does not need to be compared
if not isinstance(feat_1, (IdentityFeature, DirectFeature)):
assert feat_1.primitive.series_library == feat_2.primitive.series_library
|
30,737 |
def close_alerts(args, close_action, client=CLIENT):
readable_close_action = "closed_as_security" if close_action == "delete_rules" else "closed_as_change"
human_readable = f'Command changes the status of alerts passed as "{readable_close_action}" in Nozomi Networks platform.'
extracted_ids = ids_from_args(args)
response = client.http_post_request(
'/api/open/alerts/close',
{"ids": extracted_ids, "close_action": close_action})
result = 'SUCCESS' if wait_for_job_result(response['result']['id'], 'close', client) else 'FAIL'
return {
'outputs': result,
'outputs_prefix': 'Nozomi.Ids',
'outputs_key_field': '',
'readable_output': human_readable
}
|
def close_alerts(args, close_action, client=CLIENT):
readable_close_action = "closed_as_security" if close_action == "delete_rules" else "closed_as_change"
human_readable = f'Command changes the status of alerts passed as "{readable_close_action}" in Nozomi Networks platform.'
extracted_ids = ids_from_args(args)
response = client.http_post_request(
'/api/open/alerts/close',
{"ids": extracted_ids, "close_action": close_action})
result = 'SUCCESS' if wait_for_job_result(demisto.get(response,'result.id'), 'close', client) else 'FAIL'
return {
'outputs': result,
'outputs_prefix': 'Nozomi.Ids',
'outputs_key_field': '',
'readable_output': human_readable
}
|
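The modified line reads the nested job id through a dotted-path lookup; a standalone sketch with a hypothetical safe_get helper (not the actual Demisto helper) illustrates why that is safer than chained indexing:

def safe_get(obj, dotted_path, default=None):
    """Walk a dict along 'a.b.c', returning default if any hop is missing."""
    current = obj
    for part in dotted_path.split("."):
        if not isinstance(current, dict) or part not in current:
            return default
        current = current[part]
    return current

response_ok = {"result": {"id": 42}}
response_bad = {"error": "job not created"}

print(safe_get(response_ok, "result.id"))   # 42
print(safe_get(response_bad, "result.id"))  # None instead of a KeyError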
32,758 |
def interpreter_version(modules, keys):
"""Returns the interpreter version as a string."""
if 'datadog.tracer.lang_version' not in keys:
return {}
return {
'datadog.tracer.lang_version': platform.python_version()
}
|
def interpreter_version(modules, keys):
"""Returns the interpreter version as a string."""
if 'datadog.tracer.lang_version' not in keys:
return {}
return {
'datadog.tracer.lang_version': platform.python_version(),
}
|
30,633 |
def alert_list_command(client: Client, args: dict):
group_results = args.get('group_results')
minimum_severity = args.get('minimum_severity')
create_time = {
'start': args.get('start_time'),
'end': args.get('end_time')
}
device_os_version = argToList(args.get('device_os_version'))
policy_id = argToList(args.get('policy_id'))
alert_tag = argToList(args.get('alert_tag'))
alert_id = argToList(args.get('alert_id'))
device_username = argToList(args.get('device_username'))
device_id = argToList(args.get('device_id'))
device_os = argToList(args.get('device_os'))
process_sha256 = argToList(args.get('process_sha256'))
policy_name = argToList(args.get('policy_name'))
reputation = argToList(args.get('reputation'))
alert_type = argToList(args.get('alert_type'))
alert_category = argToList(args.get('alert_category'))
workflow = argToList(args.get('workflow'))
device_name = argToList(args.get('device_name'))
process_name = argToList(args.get('process_name'))
sort_field = args.get('sort_field')
sort_order = args.get('sort_order')
limit = args.get('limit')
contents = []
result = client.search_alerts_request(group_results, minimum_severity, create_time,
device_os_version, policy_id, alert_tag, alert_id, device_username,
device_id, device_os, process_sha256, policy_name,
reputation, alert_type, alert_category, workflow, device_name,
process_name, sort_field, sort_order, limit)
alerts = result.get('results')
for alert in alerts:
contents.append({
'AlertID': alert.get('id'),
'CreateTime': alert.get('create_time'),
'DeviceName': alert.get('device_name'),
'DeviceOS': alert.get('device_os'),
'PolicyName': alert.get('policy_name'),
'ProcessName': alert.get('process_name'),
'Type': alert.get('type'),
'WorkflowState': alert.get('workflow').get('state')
})
readable_output = tableToMarkdown('Alerts list results', contents)
results = CommandResults(
outputs_prefix='CarbonBlackEEDR.Alert',
outputs_key_field='id',
outputs=alerts,
readable_output=readable_output,
raw_response=result
)
return results
|
def alert_list_command(client: Client, args: dict):
group_results = args.get('group_results')
minimum_severity = args.get('minimum_severity')
create_time = {
'start': args.get('start_time'),
'end': args.get('end_time')
}
device_os_version = argToList(args.get('device_os_version'))
policy_id = argToList(args.get('policy_id'))
alert_tag = argToList(args.get('alert_tag'))
alert_id = argToList(args.get('alert_id'))
device_username = argToList(args.get('device_username'))
device_id = argToList(args.get('device_id'))
device_os = argToList(args.get('device_os'))
process_sha256 = argToList(args.get('process_sha256'))
policy_name = argToList(args.get('policy_name'))
reputation = argToList(args.get('reputation'))
alert_type = argToList(args.get('alert_type'))
alert_category = argToList(args.get('alert_category'))
workflow = argToList(args.get('workflow'))
device_name = argToList(args.get('device_name'))
process_name = argToList(args.get('process_name'))
sort_field = args.get('sort_field')
sort_order = args.get('sort_order')
limit = args.get('limit')
contents = []
result = client.search_alerts_request(group_results, minimum_severity, create_time,
device_os_version, policy_id, alert_tag, alert_id, device_username,
device_id, device_os, process_sha256, policy_name,
reputation, alert_type, alert_category, workflow, device_name,
process_name, sort_field, sort_order, limit)
alerts = result.get('results')
for alert in alerts:
contents.append({
'AlertID': alert.get('id'),
'CreateTime': alert.get('create_time'),
'DeviceName': alert.get('device_name'),
'DeviceOS': alert.get('device_os'),
'PolicyName': alert.get('policy_name'),
'ProcessName': alert.get('process_name'),
'Type': alert.get('type'),
'WorkflowState': alert.get('workflow', {}).get('state')
})
readable_output = tableToMarkdown('Alerts list results', contents)
results = CommandResults(
outputs_prefix='CarbonBlackEEDR.Alert',
outputs_key_field='id',
outputs=alerts,
readable_output=readable_output,
raw_response=result
)
return results
|
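A short sketch with toy alert dicts (not real Carbon Black responses) of why the modified line passes {} as the fallback before the second .get:

alert_with_workflow = {"id": "a1", "workflow": {"state": "OPEN"}}
alert_without_workflow = {"id": "a2"}

# Chaining .get with a {} fallback never explodes on a missing key
print(alert_with_workflow.get("workflow", {}).get("state"))     # 'OPEN'
print(alert_without_workflow.get("workflow", {}).get("state"))  # None

# Without the fallback the second call would be None.get("state"),
# which raises AttributeError:
# alert_without_workflow.get("workflow").get("state")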
31,259 |
def main() -> None:
params: any = demisto.params()
host: str = params.get('host')
port: int = int(params.get('port'))
args: any = demisto.args()
if "host" in args and "port" in args:
host: str = args.get('host')
port: int = int(args.get('port'))
command: str = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'arduino-set-pin': arduino_set_pin_command,
'arduino-get-pin': arduino_get_pin_command,
'arduino-send-data': arduino_send_data_command
}
# try:
server: Server = Server(host, port)
if demisto.command() == 'test-module':
return_results(test_module(server))
elif command in commands:
return_results(commands[command](server, args))
else:
return_error(f"{command} command not recognised")
|
def main() -> None:
params: any = demisto.params()
host: str = params.get('host')
port: int = int(params.get('port'))
args: any = demisto.args()
if "host" in args and "port" in args:
host: str = args.get('host')
port = int(args.get('port'))
command: str = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'arduino-set-pin': arduino_set_pin_command,
'arduino-get-pin': arduino_get_pin_command,
'arduino-send-data': arduino_send_data_command
}
# try:
server: Server = Server(host, port)
if demisto.command() == 'test-module':
return_results(test_module(server))
elif command in commands:
return_results(commands[command](server, args))
else:
return_error(f"{command} command not recognised")
|
1,447 |
def test_tsne_with_mahalanobis_distance():
"""Make sure that tha mahalanobis distance works with metric_params
properly set and it doesn't otherwise"""
random_state = check_random_state(0)
n_features = 10
n_embedding = 3
n_samples = 500
X = random_state.randn(n_samples, n_features)
# 1. raises error here (original issue)
tsne = TSNE(verbose=1, perplexity=40, n_iter=250, learning_rate=50,
n_components=n_embedding, random_state=0, metric='mahalanobis')
ref = "Must provide either V or VI for Mahalanobis distance"
with pytest.raises(ValueError, match=ref):
tsne.fit_transform(X)
# 2. check for correct answer
precomputed_X = squareform(pdist(X, metric='mahalanobis'), checks=True)
ref = TSNE(verbose=1, perplexity=40, n_iter=250, learning_rate=50,
n_components=n_embedding, random_state=0,
metric='precomputed').fit_transform(precomputed_X)
now = TSNE(verbose=1, perplexity=40, n_iter=250, learning_rate=50,
n_components=n_embedding, random_state=0, metric='mahalanobis',
metric_params={'V': np.cov(X.T)}).fit_transform(X)
assert_array_equal(ref, now)
|
def test_tsne_with_mahalanobis_distance():
"""Make sure that mahalanobis distance works with metric_params
properly set and it doesn't otherwise"""
random_state = check_random_state(0)
n_features = 10
n_embedding = 3
n_samples = 500
X = random_state.randn(n_samples, n_features)
# 1. raises error here (original issue)
tsne = TSNE(verbose=1, perplexity=40, n_iter=250, learning_rate=50,
n_components=n_embedding, random_state=0, metric='mahalanobis')
ref = "Must provide either V or VI for Mahalanobis distance"
with pytest.raises(ValueError, match=ref):
tsne.fit_transform(X)
# 2. check for correct answer
precomputed_X = squareform(pdist(X, metric='mahalanobis'), checks=True)
ref = TSNE(verbose=1, perplexity=40, n_iter=250, learning_rate=50,
n_components=n_embedding, random_state=0,
metric='precomputed').fit_transform(precomputed_X)
now = TSNE(verbose=1, perplexity=40, n_iter=250, learning_rate=50,
n_components=n_embedding, random_state=0, metric='mahalanobis',
metric_params={'V': np.cov(X.T)}).fit_transform(X)
assert_array_equal(ref, now)
|
57,509 |
def encode_default(dft: Any) -> Any:
if type(dft) in (int, float, str):
return dft
elif sequence_like(dft):
t = dft.__class__
seq_args = (encode_default(v) for v in dft)
return t(*seq_args) if is_namedtuple(t) else t(seq_args)
elif isinstance(dft, dict):
return {encode_default(k): encode_default(v) for k, v in dft.items()}
elif dft is None:
return None
else:
return pydantic_encoder(dft)
|
def encode_default(dft: Any) -> Any:
if isinstance(dft, Enum):
return dft.value
elif isinstance(dft, (int, float, str)):
return dft
elif sequence_like(dft):
t = dft.__class__
seq_args = (encode_default(v) for v in dft)
return t(*seq_args) if is_namedtuple(t) else t(seq_args)
elif isinstance(dft, dict):
return {encode_default(k): encode_default(v) for k, v in dft.items()}
elif dft is None:
return None
else:
return pydantic_encoder(dft)
|
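A simplified stand-in (only the dispatch order, not pydantic's full encoder) suggesting why the added Enum branch runs before the int/float/str check: members of value-backed enums should contribute their underlying value:

from enum import Enum
from typing import Any

class Color(str, Enum):
    RED = "red"

def encode_default_sketch(dft: Any) -> Any:
    # Check Enum first: Color.RED is also a str subclass, but the schema
    # should carry the plain value, not the enum member itself.
    if isinstance(dft, Enum):
        return dft.value
    elif isinstance(dft, (int, float, str)):
        return dft
    return dft  # remaining branches omitted in this sketch

print(encode_default_sketch(Color.RED))  # red
print(encode_default_sketch("red"))      # red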
56,428 |
def export(data, kind=COMMODITY_DEFAULT, filename=None) -> None:
"""
Export commodity data from the given CAPI data.
:param data: CAPI data.
:param kind: The type of file to write.
:param filename: Filename to write to, or None for a standard format name.
:return:
"""
querytime = config.get_int('querytime', default=int(time.time()))
if not filename:
filename_system = data['lastSystem']['name'].strip(),
filename_starport = data['lastStarport']['name'].strip(),
filename_time = time.strftime('%Y-%m-%dT%H.%M.%S', time.localtime(querytime)),
filename_kind = 'csv'
filename = f'{filename_system}.{filename_starport}.{filename_time}.{filename_kind}'
filename = join(config.get_str('outdir'), filename)
if kind == COMMODITY_CSV:
sep = ';' # BUG: for fixing later after cleanup
header = sep.join(('System', 'Station', 'Commodity', 'Sell', 'Buy', 'Demand', '', 'Supply', '', 'Date', '\n'))
rowheader = sep.join((data['lastSystem']['name'], data['lastStarport']['name']))
else:
sep = ','
header = sep.join(
('System', 'Station', 'Commodity', 'Sell', 'Buy', 'Demand', '', 'Supply', '', 'Average', 'FDevID', 'Date\n')
)
rowheader = sep.join((data['lastSystem']['name'], data['lastStarport']['name']))
with open(filename, 'wt') as h: # codecs can't automatically handle line endings, so encode manually where required
h.write(header)
for commodity in data['lastStarport']['commodities']:
line = sep.join((
rowheader,
commodity['name'],
commodity['sellPrice'] and str(int(commodity['sellPrice'])) or '',
commodity['buyPrice'] and str(int(commodity['buyPrice'])) or '',
str(int(commodity['demand'])) if commodity['demandBracket'] else '',
bracketmap[commodity['demandBracket']],
str(int(commodity['stock'])) if commodity['stockBracket'] else '',
bracketmap[commodity['stockBracket']]
))
if kind == COMMODITY_DEFAULT:
line = sep.join((
line,
str(int(commodity['meanPrice'])),
str(commodity['id']),
data['timestamp'] + '\n')
)
else:
line = sep.join((line, data['timestamp'] + '\n'))
h.write(line)
|
def export(data, kind=COMMODITY_DEFAULT, filename=None) -> None:
"""
Export commodity data from the given CAPI data.
:param data: CAPI data.
:param kind: The type of file to write.
:param filename: Filename to write to, or None for a standard format name.
:return:
"""
querytime = config.get_int('querytime', default=int(time.time()))
if not filename:
filename_system = data['lastSystem']['name'].strip(),
filename_starport = data['lastStarport']['name'].strip(),
filename_time = time.strftime('%Y-%m-%dT%H.%M.%S', time.localtime(querytime)),
filename_kind = 'csv'
filename = f'{filename_system}.{filename_starport}.{filename_time}.{filename_kind}'
filename = join(config.get_str('outdir'), filename)
if kind == COMMODITY_CSV:
sep = ';' # BUG: for fixing later after cleanup
header = sep.join(('System', 'Station', 'Commodity', 'Sell', 'Buy', 'Demand', '', 'Supply', '', 'Date', '\n'))
rowheader = sep.join((data['lastSystem']['name'], data['lastStarport']['name']))
else:
sep = ','
header = sep.join(
('System', 'Station', 'Commodity', 'Sell', 'Buy', 'Demand', '', 'Supply', '', 'Average', 'FDevID', 'Date\n')
)
rowheader = sep.join((data['lastSystem']['name'], data['lastStarport']['name']))
with open(filename, 'wt') as h: # codecs can't automatically handle line endings, so encode manually where required
h.write(header)
for commodity in data['lastStarport']['commodities']:
line = sep.join((
rowheader,
commodity['name'],
commodity['sellPrice'] and str(int(commodity['sellPrice'])) or '',
commodity['buyPrice'] and str(int(commodity['buyPrice'])) or '',
str(int(commodity['demand'])) if commodity['demandBracket'] else '',
bracketmap[commodity['demandBracket']],
str(int(commodity['stock'])) if commodity['stockBracket'] else '',
bracketmap[commodity['stockBracket']]
))
if kind == COMMODITY_DEFAULT:
                line = sep.join((
                    line,
                    str(int(commodity['meanPrice'])),
                    str(commodity['id']),
                    data['timestamp'] + '\n',
                ))
else:
line = sep.join((line, data['timestamp'] + '\n'))
h.write(line)
|
1,373 |
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Generally, ``y`` is in a multilabel format
if it has the following three properties
1. It has exactly two dimensions
2. Its second dimension has least 2 elements
3. Its data type is either bool, int or unsign int
Parameters
----------
y : an array-like object of target values. y can be a sparse matrix too.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
>>> is_multilabel(None)
False
>>> is_multilabel([])
False
>>> is_multilabel([[1, 2], [1, 1]])
True
>>> is_multilabel(np.array([[1, 2], [3, 1]]))
False
"""
if issparse(y):
if not (y.ndim == 2 and y.shape[1] > 1):
return False
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in ('b', 'i', 'u') or # bool, int, uint
_is_integral_float(np.unique(y.data))))
y = np.asarray(y)
if y.ndim != 2 or y.shape[1] < 2:
return False
labels = np.unique(y)
return len(labels) < 3 and \
(y.dtype.kind in ('b', 'i', 'u') or _is_integral_float(labels))
|
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Generally, ``y`` is in a multilabel format
if it has the following three properties
1. It has exactly two dimensions
2. Its second dimension has least 2 elements
3. Its data type is either bool, int or unsign int
Parameters
----------
y : an array-like object of target values. y can be a sparse matrix too.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
>>> is_multilabel(None)
False
>>> is_multilabel([])
False
>>> is_multilabel([[1, 2], [1, 1]])
True
>>> is_multilabel(np.array([[1, 2], [3, 1]]))
False
"""
if issparse(y):
if y.ndim != 2 or y.shape[1] < 2:
return False
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in ('b', 'i', 'u') or # bool, int, uint
_is_integral_float(np.unique(y.data))))
y = np.asarray(y)
if y.ndim != 2 or y.shape[1] < 2:
return False
labels = np.unique(y)
return len(labels) < 3 and \
(y.dtype.kind in ('b', 'i', 'u') or _is_integral_float(labels))
|
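The reworked sparse branch is a De Morgan rewrite of the old condition; a quick brute-force check in plain Python (no SciPy needed) confirms the two forms agree on every small shape:

# old: not (ndim == 2 and ncols > 1)    new: ndim != 2 or ncols < 2
for ndim in (1, 2, 3):
    for ncols in (0, 1, 2, 5):
        old = not (ndim == 2 and ncols > 1)
        new = ndim != 2 or ncols < 2
        assert old == new, (ndim, ncols)
print("both conditions are equivalent")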
5,750 |
def poisson_means_test(k1, n1, k2, n2, diff=0, alternative='two-sided'):
r"""
Calculates the poisson mean test, the "E-test", for the mean difference of
two samples that follow a Poisson distribution from descriptive statistics.
This is a two-sided test. The null hypothesis is that two independent
samples have identical average (expected) values.
Let :math:`X_{11},...,X_{1n_1}` and :math:`X_{21},...,X_{2n_2}` be
independent samples from distributions :math:`Poisson(\lambda_1)` and
:math:`Poisson(\lambda_2)`. It is well known that :math:`X_1`
and :math:`X_2` are independent:
.. math:: X_1 = \sum_{i=1}^{n_1} X_{1i} \sim Poisson(n_1\lambda_1)
.. math:: X_2 = \sum_{i=1}^{n_2} X_{2i} \sim Poisson(n_2\lambda_2)
Let `count1` and `count2` be the observed values of :math:`X_1` and
:math:`X_2`, respectively. The null hypothesis and alternative
hypothesis under comparison are
.. math::
H_0: \lambda_1 = \lambda_2 + \mathtt{diff} \quad vs. \quad
H_a: \lambda_1 \ne \lambda_2 + \mathtt{diff}
for ``alternative=two-sided``, where :math:`\mathtt{diff} \ge 0`.
Parameters
----------
k1 : int
Sample values of interest from sample 1.
n1: int
Sample size from sample 1.
k2 : int
Sample values of interest from sample 2.
n2: int
Sample size from sample 2.
diff : int or float, optional
The difference of mean between two samples under the null hypothesis
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': :math:`\lambda_1 \ne \lambda_2 + \mathtt{diff}`
* 'less': :math:`\lambda_1 \le \lambda_2 + \mathtt{diff}`
* 'greater': :math:`\lambda_1 \ge \lambda_2 + \mathtt{diff}`
Returns
-------
statistic : float
The test statistic calculated from observed samples
pvalue : float
The associated p-value based on the estimated p-value of the
standardized difference.
Notes
-----
A benefit of the E-test is that it maintains its power even
with smaller sample sizes which can reduce sampling costs [1]_. It has
been evaluated and determined to be more powerful than the comparable
C-test, sometimes referred to as the poisson exact test.
References
----------
.. [1] Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test for
comparing two Poisson means. Journal of Statistical Planning and
Inference, 119(1), 23-35.
.. [2] Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results in
testing samples from Poisson series: With an application to testing
clover seed for dodder. Biometrika, 31(3/4), 313-323.
Examples
--------
Suppose that a gardener wishes to test the number of dodder seeds, a weed,
in a sack of clover seeds that they buy from a seed company.
A 100 gram sample is drawn from the sack before being shipped to the
gardener. The sample is analyzed, and it is found to contain no dodder
seeds; that is, `k1` is 0. However, upon arrival, the gardener draws
another 100 gram sample from the sack. This time, three dodder seeds are
found in the sample; that is, `k2` is 3. The gardener would like to
know if the difference between is significant and not due to chance. The
null hypothesis is that the difference between the two samples is merely
due to chance, or that :math:`\lambda_1 = \lambda_2 + \mathtt{diff}`
where :math:`\mathtt{diff} = 0`. The alternative hypothesis is that the
difference is not due to chance, or :math:`\lambda_1 \ne \lambda_2 + 0`.
The gardener selects a significance level of 5% to reject the null
hypothesis in favor of the alternative [2]_.
>>> res = stats.poisson_means_test(0, 100, 3, 100)
>>> res.statistic, res.pvalue
(-1.7320508075688772, 0.08837900929018157)
The p-value is .088, indicating a near 9% chance of observing a value of
the test statistic under the null hypothesis. This exceeds 5%, so the
gardener does not reject the null hypothesis as the difference cannot be
regarded as significant at this level.
"""
_chck_args_poisson_mean_test(k1, n1, k2, n2, diff, alternative)
# "for a given k_1 and k_2, an estimate of \lambda_2 is given by" [1] (3.4)
lmbd_hat2 = ((k1 + k2) / (n1 + n2) - diff * n1 / (n1 + n2))
# "\hat{\lambda_{2k}} may be less than or equal to zero ... and in this
# case the null hypothesis cannot be rejected ... [and] it is not necessary
# to compute the p-value". [1] page 26 below eq. (3.6).
if lmbd_hat2 <= 0:
return PoissonMeansTestResult(0, 1)
# the unbiased variance estimate [1] (3.2)
var = k1 / (n1 ** 2) + k2 / (n2 ** 2)
# the _observed_ pivot statistic from the input. It follows the
# unnumbered equation following equation (3.3) This is used later in
# comparison with the computed pivot statistics in an indicator function.
t_k1k2 = (k1 / n1 - k2 / n2 - diff) / np.sqrt(var)
# equation (3.5) of [1] is lengthy, so it is broken into several parts,
# beginning here. Note that the probability mass function of poisson is
# exp^(-\mu)*\mu^k/k!, so and this is called with shape \mu, here noted
# here as nlmbd_hat*. The strategy for evaluating the double summation in
# (3.5) is to create two arrays of the values of the two products inside
# the summation and then broadcast them together into a matrix, and then
# sum across the entire matrix.
# compute constants (as seen in the first and second separated products in
# (3.5).). (This is the shape (\mu) parameter of the poisson distribution.)
nlmbd_hat1 = n1 * (lmbd_hat2 + diff)
nlmbd_hat2 = n2 * lmbd_hat2
# determine summation bounds for tail ends of distribution rather than
# summing to infinity. `x1*` is for the outer sum and `x2*` is the inner
# sum
x1_lb, x1_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat1)
x2_lb, x2_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat2)
# construct arrays to function as the x_1 and x_2 counters on the summation
# in (3.5). `x1` is in columns and `x2` is in rows to allow for
# broadcasting.
x1 = np.arange(x1_lb, x1_ub + 1)
x2 = np.arange(x2_lb, x2_ub + 1)[:, None]
# these are the two products in equation (3.5) with `prob_x1` being the
# first (left side) and `prob_x2` being the second (right side). (To
# make as clear as possible: the 1st contains a "+ d" term, the 2nd does
# not.)
prob_x1 = distributions.poisson.pmf(x1, nlmbd_hat1)
prob_x2 = distributions.poisson.pmf(x2, nlmbd_hat2)
# compute constants for use in the the "pivot statistic" per the
# unnumbered equation following (3.3).
lmbd_x1 = x1 / n1
lmbd_x2 = x2 / n2
lmbds_diff = lmbd_x1 - lmbd_x2 - diff
var_x1x2 = lmbd_x1 / n1 + lmbd_x2 / n2
# this is the 'pivot statistic' for use in the indicator of the summation
# (left side of "I[.]"). Before dividing, mask zero-elements in the
# denominator with infinity so that they are `false` in the indicator.
mask_out_invalid = (np.abs(lmbd_x1 - lmbd_x2) > diff
if alternative == 'two-sided' else lmbds_diff > 0)
var_x1x2[~mask_out_invalid] = np.inf
t_x1x2 = lmbds_diff / np.sqrt(var_x1x2)
if alternative == 'two-sided':
alternative_comparison = lambda x, y: np.abs(x) >= np.abs(y)
elif alternative == 'less':
alternative_comparison = lambda x, y: np.less_equal(x, y)
else:
alternative_comparison = lambda x, y: np.less_equal(x, y)
# `[indicator]` implements the "I[.] ... the indicator function" per
# the paragraph following equation (3.5).
indicator = alternative_comparison(t_x1x2, t_k1k2)
# multiply all combinations of the products together, exclude terms
# based on the `indicator` and then sum. (3.5)
pvalue = np.sum((prob_x1 * prob_x2)[indicator])
return PoissonMeansTestResult(t_k1k2, pvalue)
|
def poisson_means_test(k1, n1, k2, n2, *, diff=0, alternative='two-sided'):
r"""
Calculates the poisson mean test, the "E-test", for the mean difference of
two samples that follow a Poisson distribution from descriptive statistics.
This is a two-sided test. The null hypothesis is that two independent
samples have identical average (expected) values.
Let :math:`X_{11},...,X_{1n_1}` and :math:`X_{21},...,X_{2n_2}` be
independent samples from distributions :math:`Poisson(\lambda_1)` and
:math:`Poisson(\lambda_2)`. It is well known that :math:`X_1`
and :math:`X_2` are independent:
.. math:: X_1 = \sum_{i=1}^{n_1} X_{1i} \sim Poisson(n_1\lambda_1)
.. math:: X_2 = \sum_{i=1}^{n_2} X_{2i} \sim Poisson(n_2\lambda_2)
Let `count1` and `count2` be the observed values of :math:`X_1` and
:math:`X_2`, respectively. The null hypothesis and alternative
hypothesis under comparison are
.. math::
H_0: \lambda_1 = \lambda_2 + \mathtt{diff} \quad vs. \quad
H_a: \lambda_1 \ne \lambda_2 + \mathtt{diff}
for ``alternative=two-sided``, where :math:`\mathtt{diff} \ge 0`.
Parameters
----------
k1 : int
Sample values of interest from sample 1.
n1: int
Sample size from sample 1.
k2 : int
Sample values of interest from sample 2.
n2: int
Sample size from sample 2.
diff : int or float, optional
The difference of mean between two samples under the null hypothesis
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': :math:`\lambda_1 \ne \lambda_2 + \mathtt{diff}`
* 'less': :math:`\lambda_1 \le \lambda_2 + \mathtt{diff}`
* 'greater': :math:`\lambda_1 \ge \lambda_2 + \mathtt{diff}`
Returns
-------
statistic : float
The test statistic calculated from observed samples
pvalue : float
The associated p-value based on the estimated p-value of the
standardized difference.
Notes
-----
A benefit of the E-test is that it maintains its power even
with smaller sample sizes which can reduce sampling costs [1]_. It has
been evaluated and determined to be more powerful than the comparable
C-test, sometimes referred to as the poisson exact test.
References
----------
.. [1] Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test for
comparing two Poisson means. Journal of Statistical Planning and
Inference, 119(1), 23-35.
.. [2] Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results in
testing samples from Poisson series: With an application to testing
clover seed for dodder. Biometrika, 31(3/4), 313-323.
Examples
--------
Suppose that a gardener wishes to test the number of dodder seeds, a weed,
in a sack of clover seeds that they buy from a seed company.
A 100 gram sample is drawn from the sack before being shipped to the
gardener. The sample is analyzed, and it is found to contain no dodder
seeds; that is, `k1` is 0. However, upon arrival, the gardener draws
another 100 gram sample from the sack. This time, three dodder seeds are
found in the sample; that is, `k2` is 3. The gardener would like to
know if the difference between is significant and not due to chance. The
null hypothesis is that the difference between the two samples is merely
due to chance, or that :math:`\lambda_1 = \lambda_2 + \mathtt{diff}`
where :math:`\mathtt{diff} = 0`. The alternative hypothesis is that the
difference is not due to chance, or :math:`\lambda_1 \ne \lambda_2 + 0`.
The gardener selects a significance level of 5% to reject the null
hypothesis in favor of the alternative [2]_.
>>> res = stats.poisson_means_test(0, 100, 3, 100)
>>> res.statistic, res.pvalue
(-1.7320508075688772, 0.08837900929018157)
The p-value is .088, indicating a near 9% chance of observing a value of
the test statistic under the null hypothesis. This exceeds 5%, so the
gardener does not reject the null hypothesis as the difference cannot be
regarded as significant at this level.
"""
_chck_args_poisson_mean_test(k1, n1, k2, n2, diff, alternative)
# "for a given k_1 and k_2, an estimate of \lambda_2 is given by" [1] (3.4)
lmbd_hat2 = ((k1 + k2) / (n1 + n2) - diff * n1 / (n1 + n2))
# "\hat{\lambda_{2k}} may be less than or equal to zero ... and in this
# case the null hypothesis cannot be rejected ... [and] it is not necessary
# to compute the p-value". [1] page 26 below eq. (3.6).
if lmbd_hat2 <= 0:
return PoissonMeansTestResult(0, 1)
# the unbiased variance estimate [1] (3.2)
var = k1 / (n1 ** 2) + k2 / (n2 ** 2)
# the _observed_ pivot statistic from the input. It follows the
# unnumbered equation following equation (3.3) This is used later in
# comparison with the computed pivot statistics in an indicator function.
t_k1k2 = (k1 / n1 - k2 / n2 - diff) / np.sqrt(var)
# equation (3.5) of [1] is lengthy, so it is broken into several parts,
# beginning here. Note that the probability mass function of poisson is
# exp^(-\mu)*\mu^k/k!, so and this is called with shape \mu, here noted
# here as nlmbd_hat*. The strategy for evaluating the double summation in
# (3.5) is to create two arrays of the values of the two products inside
# the summation and then broadcast them together into a matrix, and then
# sum across the entire matrix.
# compute constants (as seen in the first and second separated products in
# (3.5).). (This is the shape (\mu) parameter of the poisson distribution.)
nlmbd_hat1 = n1 * (lmbd_hat2 + diff)
nlmbd_hat2 = n2 * lmbd_hat2
# determine summation bounds for tail ends of distribution rather than
# summing to infinity. `x1*` is for the outer sum and `x2*` is the inner
# sum
x1_lb, x1_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat1)
x2_lb, x2_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat2)
# construct arrays to function as the x_1 and x_2 counters on the summation
# in (3.5). `x1` is in columns and `x2` is in rows to allow for
# broadcasting.
x1 = np.arange(x1_lb, x1_ub + 1)
x2 = np.arange(x2_lb, x2_ub + 1)[:, None]
# these are the two products in equation (3.5) with `prob_x1` being the
# first (left side) and `prob_x2` being the second (right side). (To
# make as clear as possible: the 1st contains a "+ d" term, the 2nd does
# not.)
prob_x1 = distributions.poisson.pmf(x1, nlmbd_hat1)
prob_x2 = distributions.poisson.pmf(x2, nlmbd_hat2)
# compute constants for use in the the "pivot statistic" per the
# unnumbered equation following (3.3).
lmbd_x1 = x1 / n1
lmbd_x2 = x2 / n2
lmbds_diff = lmbd_x1 - lmbd_x2 - diff
var_x1x2 = lmbd_x1 / n1 + lmbd_x2 / n2
# this is the 'pivot statistic' for use in the indicator of the summation
# (left side of "I[.]"). Before dividing, mask zero-elements in the
# denominator with infinity so that they are `false` in the indicator.
mask_out_invalid = (np.abs(lmbd_x1 - lmbd_x2) > diff
if alternative == 'two-sided' else lmbds_diff > 0)
var_x1x2[~mask_out_invalid] = np.inf
t_x1x2 = lmbds_diff / np.sqrt(var_x1x2)
if alternative == 'two-sided':
alternative_comparison = lambda x, y: np.abs(x) >= np.abs(y)
elif alternative == 'less':
alternative_comparison = lambda x, y: np.less_equal(x, y)
else:
alternative_comparison = lambda x, y: np.less_equal(x, y)
# `[indicator]` implements the "I[.] ... the indicator function" per
# the paragraph following equation (3.5).
indicator = alternative_comparison(t_x1x2, t_k1k2)
# multiply all combinations of the products together, exclude terms
# based on the `indicator` and then sum. (3.5)
pvalue = np.sum((prob_x1 * prob_x2)[indicator])
return PoissonMeansTestResult(t_k1k2, pvalue)
|
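A minimal NumPy/SciPy sketch of the broadcasting strategy described in the comments of the entry above. The shape parameters stand in for nlmbd_hat1/nlmbd_hat2 and the indicator condition is a stand-in; the point is only to show how the double summation in (3.5) reduces to one masked matrix sum.

import numpy as np
from scipy.stats import poisson

# hypothetical shape parameters standing in for nlmbd_hat1 and nlmbd_hat2
mu1, mu2 = 12.0, 8.0

# truncate the infinite sums to the bulk of each distribution, as above
x1 = np.arange(poisson.ppf(1e-10, mu1), poisson.ppf(1 - 1e-16, mu1) + 1)
x2 = np.arange(poisson.ppf(1e-10, mu2), poisson.ppf(1 - 1e-16, mu2) + 1)[:, None]

prob_x1 = poisson.pmf(x1, mu1)   # shape (n1,)   -> varies along columns
prob_x2 = poisson.pmf(x2, mu2)   # shape (n2, 1) -> varies along rows

joint = prob_x1 * prob_x2        # broadcasts to an (n2, n1) matrix of all product pairs
print(joint.sum())               # ~1.0: the full (truncated) double sum

# an indicator mask keeps only the "extreme enough" terms; this condition is a
# stand-in used purely to show the mask-then-sum step of equation (3.5)
indicator = (x1 / mu1 - x2 / mu2) >= 0
print(joint[indicator].sum())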
6,209 |
def _makeComponentDict(component, setupDict, installedDict, compType, system, runitDict):
componentDict = {
'Setup': component in setupDict.get(compType, []).get(system, []),
'Installed': component in installedDict.get(compType, []).get(system, []),
'RunitStatus': 'Unknown',
'Timeup': 0,
'PID': 0,
}
compDir = system + '_' + component
if compDir in runitDict:
componentDict['RunitStatus'] = runitDict[compDir]['RunitStatus']
componentDict['Timeup'] = runitDict[compDir]['Timeup']
componentDict['PID'] = _safeInt(runitDict[compDir].get('PID', -1))
componentDict['CPU'] = _safeFloat(runitDict[compDir].get('CPU', -1))
componentDict['MEM'] = _safeFloat(runitDict[compDir].get('MEM', -1))
componentDict['RSS'] = _safeFloat(runitDict[compDir].get('RSS', -1))
componentDict['VSZ'] = _safeFloat(runitDict[compDir].get('VSZ', -1))
return componentDict
|
def _makeComponentDict(component, setupDict, installedDict, compType, system, runitDict):
componentDict = {
'Setup': component in setupDict.get(compType, {}).get(system, {}),
'Installed': component in installedDict.get(compType, []).get(system, []),
'RunitStatus': 'Unknown',
'Timeup': 0,
'PID': 0,
}
compDir = system + '_' + component
if compDir in runitDict:
componentDict['RunitStatus'] = runitDict[compDir]['RunitStatus']
componentDict['Timeup'] = runitDict[compDir]['Timeup']
componentDict['PID'] = _safeInt(runitDict[compDir].get('PID', -1))
componentDict['CPU'] = _safeFloat(runitDict[compDir].get('CPU', -1))
componentDict['MEM'] = _safeFloat(runitDict[compDir].get('MEM', -1))
componentDict['RSS'] = _safeFloat(runitDict[compDir].get('RSS', -1))
componentDict['VSZ'] = _safeFloat(runitDict[compDir].get('VSZ', -1))
return componentDict
|
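A small illustration, with a hypothetical setupDict, of why the entry above changes the default of the chained .get() lookup from a list to a dict: calling .get() on a list default raises, while an empty dict keeps the chain safe.

setup_dict = {}  # hypothetical setupDict with no entry for the component type

try:
    setup_dict.get("Agents", []).get("Framework", [])
except AttributeError as exc:
    print(exc)  # 'list' object has no attribute 'get'

# an empty dict as the default keeps the chained lookup working
print("MyComponent" in setup_dict.get("Agents", {}).get("Framework", {}))  # False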
34,535 |
def create_interpreter(
obj: Union[
"rasa.shared.nlu.interpreter.NaturalLanguageInterpreter",
EndpointConfig,
Text,
None,
]
) -> "rasa.shared.nlu.interpreter.NaturalLanguageInterpreter":
"""Factory to create an natural language interpreter."""
if isinstance(obj, rasa.shared.nlu.interpreter.NaturalLanguageInterpreter):
return obj
elif isinstance(obj, str) and os.path.exists(obj):
return RasaNLUInterpreter(model_directory=obj)
elif isinstance(obj, str) and not os.path.exists(obj):
# user passed in a string, but file does not exist
logger.warning(
f"No local NLU model '{obj}' found. Using RegexInterpreter instead."
)
return rasa.shared.nlu.interpreter.RegexInterpreter()
else:
return _create_from_endpoint_config(obj)
|
def create_interpreter(
obj: Union[
rasa.shared.nlu.interpreter.NaturalLanguageInterpreter,
EndpointConfig,
Text,
None,
]
) -> "rasa.shared.nlu.interpreter.NaturalLanguageInterpreter":
"""Factory to create an natural language interpreter."""
if isinstance(obj, rasa.shared.nlu.interpreter.NaturalLanguageInterpreter):
return obj
elif isinstance(obj, str) and os.path.exists(obj):
return RasaNLUInterpreter(model_directory=obj)
elif isinstance(obj, str) and not os.path.exists(obj):
# user passed in a string, but file does not exist
logger.warning(
f"No local NLU model '{obj}' found. Using RegexInterpreter instead."
)
return rasa.shared.nlu.interpreter.RegexInterpreter()
else:
return _create_from_endpoint_config(obj)
|
13,570 |
def extend_arnoldi(A, E, V, H, f, p):
"""Extend an existing Arnoldi factorization.
Assuming that the inputs `V`, `H` and `f` define an Arnoldi factorization of length
:math:`l` (see :func:`arnoldi`), computes matrices :math:`V_{l+p}` and :math:`H_{l+p}`
and a vector :math:`f_{l+p}` which extend the factorization to a length `l+p` Arnoldi
factorization.
Parameters
----------
A
The |Operator| A.
E
The |Operator| E.
V
The |VectorArray| V from the length :math:`l` Arnoldi factorization.
H
The |NumPy array| H from the length :math:`l` Arnoldi factorization.
f
The |VectorArray| f from the length :math:`l` Arnoldi factorization.
p
        The number of additional Arnoldi steps which are to be performed.
Returns
-------
V
A |VectorArray| whose columns span an orthogonal basis for R^(l+p).
H
A |NumPy array| which is an upper Hessenberg matrix.
f
A |VectorArray| which represents the residual vector of the Arnoldi factorzation.
"""
k = len(V)
res = f.l2_norm()[0]
H = np.pad(H, ((0, p), (0, p)), 'constant')
H[k, k - 1] = res
v = f * (1 / res)
# since i cannot append to the VectorArrayView V I copy it before appending...
# is there a better way to do this?
V = V.copy()
V.append(v)
for i in range(k, k + p):
v = E.apply_inverse(A.apply(v))
V.append(v)
_, R = gram_schmidt(V, return_R=True, atol=0, rtol=0, offset=len(V) - 1, copy=False)
H[:i + 2, i] = R[:k + p, i + 1]
v = V[-1]
return V[:k + p], H, v * R[k + p, k + p]
|
def extend_arnoldi(A, E, V, H, f, p):
"""Extend an existing Arnoldi factorization.
Assuming that the inputs `V`, `H` and `f` define an Arnoldi factorization of length
:math:`l` (see :func:`arnoldi`), computes matrices :math:`V_{l+p}` and :math:`H_{l+p}`
and a vector :math:`f_{l+p}` which extend the factorization to a length `l+p` Arnoldi
factorization.
Parameters
----------
A
The |Operator| A.
E
The |Operator| E.
V
The |VectorArray| V from the length :math:`l` Arnoldi factorization.
H
The |NumPy array| H from the length :math:`l` Arnoldi factorization.
f
The |VectorArray| f from the length :math:`l` Arnoldi factorization.
p
        The number of additional Arnoldi steps which are to be performed.
Returns
-------
V
A |VectorArray| whose columns span an orthogonal basis for R^(l+p).
H
A |NumPy array| which is an upper Hessenberg matrix.
f
A |VectorArray| which represents the residual vector of the Arnoldi factorization.
"""
k = len(V)
res = f.l2_norm()[0]
H = np.pad(H, ((0, p), (0, p)), 'constant')
H[k, k - 1] = res
v = f * (1 / res)
# since i cannot append to the VectorArrayView V I copy it before appending...
# is there a better way to do this?
V = V.copy()
V.append(v)
for i in range(k, k + p):
v = E.apply_inverse(A.apply(v))
V.append(v)
_, R = gram_schmidt(V, return_R=True, atol=0, rtol=0, offset=len(V) - 1, copy=False)
H[:i + 2, i] = R[:k + p, i + 1]
v = V[-1]
return V[:k + p], H, v * R[k + p, k + p]
|
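A plain-NumPy sketch of the same extension idea for dense matrices, assuming E is the identity and using classical Gram-Schmidt in place of pyMOR's gram_schmidt; it illustrates the recurrence, not pyMOR's API.

import numpy as np

def extend_arnoldi_dense(A, V, H, f, p):
    """Extend A @ V = V @ H + f @ e_k^T by p steps (V: n x k orthonormal, H: k x k)."""
    n, k = V.shape
    V = np.hstack([V, np.zeros((n, p))])
    H = np.pad(H, ((0, p), (0, p)), mode='constant')
    for j in range(k, k + p):
        beta = np.linalg.norm(f)
        H[j, j - 1] = beta          # new subdiagonal entry, as in `H[k, k - 1] = res` above
        V[:, j] = f / beta          # normalized residual becomes the next basis vector
        w = A @ V[:, j]
        h = V[:, :j + 1].T @ w      # classical Gram-Schmidt coefficients
        f = w - V[:, :j + 1] @ h    # new residual
        H[:j + 1, j] = h
    return V, H, f

# tiny usage example on a random matrix, starting from a length-1 factorization
rng = np.random.default_rng(0)
A = rng.standard_normal((8, 8))
v0 = rng.standard_normal(8)
V0 = (v0 / np.linalg.norm(v0))[:, None]
H0 = np.zeros((1, 1))
H0[0, 0] = V0[:, 0] @ A @ V0[:, 0]
f0 = A @ V0[:, 0] - V0[:, 0] * H0[0, 0]
V, H, f = extend_arnoldi_dense(A, V0, H0, f0, p=4)
print(np.allclose(A @ V, V @ H + np.outer(f, np.eye(5)[-1])))  # True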
45,976 |
def draw_convex_polygon(image: torch.Tensor, polygon: torch.Tensor, color: torch.Tensor):
r"""Draws convex polygons on a batch of image tensors.
Args:
image: is tensor of BxCxHxW.
polygon: represents polygons to draw in BxNx2
N is the number of points
2 is (x, y).
color: a Bx3 tensor.
Returns:
This operation modifies image inplace but also returns the drawn tensor for
        convenience with the same shape as the input BxCxHxW.
Example:
>>> img = torch.rand(2, 3, 10, 12)
>>> poly = torch.tensor([[[0, 0, 4, 4]], [[4, 4, 10, 10]]])
>>> color = torch.tensor([[0.5,0.5,0.5],[0.5,0.5,0.5]])
>>> out = draw_rectangle(img, poly, color)
"""
# TODO: implement optional linetypes for smooth edges
b_i, _, h_i, w_i, device_i = *image.shape, image.device
b_p, _, xy, device_p = *polygon.shape, polygon.device
b_c, _, device_c = *color.shape, color.device
if xy != 2:
raise AssertionError("Polygon vertices must be xy, i.e. 2-dimensional")
if not (b_i == b_p == b_c):
raise AssertionError("Image, polygon, and color must have same batch dimension")
if not (device_i == device_p == device_c):
raise AssertionError("Image, polygon, and color must have same device")
x_left, x_right = _get_convex_edges(polygon, h_i, w_i)
ws = torch.arange(w_i, device=x_left.device)[None, None, :]
fill_region = (ws >= x_left[..., :, None]) & (ws <= x_right[..., :, None])
image += fill_region[:, None] * color[..., None, None]
return image
|
def draw_polygon_convex(image: Tensor, polygon: Tensor, color: Tensor):
r"""Draws convex polygons on a batch of image tensors.
Args:
image: is tensor of BxCxHxW.
polygon: represents polygons to draw in BxNx2
N is the number of points
2 is (x, y).
color: a Bx3 tensor.
Returns:
This operation modifies image inplace but also returns the drawn tensor for
        convenience with the same shape as the input BxCxHxW.
Example:
>>> img = torch.rand(2, 3, 10, 12)
>>> poly = torch.tensor([[[0, 0, 4, 4]], [[4, 4, 10, 10]]])
>>> color = torch.tensor([[0.5,0.5,0.5],[0.5,0.5,0.5]])
>>> out = draw_rectangle(img, poly, color)
"""
# TODO: implement optional linetypes for smooth edges
b_i, _, h_i, w_i, device_i = *image.shape, image.device
b_p, _, xy, device_p = *polygon.shape, polygon.device
b_c, _, device_c = *color.shape, color.device
if xy != 2:
raise AssertionError("Polygon vertices must be xy, i.e. 2-dimensional")
if not (b_i == b_p == b_c):
raise AssertionError("Image, polygon, and color must have same batch dimension")
if not (device_i == device_p == device_c):
raise AssertionError("Image, polygon, and color must have same device")
x_left, x_right = _get_convex_edges(polygon, h_i, w_i)
ws = torch.arange(w_i, device=x_left.device)[None, None, :]
fill_region = (ws >= x_left[..., :, None]) & (ws <= x_right[..., :, None])
image += fill_region[:, None] * color[..., None, None]
return image
|
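A small PyTorch sketch of the scanline fill used above, with hypothetical per-row edge bounds standing in for the output of _get_convex_edges (which is not shown here).

import torch

# hypothetical left/right edge bounds per image row for one 6x8 image (B=1, H=6)
x_left = torch.tensor([[3., 2., 1., 1., 2., 3.]])
x_right = torch.tensor([[4., 5., 6., 6., 5., 4.]])

h, w = x_left.shape[1], 8
ws = torch.arange(w)[None, None, :]                                          # (1, 1, W)
fill_region = (ws >= x_left[..., :, None]) & (ws <= x_right[..., :, None])   # (B, H, W)

image = torch.zeros(1, 3, h, w)
color = torch.tensor([[1.0, 0.5, 0.25]])                                     # (B, 3)
image += fill_region[:, None] * color[..., None, None]                       # broadcast over channels
print(fill_region[0].int())                                                  # the filled convex region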
30,786 |
def import_ioc_without_approval(import_type, import_value, confidence="50", classification="Private",
threat_type="exploit", severity="low", ip_mapping=False, domain_mapping=False,
url_mapping=False, email_mapping=False, md5_mapping=False, tags=None):
"""
Imports indicators data to ThreatStream.
The data can be imported using one of three import_types: data-text (plain-text),
file-id of uploaded file to war room or URL.
"""
if not tags:
tags = ''
ip_mapping = ip_mapping == 'yes'
domain_mapping = domain_mapping == 'yes'
url_mapping = url_mapping == 'yes'
email_mapping = email_mapping == 'yes'
md5_mapping = md5_mapping == 'yes'
files = None
uploaded_file = None
data = {
'confidence': confidence,
'classification': classification,
'ip_mapping': ip_mapping,
'domain_mapping': domain_mapping,
'url_mapping': url_mapping,
'email_mapping': email_mapping,
'md5_mapping': md5_mapping,
'threat_type': threat_type,
'severity': severity,
'tags': tags
}
if import_type == 'file-id':
try:
# import_value will be entry id of uploaded file to war room
file_info = demisto.getFilePath(import_value)
except Exception:
return_error(F"Entry {import_value} does not contain a file.")
uploaded_file = open(file_info['path'], 'rb')
files = {'file': (file_info['name'], uploaded_file)}
params = build_params()
else:
if import_value == 'url':
params = build_params(url=import_value)
else:
params = build_params(datatext=import_value)
    # in case import_type is not file-id, http_request will receive None as files
    res = http_request("PATCH", "v1/intelligence", params=params, data=data, files=files)
    # closing the opened file if it exists
if uploaded_file:
uploaded_file.close()
# checking that response contains success key
if res.get('success', False):
imported_id = res.get('import_session_id', '')
ec = {'ThreatStream.Import.ImportID': imported_id}
return_outputs(F"The data was imported successfully. The ID of imported job is: {imported_id}", ec, res)
else:
return_outputs("The data was not imported. Check if valid arguments were passed", None)
|
def import_ioc_without_approval(import_type, import_value, confidence="50", classification="Private",
threat_type="exploit", severity="low", ip_mapping=False, domain_mapping=False,
url_mapping=False, email_mapping=False, md5_mapping=False, tags=None):
"""
Imports indicators data to ThreatStream.
The data can be imported using one of three import_types: data-text (plain-text),
file-id of uploaded file to war room or URL.
"""
if not tags:
tags = ''
ip_mapping = ip_mapping == 'yes'
domain_mapping = domain_mapping == 'yes'
url_mapping = url_mapping == 'yes'
email_mapping = email_mapping == 'yes'
md5_mapping = md5_mapping == 'yes'
files = None
uploaded_file = None
data = {
'confidence': confidence,
'classification': classification,
'ip_mapping': ip_mapping,
'domain_mapping': domain_mapping,
'url_mapping': url_mapping,
'email_mapping': email_mapping,
'md5_mapping': md5_mapping,
'threat_type': threat_type,
'severity': severity,
'tags': tags,
}
if import_type == 'file-id':
try:
# import_value will be entry id of uploaded file to war room
file_info = demisto.getFilePath(import_value)
except Exception:
return_error(F"Entry {import_value} does not contain a file.")
uploaded_file = open(file_info['path'], 'rb')
files = {'file': (file_info['name'], uploaded_file)}
params = build_params()
else:
if import_value == 'url':
params = build_params(url=import_value)
else:
params = build_params(datatext=import_value)
    # in case import_type is not file-id, http_request will receive None as files
    res = http_request("PATCH", "v1/intelligence", params=params, data=data, files=files)
    # closing the opened file if it exists
if uploaded_file:
uploaded_file.close()
# checking that response contains success key
if res.get('success', False):
imported_id = res.get('import_session_id', '')
ec = {'ThreatStream.Import.ImportID': imported_id}
return_outputs(F"The data was imported successfully. The ID of imported job is: {imported_id}", ec, res)
else:
return_outputs("The data was not imported. Check if valid arguments were passed", None)
|
58,683 |
def test_read_mixed_training_data_file(default_domain: Domain):
training_data_file = "data/test_mixed_yaml_training_data/training_data.yml"
reader = YAMLStoryReader(default_domain)
yaml_content = rasa.utils.io.read_yaml_file(training_data_file)
with pytest.warns(None) as record:
reader.read_from_parsed_yaml(yaml_content)
assert not len(record)
|
def test_read_mixed_training_data_file(default_domain: Domain):
training_data_file = "data/test_mixed_yaml_training_data/training_data.yml"
reader = YAMLStoryReader(default_domain)
yaml_content = rasa.utils.io.read_yaml_file(training_data_file)
with pytest.warns(None) as record:
reader.read_from_parsed_yaml(yaml_content)
assert not len(record)
|
22,056 |
def _pandas_dt_fix(x):
# see https://github.com/pandas-dev/pandas/issues/23276
# not sure which version this is fixed in
if isinstance(x, pa.lib.TimestampArray) or (
isinstance(x, pa.lib.ChunkedArray) and isinstance(x.type, pa.lib.TimestampType)
):
return x.to_pandas()
if not x.flags['WRITEABLE']:
x = x.copy()
return x
|
def _pandas_dt_fix(x):
# see https://github.com/pandas-dev/pandas/issues/23276
# not sure which version this is fixed in
if vaex.array_types.is_arrow_array(x) and isinstance(x.type, pa.lib.TimestampType):
return x.to_pandas()
if not x.flags['WRITEABLE']:
x = x.copy()
return x
|
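A tiny NumPy illustration of the read-only-buffer branch in the entry above: copying restores writability.

import numpy as np

x = np.arange(5)
x.flags.writeable = False        # simulate a read-only buffer (e.g. a memory-mapped column)
if not x.flags['WRITEABLE']:
    x = x.copy()                 # the copy owns its data and is writeable again
x[0] = 42
print(x)                         # [42  1  2  3  4]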
12,960 |
def validate_price_amount(value: "Decimal", currency: str = None):
"""Validate if price amount does not have too many decimal places.
    Price amount can't have more decimal places than the currency allows.
"""
if not value:
return
if currency:
currency_fraction = get_currency_fraction(currency)
else:
currency_fraction = get_currency_fraction(settings.DEFAULT_CURRENCY)
value = value.normalize()
if abs(value.as_tuple().exponent) > currency_fraction:
raise ValidationError(
f"Value cannot have more than {currency_fraction} decimal places."
)
|
def validate_price_amount(value: "Decimal", currency: str = None):
"""Validate if price amount does not have too many decimal places.
    Price amount can't have more decimal places than the currency allows.
"""
if not value:
return
currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY)
value = value.normalize()
if abs(value.as_tuple().exponent) > currency_fraction:
raise ValidationError(
f"Value cannot have more than {currency_fraction} decimal places."
)
|
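A short sketch of the Decimal exponent check used above, with an assumed currency fraction of 2 (as for USD).

from decimal import Decimal

currency_fraction = 2  # assumed fraction digits, e.g. for USD
for raw in ("10.990", "10.999"):
    value = Decimal(raw).normalize()
    places = abs(value.as_tuple().exponent)
    status = "ok" if places <= currency_fraction else "too many decimal places"
    print(raw, "->", places, status)   # 10.990 -> 2 ok, 10.999 -> 3 too many decimal places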
4,483 |
def setup(app):
"""Set up for Sphinx app."""
directives = dict(
details=DetailsDirective,
)
for key, value in directives.items():
app.add_directive(key, value)
try:
app.add_css_file('bootstrap_divs.css')
except AttributeError:
app.add_stylesheet('bootstrap_divs.css')
try:
app.add_js_file('bootstrap_divs.js')
except AttributeError:
app.add_javascript('bootstrap_divs.js')
app.connect('build-finished', copy_asset_files)
for node in (DetailsNode):
app.add_node(node,
html=(node.visit_node, node.depart_node),
latex=(node.visit_node, node.depart_node),
text=(node.visit_node, node.depart_node))
return dict(version='0.1', parallel_read_safe=True,
parallel_write_safe=True)
|
def setup(app):
"""Set up for Sphinx app."""
directives = dict(
details=DetailsDirective,
)
for key, value in directives.items():
app.add_directive(key, value)
try:
app.add_css_file('bootstrap_divs.css')
except AttributeError:
app.add_stylesheet('bootstrap_divs.css')
try:
app.add_js_file('bootstrap_divs.js')
except AttributeError:
app.add_javascript('bootstrap_divs.js')
app.connect('build-finished', copy_asset_files)
for node in (DetailsNode,):
app.add_node(node,
html=(node.visit_node, node.depart_node),
latex=(node.visit_node, node.depart_node),
text=(node.visit_node, node.depart_node))
return dict(version='0.1', parallel_read_safe=True,
parallel_write_safe=True)
|
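A quick illustration of the one-element tuple fix in the entry above: parentheses alone do not create a tuple, so iterating over (DetailsNode) tries to iterate the class itself.

class ExampleNode:
    pass

try:
    for node in (ExampleNode):       # just ExampleNode with redundant parentheses
        pass
except TypeError as exc:
    print(exc)                       # 'type' object is not iterable

for node in (ExampleNode,):          # trailing comma makes a one-element tuple
    print(node.__name__)             # ExampleNode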
7,190 |
def test_2d_motion():
# Generate synthetic data
rnd = np.random.RandomState(0)
image0 = rnd.normal(size=(256, 256))
gt_flow, image1 = _sin_flow_gen(image0)
# Estimate the flow
flow = optical_flow_ilk(image0, image1)
    # Assert that the average absolute error is less than half a pixel
assert abs(flow - gt_flow) .mean() < 0.5
|
def test_2d_motion():
# Generate synthetic data
rnd = np.random.RandomState(0)
image0 = rnd.normal(size=(256, 256))
gt_flow, image1 = _sin_flow_gen(image0)
# Estimate the flow
flow = optical_flow_ilk(image0, image1)
    # Assert that the average absolute error is less than half a pixel
assert abs(flow - gt_flow).mean() < 0.5
|
9,207 |
def getPSF(SCA, bandpass,
SCA_pos=None, pupil_bin=4, n_waves=None, extra_aberrations=None,
wavelength=None, gsparams=None,
logger=None, high_accuracy=None, approximate_struts=None):
"""Get a single PSF for Roman ST observations.
The user must provide the SCA and bandpass; the latter is used when setting up the pupil
plane configuration and when interpolating chromatic information, if requested.
This routine carries out linear interpolation of the aberrations within a given SCA, based on
the Roman (then WFIRST) Cycle 7 specification of the aberrations as a function of focal plane
position, more specifically from ``Roman_Phase-A_SRR_WFC_Zernike_and_Field_Data_170727.xlsm``
downloaded from https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html. Phase
B updates that became available in mid-2019 have not yet been incorporated into this module.
(Note: the files at that url still use the old WFIRST name. We have renamed them to use the
new name of the telescope, Roman, after downloading.)
    The mask images for the Roman pupil plane are available from the Roman Reference Information
    page: https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html.
    There are separate files for each SCA, since the view of the spider pattern varies somewhat
    across the field of view of the wide field camera. Furthermore, the effect of the obscuration
    is somewhat different at longer wavelengths, so F184 has a different set of files than the
    other filters. cf. the ``galsim.roman.longwave_bands`` and ``galsim.roman.shortwave_bands``
attributes, which define which bands use which pupil plane images.
To avoid using the full pupil plane configuration, use the optional keyword ``pupil_bin``.
The full pupil-plane images are 4096 x 4096, which is more detail than is typically needed for
most applications. The default binning is 4x4, which results in an image that is 1024 x 1024.
This provides enough detail for most purposes and is much faster to render than using the full
pupil plane image. Using pupil_bin=8 (resulting in a 512 x 512 image) still provides fairly
reasonable results and is even faster to render, but it is not recommended to use higher
binning than that, as the diffraction spikes will be noticeably degraded.
Also note that currently the orientation of the struts is fixed, rather than rotating depending
on the orientation of the focal plane. Rotation of the PSF can easily be affected by the user
via::
psf = galsim.roman.getPSF(...).rotate(angle)
which will rotate the entire PSF (including the diffraction spikes and all other features).
The calculation takes advantage of the fact that the diffraction limit and aberrations have a
simple, understood wavelength-dependence. (The Roman project webpage for Cycle 7 does in fact
provide aberrations as a function of wavelength, but the deviation from the expected chromatic
dependence is sub-percent so we neglect it here.) For reference, the script used to parse the
Zernikes given on the webpage and create the files in the GalSim repository can be found in
``devel/external/parse_roman_zernikes_1217.py``. The resulting chromatic object can be used to
draw into any of the Roman bandpasses, though the pupil plane configuration will only be
correct for those bands in the same range (i.e., long- or short-wavelength bands).
For applications that require very high accuracy in the modeling of the PSF, with very limited
aliasing, you may want to lower the folding_threshold in the gsparams. Otherwise very bright
stars will show some reflections in the spider pattern and possibly some boxiness at the
outskirts of the PSF. Using ``gsparams = GSParams(folding_threshold=2.e-3)`` generally
provides good results even for very bright (e.g. mag=10) stars. In these cases, you probably
also want to reduce ``pupil_bin`` somewhat from the default value of 4.
By default, no additional aberrations are included above the basic design. However, users can
provide an optional keyword ``extra_aberrations`` that will be included on top of those that are
part of the design. This should be in the same format as for the ChromaticOpticalPSF class,
with units of waves at the fiducial wavelength, 1293 nm. Currently, only aberrations up to order
22 (Noll convention) are simulated. For Roman, the tolerance for additional
aberrations was a total of 90 nanometers RMS as of mid-2015, distributed largely among coma,
astigmatism, trefoil, and spherical aberrations (NOT defocus). This information might serve as
a guide for reasonable ``extra_aberrations`` inputs. The reference for that number is
an earlier Cycle 5 document:
http://roman.gsfc.nasa.gov/science/sdt_public/wps/references/instrument/README_AFTA_C5_WFC_Zernike_and_Field_Data.pdf
However, the default (non-extra) aberrations are from Cycle 7 material linked earlier in this
docstring.
Jitter and charge diffusion are, by default, not included. Users who wish to include these can
find some guidelines for typical length scales of the Gaussians that can represent these
effects, and convolve the ChromaticOpticalPSF with appropriate achromatic Gaussians.
The PSFs are always defined assuming the user will specify length scales in arcsec.
Users may find they do not have to call `getPSF` for all objects in their simulations; for a
given SCA and position within the SCA, and a given pupil plane configuration and wavelength
information, it should be possible to reuse the PSFs.
Parameters:
SCA: Single value specifying the SCA for which the PSF should be
loaded.
bandpass: Single string specifying the bandpass to use when defining the
pupil plane configuration and/or interpolation of chromatic PSFs.
You may also pass a string 'long' or 'short' for this argument, in
which case, the correct pupil plane configuration will be used for
long- or short-wavelength bands (F184 is long, all else is short).
In this case, no interpolation can be used, since it is defined
using the extent of the chosen bandpass. If ``wavelength`` is given,
then bandpass may be None, which will use the short-wavelength pupil
plane image.
SCA_pos: Single galsim.PositionD indicating the position within the SCA
for which the PSF should be created. If None, the exact center of
the SCA is chosen. [default: None]
pupil_bin: The binning to apply to the pupil plane image. (See discussion above.)
[default: 4]
n_waves: Number of wavelengths to use for setting up interpolation of the
chromatic PSF objects, which can lead to much faster image
rendering. If None, then no interpolation is used. Note that
users who want to interpolate can always set up the interpolation
later on even if they do not do so when calling `getPSF`.
[default: None]
extra_aberrations: Array of extra aberrations to include in the PSF model, on top of
those that are part of the Roman design. These should be
provided in units of waves at the fiducial wavelength of 1293 nm,
as an array of length 23 with entries 4 through 22 corresponding
to defocus through the 22nd Zernike in the Noll convention.
[default: None]
wavelength: An option to get an achromatic PSF for a single wavelength, for
users who do not care about chromaticity of the PSF. If None,
then the fully chromatic PSF is returned. Alternatively the user
should supply either (a) a wavelength in nanometers, and they
will get achromatic OpticalPSF objects for that wavelength, or
(b) a bandpass object, in which case they will get achromatic
OpticalPSF objects defined at the effective wavelength of that
bandpass. [default: False]
gsparams: An optional GSParams argument. See the docstring for GSParams
for details. [default: None]
Returns:
A single PSF object (either a ChromaticOpticalPSF or an OpticalPSF depending on the
inputs).
"""
from ..position import PositionD
from ..errors import GalSimValueError, GalSimRangeError
from ..bandpass import Bandpass
from . import n_pix, n_sca, longwave_bands, shortwave_bands
# Deprecated options
if high_accuracy:
if approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True,approximate_struts=True', 2.3,
'pupil_bin=4, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 4
else:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True', 2.3,
'pupil_bin=1, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 1
elif approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=True', 2.3, 'pupil_bin=8',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 8
elif approximate_struts is False or high_accuracy is False:
# If they are explicitly given, rather than default (None), then trigger this.
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=False, high_accuracy=False', 2.3, 'pupil_bin=4',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 4
if SCA <= 0 or SCA > n_sca:
raise GalSimRangeError("Invalid SCA.", SCA, 1, n_sca)
# SCA_pos: if None, then all should just be center of the SCA.
if SCA_pos is None:
SCA_pos = PositionD(n_pix/2, n_pix/2)
# Parse the bandpasses to see which pupil plane image is needed
pupil_plane_type = None
if bandpass in longwave_bands or bandpass=='long':
pupil_plane_type = 'long'
elif bandpass in shortwave_bands or bandpass=='short':
pupil_plane_type = 'short'
elif bandpass is None and n_waves is None:
pupil_plane_type = 'short'
else:
raise GalSimValueError("Bandpass not a valid Roman bandpass or 'short'/'long'.",
bandpass, default_bandpass_list)
# If bandpass is 'short'/'long', then make sure that interpolation is not called for, since that
# requires an actual bandpass.
if bandpass in ['short','long'] and n_waves is not None:
raise GalSimValueError("Cannot use bandpass='short'/'long' with interpolation.", bandpass)
if not isinstance(wavelength, (Bandpass, float, type(None))):
raise TypeError("wavelength should either be a Bandpass, float, or None.")
# Now call _get_single_PSF().
psf = _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,
n_waves, extra_aberrations, wavelength,
pupil_plane_type, gsparams)
return psf
|
def getPSF(SCA, bandpass,
SCA_pos=None, pupil_bin=4, n_waves=None, extra_aberrations=None,
wavelength=None, gsparams=None,
logger=None, high_accuracy=None, approximate_struts=None):
"""Get a single PSF for Roman ST observations.
The user must provide the SCA and bandpass; the latter is used when setting up the pupil
plane configuration and when interpolating chromatic information, if requested.
This routine carries out linear interpolation of the aberrations within a given SCA, based on
the Roman (then WFIRST) Cycle 7 specification of the aberrations as a function of focal plane
position, more specifically from ``Roman_Phase-A_SRR_WFC_Zernike_and_Field_Data_170727.xlsm``
downloaded from https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html. Phase
B updates that became available in mid-2019 have not yet been incorporated into this module.
(Note: the files at that url still use the old WFIRST name. We have renamed them to use the
new name of the telescope, Roman, after downloading.)
    The mask images for the Roman pupil plane are available from the Roman Reference Information
    page: https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html.
    There are separate files for each SCA, since the view of the spider pattern varies somewhat
    across the field of view of the wide field camera. Furthermore, the effect of the obscuration
    is somewhat different at longer wavelengths, so F184 has a different set of files than the
    other filters. cf. the ``galsim.roman.longwave_bands`` and ``galsim.roman.shortwave_bands``
attributes, which define which bands use which pupil plane images.
To avoid using the full pupil plane configuration, use the optional keyword ``pupil_bin``.
The full pupil-plane images are 4096 x 4096, which is more detail than is typically needed for
most applications. The default binning is 4x4, which results in an image that is 1024 x 1024.
This provides enough detail for most purposes and is much faster to render than using the full
pupil plane image. Using pupil_bin=8 (resulting in a 512 x 512 image) still provides fairly
reasonable results and is even faster to render, but it is not recommended to use higher
binning than that, as the diffraction spikes will be noticeably degraded.
Also note that currently the orientation of the struts is fixed, rather than rotating depending
on the orientation of the focal plane. Rotation of the PSF can easily be affected by the user
via:
psf = galsim.roman.getPSF(...).rotate(angle)
which will rotate the entire PSF (including the diffraction spikes and all other features).
The calculation takes advantage of the fact that the diffraction limit and aberrations have a
simple, understood wavelength-dependence. (The Roman project webpage for Cycle 7 does in fact
provide aberrations as a function of wavelength, but the deviation from the expected chromatic
dependence is sub-percent so we neglect it here.) For reference, the script used to parse the
Zernikes given on the webpage and create the files in the GalSim repository can be found in
``devel/external/parse_roman_zernikes_1217.py``. The resulting chromatic object can be used to
draw into any of the Roman bandpasses, though the pupil plane configuration will only be
correct for those bands in the same range (i.e., long- or short-wavelength bands).
For applications that require very high accuracy in the modeling of the PSF, with very limited
aliasing, you may want to lower the folding_threshold in the gsparams. Otherwise very bright
stars will show some reflections in the spider pattern and possibly some boxiness at the
outskirts of the PSF. Using ``gsparams = GSParams(folding_threshold=2.e-3)`` generally
provides good results even for very bright (e.g. mag=10) stars. In these cases, you probably
also want to reduce ``pupil_bin`` somewhat from the default value of 4.
By default, no additional aberrations are included above the basic design. However, users can
provide an optional keyword ``extra_aberrations`` that will be included on top of those that are
part of the design. This should be in the same format as for the ChromaticOpticalPSF class,
with units of waves at the fiducial wavelength, 1293 nm. Currently, only aberrations up to order
22 (Noll convention) are simulated. For Roman, the tolerance for additional
aberrations was a total of 90 nanometers RMS as of mid-2015, distributed largely among coma,
astigmatism, trefoil, and spherical aberrations (NOT defocus). This information might serve as
a guide for reasonable ``extra_aberrations`` inputs. The reference for that number is
an earlier Cycle 5 document:
http://roman.gsfc.nasa.gov/science/sdt_public/wps/references/instrument/README_AFTA_C5_WFC_Zernike_and_Field_Data.pdf
However, the default (non-extra) aberrations are from Cycle 7 material linked earlier in this
docstring.
Jitter and charge diffusion are, by default, not included. Users who wish to include these can
find some guidelines for typical length scales of the Gaussians that can represent these
effects, and convolve the ChromaticOpticalPSF with appropriate achromatic Gaussians.
The PSFs are always defined assuming the user will specify length scales in arcsec.
Users may find they do not have to call `getPSF` for all objects in their simulations; for a
given SCA and position within the SCA, and a given pupil plane configuration and wavelength
information, it should be possible to reuse the PSFs.
Parameters:
SCA: Single value specifying the SCA for which the PSF should be
loaded.
bandpass: Single string specifying the bandpass to use when defining the
pupil plane configuration and/or interpolation of chromatic PSFs.
You may also pass a string 'long' or 'short' for this argument, in
which case, the correct pupil plane configuration will be used for
long- or short-wavelength bands (F184 is long, all else is short).
In this case, no interpolation can be used, since it is defined
using the extent of the chosen bandpass. If ``wavelength`` is given,
then bandpass may be None, which will use the short-wavelength pupil
plane image.
SCA_pos: Single galsim.PositionD indicating the position within the SCA
for which the PSF should be created. If None, the exact center of
the SCA is chosen. [default: None]
pupil_bin: The binning to apply to the pupil plane image. (See discussion above.)
[default: 4]
n_waves: Number of wavelengths to use for setting up interpolation of the
chromatic PSF objects, which can lead to much faster image
rendering. If None, then no interpolation is used. Note that
users who want to interpolate can always set up the interpolation
later on even if they do not do so when calling `getPSF`.
[default: None]
extra_aberrations: Array of extra aberrations to include in the PSF model, on top of
those that are part of the Roman design. These should be
provided in units of waves at the fiducial wavelength of 1293 nm,
as an array of length 23 with entries 4 through 22 corresponding
to defocus through the 22nd Zernike in the Noll convention.
[default: None]
wavelength: An option to get an achromatic PSF for a single wavelength, for
users who do not care about chromaticity of the PSF. If None,
then the fully chromatic PSF is returned. Alternatively the user
should supply either (a) a wavelength in nanometers, and they
will get achromatic OpticalPSF objects for that wavelength, or
(b) a bandpass object, in which case they will get achromatic
OpticalPSF objects defined at the effective wavelength of that
bandpass. [default: False]
gsparams: An optional GSParams argument. See the docstring for GSParams
for details. [default: None]
Returns:
A single PSF object (either a ChromaticOpticalPSF or an OpticalPSF depending on the
inputs).
"""
from ..position import PositionD
from ..errors import GalSimValueError, GalSimRangeError
from ..bandpass import Bandpass
from . import n_pix, n_sca, longwave_bands, shortwave_bands
# Deprecated options
if high_accuracy:
if approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True,approximate_struts=True', 2.3,
'pupil_bin=4, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 4
else:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True', 2.3,
'pupil_bin=1, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 1
elif approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=True', 2.3, 'pupil_bin=8',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 8
elif approximate_struts is False or high_accuracy is False:
# If they are explicitly given, rather than default (None), then trigger this.
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=False, high_accuracy=False', 2.3, 'pupil_bin=4',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 4
if SCA <= 0 or SCA > n_sca:
raise GalSimRangeError("Invalid SCA.", SCA, 1, n_sca)
# SCA_pos: if None, then all should just be center of the SCA.
if SCA_pos is None:
SCA_pos = PositionD(n_pix/2, n_pix/2)
# Parse the bandpasses to see which pupil plane image is needed
pupil_plane_type = None
if bandpass in longwave_bands or bandpass=='long':
pupil_plane_type = 'long'
elif bandpass in shortwave_bands or bandpass=='short':
pupil_plane_type = 'short'
elif bandpass is None and n_waves is None:
pupil_plane_type = 'short'
else:
raise GalSimValueError("Bandpass not a valid Roman bandpass or 'short'/'long'.",
bandpass, default_bandpass_list)
# If bandpass is 'short'/'long', then make sure that interpolation is not called for, since that
# requires an actual bandpass.
if bandpass in ['short','long'] and n_waves is not None:
raise GalSimValueError("Cannot use bandpass='short'/'long' with interpolation.", bandpass)
if not isinstance(wavelength, (Bandpass, float, type(None))):
raise TypeError("wavelength should either be a Bandpass, float, or None.")
# Now call _get_single_PSF().
psf = _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,
n_waves, extra_aberrations, wavelength,
pupil_plane_type, gsparams)
return psf
|
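A hedged usage sketch based on the docstring above; the band, SCA number, and wavelength are arbitrary examples, and GalSim with its Roman module is assumed to be installed.

import galsim
import galsim.roman as roman

# chromatic PSF for SCA 7 in the Y106 band, with coarser pupil binning and the
# tighter folding threshold recommended above for very bright stars
gsp = galsim.GSParams(folding_threshold=2.e-3)
psf = roman.getPSF(7, 'Y106', pupil_bin=8, gsparams=gsp)

# achromatic PSF at a single wavelength (in nm), rotated as described in the docstring
psf_mono = roman.getPSF(7, 'Y106', wavelength=1000.)
psf_mono = psf_mono.rotate(30 * galsim.degrees)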
13,688 |
def check_and_get_upgrade_link_and_date(user, enrollment=None, course=None):
"""
For an authenticated user, return a link to allow them to upgrade
in the specified course.
Returns the upgrade link and upgrade deadline for a user in a given course given
that the user is within the window to upgrade defined by our dynamic pacing feature;
otherwise, returns None for both the link and date.
"""
if enrollment is None and course is None:
logger.warn(u'Must specify either an enrollment or a course')
return (None, None, None)
enrollment_course = course
if enrollment:
if course is None:
enrollment_course = enrollment.course
elif enrollment.course_id != course.id:
logger.warn(u'{} refers to a different course than {} which was supplied. Enrollment course id={}, '
u'repr={!r}, deprecated={}. Course id={}, repr={!r}, deprecated={}.'
.format(enrollment,
course,
enrollment.course_id,
enrollment.course_id,
enrollment.course_id.deprecated,
course.id,
course.id,
course.id.deprecated
)
)
return (None, None, None)
if enrollment.user_id != user.id:
logger.warn(u'{} refers to a different user than {} which was supplied. Enrollment user id={}, repr={!r}. '
u'User id={}, repr={!r}.'.format(enrollment,
user,
enrollment.user_id,
enrollment.user_id,
user.id,
user.id,
)
)
return (None, None, None)
if enrollment is None:
enrollment = CourseEnrollment.get_enrollment(user, course.id)
if enrollment is None:
return (None, None, None)
if user.is_authenticated and can_show_verified_upgrade(user, enrollment, course):
return (
verified_upgrade_deadline_link(user, enrollment_course),
enrollment.upgrade_deadline,
enrollment.course_upgrade_deadline,
)
return (None, None, enrollment.course_upgrade_deadline)
|
def check_and_get_upgrade_link_and_date(user, enrollment=None, course=None):
"""
For an authenticated user, return a link to allow them to upgrade
in the specified course.
Returns the upgrade link and upgrade deadline for a user in a given course given
that the user is within the window to upgrade defined by our dynamic pacing feature;
otherwise, returns None for both the link and date.
"""
if enrollment is None and course is None:
logger.warn(u'Must specify either an enrollment or a course')
return (None, None, None)
enrollment_course = course
if enrollment:
if course is None:
enrollment_course = enrollment.course
elif enrollment.course_id != course.id:
logger.warn(u'{} refers to a different course than {} which was supplied. Enrollment course id={}, '
u'repr={!r}, deprecated={}. Course id={}, repr={!r}, deprecated={}.'
.format(enrollment,
course,
enrollment.course_id,
enrollment.course_id,
enrollment.course_id.deprecated,
course.id,
course.id,
course.id.deprecated
)
)
return (None, None, None)
if enrollment.user_id != user.id:
logger.warn(u'{} refers to a different user than {} which was supplied. Enrollment user id={}, repr={!r}. '
u'User id={}, repr={!r}.'.format(enrollment,
user,
enrollment.user_id,
enrollment.user_id,
user.id,
user.id,
)
)
return (None, None, None)
if enrollment is None:
enrollment = CourseEnrollment.get_enrollment(user, course.id)
if enrollment is None:
return (None, None, None)
if user.is_authenticated and can_show_verified_upgrade(user, enrollment, course):
return (
verified_upgrade_deadline_link(user, course or enrollment.course),
enrollment.upgrade_deadline,
enrollment.course_upgrade_deadline,
)
return (None, None, enrollment.course_upgrade_deadline)
|
41,886 |
def _get_optimization_history_plot_matplotlib(study: Study) -> Figure:
"""Plot optimization history of all trials in a study with matplotlib.
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their objective
values.
Returns:
A :class:`matplotlib.figure.Figure` object.
"""
# Setup
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig, ax = plt.subplots()
ax.set_title("Optimization History Plot")
ax.set_xlabel("#Trials")
ax.set_ylabel("Objective Value")
cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly.
# Prepare data for plotting
trials = [t for t in study.trials if t.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Study instance does not contain trials.")
return fig
best_values = [float("inf")] if study.direction == StudyDirection.MINIMIZE else [-float("inf")]
comp = min if study.direction == StudyDirection.MINIMIZE else max
for trial in trials:
trial_value = trial.value
assert trial_value is not None # For mypy
best_values.append(comp(best_values[-1], trial_value))
best_values.pop(0)
# Draw graphs
ax.scatter(
x=[t.number for t in trials],
y=[t.value for t in trials],
color=cmap(0),
alpha=1,
label="Objective Value",
)
ax.plot(
[t.number for t in trials],
best_values,
marker="o",
color=cmap(3),
alpha=0.5,
label="Best Value",
)
ax.legend()
return fig
|
def _get_optimization_history_plot_matplotlib(study: Study) -> Figure:
"""Plot optimization history of all trials in a study with matplotlib.
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their objective
values.
Returns:
A :class:`matplotlib.figure.Figure` object.
"""
# Setup
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig, ax = plt.subplots()
ax.set_title("Optimization History Plot")
ax.set_xlabel("#Trials")
ax.set_ylabel("Objective Value")
cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly.
# Prepare data for plotting
trials = [t for t in study.trials if t.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Study instance does not contain trials.")
return fig
best_values = [float("inf")] if study.direction == StudyDirection.MINIMIZE else [-float("inf")]
comp = min if study.direction == StudyDirection.MINIMIZE else max
for trial in trials:
trial_value = trial.value
assert trial_value is not None # For mypy
best_values.append(comp(best_values[-1], trial_value))
best_values.pop(0)
# Draw graphs.
ax.scatter(
x=[t.number for t in trials],
y=[t.value for t in trials],
color=cmap(0),
alpha=1,
label="Objective Value",
)
ax.plot(
[t.number for t in trials],
best_values,
marker="o",
color=cmap(3),
alpha=0.5,
label="Best Value",
)
ax.legend()
return fig
|
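A hedged usage sketch for the helper above, assuming it is in scope alongside its imports; the objective function and trial count are arbitrary examples.

import optuna

def objective(trial):
    x = trial.suggest_float("x", -10.0, 10.0)
    return (x - 2.0) ** 2

study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=30)

fig = _get_optimization_history_plot_matplotlib(study)  # the function defined above
fig.savefig("optimization_history.png")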