id | original | modified
---|---|---
52,745 |
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:%s" %
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
50,488 |
def print_summary(covdata, log_summary):
'''Print a small report to the standard output or to the logger in addition.
Output the percentage, covered and total lines and branches.
'''
(lines_total, lines_covered, percent,
functions_total, functions_covered, percent_functions,
branches_total, branches_covered, percent_branches) = get_global_stats(covdata)
lines_out = "lines: %0.1f%% (%s out of %s)" % (
percent, lines_covered, lines_total
)
functions_out = "functions: %0.1f%% (%s out of %s)" % (
percent_functions, functions_covered, functions_total
)
branches_out = "branches: %0.1f%% (%s out of %s)" % (
percent_branches, branches_covered, branches_total
)
if log_summary:
logger.info(lines_out)
logger.info(functions_out)
logger.info(branches_out)
else:
sys.stdout.write(lines_out + '\n')
sys.stdout.write(functions_out + '\n')
sys.stdout.write(branches_out + '\n')
|
def print_summary(covdata, log_summary):
'''Print a small report to the standard output or to the logger in addition.
Output the percentage, covered and total lines and branches.
'''
(lines_total, lines_covered, percent,
functions_total, functions_covered, percent_functions,
branches_total, branches_covered, percent_branches) = get_global_stats(covdata)
lines_out = "lines: %0.1f%% (%s out of %s)" % (
percent, lines_covered, lines_total
)
functions_out = "functions: %0.1f%% (%s out of %s)" % (
percent_functions, functions_covered, functions_total
)
branches_out = "branches: %0.1f%% (%s out of %s)" % (
percent_branches, branches_covered, branches_total
)
sys.stdout.write(lines_out + '\n')
sys.stdout.write(functions_out + '\n')
sys.stdout.write(branches_out + '\n')
|
31,690 |
def main():
handle_proxy()
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
wsdl = url + "/skybox/webservice/jaxws/tickets?wsdl"
username = params['credentials']['identifier']
password = params['credentials']['password']
session: Session = Session()
session.auth = (username, password)
session.verify = verify_certificate
cache: SqliteCache = SqliteCache(path=get_cache_path(), timeout=None)
transport: Transport = Transport(session=session, cache=cache)
settings: Settings = Settings(strict=False, xsd_ignore_sequence_order=True)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = zClient(wsdl=wsdl, transport=transport, settings=settings)
commands = {
'skybox-getTicketWorkflow': getTicketWorkflow_command,
'skybox-getOriginalChangeRequestRouteInfoV1': getOriginalChangeRequestRouteInfoV1_command,
'skybox-getTicketTypePhasesByTicketType': getTicketTypePhasesByTicketType_command,
'skybox-getOriginalChangeRequestV7': getOriginalChangeRequestV7_command,
'skybox-setTicketFields': setTicketFields_command,
'skybox-createAccessChangeTicket': createAccessChangeTicket_command,
'skybox-getAttachmentFile': getAttachmentFile_command,
'skybox-createRecertifyTicketV2': createRecertifyTicketV2_command,
'skybox-getAccessChangeTicket': getAccessChangeTicket_command,
'skybox-getAccessRequests': getAccessRequests_command,
'skybox-getPotentialVulnerabilitiesV2': getPotentialVulnerabilitiesV2_command,
'skybox-deleteChangeRequests': deleteChangeRequests_command,
'skybox-deleteAccessChangeTicket': deleteAccessChangeTicket_command,
'skybox-getNotImplementedChangeRequestsV2': getNotImplementedChangeRequestsV2_command,
'skybox-getTicketEvents': getTicketEvents_command,
'skybox-getChangeRequestReviewers': getChangeRequestReviewers_command,
'skybox-getChangeRequestRuleAttributes': getChangeRequestRuleAttributes_command,
'skybox-getGeneratedCommands': getGeneratedCommands_command,
'skybox-getImplementedChangeRequests': getImplementedChangeRequests_command,
'skybox-operateOnVulnerabilityDefinitionTicket': operateOnVulnerabilityDefinitionTicket_command,
'skybox-createChangeManagerTicket': createChangeManagerTicket_command,
'skybox-getTicketsNotImplementedChangeRequestsV2': getTicketsNotImplementedChangeRequestsV2_command,
'skybox-findAccessRequests': findAccessRequests_command,
'skybox-expandFirewallsForAccessChangeTicket': expandFirewallsForAccessChangeTicket_command,
'skybox-addAttachmentFile': addAttachmentFile_command,
'skybox-countAccessChangeTickets': countAccessChangeTickets_command,
'skybox-getDerivedChangeRequestsV7': getDerivedChangeRequestsV7_command,
'skybox-setTicketAccessRequests': setTicketAccessRequests_command,
'skybox-updateAccessChangeTicket': updateAccessChangeTicket_command,
'skybox-addDerivedChangeRequests': addDerivedChangeRequests_command,
'skybox-getPolicyViolations': getPolicyViolations_command,
'skybox-removeAttachmentFile': removeAttachmentFile_command,
'skybox-getTicketWorkflows': getTicketWorkflows_command,
'skybox-recalculateTicketChangeRequests': recalculateTicketChangeRequests_command,
'skybox-findConfigurationItems': findConfigurationItems_command,
'skybox-getSponsoringApplication': getSponsoringApplication_command,
'skybox-addOriginalChangeRequestsV7': addOriginalChangeRequestsV7_command,
'skybox-createTicketAccessRequestsForObjectChange': createTicketAccessRequestsForObjectChange_command,
'skybox-getDerivedChangeRequestRouteInfoV1': getDerivedChangeRequestRouteInfoV1_command,
'skybox-implementChangeRequests': implementChangeRequests_command,
'skybox-getAnalysisTree': getAnalysisTree_command,
'skybox-operateOnAccessChangeTicket': operateOnAccessChangeTicket_command,
'skybox-analyzeAccessChangeTicket': analyzeAccessChangeTicket_command,
'skybox-getVerificationDetails': getVerificationDetails_command,
'skybox-getTicketPhases': getTicketPhases_command,
'skybox-findTickets': findTickets_command,
'skybox-setChangeRequestRuleAttributes': setChangeRequestRuleAttributes_command,
'skybox-getAttachmentList': getAttachmentList_command,
'skybox-setAddRuleChangeRequestFields': setAddRuleChangeRequestFields_command,
'skybox-setTicketPhases': setTicketPhases_command,
'skybox-getTicketAccessRequests': getTicketAccessRequests_command,
'skybox-testService': testService_command,
'skybox-setRecertificationStatus': setRecertificationStatus_command,
'skybox-setTicketDeferChangeRequestsCalculationStatus': setTicketDeferChangeRequestsCalculationStatus_command,
'skybox-setSponsoringApplication': setSponsoringApplication_command,
'skybox-findAccessChangeTickets': findAccessChangeTickets_command,
'skybox-getTicketFields': getTicketFields_command,
'skybox-getTicketsImplementedChangeRequests': getTicketsImplementedChangeRequests_command,
'skybox-getTicketDeferChangeRequestsCalculationStatus': getTicketDeferChangeRequestsCalculationStatus_command,
}
if command == 'test-module':
test_module(client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
def main():
handle_proxy()
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
wsdl = url + "/skybox/webservice/jaxws/tickets?wsdl"
username = params['credentials']['identifier']
password = params['credentials']['password']
session: Session = Session()
session.auth = (username, password)
session.verify = verify_certificate
cache: SqliteCache = SqliteCache(path=get_cache_path(), timeout=None)
transport: Transport = Transport(session=session, cache=cache)
settings: Settings = Settings(strict=False, xsd_ignore_sequence_order=True)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = zClient(wsdl=wsdl, transport=transport, settings=settings)
commands = {
'skybox-get-ticket-workflow': getTicketWorkflow_command,
'skybox-getOriginalChangeRequestRouteInfoV1': getOriginalChangeRequestRouteInfoV1_command,
'skybox-getTicketTypePhasesByTicketType': getTicketTypePhasesByTicketType_command,
'skybox-getOriginalChangeRequestV7': getOriginalChangeRequestV7_command,
'skybox-setTicketFields': setTicketFields_command,
'skybox-createAccessChangeTicket': createAccessChangeTicket_command,
'skybox-getAttachmentFile': getAttachmentFile_command,
'skybox-createRecertifyTicketV2': createRecertifyTicketV2_command,
'skybox-getAccessChangeTicket': getAccessChangeTicket_command,
'skybox-getAccessRequests': getAccessRequests_command,
'skybox-getPotentialVulnerabilitiesV2': getPotentialVulnerabilitiesV2_command,
'skybox-deleteChangeRequests': deleteChangeRequests_command,
'skybox-deleteAccessChangeTicket': deleteAccessChangeTicket_command,
'skybox-getNotImplementedChangeRequestsV2': getNotImplementedChangeRequestsV2_command,
'skybox-getTicketEvents': getTicketEvents_command,
'skybox-getChangeRequestReviewers': getChangeRequestReviewers_command,
'skybox-getChangeRequestRuleAttributes': getChangeRequestRuleAttributes_command,
'skybox-getGeneratedCommands': getGeneratedCommands_command,
'skybox-getImplementedChangeRequests': getImplementedChangeRequests_command,
'skybox-operateOnVulnerabilityDefinitionTicket': operateOnVulnerabilityDefinitionTicket_command,
'skybox-createChangeManagerTicket': createChangeManagerTicket_command,
'skybox-getTicketsNotImplementedChangeRequestsV2': getTicketsNotImplementedChangeRequestsV2_command,
'skybox-findAccessRequests': findAccessRequests_command,
'skybox-expandFirewallsForAccessChangeTicket': expandFirewallsForAccessChangeTicket_command,
'skybox-addAttachmentFile': addAttachmentFile_command,
'skybox-countAccessChangeTickets': countAccessChangeTickets_command,
'skybox-getDerivedChangeRequestsV7': getDerivedChangeRequestsV7_command,
'skybox-setTicketAccessRequests': setTicketAccessRequests_command,
'skybox-updateAccessChangeTicket': updateAccessChangeTicket_command,
'skybox-addDerivedChangeRequests': addDerivedChangeRequests_command,
'skybox-getPolicyViolations': getPolicyViolations_command,
'skybox-removeAttachmentFile': removeAttachmentFile_command,
'skybox-getTicketWorkflows': getTicketWorkflows_command,
'skybox-recalculateTicketChangeRequests': recalculateTicketChangeRequests_command,
'skybox-findConfigurationItems': findConfigurationItems_command,
'skybox-getSponsoringApplication': getSponsoringApplication_command,
'skybox-addOriginalChangeRequestsV7': addOriginalChangeRequestsV7_command,
'skybox-createTicketAccessRequestsForObjectChange': createTicketAccessRequestsForObjectChange_command,
'skybox-getDerivedChangeRequestRouteInfoV1': getDerivedChangeRequestRouteInfoV1_command,
'skybox-implementChangeRequests': implementChangeRequests_command,
'skybox-getAnalysisTree': getAnalysisTree_command,
'skybox-operateOnAccessChangeTicket': operateOnAccessChangeTicket_command,
'skybox-analyzeAccessChangeTicket': analyzeAccessChangeTicket_command,
'skybox-getVerificationDetails': getVerificationDetails_command,
'skybox-getTicketPhases': getTicketPhases_command,
'skybox-findTickets': findTickets_command,
'skybox-setChangeRequestRuleAttributes': setChangeRequestRuleAttributes_command,
'skybox-getAttachmentList': getAttachmentList_command,
'skybox-setAddRuleChangeRequestFields': setAddRuleChangeRequestFields_command,
'skybox-setTicketPhases': setTicketPhases_command,
'skybox-getTicketAccessRequests': getTicketAccessRequests_command,
'skybox-testService': testService_command,
'skybox-setRecertificationStatus': setRecertificationStatus_command,
'skybox-setTicketDeferChangeRequestsCalculationStatus': setTicketDeferChangeRequestsCalculationStatus_command,
'skybox-setSponsoringApplication': setSponsoringApplication_command,
'skybox-findAccessChangeTickets': findAccessChangeTickets_command,
'skybox-getTicketFields': getTicketFields_command,
'skybox-getTicketsImplementedChangeRequests': getTicketsImplementedChangeRequests_command,
'skybox-getTicketDeferChangeRequestsCalculationStatus': getTicketDeferChangeRequestsCalculationStatus_command,
}
if command == 'test-module':
test_module(client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
37,857 |
def test_put_vector_mixed_dtypes():
"""Passing a numpy array of mixed dtypes to a dataset.
See https://github.com/GenericMappingTools/pygmt/issues/255
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
for dtypex in dtypes:
for dtypey in dtypes:
# skip same dtypes
if dtypex == dtypey:
continue
with clib.Session() as lib:
dataset = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_VECTOR",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[2, 5, 1, 0], # columns, rows, layers, dtype
)
x = np.array([1, 2, 3, 4, 5], dtype=dtypex)
y = np.array([6, 7, 8, 9, 10], dtype=dtypey)
lib.put_vector(dataset, column=lib["GMT_X"], vector=x)
lib.put_vector(dataset, column=lib["GMT_Y"], vector=y)
# Turns out wesn doesn't matter for Datasets
wesn = [0] * 6
# Save the data to a file to see if it's being accessed
# correctly
with GMTTempFile() as tmp_file:
lib.write_data(
"GMT_IS_VECTOR",
"GMT_IS_POINT",
"GMT_WRITE_SET",
wesn,
tmp_file.name,
dataset,
)
# Load the data and check that it's correct
newx, newy = tmp_file.loadtxt(
unpack=True, dtype=[("x", dtypex), ("y", dtypey)]
)
assert x.dtype == newx.dtype
assert y.dtype == newy.dtype
npt.assert_allclose(newx, x)
npt.assert_allclose(newy, y)
|
def test_put_vector_mixed_dtypes():
"""Passing a numpy array of mixed dtypes to a dataset.
See https://github.com/GenericMappingTools/pygmt/issues/255
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
for dtypex, dtypey in itertools.combinations(iterable=dtypes, r=2):
with clib.Session() as lib:
dataset = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_VECTOR",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[2, 5, 1, 0], # columns, rows, layers, dtype
)
x = np.array([1, 2, 3, 4, 5], dtype=dtypex)
y = np.array([6, 7, 8, 9, 10], dtype=dtypey)
lib.put_vector(dataset, column=lib["GMT_X"], vector=x)
lib.put_vector(dataset, column=lib["GMT_Y"], vector=y)
# Turns out wesn doesn't matter for Datasets
wesn = [0] * 6
# Save the data to a file to see if it's being accessed
# correctly
with GMTTempFile() as tmp_file:
lib.write_data(
"GMT_IS_VECTOR",
"GMT_IS_POINT",
"GMT_WRITE_SET",
wesn,
tmp_file.name,
dataset,
)
# Load the data and check that it's correct
newx, newy = tmp_file.loadtxt(
unpack=True, dtype=[("x", dtypex), ("y", dtypey)]
)
assert x.dtype == newx.dtype
assert y.dtype == newy.dtype
npt.assert_allclose(newx, x)
npt.assert_allclose(newy, y)
|
42,341 |
def get_role_list(collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get list of installed collection roles.
Only roles that have an argument specification defined are returned.
.. note:: Version added: 2.2
:param str collection: A fully qualified collection name used to filter the results.
:param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise an 'AnsibleRunnerException' exception; if set to 'False' it logs a debug message and continues execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_list_command(collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
def get_role_list(collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get list of installed collection roles.
Only roles that have an argument specification defined are returned.
.. note:: Version added: 2.2
:param str collection: A fully qualified collection name used to filter the results.
:param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (for example, started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise an 'AnsibleRunnerException' exception; if set to 'False' it logs a debug message and continues execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_list_command(collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
47,929 |
def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model",
required=True, type=str)
args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
required=True, type=str, nargs="+")
args.add_argument("-c", "--classes", help="Optional. Path to a text file containing class labels.",
type=str)
args.add_argument("-col", "--colors", help="Optional. Path to a text file containing colors for classes.",
type=str)
args.add_argument("-lw", "--legend_width", help="Optional. Width of legend.", default=300, type=int)
args.add_argument("-o", "--output_dir", help="Optional. Path to a folder where output files will be saved",
default="results", type=str)
args.add_argument("-l", "--cpu_extension",
help="Optional. Required for CPU custom layers. "
"Absolute MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the "
"kernels implementations", type=str, default=None)
args.add_argument("-d", "--device",
help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is "
"acceptable. Sample will look for a suitable plugin for device specified. Default value is CPU",
default="CPU", type=str)
args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
return parser
|
def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model",
required=True, type=str)
args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
required=True, type=str, nargs="+")
args.add_argument("-c", "--classes", help="Optional. Path to a text file containing class labels.",
type=str)
args.add_argument("-c", "--colors", help="Optional. Path to a text file containing colors for classes.",
type=str)
args.add_argument("-lw", "--legend_width", help="Optional. Width of legend.", default=300, type=int)
args.add_argument("-o", "--output_dir", help="Optional. Path to a folder where output files will be saved",
default="results", type=str)
args.add_argument("-l", "--cpu_extension",
help="Optional. Required for CPU custom layers. "
"Absolute MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the "
"kernels implementations", type=str, default=None)
args.add_argument("-d", "--device",
help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is "
"acceptable. Sample will look for a suitable plugin for device specified. Default value is CPU",
default="CPU", type=str)
args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
return parser
|
32,206 |
def main() -> None:
api_key = demisto.params().get('apikey')
base_url = demisto.params().get('url')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = dateparser.parse(demisto.params().get('first_fetch', '3 days')).strftime(XSOAR_DATE_FORMAT) # type: ignore
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
mirror_tags = set(demisto.params().get('mirror_tag', '').split(',')) \
if demisto.params().get('mirror_tag') else set([])
query = demisto.params().get('query', '') or ''
disable_from_same_integration = demisto.params().get('disable_from_same_integration')
if disable_from_same_integration:
query += ' -sourceBrand:"XSOAR Mirroring"'
max_results = arg_to_number(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch'
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
try:
headers = {
'Authorization': api_key
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy
)
if demisto.command() == 'test-module':
if demisto.params().get('isFetch'):
fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time,
query=query,
mirror_direction=demisto.params().get('mirror_direction'),
mirror_tag=list(mirror_tags)
)
return_results(test_module(client, first_fetch_time))
elif demisto.command() == 'fetch-incidents':
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time,
query=query,
mirror_direction=demisto.params().get('mirror_direction'),
mirror_tag=list(mirror_tags)
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'xsoar-search-incidents':
return_results(search_incidents_command(client, demisto.args()))
elif demisto.command() == 'xsoar-get-incident':
return_results(get_incident_command(client, demisto.args()))
elif demisto.command() == 'get-mapping-fields':
return_results(get_mapping_fields_command(client))
elif demisto.command() == 'get-remote-data':
return_results(get_remote_data_command(client, demisto.args(), demisto.params()))
elif demisto.command() == 'update-remote-system':
return_results(update_remote_system_command(client, demisto.args(), mirror_tags))
else:
raise NotImplementedError('Command not implemented')
except NotImplementedError:
raise
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
api_key = demisto.params().get('apikey')
base_url = demisto.params().get('url')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = dateparser.parse(demisto.params().get('first_fetch', '3 days')).strftime(XSOAR_DATE_FORMAT) # type: ignore[union-attr]
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
mirror_tags = set(demisto.params().get('mirror_tag', '').split(',')) \
if demisto.params().get('mirror_tag') else set([])
query = demisto.params().get('query', '') or ''
disable_from_same_integration = demisto.params().get('disable_from_same_integration')
if disable_from_same_integration:
query += ' -sourceBrand:"XSOAR Mirroring"'
max_results = arg_to_number(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch'
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
try:
headers = {
'Authorization': api_key
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy
)
if demisto.command() == 'test-module':
if demisto.params().get('isFetch'):
fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time,
query=query,
mirror_direction=demisto.params().get('mirror_direction'),
mirror_tag=list(mirror_tags)
)
return_results(test_module(client, first_fetch_time))
elif demisto.command() == 'fetch-incidents':
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time,
query=query,
mirror_direction=demisto.params().get('mirror_direction'),
mirror_tag=list(mirror_tags)
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'xsoar-search-incidents':
return_results(search_incidents_command(client, demisto.args()))
elif demisto.command() == 'xsoar-get-incident':
return_results(get_incident_command(client, demisto.args()))
elif demisto.command() == 'get-mapping-fields':
return_results(get_mapping_fields_command(client))
elif demisto.command() == 'get-remote-data':
return_results(get_remote_data_command(client, demisto.args(), demisto.params()))
elif demisto.command() == 'update-remote-system':
return_results(update_remote_system_command(client, demisto.args(), mirror_tags))
else:
raise NotImplementedError('Command not implemented')
except NotImplementedError:
raise
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
27,472 |
def delete_backup(instance_id, backup_id):
spanner_client = spanner.Client()
instance = spanner_client.instance(instance_id)
backup = instance.backup(backup_id)
backup.reload()
# Wait for databases that reference this backup to finish optimizing
while backup.referencing_databases:
time.sleep(30)
backup.reload()
# Delete the backup.
backup.delete()
# Verify that the backup is deleted.
assert backup.exists() is False
print("Backup {} has been deleted.".format(backup.name))
# [END spanner_delete_backup]
|
def delete_backup(instance_id, backup_id):
spanner_client = spanner.Client()
instance = spanner_client.instance(instance_id)
backup = instance.backup(backup_id)
backup.reload()
# Wait for databases that reference this backup to finish optimizing.
while backup.referencing_databases:
time.sleep(30)
backup.reload()
# Delete the backup.
backup.delete()
# Verify that the backup is deleted.
assert backup.exists() is False
print("Backup {} has been deleted.".format(backup.name))
# [END spanner_delete_backup]
|
54,594 |
def _flatten_cli_args(cli_args: List[Union[Sequence[str], str]]) -> List[str]:
"""Allow a user to put long arguments into a list of strs
to make the JSON human readable"""
flat_args = []
for arg in cli_args:
if isinstance(arg, str):
flat_args.append(arg)
continue
new_args_str = ""
for arg_str in arg:
new_args_str += arg_str
flat_args.append(new_args_str)
return flat_args
|
def _flatten_cli_args(cli_args: List[Union[Sequence[str], str]]) -> List[str]:
"""Allow a user to put long arguments into a list of strs
to make the JSON human readable"""
flat_args = []
for arg in cli_args:
if isinstance(arg, str):
flat_args.append(arg)
continue
args_as_str = "".join(arg)
flat_args.append(args_as_str)
return flat_args
|
12,264 |
def check_measure(op, state):
""" Check that all measure results are in measurement_statistics which
was previously tested.
This remove the depandance on the prng.
"""
evs, ess_or_projs, prob = measurement_statistics(op, state)
expected_measurements = EigenPairs(list(zip(evs, ess_or_projs)))
for _ in range(10):
assert_(measure(op, state) in expected_measurements)
|
def check_measure(op, state):
""" Check that all measure results are in measurement_statistics which
was previously tested.
This removes the dependence on the prng.
"""
evs, ess_or_projs, prob = measurement_statistics(op, state)
expected_measurements = EigenPairs(list(zip(evs, ess_or_projs)))
for _ in range(10):
assert_(measure(op, state) in expected_measurements)
|
1,367 |
def test_CountVectorizer_fit_warning():
CountVectorizerMsg = "The parameter 'stop_words' will not be used"
" since analyzer != 'word'"
for vec in [CountVectorizer()]:
vec.set_params(stop_words=["you've", "you", "you'll", 'AND'],
analyzer='char')
assert_warns_message(UserWarning, CountVectorizerMsg, vec.fit,
['hello world'])
|
def test_CountVectorizer_fit_warning():
msg = "The parameter 'stop_words' will not be used"
" since analyzer != 'word'"
for vec in [CountVectorizer()]:
vec.set_params(stop_words=["you've", "you", "you'll", 'AND'],
analyzer='char')
assert_warns_message(UserWarning, CountVectorizerMsg, vec.fit,
['hello world'])
|
13,525 |
def parse_value_name_collumn(value_name, value, signal_size, float_factory):
mini = maxi = offset = None
value_table = dict()
if ".." in value_name:
(mini, maxi) = value_name.strip().split("..", 2)
mini = float_factory(mini)
maxi = float_factory(maxi)
offset = mini
elif value_name.__len__() > 0:
if value.strip().__len__() > 0:
# Value Table
value = int(float(value))
value_table[value] = value_name
maxi = pow(2, signal_size) - 1
maxi = float_factory(maxi)
mini = 0
offset = 1
return mini, maxi, offset, value_table
|
def parse_value_name_collumn(value_name, value, signal_size, float_factory):
mini = maxi = offset = None
value_table = dict()
if ".." in value_name:
(mini, maxi) = value_name.strip().split("..", 2)
mini = float_factory(mini)
maxi = float_factory(maxi)
offset = mini
elif value_name.__len__() > 0:
if len(value.strip()) > 0:
# Value Table
value = int(float(value))
value_table[value] = value_name
maxi = pow(2, signal_size) - 1
maxi = float_factory(maxi)
mini = 0
offset = 1
return mini, maxi, offset, value_table
|
32,037 |
def test_handle_msg_with_attachments():
"""
Given:
- A msg file with attachments
When:
- The handle_msg
Then:
- Ensure that the attachment name is in the results
"""
result = handle_msg('test_data/html_attachment.msg', 'html_attachment.msg')
assert result[0]['Attachments'] == 'dummy-attachment.txt'
|
def test_handle_msg_with_attachments():
"""
Given:
- A msg file with attachments
When:
- Running the 'handle_msg' method
Then:
- Ensure that the attachment name is in the results
"""
result = handle_msg('test_data/html_attachment.msg', 'html_attachment.msg')
assert result[0]['Attachments'] == 'dummy-attachment.txt'
|
13,509 |
def condition_alway_false(data):
return False
|
def condition_always_false(data):
return False
|
31,013 |
def test_get_multiple_packs_dirs(requests_mock):
"""
Scenario: Get a pack dir name from pull request files
Given
- A pull request
- A file in the pull request is in a pack
When
- Getting the pack dir name from a pull request
Then
- Ensure the pack dir name is returned correctly
"""
branch = 'contrib_branch'
pr_number = '1'
repo = 'contrib_repo'
requests_mock.get(
'https://api.github.com/repos/demisto/content/pulls/1/files',
[{'json': github_response_1, 'status_code': 200},
{'json': github_response_2, 'status_code': 200},
{'json': github_response_3, 'status_code': 200},
{'json': github_response_4, 'status_code': 200}]
)
pack_dir = get_pack_dir(branch, pr_number, repo)
assert pack_dir == ['Slack', 'Slack1']
|
def test_get_multiple_packs_dirs(requests_mock):
"""
Scenario: Get a pack dir name from pull request files
Given
- A pull request
- A file in the pull request is in a pack
When
- Getting the pack dir name from a pull request
Then
- Ensure the pack dir name is returned correctly
"""
branch = 'contrib_branch'
pr_number = '1'
repo = 'contrib_repo'
requests_mock.get(
'https://api.github.com/repos/demisto/content/pulls/1/files',
[{'json': github_response_1, 'status_code': 200},
{'json': github_response_2, 'status_code': 200},
{'json': github_response_3, 'status_code': 200},
{'json': github_response_4, 'status_code': 200}]
)
pack_dir = get_pack_dir(branch, pr_number, repo)
assert pack_dir == ['Slack', 'AnotherPackName']
|
38,795 |
def getallnodes(state='all', partitions=None):
rt = runtime.runtime()
nodes = {}
if partitions is None:
partitions = rt.system.partitions
for part in rt.system.partitions:
if part not in partitions:
continue
# This job will not be submitted, it's used only to filter
# the nodes based on the partition configuration
job = Job.create(part.scheduler,
part.launcher_type(),
name='placeholder-job',
sched_access=part.access,
sched_options=rt.jobs_cli_options)
available_nodes = part.scheduler.allnodes()
available_nodes = part.scheduler.filternodes(job, available_nodes)
getlogger().debug(
f'Total available nodes for {part.name}: {len(available_nodes)}'
)
if state.casefold() != 'all':
available_nodes = {n for n in available_nodes
if n.in_state(state)}
getlogger().debug(
f'[F] Selecting nodes in state {state!r}: '
f'available nodes now: {len(available_nodes)}'
)
nodes[part.fullname] = [n.name for n in available_nodes]
return nodes
|
def getallnodes(state='all', job_cli_options=None):
rt = runtime.runtime()
nodes = {}
if partitions is None:
partitions = rt.system.partitions
for part in rt.system.partitions:
if part not in partitions:
continue
# This job will not be submitted, it's used only to filter
# the nodes based on the partition configuration
job = Job.create(part.scheduler,
part.launcher_type(),
name='placeholder-job',
sched_access=part.access,
sched_options=rt.jobs_cli_options)
available_nodes = part.scheduler.allnodes()
available_nodes = part.scheduler.filternodes(job, available_nodes)
getlogger().debug(
f'Total available nodes for {part.name}: {len(available_nodes)}'
)
if state.casefold() != 'all':
available_nodes = {n for n in available_nodes
if n.in_state(state)}
getlogger().debug(
f'[F] Selecting nodes in state {state!r}: '
f'available nodes now: {len(available_nodes)}'
)
nodes[part.fullname] = [n.name for n in available_nodes]
return nodes
|
39,915 |
def confirm_staged_stake(staker_address, value, lock_periods) -> None:
click.confirm(f"""
* Ursula Node Operator Notice *
-------------------------------
By agreeing to stake {str(value)} ({str(value.to_nunits())} NuNits):
- Staked tokens will be locked for the stake duration.
- You are obligated to maintain a networked and available Ursula-Worker node
bonded to the staker address {staker_address} for the duration
of the stake(s) ({lock_periods} periods).
- Agree to allow NuCypher network users to carry out uninterrupted re-encryption
work orders at-will without interference.
Failure to keep your node online, or violation of re-encryption work orders
will result in the loss of staked tokens as described in the NuCypher slashing protocol.
Keeping your Ursula node online during the staking period and successfully
producing correct re-encryption work orders will result in fees
paid out in ethers retro-actively and on-demand.
Accept ursula node operator obligation?""", abort=True)
|
def confirm_staged_stake(staker_address, value, lock_periods) -> None:
click.confirm(f"""
* Ursula Node Operator Notice *
-------------------------------
By agreeing to stake {str(value)} ({str(value.to_nunits())} NuNits):
- Staked tokens will be locked for the stake duration.
- You are obligated to maintain a networked and available Ursula-Worker node
bonded to the staker address {staker_address} for the duration
of the stake(s) ({lock_periods} periods).
- Agree to allow NuCypher network users to carry out uninterrupted re-encryption
work orders at-will without interference.
Failure to keep your node online, or violation of re-encryption work orders
will result in the loss of staked tokens as described in the NuCypher slashing protocol.
Keeping your Ursula node online during the staking period and successfully
producing correct re-encryption work orders will result in fees
paid out in ethers retroactively and on-demand.
Accept ursula node operator obligation?""", abort=True)
|
57,901 |
def main() -> None:
args = demisto.args()
server = demisto.params()['server']
port = demisto.params().get('port', '443')
verify_certificate = not demisto.params().get('insecure', False)
proxy = not demisto.params().get('insecure', False)
partition = demisto.params().get('partition')
username = demisto.params().get('credentials', {}).get('identifier')
password = demisto.params().get('credentials', {}).get('password')
base_url = f'https://{server}:{port}/mgmt/tm/'
handle_proxy()
token = login(server, port, username, password, verify_certificate)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
token=token,
partition=partition,
use_ssl=verify_certificate,
use_proxy=proxy
)
if demisto.command() == 'test-module':
result = test_module(client)
return_results(result)
elif demisto.command() == 'f5-ltm-get-pools':
return_results(ltm_get_pools_command(client, args))
elif demisto.command() == 'f5-ltm-get-pool':
return_results(ltm_get_pool_command(client, args))
elif demisto.command() == 'f5-ltm-get-pool-members':
return_results(ltm_get_pool_members_command(client, args))
elif demisto.command() == 'f5-ltm-get-nodes':
return_results(ltm_get_nodes_command(client))
elif demisto.command() == 'f5-ltm-get-node':
return_results(ltm_get_node_command(client, args))
elif demisto.command() == 'f5-ltm-disable-node':
return_results(ltm_disable_node_command(client, args))
elif demisto.command() == 'f5-ltm-enable-node':
return_results(ltm_enable_node_command(client, args))
elif demisto.command() == 'f5-ltm-get-pool-member-stats':
return_results(ltm_get_pool_member_stats_command(client, args))
elif demisto.command() == 'f5-ltm-get-node-stats':
return_results(ltm_get_node_stats_command(client, args))
elif demisto.command() == 'f5-ltm-get-node-by-address':
return_results(ltm_get_node_by_address_command(client, args))
elif demisto.command() == 'f5-ltm-get-pool-by-node':
return_results(ltm_get_pools_by_node_command(client, args))
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
args = demisto.args()
params = demisto.params()
server = params['server']
port = demisto.params().get('port', '443')
verify_certificate = not demisto.params().get('insecure', False)
proxy = not demisto.params().get('insecure', False)
partition = demisto.params().get('partition')
username = demisto.params().get('credentials', {}).get('identifier')
password = demisto.params().get('credentials', {}).get('password')
base_url = f'https://{server}:{port}/mgmt/tm/'
handle_proxy()
token = login(server, port, username, password, verify_certificate)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
token=token,
partition=partition,
use_ssl=verify_certificate,
use_proxy=proxy
)
if demisto.command() == 'test-module':
result = test_module(client)
return_results(result)
elif demisto.command() == 'f5-ltm-get-pools':
return_results(ltm_get_pools_command(client, args))
elif demisto.command() == 'f5-ltm-get-pool':
return_results(ltm_get_pool_command(client, args))
elif demisto.command() == 'f5-ltm-get-pool-members':
return_results(ltm_get_pool_members_command(client, args))
elif demisto.command() == 'f5-ltm-get-nodes':
return_results(ltm_get_nodes_command(client))
elif demisto.command() == 'f5-ltm-get-node':
return_results(ltm_get_node_command(client, args))
elif demisto.command() == 'f5-ltm-disable-node':
return_results(ltm_disable_node_command(client, args))
elif demisto.command() == 'f5-ltm-enable-node':
return_results(ltm_enable_node_command(client, args))
elif demisto.command() == 'f5-ltm-get-pool-member-stats':
return_results(ltm_get_pool_member_stats_command(client, args))
elif demisto.command() == 'f5-ltm-get-node-stats':
return_results(ltm_get_node_stats_command(client, args))
elif demisto.command() == 'f5-ltm-get-node-by-address':
return_results(ltm_get_node_by_address_command(client, args))
elif demisto.command() == 'f5-ltm-get-pool-by-node':
return_results(ltm_get_pools_by_node_command(client, args))
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
28,577 |
def plot_forest(
data,
kind="forestplot",
model_names=None,
var_names=None,
filter_vars=None,
transform=None,
coords=None,
combined=False,
hdi_prob=None,
rope=None,
quartiles=True,
ess=False,
r_hat=False,
colors="cycle",
textsize=None,
linewidth=None,
markersize=None,
legend=True,
labeller=None,
ridgeplot_alpha=None,
ridgeplot_overlap=2,
ridgeplot_kind="auto",
ridgeplot_truncate=True,
ridgeplot_quantiles=None,
figsize=None,
ax=None,
backend=None,
backend_config=None,
backend_kwargs=None,
show=None,
):
"""Forest plot to compare HDI intervals from a number of distributions.
Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.
Parameters
----------
data: obj or list[obj]
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
kind: str
Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot"
model_names: list[str], optional
List with names for the models in the list of data. Useful when plotting more than one
dataset.
var_names: list[str], optional
List of variables to plot (defaults to None, which results in all variables plotted)
Prefix the variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like", interpret
var_names as substrings of the real variables names. If "regex", interpret var_names as
regular expressions on the real variables names. A la ``pandas.filter``.
transform: callable
Function to transform data (defaults to None, i.e. the identity function)
coords: dict, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`.
combined: bool
Flag for combining multiple chains into a single chain. If ``False``(default), chains will
be plotted separately.
hdi_prob: float, optional
Plots highest posterior density interval for chosen percentage of density.
Defaults to `0.94`.
rope: tuple or dictionary of tuples
Lower and upper values of the Region Of Practical Equivalence. If a list with one interval
only is provided, the **rope** will be displayed across the y-axis. If more than one
interval is provided the length of the list should match the number of variables.
quartiles: bool, optional
Flag for plotting the interquartile range, in addition to the hdi_prob intervals.
Defaults to ``True``.
r_hat: bool, optional
Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False
ess: bool, optional
Flag for plotting the effective sample size. Defaults to ``False``.
colors: list or string, optional
list with valid matplotlib colors, one color per model. Alternative a string can be passed.
If the string is `cycle`, it will automatically chose a color per model from the matplotlibs
cycle. If a single color is passed, eg 'k', 'C2', 'red' this color will be used for all
models. Defaults to 'cycle'.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``.
linewidth: int
Line width throughout. If None it will be autoscaled based on ``figsize``.
markersize: int
Markersize throughout. If None it will be autoscaled based on ``figsize``.
legend : bool, optional
Show a legend with the color encoded model information.
Defaults to ``True`` if there are multiple models
labeller : labeller instance, optional
Class providing the method ``make_model_label`` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ridgeplot_alpha: float
Transparency for ridgeplot fill. If **0**, border is colored by model, otherwise
a `black` outline is used.
ridgeplot_overlap: float
Overlap height for ridgeplots.
ridgeplot_kind: string
By default ("auto") continuous variables are plotted using KDEs and discrete ones using
histograms. To override this use "hist" to plot histograms and "density" for KDEs
ridgeplot_truncate: bool
Whether to truncate densities according to the value of hdi_prop. Defaults to ``True``.
ridgeplot_quantiles: list
Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
Defaults to ``None``.
figsize: tuple
Figure size. If ``None``, it will be defined automatically.
ax: axes, optional
:class:`matplotlib.axes` or :class:`bokeh.plotting.figure`.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``.
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
gridspec: matplotlib GridSpec or bokeh figures
See Also
--------
plot_posterior: Plot Posterior densities in the style of John K. Kruschke’s book.
plot_density: Generate KDE plots for continuous variables and histograms for discrete ones.
Examples
--------
Forestplot
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered_data = az.load_arviz_data('non_centered_eight')
>>> axes = az.plot_forest(non_centered_data,
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Forestplot with multiple datasets
.. plot::
:context: close-figs
>>> centered_data = az.load_arviz_data('centered_eight')
>>> axes = az.plot_forest([non_centered_data, centered_data],
>>> model_names = ["non centered eight", "centered eight"],
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools models')
Forestplot with ropes
.. plot::
:context: close-figs
>>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}
>>> axes = az.plot_forest(non_centered_data,
>>> rope=rope,
>>> var_names='~tau',
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_overlap=3,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot non-truncated and with quantiles
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_truncate=False,
>>> ridgeplot_quantiles=[.25, .5, .75],
>>> ridgeplot_overlap=0.7,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
"""
if not isinstance(data, (list, tuple)):
data = [data]
if len(data) == 1:
legend = False
if coords is None:
coords = {}
if labeller is None:
labeller = NoModelLabeller() if legend else BaseLabeller()
datasets = [convert_to_dataset(datum) for datum in reversed(data)]
if transform is not None:
datasets = [transform(dataset) for dataset in datasets]
datasets = get_coords(
datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords
)
var_names = _var_names(var_names, datasets, filter_vars)
ncols, width_ratios = 1, [3]
if ess:
ncols += 1
width_ratios.append(1)
if r_hat:
ncols += 1
width_ratios.append(1)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
plot_forest_kwargs = dict(
ax=ax,
datasets=datasets,
var_names=var_names,
model_names=model_names,
combined=combined,
colors=colors,
figsize=figsize,
width_ratios=width_ratios,
linewidth=linewidth,
markersize=markersize,
kind=kind,
ncols=ncols,
hdi_prob=hdi_prob,
quartiles=quartiles,
rope=rope,
ridgeplot_overlap=ridgeplot_overlap,
ridgeplot_alpha=ridgeplot_alpha,
ridgeplot_kind=ridgeplot_kind,
ridgeplot_truncate=ridgeplot_truncate,
ridgeplot_quantiles=ridgeplot_quantiles,
textsize=textsize,
legend=legend,
labeller=labeller,
ess=ess,
r_hat=r_hat,
backend_kwargs=backend_kwargs,
backend_config=backend_config,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_forest", "forestplot", backend)
axes = plot(**plot_forest_kwargs)
return axes
|
def plot_forest(
data,
kind="forestplot",
model_names=None,
var_names=None,
filter_vars=None,
transform=None,
coords=None,
combined=False,
hdi_prob=None,
rope=None,
quartiles=True,
ess=False,
r_hat=False,
colors="cycle",
textsize=None,
linewidth=None,
markersize=None,
legend=True,
labeller=None,
ridgeplot_alpha=None,
ridgeplot_overlap=2,
ridgeplot_kind="auto",
ridgeplot_truncate=True,
ridgeplot_quantiles=None,
figsize=None,
ax=None,
backend=None,
backend_config=None,
backend_kwargs=None,
show=None,
):
"""Forest plot to compare HDI intervals from a number of distributions.
Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.
Parameters
----------
data: obj or list[obj]
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
kind: str
Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot"
model_names: list[str], optional
List with names for the models in the list of data. Useful when plotting more than one
dataset.
var_names: list[str], optional
List of variables to plot (defaults to None, which results in all variables plotted)
Prefix the variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like", interpret
var_names as substrings of the real variables names. If "regex", interpret var_names as
regular expressions on the real variables names. A la ``pandas.filter``.
transform: callable
Function to transform data (defaults to None, i.e. the identity function)
coords: dict, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`.
combined: bool
Flag for combining multiple chains into a single chain. If ``False`` (default), chains will
be plotted separately.
hdi_prob: float, optional
Plots highest posterior density interval for chosen percentage of density.
Defaults to `0.94`.
rope: tuple or dictionary of tuples
Lower and upper values of the Region Of Practical Equivalence. If a list with one interval
only is provided, the **rope** will be displayed across the y-axis. If more than one
interval is provided the length of the list should match the number of variables.
quartiles: bool, optional
Flag for plotting the interquartile range, in addition to the hdi_prob intervals.
Defaults to ``True``.
r_hat: bool, optional
Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False
ess: bool, optional
Flag for plotting the effective sample size. Defaults to ``False``.
colors: list or string, optional
list with valid matplotlib colors, one color per model. Alternatively, a string can be passed.
If the string is `cycle`, it will automatically choose a color per model from matplotlib's
cycle. If a single color is passed, e.g. 'k', 'C2', 'red', this color will be used for all
models. Defaults to 'cycle'.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``.
linewidth: int
Line width throughout. If None it will be autoscaled based on ``figsize``.
markersize: int
Markersize throughout. If None it will be autoscaled based on ``figsize``.
legend : bool, optional
Show a legend with the color encoded model information.
Defaults to ``True`` if there are multiple models
labeller : labeller instance, optional
Class providing the method ``make_model_label`` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ridgeplot_alpha: float
Transparency for ridgeplot fill. If **0**, border is colored by model, otherwise
a `black` outline is used.
ridgeplot_overlap: float
Overlap height for ridgeplots.
ridgeplot_kind: string
By default ("auto") continuous variables are plotted using KDEs and discrete ones using
histograms. To override this use "hist" to plot histograms and "density" for KDEs
ridgeplot_truncate: bool
Whether to truncate densities according to the value of hdi_prob. Defaults to ``True``.
ridgeplot_quantiles: list
Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
Defaults to ``None``.
figsize: tuple
Figure size. If ``None``, it will be defined automatically.
ax: axes, optional
:class:`matplotlib.axes` or :class:`bokeh.plotting.figure`.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Defaults to "matplotlib".
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``.
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
gridspec: matplotlib GridSpec or bokeh figures
See Also
--------
plot_posterior: Plot Posterior densities in the style of John K. Kruschke’s book.
plot_density: Generate KDE plots for continuous variables and histograms for discrete ones.
Examples
--------
Forestplot
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered_data = az.load_arviz_data('non_centered_eight')
>>> axes = az.plot_forest(non_centered_data,
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Forestplot with multiple datasets
.. plot::
:context: close-figs
>>> centered_data = az.load_arviz_data('centered_eight')
>>> axes = az.plot_forest([non_centered_data, centered_data],
>>> model_names = ["non centered eight", "centered eight"],
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools models')
Forestplot with ropes
.. plot::
:context: close-figs
>>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}
>>> axes = az.plot_forest(non_centered_data,
>>> rope=rope,
>>> var_names='~tau',
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_overlap=3,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot non-truncated and with quantiles
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_truncate=False,
>>> ridgeplot_quantiles=[.25, .5, .75],
>>> ridgeplot_overlap=0.7,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
"""
if not isinstance(data, (list, tuple)):
data = [data]
if len(data) == 1:
legend = False
if coords is None:
coords = {}
if labeller is None:
labeller = NoModelLabeller() if legend else BaseLabeller()
datasets = [convert_to_dataset(datum) for datum in reversed(data)]
if transform is not None:
datasets = [transform(dataset) for dataset in datasets]
datasets = get_coords(
datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords
)
var_names = _var_names(var_names, datasets, filter_vars)
ncols, width_ratios = 1, [3]
if ess:
ncols += 1
width_ratios.append(1)
if r_hat:
ncols += 1
width_ratios.append(1)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
plot_forest_kwargs = dict(
ax=ax,
datasets=datasets,
var_names=var_names,
model_names=model_names,
combined=combined,
colors=colors,
figsize=figsize,
width_ratios=width_ratios,
linewidth=linewidth,
markersize=markersize,
kind=kind,
ncols=ncols,
hdi_prob=hdi_prob,
quartiles=quartiles,
rope=rope,
ridgeplot_overlap=ridgeplot_overlap,
ridgeplot_alpha=ridgeplot_alpha,
ridgeplot_kind=ridgeplot_kind,
ridgeplot_truncate=ridgeplot_truncate,
ridgeplot_quantiles=ridgeplot_quantiles,
textsize=textsize,
legend=legend,
labeller=labeller,
ess=ess,
r_hat=r_hat,
backend_kwargs=backend_kwargs,
backend_config=backend_config,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_forest", "forestplot", backend)
axes = plot(**plot_forest_kwargs)
return axes
|
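An illustrative aside on the layout bookkeeping shared by both versions above: each requested diagnostic (ess, r_hat) adds one narrow column next to the main forest/ridge axis. A minimal standalone sketch in plain Python (the function name forest_layout is ours, not ArviZ's):

def forest_layout(ess=False, r_hat=False):
    # Start with a single wide column for the forest/ridge plot itself.
    ncols, width_ratios = 1, [3]
    for wanted in (ess, r_hat):
        if wanted:
            ncols += 1
            width_ratios.append(1)   # one slim column per diagnostic
    return ncols, width_ratios

print(forest_layout())                      # (1, [3])
print(forest_layout(ess=True, r_hat=True))  # (3, [3, 1, 1])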
59,722 |
def _blockm(block_method, table, outfile, x, y, z, **kwargs):
r"""
Block average (x,y,z) data tables by mean, median, or mode estimation.
Reads arbitrarily located (x,y,z) triples [or optionally weighted
quadruples (x,y,z,w)] from a table and writes to the output a mean,
median, or mode (depending on ``block_method``) position and value for every
non-empty block in a grid region defined by the ``region`` and ``spacing``
parameters.
Parameters
----------
block_method : str
Name of the GMT module to call. Must be "blockmean", "blockmedian", or "blockmode".
Returns
-------
output : pandas.DataFrame or None
Return type depends on whether the ``outfile`` parameter is set:
- :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile``
is not set
- None if ``outfile`` is set (filtered output will be stored in file
set by ``outfile``)
"""
with GMTTempFile(suffix=".csv") as tmpfile:
with Session() as lib:
# Choose how data will be passed into the module
table_context = lib.virtualfile_from_data(
check_kind="vector", data=table, x=x, y=y, z=z
)
# Run blockm* on data table
with table_context as infile:
if outfile is None:
outfile = tmpfile.name
arg_str = " ".join([infile, build_arg_string(kwargs), "->" + outfile])
lib.call_module(module=block_method, args=arg_str)
# Read temporary csv output to a pandas table
if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame
try:
column_names = table.columns.to_list()
result = pd.read_csv(tmpfile.name, sep="\t", names=column_names)
except AttributeError: # 'str' object has no attribute 'columns'
result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">")
elif outfile != tmpfile.name: # return None if outfile set, output in outfile
result = None
return result
|
def _blockm(block_method, table, outfile, x, y, z, **kwargs):
r"""
Block average (x,y,z) data tables by mean, median, or mode estimation.
Reads arbitrarily located (x,y,z) triples [or optionally weighted
quadruples (x,y,z,w)] from a table and writes to the output a mean,
median, or mode (depending on ``block_method``) position and value for
every non-empty block in a grid region defined by the ``region`` and
``spacing`` parameters.
Parameters
----------
block_method : str
Name of the GMT module to call. Must be "blockmean", "blockmedian", or "blockmode".
Returns
-------
output : pandas.DataFrame or None
Return type depends on whether the ``outfile`` parameter is set:
- :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile``
is not set
- None if ``outfile`` is set (filtered output will be stored in file
set by ``outfile``)
"""
with GMTTempFile(suffix=".csv") as tmpfile:
with Session() as lib:
# Choose how data will be passed into the module
table_context = lib.virtualfile_from_data(
check_kind="vector", data=table, x=x, y=y, z=z
)
# Run blockm* on data table
with table_context as infile:
if outfile is None:
outfile = tmpfile.name
arg_str = " ".join([infile, build_arg_string(kwargs), "->" + outfile])
lib.call_module(module=block_method, args=arg_str)
# Read temporary csv output to a pandas table
if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame
try:
column_names = table.columns.to_list()
result = pd.read_csv(tmpfile.name, sep="\t", names=column_names)
except AttributeError: # 'str' object has no attribute 'columns'
result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">")
elif outfile != tmpfile.name: # return None if outfile set, output in outfile
result = None
return result
|
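A standalone sketch (not PyGMT code) of the output convention _blockm implements in both versions: results land in a temporary file unless the caller names an outfile, and a pandas.DataFrame is returned only in the temporary-file case. The helper name run_and_collect and the to_csv call standing in for the GMT module invocation are assumptions for illustration:

import os
import tempfile

import pandas as pd

def run_and_collect(df, outfile=None):
    tmpname = os.path.join(tempfile.mkdtemp(), "block.csv")
    target = outfile if outfile is not None else tmpname
    # Stand-in for lib.call_module writing tab-separated output to `target`.
    df.to_csv(target, sep="\t", header=False, index=False)
    if target == tmpname:   # caller did not set outfile -> return a DataFrame
        return pd.read_csv(tmpname, sep="\t", names=df.columns.to_list())
    return None             # caller's named file holds the output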
32,428 |
def air_acquire_command(client: Client, args: Dict[str, Any]) -> CommandResults:
'''Command handler for acquire command'''
endpoint = args.get('endpoint', None)
profile = args.get('profile', None)
caseid = args.get('caseid', None)
organization_id = args.get('organization_id', None)
result: Dict[str, Any] = client.air_acquire(endpoint, profile, caseid, organization_id)
return CommandResults(
outputs_prefix='Binalyze.Air.Acquisition',
outputs_key_field='endpoint',
outputs=result,
)
|
def air_acquire_command(client: Client, args: Dict[str, Any]) -> CommandResults:
'''Command handler for acquire command'''
endpoint = args.get('endpoint', None)
profile = args.get('profile', None)
caseid = args.get('caseid', None)
organization_id = args.get('organization_id', None)
result: Dict[str, Any] = client.air_acquire(endpoint, profile, caseid, organization_id)
return CommandResults(
outputs_prefix='BinalyzeAIR.Acquisition',
outputs_key_field='endpoint',
outputs=result,
)
|
32 |
def summary():
from internetarchive import search_items
params = {'page': 1, 'rows': 500}
fields = ['identifier','est_book_price','est_scan_price', 'scan_price',
'book_price', 'repub_state', 'imagecount', 'title',
'openlibrary_edition']
q = 'collection:openlibraryscanningteam'
config = dict(general=dict(secure=False))
s = search_items(q, fields=fields, params=params, config=config)
items = list(s)
statuses = {}
for i, book in enumerate(items):
if not book.get('book_price'):
items[i]['status'] = 0
statuses[0] = statuses.get(0, 0) + 1
elif int(book.get('repub_state', -1)) == -1:
items[i]['status'] = 1
statuses[1] = statuses.get(1, 0) + 1
elif int(book.get('repub_state', 0)) < 14:
items[i]['status'] = 2
statuses[2] = statuses.get(2, 0) + 1
else:
items[i]['status'] = 3
statuses[3] = statuses.get(3, 0) + 1
total_pages_scanned = sum(int(i.get('imagecount', 0)) for i in items)
total_unscanned_books = len([i for i in items if not i.get('imagecount', 0)])
total_cost_cents = sum(int(i.get('est_book_price', 0)) + int(i.get('est_scan_price', 0))
for i in items)
book_cost_cents = sum(int(i.get('book_price', 0)) for i in items)
est_book_cost_cents = sum(int(i.get('est_book_price', 0)) for i in items)
scan_cost_cents = (PAGE_COST_CENTS * total_pages_scanned) + (SETUP_COST_CENTS * len(items))
est_scan_cost_cents = sum(int(i.get('est_scan_price', 0)) for i in items)
return {
'books': items,
'statuses': statuses,
'total_pages_scanned': total_pages_scanned,
'total_unscanned_books': total_unscanned_books,
'total_cost_cents': total_cost_cents,
'book_cost_cents': book_cost_cents,
'est_book_cost_cents': est_book_cost_cents,
'delta_book_cost_cents': est_book_cost_cents - book_cost_cents,
'scan_cost_cents': scan_cost_cents,
'est_scan_cost_cents': est_scan_cost_cents,
'delta_scan_cost_cents': est_scan_cost_cents - scan_cost_cents,
}
|
def summary():
from internetarchive import search_items
params = {'page': 1, 'rows': 500}
fields = ['identifier','est_book_price','est_scan_price', 'scan_price',
'book_price', 'repub_state', 'imagecount', 'title',
'openlibrary_edition']
q = 'collection:openlibraryscanningteam'
config = dict(general=dict(secure=False))
s = search_items(q, fields=fields, params=params, config=config)
items = list(s)
statuses = {}
for book in items:
if not book.get('book_price'):
items[i]['status'] = 0
statuses[0] = statuses.get(0, 0) + 1
elif int(book.get('repub_state', -1)) == -1:
items[i]['status'] = 1
statuses[1] = statuses.get(1, 0) + 1
elif int(book.get('repub_state', 0)) < 14:
items[i]['status'] = 2
statuses[2] = statuses.get(2, 0) + 1
else:
items[i]['status'] = 3
statuses[3] = statuses.get(3, 0) + 1
total_pages_scanned = sum(int(i.get('imagecount', 0)) for i in items)
total_unscanned_books = len([i for i in items if not i.get('imagecount', 0)])
total_cost_cents = sum(int(i.get('est_book_price', 0)) + int(i.get('est_scan_price', 0))
for i in items)
book_cost_cents = sum(int(i.get('book_price', 0)) for i in items)
est_book_cost_cents = sum(int(i.get('est_book_price', 0)) for i in items)
scan_cost_cents = (PAGE_COST_CENTS * total_pages_scanned) + (SETUP_COST_CENTS * len(items))
est_scan_cost_cents = sum(int(i.get('est_scan_price', 0)) for i in items)
return {
'books': items,
'statuses': statuses,
'total_pages_scanned': total_pages_scanned,
'total_unscanned_books': total_unscanned_books,
'total_cost_cents': total_cost_cents,
'book_cost_cents': book_cost_cents,
'est_book_cost_cents': est_book_cost_cents,
'delta_book_cost_cents': est_book_cost_cents - book_cost_cents,
'scan_cost_cents': scan_cost_cents,
'est_scan_cost_cents': est_scan_cost_cents,
'delta_scan_cost_cents': est_scan_cost_cents - scan_cost_cents,
}
|
44,843 |
def _should_use_mlflowdbfs(root_uri):
# The `mlflowdbfs` scheme does not appear in the available schemes returned from
# the Hadoop FileSystem API until a read call has been issued.
from mlflow.utils._spark_utils import _get_active_spark_session
if (
not is_valid_dbfs_uri(root_uri)
or not is_databricks_acled_artifacts_uri(root_uri)
or not databricks_utils.is_in_databricks_runtime()
or environment_variables._DISABLE_MLFLOWDBFS.get() not in ["", "False", "false"]
):
return False
mlflowdbfs_read_exception_str = None
try:
_get_active_spark_session().read.load("mlflowdbfs:///artifact?run_id=foo&path=/bar")
except Exception as e:
# The load invocation is expected to throw an exception.
mlflowdbfs_read_exception_str = str(e)
try:
return _HadoopFileSystem.is_filesystem_available(_MLFLOWDBFS_SCHEME)
except Exception:
if "MlflowdbfsClient" in (mlflowdbfs_read_exception_str or ""):
# The HDFS filesystem logic used to determine mlflowdbfs availability on Databricks
# clusters may not work on certain Databricks cluster types due to unavailability of
# the _HadoopFileSystem.is_filesystem_available() API. As a temporary workaround,
# we check the contents of the expected exception raised by a dummy mlflowdbfs
# read for evidence that mlflowdbfs is available. If "MlflowdbfsClient" is present
# in the exception contents, we can safely assume that mlflowdbfs is available because
# `MlflowdbfsClient` is exclusively used by mlflowdbfs for performing MLflow
# file storage operations
#
# TODO: Remove this logic once the _HadoopFileSystem.is_filesystem_available() check
# below is determined to work on all Databricks cluster types
return True
else:
return False
|
def _should_use_mlflowdbfs(root_uri):
# The `mlflowdbfs` scheme does not appear in the available schemes returned from
# the Hadoop FileSystem API until a read call has been issued.
from mlflow.utils._spark_utils import _get_active_spark_session
if (
not is_valid_dbfs_uri(root_uri)
or not is_databricks_acled_artifacts_uri(root_uri)
or not databricks_utils.is_in_databricks_runtime()
or environment_variables._DISABLE_MLFLOWDBFS.get().lower() not in ["", "false"]
):
return False
mlflowdbfs_read_exception_str = None
try:
_get_active_spark_session().read.load("mlflowdbfs:///artifact?run_id=foo&path=/bar")
except Exception as e:
# The load invocation is expected to throw an exception.
mlflowdbfs_read_exception_str = str(e)
try:
return _HadoopFileSystem.is_filesystem_available(_MLFLOWDBFS_SCHEME)
except Exception:
if "MlflowdbfsClient" in (mlflowdbfs_read_exception_str or ""):
# The HDFS filesystem logic used to determine mlflowdbfs availability on Databricks
# clusters may not work on certain Databricks cluster types due to unavailability of
# the _HadoopFileSystem.is_filesystem_available() API. As a temporary workaround,
# we check the contents of the expected exception raised by a dummy mlflowdbfs
# read for evidence that mlflowdbfs is available. If "MlflowdbfsClient" is present
# in the exception contents, we can safely assume that mlflowdbfs is available because
# `MlflowdbfsClient` is exclusively used by mlflowdbfs for performing MLflow
# file storage operations
#
# TODO: Remove this logic once the _HadoopFileSystem.is_filesystem_available() check
# below is determined to work on all Databricks cluster types
return True
else:
return False
|
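The only behavioural change in this pair is that the disable flag is now compared case-insensitively. A tiny sketch of that normalisation, using a hypothetical environment variable name rather than MLflow's real setting:

import os

def mlflowdbfs_disabled(var_name="HYPOTHETICAL_DISABLE_MLFLOWDBFS"):
    # Unset, empty, or any casing of "false" keeps mlflowdbfs enabled;
    # everything else counts as an explicit opt-out.
    return os.environ.get(var_name, "").lower() not in ("", "false")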
57,789 |
def main():
user_profile = None
params = demisto.params()
command = demisto.command()
args = demisto.args()
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
base_url = params.get('url')
if base_url[-1] != '/':
base_url += '/'
access_token = params.get('access_token')
directory_id = params.get('directory_id')
mapper_in = params.get('mapper_in')
mapper_out = params.get('mapper_out')
is_create_enabled = params.get("create_user_enabled")
is_disable_enabled = params.get("disable_user_enabled")
is_update_enabled = demisto.params().get("update_user_enabled")
create_if_not_exists = demisto.params().get("create_if_not_exists")
iam_command = IAMCommand(is_create_enabled, is_disable_enabled, is_update_enabled,
create_if_not_exists, mapper_in, mapper_out)
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'Bearer {access_token}'
}
client = Client(
base_url=base_url,
directory_id=directory_id,
verify=verify_certificate,
proxy=proxy,
headers=headers,
ok_codes=(200, 201)
)
demisto.debug(f'Command being called is {command}')
if command == 'iam-get-user':
user_profile = iam_command.get_user(client, args, 'username')
elif command == 'iam-create-user':
user_profile = iam_command.create_user(client, args)
elif command == 'iam-update-user':
user_profile = iam_command.update_user(client, args)
elif command == 'iam-disable-user':
user_profile = iam_command.disable_user(client, args)
if user_profile:
# user_profile.return_outputs()
return_results(user_profile)
try:
if command == 'test-module':
test_module(client)
elif command == 'get-mapping-fields':
return_results(get_mapping_fields(client))
except Exception:
# For any other integration command exception, return an error
return_error(f'Failed to execute {command} command. Traceback: {traceback.format_exc()}')
|
def main():
user_profile = None
params = demisto.params()
command = demisto.command()
args = demisto.args()
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
base_url = params.get('url')
if base_url[-1] != '/':
base_url += '/'
access_token = params.get('access_token')
directory_id = params.get('directory_id')
mapper_in = params.get('mapper_in')
mapper_out = params.get('mapper_out')
is_create_enabled = params.get("create_user_enabled")
is_disable_enabled = params.get("disable_user_enabled")
is_update_enabled = demisto.params().get("update_user_enabled")
create_if_not_exists = demisto.params().get("create_if_not_exists")
iam_command = IAMCommand(is_create_enabled, is_disable_enabled, is_update_enabled,
create_if_not_exists, mapper_in, mapper_out, attr='username')
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'Bearer {access_token}'
}
client = Client(
base_url=base_url,
directory_id=directory_id,
verify=verify_certificate,
proxy=proxy,
headers=headers,
ok_codes=(200, 201)
)
demisto.debug(f'Command being called is {command}')
if command == 'iam-get-user':
user_profile = iam_command.get_user(client, args, 'username')
elif command == 'iam-create-user':
user_profile = iam_command.create_user(client, args)
elif command == 'iam-update-user':
user_profile = iam_command.update_user(client, args)
elif command == 'iam-disable-user':
user_profile = iam_command.disable_user(client, args)
if user_profile:
# user_profile.return_outputs()
return_results(user_profile)
try:
if command == 'test-module':
test_module(client)
elif command == 'get-mapping-fields':
return_results(get_mapping_fields(client))
except Exception:
# For any other integration command exception, return an error
return_error(f'Failed to execute {command} command. Traceback: {traceback.format_exc()}')
|
8,128 |
def test_norm_clip():
# Tests that the default normalizer has clipping disabled
assert cor.plot_settings['norm'].clip is False
|
def test_norm_clip():
# Tests that the default normalizer has clipping disabled
assert not cor.plot_settings['norm'].clip
|
32,429 |
def update_alert_analyst_verdict(client: Client, args: dict) -> CommandResults:
"""
Apply a update analyst verdict action to a group of aleerts. Relevant for API version 2.1
"""
contents = []
context_entries = []
# Get arguments
alert_ids = argToList(args.get('alert_ids'))
action = args.get('action')
# Make request and get raw response
updated_alerts = client.update_alert_analyst_verdict_request(alert_ids, action)
# Parse response into context & content entries
if updated_alerts.get('affected') and int(updated_alerts.get('affected')) > 0:
updated = True
meta = f'Total of {updated_alerts.get("affected")} provided alerts analyst verdict were updated successfully'
else:
updated = False
meta = 'No alerts were updated'
for alert_id in alert_ids:
contents.append({
'Updated': updated,
'ID': alert_id,
'Analyst Verdict Action': action,
})
context_entries.append({
'Updated': updated,
'ID': alert_id,
'Updation': {
'Action': action
},
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Update alerts analyst verdict', contents, metadata=meta, removeNull=True),
outputs_prefix='SentinelOne.Alert',
outputs_key_field='ID',
outputs=context_entries,
raw_response=updated_alerts)
|
def update_alert_analyst_verdict(client: Client, args: dict) -> CommandResults:
"""
Apply a update analyst verdict action to a group of alerts. Relevant for API version 2.1
"""
contents = []
context_entries = []
# Get arguments
alert_ids = argToList(args.get('alert_ids'))
action = args.get('action')
# Make request and get raw response
updated_alerts = client.update_alert_analyst_verdict_request(alert_ids, action)
# Parse response into context & content entries
if updated_alerts.get('affected') and int(updated_alerts.get('affected')) > 0:
updated = True
meta = f'Total of {updated_alerts.get("affected")} provided alerts analyst verdict were updated successfully'
else:
updated = False
meta = 'No alerts were updated'
for alert_id in alert_ids:
contents.append({
'Updated': updated,
'ID': alert_id,
'Analyst Verdict Action': action,
})
context_entries.append({
'Updated': updated,
'ID': alert_id,
'Updation': {
'Action': action
},
})
return CommandResults(
readable_output=tableToMarkdown('Sentinel One - Update alerts analyst verdict', contents, metadata=meta, removeNull=True),
outputs_prefix='SentinelOne.Alert',
outputs_key_field='ID',
outputs=context_entries,
raw_response=updated_alerts)
|
53,606 |
def looks_like_typing_newtype(node):
func = node.func
if isinstance(func, Attribute):
return func.attrname == "NewType"
if isinstance(func, Name):
return func.name == "NewType"
return False
|
def looks_like_typing_newtype(node: nodes.Call) -> bool:
"""Checks whether a Call node is a NewType call"""
func = node.func
if isinstance(func, Attribute):
return func.attrname == "NewType"
if isinstance(func, Name):
return func.name == "NewType"
return False
|
18,202 |
def _commands(parser, args):
"""This is the 'regular' command, which can be multiple times.
See commands() below for --update-completion handling.
"""
formatter = formatters[args.format]
# check header first so we don't open out files unnecessarily
if args.header and not os.path.exists(args.header):
tty.die("No such file: '%s'" % args.header)
# if we're updating an existing file, only write output if a command
# or the header is newer than the file.
if args.update:
if os.path.exists(args.update):
files = [
spack.cmd.get_module(command).__file__.rstrip('c') # pyc -> py
for command in spack.cmd.all_commands()]
if args.header:
files.append(args.header)
last_update = os.path.getmtime(args.update)
if not any(os.path.getmtime(f) > last_update for f in files):
tty.msg('File is up to date: %s' % args.update)
return
tty.msg('Updating file: %s' % args.update)
with open(args.update, 'w') as f:
prepend_header(args, f)
formatter(args, f)
if args.update_completion:
fs.set_executable(args.update)
else:
prepend_header(args, sys.stdout)
formatter(args, sys.stdout)
|
def _commands(parser, args):
"""This is the 'regular' command, which can be called multiple times.
See commands() below for --update-completion handling.
"""
formatter = formatters[args.format]
# check header first so we don't open out files unnecessarily
if args.header and not os.path.exists(args.header):
tty.die("No such file: '%s'" % args.header)
# if we're updating an existing file, only write output if a command
# or the header is newer than the file.
if args.update:
if os.path.exists(args.update):
files = [
spack.cmd.get_module(command).__file__.rstrip('c') # pyc -> py
for command in spack.cmd.all_commands()]
if args.header:
files.append(args.header)
last_update = os.path.getmtime(args.update)
if not any(os.path.getmtime(f) > last_update for f in files):
tty.msg('File is up to date: %s' % args.update)
return
tty.msg('Updating file: %s' % args.update)
with open(args.update, 'w') as f:
prepend_header(args, f)
formatter(args, f)
if args.update_completion:
fs.set_executable(args.update)
else:
prepend_header(args, sys.stdout)
formatter(args, sys.stdout)
|
25,197 |
def _has_copy_method(node):
return any(
isinstance(node, klass)
for klass in [nodes.Dict, nodes.List, nodes.Set, objects.FrozenSet]
)
|
def _has_copy_method(node):
return isinstance(node, (nodes.Dict, nodes.List, nodes.Set, objects.FrozenSet))
|
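The rewrite works because isinstance accepts a tuple of types, which is exactly equivalent to the any() form it replaces. A quick check with builtin containers:

classes = (dict, list, set, frozenset)
for node in ({}, [], set(), frozenset(), "not a container"):
    assert any(isinstance(node, k) for k in classes) == isinstance(node, classes)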
29,809 |
def get_next_x_cron_runs(num_runs, schedule, start_datetime):
iter = croniter(schedule, start_datetime)
next_runs = []
for _ in range(num_runs):
next_runs.append(iter.get_next(datetime))
return next_runs
|
def list_upcoming_runs(cron_schedule: str, starting_from: datetime.datetime, num_runs: int = 5):
iter = croniter(schedule, start_datetime)
next_runs = []
for _ in range(num_runs):
next_runs.append(iter.get_next(datetime))
return next_runs
|
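A self-contained croniter usage sketch, independent of either version above, showing how successive run times are generated from a cron expression and a start time (the schedule and dates here are arbitrary examples):

from datetime import datetime
from croniter import croniter

it = croniter("*/30 * * * *", datetime(2024, 1, 1, 0, 0))
next_runs = [it.get_next(datetime) for _ in range(3)]
# [datetime(2024, 1, 1, 0, 30), datetime(2024, 1, 1, 1, 0), datetime(2024, 1, 1, 1, 30)]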
31,981 |
def convert_timeframe_string_to_json(time_to_convert: str) -> Dict[str, Any]:
"""Convert a timeframe string to the JSON required for XQL queries.
Args:
time_to_convert (str): The time frame string to convert (supports seconds, minutes, hours, days, months, years, between).
Returns:
dict: The timeframe parameters in JSON.
"""
try:
time_to_convert_lower = time_to_convert.strip().lower()
if time_to_convert_lower.startswith('between '):
tokens = time_to_convert_lower[len('between '):].split(' and ')
if len(tokens) == 2:
time_from = dateparser.parse(tokens[0], settings={'TIMEZONE': 'UTC'})
time_to = dateparser.parse(tokens[1], settings={'TIMEZONE': 'UTC'})
return {'from': int(time_from.timestamp()) * 1000, 'to': int(time_to.timestamp()) * 1000}
else:
relative = dateparser.parse(time_to_convert, settings={'TIMEZONE': 'UTC'})
return {'relativeTime': int((datetime.utcnow() - relative).total_seconds()) * 1000}
raise ValueError(f'Invalid timeframe: {time_to_convert}')
except Exception as exc:
raise DemistoException(f'Please enter a valid time frame (seconds, minutes, hours, days, weeks, months, '
f'years, between).\n{str(exc)}')
|
def convert_timeframe_string_to_json(time_to_convert: str) -> Dict[str, int]:
"""Convert a timeframe string to the JSON required for XQL queries.
Args:
time_to_convert (str): The time frame string to convert (supports seconds, minutes, hours, days, months, years, between).
Returns:
dict: The timeframe parameters in JSON.
"""
try:
time_to_convert_lower = time_to_convert.strip().lower()
if time_to_convert_lower.startswith('between '):
tokens = time_to_convert_lower[len('between '):].split(' and ')
if len(tokens) == 2:
time_from = dateparser.parse(tokens[0], settings={'TIMEZONE': 'UTC'})
time_to = dateparser.parse(tokens[1], settings={'TIMEZONE': 'UTC'})
return {'from': int(time_from.timestamp()) * 1000, 'to': int(time_to.timestamp()) * 1000}
else:
relative = dateparser.parse(time_to_convert, settings={'TIMEZONE': 'UTC'})
return {'relativeTime': int((datetime.utcnow() - relative).total_seconds()) * 1000}
raise ValueError(f'Invalid timeframe: {time_to_convert}')
except Exception as exc:
raise DemistoException(f'Please enter a valid time frame (seconds, minutes, hours, days, weeks, months, '
f'years, between).\n{str(exc)}')
|
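A worked example of the epoch-millisecond arithmetic used in the 'between' branch above, with fixed UTC datetimes standing in for dateparser output:

from datetime import datetime, timezone

time_from = datetime(2024, 1, 1, tzinfo=timezone.utc)
time_to = datetime(2024, 1, 2, tzinfo=timezone.utc)
payload = {'from': int(time_from.timestamp()) * 1000, 'to': int(time_to.timestamp()) * 1000}
assert payload['to'] - payload['from'] == 86_400_000   # exactly one day, in milliseconds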
41,572 |
def create_json_sidecar(output_data, sub_id):
# Path for each subject destination bids folder
path_folder_sub_id_bids = os.path.join(output_data, sub_id, 'microscopy')
# Create filename for json sidecar
item_out = sub_id + "_TEM.json"
# Define json sidecar content
data_json = {"PixelSize": [0.00236, 0.00236],
"FieldOfView": [8.88, 5.39],
"BodyPart": "BRAIN",
"BodyPartDetails": "splenium",
"SampleFixation": "2% paraformaldehyde, 2.5% glutaraldehyde",
"Environment": "exvivo"
}
# Write content to file
with open(os.path.join(path_folder_sub_id_bids, item_out), 'w') as json_file:
json.dump(data_json, json_file, indent=4)
|
def create_json_sidecar(output_data, sub_id):
# Path for each subject destination bids folder
path_folder_sub_id_bids = os.path.join(output_data, sub_id, 'micr')
# Create filename for json sidecar
item_out = sub_id + "_TEM.json"
# Define json sidecar content
data_json = {"PixelSize": [0.00236, 0.00236],
"FieldOfView": [8.88, 5.39],
"BodyPart": "BRAIN",
"BodyPartDetails": "splenium",
"SampleFixation": "2% paraformaldehyde, 2.5% glutaraldehyde",
"Environment": "exvivo"
}
# Write content to file
with open(os.path.join(path_folder_sub_id_bids, item_out), 'w') as json_file:
json.dump(data_json, json_file, indent=4)
|
57,928 |
def get_limit(args):
limit = arg_to_number(str(args.get("limit"))) if "limit" in args else None
page = arg_to_number(str(args.get("page"))) if "page" in args else None
page_size = arg_to_number(str(args.get("page_size"))) if "page_size" in args else None
if limit is None:
if page is not None and page_size is not None:
if page <= 0:
raise Exception('Chosen page number must be greater than 0')
limit = page_size * page
return limit, True, page_size
else:
limit = 50
return limit, False, page_size
|
def get_limit(args):
limit = arg_to_number(args.get('limit'))
page = arg_to_number(args.get('page'))
page_size = arg_to_number(args.get('page_size'))
if limit is None:
if page is not None and page_size is not None:
if page <= 0:
raise Exception('Chosen page number must be greater than 0')
limit = page_size * page
return limit, True, page_size
else:
limit = 50
return limit, False, page_size
|
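A small worked example of the paging arithmetic both versions of get_limit share: with no explicit limit, the effective limit is page_size * page (and falls back to 50 when no paging arguments are given at all):

page, page_size = 3, 25
limit = page_size * page
assert limit == 75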
12,356 |
def rate_per_ip(group, request):
ip = get_ip_or_random_ip(request)
if ip in settings.BLOCKED_IPS:
return '0/s'
return settings.RATELIMITS[group]
|
def rate_per_ip(group, request):
ip = get_ip_or_random_ip(request)
if ip in settings.BLOCKED_IPS:
return '0/s'
return settings.RATELIMITS.get(group, settings.DEFAULT_RATELIMIT)
|
58,001 |
def query_command(client: Client, args: Dict, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""
Executes the db2 query with the connection that was configured in the Client
Args:
client(Client): the client object with db connection
args(demisto.args): arguments for the query-command
"""
sql_query = str(args.get("query"))
limit = int(args.get("limit", 50))
skip = int(args.get("skip", 0))
bind_variable_name = args.get("bind_variables_name", "")
bind_variable_values = args.get("bind_variables_values", "")
try:
variables = bind_variables(bind_variable_name, bind_variable_values)
result, headers = client.execute_query(sql_query, variables)
converted_table = [dict(row) for row in result]
table = [{str(key): str(value) for key, value in dictionary.items()} for dictionary in converted_table]
table = table[skip: skip + limit]
human_readable = tableToMarkdown(name="Query result:", t=table, headers=headers, removeNull=True)
context = {"Result": table, "Query": sql_query, "DbName": f"{client.dbname}"}
entry_context: Dict = {"DB2(val.Query && val.Query === obj.Query)": {"DB2": context}}
client.close()
return human_readable, entry_context, table
except Exception as err:
client.close()
# In case there is no query executed and only an action e.g - insert, delete, update
# the result will raise an exception when we try to read the data from it
if str(err) == "Column information cannot be retrieved: ":
human_readable = "Command executed"
return human_readable, {}, []
raise DemistoException(err)
|
def query_command(client: Client, args: Dict, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""
Executes the db2 query with the connection that was configured in the Client
Args:
client(Client): the client object with db connection
args(demisto.args): arguments for the query-command
"""
sql_query = str(args.get("query"))
limit = int(args.get("limit", 50))
skip = int(args.get("skip", 0))
bind_variable_name = args.get("bind_variables_name", "")
bind_variable_values = args.get("bind_variables_values", "")
try:
variables = bind_variables(bind_variable_name, bind_variable_values)
result, headers = client.execute_query(sql_query, variables)
converted_table = [dict(row) for row in result]
table = [{str(key): str(value) for key, value in dictionary.items()} for dictionary in converted_table]
table = table[skip: skip + limit]
human_readable = tableToMarkdown(name="Query result:", t=table, headers=headers, removeNull=True)
context = {"Result": table, "Query": sql_query, "DbName": f"{client.dbname}"}
entry_context: Dict = {"DB2(val.Query && val.Query === obj.Query)": {"DB2": context}}
client.close()
return human_readable, entry_context, table
except Exception as err:
client.close()
demisto.error(f'error:\n {err}')
return CommandResults(readable_output='No results found')
|
36,128 |
def validate_positive_number_with_echo(ctx, param, value): # pylint: disable=unused-argument
"""Validate that the number passed to this parameter is a positive number.
Also echoes a message to the terminal
:param ctx: the `click.Context`
:param param: the parameter
:param value: the value passed for the parameter
:raises `click.BadParameter`: if the value is not a positive number
"""
click.echo('Validating Number')
if not isinstance(value, (int, float)) or value < 0:
from click import BadParameter
raise BadParameter(f'{value} is not a valid positive number')
|
def validate_positive_number_with_echo(ctx, param, value): # pylint: disable=unused-argument
"""Validate that the number passed to this parameter is a positive number.
Also echoes a message to the terminal
:param ctx: the `click.Context`
:param param: the parameter
:param value: the value passed for the parameter
:raises `click.BadParameter`: if the value is not a positive number
"""
click.echo(f'Validating {value}')
if not isinstance(value, (int, float)) or value < 0:
from click import BadParameter
raise BadParameter(f'{value} is not a valid positive number')
|
32,020 |
def calculate_pages(max_results: int) -> int:
max_pages = max_results // MAX_RESULTS_PER_PAGE
if max_results % MAX_RESULTS_PER_PAGE:
max_pages += 1
return max_pages
|
def calculate_pages(max_results: int) -> int:
return(math.ceil(max_results / MAX_RESULTS_PER_PAGE))
|
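Both versions of calculate_pages perform a ceiling division; a quick check with an assumed page size of 50 (MAX_RESULTS_PER_PAGE is defined elsewhere in that integration):

import math

MAX_RESULTS_PER_PAGE = 50   # assumed value for illustration
assert math.ceil(120 / MAX_RESULTS_PER_PAGE) == 3
assert -(-120 // MAX_RESULTS_PER_PAGE) == 3   # integer-only equivalent of the remainder-based original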
14,425 |
def audio_loop(clip, nloops=None, duration=None):
""" Loops over an audio clip.
Returns an audio clip that plays the given clip either
`nloops` times, or during `duration` seconds.
Examples
========
>>> from moviepy.editor import *
>>> videoclip = VideoFileClip('myvideo.mp4')
>>> music = AudioFileClip('music.ogg')
>>> audio = afx.audio_loop( music, duration=videoclip.duration)
>>> videoclip.set_audio(audio)
"""
try:
clip = clip.audio
except AttributeError:
# assume it's a an audioclip
pass
if duration is not None:
nloops = int( duration/ clip.duration)+1
return concatenate_audioclips(nloops*[clip]).set_duration(duration)
else:
return concatenate_audioclips(nloops*[clip])
|
def audio_loop(clip, nloops=None, duration=None):
""" Loops over an audio clip.
Returns an audio clip that plays the given clip either
`nloops` times, or during `duration` seconds.
Examples
========
>>> from moviepy.editor import *
>>> videoclip = VideoFileClip('myvideo.mp4')
>>> music = AudioFileClip('music.ogg')
>>> audio = afx.audio_loop( music, duration=videoclip.duration)
>>> videoclip.set_audio(audio)
"""
try:
clip = clip.audio
except AttributeError:
# assume it's already an audioclip
pass
if duration is not None:
nloops = int( duration/ clip.duration)+1
return concatenate_audioclips(nloops*[clip]).set_duration(duration)
else:
return concatenate_audioclips(nloops*[clip])
|
7,891 |
def test_activation(run_in_tmpdir, model):
# Determine (n.gamma) reaction rate using initial run
sp = model.run()
with openmc.StatePoint(sp) as sp:
tally = sp.tallies[1]
capture_rate = tally.get_values().ravel()[0]
# Create one-nuclide depletion chain
chain = openmc.deplete.Chain()
w186 = openmc.deplete.Nuclide('W186')
w186.add_reaction('(n,gamma)', None, 0.0, 1.0)
chain.add_nuclide(w186)
chain.export_to_xml('test_chain.xml')
# Create transport operator
op = openmc.deplete.Operator(
model.geometry, model.settings, 'test_chain.xml',
normalization_mode="source-rate"
)
# To determine the source rate necessary to reduce W186 density in half, we
# start with the single-nuclide transmutation equation:
#
# dn/dt = -f * sigma * phi * n
# n(t) = n0 * exp(-f * sigma * phi * t)
#
# where f is the source rate. The capture rate, r, is sigma * phi * n0,
# meaning that:
#
# n(t) = n0 * exp(-f * r * t / n0)
#
# To reduce the density by half, we would need:
#
# n(t)/n0 = exp(-f * r * t / n0) = 1/2
# f = n0 / (r * t) ln(2)
#
# So we need to know the initial number of atoms (n0), the capture rate (r),
# and choose an irradiation time (t)
w = model.geometry.get_materials_by_name('tungsten')[0]
atom_densities = w.get_nuclide_atom_densities()
atom_per_cc = 1e24 * atom_densities['W186'][1] # Density in atom/cm^3
n0 = atom_per_cc * w.volume # Absolute number of atoms
# Pick a random irradiation time and then determine necessary source rate to
# reduce material by half
t = uniform(1.0, 5.0) * 86400
source_rates = [n0/(capture_rate*t) * log(2.0)]
# Now activate the material
integrator = openmc.deplete.PredictorIntegrator(
op, [t], source_rates=source_rates
)
integrator.integrate()
# Get resulting number of atoms
results = openmc.deplete.ResultsList.from_hdf5('depletion_results.h5')
_, atoms = results.get_atoms(str(w.id), "W186")
assert atoms[0] == pytest.approx(n0)
assert atoms[1] / atoms[0] == pytest.approx(0.5, rel=1e-3)
|
def test_activation(run_in_tmpdir, model):
# Determine (n.gamma) reaction rate using initial run
sp = model.run()
with openmc.StatePoint(sp) as sp:
tally = sp.tallies[1]
capture_rate = tally.get_values().flat[0]
# Create one-nuclide depletion chain
chain = openmc.deplete.Chain()
w186 = openmc.deplete.Nuclide('W186')
w186.add_reaction('(n,gamma)', None, 0.0, 1.0)
chain.add_nuclide(w186)
chain.export_to_xml('test_chain.xml')
# Create transport operator
op = openmc.deplete.Operator(
model.geometry, model.settings, 'test_chain.xml',
normalization_mode="source-rate"
)
# To determine the source rate necessary to reduce W186 density in half, we
# start with the single-nuclide transmutation equation:
#
# dn/dt = -f * sigma * phi * n
# n(t) = n0 * exp(-f * sigma * phi * t)
#
# where f is the source rate. The capture rate, r, is sigma * phi * n0,
# meaning that:
#
# n(t) = n0 * exp(-f * r * t / n0)
#
# To reduce the density by half, we would need:
#
# n(t)/n0 = exp(-f * r * t / n0) = 1/2
# f = n0 / (r * t) ln(2)
#
# So we need to know the initial number of atoms (n0), the capture rate (r),
# and choose an irradiation time (t)
w = model.geometry.get_materials_by_name('tungsten')[0]
atom_densities = w.get_nuclide_atom_densities()
atom_per_cc = 1e24 * atom_densities['W186'][1] # Density in atom/cm^3
n0 = atom_per_cc * w.volume # Absolute number of atoms
# Pick a random irradiation time and then determine necessary source rate to
# reduce material by half
t = uniform(1.0, 5.0) * 86400
source_rates = [n0/(capture_rate*t) * log(2.0)]
# Now activate the material
integrator = openmc.deplete.PredictorIntegrator(
op, [t], source_rates=source_rates
)
integrator.integrate()
# Get resulting number of atoms
results = openmc.deplete.ResultsList.from_hdf5('depletion_results.h5')
_, atoms = results.get_atoms(str(w.id), "W186")
assert atoms[0] == pytest.approx(n0)
assert atoms[1] / atoms[0] == pytest.approx(0.5, rel=1e-3)
|
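A numeric illustration of the source-rate formula derived in the comments of test_activation, using made-up values for the initial atom count n0, capture rate r, and irradiation time t:

from math import exp, log

n0, r, t = 1.0e24, 2.0e10, 3 * 86400.0   # atoms, reactions per second, seconds (assumed)
f = n0 / (r * t) * log(2.0)              # source rate that halves the W186 inventory
assert abs(exp(-f * r * t / n0) - 0.5) < 1e-12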
53,841 |
def managed_instance_update(
cmd,
instance,
administrator_login_password=None,
license_type=None,
vcores=None,
storage_size_in_gb=None,
assign_identity=False,
proxy_override=None,
public_data_endpoint_enabled=None,
tier=None,
family=None,
minimal_tls_version=None,
tags=None,
maintenance_configuration_id=None):
'''
Updates a managed instance. Custom update function to apply parameters to instance.
'''
# Once assigned, the identity cannot be removed
if instance.identity is None and assign_identity:
instance.identity = ResourceIdentity(type=IdentityType.system_assigned.value)
# Apply params to instance
instance.administrator_login_password = (
administrator_login_password or instance.administrator_login_password)
instance.license_type = (
license_type or instance.license_type)
instance.v_cores = (
vcores or instance.v_cores)
instance.storage_size_in_gb = (
storage_size_in_gb or instance.storage_size_in_gb)
instance.proxy_override = (
proxy_override or instance.proxy_override)
instance.minimal_tls_version = (
minimal_tls_version or instance.minimal_tls_version)
instance.sku.name = None
instance.sku.tier = (
tier or instance.sku.tier)
instance.sku.family = (
family or instance.sku.family)
instance.sku = _find_managed_instance_sku_from_capabilities(
cmd.cli_ctx,
instance.location,
instance.sku)
if public_data_endpoint_enabled is not None:
instance.public_data_endpoint_enabled = public_data_endpoint_enabled
if tags is not None:
instance.tags = tags
instance.maintenance_configuration_id = maintenance_configuration_id
return instance
|
def managed_instance_update(
cmd,
instance,
administrator_login_password=None,
license_type=None,
vcores=None,
storage_size_in_gb=None,
assign_identity=False,
proxy_override=None,
public_data_endpoint_enabled=None,
tier=None,
family=None,
minimal_tls_version=None,
tags=None,
maintenance_configuration_id=None):
'''
Updates a managed instance. Custom update function to apply parameters to instance.
'''
# Once assigned, the identity cannot be removed
if instance.identity is None and assign_identity:
instance.identity = ResourceIdentity(type=IdentityType.system_assigned.value)
# Apply params to instance
instance.administrator_login_password = (
administrator_login_password or instance.administrator_login_password)
instance.license_type = (
license_type or instance.license_type)
instance.v_cores = (
vcores or instance.v_cores)
instance.storage_size_in_gb = (
storage_size_in_gb or instance.storage_size_in_gb)
instance.proxy_override = (
proxy_override or instance.proxy_override)
instance.minimal_tls_version = (
minimal_tls_version or instance.minimal_tls_version)
instance.sku.name = None
instance.sku.tier = (
tier or instance.sku.tier)
instance.sku.family = (
family or instance.sku.family)
instance.sku = _find_managed_instance_sku_from_capabilities(
cmd.cli_ctx,
instance.location,
instance.sku)
if public_data_endpoint_enabled is not None:
instance.public_data_endpoint_enabled = public_data_endpoint_enabled
if tags is not None:
instance.tags = tags
if maintenance_configuration_id is not None:
instance.maintenance_configuration_id = maintenance_configuration_id
return instance
|
28,253 |
def _parser2(waveformlist: List[List]) -> Dict:
"""
Cast the waveformlist from _parser1 into a dict used by _parser3.
Args:
waveformlist: A list of lists of waveforms from
_parser1
Returns:
dict: A dictionary with keys waveform name and values for marker1,
marker2, and the waveform as np.arrays
"""
outdict = {}
for (fieldname, fieldvalue) in zip(waveformlist[0], waveformlist[1]):
if 'NAME' in fieldname:
name = fieldvalue
if 'DATA' in fieldname:
value = _unpacker(fieldvalue)
outdict.update({name: {'m1': value[1], 'm2': value[2],
'wfm': value[0]}})
return outdict
|
def _parser2(waveformlist: List[List]) -> Dict:
"""
Cast the waveformlist from _parser1 into a dict used by _parser3.
Args:
waveformlist: A list of lists of waveforms from ``_parser1``
Returns:
dict: A dictionary with keys waveform name and values for marker1,
marker2, and the waveform as np.arrays
"""
outdict = {}
for (fieldname, fieldvalue) in zip(waveformlist[0], waveformlist[1]):
if 'NAME' in fieldname:
name = fieldvalue
if 'DATA' in fieldname:
value = _unpacker(fieldvalue)
outdict.update({name: {'m1': value[1], 'm2': value[2],
'wfm': value[0]}})
return outdict
|
3,653 |
def true_intent_list(var):
lst = var['intent']
ret = []
for intent in lst:
try:
f = globals()['isintent_%s' % intent]
except KeyError:
c = False
else:
if f(var):
ret.append(intent)
return ret
|
def true_intent_list(var):
lst = var['intent']
ret = []
for intent in lst:
try:
f = globals()['isintent_%s' % intent]
except KeyError:
pass
else:
if f(var):
ret.append(intent)
return ret
|
58,581 |
def train_func():
p = TorchWorkerProfiler()
p.__enter__()
# Setup model.
model = torch.nn.Linear(1, 1)
model = train.torch.prepare_model(model)
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
# Setup data.
input = torch.randn(1000, 1)
labels = input * 2
dataset = torch.utils.data.TensorDataset(input, labels)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32)
dataloader = train.torch.prepare_data_loader(dataloader)
# Train.
for epoch in range(5):
p.record_function_enter("train_epoch")
for X, y in dataloader:
pred = model(X)
loss = loss_fn(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
p.record_function_exit("train_epoch")
p.record_function_enter("train_checkpoint")
state_dict = model.state_dict()
consume_prefix_in_state_dict_if_present(state_dict, "module.")
train.save_checkpoint(epoch=epoch, model_weights=state_dict)
p.record_function_exit("train_checkpoint")
p.step()
p.record_function_enter("train_report")
profile_results = p.get_and_clear_profile_traces()
train.report(epoch=epoch, **profile_results)
p.record_function_exit("train_report")
p.__exit__(None, None, None)
|
def train_func():
with TorchWorkerProfiler() as p:
# Setup model.
model = torch.nn.Linear(1, 1)
model = train.torch.prepare_model(model)
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
# Setup data.
input = torch.randn(1000, 1)
labels = input * 2
dataset = torch.utils.data.TensorDataset(input, labels)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32)
dataloader = train.torch.prepare_data_loader(dataloader)
# Train.
for epoch in range(5):
p.record_function_enter("train_epoch")
for X, y in dataloader:
pred = model(X)
loss = loss_fn(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
p.record_function_exit("train_epoch")
p.record_function_enter("train_checkpoint")
state_dict = model.state_dict()
consume_prefix_in_state_dict_if_present(state_dict, "module.")
train.save_checkpoint(epoch=epoch, model_weights=state_dict)
p.record_function_exit("train_checkpoint")
p.step()
p.record_function_enter("train_report")
profile_results = p.get_and_clear_profile_traces()
train.report(epoch=epoch, **profile_results)
p.record_function_exit("train_report")
|
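The change in this pair is purely structural: manual __enter__/__exit__ calls on the profiler become a with-block. A toy context manager, unrelated to TorchWorkerProfiler, makes the equivalence (and the added exception safety) explicit:

class ToyProfiler:
    def __enter__(self):
        print("start profiling")
        return self

    def __exit__(self, exc_type, exc, tb):
        print("stop profiling")
        return False   # never swallow exceptions

# Manual protocol, roughly what the original train_func did (without try/finally):
p = ToyProfiler().__enter__()
try:
    pass   # training loop would go here
finally:
    p.__exit__(None, None, None)

# with-statement form, as in the modified train_func:
with ToyProfiler() as p:
    pass   # training loop would go here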
31,487 |
def list_processes_command(credentials: Dict, sensor_id: int):
api = CBCloudAPI(**credentials)
session = api.select(endpoint_standard.Device, sensor_id).lr_session()
processes = session.list_processes()
if not processes:
return 'There is no active processes'
headers = ['path', 'pid', 'command_line', 'username']
processes_readable = [dict(
path=process['path'],
pid=process['pid'],
command_line=process['command_line'],
user_name=process['username']) for process in processes]
context_entry = dict(sensor_id=sensor_id, processes=processes)
readable_output = tableToMarkdown('Carbon Black Defense Live Response Processes',
headers=headers,
t=processes_readable,
headerTransform=string_to_table_header,
removeNull=True)
return CommandResults(
'CarbonBlackDefenseLR.Processes',
outputs_key_field='sensor_id',
outputs=context_entry,
readable_output=readable_output,
raw_response=processes
)
|
def list_processes_command(credentials: Dict, sensor_id: int):
api = CBCloudAPI(**credentials)
session = api.select(endpoint_standard.Device, sensor_id).lr_session()
processes = session.list_processes()
if not processes:
return 'There is no active processes'
headers = ['path', 'pid', 'command_line', 'username']
processes_readable = [dict(
path=process['path'],
pid=process['pid'],
command_line=process['command_line'],
user_name=process['username']) for process in processes]
context_entry = dict(sensor_id=sensor_id, processes=processes)
readable_output = tableToMarkdown('Carbon Black Defense Live Response Processes',
headers=headers,
t=processes_readable,
headerTransform=string_to_table_header,
removeNull=True)
return CommandResults(
'CarbonBlackDefenseLR.Processes',
outputs_key_field='sensor_id',
outputs=context_entry,
readable_output=readable_output,
raw_response=processes,
)
|
27,684 |
def importorskip(
modname: str, minversion: Optional[str] = None, reason: Optional[str] = None
) -> Any:
"""Imports and returns the requested module ``modname``, or skip the current test
if the module cannot be imported.
:param str modname: the name of the module to import
:param str minversion: if given, the imported module ``__version__``
attribute must be at least this minimal version, otherwise the test is
still skipped.
:param str reason: if given, this reason is shown as the message when the
module cannot be imported.
:returns: The imported module. This should be assigned to its canonical
name.
Example::
docutils = pytest.importorskip("docutils")
"""
import warnings
__tracebackhide__ = True
compile(modname, "", "eval") # to catch syntaxerrors
with warnings.catch_warnings():
# make sure to ignore ImportWarnings that might happen because
# of existing directories with the same name we're trying to
# import but without a __init__.py file
warnings.simplefilter("ignore")
try:
__import__(modname)
except ImportError as exc:
if reason is None:
reason = "could not import {!r}: {}".format(modname, exc)
raise Skipped(reason, allow_module_level=True) from None
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, "__version__", None)
if minversion is not None:
if verattr is None or Version(verattr) < Version(minversion):
raise Skipped(
"module %r has __version__ %r, required is: %r"
% (modname, verattr, minversion),
allow_module_level=True,
)
return mod
|
def importorskip(
modname: str, minversion: Optional[str] = None, reason: Optional[str] = None
) -> Any:
"""Imports and returns the requested module ``modname``, or skip the current test
if the module cannot be imported.
:param str modname: the name of the module to import
:param str minversion: if given, the imported module's ``__version__``
attribute must be at least this minimal version, otherwise the test is
still skipped.
:param str reason: if given, this reason is shown as the message when the
module cannot be imported.
:returns: The imported module. This should be assigned to its canonical
name.
Example::
docutils = pytest.importorskip("docutils")
"""
import warnings
__tracebackhide__ = True
compile(modname, "", "eval") # to catch syntaxerrors
with warnings.catch_warnings():
# make sure to ignore ImportWarnings that might happen because
# of existing directories with the same name we're trying to
# import but without a __init__.py file
warnings.simplefilter("ignore")
try:
__import__(modname)
except ImportError as exc:
if reason is None:
reason = "could not import {!r}: {}".format(modname, exc)
raise Skipped(reason, allow_module_level=True) from None
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, "__version__", None)
if minversion is not None:
if verattr is None or Version(verattr) < Version(minversion):
raise Skipped(
"module %r has __version__ %r, required is: %r"
% (modname, verattr, minversion),
allow_module_level=True,
)
return mod
|
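A minimal usage sketch for the helper above, assuming it is exposed as pytest.importorskip exactly as the docstring example suggests:

import pytest

# Skip this test module unless docutils >= 0.15 can be imported;
# on success the imported module is bound to its canonical name.
docutils = pytest.importorskip("docutils", minversion="0.15")


def test_docutils_reports_a_version():
    # Runs only when the import above succeeded.
    assert hasattr(docutils, "__version__")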
31,705 |
def test_module():
params = demisto.params()
fetch_delta = params.get('first_fetch_delta', '10 minutes')
user_input_fetch_start_date, _ = parse_date_range(fetch_delta)
if datetime.now() - timedelta(days=7) - timedelta(minutes=5) >= user_input_fetch_start_date:
return 'Error: first fetch time delta should not be over one week.'
if params.get('self_deployed'):
if not params.get('auth_code') or not demisto.params().get('redirect_uri'):
return 'Error: in the self_deployed authentication flow the authentication code parameter and redirect uri cannot be empty.'
return 'The basic parameters are ok, authentication cannot be checked using the test module. ' \
'Please run ms-management-activity-list-subscriptions to test your credentials.'
|
def test_module():
params = demisto.params()
fetch_delta = params.get('first_fetch_delta', '10 minutes')
user_input_fetch_start_date, _ = parse_date_range(fetch_delta)
if datetime.now() - timedelta(days=7) - timedelta(minutes=5) >= user_input_fetch_start_date:
return 'Error: first fetch time delta should not be over one week.'
if params.get('self_deployed'):
if not params.get('auth_code') or not params.get('redirect_uri'):
return 'Error: in the self_deployed authentication flow the authentication code parameter and redirect uri cannot be empty.'
return 'The basic parameters are ok, authentication cannot be checked using the test module. ' \
'Please run ms-management-activity-list-subscriptions to test your credentials.'
|
48,989 |
def init_unix_connection_engine(db_config):
# [START cloud_sql_postgres_sqlalchemy_create_socket]
# Remember - storing secrets in plaintext is potentially unsafe. Consider using
# something like https://cloud.google.com/secret-manager/docs/overview to help keep
# secrets secret.
db_user = os.environ["DB_USER"]
db_pass = os.environ["DB_PASS"]
db_name = os.environ["DB_NAME"]
db_socket_dir = os.environ.get("DB_SOCKET_DIR", "/cloudsql")
instance_connection_name = os.environ["INSTANCE_CONNECTION_NAME"]
pool = sqlalchemy.create_engine(
# Equivalent URL:
# postgresql+pg8000://<db_user>:<db_pass>@/<db_name>
# ?unix_sock=<socket_path>/<cloud_sql_instance_name>/.s.PGSQL.5432
# Note: Some libraries (e.g. psycopg2) require the `unix_sock` key in the
# query parameter below to be changed to `host` in order to connect successfully.
sqlalchemy.engine.url.URL.create(
drivername="postgresql+pg8000",
username=db_user, # e.g. "my-database-user"
password=db_pass, # e.g. "my-database-password"
database=db_name, # e.g. "my-database-name"
query={
"unix_sock": "{}/{}/.s.PGSQL.5432".format(
db_socket_dir, # e.g. "/cloudsql"
instance_connection_name) # i.e "<PROJECT-NAME>:<INSTANCE-REGION>:<INSTANCE-NAME>"
}
),
**db_config
)
# [END cloud_sql_postgres_sqlalchemy_create_socket]
pool.dialect.description_encoding = None
return pool
|
def init_unix_connection_engine(db_config):
# [START cloud_sql_postgres_sqlalchemy_create_socket]
# Remember - storing secrets in plaintext is potentially unsafe. Consider using
# something like https://cloud.google.com/secret-manager/docs/overview to help keep
# secrets secret.
db_user = os.environ["DB_USER"]
db_pass = os.environ["DB_PASS"]
db_name = os.environ["DB_NAME"]
db_socket_dir = os.environ.get("DB_SOCKET_DIR", "/cloudsql")
instance_connection_name = os.environ["INSTANCE_CONNECTION_NAME"]
pool = sqlalchemy.create_engine(
# Equivalent URL:
# postgresql+pg8000://<db_user>:<db_pass>@/<db_name>
# ?unix_sock=<socket_path>/<cloud_sql_instance_name>/.s.PGSQL.5432
# Note: Some drivers require the `unix_sock` query parameter to use a different key.
# For example, 'psycopg2' uses the path set to `host` in order to connect successfully.
sqlalchemy.engine.url.URL.create(
drivername="postgresql+pg8000",
username=db_user, # e.g. "my-database-user"
password=db_pass, # e.g. "my-database-password"
database=db_name, # e.g. "my-database-name"
query={
"unix_sock": "{}/{}/.s.PGSQL.5432".format(
db_socket_dir, # e.g. "/cloudsql"
instance_connection_name) # i.e "<PROJECT-NAME>:<INSTANCE-REGION>:<INSTANCE-NAME>"
}
),
**db_config
)
# [END cloud_sql_postgres_sqlalchemy_create_socket]
pool.dialect.description_encoding = None
return pool
|
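The comment above notes that some drivers expect a different query key. As an illustrative sketch (an assumption for this note, not part of the sample above): with psycopg2 the socket directory is typically passed under `host` instead of `unix_sock`, because libpq treats a directory path given as the host as a Unix-socket directory.

import os

import sqlalchemy


def init_unix_connection_engine_psycopg2(db_config):
    # Hypothetical psycopg2 counterpart of the pg8000 example above.
    db_user = os.environ["DB_USER"]
    db_pass = os.environ["DB_PASS"]
    db_name = os.environ["DB_NAME"]
    db_socket_dir = os.environ.get("DB_SOCKET_DIR", "/cloudsql")
    instance_connection_name = os.environ["INSTANCE_CONNECTION_NAME"]
    return sqlalchemy.create_engine(
        sqlalchemy.engine.url.URL.create(
            drivername="postgresql+psycopg2",
            username=db_user,
            password=db_pass,
            database=db_name,
            # libpq appends ".s.PGSQL.5432" to a directory passed as `host`.
            query={"host": "{}/{}".format(db_socket_dir, instance_connection_name)},
        ),
        **db_config,
    )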
48,266 |
def request(url, user, passwd, timeout, data=None, method=None):
if data:
data = json.dumps(data)
# NOTE: fetch_url uses a password manager, which follows the
# standard request-then-challenge basic-auth semantics. However as
# JIRA allows some unauthorised operations it doesn't necessarily
# send the challenge, so the request occurs as the anonymous user,
# resulting in unexpected results. To work around this we manually
# inject the basic-auth header up-front to ensure that JIRA treats
# the requests as authorized for this user.
auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(user, passwd), errors='surrogate_or_strict')))
response, info = fetch_url(module, url, data=data, method=method, timeout=timeout,
headers={'Content-Type': 'application/json',
'Authorization': "Basic %s" % auth})
if info['status'] not in (200, 201, 204):
module.fail_json(msg=info['msg'])
body = response.read()
if body:
return json.loads(body.decode('utf-8'))
else:
return {}
|
def request(url, user, passwd, timeout, data=None, method=None):
if data:
data = json.dumps(data)
# NOTE: fetch_url uses a password manager, which follows the
# standard request-then-challenge basic-auth semantics. However as
# JIRA allows some unauthorised operations it doesn't necessarily
# send the challenge, so the request occurs as the anonymous user,
# resulting in unexpected results. To work around this we manually
# inject the basic-auth header up-front to ensure that JIRA treats
# the requests as authorized for this user.
auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(user, passwd), errors='surrogate_or_strict')))
response, info = fetch_url(module, url, data=data, method=method, timeout=timeout,
headers={'Content-Type': 'application/json',
'Authorization': "Basic %s" % auth})
if info['status'] not in (200, 201, 204):
module.fail_json(msg=info['msg'])
body = response.read()
if body:
return json.loads(to_text(body, errors='surrogate_or_strict'))
else:
return {}
|
36,390 |
def mock_open(mock=None, read_data='', data=()):
"""
A helper function to create a mock to replace the use of `open`. It works
for `open` called directly or used as a context manager.
The `mock` argument is the mock object to configure. If `None` (the
default) then a `MagicMock` will be created for you, with the API limited
to methods or attributes available on standard file handles.
`read_data` is a string for the `read`, `readline` and `readlines` of the
file handle to return. This is an empty string by default.
`data` is any value acceptable for `dict` constructor. The keys
    represent file names. If a file name found in `data` is
    mock-opened, the value matching the file name in `data` will
    be used instead of `read_data`. The default is an empty tuple.
"""
global file_spec
if file_spec is None:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(
set(dir(_io.BytesIO))))
if mock is None:
mock = MagicMock(name='open', spec=open)
handlers = {filename: _MockOpenHandleHandler(read_data)
for filename, read_data in dict(data).items()}
default_handler = _MockOpenHandleHandler(read_data)
def _side_effect(*args, **kwargs):
try:
filename = args[0]
handler = handlers[filename]
except (IndexError, KeyError):
default_handler.reset_data()
return DEFAULT
handler.reset_data()
return handler.handle
mock.side_effect = _side_effect
mock.return_value = default_handler.handle
return mock
|
def mock_open(mock=None, read_data='', data=()):
"""
A helper function to create a mock to replace the use of `open`. It works
for `open` called directly or used as a context manager.
The `mock` argument is the mock object to configure. If `None` (the
default) then a `MagicMock` will be created for you, with the API limited
to methods or attributes available on standard file handles.
`read_data` is a string for the `read`, `readline` and `readlines` of the
file handle to return. This is an empty string by default.
`data` is any value acceptable for the `dict` constructor. The keys
    represent file names. If a file name found in `data` is
    mock-opened, the value matching the file name in `data` will
    be used instead of `read_data`. The default is an empty tuple.
"""
global file_spec
if file_spec is None:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(
set(dir(_io.BytesIO))))
if mock is None:
mock = MagicMock(name='open', spec=open)
handlers = {filename: _MockOpenHandleHandler(read_data)
for filename, read_data in dict(data).items()}
default_handler = _MockOpenHandleHandler(read_data)
def _side_effect(*args, **kwargs):
try:
filename = args[0]
handler = handlers[filename]
except (IndexError, KeyError):
default_handler.reset_data()
return DEFAULT
handler.reset_data()
return handler.handle
mock.side_effect = _side_effect
mock.return_value = default_handler.handle
return mock
|
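A short sketch of how the per-filename `data` mapping documented above might be used, assuming the data-aware variant defined above is in scope as mock_open (the stock unittest.mock.mock_open has no `data` argument):

from unittest.mock import patch

# `mock_open` here refers to the data-aware variant defined above.
m = mock_open(read_data="default contents",
              data={"config.ini": "[core]\nverbose = true\n"})

with patch("builtins.open", m):
    with open("config.ini") as fh:
        assert "verbose" in fh.read()            # served from the `data` mapping
    with open("other.txt") as fh:
        assert fh.read() == "default contents"   # falls back to `read_data`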
22,753 |
def probe_sni(name, host, port=443, timeout=300, # pylint: disable=too-many-arguments
method=_DEFAULT_SSL_METHOD, source_address=('', 0),
alpn_protocols=None):
"""Probe SNI server for SSL certificate.
:param bytes name: Byte string to send as the server name in the
client hello message.
:param bytes host: Host to connect to.
:param int port: Port to connect to.
:param int timeout: Timeout in seconds.
:param method: See `OpenSSL.SSL.Context` for allowed values.
:param tuple source_address: Enables multi-path probing (selection
        of source interface). See `socket.create_connection` for more
info. Available only in Python 2.7+.
:param alpn_protocols: Protocols to request using ALPN.
:type alpn_protocols: `list` of `bytes`
:raises acme.errors.Error: In case of any problems.
:returns: SSL certificate presented by the server.
:rtype: OpenSSL.crypto.X509
"""
context = SSL.Context(method)
context.set_timeout(timeout)
socket_kwargs = {'source_address': source_address}
try:
logger.debug(
"Attempting to connect to %s:%d%s.", host, port,
" from {0}:{1}".format(
source_address[0],
source_address[1]
) if source_address[0] else ""
)
socket_tuple = (host, port) # type: Tuple[str, int]
sock = socket.create_connection(socket_tuple, **socket_kwargs) # type: ignore
except socket.error as error:
raise errors.Error(error)
with contextlib.closing(sock) as client:
client_ssl = SSL.Connection(context, client)
client_ssl.set_connect_state()
client_ssl.set_tlsext_host_name(name) # pyOpenSSL>=0.13
if alpn_protocols is not None:
client_ssl.set_alpn_protos(alpn_protocols)
try:
client_ssl.do_handshake()
client_ssl.shutdown()
except SSL.Error as error:
raise errors.Error(error)
return client_ssl.get_peer_certificate()
|
def probe_sni(name, host, port=443, timeout=300, # pylint: disable=too-many-arguments
method=_DEFAULT_SSL_METHOD, source_address=('', 0),
alpn_protocols=None):
"""Probe SNI server for SSL certificate.
:param bytes name: Byte string to send as the server name in the
client hello message.
:param bytes host: Host to connect to.
:param int port: Port to connect to.
:param int timeout: Timeout in seconds.
:param method: See `OpenSSL.SSL.Context` for allowed values.
:param tuple source_address: Enables multi-path probing (selection
        of source interface). See `socket.create_connection` for more
info. Available only in Python 2.7+.
:param alpn_protocols: Protocols to request using ALPN.
:type alpn_protocols: `list` of `bytes`
:raises acme.errors.Error: In case of any problems.
:returns: SSL certificate presented by the server.
:rtype: OpenSSL.crypto.X509
"""
context = SSL.Context(method)
context.set_timeout(timeout)
socket_kwargs = {'source_address': source_address}
try:
logger.debug(
"Attempting to connect to %s:%d%s.", host, port,
" from {0}:{1}".format(
source_address[0],
source_address[1]
) if any(source_address) else ""
)
socket_tuple = (host, port) # type: Tuple[str, int]
sock = socket.create_connection(socket_tuple, **socket_kwargs) # type: ignore
except socket.error as error:
raise errors.Error(error)
with contextlib.closing(sock) as client:
client_ssl = SSL.Connection(context, client)
client_ssl.set_connect_state()
client_ssl.set_tlsext_host_name(name) # pyOpenSSL>=0.13
if alpn_protocols is not None:
client_ssl.set_alpn_protos(alpn_protocols)
try:
client_ssl.do_handshake()
client_ssl.shutdown()
except SSL.Error as error:
raise errors.Error(error)
return client_ssl.get_peer_certificate()
|
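For context, a hedged usage sketch; probe_sni is assumed to live in acme.crypto_util as in certbot's acme package, and the call performs a live TLS handshake, so it is illustrative only:

from acme.crypto_util import probe_sni

# Fetch the certificate a server presents for a given SNI name.
cert = probe_sni(b"example.com", "example.com", port=443, timeout=10)
print(cert.get_subject().CN)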
31,085 |
def feed_main(params, feed_name, prefix):
handle_proxy()
client = Client(**params)
indicator_type = params.get('indicator_type')
feedTags = argToList(params.get('feedTags'))
command = demisto.command()
if prefix and not prefix.endswith('-'):
prefix += '-'
if command != 'fetch-indicators':
demisto.info(f'Command being called is {demisto.command()}')
try:
if command == 'test-module':
return_outputs(test_module(client))
elif command == 'fetch-indicators':
indicators = fetch_indicators_command(client, indicator_type, feedTags,
params.get('auto_detect_type'))
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif command == f'{prefix}get-indicators':
# dummy command for testing
limit = int(demisto.args().get('limit', 10))
auto_detect = params.get('auto_detect_type')
indicators = fetch_indicators_command(client, indicator_type, feedTags, auto_detect)[:limit]
demisto.debug(f"tal indicators len in feed main {len(indicators)}")
hr = tableToMarkdown('Indicators', indicators, headers=['value', 'type', 'rawJSON'])
return_outputs(hr, {}, indicators)
except Exception as err:
err_msg = f'Error in {feed_name} integration [{err}]'
return_error(err_msg)
|
def feed_main(params, feed_name, prefix):
handle_proxy()
client = Client(**params)
indicator_type = params.get('indicator_type')
feedTags = argToList(params.get('feedTags'))
command = demisto.command()
if prefix and not prefix.endswith('-'):
prefix += '-'
if command != 'fetch-indicators':
demisto.info(f'Command being called is {demisto.command()}')
try:
if command == 'test-module':
return_results(test_module(client))
elif command == 'fetch-indicators':
indicators = fetch_indicators_command(client, indicator_type, feedTags,
params.get('auto_detect_type'))
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif command == f'{prefix}get-indicators':
# dummy command for testing
limit = int(demisto.args().get('limit', 10))
auto_detect = params.get('auto_detect_type')
indicators = fetch_indicators_command(client, indicator_type, feedTags, auto_detect)[:limit]
demisto.debug(f"tal indicators len in feed main {len(indicators)}")
hr = tableToMarkdown('Indicators', indicators, headers=['value', 'type', 'rawJSON'])
return_outputs(hr, {}, indicators)
except Exception as err:
err_msg = f'Error in {feed_name} integration [{err}]'
return_error(err_msg)
|
52,314 |
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
connectable = config.attributes.get('connection', None)
config_opts = dict(literal_binds=True)
config_opts.update(common_config_opts)
if connectable is None:
config_opts["url"] = config.get_main_option("sqlalchemy.url")
else:
config_opts["connection"] = connectable
context.configure(**config_opts)
with context.begin_transaction():
context.run_migrations()
|
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
connectable = config.attributes.get('connection', None)
config_opts = {}
config_opts.update(common_config_opts)
config_opts["literal_binds"] = True
if connectable is None:
config_opts["url"] = config.get_main_option("sqlalchemy.url")
else:
config_opts["connection"] = connectable
context.configure(**config_opts)
with context.begin_transaction():
context.run_migrations()
|
43,807 |
def _inner_net_flow_constraint_hamiltonian(
graph: nx.DiGraph, node: int
) -> Tuple[List[float], List[qml.operation.Observable]]:
r"""Calculates the squared inner portion of the Hamiltonian in :func:`net_flow_constraint`.
For a given :math:`i`, this function returns:
.. math::
\left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2}.
Args:
graph (nx.DiGraph): the graph specifying possible edges
node: a fixed node
Returns:
Tuple[List[float], List[qml.operation.Observable]]: The list of coefficients and list of
observables of the inner part of the net-flow constraint Hamiltonian.
"""
edges_to_qubits = edges_to_wires(graph)
coeffs = []
ops = []
out_edges = graph.out_edges(node)
in_edges = graph.in_edges(node)
coeffs.append(len(out_edges) - len(in_edges))
ops.append(qml.Identity(0))
for edge in out_edges:
wires = (edges_to_qubits[edge],)
coeffs.append(-1)
ops.append(qml.PauliZ(wires))
for edge in in_edges:
wires = (edges_to_qubits[edge],)
coeffs.append(1)
ops.append(qml.PauliZ(wires))
coeffs, ops = _square_hamiltonian_terms(coeffs, ops)
coeffs, ops = _collect_duplicates(coeffs, ops)
hamiltonian = qml.Hamiltonian(coeffs, ops)
return hamiltonian
|
def _inner_net_flow_constraint_hamiltonian(
graph: nx.DiGraph, node: int
) -> Tuple[List[float], List[qml.operation.Observable]]:
r"""Calculates the squared inner portion of the Hamiltonian in :func:`net_flow_constraint`.
For a given :math:`i`, this function returns:
.. math::
\left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2}.
Args:
graph (nx.DiGraph): the graph specifying possible edges
node (int): a fixed node
Returns:
Tuple[List[float], List[qml.operation.Observable]]: The list of coefficients and list of
observables of the inner part of the net-flow constraint Hamiltonian.
"""
edges_to_qubits = edges_to_wires(graph)
coeffs = []
ops = []
out_edges = graph.out_edges(node)
in_edges = graph.in_edges(node)
coeffs.append(len(out_edges) - len(in_edges))
ops.append(qml.Identity(0))
for edge in out_edges:
wires = (edges_to_qubits[edge],)
coeffs.append(-1)
ops.append(qml.PauliZ(wires))
for edge in in_edges:
wires = (edges_to_qubits[edge],)
coeffs.append(1)
ops.append(qml.PauliZ(wires))
coeffs, ops = _square_hamiltonian_terms(coeffs, ops)
coeffs, ops = _collect_duplicates(coeffs, ops)
hamiltonian = qml.Hamiltonian(coeffs, ops)
return hamiltonian
|
41,057 |
def _plot_and_save_attention(att_w, filename):
"""Plot and save attention."""
# dynamically import matplotlib due to not found error
from matplotlib.ticker import MaxNLocator
import os
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
w, h = plt.figaspect(1.0 / len(att_w))
fig = plt.Figure(figsize=(w * 2, h * 2))
axes = fig.subplots(1, len(att_w))
if len(att_w) == 1:
axes = [axes]
for ax, aw in zip(axes, att_w):
# plt.subplot(1, len(att_w), h)
ax.imshow(aw, aspect="auto")
ax.set_xlabel("Input")
ax.set_ylabel("Output")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
fig.tight_layout()
return fig
|
def _plot_and_save_attention(att_w, filename):
"""Plot and save an attention."""
# dynamically import matplotlib due to not found error
from matplotlib.ticker import MaxNLocator
import os
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
w, h = plt.figaspect(1.0 / len(att_w))
fig = plt.Figure(figsize=(w * 2, h * 2))
axes = fig.subplots(1, len(att_w))
if len(att_w) == 1:
axes = [axes]
for ax, aw in zip(axes, att_w):
# plt.subplot(1, len(att_w), h)
ax.imshow(aw, aspect="auto")
ax.set_xlabel("Input")
ax.set_ylabel("Output")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
fig.tight_layout()
return fig
|
30,154 |
def fetch_data(r):
resp = r.get(LIVE_PRODUCTION_API_URL)
data = resp.json()
timestamp = arrow.get(data["lastmodified"], "HH:mm:ss A DD-MM-YYYY", tzinfo=tz.gettz('Asia/Baghdad'))
return data["d"], timestamp
|
def fetch_data(r):
resp = r.get(LIVE_PRODUCTION_API_URL)
data = resp.json()
timestamp = arrow.get(data["lastmodified"], "HH:mm:ss A DD-MM-YYYY", tzinfo=tz.gettz('Asia/Baghdad'))
return data["d"], timestamp.datetime
|
51,442 |
def merge_indexes(
indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],
variables: Mapping[Hashable, Variable],
coord_names: List[Hashable],
append: bool = False,
) -> Tuple[Dict[Hashable, Variable], List[Hashable]]:
"""Merge variables into multi-indexes.
Not public API. Used in Dataset and DataArray set_index
methods.
"""
vars_to_replace: Dict[Hashable, Variable] = {}
vars_to_remove: List[Hashable] = []
dims_to_replace: Dict[Hashable, Hashable] = {}
error_msg = "{} is not the name of an existing variable."
for dim, var_names in indexes.items():
if isinstance(var_names, str) or not isinstance(var_names, Sequence):
var_names = [var_names]
names: List[Hashable] = []
codes: List[List[int]] = []
levels: List[List[int]] = []
current_index_variable = variables.get(dim)
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
if (
current_index_variable is not None
and var.dims != current_index_variable.dims
):
raise ValueError(
"dimension mismatch between %r %s and %r %s"
% (dim, current_index_variable.dims, n, var.dims)
)
if current_index_variable is not None and append:
current_index = current_index_variable.to_index()
if isinstance(current_index, pd.MultiIndex):
names.extend(current_index.names)
codes.extend(current_index.codes)
levels.extend(current_index.levels)
else:
names.append("%s_level_0" % dim)
cat = pd.Categorical(current_index.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
if not len(names) and len(var_names) == 1:
idx = pd.Index(variables[var_names[0]].values)
else: # MultiIndex
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
names.append(n)
cat = pd.Categorical(var.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
idx = pd.MultiIndex(levels, codes, names=names)
for n in names:
dims_to_replace[n] = dim
vars_to_replace[dim] = IndexVariable(dim, idx)
vars_to_remove.extend(var_names)
new_variables = {k: v for k, v in variables.items() if k not in vars_to_remove}
new_variables.update(vars_to_replace)
# update dimensions if necessary, GH: 3512
for k, v in new_variables.items():
if any(d in dims_to_replace for d in v.dims):
new_dims = [dims_to_replace.get(d, d) for d in v.dims]
new_variables[k] = v._replace(dims=new_dims)
new_coord_names = coord_names + [x for x in vars_to_replace if x not in coord_names]
new_coord_names = [x for x in new_coord_names if x not in vars_to_remove]
return new_variables, new_coord_names
|
def merge_indexes(
indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],
variables: Mapping[Hashable, Variable],
coord_names: List[Hashable],
append: bool = False,
) -> Tuple[Dict[Hashable, Variable], List[Hashable]]:
"""Merge variables into multi-indexes.
Not public API. Used in Dataset and DataArray set_index
methods.
"""
vars_to_replace: Dict[Hashable, Variable] = {}
vars_to_remove: List[Hashable] = []
dims_to_replace: Dict[Hashable, Hashable] = {}
error_msg = "{} is not the name of an existing variable."
for dim, var_names in indexes.items():
if isinstance(var_names, str) or not isinstance(var_names, Sequence):
var_names = [var_names]
names: List[Hashable] = []
codes: List[List[int]] = []
levels: List[List[int]] = []
current_index_variable = variables.get(dim)
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
if (
current_index_variable is not None
and var.dims != current_index_variable.dims
):
raise ValueError(
"dimension mismatch between %r %s and %r %s"
% (dim, current_index_variable.dims, n, var.dims)
)
if current_index_variable is not None and append:
current_index = current_index_variable.to_index()
if isinstance(current_index, pd.MultiIndex):
names.extend(current_index.names)
codes.extend(current_index.codes)
levels.extend(current_index.levels)
else:
names.append("%s_level_0" % dim)
cat = pd.Categorical(current_index.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
if not len(names) and len(var_names) == 1:
idx = pd.Index(variables[var_names[0]].values)
else: # MultiIndex
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
names.append(n)
cat = pd.Categorical(var.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
idx = pd.MultiIndex(levels, codes, names=names)
for n in names:
dims_to_replace[n] = dim
vars_to_replace[dim] = IndexVariable(dim, idx)
vars_to_remove.extend(var_names)
new_variables = {k: v for k, v in variables.items() if k not in vars_to_remove}
new_variables.update(vars_to_replace)
# update dimensions if necessary, GH: 3512
for k, v in new_variables.items():
if any(d in dims_to_replace for d in v.dims):
new_dims = [dims_to_replace.get(d, d) for d in v.dims]
new_variables[k] = v._replace(dims=new_dims)
new_coord_names = [x for x in coord_names if x not in vars_to_remove]
new_coord_names += [
x
for x in vars_to_replace
if x not in coord_names and x not in vars_to_remove
]
return new_variables, new_coord_names
|
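Because the docstring says this helper backs Dataset.set_index, here is a small sketch of the behaviour through xarray's public API (illustrative; it does not call the private function directly):

import numpy as np
import pandas as pd
import xarray as xr

# Two coordinate variables along "x" are merged into a single pandas.MultiIndex.
ds = xr.Dataset(
    {"v": ("x", np.arange(4))},
    coords={"a": ("x", ["p", "p", "q", "q"]), "b": ("x", [0, 1, 0, 1])},
)
indexed = ds.set_index(x=["a", "b"])
assert isinstance(indexed.indexes["x"], pd.MultiIndex)
assert list(indexed.indexes["x"].names) == ["a", "b"]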
10,491 |
def install_deb(m, debs, cache, force, no_remove, install_recommends, allow_unauthenticated, dpkg_options):
changed = False
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
try:
pkg = apt.debfile.DebPackage(deb_file)
pkg_name = get_field_of_deb(m, deb_file, "Package")
pkg_version = get_field_of_deb(m, deb_file, "Version")
if len(apt_pkg.get_architectures()) > 1:
pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
pkg_key = "%s:%s" % (pkg_name, pkg_arch)
else:
pkg_key = pkg_name
try:
installed_pkg = apt.Cache()[pkg_key]
installed_version = installed_pkg.installed.version
if package_version_compare(pkg_version, installed_version) == 0:
# Does not need to down-/upgrade, move on to next package
continue
except Exception:
# Must not be installed, continue with installation
pass
# Check if package is installable
if not pkg.check() and not force:
m.fail_json(msg=pkg._failure_string)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
except Exception as e:
m.fail_json(msg="Unable to install package: %s" % to_native(e))
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
# install the deps through apt
retvals = {}
if deps_to_install:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
no_remove=no_remove,
allow_unauthenticated=allow_unauthenticated,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
m.fail_json(**retvals)
changed = retvals.get('changed', False)
if pkgs_to_install:
options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
if m.check_mode:
options += " --simulate"
if force:
options += " --force-all"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if "stdout" in retvals:
stdout = retvals["stdout"] + out
else:
stdout = out
if "diff" in retvals:
diff = retvals["diff"]
if 'prepared' in diff:
diff['prepared'] += '\n\n' + out
else:
diff = parse_diff(out)
if "stderr" in retvals:
stderr = retvals["stderr"] + err
else:
stderr = err
if rc == 0:
m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
else:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', ''))
|
def install_deb(m, debs, cache, force, fail_on_autoremove, install_recommends, allow_unauthenticated, dpkg_options):
changed = False
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
try:
pkg = apt.debfile.DebPackage(deb_file)
pkg_name = get_field_of_deb(m, deb_file, "Package")
pkg_version = get_field_of_deb(m, deb_file, "Version")
if len(apt_pkg.get_architectures()) > 1:
pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
pkg_key = "%s:%s" % (pkg_name, pkg_arch)
else:
pkg_key = pkg_name
try:
installed_pkg = apt.Cache()[pkg_key]
installed_version = installed_pkg.installed.version
if package_version_compare(pkg_version, installed_version) == 0:
# Does not need to down-/upgrade, move on to next package
continue
except Exception:
# Must not be installed, continue with installation
pass
# Check if package is installable
if not pkg.check() and not force:
m.fail_json(msg=pkg._failure_string)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
except Exception as e:
m.fail_json(msg="Unable to install package: %s" % to_native(e))
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
# install the deps through apt
retvals = {}
if deps_to_install:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
                                     fail_on_autoremove=fail_on_autoremove,
allow_unauthenticated=allow_unauthenticated,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
m.fail_json(**retvals)
changed = retvals.get('changed', False)
if pkgs_to_install:
options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
if m.check_mode:
options += " --simulate"
if force:
options += " --force-all"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if "stdout" in retvals:
stdout = retvals["stdout"] + out
else:
stdout = out
if "diff" in retvals:
diff = retvals["diff"]
if 'prepared' in diff:
diff['prepared'] += '\n\n' + out
else:
diff = parse_diff(out)
if "stderr" in retvals:
stderr = retvals["stderr"] + err
else:
stderr = err
if rc == 0:
m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
else:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', ''))
|
41,768 |
def plot_parallel_coordinate(study: Study, params: Optional[List[str]] = None) -> go.Figure:
"""Plot the high-dimentional parameter relationships in a study.
Note that, If a parameter contains missing values, a trial with missing values is not plotted.
Example:
The following code snippet shows how to plot the high-dimentional parameter relationships.
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_uniform('x', -100, 100)
y = trial.suggest_categorical('y', [-1, 0, 1])
return x ** 2 + y
study = optuna.create_study()
study.optimize(objective, n_trials=10)
optuna.visualization.plot_parallel_coordinate(study, params=['x', 'y'])
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their objective
values.
params:
Parameter list to visualize. The default is all parameters.
Returns:
A :class:`plotly.graph_objs.Figure` object.
"""
_check_plotly_availability()
return _get_parallel_coordinate_plot(study, params)
|
def plot_parallel_coordinate(study: Study, params: Optional[List[str]] = None) -> 'go.Figure':
"""Plot the high-dimentional parameter relationships in a study.
Note that, If a parameter contains missing values, a trial with missing values is not plotted.
Example:
The following code snippet shows how to plot the high-dimentional parameter relationships.
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_uniform('x', -100, 100)
y = trial.suggest_categorical('y', [-1, 0, 1])
return x ** 2 + y
study = optuna.create_study()
study.optimize(objective, n_trials=10)
optuna.visualization.plot_parallel_coordinate(study, params=['x', 'y'])
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their objective
values.
params:
Parameter list to visualize. The default is all parameters.
Returns:
A :class:`plotly.graph_objs.Figure` object.
"""
_check_plotly_availability()
return _get_parallel_coordinate_plot(study, params)
|
28,578 |
def plot_forest(
data,
kind="forestplot",
model_names=None,
var_names=None,
filter_vars=None,
transform=None,
coords=None,
combined=False,
hdi_prob=None,
rope=None,
quartiles=True,
ess=False,
r_hat=False,
colors="cycle",
textsize=None,
linewidth=None,
markersize=None,
legend=True,
labeller=None,
ridgeplot_alpha=None,
ridgeplot_overlap=2,
ridgeplot_kind="auto",
ridgeplot_truncate=True,
ridgeplot_quantiles=None,
figsize=None,
ax=None,
backend=None,
backend_config=None,
backend_kwargs=None,
show=None,
):
"""Forest plot to compare HDI intervals from a number of distributions.
Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.
Parameters
----------
data: obj or list[obj]
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
kind: str
Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot"
model_names: list[str], optional
    List with names for the models in the list of data. Useful when plotting more than one
dataset.
var_names: list[str], optional
List of variables to plot (defaults to None, which results in all variables plotted)
Prefix the variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like", interpret
var_names as substrings of the real variables names. If "regex", interpret var_names as
regular expressions on the real variables names. A la ``pandas.filter``.
transform: callable
    Function to transform data (defaults to None, i.e. the identity function)
coords: dict, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`.
combined: bool
Flag for combining multiple chains into a single chain. If ``False``(default), chains will
be plotted separately.
hdi_prob: float, optional
Plots highest posterior density interval for chosen percentage of density.
Defaults to `0.94`.
rope: tuple or dictionary of tuples
Lower and upper values of the Region Of Practical Equivalence. If a list with one interval
only is provided, the **rope** will be displayed across the y-axis. If more than one
interval is provided the length of the list should match the number of variables.
quartiles: bool, optional
Flag for plotting the interquartile range, in addition to the hdi_prob intervals.
Defaults to ``True``.
r_hat: bool, optional
Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False
ess: bool, optional
Flag for plotting the effective sample size. Defaults to ``False``.
colors: list or string, optional
    list with valid matplotlib colors, one color per model. Alternatively, a string can be passed.
    If the string is `cycle`, it will automatically choose a color per model from matplotlib's
    cycle. If a single color is passed, e.g. 'k', 'C2', 'red', this color will be used for all
    models. Defaults to 'cycle'.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``.
linewidth: int
Line width throughout. If None it will be autoscaled based on ``figsize``.
markersize: int
Markersize throughout. If None it will be autoscaled based on ``figsize``.
legend : bool, optional
Show a legend with the color encoded model information.
Defaults to ``True`` if there are multiple models
labeller : labeller instance, optional
Class providing the method ``make_model_label`` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ridgeplot_alpha: float
Transparency for ridgeplot fill. If **0**, border is colored by model, otherwise
a `black` outline is used.
ridgeplot_overlap: float
Overlap height for ridgeplots.
ridgeplot_kind: string
By default ("auto") continuous variables are plotted using KDEs and discrete ones using
histograms. To override this use "hist" to plot histograms and "density" for KDEs
ridgeplot_truncate: bool
    Whether to truncate densities according to the value of hdi_prob. Defaults to ``True``.
ridgeplot_quantiles: list
Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
Defaults to ``None``.
figsize: tuple
Figure size. If ``None``, it will be defined automatically.
ax: axes, optional
:class:`matplotlib.axes` or :class:`bokeh.plotting.figure`.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``.
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
gridspec: matplotlib GridSpec or bokeh figures
See Also
--------
plot_posterior: Plot Posterior densities in the style of John K. Kruschke’s book.
plot_density: Generate KDE plots for continuous variables and histograms for discrete ones.
Examples
--------
    Forestplot
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered_data = az.load_arviz_data('non_centered_eight')
>>> axes = az.plot_forest(non_centered_data,
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Forestplot with multiple datasets
.. plot::
:context: close-figs
>>> centered_data = az.load_arviz_data('centered_eight')
>>> axes = az.plot_forest([non_centered_data, centered_data],
>>> model_names = ["non centered eight", "centered eight"],
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools models')
    Forestplot with ropes
.. plot::
:context: close-figs
>>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}
>>> axes = az.plot_forest(non_centered_data,
>>> rope=rope,
>>> var_names='~tau',
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_overlap=3,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot non-truncated and with quantiles
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_truncate=False,
>>> ridgeplot_quantiles=[.25, .5, .75],
>>> ridgeplot_overlap=0.7,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
"""
if not isinstance(data, (list, tuple)):
data = [data]
if len(data) == 1:
legend = False
if coords is None:
coords = {}
if labeller is None:
labeller = NoModelLabeller() if legend else BaseLabeller()
datasets = [convert_to_dataset(datum) for datum in reversed(data)]
if transform is not None:
datasets = [transform(dataset) for dataset in datasets]
datasets = get_coords(
datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords
)
var_names = _var_names(var_names, datasets, filter_vars)
ncols, width_ratios = 1, [3]
if ess:
ncols += 1
width_ratios.append(1)
if r_hat:
ncols += 1
width_ratios.append(1)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
plot_forest_kwargs = dict(
ax=ax,
datasets=datasets,
var_names=var_names,
model_names=model_names,
combined=combined,
colors=colors,
figsize=figsize,
width_ratios=width_ratios,
linewidth=linewidth,
markersize=markersize,
kind=kind,
ncols=ncols,
hdi_prob=hdi_prob,
quartiles=quartiles,
rope=rope,
ridgeplot_overlap=ridgeplot_overlap,
ridgeplot_alpha=ridgeplot_alpha,
ridgeplot_kind=ridgeplot_kind,
ridgeplot_truncate=ridgeplot_truncate,
ridgeplot_quantiles=ridgeplot_quantiles,
textsize=textsize,
legend=legend,
labeller=labeller,
ess=ess,
r_hat=r_hat,
backend_kwargs=backend_kwargs,
backend_config=backend_config,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_forest", "forestplot", backend)
axes = plot(**plot_forest_kwargs)
return axes
|
def plot_forest(
data,
kind="forestplot",
model_names=None,
var_names=None,
filter_vars=None,
transform=None,
coords=None,
combined=False,
hdi_prob=None,
rope=None,
quartiles=True,
ess=False,
r_hat=False,
colors="cycle",
textsize=None,
linewidth=None,
markersize=None,
legend=True,
labeller=None,
ridgeplot_alpha=None,
ridgeplot_overlap=2,
ridgeplot_kind="auto",
ridgeplot_truncate=True,
ridgeplot_quantiles=None,
figsize=None,
ax=None,
backend=None,
backend_config=None,
backend_kwargs=None,
show=None,
):
"""Forest plot to compare HDI intervals from a number of distributions.
Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.
Parameters
----------
data: obj or list[obj]
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
kind: str
Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot"
model_names: list[str], optional
    List with names for the models in the list of data. Useful when plotting more than one
dataset.
var_names: list[str], optional
List of variables to plot (defaults to None, which results in all variables plotted)
Prefix the variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like", interpret
var_names as substrings of the real variables names. If "regex", interpret var_names as
regular expressions on the real variables names. A la ``pandas.filter``.
transform: callable
    Function to transform data (defaults to None, i.e. the identity function)
coords: dict, optional
Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel`.
combined: bool
Flag for combining multiple chains into a single chain. If ``False``(default), chains will
be plotted separately.
hdi_prob: float, optional
Plots highest posterior density interval for chosen percentage of density.
Defaults to `0.94`.
rope: tuple or dictionary of tuples
Lower and upper values of the Region Of Practical Equivalence. If a list with one interval
only is provided, the **rope** will be displayed across the y-axis. If more than one
interval is provided the length of the list should match the number of variables.
quartiles: bool, optional
Flag for plotting the interquartile range, in addition to the hdi_prob intervals.
Defaults to ``True``.
r_hat: bool, optional
Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False
ess: bool, optional
Flag for plotting the effective sample size. Defaults to ``False``.
colors: list or string, optional
    list with valid matplotlib colors, one color per model. Alternatively, a string can be passed.
    If the string is `cycle`, it will automatically choose a color per model from matplotlib's
    cycle. If a single color is passed, e.g. 'k', 'C2', 'red', this color will be used for all
    models. Defaults to 'cycle'.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``.
linewidth: int
Line width throughout. If None it will be autoscaled based on ``figsize``.
markersize: int
Markersize throughout. If None it will be autoscaled based on ``figsize``.
legend : bool, optional
Show a legend with the color encoded model information.
Defaults to ``True`` if there are multiple models
labeller : labeller instance, optional
Class providing the method ``make_model_label`` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ridgeplot_alpha: float
Transparency for ridgeplot fill. If **0**, border is colored by model, otherwise
a `black` outline is used.
ridgeplot_overlap: float
Overlap height for ridgeplots.
ridgeplot_kind: string
By default ("auto") continuous variables are plotted using KDEs and discrete ones using
histograms. To override this use "hist" to plot histograms and "density" for KDEs
ridgeplot_truncate: bool
    Whether to truncate densities according to the value of hdi_prob. Defaults to ``True``.
ridgeplot_quantiles: list
Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
Defaults to ``None``.
figsize: tuple
Figure size. If ``None``, it will be defined automatically.
ax: axes, optional
:class:`matplotlib.axes` or :class:`bokeh.plotting.figure`.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``.
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
gridspec: matplotlib GridSpec or bokeh figures
See Also
--------
plot_posterior: Plot Posterior densities in the style of John K. Kruschke’s book.
plot_density: Generate KDE plots for continuous variables and histograms for discrete ones.
Examples
--------
    Forestplot
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered_data = az.load_arviz_data('non_centered_eight')
>>> axes = az.plot_forest(non_centered_data,
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Forestplot with multiple datasets
.. plot::
:context: close-figs
>>> centered_data = az.load_arviz_data('centered_eight')
>>> axes = az.plot_forest([non_centered_data, centered_data],
>>> model_names = ["non centered eight", "centered eight"],
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools models')
    Forestplot with ropes
.. plot::
:context: close-figs
>>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}
>>> axes = az.plot_forest(non_centered_data,
>>> rope=rope,
>>> var_names='~tau',
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_overlap=3,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot non-truncated and with quantiles
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_truncate=False,
>>> ridgeplot_quantiles=[.25, .5, .75],
>>> ridgeplot_overlap=0.7,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
"""
if not isinstance(data, (list, tuple)):
data = [data]
if len(data) == 1:
legend = False
if coords is None:
coords = {}
if labeller is None:
labeller = NoModelLabeller() if legend else BaseLabeller()
datasets = [convert_to_dataset(datum) for datum in reversed(data)]
if transform is not None:
datasets = [transform(dataset) for dataset in datasets]
datasets = get_coords(
datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords
)
var_names = _var_names(var_names, datasets, filter_vars)
ncols, width_ratios = 1, [3]
if ess:
ncols += 1
width_ratios.append(1)
if r_hat:
ncols += 1
width_ratios.append(1)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
plot_forest_kwargs = dict(
ax=ax,
datasets=datasets,
var_names=var_names,
model_names=model_names,
combined=combined,
colors=colors,
figsize=figsize,
width_ratios=width_ratios,
linewidth=linewidth,
markersize=markersize,
kind=kind,
ncols=ncols,
hdi_prob=hdi_prob,
quartiles=quartiles,
rope=rope,
ridgeplot_overlap=ridgeplot_overlap,
ridgeplot_alpha=ridgeplot_alpha,
ridgeplot_kind=ridgeplot_kind,
ridgeplot_truncate=ridgeplot_truncate,
ridgeplot_quantiles=ridgeplot_quantiles,
textsize=textsize,
legend=legend,
labeller=labeller,
ess=ess,
r_hat=r_hat,
backend_kwargs=backend_kwargs,
backend_config=backend_config,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_forest", "forestplot", backend)
axes = plot(**plot_forest_kwargs)
return axes
|
2,721 |
def roc_auc_score(
y_true,
y_score,
*,
average="macro",
sample_weight=None,
max_fpr=None,
multi_class="raise",
labels=None,
):
"""Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) \
from prediction scores.
Note: this implementation can be used with binary, multiclass and
multilabel classification, but some restrictions apply (see Parameters).
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_classes)
True labels or binary label indicators. The binary and multiclass cases
expect labels with shape (n_samples,) while the multilabel case expects
binary label indicators with shape (n_samples, n_classes).
y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
Target scores.
* In the binary case, it corresponds to an array of shape
`(n_samples,)`. Both probability estimates and non-thresholded
decision values can be provided. The probability estimates correspond
to the **probability of the class with the greater label**,
i.e. `estimator.classes_[1]` and thus
`estimator.predict_proba(X, y)[:, 1]`. The decision values
corresponds to the output of `estimator.decision_function(X, y)`.
See more information in the :ref:`User guide <roc_auc_binary>`;
* In the multiclass case, it corresponds to an array of shape
`(n_samples, n_classes)` of probability estimates provided by the
`predict_proba` method. The probability estimates **must**
sum to 1 across the possible classes. In addition, the order of the
class scores must correspond to the order of ``labels``,
if provided, or else to the numerical or lexicographical order of
the labels in ``y_true``. See more information in the
:ref:`User guide <roc_auc_multiclass>`;
* In the multilabel case, it corresponds to an array of shape
`(n_samples, n_classes)`. Probability estimates are provided by the
`predict_proba` method and the non-thresholded decision values by
the `decision_function` method. The probability estimates correspond
to the **probability of the class with the greater label for each
output** of the classifier. See more information in the
:ref:`User guide <roc_auc_multilabel>`.
average : {'micro', 'macro', 'samples', 'weighted'} or None, \
default='macro'
If ``None``, the scores for each class are returned.
Otherwise, this determines the type of averaging performed on the data.
Note: multiclass ROC AUC currently only handles the 'macro' and
'weighted' averages. For multiclass targets, `average=None`
is only implemented for `multi_class='ovo'`.
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Will be ignored when ``y_true`` is binary.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
max_fpr : float > 0 and <= 1, default=None
If not ``None``, the standardized partial AUC [2]_ over the range
[0, max_fpr] is returned. For the multiclass case, ``max_fpr``,
should be either equal to ``None`` or ``1.0`` as AUC ROC partial
computation currently is not supported for multiclass.
multi_class : {'raise', 'ovr', 'ovo'}, default='raise'
Only used for multiclass targets. Determines the type of configuration
to use. The default value raises an error, so either
``'ovr'`` or ``'ovo'`` must be passed explicitly.
``'ovr'``:
Stands for One-vs-rest. Computes the AUC of each class
against the rest [3]_ [4]_. This
treats the multiclass case in the same way as the multilabel case.
Sensitive to class imbalance even when ``average == 'macro'``,
because class imbalance affects the composition of each of the
'rest' groupings.
``'ovo'``:
Stands for One-vs-one. Computes the average AUC of all
possible pairwise combinations of classes [5]_.
Insensitive to class imbalance when
``average == 'macro'``.
labels : array-like of shape (n_classes,), default=None
Only used for multiclass targets. List of labels that index the
classes in ``y_score``. If ``None``, the numerical or lexicographical
order of the labels in ``y_true`` is used.
Returns
-------
auc : float
Area Under the Curve.
See Also
--------
average_precision_score : Area under the precision-recall curve.
roc_curve : Compute Receiver operating characteristic (ROC) curve.
RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic
(ROC) curve given an estimator and some data.
RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic
(ROC) curve given the true and predicted values.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. [2] `Analyzing a portion of the ROC curve. McClish, 1989
<https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_
.. [3] Provost, F., Domingos, P. (2000). Well-trained PETs: Improving
probability estimation trees (Section 6.2), CeDER Working Paper
#IS-00-04, Stern School of Business, New York University.
.. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern
Recognition Letters, 27(8), 861-874.
<https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_
.. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area
Under the ROC Curve for Multiple Class Classification Problems.
Machine Learning, 45(2), 171-186.
<http://link.springer.com/article/10.1023/A:1010920819831>`_
Examples
--------
Binary case:
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.metrics import roc_auc_score
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y)
>>> roc_auc_score(y, clf.predict_proba(X)[:, 1])
0.99...
>>> roc_auc_score(y, clf.decision_function(X))
0.99...
Multiclass case:
>>> from sklearn.datasets import load_iris
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegression(solver="liblinear").fit(X, y)
>>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr')
0.99...
Multilabel case:
>>> import numpy as np
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.multioutput import MultiOutputClassifier
>>> X, y = make_multilabel_classification(random_state=0)
>>> clf = MultiOutputClassifier(clf).fit(X, y)
>>> # get a list of n_output containing probability arrays of shape
>>> # (n_samples, n_classes)
>>> y_pred = clf.predict_proba(X)
>>> # extract the positive columns for each output
>>> y_pred = np.transpose([pred[:, 1] for pred in y_pred])
>>> roc_auc_score(y, y_pred, average=None)
array([0.82..., 0.86..., 0.94..., 0.85... , 0.94...])
>>> from sklearn.linear_model import RidgeClassifierCV
>>> clf = RidgeClassifierCV().fit(X, y)
>>> roc_auc_score(y, clf.decision_function(X), average=None)
array([0.81..., 0.84... , 0.93..., 0.87..., 0.94...])
"""
y_type = type_of_target(y_true, input_name="y_true")
y_true = check_array(y_true, ensure_2d=False, dtype=None)
y_score = check_array(y_score, ensure_2d=False)
if y_type == "multiclass" or (
y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2
):
# do not support partial ROC computation for multiclass
if max_fpr is not None and max_fpr != 1.0:
raise ValueError(
"Partial AUC computation not available in "
"multiclass setting, 'max_fpr' must be"
" set to `None`, received `max_fpr={0}` "
"instead".format(max_fpr)
)
if multi_class == "raise":
raise ValueError("multi_class must be in ('ovo', 'ovr')")
return _multiclass_roc_auc_score(
y_true, y_score, labels, multi_class, average, sample_weight
)
elif y_type == "binary":
labels = np.unique(y_true)
y_true = label_binarize(y_true, classes=labels)[:, 0]
return _average_binary_score(
partial(_binary_roc_auc_score, max_fpr=max_fpr),
y_true,
y_score,
average,
sample_weight=sample_weight,
)
else: # multilabel-indicator
return _average_binary_score(
partial(_binary_roc_auc_score, max_fpr=max_fpr),
y_true,
y_score,
average,
sample_weight=sample_weight,
)
|
def roc_auc_score(
y_true,
y_score,
*,
average="macro",
sample_weight=None,
max_fpr=None,
multi_class="raise",
labels=None,
):
"""Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) \
from prediction scores.
Note: this implementation can be used with binary, multiclass and
multilabel classification, but some restrictions apply (see Parameters).
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_classes)
True labels or binary label indicators. The binary and multiclass cases
expect labels with shape (n_samples,) while the multilabel case expects
binary label indicators with shape (n_samples, n_classes).
y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
Target scores.
* In the binary case, it corresponds to an array of shape
`(n_samples,)`. Both probability estimates and non-thresholded
decision values can be provided. The probability estimates correspond
to the **probability of the class with the greater label**,
i.e. `estimator.classes_[1]` and thus
`estimator.predict_proba(X)[:, 1]`. The decision values
correspond to the output of `estimator.decision_function(X)`.
See more information in the :ref:`User guide <roc_auc_binary>`;
* In the multiclass case, it corresponds to an array of shape
`(n_samples, n_classes)` of probability estimates provided by the
`predict_proba` method. The probability estimates **must**
sum to 1 across the possible classes. In addition, the order of the
class scores must correspond to the order of ``labels``,
if provided, or else to the numerical or lexicographical order of
the labels in ``y_true``. See more information in the
:ref:`User guide <roc_auc_multiclass>`;
* In the multilabel case, it corresponds to an array of shape
`(n_samples, n_classes)`. Probability estimates are provided by the
`predict_proba` method and the non-thresholded decision values by
the `decision_function` method. The probability estimates correspond
to the **probability of the class with the greater label for each
output** of the classifier. See more information in the
:ref:`User guide <roc_auc_multilabel>`.
average : {'micro', 'macro', 'samples', 'weighted'} or None, \
default='macro'
If ``None``, the scores for each class are returned.
Otherwise, this determines the type of averaging performed on the data.
Note: multiclass ROC AUC currently only handles the 'macro' and
'weighted' averages. For multiclass targets, `average=None`
is only implemented for `multi_class='ovo'`.
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Will be ignored when ``y_true`` is binary.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
max_fpr : float > 0 and <= 1, default=None
If not ``None``, the standardized partial AUC [2]_ over the range
[0, max_fpr] is returned. For the multiclass case, ``max_fpr``
should be either equal to ``None`` or ``1.0`` as AUC ROC partial
computation currently is not supported for multiclass.
multi_class : {'raise', 'ovr', 'ovo'}, default='raise'
Only used for multiclass targets. Determines the type of configuration
to use. The default value raises an error, so either
``'ovr'`` or ``'ovo'`` must be passed explicitly.
``'ovr'``:
Stands for One-vs-rest. Computes the AUC of each class
against the rest [3]_ [4]_. This
treats the multiclass case in the same way as the multilabel case.
Sensitive to class imbalance even when ``average == 'macro'``,
because class imbalance affects the composition of each of the
'rest' groupings.
``'ovo'``:
Stands for One-vs-one. Computes the average AUC of all
possible pairwise combinations of classes [5]_.
Insensitive to class imbalance when
``average == 'macro'``.
labels : array-like of shape (n_classes,), default=None
Only used for multiclass targets. List of labels that index the
classes in ``y_score``. If ``None``, the numerical or lexicographical
order of the labels in ``y_true`` is used.
Returns
-------
auc : float
Area Under the Curve score.
See Also
--------
average_precision_score : Area under the precision-recall curve.
roc_curve : Compute Receiver operating characteristic (ROC) curve.
RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic
(ROC) curve given an estimator and some data.
RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic
(ROC) curve given the true and predicted values.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. [2] `Analyzing a portion of the ROC curve. McClish, 1989
<https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_
.. [3] Provost, F., Domingos, P. (2000). Well-trained PETs: Improving
probability estimation trees (Section 6.2), CeDER Working Paper
#IS-00-04, Stern School of Business, New York University.
.. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern
Recognition Letters, 27(8), 861-874.
<https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_
.. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area
Under the ROC Curve for Multiple Class Classification Problems.
Machine Learning, 45(2), 171-186.
<http://link.springer.com/article/10.1023/A:1010920819831>`_
Examples
--------
Binary case:
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.metrics import roc_auc_score
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y)
>>> roc_auc_score(y, clf.predict_proba(X)[:, 1])
0.99...
>>> roc_auc_score(y, clf.decision_function(X))
0.99...
Multiclass case:
>>> from sklearn.datasets import load_iris
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegression(solver="liblinear").fit(X, y)
>>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr')
0.99...
Multilabel case:
>>> import numpy as np
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.multioutput import MultiOutputClassifier
>>> X, y = make_multilabel_classification(random_state=0)
>>> clf = MultiOutputClassifier(clf).fit(X, y)
>>> # get a list of n_output containing probability arrays of shape
>>> # (n_samples, n_classes)
>>> y_pred = clf.predict_proba(X)
>>> # extract the positive columns for each output
>>> y_pred = np.transpose([pred[:, 1] for pred in y_pred])
>>> roc_auc_score(y, y_pred, average=None)
array([0.82..., 0.86..., 0.94..., 0.85... , 0.94...])
>>> from sklearn.linear_model import RidgeClassifierCV
>>> clf = RidgeClassifierCV().fit(X, y)
>>> roc_auc_score(y, clf.decision_function(X), average=None)
array([0.81..., 0.84... , 0.93..., 0.87..., 0.94...])
"""
y_type = type_of_target(y_true, input_name="y_true")
y_true = check_array(y_true, ensure_2d=False, dtype=None)
y_score = check_array(y_score, ensure_2d=False)
if y_type == "multiclass" or (
y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2
):
# do not support partial ROC computation for multiclass
if max_fpr is not None and max_fpr != 1.0:
raise ValueError(
"Partial AUC computation not available in "
"multiclass setting, 'max_fpr' must be"
" set to `None`, received `max_fpr={0}` "
"instead".format(max_fpr)
)
if multi_class == "raise":
raise ValueError("multi_class must be in ('ovo', 'ovr')")
return _multiclass_roc_auc_score(
y_true, y_score, labels, multi_class, average, sample_weight
)
elif y_type == "binary":
labels = np.unique(y_true)
y_true = label_binarize(y_true, classes=labels)[:, 0]
return _average_binary_score(
partial(_binary_roc_auc_score, max_fpr=max_fpr),
y_true,
y_score,
average,
sample_weight=sample_weight,
)
else: # multilabel-indicator
return _average_binary_score(
partial(_binary_roc_auc_score, max_fpr=max_fpr),
y_true,
y_score,
average,
sample_weight=sample_weight,
)
|
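The pair above documents scikit-learn's roc_auc_score. As a minimal, hedged illustration of the max_fpr and multi_class options described in that docstring (assumes scikit-learn is installed; the snippet is illustrative and not part of the row):
from sklearn.datasets import load_breast_cancer, load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

# Binary case: full AUC vs. standardized partial AUC over [0, 0.1]
X, y = load_breast_cancer(return_X_y=True)
clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y)
scores = clf.predict_proba(X)[:, 1]
print(roc_auc_score(y, scores))               # full AUC
print(roc_auc_score(y, scores, max_fpr=0.1))  # McClish-standardized partial AUC

# Multiclass case: 'ovr' vs 'ovo' configurations described above
Xm, ym = load_iris(return_X_y=True)
clf_m = LogisticRegression(max_iter=1000).fit(Xm, ym)
proba = clf_m.predict_proba(Xm)
print(roc_auc_score(ym, proba, multi_class="ovr"))  # one-vs-rest averaging
print(roc_auc_score(ym, proba, multi_class="ovo"))  # one-vs-one averaging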
26,759 |
def build_finished(app, exception):
"""Sphpix "build_finished" event handler."""
if exception:
return
if not isinstance(app.builder, builders.StandaloneHTMLBuilder):
log.warning(
F"The plugin is support only 'html' builder, but you are using '{type(app.builder)}'. Skipping..."
)
return
script_url = app.config.redoc_script_url
output_filename = "script.js"
cache_filepath = fetch_and_cache(script_url, output_filename)
copy_file(cache_filepath, os.path.join(app.builder.outdir, '_static', "redoc.js"))
|
def build_finished(app, exception):
"""Sphinx "build_finished" event handler."""
if exception:
return
if not isinstance(app.builder, builders.StandaloneHTMLBuilder):
log.warning(
F"The plugin is support only 'html' builder, but you are using '{type(app.builder)}'. Skipping..."
)
return
script_url = app.config.redoc_script_url
output_filename = "script.js"
cache_filepath = fetch_and_cache(script_url, output_filename)
copy_file(cache_filepath, os.path.join(app.builder.outdir, '_static', "redoc.js"))
|
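The handler above fetches and caches a Redoc script and copies it into the HTML output's _static directory. A hedged sketch of how such a handler is typically wired into a Sphinx extension's setup() follows; the default URL is a hypothetical placeholder, and build_finished/fetch_and_cache are the helpers shown above:
def setup(app):
    # Register the config value the handler reads; the default here is
    # purely illustrative, not the extension's real default.
    app.add_config_value(
        "redoc_script_url",
        "https://cdn.example.com/redoc/redoc.standalone.js",
        "html",
    )
    # Run the handler once the HTML build has finished.
    app.connect("build-finished", build_finished)
    return {"parallel_read_safe": True, "parallel_write_safe": True}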
1,499 |
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> from sklearn.metrics import log_loss
>>> log_loss(["spam", "ham", "ham", "spam"],
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true, sample_weight)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
|
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) \
or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> from sklearn.metrics import log_loss
>>> log_loss(["spam", "ham", "ham", "spam"],
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true, sample_weight)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
|
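To make the per-sample formula in the docstring concrete, here is a small worked example (assumes NumPy and scikit-learn are installed) that evaluates -(yt log(yp) + (1 - yt) log(1 - yp)) by hand and checks it against the function:
import numpy as np
from sklearn.metrics import log_loss

y_true = np.array([1, 0, 1])
p_hat = np.array([0.9, 0.2, 0.6])   # predicted P(y = 1) for each sample
manual = -np.mean(y_true * np.log(p_hat) + (1 - y_true) * np.log(1 - p_hat))
print(manual)                  # ~0.2798
print(log_loss(y_true, p_hat)) # matches the hand computation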
8,451 |
def identify_jwst_miri_mrs(origin, *args, **kwargs):
"""
Check whether the given set of files is a JWST MIRI MRS spectral data product.
"""
input = args[2]
# if string, it can be either a directory or a glob pattern (this last
# one not implemented yet due to astropy choking when passed an invalid
# file path string).
if isinstance(input, str):
if os.path.isdir(input):
return True
if len(glob.glob(input)) > 0:
return True
# or it can be either a list of file names, or a list of file objects
elif len(input) > 1:
return True
return False
|
def identify_jwst_miri_mrs(origin, *args, **kwargs):
"""
Check whether the given set of files is a JWST MIRI MRS spectral data product.
"""
input = args[2]
# if string, it can be either a directory or a glob pattern (this last
# one not implemented yet due to astropy choking when passed an invalid
# file path string).
if isinstance(input, str):
if os.path.isdir(input):
return True
if len(glob.glob(input)) > 0:
return True
# or it can be either a list of file names, or a list of file objects
elif isinstance(input, (list, tuple)) and len(input) > 0:
return True
return False
|
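The identifier above accepts three input shapes: a directory path, a glob pattern, or a list of file names. Purely illustrative calls for each shape (the paths and file names are hypothetical; the positional arguments mirror the (origin, *args) convention in which args[2] carries the input):
files = ["ch1-short_s3d.fits", "ch2-short_s3d.fits"]  # hypothetical file names
print(identify_jwst_miri_mrs("read", None, None, "/data/miri_obs/"))            # directory
print(identify_jwst_miri_mrs("read", None, None, "/data/miri_obs/*_s3d.fits"))  # glob pattern
print(identify_jwst_miri_mrs("read", None, None, files))                        # list of files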
58,482 |
def send_notification_for_subscriber(
notification: Dict,
bucket_name: str,
object_path: str,
version_id: str,
api_method: str,
action: str,
event_name: str,
headers,
):
bucket_name = normalize_bucket_name(bucket_name)
if not event_type_matches(notification["Event"], action, api_method) or not filter_rules_match(
notification.get("Filter"), object_path
):
return
key = unquote(object_path.replace("//", "/"))[1:]
s3_client = aws_stack.connect_to_service("s3")
object_data = {}
try:
object_data = s3_client.head_object(Bucket=bucket_name, Key=key)
except botocore.exceptions.ClientError:
pass
source_ip = headers.get("X-Forwarded-For", "127.0.0.1").split(",")[0]
# build event message
message = get_event_message(
event_name=event_name,
bucket_name=bucket_name,
file_name=key,
etag=object_data.get("ETag", ""),
file_size=object_data.get("ContentLength", 0),
version_id=version_id,
config_id=notification["Id"],
source_ip=source_ip,
)
message = json.dumps(message)
if notification.get("Queue"):
region = aws_stack.extract_region_from_arn(notification["Queue"])
sqs_client = aws_stack.connect_to_service("sqs", region_name=region)
try:
queue_url = aws_stack.sqs_queue_url_for_arn(notification["Queue"])
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=message,
MessageSystemAttributes=create_sqs_system_attributes(headers),
)
except Exception as e:
LOGGER.warning(
f"Unable to send notification for S3 bucket \"{bucket_name}\" to SQS queue \"{notification['Queue']}\": {e}",
)
if notification.get("Topic"):
region = aws_stack.extract_region_from_arn(notification["Topic"])
sns_client = aws_stack.connect_to_service("sns", region_name=region)
try:
sns_client.publish(
TopicArn=notification["Topic"],
Message=message,
Subject="Amazon S3 Notification",
)
except Exception as e:
LOGGER.warning(
f"Unable to send notification for S3 bucket \"{bucket_name}\" to SNS topic \"{notification['Topic']}\": {e}"
)
# CloudFunction and LambdaFunction are semantically identical
lambda_function_config = notification.get("CloudFunction") or notification.get("LambdaFunction")
if lambda_function_config:
# make sure we don't run into a socket timeout
region = aws_stack.extract_region_from_arn(lambda_function_config)
connection_config = botocore.config.Config(read_timeout=300)
lambda_client = aws_stack.connect_to_service(
"lambda", config=connection_config, region_name=region
)
try:
lambda_client.invoke(
FunctionName=lambda_function_config,
InvocationType="Event",
Payload=message,
)
except Exception:
LOGGER.warning(
f'Unable to send notification for S3 bucket "{bucket_name}" to Lambda function "{lambda_function_config}".'
)
if "EventBridge" in notification:
s3api_client = aws_stack.connect_to_service("s3")
region = (
s3api_client.get_bucket_location(Bucket=bucket_name)["LocationConstraint"]
or config.DEFAULT_REGION
)
events_client = aws_stack.connect_to_service("events", region_name=region)
entry = {
"Source": "aws.s3",
"Resources": [f"arn:aws:s3:::{bucket_name}"],
"Detail": {
"version": version_id or "0",
"bucket": {"name": bucket_name},
"object": {
"key": key,
"size": 4,
"etag": object_data.get("ETag", ""),
"sequencer": "0062E99A88DC407460",
},
"request-id": "RKREYG1RN2X92YX6",
"requester": "074255357339",
"source-ip-address": source_ip,
},
}
if action == "ObjectCreated":
entry["DetailType"] = "Object Created"
entry["Detail"]["reason"] = f"{api_method}Object"
if action == "ObjectRemoved":
entry["DetailType"] = "Object Deleted"
entry["Detail"]["reason"] = f"{api_method}Object"
if action == "ObjectTagging":
entry["DetailType"] = (
"Object Tags Added" if api_method == "Put" else "Object Tags Deleted"
)
entry["Detail"] = json.dumps(entry["Detail"])
try:
events_client.put_events(Entries=[entry])
except Exception as e:
LOGGER.warning(
f'Unable to send notification for S3 bucket "{bucket_name}" to EventBridge', e
)
if not filter(lambda x: notification.get(x), NOTIFICATION_DESTINATION_TYPES):
LOGGER.warning(
"Neither of %s defined for S3 notification.", "/".join(NOTIFICATION_DESTINATION_TYPES)
)
|
def send_notification_for_subscriber(
notification: Dict,
bucket_name: str,
object_path: str,
version_id: str,
api_method: str,
action: str,
event_name: str,
headers,
):
bucket_name = normalize_bucket_name(bucket_name)
if not event_type_matches(notification["Event"], action, api_method) or not filter_rules_match(
notification.get("Filter"), object_path
):
return
key = unquote(object_path.replace("//", "/"))[1:]
s3_client = aws_stack.connect_to_service("s3")
object_data = {}
try:
object_data = s3_client.head_object(Bucket=bucket_name, Key=key)
except botocore.exceptions.ClientError:
pass
source_ip = headers.get("X-Forwarded-For", "127.0.0.1").split(",")[0]
# build event message
message = get_event_message(
event_name=event_name,
bucket_name=bucket_name,
file_name=key,
etag=object_data.get("ETag", ""),
file_size=object_data.get("ContentLength", 0),
version_id=version_id,
config_id=notification["Id"],
source_ip=source_ip,
)
message = json.dumps(message)
if notification.get("Queue"):
region = aws_stack.extract_region_from_arn(notification["Queue"])
sqs_client = aws_stack.connect_to_service("sqs", region_name=region)
try:
queue_url = aws_stack.sqs_queue_url_for_arn(notification["Queue"])
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=message,
MessageSystemAttributes=create_sqs_system_attributes(headers),
)
except Exception as e:
LOGGER.warning(
f"Unable to send notification for S3 bucket \"{bucket_name}\" to SQS queue \"{notification['Queue']}\": {e}",
)
if notification.get("Topic"):
region = aws_stack.extract_region_from_arn(notification["Topic"])
sns_client = aws_stack.connect_to_service("sns", region_name=region)
try:
sns_client.publish(
TopicArn=notification["Topic"],
Message=message,
Subject="Amazon S3 Notification",
)
except Exception as e:
LOGGER.warning(
f"Unable to send notification for S3 bucket \"{bucket_name}\" to SNS topic \"{notification['Topic']}\": {e}"
)
# CloudFunction and LambdaFunction are semantically identical
lambda_function_config = notification.get("CloudFunction") or notification.get("LambdaFunction")
if lambda_function_config:
# make sure we don't run into a socket timeout
region = aws_stack.extract_region_from_arn(lambda_function_config)
connection_config = botocore.config.Config(read_timeout=300)
lambda_client = aws_stack.connect_to_service(
"lambda", config=connection_config, region_name=region
)
try:
lambda_client.invoke(
FunctionName=lambda_function_config,
InvocationType="Event",
Payload=message,
)
except Exception:
LOGGER.warning(
f'Unable to send notification for S3 bucket "{bucket_name}" to Lambda function "{lambda_function_config}".'
)
if "EventBridge" in notification:
s3api_client = aws_stack.connect_to_service("s3")
region = (
s3api_client.get_bucket_location(Bucket=bucket_name)["LocationConstraint"]
or config.DEFAULT_REGION
)
events_client = aws_stack.connect_to_service("events", region_name=region)
entry = {
"Source": "aws.s3",
"Resources": [f"arn:aws:s3:::{bucket_name}"],
"Detail": {
"version": version_id or "0",
"bucket": {"name": bucket_name},
"object": {
"key": key,
"size": 4,
"etag": object_data.get("ETag", ""),
"sequencer": "0062E99A88DC407460",
},
"request-id": "RKREYG1RN2X92YX6",
"requester": "074255357339",
"source-ip-address": source_ip,
},
}
if action == "ObjectCreated":
entry["DetailType"] = "Object Created"
entry["Detail"]["reason"] = f"{api_method}Object"
if action == "ObjectRemoved":
entry["DetailType"] = "Object Deleted"
entry["Detail"]["reason"] = f"{api_method}Object"
if action == "ObjectTagging":
entry["DetailType"] = (
"Object Tags Added" if api_method == "Put" else "Object Tags Deleted"
)
entry["Detail"] = json.dumps(entry["Detail"])
try:
events_client.put_events(Entries=[entry])
except Exception as e:
LOGGER.exception(
'Unable to send notification for S3 bucket "%s" to EventBridge', bucket_name
)
if not filter(lambda x: notification.get(x), NOTIFICATION_DESTINATION_TYPES):
LOGGER.warning(
"Neither of %s defined for S3 notification.", "/".join(NOTIFICATION_DESTINATION_TYPES)
)
|
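For orientation, here is a purely hypothetical notification configuration using the keys the function above reads ("Id", "Event", "Filter", and one destination key such as "Queue", "Topic", "CloudFunction"/"LambdaFunction", or "EventBridge"); the ARN and the exact filter nesting are assumptions for illustration, not values verified against LocalStack:
notification = {
    "Id": "config-1",
    "Event": "s3:ObjectCreated:*",
    # Suffix filter; the nesting below is assumed, not verified against
    # LocalStack's parser.
    "Filter": {"S3Key": {"FilterRule": [{"Name": "suffix", "Value": ".csv"}]}},
    "Queue": "arn:aws:sqs:us-east-1:000000000000:my-queue",  # hypothetical ARN
}
# event_type_matches() and filter_rules_match() gate delivery; if both pass,
# the JSON event message built above is sent to the configured destination.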
45,717 |
def rapsd(
field, fft_method=None, return_freq=False, d=1.0, normalize=False, **fft_kwargs
):
"""Compute radially averaged power spectral density (RAPSD) from the given
2D input field.
Parameters
----------
field: array_like
A 2d array of shape (m, n) containing the input field.
fft_method: object
A module or object implementing the same methods as numpy.fft and
scipy.fftpack. If set to None, field is assumed to represent the
shifted discrete Fourier transform of the input field, where the
origin is at the center of the array
(see numpy.fft.fftshift or scipy.fftpack.fftshift).
return_freq: bool
Whether to also return the Fourier frequencies.
d: scalar
Sample spacing (inverse of the sampling rate). Defaults to 1.
Applicable if return_freq is 'True'.
normalize: bool
If True, normalize the power spectrum so that it sums to one.
Returns
-------
out: ndarray
One-dimensional array containing the RAPSD. The length of the array is
int(l/2) (if l is even) or int(l/2)+1 (if l is odd), where l=max(m,n).
freq: ndarray
One-dimensional array containing the Fourier frequencies.
References
----------
:cite:`RC2011`
"""
if len(field.shape) != 2:
raise ValueError(
f"{len(field.shape)} dimensions are found, but the number "
"of dimensions should be 2"
)
if np.sum(np.isnan(field)) > 0:
raise ValueError("input array Z should not contain nans")
m, n = field.shape
yc, xc = arrays.compute_centred_coord_array(m, n)
r_grid = np.sqrt(xc * xc + yc * yc).round()
l = max(field.shape[0], field.shape[1])
if l % 2 == 1:
r_range = np.arange(0, int(l / 2) + 1)
else:
r_range = np.arange(0, int(l / 2))
if fft_method is not None:
psd = fft_method.fftshift(fft_method.fft2(field, **fft_kwargs))
psd = np.abs(psd) ** 2 / psd.size
else:
psd = field
result = []
for r in r_range:
mask = r_grid == r
psd_vals = psd[mask]
result.append(np.mean(psd_vals))
result = np.array(result)
if normalize:
result /= np.sum(result)
if return_freq:
freq = np.fft.fftfreq(l, d=d)
freq = freq[r_range]
return result, freq
else:
return result
|
def rapsd(
field, fft_method=None, return_freq=False, d=1.0, normalize=False, **fft_kwargs
):
"""Compute radially averaged power spectral density (RAPSD) from the given
2D input field.
Parameters
----------
field: array_like
A 2d array of shape (m, n) containing the input field.
fft_method: object
A module or object implementing the same methods as numpy.fft and
scipy.fftpack. If set to None, field is assumed to represent the
shifted discrete Fourier transform of the input field, where the
origin is at the center of the array
(see numpy.fft.fftshift or scipy.fftpack.fftshift).
return_freq: bool
Whether to also return the Fourier frequencies.
d: scalar
Sample spacing (inverse of the sampling rate). Defaults to 1.
Applicable if return_freq is 'True'.
normalize: bool
If True, normalize the power spectrum so that it sums to one.
Returns
-------
out: ndarray
One-dimensional array containing the RAPSD. The length of the array is
int(l/2) (if l is even) or int(l/2)+1 (if l is odd), where l=max(m,n).
freq: ndarray
One-dimensional array containing the Fourier frequencies.
References
----------
:cite:`RC2011`
"""
if len(field.shape) != 2:
raise ValueError(
f"{len(field.shape)} dimensions are found, but the number "
"of dimensions should be 2"
)
if np.sum(np.isnan(field)) > 0:
raise ValueError("input field should not contain nans")
m, n = field.shape
yc, xc = arrays.compute_centred_coord_array(m, n)
r_grid = np.sqrt(xc * xc + yc * yc).round()
l = max(field.shape[0], field.shape[1])
if l % 2 == 1:
r_range = np.arange(0, int(l / 2) + 1)
else:
r_range = np.arange(0, int(l / 2))
if fft_method is not None:
psd = fft_method.fftshift(fft_method.fft2(field, **fft_kwargs))
psd = np.abs(psd) ** 2 / psd.size
else:
psd = field
result = []
for r in r_range:
mask = r_grid == r
psd_vals = psd[mask]
result.append(np.mean(psd_vals))
result = np.array(result)
if normalize:
result /= np.sum(result)
if return_freq:
freq = np.fft.fftfreq(l, d=d)
freq = freq[r_range]
return result, freq
else:
return result
|
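A minimal usage sketch of the function above (assumes NumPy, and that rapsd is importable, e.g. from pysteps.utils.spectral), computing the spectrum of a random field with numpy.fft as the fft_method:
import numpy as np

field = np.random.default_rng(0).normal(size=(128, 128))
psd, freq = rapsd(field, fft_method=np.fft, return_freq=True, d=1.0)
print(psd.shape, freq.shape)  # both have length 64 for a 128x128 input
print(psd.sum())              # pass normalize=True to make the spectrum sum to 1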
33,114 |
def rk4(f, x, t, dt, order=4):
"""Runge-Kutta (explicit, non-adaptive) numerical ODE solvers.
Parameters
----------
f : function
The forcing of the ODE must be a function of the form f(t, x)
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Time step interval of the ODE solver
order : int, optional
The order of RK method. Default: 4
Returns
-------
ndarray
State vector at the new time step t+dt
"""
if order >=1: k1 = dt * f(t , x) # noqa
if order >=2: k2 = dt * f(t+dt/2, x+k1/2) # noqa
if order ==3: k3 = dt * f(t+dt , x+k2*2-k1) # noqa
if order ==4: # noqa
k3 = dt * f(t+dt/2, x+k2/2) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if order ==1: return x + k1 # noqa
elif order ==2: return x + k2 # noqa
elif order ==3: return x + (k1 + 4*k2 + k3)/6 # noqa
elif order ==4: return x + (k1 + 2*(k2 + k3) + k4)/6 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
def rk4(f, x, t, dt, order=4):
"""Runge-Kutta (explicit, non-adaptive) numerical ODE solvers.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Time step interval of the ODE solver
order : int, optional
The order of RK method. Default: 4
Returns
-------
ndarray
State vector at the new time step t+dt
"""
if order >=1: k1 = dt * f(t , x) # noqa
if order >=2: k2 = dt * f(t+dt/2, x+k1/2) # noqa
if order ==3: k3 = dt * f(t+dt , x+k2*2-k1) # noqa
if order ==4: # noqa
k3 = dt * f(t+dt/2, x+k2/2) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if order ==1: return x + k1 # noqa
elif order ==2: return x + k2 # noqa
elif order ==3: return x + (k1 + 4*k2 + k3)/6 # noqa
elif order ==4: return x + (k1 + 2*(k2 + k3) + k4)/6 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
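A quick sanity check of the integrator above on dx/dt = -x, whose exact solution is x(t) = exp(-t) (assumes NumPy only):
import numpy as np

f = lambda t, x: -x          # time derivative of the toy system
x, t, dt = 1.0, 0.0, 0.1
for _ in range(100):         # integrate from t = 0 to t = 10
    x = rk4(f, x, t, dt, order=4)
    t += dt
print(x, np.exp(-10.0))      # the two values agree closely (RK4 is 4th-order accurate)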
20,001 |
def multi(img, coord, radius, spacing=None, nrows=None, ncols=None):
"""Create a labeled mask for color card chips
Inputs
img = Input image data.
coord = Two-element tuple of the center of the top left object.
radius = Radius of ROIs.
spacing = Two-element tuple of the horizontal and vertical spacing between ROIs.
nrows = Number of chip rows.
ncols = Number of chip columns.
Returns:
roi_contour = list of ROI contours
roi_hierarchy = list of ROI contour hierarchies
:param img: numpy.ndarray
:param coord: tuple, list
:param radius: int
:param spacing: tuple
:param nrows: int
:param ncols: int
:return roi_contour: list
:return roi_hierarchy: list
"""
# Autoincrement the device counter
params.device += 1
# Initialize ROI list
rois = []
# Store user debug
debug = params.debug
# Temporarily disable debug
params.debug = None
# Get the height and width of the reference image
height, width = np.shape(img)[:2]
# Initialize a binary image of the circle
bin_img = np.zeros((height, width), dtype=np.uint8)
roi_contour = []
roi_hierarchy = []
# Grid of ROIs
if (type(coord) == tuple) and ((nrows and ncols) is not None):
# Loop over each row
for i in range(0, nrows):
# The upper left corner is the y starting coordinate + the ROI offset * the vertical spacing
y = coord[1] + i * spacing[1]
# Loop over each column
for j in range(0, ncols):
# The upper left corner is the x starting coordinate + the ROI offset * the
# horizontal spacing between chips
x = coord[0] + j * spacing[0]
# Create a chip ROI
rois.append(circle(img=img, x=x, y=y, r=radius))
# Draw the circle on the binary image
cv2.circle(bin_img, (x, y), radius, 255, -1)
# Make a list of contours and hierarchies
roi_contour.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)[-2:][0])
roi_hierarchy.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)[-2:][1])
# Create an array of contours and list of hierarchy for when debug is set to 'plot'
roi_contour1, roi_hierarchy1 = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE,
cv2.CHAIN_APPROX_NONE)[-2:]
# User specified ROI centers
elif (type(coord) == list) and ((nrows and ncols) is None):
for i in range(0, len(coord)):
y = coord[i][1]
x = coord[i][0]
rois.append(circle(img=img, x=x, y=y, r=radius))
# Draw the circle on the binary image
cv2.circle(bin_img, (x, y), radius, 255, -1)
# Make a list of contours and hierarchies
roi_contour.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)[-2:][0])
roi_hierarchy.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)[-2:][1])
# Create an array of contours and list of hierarchy for when debug is set to 'plot'
roi_contour1, roi_hierarchy1 = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE,
cv2.CHAIN_APPROX_NONE)[-2:]
else:
fatal_error("Function can either make a grid of ROIs (user must provide nrows, ncols, spacing, and coord) "
"or take custom ROI coordinates (user must provide a list of tuples to 'coord' parameter)")
# Reset debug
params.debug = debug
# Draw the ROIs if requested
if params.debug is not None:
_draw_roi(img=img, roi_contour=roi_contour1)
return roi_contour, roi_hierarchy
|
def multi(img, coord, radius, spacing=None, nrows=None, ncols=None):
"""Create a labeled mask for color card chips
Inputs
img = Input image data.
coord = Two-element tuple of the center of the top left object.
radius = Radius of ROIs.
spacing = Two-element tuple of the horizontal and vertical spacing between ROIs.
nrows = Number of rows in ROI layout.
ncols = Number of chip columns.
Returns:
roi_contour = list of ROI contours
roi_hierarchy = list of ROI contour hierarchies
:param img: numpy.ndarray
:param coord: tuple, list
:param radius: int
:param spacing: tuple
:param nrows: int
:param ncols: int
:return roi_contour: list
:return roi_hierarchy: list
"""
# Autoincrement the device counter
params.device += 1
# Initialize ROI list
rois = []
# Store user debug
debug = params.debug
# Temporarily disable debug
params.debug = None
# Get the height and width of the reference image
height, width = np.shape(img)[:2]
# Initialize a binary image of the circle
bin_img = np.zeros((height, width), dtype=np.uint8)
roi_contour = []
roi_hierarchy = []
# Grid of ROIs
if (type(coord) == tuple) and ((nrows and ncols) is not None):
# Loop over each row
for i in range(0, nrows):
# The upper left corner is the y starting coordinate + the ROI offset * the vertical spacing
y = coord[1] + i * spacing[1]
# Loop over each column
for j in range(0, ncols):
# The upper left corner is the x starting coordinate + the ROI offset * the
# horizontal spacing between chips
x = coord[0] + j * spacing[0]
# Create a chip ROI
rois.append(circle(img=img, x=x, y=y, r=radius))
# Draw the circle on the binary image
cv2.circle(bin_img, (x, y), radius, 255, -1)
# Make a list of contours and hierarchies
roi_contour.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)[-2:][0])
roi_hierarchy.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)[-2:][1])
# Create an array of contours and list of hierarchy for when debug is set to 'plot'
roi_contour1, roi_hierarchy1 = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE,
cv2.CHAIN_APPROX_NONE)[-2:]
# User specified ROI centers
elif (type(coord) == list) and ((nrows and ncols) is None):
for i in range(0, len(coord)):
y = coord[i][1]
x = coord[i][0]
rois.append(circle(img=img, x=x, y=y, r=radius))
# Draw the circle on the binary image
cv2.circle(bin_img, (x, y), radius, 255, -1)
# Make a list of contours and hierarchies
roi_contour.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)[-2:][0])
roi_hierarchy.append(cv2.findContours(np.copy(bin_img), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)[-2:][1])
# Create an array of contours and list of hierarchy for when debug is set to 'plot'
roi_contour1, roi_hierarchy1 = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE,
cv2.CHAIN_APPROX_NONE)[-2:]
else:
fatal_error("Function can either make a grid of ROIs (user must provide nrows, ncols, spacing, and coord) "
"or take custom ROI coordinates (user must provide a list of tuples to 'coord' parameter)")
# Reset debug
params.debug = debug
# Draw the ROIs if requested
if params.debug is not None:
_draw_roi(img=img, roi_contour=roi_contour1)
return roi_contour, roi_hierarchy
|
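A hedged usage sketch of the grid form through PlantCV (assumes the plantcv package and an image file at a hypothetical path; in the version shown above the function returns the ROI contour and hierarchy lists):
from plantcv import plantcv as pcv

img, _, _ = pcv.readimage("color_card_plate.png")  # hypothetical image file
roi_contour, roi_hierarchy = pcv.roi.multi(
    img=img, coord=(100, 100), radius=20, spacing=(60, 60), nrows=4, ncols=6
)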
14,151 |
def _build_lib(lib, dist, build_dir):
"""Use setuptools to build `lib` into `build_dir`."""
dist.ext_modules = [lib]
_build_ext = build_ext(dist)
_build_ext.build_lib = build_dir
_build_ext.build_temp = build_dir
_build_ext.finalize_options()
_build_ext.run()
out_lib = _build_ext.get_outputs()
lib_name = lib.name
lib_path = os.path.abspath(out_lib[0])
dir_name = os.path.dirname(lib_path)
ext_name = os.path.splitext(lib_path)[1][1:]
if os.name == "nt":
ext_name = "dll"
target = os.path.join(os.path.abspath(dir_name), lib_name + "." + ext_name)
if target != lib_path:
_rename_safe(
lib_path, os.path.join(os.path.abspath(dir_name), lib_name + "." + ext_name)
)
return lib_name + "." + ext_name
|
def _build_lib(lib, dist, build_dir):
"""Use setuptools to build *lib* into *build_dir*."""
dist.ext_modules = [lib]
_build_ext = build_ext(dist)
_build_ext.build_lib = build_dir
_build_ext.build_temp = build_dir
_build_ext.finalize_options()
_build_ext.run()
out_lib = _build_ext.get_outputs()
lib_name = lib.name
lib_path = os.path.abspath(out_lib[0])
dir_name = os.path.dirname(lib_path)
ext_name = os.path.splitext(lib_path)[1][1:]
if os.name == "nt":
ext_name = "dll"
target = os.path.join(os.path.abspath(dir_name), lib_name + "." + ext_name)
if target != lib_path:
_rename_safe(
lib_path, os.path.join(os.path.abspath(dir_name), lib_name + "." + ext_name)
)
return lib_name + "." + ext_name
|
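A hedged sketch of how _build_lib might be driven (assumes setuptools and a C source file at the hypothetical path fastops.c; all names are illustrative):
from setuptools import Distribution, Extension

lib = Extension("fastops", sources=["fastops.c"])  # hypothetical extension module
dist = Distribution({"name": "fastops"})
built_name = _build_lib(lib, dist, build_dir="build")
print(built_name)  # e.g. "fastops.so" on Linux, "fastops.dll" on Windows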
28,033 |
def twodim_to_table(
lines: List[List[str]],
separate_head=True,
separate_footer=False
) -> Optional[str]:
""" Pretty-prints the given two-dimensional array's lines. """
str_parts = []
# Count the column width.
widths: List[int] = []
for line in lines:
for i, size in enumerate([len(str(x)) for x in line]):
while i >= len(widths):
widths.append(0)
if size > widths[i]:
widths[i] = size
# Generate the format string to pad the columns.
print_string = ""
for i, width in enumerate(widths):
print_string += "{" + str(i) + ":" + str(width) + "} | "
if not print_string:
return ""
print_string = print_string[:-3]
# Print the actual data.
str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
for i, line in enumerate(lines):
try:
str_parts.append(print_string.format(*line))
except IndexError:
raise TypeError("One of the rows have a different number of "
"columns than the others")
if i == 0 and separate_head:
str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
if separate_footer and i == len(lines) - 2:
str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
return '\n'.join(str_parts)
|
def twodim_to_table(
lines: List[List[str]],
separate_head=True,
separate_footer: bool = False
) -> Optional[str]:
""" Pretty-prints the given two-dimensional array's lines. """
str_parts = []
# Count the column width.
widths: List[int] = []
for line in lines:
for i, size in enumerate([len(str(x)) for x in line]):
while i >= len(widths):
widths.append(0)
if size > widths[i]:
widths[i] = size
# Generate the format string to pad the columns.
print_string = ""
for i, width in enumerate(widths):
print_string += "{" + str(i) + ":" + str(width) + "} | "
if not print_string:
return ""
print_string = print_string[:-3]
# Print the actual data.
str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
for i, line in enumerate(lines):
try:
str_parts.append(print_string.format(*line))
except IndexError:
raise TypeError("One of the rows have a different number of "
"columns than the others")
if i == 0 and separate_head:
str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
if separate_footer and i == len(lines) - 2:
str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
str_parts.append("-" * (sum(widths) + 3 * (len(widths) - 1)))
return '\n'.join(str_parts)
|
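A quick usage example of the helper above:
rows = [
    ["Name", "Role", "Score"],
    ["alice", "maintainer", "9.1"],
    ["bob", "reviewer", "7.4"],
]
print(twodim_to_table(rows, separate_head=True))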
7,180 |
def _get_fourier_filter(size, filter_name):
"""Construct the Fourier filter
This computation lessens artifacts and removes a small bias as
explained in [1], Chap 3. Equation 61
Parameters
----------
size: int
filter size.
filter_name: str, optional
Filter used in frequency domain filtering. Ramp filter used by
default. Filters available: ramp, shepp-logan, cosine,
hamming, hann. Assign None to use no filter.
Returns
-------
fourier_filter: ndarray
The computed Fourier filter.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=int),
np.arange(size / 2 - 1, 0, -2, dtype=int)))
f = np.zeros(size)
f[0] = 0.25
f[1::2] = -1 / (np.pi * n) ** 2
# Computing the ramp filter from the fourier transform of its
# frequency domain representation lessens artifacts and removes a
# small bias as explained in [1], Chap 3. Equation 61
fourier_filter = 2 * np.real(fft(f)) # ramp filter
if filter_name == "ramp":
pass
elif filter_name == "shepp-logan":
# Start from first element to avoid divide by zero
omega = np.pi * fftmodule.fftfreq(size)[1:]
fourier_filter[1:] *= np.sin(omega) / omega
elif filter_name == "cosine":
freq = np.pi * np.linspace(0, 1, size, endpoint=False)
cosine_filter = fftmodule.fftshift(np.sin(freq))
fourier_filter *= cosine_filter
elif filter_name == "hamming":
fourier_filter *= fftmodule.fftshift(np.hamming(size))
elif filter_name == "hann":
fourier_filter *= fftmodule.fftshift(np.hanning(size))
elif filter_name is None:
fourier_filter[:] = 1
return fourier_filter[:, np.newaxis]
|
def _get_fourier_filter(size, filter_name):
"""Construct the Fourier filter
This computation lessens artifacts and removes a small bias as
explained in [1], Chap 3. Equation 61
Parameters
----------
size: int
filter size.
filter_name: str, optional
Filter used in frequency domain filtering. Ramp filter used by
default. Filters available: ramp, shepp-logan, cosine,
hamming, hann. Assign None to use no filter.
Returns
-------
fourier_filter: ndarray
The computed Fourier filter.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=int),
np.arange(size / 2 - 1, 0, -2, dtype=int)))
f = np.zeros(size)
f[0] = 0.25
f[1::2] = -1 / (np.pi * n) ** 2
# Computing the ramp filter from the fourier transform of its
# frequency domain representation lessens artifacts and removes a
# small bias as explained in [1], Chap 3. Equation 61
fourier_filter = 2 * np.real(fft(f)) # ramp filter
if filter_name == "ramp":
pass
elif filter_name == "shepp-logan":
# Start from first element to avoid divide by zero
omega = np.pi * fftmodule.fftfreq(size)[1:]
fourier_filter[1:] *= np.sin(omega) / omega
elif filter_name == "cosine":
freq = np.pi * np.linspace(0, 1, size, endpoint=False)
cosine_filter = fftmodule.fftshift(np.sin(freq))
fourier_filter *= cosine_filter
elif filter_name == "hamming":
fourier_filter *= fftmodule.fftshift(np.hamming(size))
elif filter_name == "hann":
fourier_filter *= fftmodule.fftshift(np.hanning(size))
elif filter_name is None:
fourier_filter[:] = 1
return fourier_filter[:, np.newaxis]
|
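A usage sketch inspecting two of the filters described above (assumes the helper is importable, e.g. from scikit-image's radon_transform module, with its fft dependencies available):
ramp = _get_fourier_filter(64, "ramp")
shepp = _get_fourier_filter(64, "shepp-logan")
print(ramp.shape)                 # (64, 1): one weight per FFT bin
print(ramp[1, 0], shepp[1, 0])    # shepp-logan damps each bin relative to the ramp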
37,043 |
def _text_checker(job, interval, _interval_set=False, quiet=False, to_file=None):
"""A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
to_file (file): If file print status messages to it, else to stdout.
"""
_outstream = to_file if to_file else sys.stdout
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
# Adjust length of message so there are no artifacts
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
prev_msg = msg
if not quiet:
print('', file=_outstream)
|
def _text_checker(job, interval, _interval_set=False, quiet=False, to_file=None):
"""A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
to_file (file): If file print status messages to it, else to stdout.
"""
_outstream = to_file if to_file else sys.stdout
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
# Adjust length of message so there are no artifacts
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
prev_msg = msg
if not quiet:
print('', file=_outstream)
|
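To show the polling contract the checker expects (a status object with .name and .value, plus a queue_position() method), here is a toy, purely hypothetical stand-in job; a real BaseJob would come from Qiskit:
from enum import Enum

class FakeStatus(Enum):
    RUNNING = "job is actively running"
    DONE = "job has successfully run"

class FakeJob:
    """Hypothetical stand-in exposing the minimal interface the checker polls."""
    def __init__(self):
        self._calls = 0
    def status(self):
        self._calls += 1
        return FakeStatus.DONE if self._calls > 2 else FakeStatus.RUNNING
    def queue_position(self):
        return 0

_text_checker(FakeJob(), interval=0.1)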
11,913 |
def getmode(mode):
"""Gets a mode descriptor for the given mode."""
global _modes
if not _modes:
# initialize mode cache
modes = {}
for m, (basemode, basetype, bands, typestr) in {
# core modes
# Bits need to be extended to bytes
"1": ("L", "L", ("1",), "|b1"),
"L": ("L", "L", ("L",), "|u1"),
"I": ("L", "I", ("I",), Image._ENDIAN + "i4"),
"F": ("L", "F", ("F",), Image._ENDIAN + "f4"),
"P": ("P", "L", ("P",), "|u1"),
"RGB": ("RGB", "L", ("R", "G", "B"), "|u1"),
"RGBX": ("RGB", "L", ("R", "G", "B", "X"), "|u1"),
"RGBA": ("RGB", "L", ("R", "G", "B", "A"), "|u1"),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K"), "|u1"),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr"), "|u1"),
# UNDONE - unsigned |u1i1i1
"LAB": ("RGB", "L", ("L", "A", "B"), "|u1"),
"HSV": ("RGB", "L", ("H", "S", "V"), "|u1"),
# extra experimental modes
"RGBa": ("RGB", "L", ("R", "G", "B", "a"), None),
"BGR;15": ("RGB", "L", ("B", "G", "R"), None),
"BGR;16": ("RGB", "L", ("B", "G", "R"), None),
"BGR;24": ("RGB", "L", ("B", "G", "R"), None),
"BGR;32": ("RGB", "L", ("B", "G", "R"), None),
"LA": ("L", "L", ("L", "A"), "|u1"),
"La": ("L", "L", ("L", "a"), None),
"PA": ("RGB", "L", ("P", "A"), "|u1"),
}.items():
modes[m] = ModeDescriptor(m, bands, basemode, basetype, typestr)
# mapping modes
for i16mode, typestr in {
# I;16 == I;16L, and I;32 == I;32L
"I;16": "<u2",
"I;16S": "<i2",
"I;16L": "<u2",
"I;16LS": "<i2",
"I;16B": ">u2",
"I;16BS": ">i2",
"I;16N": Image._ENDIAN + "u2",
"I;16NS": Image._ENDIAN + "i2",
"I;32": "<u4",
"I;32B": ">u4",
"I;32L": "<u4",
"I;32S": "<i4",
"I;32BS": ">i4",
"I;32LS": "<i4",
}.items():
modes[i16mode] = ModeDescriptor(i16mode, ("I",), "L", "L", typestr)
# set global mode cache atomically
_modes = modes
return _modes[mode]
|
def getmode(mode):
"""Gets a mode descriptor for the given mode."""
global _modes
if not _modes:
# initialize mode cache
modes = {}
for m, (basemode, basetype, bands, typestr) in {
# core modes
# Bits need to be extended to bytes
"1": ("L", "L", ("1",), "|b1"),
"L": ("L", "L", ("L",), "|u1"),
"I": ("L", "I", ("I",), Image._ENDIAN + "i4"),
"F": ("L", "F", ("F",), Image._ENDIAN + "f4"),
"P": ("P", "L", ("P",), "|u1"),
"RGB": ("RGB", "L", ("R", "G", "B"), "|u1"),
"RGBX": ("RGB", "L", ("R", "G", "B", "X"), "|u1"),
"RGBA": ("RGB", "L", ("R", "G", "B", "A"), "|u1"),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K"), "|u1"),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr"), "|u1"),
# UNDONE - unsigned |u1i1i1
"LAB": ("RGB", "L", ("L", "A", "B"), "|u1"),
"HSV": ("RGB", "L", ("H", "S", "V"), "|u1"),
# extra experimental modes
"RGBa": ("RGB", "L", ("R", "G", "B", "a"), None),
"BGR;15": ("RGB", "L", ("B", "G", "R"), None),
"BGR;16": ("RGB", "L", ("B", "G", "R"), None),
"BGR;24": ("RGB", "L", ("B", "G", "R"), None),
"BGR;32": ("RGB", "L", ("B", "G", "R"), "u4"),
"LA": ("L", "L", ("L", "A"), "|u1"),
"La": ("L", "L", ("L", "a"), None),
"PA": ("RGB", "L", ("P", "A"), "|u1"),
}.items():
modes[m] = ModeDescriptor(m, bands, basemode, basetype, typestr)
# mapping modes
for i16mode, typestr in {
# I;16 == I;16L, and I;32 == I;32L
"I;16": "<u2",
"I;16S": "<i2",
"I;16L": "<u2",
"I;16LS": "<i2",
"I;16B": ">u2",
"I;16BS": ">i2",
"I;16N": Image._ENDIAN + "u2",
"I;16NS": Image._ENDIAN + "i2",
"I;32": "<u4",
"I;32B": ">u4",
"I;32L": "<u4",
"I;32S": "<i4",
"I;32BS": ">i4",
"I;32LS": "<i4",
}.items():
modes[i16mode] = ModeDescriptor(i16mode, ("I",), "L", "L", typestr)
# set global mode cache atomically
_modes = modes
return _modes[mode]
|
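Usage sketch calling this function through Pillow (assumes a Pillow version whose ModeDescriptor carries the typestr field, as in the code above):
from PIL import ImageMode

m = ImageMode.getmode("RGBA")
print(m.bands)     # ('R', 'G', 'B', 'A')
print(m.basemode)  # 'RGB'
print(m.basetype)  # 'L'
print(m.typestr)   # '|u1'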
36,387 |
def runtest(ns, test_name):
"""Run a single test.
ns -- regrtest namespace of options
test_name -- the name of the test
Returns the tuple (result, test_time, xml_data), where result is one
of the constants:
INTERRUPTED KeyboardInterrupt
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
EMPTY_TEST_SUITE test ran no subtests.
TIMEOUT test failed due to timeout
If ns.xmlpath is not None, xml_data is a list containing each
generated testsuite element.
"""
try:
return _runtest(ns, test_name)
except:
if not ns.pgo:
msg = traceback.format_exc()
print(f"test {test_name} crashed -- {msg}",
file=sys.stderr, flush=True)
return TestResult(test_name, FAILED, 0.0, None)
|
def runtest(ns, test_name):
"""Run a single test.
ns -- regrtest namespace of options
test_name -- the name of the test
Returns the tuple (result, test_time, xml_data), where result is one
of the constants:
INTERRUPTED KeyboardInterrupt
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
EMPTY_TEST_SUITE test ran no subtests.
TIMEOUT test timed out
If ns.xmlpath is not None, xml_data is a list containing each
generated testsuite element.
"""
try:
return _runtest(ns, test_name)
except:
if not ns.pgo:
msg = traceback.format_exc()
print(f"test {test_name} crashed -- {msg}",
file=sys.stderr, flush=True)
return TestResult(test_name, FAILED, 0.0, None)
|
53,090 |
def ansiCodePageFromNVDALocale(localeName: str) -> Optional[str]:
"""Returns either ANSI code page for a given locale using GetLocaleInfoEx or None
if the given locale is not known to Windows."""
localeName = normalizeLocaleForWin32(localeName)
# Windows 10 returns English code page (1252) for locales not known to Windows
# even though documentation states that in case of an unknown locale 0 is returned.
# This means that it is impossible to differentiate locales that are unknown
# and locales using 1252 as ANSI code page.
# Use `englishCountryNameFromNVDALocale` to determine if the given locale is supported or not
# before attempting to retrieve code page.
if not englishCountryNameFromNVDALocale(localeName):
return None
buffLength = winKernel.kernel32.GetLocaleInfoEx(localeName, LOCALE_IDEFAULTANSICODEPAGE, None, 0)
if buffLength:
buf = ctypes.create_unicode_buffer(buffLength)
winKernel.kernel32.GetLocaleInfoEx(localeName, LOCALE_IDEFAULTANSICODEPAGE, buf, buffLength)
codePage = buf.value
if codePage == CP_ACP:
# Some locales such as Hindi are Unicode only i.e. they don't have specific ANSI code page.
# In such case code page should be set to the default ANSI code page of the system.
codePage = str(winKernel.kernel32.GetACP())
return codePage
return None
|
def ansiCodePageFromNVDALocale(localeName: str) -> Optional[str]:
"""Returns either ANSI code page for a given locale using GetLocaleInfoEx or None
if the given locale is not known to Windows."""
localeName = normalizeLocaleForWin32(localeName)
# Windows 10 returns English code page (1252) for locales not known to Windows
# even though documentation states that in case of an unknown locale 0 is returned.
# This means that it is impossible to differentiate locales that are unknown
# and locales using 1252 as ANSI code page.
	# Use `englishCountryNameFromNVDALocale` to determine if the given locale is supported or not
	# before attempting to retrieve code page.
	if not englishCountryNameFromNVDALocale(localeName):
		return None
	buffLength = winKernel.kernel32.GetLocaleInfoEx(localeName, LOCALE_IDEFAULTANSICODEPAGE, None, 0)
	if buffLength:
		buf = ctypes.create_unicode_buffer(buffLength)
winKernel.kernel32.GetLocaleInfoEx(localeName, LOCALE_IDEFAULTANSICODEPAGE, buf, buffLength)
codePage = buf.value
if codePage == CP_ACP:
# Some locales such as Hindi are Unicode only i.e. they don't have specific ANSI code page.
# In such case code page should be set to the default ANSI code page of the system.
codePage = str(winKernel.kernel32.GetACP())
return codePage
return None
|
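Both variants rely on the standard Win32 two-call pattern: ask GetLocaleInfoEx for the required buffer length first, then allocate and call again. A hedged, Windows-only sketch using ctypes directly (LOCALE_IDEFAULTANSICODEPAGE = 0x1004 as in the SDK headers; error handling and the NVDA-specific wrappers are omitted):

import ctypes
from typing import Optional

LOCALE_IDEFAULTANSICODEPAGE = 0x1004  # winnls.h

def ansi_code_page(locale_name: str) -> Optional[str]:
    kernel32 = ctypes.windll.kernel32
    # First call: NULL buffer and zero length -> returns the required length in characters.
    length = kernel32.GetLocaleInfoEx(locale_name, LOCALE_IDEFAULTANSICODEPAGE, None, 0)
    if not length:
        return None
    buf = ctypes.create_unicode_buffer(length)
    # Second call: fill the buffer we just sized.
    kernel32.GetLocaleInfoEx(locale_name, LOCALE_IDEFAULTANSICODEPAGE, buf, length)
    return buf.value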
2,730 |
def pairwise_kernels(
X, Y=None, metric="linear", *, filter_params=False, n_jobs=None, **kwds
):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are:
['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or
(n_samples_X, n_features)
Array of pairwise kernels between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
A second feature array only if X has shape (n_samples_X, n_features).
metric : str or callable, default="linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two rows from X as input and return the corresponding
kernel value as a single number. This means that callables from
:mod:`sklearn.metrics.pairwise` are not allowed, as they operate on
matrices, not single samples. Use the string identifying the kernel
instead.
filter_params : bool, default=False
Whether to filter invalid parameters or not.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_X) or
(n_samples_X, n_samples_Y)
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]}
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
def pairwise_kernels(
X, Y=None, metric="linear", *, filter_params=False, n_jobs=None, **kwds
):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are:
['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or
(n_samples_X, n_features)
Array of pairwise kernels between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
A second feature array only if X has shape (n_samples_X, n_features).
metric : str or callable, default="linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two rows from X as input and return the corresponding
kernel value as a single number. This means that callables from
:mod:`sklearn.metrics.pairwise` are not allowed, as they operate on
matrices, not single samples. Use the string identifying the kernel
instead.
filter_params : bool, default=False
Whether to filter invalid parameters or not.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_samples_Y)
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]}
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
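A short usage sketch of the scikit-learn API documented above (toy arrays; the gamma value is arbitrary):

import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels

X = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
Y = np.array([[0.5, 0.5]])

K = pairwise_kernels(X, Y, metric="rbf", gamma=0.5)   # shape (3, 1)
K_lin = pairwise_kernels(X, metric="linear")          # shape (3, 3), symmetric
print(K.shape, K_lin.shape)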
57,690 |
def test_get_indicators(mocker):
"""Tests get_indicators function
Given
'indicator_types': ['value', 'user-account']
When
- `fetch_indicators_command` or `fetch_indicators_command` are calling the get_indicators function
Then
- convert the result to indicators list
- validate the indicators list
- validate the new_last_id to set
"""
client = Client
mocker.patch.object(client.stix_observable, 'list', return_value=RESPONSE_DATA)
new_last_id, indicators = get_indicators(client, indicator_type=['value', 'user-account'])
assert len(indicators) == 2
assert new_last_id == 'YXJyYXljb25uZWN0aW9uOjI='
|
def test_get_indicators(mocker):
"""Tests get_indicators function
Given
The following indicator types chosen by the user: 'value', 'user-account'.
When
- `fetch_indicators_command` or `fetch_indicators_command` are calling the get_indicators function
Then
- convert the result to indicators list
- validate the indicators list
- validate the new_last_id to set
"""
client = Client
mocker.patch.object(client.stix_observable, 'list', return_value=RESPONSE_DATA)
new_last_id, indicators = get_indicators(client, indicator_type=['value', 'user-account'])
assert len(indicators) == 2
assert new_last_id == 'YXJyYXljb25uZWN0aW9uOjI='
|
30,097 |
def test_save_signatures_to_location_1_sig(runtmp):
# save to sigfile
sig2 = utils.get_test_data('2.fa.sig')
ss2 = sourmash.load_one_signature(sig2, ksize=31)
sig47 = utils.get_test_data('47.fa.sig')
ss47 = sourmash.load_one_signature(sig47, ksize=31)
outloc = runtmp.output('foo.sig')
with sourmash_args.SaveSignaturesToLocation(outloc) as save_sig:
print(save_sig)
save_sig.add(ss2)
save_sig.add(ss47)
saved = list(sourmash.load_file_as_signatures(outloc))
assert ss2 in saved
assert ss47 in saved
assert len(saved) == 2
|
def test_save_signatures_to_location_1_sig(runtmp):
# save to sigfile.sig
sig2 = utils.get_test_data('2.fa.sig')
ss2 = sourmash.load_one_signature(sig2, ksize=31)
sig47 = utils.get_test_data('47.fa.sig')
ss47 = sourmash.load_one_signature(sig47, ksize=31)
outloc = runtmp.output('foo.sig')
with sourmash_args.SaveSignaturesToLocation(outloc) as save_sig:
print(save_sig)
save_sig.add(ss2)
save_sig.add(ss47)
saved = list(sourmash.load_file_as_signatures(outloc))
assert ss2 in saved
assert ss47 in saved
assert len(saved) == 2
|
20,482 |
def merge_variants_with_same_attributes(env):
openupgrade.logged_query(
env.cr, """
WITH templates AS (
SELECT pt.id, pp.combination_indices, COUNT(*)
FROM product_product pp
JOIN product_template pt ON pt.id = pp.product_tmpl_id
WHERE pp.combination_indices IS NOT NULL
GROUP BY pt.id, pp.combination_indices
HAVING COUNT(*) > 1
)
SELECT pp.id, pp.product_tmpl_id
FROM product_product pp
JOIN product_template pt ON pt.id = pp.product_tmpl_id
JOIN templates ON (templates.id = pt.id
AND templates.combination_indices = pp.combination_indices)"""
)
templates = {}
for product_id, template_id in env.cr.fetchall():
if template_id in templates:
templates[template_id] += [product_id]
else:
templates[template_id] = [product_id]
for template_id in templates:
merge_records(
env, 'product.product', templates[template_id],
templates[template_id][0], field_spec=None, method='sql',
delete=True, exclude_columns=None, model_table='product_product')
|
def merge_variants_with_same_attributes(env):
openupgrade.logged_query(
env.cr, """
WITH templates AS (
SELECT pt.id, pp.combination_indices, COUNT(*)
FROM product_product pp
JOIN product_template pt ON pt.id = pp.product_tmpl_id
WHERE pp.combination_indices IS NOT NULL
GROUP BY pt.id, pp.combination_indices
HAVING COUNT(*) > 1
)
SELECT pp.id, pp.product_tmpl_id
FROM product_product pp
JOIN product_template pt ON pt.id = pp.product_tmpl_id
JOIN templates ON (templates.id = pt.id
AND templates.combination_indices = pp.combination_indices)"""
)
templates = {}
for product_id, template_id in env.cr.fetchall():
templates.setdefault(template_id, []).append(product_id)
for template_id in templates:
merge_records(
env, 'product.product', templates[template_id],
templates[template_id][0], field_spec=None, method='sql',
delete=True, exclude_columns=None, model_table='product_product')
|
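The refactor in the second version replaces the if/else accumulation with dict.setdefault; a minimal illustration of that grouping idiom:

rows = [(101, 7), (102, 7), (201, 9)]  # (product_id, template_id) pairs

templates = {}
for product_id, template_id in rows:
    # setdefault returns the existing list for the key, or inserts and returns a new one.
    templates.setdefault(template_id, []).append(product_id)

assert templates == {7: [101, 102], 9: [201]}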
27,716 |
def validate_basetemp(path):
# GH 7119
cwd = pathlib.Path.cwd()
if path == "" or path == "." or str(cwd).startswith(path):
msg = "basetemp should not be '' or . or any parent folder of the cwd"
raise argparse.ArgumentTypeError(msg)
return path
|
def validate_basetemp(path: str) -> str:
# GH 7119
cwd = pathlib.Path.cwd()
if path == "" or path == "." or str(cwd).startswith(path):
msg = "basetemp should not be '' or . or any parent folder of the cwd"
raise argparse.ArgumentTypeError(msg)
return path
|
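validate_basetemp is meant to be plugged into argparse as a type= callback so that a bad value is rejected at parse time. A hedged usage sketch, assuming the function defined above is importable (the module name below is hypothetical):

import argparse
from conftest_module import validate_basetemp  # hypothetical import location

parser = argparse.ArgumentParser()
parser.add_argument("--basetemp", type=validate_basetemp)

args = parser.parse_args(["--basetemp", "/tmp/pytest-of-user"])
print(args.basetemp)  # "/tmp/pytest-of-user"
# parser.parse_args(["--basetemp", "."]) would exit with an argparse error instead.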
31,989 |
def initialize_instance(args: Dict[str, str], params: Dict[str, str]):
global URL, API_KEY, USE_SSL, USE_URL_FILTERING, VSYS, DEVICE_GROUP, XPATH_SECURITY_RULES, XPATH_OBJECTS, \
XPATH_RULEBASE, TEMPLATE, PRE_POST
if not params.get('port'):
raise DemistoException('Set a port for the instance')
URL = params.get('server', '').rstrip('/:') + ':' + params.get('port', '') + '/api/'
API_KEY = str(params.get('key')) or (params.get('credentials') or {}).get('password') # type: ignore
USE_SSL = not params.get('insecure')
USE_URL_FILTERING = params.get('use_url_filtering')
# determine a vsys or a device-group
VSYS = params.get('vsys', '')
if args and args.get('device-group'):
DEVICE_GROUP = args.get('device-group') # type: ignore[assignment]
else:
DEVICE_GROUP = params.get('device_group', None) # type: ignore[arg-type]
if args and args.get('template'):
TEMPLATE = args.get('template') # type: ignore[assignment]
else:
TEMPLATE = params.get('template', None) # type: ignore[arg-type]
PRE_POST = args.get('pre_post', '')
# configuration check
if DEVICE_GROUP and VSYS:
raise DemistoException(
'Cannot configure both vsys and Device group. Set vsys for firewall, set Device group for Panorama.')
if not DEVICE_GROUP and not VSYS:
raise DemistoException('Set vsys for firewall or Device group for Panorama.')
# setting security xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if device_group_shared == 'shared':
XPATH_SECURITY_RULES = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_SECURITY_RULES = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/"
else:
XPATH_SECURITY_RULES = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/rulebase/security/rules/entry"
# setting objects xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if DEVICE_GROUP == 'shared':
XPATH_OBJECTS = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_OBJECTS = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/"
else:
XPATH_OBJECTS = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/" # ignore:
# setting security rulebase xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if DEVICE_GROUP == 'shared':
XPATH_RULEBASE = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_RULEBASE = "/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name=\'" + \
DEVICE_GROUP + "\']/"
else:
XPATH_RULEBASE = f"/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'{VSYS}\']/"
|
def initialize_instance(args: Dict[str, str], params: Dict[str, str]):
global URL, API_KEY, USE_SSL, USE_URL_FILTERING, VSYS, DEVICE_GROUP, XPATH_SECURITY_RULES, XPATH_OBJECTS, \
XPATH_RULEBASE, TEMPLATE, PRE_POST
if not params.get('port'):
raise DemistoException('Set a port for the instance')
URL = params.get('server', '').rstrip('/:') + ':' + params.get('port', '') + '/api/'
API_KEY = str(params.get('key')) or str((params.get('credentials') or {}).get('password', ''))
USE_SSL = not params.get('insecure')
USE_URL_FILTERING = params.get('use_url_filtering')
# determine a vsys or a device-group
VSYS = params.get('vsys', '')
if args and args.get('device-group'):
DEVICE_GROUP = args.get('device-group') # type: ignore[assignment]
else:
DEVICE_GROUP = params.get('device_group', None) # type: ignore[arg-type]
if args and args.get('template'):
TEMPLATE = args.get('template') # type: ignore[assignment]
else:
TEMPLATE = params.get('template', None) # type: ignore[arg-type]
PRE_POST = args.get('pre_post', '')
# configuration check
if DEVICE_GROUP and VSYS:
raise DemistoException(
'Cannot configure both vsys and Device group. Set vsys for firewall, set Device group for Panorama.')
if not DEVICE_GROUP and not VSYS:
raise DemistoException('Set vsys for firewall or Device group for Panorama.')
# setting security xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if device_group_shared == 'shared':
XPATH_SECURITY_RULES = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_SECURITY_RULES = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/"
else:
XPATH_SECURITY_RULES = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/rulebase/security/rules/entry"
# setting objects xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if DEVICE_GROUP == 'shared':
XPATH_OBJECTS = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_OBJECTS = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/"
else:
XPATH_OBJECTS = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/" # ignore:
# setting security rulebase xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if DEVICE_GROUP == 'shared':
XPATH_RULEBASE = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_RULEBASE = "/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name=\'" + \
DEVICE_GROUP + "\']/"
else:
XPATH_RULEBASE = f"/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'{VSYS}\']/"
|
43,978 |
def dipole(hf_file, core=None, active=None, mapping="jordan_wigner", cutoff=1.0e-12, wires=None):
r"""Computes the electric dipole moment operator in the Pauli basis.
The second quantized dipole moment operator :math:`\hat{D}` of a molecule is given by
.. math::
\hat{D} = \sum_{\alpha, \beta} \langle \alpha \vert {\bf r} \vert \beta \rangle
[\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} +
\hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}] + \hat{D}_\mathrm{n}.
In the equation above, the indices :math:`\alpha, \beta` run over the basis of Hartree-Fock
molecular orbitals, the operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the
electron creation and annihilation operators, respectively, and
:math:`\langle \alpha \vert {\bf r} \vert \beta \rangle` denotes
the matrix elements of the position operator :math:`\hat{{\bf r}}`. These matrix elements
are calculated as
.. math::
\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle = \sum_{i, j} C_{\alpha i}^*C_{\beta j}
\langle i \vert {\bf r} \vert j \rangle,
where :math:`\vert i \rangle` is the wave function of the atomic orbitals and
:math:`C_{\alpha i}` and :math:`\langle i \vert \hat{{\bf r}} \vert j \rangle`
are the representations of the molecular orbitals and the operator
:math:`\hat{{\bf r}}` in the atomic basis.
The contribution of the nuclei to the dipole operator is given by
.. math::
\hat{D}_\mathrm{n} = -\sum_{i=1}^{N_\mathrm{atoms}} Z_i {\bf R}_i \hat{I},
where :math:`Z_i` and :math:`{\bf R}_i` are, respectively, the atomic number and the
position vector of the :math:`i`-th atom of the molecule.
Args:
hf_file (str): Absolute path to the hdf5-formatted file with the Hartree-Fock
electronic structure. This file can be generated using the
:func:`~.meanfield` function.
core (list): indices of core orbitals, i.e., the orbitals that are
not correlated in the many-body wave function
active (list): indices of active orbitals, i.e., the orbitals used to
build the correlated many-body wave function
mapping (str): Specifies the transformation to map the fermionic operator to the
Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
cutoff (float): Cutoff value for including the matrix elements
:math:`\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle`. The matrix elements
with absolute value less than ``cutoff`` are neglected.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
list[pennylane.Hamiltonian]: the qubit observables corresponding to the components
:math:`\hat{D}_x`, :math:`\hat{D}_y` and :math:`\hat{D}_z` of the dipole operator in
atomic units (Bohr radii).
**Example**
>>> dipole_obs = dipole("./h3p.hdf5")
>>> print(dipole_obs)
[<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=1, wires=[0]>]
>>> print(dipole_obs[0])
(-1.4861475511479285) [Z0]
+ (-1.4861475511479285) [Z1]
+ (-1.0207535180657459) [Z2]
+ (-1.0207535180657459) [Z3]
+ (-0.38409271341166346) [Z4]
+ (-0.38409271341166346) [Z5]
+ (2.9129875652506754) [I0]
+ (-1.0463884953059674) [Y0 Z1 Y2]
+ (-1.0463884953059674) [X0 Z1 X2]
+ (-1.0463884953059674) [Y1 Z2 Y3]
+ (-1.0463884953059674) [X1 Z2 X3]
+ (-0.2949628258407493) [Y2 Z3 Y4]
+ (-0.2949628258407493) [X2 Z3 X4]
+ (-0.2949628258407493) [Y3 Z4 Y5]
+ (-0.2949628258407493) [X3 Z4 X5]
+ (-0.10008920247855208) [Y0 Z1 Z2 Z3 Y4]
+ (-0.10008920247855208) [X0 Z1 Z2 Z3 X4]
+ (-0.10008920247855208) [Y1 Z2 Z3 Z4 Y5]
+ (-0.10008920247855208) [X1 Z2 Z3 Z4 X5]
"""
bohr_angs = 0.529177210903
atomic_numbers = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
}
hf = openfermion.MolecularData(filename=hf_file.strip())
if hf.multiplicity != 1:
raise ValueError(
"Currently, this functionality is constrained to closed-shell Hartree-Fock states;"
" got spin multiplicity 2S+1 = {}".format(hf.multiplicity)
)
for i in hf.geometry:
print(i[0])
if i[0] not in atomic_numbers:
raise ValueError(
"Currently, only first- or second-row elements of the periodic table are supported;"
" got element {}".format(i[0])
)
# Load dipole matrix elements in the atomic basis
# pylint: disable=import-outside-toplevel
from pyscf import gto
mol = gto.M(
atom=hf.geometry, basis=hf.basis, charge=hf.charge, spin=0.5 * (hf.multiplicity - 1)
)
dip_ao = mol.intor_symmetric("int1e_r", comp=3).real
# Transform dipole matrix elements to the MO basis
n_orbs = hf.n_orbitals
c_hf = hf.canonical_orbitals
dip_mo = np.zeros((3, n_orbs, n_orbs))
for comp in range(3):
for alpha in range(n_orbs):
for beta in range(alpha + 1):
dip_mo[comp, alpha, beta] = c_hf[alpha] @ dip_ao[comp] @ c_hf[beta]
dip_mo[comp] += dip_mo[comp].T - np.diag(np.diag(dip_mo[comp]))
# Compute the nuclear contribution
dip_n = np.zeros(3)
for comp in range(3):
for i in hf.geometry:
dip_n[comp] -= atomic_numbers[i[0]] * i[1][comp] / bohr_angs
# Build the observable
dip_obs = []
for i in range(3):
fermion_obs = one_particle(dip_mo[i], core=core, active=active, cutoff=cutoff)
dip_obs.append(observable([fermion_obs], init_term=dip_n[i], mapping=mapping, wires=wires))
return dip_obs
|
def dipole(hf_file, core=None, active=None, mapping="jordan_wigner", cutoff=1.0e-12, wires=None):
r"""Computes the electric dipole moment operator in the Pauli basis.
The second quantized dipole moment operator :math:`\hat{D}` of a molecule is given by
.. math::
\hat{D} = \sum_{\alpha, \beta} \langle \alpha \vert {\bf r} \vert \beta \rangle
[\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} +
\hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}] + \hat{D}_\mathrm{n}.
In the equation above, the indices :math:`\alpha, \beta` run over the basis of Hartree-Fock
molecular orbitals, the operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the
electron creation and annihilation operators, respectively, and
:math:`\langle \alpha \vert {\bf r} \vert \beta \rangle` denotes
the matrix elements of the position operator :math:`\hat{{\bf r}}`. These matrix elements
are calculated as
.. math::
\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle = \sum_{i, j} C_{\alpha i}^*C_{\beta j}
\langle i \vert {\bf r} \vert j \rangle,
where :math:`\vert i \rangle` is the wave function of the atomic orbitals and
:math:`C_{\alpha i}` and :math:`\langle i \vert \hat{{\bf r}} \vert j \rangle`
are the representations of the molecular orbitals and the operator
:math:`\hat{{\bf r}}` in the atomic basis.
The contribution of the nuclei to the dipole operator is given by
.. math::
\hat{D}_\mathrm{n} = -\sum_{i=1}^{N_\mathrm{atoms}} Z_i {\bf R}_i \hat{I},
where :math:`Z_i` and :math:`{\bf R}_i` are, respectively, the atomic number and the
position vector of the :math:`i`-th atom of the molecule.
Args:
hf_file (str): Absolute path to the hdf5-formatted file with the Hartree-Fock
electronic structure. This file can be generated using the
:func:`~.meanfield` function.
core (list): indices of core orbitals, i.e., the orbitals that are
not correlated in the many-body wave function
active (list): indices of active orbitals, i.e., the orbitals used to
build the correlated many-body wave function
mapping (str): Specifies the transformation to map the fermionic operator to the
Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
cutoff (float): Cutoff value for including the matrix elements
:math:`\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle`. The matrix elements
with absolute value less than ``cutoff`` are neglected.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
list[pennylane.Hamiltonian]: the qubit observables corresponding to the components
:math:`\hat{D}_x`, :math:`\hat{D}_y` and :math:`\hat{D}_z` of the dipole operator in
atomic units (Bohr radii).
**Example**
>>> dipole_obs = dipole("./h3p.hdf5")
>>> print(dipole_obs)
[<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=1, wires=[0]>]
>>> print(dipole_obs[0])
(-1.4861475511479285) [Z0]
+ (-1.4861475511479285) [Z1]
+ (-1.0207535180657459) [Z2]
+ (-1.0207535180657459) [Z3]
+ (-0.38409271341166346) [Z4]
+ (-0.38409271341166346) [Z5]
+ (2.9129875652506754) [I0]
+ (-1.0463884953059674) [Y0 Z1 Y2]
+ (-1.0463884953059674) [X0 Z1 X2]
+ (-1.0463884953059674) [Y1 Z2 Y3]
+ (-1.0463884953059674) [X1 Z2 X3]
+ (-0.2949628258407493) [Y2 Z3 Y4]
+ (-0.2949628258407493) [X2 Z3 X4]
+ (-0.2949628258407493) [Y3 Z4 Y5]
+ (-0.2949628258407493) [X3 Z4 X5]
+ (-0.10008920247855208) [Y0 Z1 Z2 Z3 Y4]
+ (-0.10008920247855208) [X0 Z1 Z2 Z3 X4]
+ (-0.10008920247855208) [Y1 Z2 Z3 Z4 Y5]
+ (-0.10008920247855208) [X1 Z2 Z3 Z4 X5]
"""
bohr_angs = 0.529177210903
atomic_numbers = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
}
hf = openfermion.MolecularData(filename=hf_file.strip())
if hf.multiplicity != 1:
raise ValueError(
"Currently, this functionality is constrained to closed-shell Hartree-Fock states with multiplicity = 1;"
" got spin multiplicity 2S+1 = {}".format(hf.multiplicity)
)
for i in hf.geometry:
print(i[0])
if i[0] not in atomic_numbers:
raise ValueError(
"Currently, only first- or second-row elements of the periodic table are supported;"
" got element {}".format(i[0])
)
# Load dipole matrix elements in the atomic basis
# pylint: disable=import-outside-toplevel
from pyscf import gto
mol = gto.M(
atom=hf.geometry, basis=hf.basis, charge=hf.charge, spin=0.5 * (hf.multiplicity - 1)
)
dip_ao = mol.intor_symmetric("int1e_r", comp=3).real
# Transform dipole matrix elements to the MO basis
n_orbs = hf.n_orbitals
c_hf = hf.canonical_orbitals
dip_mo = np.zeros((3, n_orbs, n_orbs))
for comp in range(3):
for alpha in range(n_orbs):
for beta in range(alpha + 1):
dip_mo[comp, alpha, beta] = c_hf[alpha] @ dip_ao[comp] @ c_hf[beta]
dip_mo[comp] += dip_mo[comp].T - np.diag(np.diag(dip_mo[comp]))
# Compute the nuclear contribution
dip_n = np.zeros(3)
for comp in range(3):
for i in hf.geometry:
dip_n[comp] -= atomic_numbers[i[0]] * i[1][comp] / bohr_angs
# Build the observable
dip_obs = []
for i in range(3):
fermion_obs = one_particle(dip_mo[i], core=core, active=active, cutoff=cutoff)
dip_obs.append(observable([fermion_obs], init_term=dip_n[i], mapping=mapping, wires=wires))
return dip_obs
|
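One step in both versions that is easy to misread is the symmetrization line after the nested loops: the loop only fills the lower triangle (beta <= alpha), and the statement `dip_mo[comp] += dip_mo[comp].T - np.diag(np.diag(dip_mo[comp]))` mirrors it onto the upper triangle without double-counting the diagonal. A small numpy check of that idiom:

import numpy as np

m = np.array([[1.0, 0.0, 0.0],
              [4.0, 2.0, 0.0],
              [5.0, 6.0, 3.0]])   # only the lower triangle is filled

m += m.T - np.diag(np.diag(m))    # mirror the lower triangle onto the upper one

assert np.allclose(m, m.T)                       # now symmetric
assert np.allclose(np.diag(m), [1.0, 2.0, 3.0])  # diagonal untouched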
2,490 |
def test_parallel_classification():
# Check parallel classification.
# Classification
X_train, X_test, y_train, y_test = train_test_split(
iris.data, iris.target, random_state=rng
)
ensemble = BaggingClassifier(
DecisionTreeClassifier(), n_jobs=3, random_state=0
).fit(X_train, y_train)
# predict_proba
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=1)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(
DecisionTreeClassifier(), n_jobs=1, random_state=0
).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(
SVC(decision_function_shape="ovr"), n_jobs=3, random_state=0
).fit(X_train, y_train)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=1)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(
SVC(decision_function_shape="ovr"), n_jobs=1, random_state=0
).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
|
def test_parallel_classification():
# Check parallel classification.
# Classification
X_train, X_test, y_train, y_test = train_test_split(
iris.data, iris.target, random_state=0
)
ensemble = BaggingClassifier(
DecisionTreeClassifier(), n_jobs=3, random_state=0
).fit(X_train, y_train)
# predict_proba
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=1)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(
DecisionTreeClassifier(), n_jobs=1, random_state=0
).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(
SVC(decision_function_shape="ovr"), n_jobs=3, random_state=0
).fit(X_train, y_train)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=1)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(
SVC(decision_function_shape="ovr"), n_jobs=1, random_state=0
).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
|
160 |
def upgrade():
"""Add a new composed_by_bodhi column to releases."""
op.add_column(
'releases', sa.Column('composed_by_bodhi', sa.Boolean(), server_default='t', default=True)
)
|
def upgrade():
"""Add a new composed_by_bodhi column to releases."""
op.add_column(
'releases', sa.Column('composed_by_bodhi', sa.Boolean(), server_default=True, default=True)
)
|
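For reference on the argument being changed: SQLAlchemy documents server_default as a DDL-level default, usually given as a string or a text() clause rather than a Python value. A hedged sketch of the same column addition written that way (same table and column names as above):

import sqlalchemy as sa
from alembic import op

def upgrade():
    """Add a new composed_by_bodhi column to releases."""
    op.add_column(
        'releases',
        sa.Column(
            'composed_by_bodhi',
            sa.Boolean(),
            server_default=sa.text('true'),  # DDL default; `default=True` remains the Python-side default
            default=True,
        ),
    )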
51,426 |
def get_axis(figsize, size, aspect, ax, **kwargs):
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("matplotlib is required for plot.utils.get_axis")
if figsize is not None:
if ax is not None:
raise ValueError("cannot provide both `figsize` and " "`ax` arguments")
if size is not None:
raise ValueError("cannot provide both `figsize` and " "`size` arguments")
_, ax = plt.subplots(figsize=figsize)
elif size is not None:
if ax is not None:
raise ValueError("cannot provide both `size` and `ax` arguments")
if aspect is None:
width, height = mpl.rcParams["figure.figsize"]
aspect = width / height
figsize = (size * aspect, size)
_, ax = plt.subplots(figsize=figsize)
elif aspect is not None:
raise ValueError("cannot provide `aspect` argument without `size`")
if ax is None:
ax = plt.gca()
if "projection" in kwargs:
ax = plt.axes(projection=kwargs["projection"])
if "facecolor" in kwargs:
ax.set_facecolor(kwargs["facecolor"])
return ax
|
def get_axis(figsize, size, aspect, ax, **kwargs):
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("matplotlib is required for plot.utils.get_axis")
if figsize is not None:
if ax is not None:
raise ValueError("cannot provide both `figsize` and " "`ax` arguments")
if size is not None:
raise ValueError("cannot provide both `figsize` and " "`size` arguments")
_, ax = plt.subplots(figsize=figsize)
elif size is not None:
if ax is not None:
raise ValueError("cannot provide both `size` and `ax` arguments")
if aspect is None:
width, height = mpl.rcParams["figure.figsize"]
aspect = width / height
figsize = (size * aspect, size)
_, ax = plt.subplots(figsize=figsize)
elif aspect is not None:
raise ValueError("cannot provide `aspect` argument without `size`")
    if "projection" in kwargs and ax is not None:
        raise ValueError("cannot provide both `projection` and `ax` arguments")
elif "projection" in kwargs:
ax = plt.axes(projection=kwargs["projection"])
elif ax is None:
ax = plt.gca()
if "facecolor" in kwargs:
ax.set_facecolor(kwargs["facecolor"])
return ax
|
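A brief usage sketch of the helper above, assuming the get_axis defined in this snippet is in scope and matplotlib is installed; with size and aspect given, the resulting figure is size*aspect inches wide and size inches tall:

import matplotlib
matplotlib.use("Agg")  # headless backend so the example runs anywhere
import matplotlib.pyplot as plt

ax = get_axis(figsize=None, size=4, aspect=2.0, ax=None)  # new 8x4 inch figure
print(ax.figure.get_size_inches())                        # [8. 4.]
plt.close(ax.figure)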
3,223 |
def get_filter(query=None, params=None):
"""
Returns an eventstore filter given the search text provided by the user and
URL params
"""
# NOTE: this function assumes project permissions check already happened
parsed_terms = []
if query is not None:
try:
parsed_terms = parse_search_query(query)
except ParseError as e:
raise InvalidSearchQuery(u"Parse error: %r (column %d)" % (e.expr.name, e.column()))
kwargs = {
"start": None,
"end": None,
"conditions": [],
"having": [],
"project_ids": [],
"group_ids": [],
}
def get_projects(params):
return {
p["slug"]: p["id"]
for p in Project.objects.filter(id__in=params.get("project_id", [])).values(
"id", "slug"
)
}
def to_list(value):
if isinstance(value, list):
return value
return [value]
projects = None
for term in parsed_terms:
if isinstance(term, SearchFilter):
name = term.key.name
if name == PROJECT_KEY:
if projects is None:
projects = get_projects(params)
condition = ["project_id", "=", projects.get(term.value.value)]
kwargs["conditions"].append(condition)
elif name == "issue.id" and term.value.value != "":
# A blank term value means that this is a has filter
kwargs["group_ids"].extend(to_list(term.value.value))
elif name == "issue" and term.value.value != "":
if params and "organization_id" in params:
try:
group = Group.objects.by_qualified_short_id(
params["organization_id"], term.value.value
)
kwargs["group_ids"].extend(to_list(group.id))
except Exception:
raise InvalidSearchQuery(
u"invalid value '{}' for 'issue:' filter".format(term.value.value)
)
elif name in FIELD_ALIASES:
converted_filter = convert_aggregate_filter_to_snuba_query(term, True)
if converted_filter:
kwargs["having"].append(converted_filter)
else:
converted_filter = convert_search_filter_to_snuba_query(term)
if converted_filter:
kwargs["conditions"].append(converted_filter)
elif isinstance(term, AggregateFilter):
converted_filter = convert_aggregate_filter_to_snuba_query(term, False)
if converted_filter:
kwargs["having"].append(converted_filter)
# Keys included as url params take precedent if same key is included in search
# They are also considered safe and to have had access rules applied unlike conditions
# from the query string.
if params:
for key in ("start", "end"):
kwargs[key] = params.get(key, None)
# OrganizationEndpoint.get_filter() uses project_id, but eventstore.Filter uses project_ids
if "project_id" in params:
kwargs["project_ids"] = params["project_id"]
if "environment" in params:
term = SearchFilter(SearchKey("environment"), "=", SearchValue(params["environment"]))
kwargs["conditions"].append(convert_search_filter_to_snuba_query(term))
if "group_ids" in params:
kwargs["group_ids"] = to_list(params["group_ids"])
# Deprecated alias, use `group_ids` instead
if "issue.id" in params:
kwargs["group_ids"] = to_list(params["issue.id"])
if "issue" in params:
if "organization_id" in params:
try:
group = Group.objects.by_qualified_short_id(
params["organization_id"], params["issue"]
)
kwargs["group_ids"] = to_list(group.id)
except Exception:
                raise InvalidSearchQuery(
                    u"invalid value '{}' for 'issue' query param".format(params["issue"])
                )
return eventstore.Filter(**kwargs)
|
def get_filter(query=None, params=None):
"""
Returns an eventstore filter given the search text provided by the user and
URL params
"""
# NOTE: this function assumes project permissions check already happened
parsed_terms = []
if query is not None:
try:
parsed_terms = parse_search_query(query)
except ParseError as e:
raise InvalidSearchQuery(u"Parse error: %r (column %d)" % (e.expr.name, e.column()))
kwargs = {
"start": None,
"end": None,
"conditions": [],
"having": [],
"project_ids": [],
"group_ids": [],
}
def get_projects(params):
return {
p["slug"]: p["id"]
for p in Project.objects.filter(id__in=params.get("project_id", [])).values(
"id", "slug"
)
}
def to_list(value):
if isinstance(value, list):
return value
return [value]
projects = None
for term in parsed_terms:
if isinstance(term, SearchFilter):
name = term.key.name
if name == PROJECT_KEY:
if projects is None:
projects = get_projects(params)
condition = ["project_id", "=", projects.get(term.value.value)]
kwargs["conditions"].append(condition)
elif name == "issue.id" and term.value.value != "":
# A blank term value means that this is a has filter
kwargs["group_ids"].extend(to_list(term.value.value))
elif name == "issue" and term.value.value != "":
if params and "organization_id" in params:
try:
group = Group.objects.by_qualified_short_id(
params["organization_id"], term.value.value
)
kwargs["group_ids"].extend(to_list(group.id))
except Exception:
raise InvalidSearchQuery(
u"Invalid value '{}' for 'issue:' filter".format(term.value.value)
)
elif name in FIELD_ALIASES:
converted_filter = convert_aggregate_filter_to_snuba_query(term, True)
if converted_filter:
kwargs["having"].append(converted_filter)
else:
converted_filter = convert_search_filter_to_snuba_query(term)
if converted_filter:
kwargs["conditions"].append(converted_filter)
elif isinstance(term, AggregateFilter):
converted_filter = convert_aggregate_filter_to_snuba_query(term, False)
if converted_filter:
kwargs["having"].append(converted_filter)
# Keys included as url params take precedent if same key is included in search
# They are also considered safe and to have had access rules applied unlike conditions
# from the query string.
if params:
for key in ("start", "end"):
kwargs[key] = params.get(key, None)
# OrganizationEndpoint.get_filter() uses project_id, but eventstore.Filter uses project_ids
if "project_id" in params:
kwargs["project_ids"] = params["project_id"]
if "environment" in params:
term = SearchFilter(SearchKey("environment"), "=", SearchValue(params["environment"]))
kwargs["conditions"].append(convert_search_filter_to_snuba_query(term))
if "group_ids" in params:
kwargs["group_ids"] = to_list(params["group_ids"])
# Deprecated alias, use `group_ids` instead
if "issue.id" in params:
kwargs["group_ids"] = to_list(params["issue.id"])
if "issue" in params:
if "organization_id" in params:
try:
group = Group.objects.by_qualified_short_id(
params["organization_id"], params["issue"]
)
kwargs["group_ids"] = to_list(group.id)
except Exception:
                raise InvalidSearchQuery(
                    u"invalid value '{}' for 'issue' query param".format(params["issue"])
                )
return eventstore.Filter(**kwargs)
|
14,271 |
def matrix_muliplayer(benchmark, sim):
toplevel_lang = "verilog"
extra_args = []
if sim == "ghdl":
extra_args = ["--std=08"]
toplevel_lang = "vhdl"
verilog_sources = []
vhdl_sources = []
proj_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"examples",
"matrix_multiplier",
)
if toplevel_lang == "verilog":
verilog_sources = [os.path.join(proj_dir, "hdl", "matrix_multiplier.sv")]
else:
vhdl_sources = [
os.path.join(proj_dir, "hdl", "matrix_multiplier_pkg.vhd"),
os.path.join(proj_dir, "hdl", "matrix_multiplier.vhd"),
]
runner = get_runner(sim)()
runner.build(
toplevel="matrix_multiplier",
verilog_sources=verilog_sources,
vhdl_sources=vhdl_sources,
extra_args=extra_args,
)
@benchmark
def run_test():
runner.test(
toplevel="matrix_multiplier",
toplevel_lang=toplevel_lang,
py_module="test_matrix_multiplier",
extra_args=extra_args,
python_search=[os.path.join(proj_dir, "tests")],
)
|
def matrix_multiplier(benchmark, sim):
toplevel_lang = "verilog"
extra_args = []
if sim == "ghdl":
extra_args = ["--std=08"]
toplevel_lang = "vhdl"
verilog_sources = []
vhdl_sources = []
proj_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"examples",
"matrix_multiplier",
)
if toplevel_lang == "verilog":
verilog_sources = [os.path.join(proj_dir, "hdl", "matrix_multiplier.sv")]
else:
vhdl_sources = [
os.path.join(proj_dir, "hdl", "matrix_multiplier_pkg.vhd"),
os.path.join(proj_dir, "hdl", "matrix_multiplier.vhd"),
]
runner = get_runner(sim)()
runner.build(
toplevel="matrix_multiplier",
verilog_sources=verilog_sources,
vhdl_sources=vhdl_sources,
extra_args=extra_args,
)
@benchmark
def run_test():
runner.test(
toplevel="matrix_multiplier",
toplevel_lang=toplevel_lang,
py_module="test_matrix_multiplier",
extra_args=extra_args,
python_search=[os.path.join(proj_dir, "tests")],
)
|
6,792 |
def get_doctypes_for_global_search():
doctypes = frappe.get_list("Global Search DocType", fields=["document_type"], order_by="idx ASC")
if not doctypes:
return []
priorities = [d.document_type for d in doctypes]
allowed_doctypes = ",".join(["'{0}'".format(dt) for dt in priorities])
frappe.cache().hset("global_search", "search_priorities", priorities)
frappe.cache().hset("global_search", "allowed_doctypes", allowed_doctypes)
return priorities, allowed_doctypes
|
def set_global_search_doctypes_cache():
doctypes = frappe.get_list("Global Search DocType", fields=["document_type"], order_by="idx ASC")
if not doctypes:
return []
priorities = [d.document_type for d in doctypes]
allowed_doctypes = ",".join(["'{0}'".format(dt) for dt in priorities])
frappe.cache().hset("global_search", "search_priorities", priorities)
frappe.cache().hset("global_search", "allowed_doctypes", allowed_doctypes)
return priorities, allowed_doctypes
|
57,823 |
def main():
global USER
global TOKEN
global PRIVATE_KEY
global INTEGRATION_ID
global INSTALLATION_ID
global REPOSITORY
global USE_SSL
global FETCH_TIME
global USER_SUFFIX
global ISSUE_SUFFIX
global RELEASE_SUFFIX
global PULLS_SUFFIX
global HEADERS
USER = demisto.params().get('user')
TOKEN = demisto.params().get('token', '')
creds: dict = demisto.params().get('credentials', {})
PRIVATE_KEY = creds.get('sshkey', '') if creds else ''
INTEGRATION_ID = demisto.params().get('integration_id')
INSTALLATION_ID = demisto.params().get('installation_id')
REPOSITORY = demisto.params().get('repository')
USE_SSL = not demisto.params().get('insecure', False)
FETCH_TIME = demisto.params().get('fetch_time', '3')
USER_SUFFIX = '/repos/{}/{}'.format(USER, REPOSITORY)
ISSUE_SUFFIX = USER_SUFFIX + '/issues'
RELEASE_SUFFIX = USER_SUFFIX + '/releases'
PULLS_SUFFIX = USER_SUFFIX + '/pulls'
if TOKEN == '' and PRIVATE_KEY != '':
try:
import jwt # noqa
except Exception:
            return_error("You need to update the docker image so that the jwt package can be used")
generated_jwt_token = create_jwt(PRIVATE_KEY, INTEGRATION_ID)
TOKEN = get_installation_access_token(INSTALLATION_ID, generated_jwt_token)
if TOKEN == '' and PRIVATE_KEY == '':
return_error("Insert api token or private key")
HEADERS = {
'Authorization': "Bearer " + TOKEN
}
handle_proxy()
cmd = demisto.command()
LOG(f'command is {cmd}')
try:
if cmd in COMMANDS.keys():
COMMANDS[cmd]()
except Exception as e:
return_error(str(e))
|
def main():
global USER
global TOKEN
global PRIVATE_KEY
global INTEGRATION_ID
global INSTALLATION_ID
global REPOSITORY
global USE_SSL
global FETCH_TIME
global USER_SUFFIX
global ISSUE_SUFFIX
global RELEASE_SUFFIX
global PULLS_SUFFIX
global HEADERS
params = demisto.params()
USER = params.get('user')
TOKEN = params.get('token', '')
creds: dict = params.get('credentials', {})
PRIVATE_KEY = creds.get('sshkey', '') if creds else ''
INTEGRATION_ID = params.get('integration_id')
INSTALLATION_ID = params.get('installation_id')
REPOSITORY = params.get('repository')
USE_SSL = not params.get('insecure', False)
FETCH_TIME = params.get('fetch_time', '3')
USER_SUFFIX = '/repos/{}/{}'.format(USER, REPOSITORY)
ISSUE_SUFFIX = USER_SUFFIX + '/issues'
RELEASE_SUFFIX = USER_SUFFIX + '/releases'
PULLS_SUFFIX = USER_SUFFIX + '/pulls'
if TOKEN == '' and PRIVATE_KEY != '':
try:
import jwt # noqa
except Exception:
            return_error("You need to update the docker image so that the jwt package can be used")
generated_jwt_token = create_jwt(PRIVATE_KEY, INTEGRATION_ID)
TOKEN = get_installation_access_token(INSTALLATION_ID, generated_jwt_token)
if TOKEN == '' and PRIVATE_KEY == '':
return_error("Insert api token or private key")
HEADERS = {
'Authorization': "Bearer " + TOKEN
}
handle_proxy()
cmd = demisto.command()
LOG(f'command is {cmd}')
try:
if cmd in COMMANDS.keys():
COMMANDS[cmd]()
except Exception as e:
return_error(str(e))
|
1,248 |
def proc_file(f, opts):
verbose(1, f"Loading {f}")
row = [f"@l{f}"]
try:
vol = nib.load(f)
h = vol.header
except Exception as e:
row += ['failed']
verbose(2, f"Failed to gather information -- {str(e)}")
return row
row += [str(safe_get(h, 'data_dtype')),
f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]",
f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}"]
# Slope
if hasattr(h, 'has_data_slope') and \
(h.has_data_slope or h.has_data_intercept) and \
not h.get_slope_inter() in [(1.0, 0.0), (None, None)]:
row += ['@l*%.3g+%.3g' % h.get_slope_inter()]
else:
row += ['']
if hasattr(h, 'extensions') and len(h.extensions):
row += ['@l#exts: %d' % len(h.extensions)]
else:
row += ['']
if opts.header_fields:
# signals "all fields"
if opts.header_fields == 'all':
# TODO: might vary across file types, thus prior sensing
# would be needed
header_fields = h.keys()
else:
header_fields = opts.header_fields.split(',')
for f in header_fields:
if not f: # skip empty
continue
try:
row += [str(h[f])]
except (KeyError, ValueError):
row += [_err()]
try:
if (hasattr(h, 'get_qform') and hasattr(h, 'get_sform') and
(h.get_qform() != h.get_sform()).any()):
row += ['sform']
else:
row += ['']
except Exception as e:
verbose(2, f"Failed to obtain qform or sform -- {str(e)}")
if isinstance(h, nib.AnalyzeHeader):
row += ['']
else:
row += [_err()]
if opts.stats or opts.counts:
# We are doomed to load data
try:
d = np.asarray(vol.dataobj)
if not opts.stats_zeros:
d = d[np.nonzero(d)]
else:
# at least flatten it -- functionality below doesn't
# depend on the original shape, so let's use a flat view
d = d.reshape(-1)
if opts.stats:
# just # of elements
row += ["@l[%d]" % np.prod(d.shape)]
# stats
row += [len(d) and f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' or '-']
if opts.counts:
items, inv = np.unique(d, return_inverse=True)
if len(items) > 1000 and not opts.all_counts:
counts = _err("%d uniques. Use --all-counts" % len(items))
else:
freq = np.bincount(inv)
counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq))
row += ["@l" + counts]
except IOError as e:
verbose(2, f"Failed to obtain stats/counts -- {str(e)}")
row += [_err()]
return row
|
def proc_file(f, opts):
verbose(1, f"Loading {f}")
row = [f"@l{f}"]
try:
vol = nib.load(f)
h = vol.header
except Exception as e:
row += ['failed']
verbose(2, f"Failed to gather information -- {str(e)}")
return row
row += [str(safe_get(h, 'data_dtype')),
f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]",
f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}"]
# Slope
if hasattr(h, 'has_data_slope') and \
(h.has_data_slope or h.has_data_intercept) and \
not h.get_slope_inter() in [(1.0, 0.0), (None, None)]:
row += ['@l*%.3g+%.3g' % h.get_slope_inter()]
else:
row += ['']
if hasattr(h, 'extensions') and len(h.extensions):
row += ['@l#exts: %d' % len(h.extensions)]
else:
row += ['']
if opts.header_fields:
# signals "all fields"
if opts.header_fields == 'all':
# TODO: might vary across file types, thus prior sensing
# would be needed
header_fields = h.keys()
else:
header_fields = opts.header_fields.split(',')
for f in header_fields:
if not f: # skip empty
continue
try:
row += [str(h[f])]
except (KeyError, ValueError):
row += [_err()]
try:
if (hasattr(h, 'get_qform') and hasattr(h, 'get_sform') and
(h.get_qform() != h.get_sform()).any()):
row += ['sform']
else:
row += ['']
except Exception as e:
verbose(2, f"Failed to obtain qform or sform -- {str(e)}")
if isinstance(h, nib.AnalyzeHeader):
row += ['']
else:
row += [_err()]
if opts.stats or opts.counts:
# We are doomed to load data
try:
d = np.asarray(vol.dataobj)
if not opts.stats_zeros:
d = d[np.nonzero(d)]
else:
# at least flatten it -- functionality below doesn't
# depend on the original shape, so let's use a flat view
d = d.reshape(-1)
if opts.stats:
# just # of elements
row += ["@l[%d]" % np.prod(d.shape)]
# stats
row += [f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' if len(d) else '-']
if opts.counts:
items, inv = np.unique(d, return_inverse=True)
if len(items) > 1000 and not opts.all_counts:
counts = _err("%d uniques. Use --all-counts" % len(items))
else:
freq = np.bincount(inv)
counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq))
row += ["@l" + counts]
except IOError as e:
verbose(2, f"Failed to obtain stats/counts -- {str(e)}")
row += [_err()]
return row
|
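The counts branch in proc_file relies on a compact numpy idiom: np.unique with return_inverse=True gives each element's index into the array of unique values, and np.bincount of those indices yields the per-value frequencies. A standalone check:

import numpy as np

d = np.array([3, 1, 3, 3, 2, 1])
items, inv = np.unique(d, return_inverse=True)
freq = np.bincount(inv)

print(dict(zip(items.tolist(), freq.tolist())))  # {1: 2, 2: 1, 3: 3}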
40,251 |
def meshgrid(x, y, indexing='xy'):
"""Construct coordinate matrices from two coordinate vectors.
Parameters
----------
x : list[float]
The values of the "x axis" of the grid.
y : list[float]
The values of the "y axis" of the grid.
indexing : {'xy', 'ij'}, optional
The indexing strategy determines the structure of the output.
Returns
-------
list[list[float]
The X values of the coordinate grid.
list[list[float]
The Y values of the coordinate grid.
Notes
-----
The output of this function consists of two "matrices", `X` and `Y`.
The structure of the matrices is determined by the choice of `indexing`.
Assuming ``m = len(x)`` and ``n = len(y)``.
If `indexing` is ``'xy'``,
the shape of both matrices is ``(n, m)``,
with `X` containing the elements of `x` in its rows, and `Y` the elements of `y` in its columns.
If `indexing` is ``'ij'``,
the shape of both matrices is ``(m, n)``,
with `X` containing the elements of `x` in its columns, and `Y` the elements of `y` in its rows.
References
----------
This function mimics the functionality of ``numpy.meshgrid`` [1]_, but in a simpler form.
.. [1] `numpy.meshgrid`.
Available at https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html
Examples
--------
>>> from compas.utilities import linspace, meshgrid
>>> x = list(linspace(0, 1, 3))
>>> y = list(linspace(0, 1, 2))
>>> X, Y = meshgrid(x, y)
>>> X
[[0.0, 0.5, 1.0], [0.0, 0.5, 1.0]]
>>> Y
[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
>>> X, Y = meshgrid(x, y, 'ij')
>>> X
[[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]
>>> Y
[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]]
"""
x = list(x)
y = list(y)
if indexing == 'xy':
X = [[x[j] for j in range(len(x))] for i in range(len(y))]
Y = [[y[i] for j in range(len(x))] for i in range(len(y))]
else:
X = [[x[i] for j in range(len(y))] for i in range(len(x))]
Y = [[y[j] for j in range(len(y))] for i in range(len(x))]
return X, Y
|
def meshgrid(x, y, indexing='xy'):
"""Construct coordinate matrices from two coordinate vectors.
Parameters
----------
x : list[float]
The values of the "x axis" of the grid.
y : list[float]
The values of the "y axis" of the grid.
indexing : {'xy', 'ij'}, optional
The indexing strategy determines the structure of the output.
Returns
-------
list[list[float]]
The X values of the coordinate grid.
list[list[float]
The Y values of the coordinate grid.
Notes
-----
The output of this function consists of two "matrices", `X` and `Y`.
The structure of the matrices is determined by the choice of `indexing`.
Assuming ``m = len(x)`` and ``n = len(y)``.
If `indexing` is ``'xy'``,
the shape of both matrices is ``(n, m)``,
with `X` containing the elements of `x` in its rows, and `Y` the elements of `y` in its columns.
If `indexing` is ``'ij'``,
the shape of both matrices is ``(m, n)``,
with `X` containing the elements of `x` in its columns, and `Y` the elements of `y` in its rows.
References
----------
This function mimics the functionality of ``numpy.meshgrid`` [1]_, but in a simpler form.
.. [1] `numpy.meshgrid`.
Available at https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html
Examples
--------
>>> from compas.utilities import linspace, meshgrid
>>> x = list(linspace(0, 1, 3))
>>> y = list(linspace(0, 1, 2))
>>> X, Y = meshgrid(x, y)
>>> X
[[0.0, 0.5, 1.0], [0.0, 0.5, 1.0]]
>>> Y
[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
>>> X, Y = meshgrid(x, y, 'ij')
>>> X
[[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]
>>> Y
[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]]
"""
x = list(x)
y = list(y)
if indexing == 'xy':
X = [[x[j] for j in range(len(x))] for i in range(len(y))]
Y = [[y[i] for j in range(len(x))] for i in range(len(y))]
else:
X = [[x[i] for j in range(len(y))] for i in range(len(x))]
Y = [[y[j] for j in range(len(y))] for i in range(len(x))]
return X, Y
|
30,770 |
def main():
# get the service API url
base_url = demisto.params().get('url')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
credentials = demisto.params().get('credentials')
username = credentials['identifier'] if credentials else ''
password = credentials['password'] if credentials else ''
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy)
command = demisto.command()
args = demisto.args()
commands = {'test-module': test_module,
'imperva-waf-ip-group-list': ip_group_list_command,
'imperva-waf-ip-group-list-entries': ip_group_list_entries_command,
'imperva-waf-ip-group-remove-entries': ip_group_remove_entries_command,
'imperva-waf-sites-list': sites_list_command,
'imperva-waf-server-group-list': server_groups_list_command,
'imperva-waf-server-group-list-policies': server_group_policies_list_command,
'imperva-waf-web-service-custom-policy-list': custom_policy_list_command,
'imperva-waf-web-service-custom-policy-get': get_custom_policy_command,
'imperva-waf-ip-group-create': create_ip_group_command,
'imperva-waf-ip-group-update-entries': update_ip_group_command,
'imperva-waf-ip-group-delete': delete_ip_group_command,
'imperva-waf-web-service-custom-policy-create': create_custom_policy_command,
'imperva-waf-web-service-custom-policy-update': update_custom_policy_command,
'imperva-waf-web-service-custom-policy-delete': delete_custom_policy_command
}
if command in commands:
return commands[command](client, args)
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
# Log exceptions
except Exception as e:
LOG(e)
LOG.print_log()
return_error(f'Unexpected error: {str(e)}', error=traceback.format_exc())
|
def main():
# get the service API url
base_url = demisto.params().get('url')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
credentials = demisto.params().get('credentials')
username = credentials['identifier'] if credentials else ''
password = credentials['password'] if credentials else ''
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy)
command = demisto.command()
args = demisto.args()
commands = {'test-module': test_module,
'imperva-waf-ip-group-list': ip_group_list_command,
'imperva-waf-ip-group-list-entries': ip_group_list_entries_command,
'imperva-waf-ip-group-remove-entries': ip_group_remove_entries_command,
'imperva-waf-sites-list': sites_list_command,
'imperva-waf-server-group-list': server_groups_list_command,
'imperva-waf-server-group-list-policies': server_group_policies_list_command,
'imperva-waf-web-service-custom-policy-list': custom_policy_list_command,
'imperva-waf-web-service-custom-policy-get': get_custom_policy_command,
'imperva-waf-ip-group-create': create_ip_group_command,
'imperva-waf-ip-group-update-entries': update_ip_group_command,
'imperva-waf-ip-group-delete': delete_ip_group_command,
'imperva-waf-web-service-custom-policy-create': create_custom_policy_command,
'imperva-waf-web-service-custom-policy-update': update_custom_policy_command,
'imperva-waf-web-service-custom-policy-delete': delete_custom_policy_command,
}
if command in commands:
return commands[command](client, args)
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
# Log exceptions
except Exception as e:
LOG(e)
LOG.print_log()
return_error(f'Unexpected error: {str(e)}', error=traceback.format_exc())
|
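Aside: both main() variants in the row above rely on the same dict-based command dispatch. A minimal, library-free sketch of that pattern (the command names and handlers here are hypothetical, not part of the Imperva integration) is:

def ping(args):
    return 'pong'

def echo(args):
    return args.get('text', '')

# Registry mapping command names to handler functions.
COMMANDS = {
    'demo-ping': ping,
    'demo-echo': echo,
}

def dispatch(command, args):
    # Look the handler up once; unknown commands fail loudly, mirroring the
    # NotImplementedError branch in the main() functions above.
    handler = COMMANDS.get(command)
    if handler is None:
        raise NotImplementedError(f'Command "{command}" is not implemented.')
    return handler(args)

print(dispatch('demo-echo', {'text': 'hello'}))  # prints: hello

Keeping the table as data rather than an if/elif chain makes it trivial to register new commands and to fail loudly on unknown ones.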
41,894 |
def get_all_study_summaries(storage: Union[str, storages.BaseStorage]) -> List[StudySummary]:
"""Get all history of studies stored in a specified storage.
Example:
.. testsetup::
import os
if os.path.exists("example.db"):
raise RuntimeError("'example.db' already exists. Please remove it.")
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_float("x", -10, 10)
return (x - 2) ** 2
study = optuna.create_study(study_name="example-study", storage="sqlite:///example.db")
study.optimize(objective, n_trials=3)
study_sammary = optuna.study.get_all_study_summaries(storage="sqlite:///example.db")[0]
.. testcleanup::
os.remove("example.db")
Args:
storage:
Database URL such as ``sqlite:///example.db``. Please see also the documentation of
:func:`~optuna.study.create_study` for further details.
Returns:
List of study history summarized as :class:`~optuna.study.StudySummary` objects.
See also:
:func:`optuna.get_all_study_summaries` is an alias of
:func:`optuna.study.get_all_study_summaries`.
"""
storage = storages.get_storage(storage)
return storage.get_all_study_summaries()
|
def get_all_study_summaries(storage: Union[str, storages.BaseStorage]) -> List[StudySummary]:
"""Get all history of studies stored in a specified storage.
Example:
.. testsetup::
import os
if os.path.exists("example.db"):
raise RuntimeError("'example.db' already exists. Please remove it.")
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_float("x", -10, 10)
return (x - 2) ** 2
study = optuna.create_study(study_name="example-study", storage="sqlite:///example.db")
study.optimize(objective, n_trials=3)
study_summary = optuna.study.get_all_study_summaries(storage="sqlite:///example.db")[0]
.. testcleanup::
os.remove("example.db")
Args:
storage:
Database URL such as ``sqlite:///example.db``. Please see also the documentation of
:func:`~optuna.study.create_study` for further details.
Returns:
List of study history summarized as :class:`~optuna.study.StudySummary` objects.
See also:
:func:`optuna.get_all_study_summaries` is an alias of
:func:`optuna.study.get_all_study_summaries`.
"""
storage = storages.get_storage(storage)
return storage.get_all_study_summaries()
|
33,663 |
def Replay(*,
local_buffer: LocalReplayBuffer = None,
actors: List["ActorHandle"] = None,
async_queue_depth=4):
"""Replay experiences from the given buffer or actors.
This should be combined with the StoreToReplayActors operation using the
Concurrently() operator.
Arguments:
local_buffer (LocalReplayBuffer): Local buffer to use. Only one of this
and replay_actors can be specified.
actors (list): List of replay actors. Only one of this and
local_buffer can be specified.
async_queue_depth (int): In async mode, the max number of async
requests in flight per actor.
Examples:
>>> actors = [ReplayActor.remote() for _ in range(4)]
>>> replay_op = Replay(actors=actors)
>>> next(replay_op)
SampleBatch(...)
"""
if bool(local_buffer) == bool(actors):
raise ValueError(
"Exactly one of local_buffer and replay_actors must be given.")
if actors:
replay = from_actors(actors)
return replay.gather_async(async_queue_depth=async_queue_depth).filter(
lambda x: x is not None)
def gen_replay(_):
while True:
item = local_buffer.replay()
if item is None:
yield _NextValueNotReady()
else:
yield item
return LocalIterator(gen_replay, SharedMetrics())
|
def Replay(*,
local_buffer: LocalReplayBuffer = None,
actors: List["ActorHandle"] = None,
async_queue_depth=4):
"""Replay experiences from the given buffer or actors.
This should be combined with the StoreToReplayBuffer operation using the
Concurrently() operator.
Arguments:
local_buffer (LocalReplayBuffer): Local buffer to use. Only one of this
and replay_actors can be specified.
actors (list): List of replay actors. Only one of this and
local_buffer can be specified.
async_queue_depth (int): In async mode, the max number of async
requests in flight per actor.
Examples:
>>> actors = [ReplayActor.remote() for _ in range(4)]
>>> replay_op = Replay(actors=actors)
>>> next(replay_op)
SampleBatch(...)
"""
if bool(local_buffer) == bool(actors):
raise ValueError(
"Exactly one of local_buffer and replay_actors must be given.")
if actors:
replay = from_actors(actors)
return replay.gather_async(async_queue_depth=async_queue_depth).filter(
lambda x: x is not None)
def gen_replay(_):
while True:
item = local_buffer.replay()
if item is None:
yield _NextValueNotReady()
else:
yield item
return LocalIterator(gen_replay, SharedMetrics())
|
30,823 |
def enable_disable_user_command(client, args):
scim = verify_and_load_scim_data(args.get('scim'))
parsed_scim = map_scim(scim)
user_id = parsed_scim.get('id')
username = parsed_scim.get('userName')
email = parsed_scim.get('email')
if not (user_id or username or email):
raise Exception('You must provide either the id,, email or username of the user')
if user_id:
user_term = user_id
else:
user_term = email if email else username
if demisto.command() == 'enable-user':
format_pre_text = 'Enable'
active = True
res = client.activate_user(user_term)
elif demisto.command() == 'disable-user':
format_pre_text = 'Disable'
active = False
res = client.deactivate_user(user_term)
if res.status_code == 204:
generic_iam_context = OutputContext(success=True, iden=user_id, username=username, active=active)
elif res.status_code == 404:
res_json = res.json()
generic_iam_context = OutputContext(success=False, iden=user_id, username=username, errorCode=404,
errorMessage=res_json.get('message'), details=res_json)
else:
res_json = res.json()
generic_iam_context = OutputContext(success=False, iden=user_id, username=username,
errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown(f'{format_pre_text} Zoom User:', generic_iam_context.data)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
def enable_disable_user_command(client, args):
scim = verify_and_load_scim_data(args.get('scim'))
parsed_scim = map_scim(scim)
user_id = parsed_scim.get('id')
username = parsed_scim.get('userName')
email = parsed_scim.get('email')
if not (user_id or username or email):
raise Exception('You must provide either the id, email or username of the user')
if user_id:
user_term = user_id
else:
user_term = email if email else username
if demisto.command() == 'enable-user':
format_pre_text = 'Enable'
active = True
res = client.activate_user(user_term)
elif demisto.command() == 'disable-user':
format_pre_text = 'Disable'
active = False
res = client.deactivate_user(user_term)
if res.status_code == 204:
generic_iam_context = OutputContext(success=True, iden=user_id, username=username, active=active)
elif res.status_code == 404:
res_json = res.json()
generic_iam_context = OutputContext(success=False, iden=user_id, username=username, errorCode=404,
errorMessage=res_json.get('message'), details=res_json)
else:
res_json = res.json()
generic_iam_context = OutputContext(success=False, iden=user_id, username=username,
errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown(f'{format_pre_text} Zoom User:', generic_iam_context.data)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
5,679 |
def _er_matrix(n, p):
x = np.triu(np.random.rand(n, n))
m = x + x.T
m[range(n), range(n)] = 0
return (m < p).astype(int)
|
def _er_matrix(n, p):
x = np.triu(np.random.rand(n, n))
m = x + x.T
np.fill_diagonal(m, 0)
return (m < p).astype(int)
|
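Aside on the row above: the only change is swapping the fancy-index assignment for np.fill_diagonal, which zeroes the diagonal in place. A minimal check (arbitrary seed and size, illustrative only) that the two spellings produce the same matrix:

import numpy as np

np.random.seed(0)
n = 5
x = np.triu(np.random.rand(n, n))
m1 = x + x.T          # symmetric matrix of uniform samples
m2 = m1.copy()

m1[range(n), range(n)] = 0   # original style: fancy-index assignment
np.fill_diagonal(m2, 0)      # modified style: in-place helper

assert np.array_equal(m1, m2)
print((m1 < 0.3).astype(int))  # thresholding as in _er_matrix, with p = 0.3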
29,917 |
def main(
argsl: Optional[List[str]] = None,
args: Optional[argparse.Namespace] = None,
job_order_object: Optional[CWLObjectType] = None,
stdin: IO[Any] = sys.stdin,
stdout: Optional[Union[TextIO, StreamWriter]] = None,
stderr: IO[Any] = sys.stderr,
versionfunc: Callable[[], str] = versionstring,
logger_handler: Optional[logging.Handler] = None,
custom_schema_callback: Optional[Callable[[], None]] = None,
executor: Optional[JobExecutor] = None,
loadingContext: Optional[LoadingContext] = None,
runtimeContext: Optional[RuntimeContext] = None,
input_required: bool = True,
) -> int:
if not stdout: # force UTF-8 even if the console is configured differently
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding.upper() not in (
"UTF-8",
"UTF8",
):
if hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
else:
stdout = getwriter("utf-8")(sys.stdout) # type: ignore
else:
stdout = sys.stdout
_logger.removeHandler(defaultStreamHandler)
stderr_handler = logger_handler
if stderr_handler is not None:
_logger.addHandler(stderr_handler)
else:
coloredlogs.install(logger=_logger, stream=stderr)
stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler: Optional[logging.StreamHandler[ProvOut]] = None
try:
if args is None:
if argsl is None:
argsl = sys.argv[1:]
addl = [] # type: List[str]
if "CWLTOOL_OPTIONS" in os.environ:
addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
parser = arg_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args(addl + argsl)
if args.record_container_id:
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
else:
runtimeContext = runtimeContext.copy()
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
for key, val in get_default_args().items():
if not hasattr(args, key):
setattr(args, key, val)
configure_logging(
stderr_handler,
args.quiet,
runtimeContext.debug,
args.enable_color,
args.timestamps,
)
if args.version:
print(versionfunc(), file=stdout)
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)), file=stdout)
return 0
if not args.workflow:
if os.path.isfile("CWLFile"):
args.workflow = "CWLFile"
else:
_logger.error("CWL document required, no input file was provided")
parser.print_help(stderr)
return 1
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if args.mpi_config_file is not None:
runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)
setup_schema(args, custom_schema_callback)
prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
if args.provenance:
if argsl is None:
raise Exception("argsl cannot be None")
try:
prov_log_stream, prov_log_handler = setup_provenance(
args, argsl, runtimeContext
)
except ArgumentException:
return 1
loadingContext = setup_loadingContext(loadingContext, runtimeContext, args)
uri, tool_file_uri = resolve_tool_uri(
args.workflow,
resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor,
)
try_again_msg = (
"" if args.debug else ", try again with --debug for more information"
)
try:
job_order_object, input_basedir, jobloader = load_job_order(
args,
stdin,
loadingContext.fetcher_constructor,
loadingContext.overrides_list,
tool_file_uri,
)
if args.overrides:
loadingContext.overrides_list.extend(
load_overrides(
file_uri(os.path.abspath(args.overrides)), tool_file_uri
)
)
loadingContext, workflowobj, uri = fetch_document(uri, loadingContext)
if args.print_deps and loadingContext.loader:
printdeps(
workflowobj, loadingContext.loader, stdout, args.relative_deps, uri
)
return 0
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
preprocess_only=(args.print_pre or args.pack),
skip_schemas=args.skip_schemas,
)
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
processobj = cast(Union[CommentedMap, CommentedSeq], processobj)
if args.pack:
print(print_pack(loadingContext, uri), file=stdout)
return 0
if args.provenance and runtimeContext.research_obj:
# Can't really be combined with args.pack at same time
runtimeContext.research_obj.packed_workflow(
print_pack(loadingContext, uri)
)
if args.print_pre:
print(
json_dumps(
processobj,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
try:
tool = make_tool(uri, loadingContext)
except GraphTargetMissingException as main_missing_exc:
if args.validate:
logging.warn(
"File contains $graph of multiple objects and no default "
"process (#main). Validating all objects:"
)
for entry in workflowobj["$graph"]:
entry_id = entry["id"]
make_tool(entry_id, loadingContext)
print(f"{entry_id} is valid CWL.", file=stdout)
else:
raise main_missing_exc
if args.make_template:
make_template(tool)
return 0
if args.validate:
print(f"{args.workflow} is valid CWL.", file=stdout)
return 0
if args.print_rdf or args.provenance:
output = stdout
if args.provenance:
# No output variable set... and maybe no args.rdf_serializer?
workflow_provenance = args.provenance + "/workflow.ttl"
# Sets up a turtle file for the workflow information (not yet in the provenance folder as it does
# not exist and creating it will give issues).
output = open("workflow.ttl", "w")
print("Writing workflow rdf to " + workflow_provenance)
print(
printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer),
file=output,
)
# close the output
if args.provenance:
output.close()
# Only print_rdf exits this way
if args.print_rdf:
return 0
if args.print_dot:
printdot(tool, loadingContext.loader.ctx, stdout)
return 0
if args.print_targets:
print_targets(tool, stdout, loadingContext)
return 0
if args.target:
ctool = choose_target(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_step:
ctool = choose_step(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_process:
ctool = choose_process(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
print(
json_dumps(
tool.tool,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
except (ValidationException) as exc:
_logger.error(
"Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(
"Tool definition failed initialization:\n%s",
str(exc),
exc_info=args.debug,
)
return 1
except Exception as exc:
_logger.error(
"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
str(exc) if not args.debug else "",
exc_info=args.debug,
)
return 1
if isinstance(tool, int):
return tool
# If on MacOS platform, TMPDIR must be set to be under one of the
# shared volumes in Docker for Mac
# More info: https://dockstore.org/docs/faq
if sys.platform == "darwin":
default_mac_path = "/private/tmp/docker_tmp"
if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmp_outdir_prefix = default_mac_path
if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmpdir_prefix = default_mac_path
if check_working_directories(runtimeContext) is not None:
return 1
if args.cachedir:
if args.move_outputs == "move":
runtimeContext.move_outputs = "copy"
runtimeContext.tmp_outdir_prefix = args.cachedir
runtimeContext.log_dir = args.log_dir
runtimeContext.secret_store = getdefault(
runtimeContext.secret_store, SecretStore()
)
runtimeContext.make_fs_access = getdefault(
runtimeContext.make_fs_access, StdFsAccess
)
if not executor:
if args.parallel:
temp_executor = MultithreadedJobExecutor()
runtimeContext.select_resources = temp_executor.select_resources
real_executor = temp_executor # type: JobExecutor
else:
real_executor = SingleJobExecutor()
else:
real_executor = executor
try:
runtimeContext.basedir = input_basedir
if isinstance(tool, ProcessGenerator):
tfjob_order = {} # type: CWLObjectType
if loadingContext.jobdefaults:
tfjob_order.update(loadingContext.jobdefaults)
if job_order_object:
tfjob_order.update(job_order_object)
tfout, tfstatus = real_executor(
tool.embedded_tool, tfjob_order, runtimeContext
)
if not tfout or tfstatus != "success":
raise WorkflowException(
"ProcessGenerator failed to generate workflow"
)
tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext)
if not job_order_object:
job_order_object = None
try:
initialized_job_order_object = init_job_order(
job_order_object,
args,
tool,
jobloader,
stdout,
print_input_deps=args.print_input_deps,
relative_deps=args.relative_deps,
make_fs_access=runtimeContext.make_fs_access,
input_basedir=input_basedir,
secret_store=runtimeContext.secret_store,
input_required=input_required,
runtime_context=runtimeContext,
)
except SystemExit as err:
return err.code
del args.workflow
del args.job_order
conf_file = getattr(
args, "beta_dependency_resolvers_configuration", None
) # str
use_conda_dependencies = getattr(
args, "beta_conda_dependencies", None
) # str
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
else:
runtimeContext.find_default_container = functools.partial(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers,
)
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext, logger=_logger
)
if out is not None:
if runtimeContext.research_obj is not None:
runtimeContext.research_obj.create_job(out, True)
def remove_at_id(doc: CWLObjectType) -> None:
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
elif isinstance(value, MutableSequence):
for entry in value:
if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(out)
visit_class(
out,
("File",),
functools.partial(add_sizes, runtimeContext.make_fs_access("")),
)
def loc_to_path(obj: CWLObjectType) -> None:
for field in ("path", "nameext", "nameroot", "dirname"):
if field in obj:
del obj[field]
if cast(str, obj["location"]).startswith("file://"):
obj["path"] = uri_file_path(cast(str, obj["location"]))
visit_class(out, ("File", "Directory"), loc_to_path)
# Unsetting the Generation from final output object
visit_class(out, ("File",), MutationManager().unset_generation)
print(
json_dumps(out, indent=4, ensure_ascii=False, default=str),
file=stdout,
)
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning("Final process status is %s", status)
return 1
_logger.info("Final process status is %s", status)
return 0
except (ValidationException) as exc:
_logger.error(
"Input object failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except UnsupportedRequirement as exc:
_logger.error(
"Workflow or tool uses unsupported feature:\n%s",
str(exc),
exc_info=args.debug,
)
return 33
except WorkflowException as exc:
_logger.error(
"Workflow error%s:\n%s",
try_again_msg,
strip_dup_lineno(str(exc)),
exc_info=args.debug,
)
return 1
except Exception as exc: # pylint: disable=broad-except
_logger.error(
"Unhandled error%s:\n %s",
try_again_msg,
str(exc),
exc_info=args.debug,
)
return 1
finally:
if (
args
and runtimeContext
and runtimeContext.research_obj
and workflowobj
and loadingContext
):
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(
prov_deps(workflowobj, loadingContext.loader, uri)
)
else:
_logger.warning(
"Unable to generate provenance snapshot "
" due to missing loadingContext.loader."
)
if prov_log_handler is not None:
# Stop logging so we won't half-log adding ourself to RO
_logger.debug(
"[provenance] Closing provenance log file %s", prov_log_handler
)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
if prov_log_stream:
prov_log_stream.close()
# Why not use prov_log_handler.stream ? That is not part of the
# public API for logging.StreamHandler
prov_log_handler.close()
research_obj.close(args.provenance)
# Copy workflow.ttl to args.provenance
if os.path.isfile("workflow.ttl"):
shutil.copy("workflow.ttl", args.provenance + "/workflow/workflow.ttl")
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
|
def main(
argsl: Optional[List[str]] = None,
args: Optional[argparse.Namespace] = None,
job_order_object: Optional[CWLObjectType] = None,
stdin: IO[Any] = sys.stdin,
stdout: Optional[Union[TextIO, StreamWriter]] = None,
stderr: IO[Any] = sys.stderr,
versionfunc: Callable[[], str] = versionstring,
logger_handler: Optional[logging.Handler] = None,
custom_schema_callback: Optional[Callable[[], None]] = None,
executor: Optional[JobExecutor] = None,
loadingContext: Optional[LoadingContext] = None,
runtimeContext: Optional[RuntimeContext] = None,
input_required: bool = True,
) -> int:
if not stdout: # force UTF-8 even if the console is configured differently
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding.upper() not in (
"UTF-8",
"UTF8",
):
if hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
else:
stdout = getwriter("utf-8")(sys.stdout) # type: ignore
else:
stdout = sys.stdout
_logger.removeHandler(defaultStreamHandler)
stderr_handler = logger_handler
if stderr_handler is not None:
_logger.addHandler(stderr_handler)
else:
coloredlogs.install(logger=_logger, stream=stderr)
stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler: Optional[logging.StreamHandler[ProvOut]] = None
try:
if args is None:
if argsl is None:
argsl = sys.argv[1:]
addl = [] # type: List[str]
if "CWLTOOL_OPTIONS" in os.environ:
addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
parser = arg_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args(addl + argsl)
if args.record_container_id:
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
else:
runtimeContext = runtimeContext.copy()
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
for key, val in get_default_args().items():
if not hasattr(args, key):
setattr(args, key, val)
configure_logging(
stderr_handler,
args.quiet,
runtimeContext.debug,
args.enable_color,
args.timestamps,
)
if args.version:
print(versionfunc(), file=stdout)
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)), file=stdout)
return 0
if not args.workflow:
if os.path.isfile("CWLFile"):
args.workflow = "CWLFile"
else:
_logger.error("CWL document required, no input file was provided")
parser.print_help(stderr)
return 1
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if args.mpi_config_file is not None:
runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)
setup_schema(args, custom_schema_callback)
prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
if args.provenance:
if argsl is None:
raise Exception("argsl cannot be None")
try:
prov_log_stream, prov_log_handler = setup_provenance(
args, argsl, runtimeContext
)
except ArgumentException:
return 1
loadingContext = setup_loadingContext(loadingContext, runtimeContext, args)
uri, tool_file_uri = resolve_tool_uri(
args.workflow,
resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor,
)
try_again_msg = (
"" if args.debug else ", try again with --debug for more information"
)
try:
job_order_object, input_basedir, jobloader = load_job_order(
args,
stdin,
loadingContext.fetcher_constructor,
loadingContext.overrides_list,
tool_file_uri,
)
if args.overrides:
loadingContext.overrides_list.extend(
load_overrides(
file_uri(os.path.abspath(args.overrides)), tool_file_uri
)
)
loadingContext, workflowobj, uri = fetch_document(uri, loadingContext)
if args.print_deps and loadingContext.loader:
printdeps(
workflowobj, loadingContext.loader, stdout, args.relative_deps, uri
)
return 0
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
preprocess_only=(args.print_pre or args.pack),
skip_schemas=args.skip_schemas,
)
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
processobj = cast(Union[CommentedMap, CommentedSeq], processobj)
if args.pack:
print(print_pack(loadingContext, uri), file=stdout)
return 0
if args.provenance and runtimeContext.research_obj:
# Can't really be combined with args.pack at same time
runtimeContext.research_obj.packed_workflow(
print_pack(loadingContext, uri)
)
if args.print_pre:
print(
json_dumps(
processobj,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
try:
tool = make_tool(uri, loadingContext)
except GraphTargetMissingException as main_missing_exc:
if args.validate:
logging.warn(
"File contains $graph of multiple objects and no default "
"process (#main). Validating all objects:"
)
for entry in workflowobj["$graph"]:
entry_id = entry["id"]
make_tool(entry_id, loadingContext)
print(f"{entry_id} is valid CWL.", file=stdout)
else:
raise main_missing_exc
if args.make_template:
make_template(tool)
return 0
if args.validate:
print(f"{args.workflow} is valid CWL.", file=stdout)
return 0
if args.print_rdf or args.provenance:
output = stdout
if args.provenance:
# No output variable set... and maybe no args.rdf_serializer?
workflow_provenance = args.provenance + "/workflow.ttl"
# Sets up a turtle file for the workflow information (not yet in the provenance folder as it does
# not exist and creating it will give issues).
output = open("workflow.ttl", "w")
_logger.info("Writing workflow rdf to %s", workflow_provenance)
print(
printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer),
file=output,
)
# close the output
if args.provenance:
output.close()
# Only print_rdf exits this way
if args.print_rdf:
return 0
if args.print_dot:
printdot(tool, loadingContext.loader.ctx, stdout)
return 0
if args.print_targets:
print_targets(tool, stdout, loadingContext)
return 0
if args.target:
ctool = choose_target(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_step:
ctool = choose_step(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_process:
ctool = choose_process(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
print(
json_dumps(
tool.tool,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
except (ValidationException) as exc:
_logger.error(
"Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(
"Tool definition failed initialization:\n%s",
str(exc),
exc_info=args.debug,
)
return 1
except Exception as exc:
_logger.error(
"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
str(exc) if not args.debug else "",
exc_info=args.debug,
)
return 1
if isinstance(tool, int):
return tool
# If on MacOS platform, TMPDIR must be set to be under one of the
# shared volumes in Docker for Mac
# More info: https://dockstore.org/docs/faq
if sys.platform == "darwin":
default_mac_path = "/private/tmp/docker_tmp"
if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmp_outdir_prefix = default_mac_path
if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmpdir_prefix = default_mac_path
if check_working_directories(runtimeContext) is not None:
return 1
if args.cachedir:
if args.move_outputs == "move":
runtimeContext.move_outputs = "copy"
runtimeContext.tmp_outdir_prefix = args.cachedir
runtimeContext.log_dir = args.log_dir
runtimeContext.secret_store = getdefault(
runtimeContext.secret_store, SecretStore()
)
runtimeContext.make_fs_access = getdefault(
runtimeContext.make_fs_access, StdFsAccess
)
if not executor:
if args.parallel:
temp_executor = MultithreadedJobExecutor()
runtimeContext.select_resources = temp_executor.select_resources
real_executor = temp_executor # type: JobExecutor
else:
real_executor = SingleJobExecutor()
else:
real_executor = executor
try:
runtimeContext.basedir = input_basedir
if isinstance(tool, ProcessGenerator):
tfjob_order = {} # type: CWLObjectType
if loadingContext.jobdefaults:
tfjob_order.update(loadingContext.jobdefaults)
if job_order_object:
tfjob_order.update(job_order_object)
tfout, tfstatus = real_executor(
tool.embedded_tool, tfjob_order, runtimeContext
)
if not tfout or tfstatus != "success":
raise WorkflowException(
"ProcessGenerator failed to generate workflow"
)
tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext)
if not job_order_object:
job_order_object = None
try:
initialized_job_order_object = init_job_order(
job_order_object,
args,
tool,
jobloader,
stdout,
print_input_deps=args.print_input_deps,
relative_deps=args.relative_deps,
make_fs_access=runtimeContext.make_fs_access,
input_basedir=input_basedir,
secret_store=runtimeContext.secret_store,
input_required=input_required,
runtime_context=runtimeContext,
)
except SystemExit as err:
return err.code
del args.workflow
del args.job_order
conf_file = getattr(
args, "beta_dependency_resolvers_configuration", None
) # str
use_conda_dependencies = getattr(
args, "beta_conda_dependencies", None
) # str
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
else:
runtimeContext.find_default_container = functools.partial(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers,
)
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext, logger=_logger
)
if out is not None:
if runtimeContext.research_obj is not None:
runtimeContext.research_obj.create_job(out, True)
def remove_at_id(doc: CWLObjectType) -> None:
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
elif isinstance(value, MutableSequence):
for entry in value:
if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(out)
visit_class(
out,
("File",),
functools.partial(add_sizes, runtimeContext.make_fs_access("")),
)
def loc_to_path(obj: CWLObjectType) -> None:
for field in ("path", "nameext", "nameroot", "dirname"):
if field in obj:
del obj[field]
if cast(str, obj["location"]).startswith("file://"):
obj["path"] = uri_file_path(cast(str, obj["location"]))
visit_class(out, ("File", "Directory"), loc_to_path)
# Unsetting the Generation from final output object
visit_class(out, ("File",), MutationManager().unset_generation)
print(
json_dumps(out, indent=4, ensure_ascii=False, default=str),
file=stdout,
)
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning("Final process status is %s", status)
return 1
_logger.info("Final process status is %s", status)
return 0
except (ValidationException) as exc:
_logger.error(
"Input object failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except UnsupportedRequirement as exc:
_logger.error(
"Workflow or tool uses unsupported feature:\n%s",
str(exc),
exc_info=args.debug,
)
return 33
except WorkflowException as exc:
_logger.error(
"Workflow error%s:\n%s",
try_again_msg,
strip_dup_lineno(str(exc)),
exc_info=args.debug,
)
return 1
except Exception as exc: # pylint: disable=broad-except
_logger.error(
"Unhandled error%s:\n %s",
try_again_msg,
str(exc),
exc_info=args.debug,
)
return 1
finally:
if (
args
and runtimeContext
and runtimeContext.research_obj
and workflowobj
and loadingContext
):
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(
prov_deps(workflowobj, loadingContext.loader, uri)
)
else:
_logger.warning(
"Unable to generate provenance snapshot "
" due to missing loadingContext.loader."
)
if prov_log_handler is not None:
# Stop logging so we won't half-log adding ourself to RO
_logger.debug(
"[provenance] Closing provenance log file %s", prov_log_handler
)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
if prov_log_stream:
prov_log_stream.close()
# Why not use prov_log_handler.stream ? That is not part of the
# public API for logging.StreamHandler
prov_log_handler.close()
research_obj.close(args.provenance)
# Copy workflow.ttl to args.provenance
if os.path.isfile("workflow.ttl"):
shutil.copy("workflow.ttl", args.provenance + "/workflow/workflow.ttl")
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
|
51,281 |
def _compile_hourly_statistics_summary_sum_legacy_stmt(
start_time: datetime, end_time: datetime
) -> StatementLambdaElement:
"""Generate the legacy sum statement for hourly statistics."""
stmt = lambda_stmt(lambda: select(*QUERY_STATISTICS_SUMMARY_SUM_LEGACY))
stmt += (
lambda q: q.filter(StatisticsShortTerm.start >= start_time)
.filter(StatisticsShortTerm.start < end_time)
.order_by(StatisticsShortTerm.metadata_id, StatisticsShortTerm.start.desc())
)
return stmt
|
def _compile_hourly_statistics_summary_sum_legacy_stmt(
start_time: datetime, end_time: datetime
) -> StatementLambdaElement:
"""Generate the legacy sum statement for hourly statistics.
This is used for databases not supporting row number.
"""
stmt = lambda_stmt(lambda: select(*QUERY_STATISTICS_SUMMARY_SUM_LEGACY))
stmt += (
lambda q: q.filter(StatisticsShortTerm.start >= start_time)
.filter(StatisticsShortTerm.start < end_time)
.order_by(StatisticsShortTerm.metadata_id, StatisticsShortTerm.start.desc())
)
return stmt
|
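Aside: the row above builds its query with SQLAlchemy's lambda_stmt and incremental "+=" composition so statement construction can be cached. A self-contained sketch of the same pattern, assuming SQLAlchemy 1.4+ and using a hypothetical Sample table in place of StatisticsShortTerm:

from datetime import datetime, timedelta

from sqlalchemy import Column, DateTime, Integer, create_engine, lambda_stmt, select
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Sample(Base):
    """Hypothetical stand-in for StatisticsShortTerm."""
    __tablename__ = "sample"
    id = Column(Integer, primary_key=True)
    metadata_id = Column(Integer)
    start = Column(DateTime)

def summary_stmt(start_time, end_time):
    # Base statement wrapped in a lambda so SQLAlchemy can cache its compilation,
    # then extended with additional lambda criteria, mirroring the code above.
    stmt = lambda_stmt(lambda: select(Sample.metadata_id, Sample.start))
    stmt += (
        lambda q: q.filter(Sample.start >= start_time)
        .filter(Sample.start < end_time)
        .order_by(Sample.metadata_id, Sample.start.desc())
    )
    return stmt

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
now = datetime(2024, 1, 1)
with Session(engine) as session:
    rows = session.execute(summary_stmt(now - timedelta(hours=1), now)).all()
    print(rows)  # empty list: the table holds no data, this only exercises the statement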
44,078 |
def graph_to_tape(graph: MultiDiGraph) -> QuantumTape:
"""
Converts a directed multigraph to the corresponding quantum tape.
Args:
graph (MultiDiGraph): directed multigraph containing measure to be
converted to a tape
Returns:
tape (QuantumTape): the quantum tape corresponding to the input
**Example**
Consider the following ... :
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the subgraphs and corresponding tapes by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> subgraphs, communication_graph = qcut.fragment_graph(graph)
>>> tapes = [qcut.graph_to_tape(sg) for sg in subgraphs]
>>> tapes
[<QuantumTape: wires=[0], params=1>, <QuantumTape: wires=[0, 1], params=1>,
<QuantumTape: wires=[1], params=1>, <QuantumTape: wires=[0], params=0>,
<QuantumTape: wires=[1], params=0>]
"""
wires = Wires.all_wires([n.wires for n in graph.nodes])
ordered_ops = sorted(
[(order, op) for op, order in graph.nodes(data="order")], key=lambda x: x[0]
)
wire_map = {w: w for w in wires}
with QuantumTape() as tape:
for _, op in ordered_ops:
new_wires = [wire_map[w] for w in op.wires]
op._wires = Wires(new_wires) # TODO: find a better way to update operation wires
apply(op)
if isinstance(op, MeasureNode):
measured_wire = op.wires[0]
new_wire = _find_new_wire(wires)
wires += new_wire
wire_map[measured_wire] = new_wire
return tape
|
def graph_to_tape(graph: MultiDiGraph) -> QuantumTape:
"""
Converts a directed multigraph to the corresponding quantum tape.
Args:
graph (MultiDiGraph): directed multigraph containing measure to be
converted to a tape
Returns:
QuantumTape: the quantum tape corresponding to the input graph
**Example**
Consider the following ... :
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the subgraphs and corresponding tapes by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> subgraphs, communication_graph = qcut.fragment_graph(graph)
>>> tapes = [qcut.graph_to_tape(sg) for sg in subgraphs]
>>> tapes
[<QuantumTape: wires=[0], params=1>, <QuantumTape: wires=[0, 1], params=1>,
<QuantumTape: wires=[1], params=1>, <QuantumTape: wires=[0], params=0>,
<QuantumTape: wires=[1], params=0>]
"""
wires = Wires.all_wires([n.wires for n in graph.nodes])
ordered_ops = sorted(
[(order, op) for op, order in graph.nodes(data="order")], key=lambda x: x[0]
)
wire_map = {w: w for w in wires}
with QuantumTape() as tape:
for _, op in ordered_ops:
new_wires = [wire_map[w] for w in op.wires]
op._wires = Wires(new_wires) # TODO: find a better way to update operation wires
apply(op)
if isinstance(op, MeasureNode):
measured_wire = op.wires[0]
new_wire = _find_new_wire(wires)
wires += new_wire
wire_map[measured_wire] = new_wire
return tape
|
14,630 |
def _setup_config_parser(config_path, validate=True):
"""
Returns a config parser at a given path. Only implemented as a separate
function to simplify testing.
Parameters
----------
config_path : str
The path to the configuration file.
validate : bool, optional, default=True
Whether to validate the configuration file.
Returns
-------
config : SKLLConfigParser
A SKLL configuration object.
Raises
------
IOError
If the configuration file does not exist.
"""
# initialize config parser with the given defaults
config = SKLLConfigParser()
# Read file if it exists
if not exists(config_path):
raise IOError(errno.ENOENT, "Configuration file does not exist",
config_path)
config.read(config_path)
if validate:
config.validate()
return config
|
def _setup_config_parser(config_path, validate=True):
"""
Returns a config parser at a given path. Only implemented as a separate
function to simplify testing.
Parameters
----------
config_path : str
The path to the configuration file.
validate : bool, default=True
Whether to validate the configuration file.
Returns
-------
config : SKLLConfigParser
A SKLL configuration object.
Raises
------
IOError
If the configuration file does not exist.
"""
# initialize config parser with the given defaults
config = SKLLConfigParser()
# Read file if it exists
if not exists(config_path):
raise IOError(errno.ENOENT, "Configuration file does not exist",
config_path)
config.read(config_path)
if validate:
config.validate()
return config
|
31,847 |
def main():
"""
Initiate integration command
"""
command = demisto.command()
LOG(f'Command being called is {command}')
params = demisto.params()
# init credentials
user_name = params.get('credentials', {}).get('identifier')
api_key = params.get('credentials', {}).get('password')
server_url = params.get('url', '').strip('/')
reliability = params.get('integrationReliability', DBotScoreReliability.B)
if DBotScoreReliability.is_valid_type(reliability):
reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
Exception("Please provide a valid value for the Source Reliability parameter.")
commands = {
# reputation commands
'ip': ips_reputation_command,
'domain': domains_reputation_command,
'file': files_reputation_command,
'url': urls_reputation_command,
'threatstream-email-reputation': get_email_reputation,
'threatstream-import-indicator-with-approval': import_ioc_with_approval,
'threatstream-import-indicator-without-approval': import_ioc_without_approval,
'threatstream-get-analysis-status': get_submission_status,
'threatstream-get-passive-dns': get_passive_dns,
'threatstream-get-model-list': get_model_list,
'threatstream-get-model-description': get_model_description,
'threatstream-get-indicators-by-model': get_iocs_by_model,
'threatstream-get-indicators': get_indicators,
'threatstream-supported-platforms': get_supported_platforms,
'threatstream-analysis-report': get_report,
'threatstream-create-model': create_model,
'threatstream-update-model': update_model,
'threatstream-submit-to-sandbox': submit_report,
'threatstream-add-tag-to-model': add_tag_to_model
}
try:
client = Client(
base_url=f'{server_url}/api/',
user_name=user_name,
api_key=api_key,
verify=not params.get('insecure', False),
proxy=params.get('proxy', False),
reliability=reliability,
should_create_relationships=params.get('create_relationships', True),
)
args = prepare_args(demisto.args(), command, params)
if command == 'test-module':
result = test_module(client)
elif command in REPUTATION_COMMANDS:
result = commands[command](client, DBotScoreCalculator(params), **args) # type: ignore
else:
result = commands[command](client, **args) # type: ignore
return_results(result)
except Exception as err:
return_error(f'{str(err)}, traceback {traceback.format_exc()}')
|
def main():
"""
Initiate integration command
"""
command = demisto.command()
LOG(f'Command being called is {command}')
params = demisto.params()
# init credentials
user_name = params.get('credentials', {}).get('identifier')
api_key = params.get('credentials', {}).get('password')
server_url = params.get('url', '').strip('/')
reliability = params.get('integrationReliability', DBotScoreReliability.B)
if DBotScoreReliability.is_valid_type(reliability):
reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
Exception("Please provide a valid value for the Source Reliability parameter.")
commands = {
# reputation commands
'ip': ips_reputation_command,
'domain': domains_reputation_command,
'file': files_reputation_command,
'url': urls_reputation_command,
'threatstream-email-reputation': get_email_reputation,
'threatstream-import-indicator-with-approval': import_ioc_with_approval,
'threatstream-import-indicator-without-approval': import_ioc_without_approval,
'threatstream-get-analysis-status': get_submission_status,
'threatstream-get-passive-dns': get_passive_dns,
'threatstream-get-model-list': get_model_list,
'threatstream-get-model-description': get_model_description,
'threatstream-get-indicators-by-model': get_iocs_by_model,
'threatstream-get-indicators': get_indicators,
'threatstream-supported-platforms': get_supported_platforms,
'threatstream-analysis-report': get_report,
'threatstream-create-model': create_model,
'threatstream-update-model': update_model,
'threatstream-submit-to-sandbox': submit_report,
'threatstream-add-tag-to-model': add_tag_to_model,
}
try:
client = Client(
base_url=f'{server_url}/api/',
user_name=user_name,
api_key=api_key,
verify=not params.get('insecure', False),
proxy=params.get('proxy', False),
reliability=reliability,
should_create_relationships=params.get('create_relationships', True),
)
args = prepare_args(demisto.args(), command, params)
if command == 'test-module':
result = test_module(client)
elif command in REPUTATION_COMMANDS:
result = commands[command](client, DBotScoreCalculator(params), **args) # type: ignore
else:
result = commands[command](client, **args) # type: ignore
return_results(result)
except Exception as err:
return_error(f'{str(err)}, traceback {traceback.format_exc()}')
|