id | original | modified
---|---|---|
57,143 |
def update_task_dict_with_resolver_settings(task_entry):
"""Makes changes to the given task dict according to resolver settings.
Args:
task_entry: improvements_domain.TaskEntry. The TaskEntry domain object
whose dict is to be changed.
Returns:
TaskEntryDict. Updated TaskEntry dict.
"""
resolver_settings = (
task_entry.resolver_id and
user_services.get_user_settings(
task_entry.resolver_id, strict=True)) # type: ignore[no-untyped-call]
task_entry_dict = task_entry.to_dict()
task_entry_dict['resolver_username'] = (
resolver_settings and resolver_settings.username)
task_entry_dict['resolver_profile_picture_data_url'] = (
resolver_settings and
resolver_settings.profile_picture_data_url)
return task_entry_dict
|
def update_task_dict_with_resolver_settings(task_entry):
"""Makes changes to the given task dict according to resolver settings.
Args:
task_entry: improvements_domain.TaskEntry. The TaskEntry domain object
whose dict is to be changed.
Returns:
TaskEntryDict. Updated TaskEntry dict.
"""
resolver_settings = (
task_entry.resolver_id and
user_services.get_user_settings(task_entry.resolver_id, strict=True)) # type: ignore[no-untyped-call]
task_entry_dict = task_entry.to_dict()
task_entry_dict['resolver_username'] = (
resolver_settings and resolver_settings.username)
task_entry_dict['resolver_profile_picture_data_url'] = (
resolver_settings and
resolver_settings.profile_picture_data_url)
return task_entry_dict
|
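A minimal sketch of the `x and y` chaining idiom used in the pair above, with a hypothetical `lookup` helper standing in for `user_services.get_user_settings`; when the left operand is falsy, Python short-circuits and the falsy value propagates instead of raising AttributeError:
from types import SimpleNamespace

def lookup(user_id):
    # hypothetical stand-in for user_services.get_user_settings
    return SimpleNamespace(username='alice', profile_picture_data_url='data:...')

resolver_id = None
settings = resolver_id and lookup(resolver_id)  # stays None, lookup() never runs
username = settings and settings.username       # also None, no AttributeError
print(username)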
9,571 |
def check_cli(module, cli):
"""
This method checks for pim ssm config using the vrouter-show command.
Return True if PIM SSM (proto-multi) is already configured on the given vRouter, else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_vrouter_name']
show = cli
cli += 'vrouter-show name %s ' % name
cli += 'format name no-show-headers'
out = module.run_command(cli, use_unsafe_shell=True)
if out:
pass
else:
return False
cli = show
cli += ' vrouter-show name '
cli += '%s format proto-multi no-show-headers' % name
out = module.run_command(cli, use_unsafe_shell=True)[1]
return True if 'none' not in out else False
|
def check_cli(module, cli):
"""
This method checks for pim ssm config using the vrouter-show command.
Return True if PIM SSM (proto-multi) is already configured on the given vRouter, else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_vrouter_name']
show = cli
cli += 'vrouter-show name %s format name no-show-headers' % name
out = module.run_command(cli, use_unsafe_shell=True)
if out:
pass
else:
return False
cli = show
cli += ' vrouter-show name '
cli += '%s format proto-multi no-show-headers' % name
out = module.run_command(cli, use_unsafe_shell=True)[1]
return True if 'none' not in out else False
|
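A hedged aside on the pair above: AnsibleModule.run_command returns an (rc, stdout, stderr) tuple, which is why the second call indexes [1]; because a non-empty tuple is always truthy, a stdout check normally unpacks the tuple first, roughly as in this sketch (the helper name is hypothetical):
def vrouter_exists(module, cli):
    # hypothetical helper mirroring the first half of check_cli
    rc, out, err = module.run_command(cli, use_unsafe_shell=True)
    return bool(out)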
54,988 |
def min_vertex_cover(graph, constrained=True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the Minimum Vertex Cover problem,
for a given graph.
The goal of the Minimum Vertex Cover problem is to find the smallest
`vertex cover <https://en.wikipedia.org/wiki/Vertex_cover>`__ of a graph (a collection of nodes such that
every edge in the graph has one of the nodes as an endpoint).
Args:
graph (nx.Graph): a graph defining the pairs of wires on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian):
.. UsageDetails::
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas
in `[arXiv:1709.03489] <https://arxiv.org/abs/1709.03489>`__.
The constrained MinVertexCover cost Hamiltonian is defined as:
.. math:: H_C \ = \ - \displaystyle\sum_{v \in V(G)} Z_{v}
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator applied to the :math:`i`-th
vertex.
The returned mixer Hamiltonian is `~qaoa.bit_flip_mixer` applied to :math:`G`.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|1\rangle` state
**Unconstrained**
The Minimum Vertex Cover cost Hamiltonian is defined as:
.. math:: H_C \ = \ \frac{3}{4} \displaystyle\sum_{(i, j) \in E(G)} (Z_i Z_j \ + \ Z_i \ + \ Z_j) \ - \ \displaystyle\sum_{i \in V(G)} Z_i
where :math:`E(G)` is the edges of :math:`G`, :math:`V(G)` is the set of vertices, and :math:`Z_i` is the Pauli-Z operator
acting on the :math:`i`-th vertex.
The returned mixer Hamiltonian is `~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states
"""
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
if constrained:
return (bit_driver(graph.nodes, 0), qaoa.bit_flip_mixer(graph, 1))
cost_h = edge_driver(graph, ['11', '10', '01']) + bit_driver(graph.nodes, 0)
mixer_h = qaoa.x_mixer(graph.nodes)
return (cost_h, mixer_h)
|
def min_vertex_cover(graph, constrained=True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the Minimum Vertex Cover problem,
for a given graph.
The goal of the Minimum Vertex Cover problem is to find the smallest
`vertex cover <https://en.wikipedia.org/wiki/Vertex_cover>`__ of a graph --- a collection of nodes such that
every edge in the graph has one of the nodes as an endpoint.
Args:
graph (nx.Graph): a graph defining the pairs of wires on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian):
.. UsageDetails::
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas
in `[arXiv:1709.03489] <https://arxiv.org/abs/1709.03489>`__.
The constrained MinVertexCover cost Hamiltonian is defined as:
.. math:: H_C \ = \ - \displaystyle\sum_{v \in V(G)} Z_{v}
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator applied to the :math:`i`-th
vertex.
The returned mixer Hamiltonian is `~qaoa.bit_flip_mixer` applied to :math:`G`.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|1\rangle` state
**Unconstrained**
The Minimum Vertex Cover cost Hamiltonian is defined as:
.. math:: H_C \ = \ \frac{3}{4} \displaystyle\sum_{(i, j) \in E(G)} (Z_i Z_j \ + \ Z_i \ + \ Z_j) \ - \ \displaystyle\sum_{i \in V(G)} Z_i
where :math:`E(G)` is the edges of :math:`G`, :math:`V(G)` is the set of vertices, and :math:`Z_i` is the Pauli-Z operator
acting on the :math:`i`-th vertex.
The returned mixer Hamiltonian is `~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states
"""
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
if constrained:
return (bit_driver(graph.nodes, 0), qaoa.bit_flip_mixer(graph, 1))
cost_h = edge_driver(graph, ['11', '10', '01']) + bit_driver(graph.nodes, 0)
mixer_h = qaoa.x_mixer(graph.nodes)
return (cost_h, mixer_h)
|
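A minimal usage sketch for min_vertex_cover above, assuming a PennyLane release that ships qaoa.min_vertex_cover; the triangle graph is illustrative:
import networkx as nx
from pennylane import qaoa

graph = nx.Graph([(0, 1), (1, 2), (0, 2)])

# constrained variant: bit-flip mixer keeps evolution in the feasible subspace
cost_h, mixer_h = qaoa.min_vertex_cover(graph, constrained=True)

# unconstrained variant: X mixer over all wires
cost_h_u, mixer_h_u = qaoa.min_vertex_cover(graph, constrained=False)
print(cost_h)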
57,810 |
def get_services_command(client: Client, args: Dict[str, Any]) -> CommandResults:
total_results, max_page_size = calculate_limits(args.get('limit', None))
provider = ','.join(argToList(args.get('provider')))
business_units = ','.join(argToList(args.get('business_unit')))
service_type = ','.join(argToList(args.get('service_type')))
tags = ','.join(argToList(args.get('tag')))
content_search = args.get('content_search')
inet_search = args.get('inet_search')
domain_search = args.get('domain_search')
arg_list = argToList(args.get('port_number'))
# this will trigger exceptions if data is invalid
all(check_int(i, 'port_number', 0, 65535, True) for i in arg_list)
port_number = ','.join(arg_list)
arg_list = argToList(args.get('country_code'))
if arg_list and not all(i.isalpha() and len(i) == 2 for i in arg_list):
raise ValueError('country_code must be an ISO-3166 two character country code')
country_code = ','.join([i.upper() for i in arg_list])
arg_list = argToList(args.get('activity_status'))
if arg_list and not all(i in ISSUE_ACTIVITY_STATUS for i in arg_list):
raise ValueError(f'activity_status must include: {", ".join(ISSUE_ACTIVITY_STATUS)}')
activity_status = ','.join(arg_list)
arg_list = argToList(args.get('discovery_type'))
if arg_list and not all(i in SERVICE_DISCOVERY_TYPE for i in arg_list):
raise ValueError(f'discovery_type must include: {", ".join(SERVICE_DISCOVERY_TYPE)}')
discovery_type = ','.join(arg_list)
arg_list = argToList(args.get('sort'))
if arg_list and not all(i in SERVICE_SORT_OPTIONS for i in arg_list):
raise ValueError(f'sort must include: {", ".join(SERVICE_SORT_OPTIONS)}')
sort = ','.join(arg_list)
services = list(
islice(
client.get_services(limit=max_page_size, content_search=content_search, provider=provider,
business_units=business_units, service_type=service_type,
inet_search=inet_search, domain_search=domain_search, port_number=port_number,
activity_status=activity_status, discovery_type=discovery_type,
tags=tags, country_code=country_code, sort=sort),
total_results
)
)
if len(services) < 1:
return CommandResults(readable_output='No Services Found')
readable_output = tableToMarkdown(
name='Expanse Services',
t=services,
headers=EXPANSE_SERVICE_READABLE_HEADER_LIST,
headerTransform=pascalToSpace
)
return CommandResults(
readable_output=readable_output, outputs_prefix="Expanse.Service", outputs_key_field="id", outputs=services
)
|
def get_services_command(client: Client, args: Dict[str, Any]) -> CommandResults:
total_results, max_page_size = calculate_limits(args.get('limit', None))
provider = ','.join(argToList(args.get('provider')))
business_units = ','.join(argToList(args.get('business_unit')))
service_type = ','.join(argToList(args.get('service_type')))
tags = ','.join(argToList(args.get('tag')))
content_search = args.get('content_search')
inet_search = args.get('inet_search')
domain_search = args.get('domain_search')
arg_list = argToList(args.get('port_number'))
# this will trigger exceptions if data is invalid
all(check_int(i, 'port_number', 0, 65535, True) for i in arg_list)
port_number = ','.join(arg_list)
arg_list = argToList(args.get('country_code'))
if arg_list and not all(i.isalpha() and len(i) == 2 for i in arg_list):
raise ValueError('country_code must be an ISO-3166 two character country code')
country_code = ','.join([i.upper() for i in arg_list])
arg_list = argToList(args.get('activity_status'))
if arg_list and not all(i in ISSUE_ACTIVITY_STATUS for i in arg_list):
raise ValueError(f'activity_status must include: {", ".join(ISSUE_ACTIVITY_STATUS)}')
activity_status = ','.join(arg_list)
arg_list = argToList(args.get('discovery_type'))
if arg_list and not all(i in SERVICE_DISCOVERY_TYPE for i in arg_list):
raise ValueError(f'discovery_type must include: {", ".join(SERVICE_DISCOVERY_TYPE)}')
discovery_type = ','.join(arg_list)
arg_list = argToList(args.get('sort'))
if arg_list and not all(i in SERVICE_SORT_OPTIONS for i in arg_list):
raise ValueError(f'sort must include: {", ".join(SERVICE_SORT_OPTIONS)}')
sort = ','.join(arg_list)
services = list(
islice(
client.get_services(limit=max_page_size, content_search=content_search, provider=provider,
business_units=business_units, service_type=service_type,
inet_search=inet_search, domain_search=domain_search, port_number=port_number,
activity_status=activity_status, discovery_type=discovery_type,
tags=tags, country_code=country_code, sort=sort),
total_results
)
)
if len(services) < 1:
return CommandResults(readable_output='No Services Found')
readable_output = tableToMarkdown(
name='Expanse Services',
t=services,
headers=EXPANSE_SERVICE_READABLE_HEADER_LIST,
headerTransform=pascalToSpace
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Expanse.Service",
outputs_key_field="id",
outputs=services
)
|
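The get_services_command pair above caps a lazily paginated generator with itertools.islice; a self-contained sketch of that pattern, with a dummy generator standing in for client.get_services:
from itertools import islice

def fetch_services(limit):
    # dummy stand-in for client.get_services: yields results one by one, page by page
    for i in range(10 * limit):
        yield {'id': i}

total_results, max_page_size = 25, 10
services = list(islice(fetch_services(limit=max_page_size), total_results))
assert len(services) == 25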
22,202 |
def run(environment_path=None):
if expression is None:
raise Exception("Python library cwltool must available to evaluate expressions.")
if environment_path is None:
environment_path = os.environ.get("GALAXY_EXPRESSION_INPUTS")
with open(environment_path, "r") as f:
raw_inputs = json.load(f)
outputs = raw_inputs["outputs"]
inputs = raw_inputs.copy()
del inputs["outputs"]
result = evaluate(None, inputs)
for output in outputs:
path = output["path"]
from_expression = "$(" + output["from_expression"] + ")"
output_value = expression.interpolate(from_expression, result)
with open(path, "w") as f:
json.dump(output_value, f)
|
def run(environment_path=None):
if expression is None:
raise Exception("Python library cwltool must be available to evaluate expressions.")
if environment_path is None:
environment_path = os.environ.get("GALAXY_EXPRESSION_INPUTS")
with open(environment_path, "r") as f:
raw_inputs = json.load(f)
outputs = raw_inputs["outputs"]
inputs = raw_inputs.copy()
del inputs["outputs"]
result = evaluate(None, inputs)
for output in outputs:
path = output["path"]
from_expression = "$(" + output["from_expression"] + ")"
output_value = expression.interpolate(from_expression, result)
with open(path, "w") as f:
json.dump(output_value, f)
|
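The run() pair above implies a JSON layout for the file named by GALAXY_EXPRESSION_INPUTS; a hypothetical example inferred only from the keys the code reads (field values and the expression are made up):
example_inputs = {
    "outputs": [
        {"path": "out0.json", "from_expression": "inputs.x + 1"}
    ],
    # every remaining key is passed to evaluate() as expression input context
    "x": 41
}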
44,164 |
def poly_quad_expectations(cov, mu, wires, device_wires, params, hbar=2.0):
r"""Calculates the expectation and variance for an arbitrary
polynomial of quadrature operators.
Args:
cov (array): covariance matrix
mu (array): vector of means
wires (Wires): wires to calculate the expectation for
device_wires (Wires): corresponding wires on the device
params (array): a :math:`(2N+1)\times (2N+1)` array containing the linear
and quadratic coefficients of the quadrature operators
:math:`(\I, \x_0, \p_0, \x_1, \p_1,\dots)`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
tuple: the mean and variance of the quadrature-polynomial observable
"""
Q = params[0]
# HACK, we need access to the Poly instance in order to expand the matrix!
# TODO: maybe we should make heisenberg_obs a class method or a static method to avoid this being a 'hack'?
# static analysis can misidentify qml.ops as the set instance qml.ops.qubit.ops
op = qml.ops.PolyXP(Q, wires=wires) # pylint:disable=no-member
Q = op.heisenberg_obs(device_wires)
if Q.ndim == 1:
d = np.r_[Q[1::2], Q[2::2]]
return d.T @ mu + Q[0], d.T @ cov @ d
# convert to the (I, x1,x2,..., p1,p2...) ordering
M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :]))
M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2]))
d1 = M[1:, 0]
d2 = M[0, 1:]
A = M[1:, 1:]
d = d1 + d2
k = M[0, 0]
d2 = 2 * A @ mu + d
k2 = mu.T @ A @ mu + mu.T @ d + k
ex = np.trace(A @ cov) + k2
var = 2 * np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2
modes = np.arange(2 * len(device_wires)).reshape(2, -1).T
groenewald_correction = np.sum([np.linalg.det(hbar * A[:, m][n]) for m in modes for n in modes])
var -= groenewald_correction
return ex, var
|
def poly_quad_expectations(cov, mu, wires, device_wires, params, hbar=2.0):
r"""Calculates the expectation and variance for an arbitrary
polynomial of quadrature operators.
Args:
cov (array): covariance matrix
mu (array): vector of means
wires (Wires): wires to calculate the expectation for
device_wires (Wires): corresponding wires on the device
params (array): a :math:`(2N+1)\times (2N+1)` array containing the linear
and quadratic coefficients of the quadrature operators
:math:`(\I, \x_0, \p_0, \x_1, \p_1,\dots)`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
tuple: the mean and variance of the quadrature-polynomial observable
"""
Q = params[0]
# HACK, we need access to the Poly instance in order to expand the matrix!
# TODO: maybe we should make heisenberg_obs a class method or a static method to avoid this being a 'hack'?
# Linting check disabled as static analysis can misidentify qml.ops as the set instance qml.ops.qubit.ops
op = qml.ops.PolyXP(Q, wires=wires) # pylint:disable=no-member
Q = op.heisenberg_obs(device_wires)
if Q.ndim == 1:
d = np.r_[Q[1::2], Q[2::2]]
return d.T @ mu + Q[0], d.T @ cov @ d
# convert to the (I, x1,x2,..., p1,p2...) ordering
M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :]))
M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2]))
d1 = M[1:, 0]
d2 = M[0, 1:]
A = M[1:, 1:]
d = d1 + d2
k = M[0, 0]
d2 = 2 * A @ mu + d
k2 = mu.T @ A @ mu + mu.T @ d + k
ex = np.trace(A @ cov) + k2
var = 2 * np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2
modes = np.arange(2 * len(device_wires)).reshape(2, -1).T
groenewald_correction = np.sum([np.linalg.det(hbar * A[:, m][n]) for m in modes for n in modes])
var -= groenewald_correction
return ex, var
|
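The mean and variance computed above follow the standard moments of a quadratic form of Gaussian variables; assuming :math:`A` is symmetric and :math:`x \sim \mathcal{N}(\mu, \Sigma)`, before the symmetric-ordering (Groenewold) correction:
.. math:: \mathbb{E}[x^T A x + d^T x + k] = \mathrm{Tr}(A \Sigma) + \mu^T A \mu + d^T \mu + k
.. math:: \mathrm{Var}[x^T A x + d^T x + k] = 2\,\mathrm{Tr}(A \Sigma A \Sigma) + (2 A \mu + d)^T \Sigma (2 A \mu + d)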
52,107 |
def main():
usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
version_string = 'pyinstrument {v}, on Python {pyv[0]}.{pyv[1]}.{pyv[2]}'.format(
v=pyinstrument.__version__,
pyv=sys.version_info,
)
parser = optparse.OptionParser(usage=usage, version=version_string)
parser.allow_interspersed_args = False
def dash_m_callback(option, opt, value, parser):
parser.values.module_name = value
# everything after the -m argument should be passed to that module
parser.values.module_args = parser.rargs + parser.largs
parser.rargs[:] = []
parser.largs[:] = []
parser.add_option('', '--load-prev',
dest='load_prev', action='store', metavar='ID',
help="Instead of running a script, load a previous report")
parser.add_option('-m', '',
dest='module_name', action='callback', callback=dash_m_callback,
type="str",
help="run library module as a script, like 'python -m module'")
parser.add_option('', '--path',
dest='path', action='store_true',
help="Lookup scriptfile to profile in the PATH")
parser.add_option('-o', '--outfile',
dest="outfile", action='store',
help="save to <outfile>", default=None)
parser.add_option('-r', '--renderer',
dest='renderer', action='store', type='string',
help=("how the report should be rendered. One of: 'text', 'html', 'json', or python "
"import path to a renderer class"),
default='text')
parser.add_option('', '--html',
dest="output_html", action='store_true',
help=optparse.SUPPRESS_HELP, default=False) # deprecated shortcut for --renderer=html
parser.add_option('-t', '--timeline',
dest='timeline', action='store_true',
help="render as a timeline - preserve ordering and don't condense repeated calls")
parser.add_option('', '--hide',
dest='hide_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to hide. Defaults to "
"'*{sep}lib{sep}*'.").format(sep=os.sep),
default='*{sep}lib{sep}*'.format(sep=os.sep))
parser.add_option('', '--hide-regex',
dest='hide_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to hide. Useful if --hide doesn't give "
"enough control."))
parser.add_option('', '--show',
dest='show_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to "
"show, regardless of --hide or --hide-regex. For example, use "
"--show '*/<library>/*' to show frames within a library that "
"would otherwise be hidden."))
parser.add_option('', '--show-regex',
dest='show_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to always show. "
"Useful if --show doesn't give enough control."))
parser.add_option('', '--show-all',
dest='show_all', action='store_true',
help="show everything", default=False)
parser.add_option('', '--unicode',
dest='unicode', action='store_true',
help='(text renderer only) force unicode text output')
parser.add_option('', '--no-unicode',
dest='unicode', action='store_false',
help='(text renderer only) force ascii text output')
parser.add_option('', '--color',
dest='color', action='store_true',
help='(text renderer only) force ansi color text output')
parser.add_option('', '--no-color',
dest='color', action='store_false',
help='(text renderer only) force no color text output')
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
options, args = parser.parse_args()
if args == [] and options.module_name is None and options.load_prev is None:
parser.print_help()
sys.exit(2)
if options.module_name is not None and options.path:
parser.error("The options --module and --path are mutually exclusive.")
if not options.hide_regex:
options.hide_regex = fnmatch.translate(options.hide_fnmatch)
if not options.show_regex and options.show_fnmatch:
options.show_regex = fnmatch.translate(options.show_fnmatch)
if options.show_all:
options.show_regex = r'.*'
if options.load_prev:
session = load_report(options.load_prev)
else:
if options.module_name is not None:
if not (sys.path[0] and os.path.samefile(sys.path[0], '.')):
# when called with '-m', search the cwd for that module
sys.path.insert(0, os.path.abspath('.'))
sys.argv[:] = [options.module_name] + options.module_args
code = "run_module(modname, run_name='__main__')"
globs = {
'run_module': runpy.run_module,
'modname': options.module_name
}
elif options.path is not None:
sys.argv[:] = args
progname = shutil.which(args[0])
if progname is None:
sys.exit('Error: program {} not found in PATH!'.format(args[0]))
code = "run_path(progname, run_name='__main__')"
globs = {
'run_path': runpy.run_path,
'progname': progname
}
else:
sys.argv[:] = args
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
profiler = Profiler()
profiler.start()
try:
exec_(code, globs, None)
except (SystemExit, KeyboardInterrupt):
pass
profiler.stop()
session = profiler.last_session
if options.output_html:
options.renderer = 'html'
output_to_temp_file = (options.renderer == 'html'
and not options.outfile
and file_is_a_tty(sys.stdout))
if options.outfile:
f = codecs.open(options.outfile, 'w', 'utf-8')
should_close_f_after_writing = True
elif not output_to_temp_file:
if PY2:
f = codecs.getwriter('utf-8')(sys.stdout)
else:
f = sys.stdout
should_close_f_after_writing = False
renderer_kwargs = {'processor_options': {
'hide_regex': options.hide_regex,
'show_regex': options.show_regex,
}}
if options.timeline is not None:
renderer_kwargs['timeline'] = options.timeline
if options.renderer == 'text':
unicode_override = options.unicode != None
color_override = options.color != None
unicode = options.unicode if unicode_override else file_supports_unicode(f)
color = options.color if color_override else file_supports_color(f)
renderer_kwargs.update({'unicode': unicode, 'color': color})
renderer_class = get_renderer_class(options.renderer)
renderer = renderer_class(**renderer_kwargs)
# remove this frame from the trace
renderer.processors.append(remove_first_pyinstrument_frame_processor)
if output_to_temp_file:
output_filename = renderer.open_in_browser(session)
print('stdout is a terminal, so saved profile output to %s' % output_filename)
else:
f.write(renderer.render(session))
if should_close_f_after_writing:
f.close()
if options.renderer == 'text':
_, report_identifier = save_report(session)
print('To view this report with different options, run:')
print(' pyinstrument --load-prev %s [options]' % report_identifier)
print('')
|
def main():
usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
version_string = 'pyinstrument {v}, on Python {pyv[0]}.{pyv[1]}.{pyv[2]}'.format(
v=pyinstrument.__version__,
pyv=sys.version_info,
)
parser = optparse.OptionParser(usage=usage, version=version_string)
parser.allow_interspersed_args = False
def dash_m_callback(option, opt, value, parser):
parser.values.module_name = value
# everything after the -m argument should be passed to that module
parser.values.module_args = parser.rargs + parser.largs
parser.rargs[:] = []
parser.largs[:] = []
parser.add_option('', '--load-prev',
dest='load_prev', action='store', metavar='ID',
help="Instead of running a script, load a previous report")
parser.add_option('-m', '',
dest='module_name', action='callback', callback=dash_m_callback,
type="str",
help="run library module as a script, like 'python -m module'")
parser.add_option('', '--from-path',
dest='path', action='store_true',
help="Instead of the working directory, look for scriptfile in the PATH environment variable")
parser.add_option('-o', '--outfile',
dest="outfile", action='store',
help="save to <outfile>", default=None)
parser.add_option('-r', '--renderer',
dest='renderer', action='store', type='string',
help=("how the report should be rendered. One of: 'text', 'html', 'json', or python "
"import path to a renderer class"),
default='text')
parser.add_option('', '--html',
dest="output_html", action='store_true',
help=optparse.SUPPRESS_HELP, default=False) # deprecated shortcut for --renderer=html
parser.add_option('-t', '--timeline',
dest='timeline', action='store_true',
help="render as a timeline - preserve ordering and don't condense repeated calls")
parser.add_option('', '--hide',
dest='hide_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to hide. Defaults to "
"'*{sep}lib{sep}*'.").format(sep=os.sep),
default='*{sep}lib{sep}*'.format(sep=os.sep))
parser.add_option('', '--hide-regex',
dest='hide_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to hide. Useful if --hide doesn't give "
"enough control."))
parser.add_option('', '--show',
dest='show_fnmatch', action='store', metavar='EXPR',
help=("glob-style pattern matching the file paths whose frames to "
"show, regardless of --hide or --hide-regex. For example, use "
"--show '*/<library>/*' to show frames within a library that "
"would otherwise be hidden."))
parser.add_option('', '--show-regex',
dest='show_regex', action='store', metavar='REGEX',
help=("regex matching the file paths whose frames to always show. "
"Useful if --show doesn't give enough control."))
parser.add_option('', '--show-all',
dest='show_all', action='store_true',
help="show everything", default=False)
parser.add_option('', '--unicode',
dest='unicode', action='store_true',
help='(text renderer only) force unicode text output')
parser.add_option('', '--no-unicode',
dest='unicode', action='store_false',
help='(text renderer only) force ascii text output')
parser.add_option('', '--color',
dest='color', action='store_true',
help='(text renderer only) force ansi color text output')
parser.add_option('', '--no-color',
dest='color', action='store_false',
help='(text renderer only) force no color text output')
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
options, args = parser.parse_args()
if args == [] and options.module_name is None and options.load_prev is None:
parser.print_help()
sys.exit(2)
if options.module_name is not None and options.path:
parser.error("The options --module and --path are mutually exclusive.")
if not options.hide_regex:
options.hide_regex = fnmatch.translate(options.hide_fnmatch)
if not options.show_regex and options.show_fnmatch:
options.show_regex = fnmatch.translate(options.show_fnmatch)
if options.show_all:
options.show_regex = r'.*'
if options.load_prev:
session = load_report(options.load_prev)
else:
if options.module_name is not None:
if not (sys.path[0] and os.path.samefile(sys.path[0], '.')):
# when called with '-m', search the cwd for that module
sys.path.insert(0, os.path.abspath('.'))
sys.argv[:] = [options.module_name] + options.module_args
code = "run_module(modname, run_name='__main__')"
globs = {
'run_module': runpy.run_module,
'modname': options.module_name
}
elif options.path is not None:
sys.argv[:] = args
progname = shutil.which(args[0])
if progname is None:
sys.exit('Error: program {} not found in PATH!'.format(args[0]))
code = "run_path(progname, run_name='__main__')"
globs = {
'run_path': runpy.run_path,
'progname': progname
}
else:
sys.argv[:] = args
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
profiler = Profiler()
profiler.start()
try:
exec_(code, globs, None)
except (SystemExit, KeyboardInterrupt):
pass
profiler.stop()
session = profiler.last_session
if options.output_html:
options.renderer = 'html'
output_to_temp_file = (options.renderer == 'html'
and not options.outfile
and file_is_a_tty(sys.stdout))
if options.outfile:
f = codecs.open(options.outfile, 'w', 'utf-8')
should_close_f_after_writing = True
elif not output_to_temp_file:
if PY2:
f = codecs.getwriter('utf-8')(sys.stdout)
else:
f = sys.stdout
should_close_f_after_writing = False
renderer_kwargs = {'processor_options': {
'hide_regex': options.hide_regex,
'show_regex': options.show_regex,
}}
if options.timeline is not None:
renderer_kwargs['timeline'] = options.timeline
if options.renderer == 'text':
unicode_override = options.unicode != None
color_override = options.color != None
unicode = options.unicode if unicode_override else file_supports_unicode(f)
color = options.color if color_override else file_supports_color(f)
renderer_kwargs.update({'unicode': unicode, 'color': color})
renderer_class = get_renderer_class(options.renderer)
renderer = renderer_class(**renderer_kwargs)
# remove this frame from the trace
renderer.processors.append(remove_first_pyinstrument_frame_processor)
if output_to_temp_file:
output_filename = renderer.open_in_browser(session)
print('stdout is a terminal, so saved profile output to %s' % output_filename)
else:
f.write(renderer.render(session))
if should_close_f_after_writing:
f.close()
if options.renderer == 'text':
_, report_identifier = save_report(session)
print('To view this report with different options, run:')
print(' pyinstrument --load-prev %s [options]' % report_identifier)
print('')
|
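Based on the options defined above, a typical invocation is `pyinstrument -r html -o profile.html myscript.py arg1`; `pyinstrument -m mypackage.module` profiles a module the way `python -m` does, and `pyinstrument --load-prev ID -r html` re-renders a previously saved report (the script and module names here are illustrative).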
54,256 |
def concat(dfs, ignore_meta_conflict=False, **kwargs):
"""Concatenate a series of IamDataFrame-like objects
Parameters
----------
dfs : iterable of IamDataFrames
A list of :class:`IamDataFrame` instances
ignore_meta_conflict : bool, default False
If False, raise an error if any meta columns present in `dfs` are not identical.
If True, values in earlier elements of `dfs` take precedence.
kwargs
Passed to :class:`IamDataFrame(other, **kwargs) <IamDataFrame>`
for any item of `dfs` which isn't already an IamDataFrame.
Returns
-------
IamDataFrame
Raises
------
TypeError
If `dfs` is not a list.
ValueError
If time domain or other timeseries data index dimension don't match.
"""
if not islistable(dfs) or isinstance(dfs, pd.DataFrame):
raise TypeError(
f"First argument must be an iterable, "
f"you passed an object of type '{dfs.__class__.__name__}'!"
)
dfs_iter = iter(dfs)
# cast to IamDataFrame if necessary
def as_iamdataframe(df):
return df if isinstance(df, IamDataFrame) else IamDataFrame(df, **kwargs)
df = as_iamdataframe(next(dfs_iter))
ret_data, ret_meta = [df._data], df.meta
index, time_col = df._data.index.names, df.time_col
for df in dfs_iter:
# skip merging meta if element is a pd.DataFrame
_meta_merge = not isinstance(df, pd.DataFrame)
df = as_iamdataframe(df)
if df.time_col != time_col:
raise ValueError("Items have incompatible time format ('year' vs. 'time')!")
if df._data.index.names != index:
raise ValueError(
"Items have incompatible timeseries data index dimensions!"
)
ret_data.append(df._data)
if _meta_merge:
ret_meta = merge_meta(ret_meta, df.meta, ignore_meta_conflict)
# return as new IamDataFrame, this will verify integrity as part of `__init__()`
return IamDataFrame(pd.concat(ret_data, verify_integrity=False), meta=ret_meta)
|
def concat(dfs, ignore_meta_conflict=False, **kwargs):
"""Concatenate a series of IamDataFrame-like objects
Parameters
----------
dfs : iterable of IamDataFrames
A list of :class:`IamDataFrame` instances
ignore_meta_conflict : bool, default False
If False, raise an error if any meta columns present in `dfs` are not identical.
If True, values in earlier elements of `dfs` take precedence.
kwargs
Passed to :class:`IamDataFrame(other, **kwargs) <IamDataFrame>`
for any item of `dfs` which isn't already an IamDataFrame.
Returns
-------
IamDataFrame
Raises
------
TypeError
If `dfs` is not a list.
ValueError
If time domain or other timeseries data index dimension don't match.
"""
if not islistable(dfs) or isinstance(dfs, pd.DataFrame):
raise TypeError(
f"First argument must be an iterable, "
f"you passed an object of type '{dfs.__class__.__name__}'!"
)
dfs_iter = iter(dfs)
# cast to IamDataFrame if necessary
def as_iamdataframe(df):
return df if isinstance(df, IamDataFrame) else IamDataFrame(df, **kwargs)
df = next(dfs_iter, None)
if df is None:
raise ValueError("No objects to concatenate")
df = as_iamdataframe(df)
ret_data, ret_meta = [df._data], df.meta
index, time_col = df._data.index.names, df.time_col
for df in dfs_iter:
# skip merging meta if element is a pd.DataFrame
_meta_merge = not isinstance(df, pd.DataFrame)
df = as_iamdataframe(df)
if df.time_col != time_col:
raise ValueError("Items have incompatible time format ('year' vs. 'time')!")
if df._data.index.names != index:
raise ValueError(
"Items have incompatible timeseries data index dimensions!"
)
ret_data.append(df._data)
if _meta_merge:
ret_meta = merge_meta(ret_meta, df.meta, ignore_meta_conflict)
# return as new IamDataFrame, this will verify integrity as part of `__init__()`
return IamDataFrame(pd.concat(ret_data, verify_integrity=False), meta=ret_meta)
|
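The modified concat above guards the first element with next(dfs_iter, None); a small sketch of why the default matters compared with bare next():
empty = iter([])

# bare next() on an exhausted iterator raises StopIteration
try:
    first = next(empty)
except StopIteration:
    first = None

# next() with a default returns it instead of raising
assert next(iter([]), None) is None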
37,388 |
def control(operation: Union[Gate, ControlledGate],
num_ctrl_qubits: Optional[int] = 1,
label: Optional[Union[None, str]] = None,
ctrl_state: Optional[Union[None, int, str]] = None) -> ControlledGate:
"""Return controlled version of gate using controlled rotations. This function
first checks the name of the operation to see if it knows of a method from which
to generate a controlled version. Currently these are `x`, `rx`, `ry`, and `rz`.
If a method is not directly known, it calls the unroller to convert to `u1`, `u3`,
and `cx` gates.
Args:
operation: The gate used to create the ControlledGate.
num_ctrl_qubits: The number of controls to add to gate (default=1).
label: An optional gate label.
ctrl_state: The control state in decimal or as
a bitstring (e.g. '111'). If specified as a bitstring the length
must equal num_ctrl_qubits, MSB on left. If None, use
2**num_ctrl_qubits-1.
Returns:
Controlled version of gate.
Raises:
CircuitError: gate contains non-gate in definition
"""
from math import pi
# pylint: disable=cyclic-import
import qiskit.circuit.controlledgate as controlledgate
q_control = QuantumRegister(num_ctrl_qubits, name='control')
q_target = QuantumRegister(operation.num_qubits, name='target')
q_ancillae = None # TODO: add
controlled_circ = QuantumCircuit(q_control, q_target,
name='c_{}'.format(operation.name))
global_phase = 0
if operation.name == 'x' or (
isinstance(operation, controlledgate.ControlledGate) and
operation.base_gate.name == 'x'):
controlled_circ.mct(q_control[:] + q_target[:-1], q_target[-1], q_ancillae)
if operation.definition is not None and operation.definition.global_phase:
global_phase += operation.definition.global_phase
else:
basis = ['p', 'u', 'x', 'z', 'rx', 'ry', 'rz', 'cx']
unrolled_gate = _unroll_gate(operation, basis_gates=basis)
if unrolled_gate.definition.global_phase:
global_phase += unrolled_gate.definition.global_phase
for gate, qreg, _ in unrolled_gate.definition.data:
if gate.name == 'x':
controlled_circ.mct(q_control, q_target[qreg[0].index],
q_ancillae)
elif gate.name == 'rx':
controlled_circ.mcrx(gate.definition.data[0][0].params[0],
q_control, q_target[qreg[0].index],
use_basis_gates=True)
elif gate.name == 'ry':
controlled_circ.mcry(gate.definition.data[0][0].params[0],
q_control, q_target[qreg[0].index],
q_ancillae, mode='noancilla',
use_basis_gates=True)
elif gate.name == 'rz':
controlled_circ.mcrz(gate.definition.data[0][0].params[0],
q_control, q_target[qreg[0].index],
use_basis_gates=True)
elif gate.name == 'p':
from qiskit.circuit.library import MCPhaseGate
controlled_circ.append(MCPhaseGate(gate.params[0], num_ctrl_qubits),
q_control[:] + [q_target[qreg[0].index]])
elif gate.name == 'cx':
controlled_circ.mct(q_control[:] + [q_target[qreg[0].index]],
q_target[qreg[1].index],
q_ancillae)
elif gate.name == 'u':
theta, phi, lamb = gate.params
if num_ctrl_qubits == 1:
if theta == 0 and phi == 0:
controlled_circ.cp(lamb, q_control[0], q_target[qreg[0].index])
else:
controlled_circ.cu(theta, phi, lamb, 0, q_control[0],
q_target[qreg[0].index])
else:
if phi == -pi / 2 and lamb == pi / 2:
controlled_circ.mcrx(theta, q_control, q_target[qreg[0].index],
use_basis_gates=True)
elif phi == 0 and lamb == 0:
controlled_circ.mcry(theta, q_control, q_target[qreg[0].index],
q_ancillae, use_basis_gates=True)
elif theta == 0 and phi == 0:
controlled_circ.mcrz(lamb, q_control, q_target[qreg[0].index],
use_basis_gates=True)
else:
controlled_circ.mcrz(lamb, q_control, q_target[qreg[0].index],
use_basis_gates=True)
controlled_circ.mcry(theta, q_control, q_target[qreg[0].index],
q_ancillae, use_basis_gates=True)
controlled_circ.mcrz(phi, q_control, q_target[qreg[0].index],
use_basis_gates=True)
elif gate.name == 'z':
controlled_circ.h(q_target[qreg[0].index])
controlled_circ.mct(q_control, q_target[qreg[0].index],
q_ancillae)
controlled_circ.h(q_target[qreg[0].index])
else:
raise CircuitError('gate contains non-controllable instructions: {}'.format(
gate.name))
if gate.definition is not None and gate.definition.global_phase:
global_phase += gate.definition.global_phase
# apply controlled global phase
if global_phase:
if len(q_control) < 2:
controlled_circ.p(global_phase, q_control)
else:
controlled_circ.mcp(global_phase,
q_control[:-1], q_control[-1])
if isinstance(operation, controlledgate.ControlledGate):
new_num_ctrl_qubits = num_ctrl_qubits + operation.num_ctrl_qubits
new_ctrl_state = operation.ctrl_state << num_ctrl_qubits | ctrl_state
base_name = operation.base_gate.name
base_gate = operation.base_gate
else:
new_num_ctrl_qubits = num_ctrl_qubits
new_ctrl_state = ctrl_state
base_name = operation.name
base_gate = operation
# In order to maintain some backward compatibility with gate names this
# uses a naming convention where if the number of controls is <=2 the gate
# is named like "cc<base_gate.name>", else it is named like
# "c<num_ctrl_qubits><base_name>".
if new_num_ctrl_qubits > 2:
ctrl_substr = 'c{:d}'.format(new_num_ctrl_qubits)
else:
ctrl_substr = ('{0}' * new_num_ctrl_qubits).format('c')
new_name = '{}{}'.format(ctrl_substr, base_name)
cgate = controlledgate.ControlledGate(new_name,
controlled_circ.num_qubits,
operation.params,
label=label,
num_ctrl_qubits=new_num_ctrl_qubits,
definition=controlled_circ,
ctrl_state=new_ctrl_state,
base_gate=base_gate)
return cgate
|
def control(operation: Union[Gate, ControlledGate],
num_ctrl_qubits: Optional[int] = 1,
label: Optional[Union[None, str]] = None,
ctrl_state: Optional[Union[None, int, str]] = None) -> ControlledGate:
"""Return controlled version of gate using controlled rotations. This function
first checks the name of the operation to see if it knows of a method from which
to generate a controlled version. Currently these are `x`, `rx`, `ry`, and `rz`.
If a method is not directly known, it calls the unroller to convert to `u1`, `u3`,
and `cx` gates.
Args:
operation: The gate used to create the ControlledGate.
num_ctrl_qubits: The number of controls to add to gate (default=1).
label: An optional gate label.
ctrl_state: The control state in decimal or as
a bitstring (e.g. '111'). If specified as a bitstring the length
must equal num_ctrl_qubits, MSB on left. If None, use
2**num_ctrl_qubits-1.
Returns:
Controlled version of gate.
Raises:
CircuitError: gate contains non-gate in definition
"""
from math import pi
# pylint: disable=cyclic-import
import qiskit.circuit.controlledgate as controlledgate
q_control = QuantumRegister(num_ctrl_qubits, name='control')
q_target = QuantumRegister(operation.num_qubits, name='target')
q_ancillae = None # TODO: add
controlled_circ = QuantumCircuit(q_control, q_target,
name='c_{}'.format(operation.name))
global_phase = 0
if operation.name == 'x' or (
isinstance(operation, controlledgate.ControlledGate) and
operation.base_gate.name == 'x'):
controlled_circ.mct(q_control[:] + q_target[:-1], q_target[-1], q_ancillae)
if operation.definition is not None and operation.definition.global_phase:
global_phase += operation.definition.global_phase
else:
basis = ['p', 'u', 'x', 'z', 'rx', 'ry', 'rz', 'cx']
unrolled_gate = _unroll_gate(operation, basis_gates=basis)
if unrolled_gate.definition.global_phase:
global_phase += unrolled_gate.definition.global_phase
for gate, qreg, _ in unrolled_gate.definition.data:
if gate.name == 'x':
controlled_circ.mct(q_control, q_target[qreg[0].index],
q_ancillae)
elif gate.name == 'rx':
controlled_circ.mcrx(gate.definition.data[0][0].params[0],
q_control, q_target[qreg[0].index],
use_basis_gates=True)
elif gate.name == 'ry':
controlled_circ.mcry(gate.definition.data[0][0].params[0],
q_control, q_target[qreg[0].index],
q_ancillae, mode='noancilla',
use_basis_gates=True)
elif gate.name == 'rz':
controlled_circ.mcrz(gate.definition.data[0][0].params[0],
q_control, q_target[qreg[0].index],
use_basis_gates=True)
elif gate.name == 'p':
from qiskit.circuit.library import MCPhaseGate
controlled_circ.append(MCPhaseGate(gate.params[0], num_ctrl_qubits),
q_control[:] + [q_target[qreg[0].index]])
elif gate.name == 'cx':
controlled_circ.mct(q_control[:] + [q_target[qreg[0].index]],
q_target[qreg[1].index],
q_ancillae)
elif gate.name == 'u':
theta, phi, lamb = gate.params
if num_ctrl_qubits == 1:
if theta == 0 and phi == 0:
controlled_circ.cp(lamb, q_control[0], q_target[qreg[0].index])
else:
controlled_circ.cu(theta, phi, lamb, 0, q_control[0],
q_target[qreg[0].index])
else:
if phi == -pi / 2 and lamb == pi / 2:
controlled_circ.mcrx(theta, q_control, q_target[qreg[0].index],
use_basis_gates=True)
elif phi == 0 and lamb == 0:
controlled_circ.mcry(theta, q_control, q_target[qreg[0].index],
q_ancillae, use_basis_gates=True)
elif theta == 0 and phi == 0:
controlled_circ.mcrz(lamb, q_control, q_target[qreg[0].index],
use_basis_gates=True)
else:
controlled_circ.mcrz(lamb, q_control, q_target[qreg[0].index],
use_basis_gates=True)
controlled_circ.mcry(theta, q_control, q_target[qreg[0].index],
q_ancillae, use_basis_gates=True)
controlled_circ.mcrz(phi, q_control, q_target[qreg[0].index],
use_basis_gates=True)
elif gate.name == 'z':
controlled_circ.h(q_target[qreg[0].index])
controlled_circ.mcx(q_control, q_target[qreg[0].index],
q_ancillae)
controlled_circ.h(q_target[qreg[0].index])
else:
raise CircuitError('gate contains non-controllable instructions: {}'.format(
gate.name))
if gate.definition is not None and gate.definition.global_phase:
global_phase += gate.definition.global_phase
# apply controlled global phase
if global_phase:
if len(q_control) < 2:
controlled_circ.p(global_phase, q_control)
else:
controlled_circ.mcp(global_phase,
q_control[:-1], q_control[-1])
if isinstance(operation, controlledgate.ControlledGate):
new_num_ctrl_qubits = num_ctrl_qubits + operation.num_ctrl_qubits
new_ctrl_state = operation.ctrl_state << num_ctrl_qubits | ctrl_state
base_name = operation.base_gate.name
base_gate = operation.base_gate
else:
new_num_ctrl_qubits = num_ctrl_qubits
new_ctrl_state = ctrl_state
base_name = operation.name
base_gate = operation
# In order to maintain some backward compatibility with gate names this
# uses a naming convention where if the number of controls is <=2 the gate
# is named like "cc<base_gate.name>", else it is named like
# "c<num_ctrl_qubits><base_name>".
if new_num_ctrl_qubits > 2:
ctrl_substr = 'c{:d}'.format(new_num_ctrl_qubits)
else:
ctrl_substr = ('{0}' * new_num_ctrl_qubits).format('c')
new_name = '{}{}'.format(ctrl_substr, base_name)
cgate = controlledgate.ControlledGate(new_name,
controlled_circ.num_qubits,
operation.params,
label=label,
num_ctrl_qubits=new_num_ctrl_qubits,
definition=controlled_circ,
ctrl_state=new_ctrl_state,
base_gate=base_gate)
return cgate
|
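A hedged usage sketch related to the control() helper above: user code normally requests a controlled gate through Gate.control, which may dispatch to a routine like this one for gates without a dedicated controlled form; RYGate and the argument values are illustrative:
from qiskit.circuit.library import RYGate

c2ry = RYGate(0.3).control(2)   # two control qubits
print(c2ry.num_ctrl_qubits)     # 2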
40,085 |
def make_response_error(errmsg, in_httpcode=None, details=None):
if details is None:
details = {}
if not in_httpcode:
httpcode = 500
else:
httpcode = in_httpcode
msg = str(errmsg)
ret = {
'message': msg,
'httpcode': int(httpcode),
'detail': details
}
if 'error_codes' not in ret['detail']:
ret['detail']['error_codes'] = []
if isinstance(errmsg, Exception):
if 'anchore_error_json' in errmsg.__dict__:
# Try to load it as json
try:
anchore_error_json = errmsg.__dict__.get('anchore_error_json', None)
if type(anchore_error_json) == dict:
err_json = anchore_error_json
else:
err_json = json.loads(anchore_error_json)
except (TypeError, ValueError):
# Then it may just be a string, we cannot do anything with it
logger.debug('Failed to parse anchore_error_json as json')
return ret
if {'message', 'httpcode', 'detail'}.issubset(set(err_json)):
ret.update(err_json)
try:
if {'error_code'}.issubset(set(err_json)) and err_json.get('error_code', None):
if 'error_codes' not in ret['detail']:
ret['detail']['error_codes'] = []
ret['detail']['error_codes'].append(err_json.get('error_code'))
except KeyError:
logger.warn("unable to marshal error details: source error {}".format(errmsg.__dict__))
return ret
|
def make_response_error(errmsg, in_httpcode=None, details=None):
if details is None:
details = {}
if not in_httpcode:
httpcode = 500
else:
httpcode = in_httpcode
msg = str(errmsg)
ret = {
'message': msg,
'httpcode': int(httpcode),
'detail': details
}
if 'error_codes' not in ret['detail']:
ret['detail']['error_codes'] = []
if isinstance(errmsg, Exception):
if 'anchore_error_json' in errmsg.__dict__:
# Try to load it as json
try:
anchore_error_json = errmsg.__dict__.get('anchore_error_json', None)
if isinstance(anchore_error_json, dict):
err_json = anchore_error_json
else:
err_json = json.loads(anchore_error_json)
except (TypeError, ValueError):
# Then it may just be a string, we cannot do anything with it
logger.debug('Failed to parse anchore_error_json as json')
return ret
if {'message', 'httpcode', 'detail'}.issubset(set(err_json)):
ret.update(err_json)
try:
if {'error_code'}.issubset(set(err_json)) and err_json.get('error_code', None):
if 'error_codes' not in ret['detail']:
ret['detail']['error_codes'] = []
ret['detail']['error_codes'].append(err_json.get('error_code'))
except KeyError:
logger.warn("unable to marshal error details: source error {}".format(errmsg.__dict__))
return ret
|
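A short usage sketch of the error-marshalling helper above; the values and the injected anchore_error_json are illustrative:
resp = make_response_error('resource not found', in_httpcode=404)
# -> {'message': 'resource not found', 'httpcode': 404, 'detail': {'error_codes': []}}

err = Exception('backend failure')
err.anchore_error_json = {'message': 'backend failure', 'httpcode': 503,
                          'detail': {}, 'error_code': 'SERVICE_UNAVAILABLE'}
resp = make_response_error(err, in_httpcode=500)
# httpcode is overridden to 503 and 'SERVICE_UNAVAILABLE' ends up in detail['error_codes']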
37,599 |
def _get_ucry_cz(nqubits, angles):
"""
Get uniformly controlled Ry gate in CZ-Ry.
"""
# avoids circular import
from qiskit.transpiler.passes.basis.unroller import Unroller
qc = QuantumCircuit(nqubits)
qc.ucry(angles, list(range(nqubits - 1)), [nqubits - 1])
dag = circuit_to_dag(qc)
unroll = Unroller(["ry", "cx"])
dag2 = unroll.run(dag)
cz = CZGate()
cxtype = type(CXGate())
node = None
for node in dag2.op_nodes(op=cxtype):
dag2.substitute_node(node, cz, inplace=True)
last_node = _get_last_op_node(dag2)
if node.name != "cz":
raise ValueError("last node is not cz as expected")
dag2.remove_op_node(last_node)
qc2 = dag_to_circuit(dag2)
return qc2
|
def _get_ucry_cz(nqubits, angles):
"""
Get uniformly controlled Ry gate in CZ-Ry.
"""
# avoids circular import
from qiskit.transpiler.passes.basis.unroller import Unroller
qc = QuantumCircuit(nqubits)
qc.ucry(angles, list(range(nqubits - 1)), [nqubits - 1])
dag = circuit_to_dag(qc)
unroll = Unroller(["ry", "cx"])
dag2 = unroll.run(dag)
cz = CZGate()
cxtype = CXGate
node = None
for node in dag2.op_nodes(op=cxtype):
dag2.substitute_node(node, cz, inplace=True)
last_node = _get_last_op_node(dag2)
if node.name != "cz":
raise ValueError("last node is not cz as expected")
dag2.remove_op_node(last_node)
qc2 = dag_to_circuit(dag2)
return qc2
|
28,554 |
def plot_bpv(
data,
kind="u_value",
t_stat="median",
bpv=True,
mean=True,
reference="samples",
n_ref=100,
hdi_prob=0.94,
color="C0",
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
ax=None,
backend=None,
plot_ref_kwargs=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Parameters
----------
data : az.InferenceData object
InferenceData object containing the observed and posterior/prior predictive data.
kind : str
Type of plot to display (u_value, p_value, t_stat). Defaults to u_value.
t_stat : str, float, or callable
T statistics to compute from the observations and predictive distributions. Allowed strings
are "mean", "median" or "std". Defaults to "median". Alternative a quantile can be passed
as a float (or str) in the interval (0, 1). Finally a user defined function is also
acepted, see examples section for details.
bpv : bool
If True (default) add the bayesian p_value to the legend when kind = t_stat.
mean : bool
Whether or not to plot the mean T statistic. Defaults to True.
reference : str
How to compute the distributions used as reference for u_values or p_values. Allowed values
are "analytical" (default) and "samples". Use `None` to do not plot any reference.
n_ref : int, optional
Number of reference distributions to sample when `reference=samples`
hdi_prob: float, optional
Probability for the highest density interval for the analytical reference distribution when
computing u_values. Should be in the interval (0, 1]. Defaults to
0.94.
color : str
Matplotlib color
figsize : tuple
Figure size. If None it will be defined automatically.
textsize : float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs : dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names : list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the variables by `~`
when you want to exclude them from the plot.
filter_vars : {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
legend : bool
Add legend to figure. By default True.
ax : numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend : str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
plot_ref_kwargs : dict, optional
Extra keyword arguments to control how reference is represented. Passed to `plt.plot` or
`plt.axhspan`(when `kind=u_value` and `reference=analytical`).
backend_kwargs : bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
group : {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show : bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
Plot Bayesian p_values.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="p_value")
Plot custom t statistic comparison.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="t_stat", t_stat=lambda x:np.percentile(x, q=50, axis=-1))
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in ("{}_predictive".format(group), "observed_data"):
if not hasattr(data, groups):
raise TypeError('`data` argument must have the group "{group}"'.format(group=groups))
if kind.lower() not in ("t_stat", "u_value", "p_value"):
raise TypeError("`kind` argument must be either `t_stat`, `u_value`, or `p_value`")
if reference is not None:
if reference.lower() not in ("analytical", "samples"):
raise TypeError(
"`reference` argument must be either `analytical`, `samples`, or `None`"
)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
observed = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed.data_vars)
var_names = _var_names(var_names, observed, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
),
"plot_t_stats",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters)
(figsize, ax_labelsize, _, _, linewidth, markersize) = _scale_fig_size(
figsize, textsize, rows, cols
)
if plot_ref_kwargs is None:
plot_ref_kwargs = {}
if kind == "p_value" and reference == "analytical":
plot_ref_kwargs.setdefault("color", "k")
plot_ref_kwargs.setdefault("linestyle", "--")
else:
plot_ref_kwargs.setdefault("alpha", 0.1)
plot_ref_kwargs.setdefault("color", color)
if backend == "bokeh":
color = to_hex(color)
plot_ref_kwargs.pop("color")
if kind == "p_value" and reference == "analytical":
plot_ref_kwargs.pop("linestyle")
plot_ref_kwargs.setdefault("line_dash", "dashed")
plot_ref_kwargs.setdefault("color", "black")
else:
plot_ref_kwargs.setdefault("color", color)
bpvplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
total_pp_samples=total_pp_samples,
kind=kind,
bpv=bpv,
t_stat=t_stat,
reference=reference,
n_ref=n_ref,
hdi_prob=hdi_prob,
mean=mean,
color=color,
figsize=figsize,
ax_labelsize=ax_labelsize,
markersize=markersize,
linewidth=linewidth,
plot_ref_kwargs=plot_ref_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_bpv", "bpvplot", backend)
axes = plot(**bpvplot_kwargs)
return axes
|
def plot_bpv(
data,
kind="u_value",
t_stat="median",
bpv=True,
mean=True,
reference="samples",
n_ref=100,
hdi_prob=0.94,
color="C0",
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
ax=None,
backend=None,
plot_ref_kwargs=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Parameters
----------
data : az.InferenceData object
InferenceData object containing the observed and posterior/prior predictive data.
kind : str
Type of plot to display (u_value, p_value, t_stat). Defaults to u_value.
t_stat : str, float, or callable
T statistics to compute from the observations and predictive distributions. Allowed strings
are "mean", "median" or "std". Defaults to "median". Alternative a quantile can be passed
as a float (or str) in the interval (0, 1). Finally a user defined function is also
acepted, see examples section for details.
bpv : bool
If True (default) add the bayesian p_value to the legend when kind = t_stat.
mean : bool
Whether or not to plot the mean T statistic. Defaults to True.
reference : str
How to compute the distributions used as reference for u_values or p_values. Allowed values
are "analytical" (default) and "samples". Use `None` to do not plot any reference. Defaults to "samples".
n_ref : int, optional
Number of reference distributions to sample when `reference=samples`
hdi_prob: float, optional
Probability for the highest density interval for the analytical reference distribution when
computing u_values. Should be in the interval (0, 1]. Defaults to
0.94.
color : str
Matplotlib color
figsize : tuple
Figure size. If None it will be defined automatically.
textsize : float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs : dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names : list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the variables by `~`
when you want to exclude them from the plot.
filter_vars : {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
legend : bool
Add legend to figure. By default True.
ax : numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend : str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
plot_ref_kwargs : dict, optional
Extra keyword arguments to control how reference is represented. Passed to `plt.plot` or
`plt.axhspan`(when `kind=u_value` and `reference=analytical`).
backend_kwargs : bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
group : {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show : bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
Plot Bayesian p_values.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="p_value")
Plot custom t statistic comparison.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="t_stat", t_stat=lambda x:np.percentile(x, q=50, axis=-1))
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in ("{}_predictive".format(group), "observed_data"):
if not hasattr(data, groups):
raise TypeError('`data` argument must have the group "{group}"'.format(group=groups))
if kind.lower() not in ("t_stat", "u_value", "p_value"):
raise TypeError("`kind` argument must be either `t_stat`, `u_value`, or `p_value`")
if reference is not None:
if reference.lower() not in ("analytical", "samples"):
raise TypeError(
"`reference` argument must be either `analytical`, `samples`, or `None`"
)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
observed = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed.data_vars)
var_names = _var_names(var_names, observed, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
),
"plot_t_stats",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters)
(figsize, ax_labelsize, _, _, linewidth, markersize) = _scale_fig_size(
figsize, textsize, rows, cols
)
if plot_ref_kwargs is None:
plot_ref_kwargs = {}
if kind == "p_value" and reference == "analytical":
plot_ref_kwargs.setdefault("color", "k")
plot_ref_kwargs.setdefault("linestyle", "--")
else:
plot_ref_kwargs.setdefault("alpha", 0.1)
plot_ref_kwargs.setdefault("color", color)
if backend == "bokeh":
color = to_hex(color)
plot_ref_kwargs.pop("color")
if kind == "p_value" and reference == "analytical":
plot_ref_kwargs.pop("linestyle")
plot_ref_kwargs.setdefault("line_dash", "dashed")
plot_ref_kwargs.setdefault("color", "black")
else:
plot_ref_kwargs.setdefault("color", color)
bpvplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
total_pp_samples=total_pp_samples,
kind=kind,
bpv=bpv,
t_stat=t_stat,
reference=reference,
n_ref=n_ref,
hdi_prob=hdi_prob,
mean=mean,
color=color,
figsize=figsize,
ax_labelsize=ax_labelsize,
markersize=markersize,
linewidth=linewidth,
plot_ref_kwargs=plot_ref_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_bpv", "bpvplot", backend)
axes = plot(**bpvplot_kwargs)
return axes
|
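The `t_stat` callable documented above is easiest to understand from a short run. Below is a minimal usage sketch, assuming arviz and its bundled example data are available; the 25th-percentile statistic is an illustrative choice, not part of the original.
import numpy as np
import arviz as az

data = az.load_arviz_data("regression1d")
# Custom T statistic: the 25th percentile over the last axis of each sample.
axes = az.plot_bpv(data, kind="t_stat", t_stat=lambda x: np.percentile(x, q=25, axis=-1))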
57,982 |
def fetch_incidents(client: PolarisClient, last_run: dict, params: dict) -> Tuple[dict, list]:
"""
Fetch Rubrik Anomaly incidents.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type last_run: ``dict``
:param last_run: last run object obtained from demisto.getLastRun()
:type params: ``dict``
:param params: arguments obtained from demisto.params()
:return:
"""
max_fetch = arg_to_number(params.get('max_fetch', DEFAULT_MAX_FETCH), 'Fetch Limit')
last_run_time = last_run.get('last_fetch', None)
next_page_token = last_run.get('next_page_token', '')
next_run = last_run.copy()
if last_run_time is None:
# if the last run has not been set (i.e. on the first run),
# fall back to the configured first_fetch value (or its default)
# as the starting point for the query
first_fetch = params.get('first_fetch', DEFAULT_FIRST_FETCH)
first_fetch = arg_to_datetime(first_fetch, "First fetch time")
last_run_time = first_fetch.strftime(DATE_TIME_FORMAT) # type: ignore
next_run["last_fetch"] = last_run_time
# removed manual fetch interval as this feature is built in XSOAR 6.0.0 and onwards
events = client.list_event_series(activity_type="Anomaly",
start_date=last_run_time,
sort_order="Asc",
first=max_fetch,
after=next_page_token)
activity_series_connection = events.get("data", {}).get("activitySeriesConnection", {})
new_next_page_token = activity_series_connection.get("pageInfo", {}).get("endCursor", "")
if new_next_page_token:
next_run["next_page_token"] = new_next_page_token
incidents = []
edges = activity_series_connection.get("edges", [])
for event in edges:
processed_incident = {
"incidentClassification": "RubrikRadar",
"message": [],
"severity": IncidentSeverity.UNKNOWN
}
node = event.get("node", {})
processed_incident.update(node)
processed_incident["eventCompleted"] = "True" if node.get("lastActivityStatus", "") == "Success" else False
activity_connection = node.get("activityConnection", {})
activity_nodes = activity_connection.get("nodes", [])
processed_incident = process_activity_nodes(activity_nodes, processed_incident)
# Map Severity Level
severity = node.get("severity", "")
if severity == "Critical" or severity == "Warning":
if demisto.params().get(f'radar_{severity.lower()}_severity_mapping') is None:
severity_mapping = 'XSOAR LOW'
else:
severity_mapping = demisto.params().get(f'radar_{severity.lower()}_severity_mapping')
processed_incident["severity"] = convert_to_demisto_severity(severity_mapping)
else:
processed_incident["severity"] = IncidentSeverity.LOW
incidents.append({
"name": f'Rubrik Radar Anomaly - {processed_incident.get("objectName", "")}',
"occurred": processed_incident.get("lastUpdated", ""),
"rawJSON": json.dumps(processed_incident),
"severity": processed_incident["severity"]
})
return next_run, incidents
|
def fetch_incidents(client: PolarisClient, last_run: dict, params: dict) -> Tuple[dict, list]:
"""
Fetch Rubrik Anomaly incidents.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type last_run: ``dict``
:param last_run: last run object obtained from demisto.getLastRun()
:type params: ``dict``
:param params: arguments obtained from demisto.params()
:return:
"""
max_fetch = arg_to_number(params.get('max_fetch', DEFAULT_MAX_FETCH), 'Fetch Limit')
last_run_time = last_run.get('last_fetch', None)
next_page_token = last_run.get('next_page_token', '')
next_run = last_run.copy()
if last_run_time is None:
# if the last run has not been set (i.e. on the first run),
# fall back to the configured first_fetch value (or its default)
# as the starting point for the query
first_fetch = params.get('first_fetch', DEFAULT_FIRST_FETCH)
first_fetch = arg_to_datetime(first_fetch, "First fetch time")
last_run_time = first_fetch.strftime(DATE_TIME_FORMAT) # type: ignore
next_run["last_fetch"] = last_run_time
# removed manual fetch interval as this feature is built in XSOAR 6.0.0 and onwards
events = client.list_event_series(activity_type="Anomaly",
start_date=last_run_time,
sort_order="Asc",
first=max_fetch,
after=next_page_token)
activity_series_connection = events.get("data", {}).get("activitySeriesConnection", {})
new_next_page_token = activity_series_connection.get("pageInfo", {}).get("endCursor", "")
if new_next_page_token:
next_run["next_page_token"] = new_next_page_token
incidents = []
edges = activity_series_connection.get("edges", [])
for event in edges:
processed_incident = {
"incidentClassification": "RubrikRadar",
"message": [],
"severity": IncidentSeverity.UNKNOWN
}
node = event.get("node", {})
processed_incident.update(node)
processed_incident["eventCompleted"] = "True" if node.get("lastActivityStatus", "") == "Success" else False
activity_connection = node.get("activityConnection", {})
activity_nodes = activity_connection.get("nodes", [])
processed_incident = process_activity_nodes(activity_nodes, processed_incident)
# Map Severity Level
severity = node.get("severity", "")
if severity == "Critical" or severity == "Warning":
if params.get(f'radar_{severity.lower()}_severity_mapping') is None:
severity_mapping = 'XSOAR LOW'
else:
severity_mapping = params.get(f'radar_{severity.lower()}_severity_mapping')
processed_incident["severity"] = convert_to_demisto_severity(severity_mapping)
else:
processed_incident["severity"] = IncidentSeverity.LOW
incidents.append({
"name": f'Rubrik Radar Anomaly - {processed_incident.get("objectName", "")}',
"occurred": processed_incident.get("lastUpdated", ""),
"rawJSON": json.dumps(processed_incident),
"severity": processed_incident["severity"]
})
return next_run, incidents
|
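The severity-mapping branch is the part of the function most worth isolating. Here is a self-contained sketch that assumes the integration parameters behave like a plain dict; `map_radar_severity` and `DEFAULT_MAPPING` are illustrative names, not part of the integration.
DEFAULT_MAPPING = 'XSOAR LOW'

def map_radar_severity(severity: str, params: dict) -> str:
    # Critical/Warning severities honour a user-configured mapping, with a default fallback.
    if severity in ("Critical", "Warning"):
        return params.get(f'radar_{severity.lower()}_severity_mapping') or DEFAULT_MAPPING
    return DEFAULT_MAPPING

print(map_radar_severity("Critical", {"radar_critical_severity_mapping": "XSOAR HIGH"}))  # XSOAR HIGH
print(map_radar_severity("Warning", {}))                                                  # XSOAR LOW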
54,222 |
def _single_qubit_matrices_with_sqrt_iswap(
kak: 'cirq.KakDecomposition',
required_sqrt_iswap_count: Optional[int] = None,
atol: float = 1e-8,
) -> Tuple[Sequence[Tuple[np.ndarray, np.ndarray]], complex]:
"""Computes the sequence of interleaved single-qubit unitary matrices in the
sqrt-iSWAP decomposition."""
if required_sqrt_iswap_count is not None:
if not 0 <= required_sqrt_iswap_count <= 3:
raise ValueError('the argument `required_sqrt_iswap_count` must be 0, 1, 2, or 3.')
if not [
_in_0_region,
_in_1sqrt_iswap_region,
_in_2sqrt_iswap_region,
_in_3sqrt_iswap_region,
][required_sqrt_iswap_count](kak.interaction_coefficients, weyl_tol=atol / 10):
raise ValueError(
f'the given gate cannot be decomposed into exactly '
f'{required_sqrt_iswap_count} sqrt-iSWAP gates.'
)
return [
_decomp_0_matrices,
_decomp_1sqrt_iswap_matrices,
_decomp_2sqrt_iswap_matrices,
_decomp_3sqrt_iswap_matrices,
][required_sqrt_iswap_count](kak, atol=atol)
if _in_0_region(kak.interaction_coefficients, weyl_tol=atol / 10):
return _decomp_0_matrices(kak, atol)
elif _in_1sqrt_iswap_region(kak.interaction_coefficients, weyl_tol=atol / 10):
return _decomp_1sqrt_iswap_matrices(kak, atol)
elif _in_2sqrt_iswap_region(kak.interaction_coefficients, weyl_tol=atol / 10):
return _decomp_2sqrt_iswap_matrices(kak, atol)
return _decomp_3sqrt_iswap_matrices(kak, atol)
|
def _single_qubit_matrices_with_sqrt_iswap(
kak: 'cirq.KakDecomposition',
required_sqrt_iswap_count: Optional[int] = None,
atol: float = 1e-8,
) -> Tuple[Sequence[Tuple[np.ndarray, np.ndarray]], complex]:
"""Computes the sequence of interleaved single-qubit unitary matrices in the
sqrt-iSWAP decomposition."""
decomposers = [
(_in_0_region, _decomp_0_matrices),
(_in_1sqrt_iswap_region, _decomp_1sqrt_iswap_matrices),
(_in_2sqrt_iswap_region, _decomp_2sqrt_iswap_matrices),
(_in_3sqrt_iswap_region, _decomp_3sqrt_iswap_matrices),
]
if required_sqrt_iswap_count is not None:
if not 0 <= required_sqrt_iswap_count <= 3:
raise ValueError(
"the argument `required_sqrt_iswap_count` must be 0, 1, 2, or 3."
)
can_decompose, decomposer = decomposers[required_sqrt_iswap_count]
if not can_decompose(kak.interaction_coefficients, weyl_tol=atol / 10):
raise ValueError(
f"the given gate cannot be decomposed into exactly {required_sqrt_iswap_count} sqrt-iSWAP gates."
)
return decomposer(kak, atol=atol)
for can_decompose, decomposer in decomposers:
if can_decompose(kak.interaction_coefficients, weyl_tol=atol / 10):
return decomposer(kak, atol)
|
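The modified version above replaces an if/elif chain with a list of (predicate, handler) pairs. A generic, cirq-independent sketch of that dispatch pattern, using toy predicates and handlers only:
handlers = [
    (lambda x: x < 0, lambda x: "negative"),
    (lambda x: x == 0, lambda x: "zero"),
    (lambda x: x > 0, lambda x: "positive"),
]

def classify(x):
    # Walk the table in order and dispatch to the first matching handler.
    for matches, handle in handlers:
        if matches(x):
            return handle(x)
    raise ValueError("no handler matched")

print(classify(-3), classify(0), classify(7))  # negative zero positive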
29,836 |
def is_in_container() -> bool:
return os.path.exists(_DOCKERENV_FILE) or os.path.exists(_PODMAN_FLAG)
|
def is_in_container() -> bool:
return any([os.path.exists(p) for p in (_DOCKERENV_FILE, _PODMAN_FLAG)])
|
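For reference, a tiny self-contained version of the same check, using two commonly cited container marker files; the actual values of _DOCKERENV_FILE and _PODMAN_FLAG are defined elsewhere in the source and may differ.
import os

_MARKER_FILES = ("/.dockerenv", "/run/.containerenv")  # assumed markers for docker / podman

def in_container() -> bool:
    # A generator expression is enough here; building a list first is not required.
    return any(os.path.exists(p) for p in _MARKER_FILES)

print(in_container())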
5,104 |
def _version_and_breakpoints(loca, fontdata):
"""
Read the version number of the font and determine sfnts breakpoints.
When a TrueType font file is written as a Type 42 font, it has to be
broken into substrings of at most 65535 bytes. These substrings must
begin at font table boundaries or glyph boundaries in the glyf table.
This function determines all possible breakpoints and it is the caller's
responsibility to do the splitting.
Helper function for _font_to_ps_type42.
Parameters
----------
loca : fontTools.ttLib._l_o_c_a.table__l_o_c_a or None
The loca table of the font if available
fontdata : bytes
The raw data of the font
Returns
-------
tuple
((v1, v2), breakpoints) where v1 is the major version number,
v2 is the minor version number and breakpoints is a sorted list
of offsets into fontdata; if loca is not available, just the table
boundaries
"""
v1, v2, numTables = struct.unpack('>3h', fontdata[:6])
version = (v1, v2)
tables = {}
for i in range(numTables):
tag, _, offset, _ = struct.unpack(
'>4sIII',
fontdata[12 + i*16:12 + (i+1)*16]
)
tables[tag.decode('ascii')] = offset
if loca is not None:
glyf_breakpoints = {
tables['glyf'] + offset for offset in loca.locations[:-1]
}
else:
glyf_breakpoints = set()
breakpoints = sorted(
set(tables.values()) | glyf_breakpoints | {len(fontdata)}
)
return version, breakpoints
|
def _version_and_breakpoints(loca, fontdata):
"""
Read the version number of the font and determine sfnts breakpoints.
When a TrueType font file is written as a Type 42 font, it has to be
broken into substrings of at most 65535 bytes. These substrings must
begin at font table boundaries or glyph boundaries in the glyf table.
This function determines all possible breakpoints and it is the caller's
responsibility to do the splitting.
Helper function for _font_to_ps_type42.
Parameters
----------
loca : fontTools.ttLib._l_o_c_a.table__l_o_c_a or None
The loca table of the font if available
fontdata : bytes
The raw data of the font
Returns
-------
tuple
((v1, v2), breakpoints) where v1 is the major version number,
v2 is the minor version number and breakpoints is a sorted list
of offsets into fontdata; if loca is not available, just the table
boundaries
"""
v1, v2, numTables = struct.unpack('>3h', fontdata[:6])
version = (v1, v2)
tables = {}
for i in range(numTables):
tag, _, offset, _ = struct.unpack(
'>4sIII',
fontdata[12 + i*16:12 + (i+1)*16]
)
tables[tag.decode('ascii')] = offset
if loca is not None:
glyf_breakpoints = {
tables['glyf'] + offset for offset in loca.locations[:-1]
}
else:
glyf_breakpoints = set()
breakpoints = sorted({*tables.values(), *glyf_breakpoints, len(fontdata)})
return version, breakpoints
|
47,026 |
def require_distributed_retrieval(test_case):
"""
Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
:class:`~transformers.RagRetriever`.
These tests are skipped when respective libraries are not installed.
"""
if not (is_datasets_available() and is_faiss_available() and is_psutil_available()):
test_case = unittest.skip("test requires Datasets, Faiss, " "psutil")(test_case)
return test_case
|
def require_distributed_retrieval(test_case):
"""
Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
:class:`~transformers.RagRetriever`.
These tests are skipped when respective libraries are not installed.
"""
if not (is_datasets_available() and is_faiss_available() and is_psutil_available()):
test_case = unittest.skip("test requires Datasets, Faiss, psutil")(test_case)
return test_case
|
53,322 |
def uniform_nullpoint_find(
x_range=[0, 1],
y_range=[0, 1],
z_range=[0, 1],
func=(lambda x, y, z: [x, y, z]),
precision=[0.05, 0.05, 0.05],
MAX_ITERATIONS=500,
err=1e-10,
):
r"""
Returns an array of null point objects representing
the nullpoints of the given vector space.
Parameters
----------
x_range: array_like
A 1 by 2 array containing the range of x-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
y_range: array_like
A 1 by 2 array containing the range of y-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
z_range: array_like
A 1 by 2 array containing the range of z-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
func: <class 'function'>
A function that takes in 3 arguments, respectively representing an x, y, and z
coordinate of a point and returns the vector value for that point in the form
of a 1 by 3 array.
precision: array_like
A 1 by 3 array containing the approximate precision values for each dimension,
in the case where uniform arrays are being used.
The default value is [0.05, 0.05, 0.05].
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
Notes
----------
This method is described by :cite:t:`haynes:2007`.
"""
vspace = _vector_space(
None,
None,
None,
x_range,
y_range,
z_range,
None,
None,
None,
func,
precision,
)
return _vspace_iterator(vspace, MAX_ITERATIONS, err)
|
def uniform_nullpoint_find(
x_range=[0, 1],
y_range=[0, 1],
z_range=[0, 1],
func=(lambda x, y, z: [x, y, z]),
precision=[0.05, 0.05, 0.05],
MAX_ITERATIONS=500,
err=1e-10,
):
r"""
Returns an array of null point objects representing
the nullpoints of the given vector space.
Parameters
----------
x_range: array_like
A 1 by 2 array containing the range of x-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
y_range: array_like
A 1 by 2 array containing the range of y-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
z_range: array_like
A 1 by 2 array containing the range of z-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
func: <class 'function'>
A function that takes in 3 arguments, respectively representing an x, y, and z
coordinate of a point and returns the vector value for that point in the form
of a 1 by 3 array.
precision: array_like
A 1 by 3 array containing the approximate precision values for each dimension,
in the case where uniform arrays are being used.
The default value is [0.05, 0.05, 0.05].
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of `~plasmapy.analysis.nullpoint.NullPoint` objects representing
the nullpoints of the given vector space.
Notes
----------
This method is described by :cite:t:`haynes:2007`.
"""
vspace = _vector_space(
None,
None,
None,
x_range,
y_range,
z_range,
None,
None,
None,
func,
precision,
)
return _vspace_iterator(vspace, MAX_ITERATIONS, err)
|
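A hypothetical usage sketch, assuming plasmapy is installed and that the function is importable from plasmapy.analysis.nullpoint as the docstring's cross-references suggest; the linear field below has a single null at the origin.
from plasmapy.analysis.nullpoint import uniform_nullpoint_find

nulls = uniform_nullpoint_find(
    x_range=[-1, 1], y_range=[-1, 1], z_range=[-1, 1],
    func=lambda x, y, z: [x, y, z],
    precision=[0.1, 0.1, 0.1],
)
print(len(nulls))  # expected: one null point, located near (0, 0, 0)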
17,324 |
def decode_cf_datetime(num_dates, units, calendar=None, use_cftime=None):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than cftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that time unit in `units` must not be smaller than microseconds and
not larger than days.
See also
--------
cftime.num2date
"""
num_dates = np.asarray(num_dates)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = 'standard'
if use_cftime is None:
try:
dates = _decode_datetime_with_pandas(flat_num_dates, units,
calendar)
except (OutOfBoundsDatetime, OverflowError):
dates = _decode_datetime_with_cftime(
flat_num_dates.astype(np.float), units, calendar)
if (dates[np.nanargmin(num_dates)].year < 1678 or
dates[np.nanargmax(num_dates)].year >= 2262):
if calendar in _STANDARD_CALENDARS:
warnings.warn(
'Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using dummy '
'cftime.datetime objects instead, reason: dates out '
'of range', SerializationWarning, stacklevel=3)
else:
if calendar in _STANDARD_CALENDARS:
dates = cftime_to_nptime(dates)
elif use_cftime:
dates = _decode_datetime_with_cftime(
flat_num_dates.astype(np.float), units, calendar)
else:
dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)
return dates.reshape(num_dates.shape)
|
def decode_cf_datetime(num_dates, units, calendar=None, use_cftime=None):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than cftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that time unit in `units` must not be smaller than microseconds and
not larger than days.
See also
--------
cftime.num2date
"""
num_dates = np.asarray(num_dates)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = 'standard'
if use_cftime is None:
try:
dates = _decode_datetime_with_pandas(flat_num_dates, units,
calendar)
except (OutOfBoundsDatetime, OverflowError):
dates = _decode_datetime_with_cftime(
flat_num_dates.astype(np.float), units, calendar)
if (dates[np.nanargmin(num_dates)].year < 1678 or
dates[np.nanargmax(num_dates)].year >= 2262):
if calendar in _STANDARD_CALENDARS:
warnings.warn(
'Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using '
'cftime.datetime objects instead, reason: dates out '
'of range', SerializationWarning, stacklevel=3)
else:
if calendar in _STANDARD_CALENDARS:
dates = cftime_to_nptime(dates)
elif use_cftime:
dates = _decode_datetime_with_cftime(
flat_num_dates.astype(np.float), units, calendar)
else:
dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)
return dates.reshape(num_dates.shape)
|
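The pandas fast path referred to above boils down to adding a timedelta array to a reference timestamp. A simplified, self-contained sketch; the real _decode_datetime_with_pandas also parses the units string and guards against overflow.
import numpy as np
import pandas as pd

num_dates = np.array([0, 1, 2, 366])            # e.g. "days since 2000-01-01"
ref = pd.Timestamp('2000-01-01')
dates = (ref + pd.to_timedelta(num_dates, unit='D')).values
print(dates.dtype, dates[-1])                    # datetime64[ns], last entry is 2001-01-01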
28,497 |
def _import_packages(package_helper, filtered_packages, check_function):
"""Test if packages can be imported
Note: using a list of packages instead of a fixture for the list of packages since pytest prevent to use multiple yields
"""
failures = {}
LOGGER.info(f"Testing the import of packages ...")
for package in filtered_packages:
LOGGER.info(f"Trying to import {package}")
try:
assert (
check_function(package_helper, package) == 0
), f"Package [{package}] import failed"
except AssertionError as err:
failures[package] = err
if failures:
raise AssertionError(failures)
|
def _import_packages(package_helper, filtered_packages, check_function):
"""Test if packages can be imported
Note: using a list of packages instead of a fixture for the list of packages since pytest prevents use of multiple yields
"""
failures = {}
LOGGER.info(f"Testing the import of packages ...")
for package in filtered_packages:
LOGGER.info(f"Trying to import {package}")
try:
assert (
check_function(package_helper, package) == 0
), f"Package [{package}] import failed"
except AssertionError as err:
failures[package] = err
if failures:
raise AssertionError(failures)
|
22,238 |
def build_trigger(op):
create_trigger_template = """
CREATE TRIGGER BEFORE_{operation}_DATASET
BEFORE {operation} ON dataset
BEGIN
update history
set update_time = current_timestamp
where id in (
select hda.history_id
from history_dataset_association as hda
where hda.dataset_id = {rowset}.id
);
END;
"""
rs = 'OLD' if op == 'DELETE' else 'NEW'
sql = create_trigger_template.format(operation=op, rowset=rs)
return DDL(sql).execute_if(callable_=not_pg)
|
def build_trigger(op):
create_trigger_template = """
CREATE TRIGGER BEFORE_{operation}_DATASET
AFTER {operation} ON dataset
BEGIN
update history
set update_time = current_timestamp
where id in (
select hda.history_id
from history_dataset_association as hda
where hda.dataset_id = {rowset}.id
);
END;
"""
rs = 'OLD' if op == 'DELETE' else 'NEW'
sql = create_trigger_template.format(operation=op, rowset=rs)
return DDL(sql).execute_if(callable_=not_pg)
|
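To see what the template produces, the formatting step can be run on its own, without the SQLAlchemy DDL wrapper; a shortened template is used here purely for illustration.
template = (
    "CREATE TRIGGER BEFORE_{operation}_DATASET ... "
    "where hda.dataset_id = {rowset}.id ..."
)
for op in ('INSERT', 'UPDATE', 'DELETE'):
    rs = 'OLD' if op == 'DELETE' else 'NEW'   # DELETE sees the OLD row; INSERT/UPDATE see the NEW one
    print(template.format(operation=op, rowset=rs))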
15,226 |
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Environment Canada camera."""
if config.get(CONF_STATION):
radar_object = ECRadar(
station_id=config[CONF_STATION], precip_type=config.get(CONF_PRECIP_TYPE)
)
else:
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
radar_object = ECRadar(
coordinates=(lat, lon), precip_type=config.get(CONF_PRECIP_TYPE)
)
add_devices(
[ECCamera(radar_object, config.get(CONF_NAME), config.get(CONF_LOOP))], True
)
|
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Environment Canada camera."""
if config.get(CONF_STATION):
radar_object = ECRadar(
station_id=config[CONF_STATION], precip_type=config.get(CONF_PRECIP_TYPE)
)
else:
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
radar_object = ECRadar(
coordinates=(lat, lon), precip_type=config.get(CONF_PRECIP_TYPE)
)
add_devices(
[ECCamera(radar_object, config.get(CONF_NAME), config[CONF_LOOP])], True
)
|
5,711 |
def boschloo_exact(table, alternative="two-sided", n=32):
r"""Perform a Boschloo exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes section below.
n : int, optional
Number of sampling points used in the construction of the sampling
method. Note that this argument will automatically be converted to
the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
select sample points. Default is 32. Must be positive. In most cases,
32 points is enough to reach good precision. More points come at a
performance cost.
Returns
-------
ber : BoschlooExactResult
A result object with the following attributes.
statistic : float
The Fisher statistic.
pvalue : float
The probability of obtaining a test statistic at least as extreme
as the one observed under the null hypothesis.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
fisher_exact : Fisher exact test on a 2x2 contingency table.
barnard_exact : Barnard's exact test, which is a more powerful alternative
than Fisher's exact test for 2x2 contingency tables.
Notes
-----
Boschloo's test is an exact test used in the analysis of contingency
tables. It examines the association of two categorical variables, and
is a uniformly more powerful alternative to Fisher's exact test
for 2x2 contingency tables.
Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
where each column stores the binomial experiment, as in the example
below. Let's also define :math:`p_1, p_2` the theoretical binomial
probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
Boschloo exact test, we can assert three different null hypotheses :
- :math:`H_0 : p_1 \geq p_2` versus :math:`H_1 : p_1 < p_2`,
with `alternative` = "less"
- :math:`H_0 : p_1 \leq p_2` versus :math:`H_1 : p_1 > p_2`,
with `alternative` = "greater"
- :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
with `alternative` = "two-sided" (default one)
In order to compute Boschloo's exact test, the one-sided Fisher p-value is used as the test
statistic :math:`T`, and probabilities are summed over all 2x2 contingency tables :math:`X` such that:
* :math:`T(X) \leq T(X_0)` when `alternative` = "less",
* :math:`T(X) \geq T(X_0)` when `alternative` = "greater", or
* :math:`T(X) \geq |T(X_0)|` when `alternative` = "two-sided".
Above, :math:`c_1, c_2` are the sums of columns 1 and 2,
and :math:`t` is the total (the sum of the 4 elements of the table).
The returned p-value is the maximum p-value taken over the nuisance
parameter :math:`\pi`, where :math:`0 \leq \pi \leq 1`.
This function's complexity is :math:`O(n c_1 c_2)`, where `n` is the
number of sample points.
.. versionadded:: 1.7.0
References
----------
.. [1] Raised conditional level of significance for the 2 × 2‐table when testing
the equality of two probabilities. :doi:`10.1111/j.1467-9574.1970.tb00104.x`
.. [2] "Boschloo's test". *Wikipedia*.
https://en.wikipedia.org/wiki/Boschloo%27s_test
Examples
--------
We reconsider the example developed in the `barnard_exact` test:
Consider the following example of a vaccine efficacy study
(Chan, 1998). In a randomized clinical trial of 30 subjects, 15 were
inoculated with a recombinant DNA influenza vaccine and the 15 were
inoculated with a placebo. Twelve of the 15 subjects in the placebo
group (80%) eventually became infected with influenza whereas for the
vaccine group, only 7 of the 15 subjects (47%) became infected. The
data are tabulated as a 2 x 2 table::
Vaccine Placebo
Yes 7 12
No 8 3
When working with statistical hypothesis testing, we usually use a
threshold probability or significance level upon which we decide
to reject the null hypothesis :math:`H_0`. Suppose we choose the common
significance level of 5%.
Our alternative hypothesis is that the vaccine will lower the chance of
becoming infected with the virus; that is, the probability :math:`p_1` of
catching the virus with the vaccine will be *less than* the probability
:math:`p_2` of catching the virus without the vaccine. Therefore, we call
`barnard_exact` with the ``alternative="less"`` option:
>>> import scipy.stats as stats
>>> res = stats.boschloo_exact([[7, 12], [8, 3]], alternative="less")
>>> res.statistic
0.064...
>>> res.pvalue
0.0341...
Under the null hypothesis that the vaccine will not lower the chance of
becoming infected, the probability of obtaining test results at least as
extreme as the observed data is approximately 3.4%. Since this p-value is
less than our chosen significance level, we have evidence to reject
:math:`H_0` in favor of the alternative.
Suppose we had used Fisher's exact test instead:
>>> _, pvalue = stats.fisher_exact([[7, 12], [8, 3]], alternative="less")
>>> pvalue
0.0640...
With the same threshold significance of 5%, we would not have been able
to reject the null hypothesis in favor of the alternative. As stated in
[2]_, Barnard's test is uniformly more powerful than Fisher's exact test
because Barnard's test does not condition on any margin. Fisher's test
should only be used when both sets of marginals are fixed.
"""
fisher_exact = scipy.stats.fisher_exact
hypergeom = distributions.hypergeom
if n <= 0:
raise ValueError(
"Number of points `n` must be strictly positive, "
f"found {n!r}"
)
table = np.asarray(table, dtype=np.int64)
if not table.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(table < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in table.sum(axis=0):
# If both values in column are zero, the p-value is 1 and
# the score's statistic is NaN.
return BoschlooExactResult(np.nan, 1.0)
total_col_1, total_col_2 = table.sum(axis=0)
total = total_col_1 + total_col_2
x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(1, -1)
x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(-1, 1)
x1_sum_x2 = x1 + x2
if alternative == 'less':
# cdf(k, M, n, N, loc=0)
pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
fisher_stat = pvalues[table[0, 0], table[0, 1]]
index_arr = pvalues <= fisher_stat
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
fisher_stat = pvalues[table[0, 0], table[0, 1]]
index_arr = pvalues <= fisher_stat
elif alternative == 'two-sided':
p1, p2 = table[0, 0] / total_col_1, table[0, 1] / total_col_2
if p1 >= p2 :
pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
_, fisher_stat = fisher_exact(table)
index_arr = pvalues <= fisher_stat
else:
pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
_, fisher_stat = fisher_exact(table)
index_arr = pvalues <= fisher_stat
else:
msg = (
"`alternative` should be one of {'two-sided', 'less', 'greater'},"
f" found {alternative!r}"
)
raise ValueError(msg)
x1, x2, x1_sum_x2 = x1.T, x2.T, x1_sum_x2.T
x1_log_comb = _compute_log_combinations(total_col_1)
x2_log_comb = _compute_log_combinations(total_col_2)
x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
result = shgo(
_get_binomial_log_p_value_with_nuisance_param,
args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
bounds=((0, 1),),
n=n,
sampling_method="sobol",
)
# result.fun is the negative log pvalue and therefore needs to be
# changed before return
p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
return BoschlooExactResult(fisher_stat, p_value)
|
def boschloo_exact(table, alternative="two-sided", n=32):
r"""Perform a Boschloo exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes section below.
n : int, optional
Number of sampling points used in the construction of the sampling
method. Note that this argument will automatically be converted to
the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
select sample points. Default is 32. Must be positive. In most cases,
32 points is enough to reach good precision. More points come at a
performance cost.
Returns
-------
ber : BoschlooExactResult
A result object with the following attributes.
statistic : float
The Fisher statistic.
pvalue : float
The probability of obtaining a test statistic at least as extreme
as the one observed under the null hypothesis.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
fisher_exact : Fisher exact test on a 2x2 contingency table.
barnard_exact : Barnard's exact test, which is a more powerful alternative
than Fisher's exact test for 2x2 contingency tables.
Notes
-----
Boschloo's test is an exact test used in the analysis of contingency
tables. It examines the association of two categorical variables, and
is a uniformly more powerful alternative to Fisher's exact test
for 2x2 contingency tables.
Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
where each column stores the binomial experiment, as in the example
below. Let's also define :math:`p_1, p_2` the theoretical binomial
probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
Boschloo exact test, we can assert three different null hypotheses :
- :math:`H_0 : p_1 \geq p_2` versus :math:`H_1 : p_1 < p_2`,
with `alternative` = "less"
- :math:`H_0 : p_1 \leq p_2` versus :math:`H_1 : p_1 > p_2`,
with `alternative` = "greater"
- :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
with `alternative` = "two-sided" (default one)
In order to compute Boschloo's exact test, the one-sided Fisher p-value is used as the test
statistic :math:`T`, and probabilities are summed over all 2x2 contingency tables :math:`X` such that:
* :math:`T(X) \leq T(X_0)` when `alternative` = "less",
* :math:`T(X) \geq T(X_0)` when `alternative` = "greater", or
* :math:`T(X) \geq |T(X_0)|` when `alternative` = "two-sided".
Above, :math:`c_1, c_2` are the sums of columns 1 and 2,
and :math:`t` is the total (the sum of the 4 elements of the table).
The returned p-value is the maximum p-value taken over the nuisance
parameter :math:`\pi`, where :math:`0 \leq \pi \leq 1`.
This function's complexity is :math:`O(n c_1 c_2)`, where `n` is the
number of sample points.
.. versionadded:: 1.7.0
References
----------
.. [1] Raised conditional level of significance for the 2 × 2‐table when testing
the equality of two probabilities. :doi:`10.1111/j.1467-9574.1970.tb00104.x`
.. [2] "Boschloo's test". *Wikipedia*.
https://en.wikipedia.org/wiki/Boschloo%27s_test
Examples
--------
We reconsider the example developed in the `barnard_exact` test:
Consider the following example of a vaccine efficacy study
(Chan, 1998). In a randomized clinical trial of 30 subjects, 15 were
inoculated with a recombinant DNA influenza vaccine and the 15 were
inoculated with a placebo. Twelve of the 15 subjects in the placebo
group (80%) eventually became infected with influenza whereas for the
vaccine group, only 7 of the 15 subjects (47%) became infected. The
data are tabulated as a 2 x 2 table::
Vaccine Placebo
Yes 7 12
No 8 3
When working with statistical hypothesis testing, we usually use a
threshold probability or significance level upon which we decide
to reject the null hypothesis :math:`H_0`. Suppose we choose the common
significance level of 5%.
Our alternative hypothesis is that the vaccine will lower the chance of
becoming infected with the virus; that is, the probability :math:`p_1` of
catching the virus with the vaccine will be *less than* the probability
:math:`p_2` of catching the virus without the vaccine. Therefore, we call
`boschloo_exact` with the ``alternative="less"`` option:
>>> import scipy.stats as stats
>>> res = stats.boschloo_exact([[7, 12], [8, 3]], alternative="less")
>>> res.statistic
0.064...
>>> res.pvalue
0.0341...
Under the null hypothesis that the vaccine will not lower the chance of
becoming infected, the probability of obtaining test results at least as
extreme as the observed data is approximately 3.4%. Since this p-value is
less than our chosen significance level, we have evidence to reject
:math:`H_0` in favor of the alternative.
Suppose we had used Fisher's exact test instead:
>>> _, pvalue = stats.fisher_exact([[7, 12], [8, 3]], alternative="less")
>>> pvalue
0.0640...
With the same threshold significance of 5%, we would not have been able
to reject the null hypothesis in favor of the alternative. As stated in
[2]_, Barnard's test is uniformly more powerful than Fisher's exact test
because Barnard's test does not condition on any margin. Fisher's test
should only be used when both sets of marginals are fixed.
"""
fisher_exact = scipy.stats.fisher_exact
hypergeom = distributions.hypergeom
if n <= 0:
raise ValueError(
"Number of points `n` must be strictly positive, "
f"found {n!r}"
)
table = np.asarray(table, dtype=np.int64)
if not table.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(table < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in table.sum(axis=0):
# If both values in column are zero, the p-value is 1 and
# the score's statistic is NaN.
return BoschlooExactResult(np.nan, 1.0)
total_col_1, total_col_2 = table.sum(axis=0)
total = total_col_1 + total_col_2
x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(1, -1)
x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(-1, 1)
x1_sum_x2 = x1 + x2
if alternative == 'less':
# cdf(k, M, n, N, loc=0)
pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
fisher_stat = pvalues[table[0, 0], table[0, 1]]
index_arr = pvalues <= fisher_stat
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
fisher_stat = pvalues[table[0, 0], table[0, 1]]
index_arr = pvalues <= fisher_stat
elif alternative == 'two-sided':
p1, p2 = table[0, 0] / total_col_1, table[0, 1] / total_col_2
if p1 >= p2 :
pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
_, fisher_stat = fisher_exact(table)
index_arr = pvalues <= fisher_stat
else:
pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
_, fisher_stat = fisher_exact(table)
index_arr = pvalues <= fisher_stat
else:
msg = (
"`alternative` should be one of {'two-sided', 'less', 'greater'},"
f" found {alternative!r}"
)
raise ValueError(msg)
x1, x2, x1_sum_x2 = x1.T, x2.T, x1_sum_x2.T
x1_log_comb = _compute_log_combinations(total_col_1)
x2_log_comb = _compute_log_combinations(total_col_2)
x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
result = shgo(
_get_binomial_log_p_value_with_nuisance_param,
args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
bounds=((0, 1),),
n=n,
sampling_method="sobol",
)
# result.fun is the negative log pvalue and therefore needs to be
# changed before return
p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
return BoschlooExactResult(fisher_stat, p_value)
|
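The 'less' branch above computes the one-sided Fisher p-value as a hypergeometric CDF. A small numerical check on the vaccine table from the docstring (requires scipy; the quoted values are approximate):
from scipy.stats import hypergeom, fisher_exact, boschloo_exact

table = [[7, 12], [8, 3]]
# Column totals are 15 and 15, grand total 30, first-row total 19, observed count 7.
stat_from_cdf = hypergeom.cdf(7, 30, 19, 15)
_, fisher_p = fisher_exact(table, alternative="less")
res = boschloo_exact(table, alternative="less")
print(round(stat_from_cdf, 4), round(fisher_p, 4), round(res.statistic, 4))  # all ~0.064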
50,040 |
def get_dist_file_from_egg_link(egg_link_file, prefix_path):
"""
Return the egg info file path following an egg link.
"""
egg_info_full_path = None
egg_link_path = join(prefix_path, win_path_ok(egg_link_file))
try:
with open(egg_link_path) as fh:
# See: https://setuptools.readthedocs.io/en/latest/formats.html#egg-links
# "...Each egg-link file should contain a single file or directory name
# with no newlines..."
egg_link_contents = fh.readlines()[0].strip()
except UnicodeDecodeError:
from locale import getpreferredencoding
with open(egg_link_path, encoding=getpreferredencoding()) as fh:
egg_link_contents = fh.readlines()[0].strip()
if lexists(egg_link_contents):
egg_info_fnames = fnmatch_filter(listdir(egg_link_contents), '*.egg-info')
else:
egg_info_fnames = ()
if egg_info_fnames:
if len(egg_info_fnames) != 1:
raise CondaError(
"Expected exactly one `egg-info` directory in '{}', via egg-link '{}'."
" Instead found: {}".format(egg_link_contents, egg_link_file, egg_info_fnames))
egg_info_full_path = join(egg_link_contents, egg_info_fnames[0])
if isdir(egg_info_full_path):
egg_info_full_path = join(egg_info_full_path, "PKG-INFO")
if egg_info_full_path is None:
raise EnvironmentError(ENOENT, strerror(ENOENT), egg_link_contents)
return egg_info_full_path
|
def get_dist_file_from_egg_link(egg_link_file, prefix_path):
"""
Return the egg info file path following an egg link.
"""
egg_info_full_path = None
egg_link_path = join(prefix_path, win_path_ok(egg_link_file))
try:
with open(egg_link_path) as fh:
# See: https://setuptools.readthedocs.io/en/latest/formats.html#egg-links
# "...Each egg-link file should contain a single file or directory name
# with no newlines..."
egg_link_contents = fh.readlines()[0].strip()
except UnicodeDecodeError:
from locale import getpreferredencoding
with open(egg_link_path, encoding=getpreferredencoding()) as fh:
egg_link_contents = fh.readlines()[0].strip()
if lexists(egg_link_contents):
egg_info_fnames = fnmatch_filter(listdir(egg_link_contents), '*.egg-info')
else:
egg_info_fnames = ()
if egg_info_fnames:
if len(egg_info_fnames) != 1:
raise CondaError(
"Expected exactly one `egg-info` directory in '{}', via egg-link '{}'."
" Instead found: {}. These are often left over from "
"legacy operations that did not clean up correctly. Please "
"remove all but one of these.".format(egg_link_contents,
egg_link_file, egg_info_fnames))
egg_info_full_path = join(egg_link_contents, egg_info_fnames[0])
if isdir(egg_info_full_path):
egg_info_full_path = join(egg_info_full_path, "PKG-INFO")
if egg_info_full_path is None:
raise EnvironmentError(ENOENT, strerror(ENOENT), egg_link_contents)
return egg_info_full_path
|
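The *.egg-info filtering step is easy to check in isolation; the directory listing below is hard-coded for illustration.
from fnmatch import filter as fnmatch_filter

listing = ['mypkg.egg-info', 'README.md', 'mypkg']
print(fnmatch_filter(listing, '*.egg-info'))  # ['mypkg.egg-info']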
7,324 |
def _validate_feature_type(feature_type):
"""Transform feature type to an iterable and check that it exists."""
if feature_type is None:
feature_type_ = FEATURE_TYPE
else:
if isinstance(feature_type, str):
feature_type_ = [feature_type]
else:
feature_type_ = feature_type
for feat_t in feature_type_:
if feat_t not in FEATURE_TYPE:
raise ValueError(
f'The given feature type is unknown. Got {feat_t} instead of one'
'of {FEATURE_TYPE}.')
return feature_type_
|
def _validate_feature_type(feature_type):
"""Transform feature type to an iterable and check that it exists."""
if feature_type is None:
feature_type_ = FEATURE_TYPE
else:
if isinstance(feature_type, str):
feature_type_ = [feature_type]
else:
feature_type_ = feature_type
for feat_t in feature_type_:
if feat_t not in FEATURE_TYPE:
raise ValueError(
f'The given feature type is unknown. Got {feat_t} instead of one'
f'of {FEATURE_TYPE}.')
return feature_type_
|
35,202 |
def install_lib(cuda, prefix, library):
record = None
lib_records = library_records
for record in lib_records[library]:
if record['cuda'] == cuda:
break
else:
raise RuntimeError('''
The CUDA version specified is not supported.
Should be one of {}.'''.format(str([x['cuda'] for x in lib_records[library]])))
if prefix is None:
prefix = os.path.expanduser('~/.cupy/cuda_lib')
destination = calculate_destination(prefix, cuda, library, record[library])
if os.path.exists(destination):
raise RuntimeError('''
The destination directory {} already exists.
Remove the directory first if you want to reinstall.'''.format(destination))
target_platform = platform.system()
asset = record['assets'].get(target_platform, None)
if asset is None:
raise RuntimeError('''
The current platform ({}) is not supported.'''.format(target_platform))
print('Installing {} {} for CUDA {} to: {}'.format(
library, record[library], record['cuda'], destination))
url = asset['url']
print('Downloading {}...'.format(url))
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, os.path.basename(url)), 'wb') as f:
with urllib.request.urlopen(url) as response:
f.write(response.read())
print('Extracting...')
outdir = os.path.join(tmpdir, 'extract')
shutil.unpack_archive(f.name, outdir)
print('Installing...')
if library == 'cudnn':
shutil.move(os.path.join(outdir, 'cuda'), destination)
elif library == 'cutensor':
if cuda.startswith('11.') and cuda != '11.0':
cuda = '11'
shutil.move(
os.path.join(outdir, 'libcutensor', 'include'),
os.path.join(destination, 'include'))
shutil.move(
os.path.join(outdir, 'libcutensor', 'lib', cuda),
os.path.join(destination, 'lib'))
if cuda == '10.2':
license = 'license.pdf' # v1.2.2
else:
license = 'license.txt' # v1.3.0
shutil.move(
os.path.join(outdir, 'libcutensor', license), destination)
elif library == 'nccl':
subdir = os.listdir(outdir) # ['nccl_2.8.4-1+cuda11.2_x86_64']
assert len(subdir) == 1
shutil.move(os.path.join(outdir, subdir[0]), destination)
else:
assert False
print('Cleaning up...')
print('Done!')
|
def install_lib(cuda, prefix, library):
record = None
lib_records = library_records
for record in lib_records[library]:
if record['cuda'] == cuda:
break
else:
raise RuntimeError('''
The CUDA version specified is not supported.
Should be one of {}.'''.format(str([x['cuda'] for x in lib_records[library]])))
if prefix is None:
prefix = os.path.expanduser('~/.cupy/cuda_lib')
destination = calculate_destination(prefix, cuda, library, record[library])
if os.path.exists(destination):
raise RuntimeError('''
The destination directory {} already exists.
Remove the directory first if you want to reinstall.'''.format(destination))
target_platform = platform.system()
asset = record['assets'].get(target_platform, None)
if asset is None:
raise RuntimeError('''
The current platform ({}) is not supported.'''.format(target_platform))
print('Installing {} {} for CUDA {} to: {}'.format(
library, record[library], record['cuda'], destination))
url = asset['url']
print('Downloading {}...'.format(url))
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, os.path.basename(url)), 'wb') as f:
with urllib.request.urlopen(url) as response:
f.write(response.read())
print('Extracting...')
outdir = os.path.join(tmpdir, 'extract')
shutil.unpack_archive(f.name, outdir)
print('Installing...')
if library == 'cudnn':
shutil.move(os.path.join(outdir, 'cuda'), destination)
elif library == 'cutensor':
if cuda.startswith('11.') and cuda != '11.0':
cuda = '11'
shutil.move(
os.path.join(outdir, 'libcutensor', 'include'),
os.path.join(destination, 'include'))
shutil.move(
os.path.join(outdir, 'libcutensor', 'lib', cuda),
os.path.join(destination, 'lib'))
if cuda == '10.1':
license = 'license.pdf' # v1.2.2
else:
license = 'license.txt' # v1.3.0
shutil.move(
os.path.join(outdir, 'libcutensor', license), destination)
elif library == 'nccl':
subdir = os.listdir(outdir) # ['nccl_2.8.4-1+cuda11.2_x86_64']
assert len(subdir) == 1
shutil.move(os.path.join(outdir, subdir[0]), destination)
else:
assert False
print('Cleaning up...')
print('Done!')
|
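The download-then-unpack flow can be exercised offline by creating the archive on the fly instead of fetching a release asset. A self-contained sketch:
import os
import shutil
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    src = os.path.join(tmpdir, 'payload')
    os.makedirs(os.path.join(src, 'include'))            # stand-in for a library layout
    archive = shutil.make_archive(os.path.join(tmpdir, 'asset'), 'gztar', root_dir=src)
    outdir = os.path.join(tmpdir, 'extract')
    shutil.unpack_archive(archive, outdir)
    print(os.listdir(outdir))                             # ['include']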
45,438 |
def call_progress_bar(result_parts, line_no):
"""
Attach a progress bar to given `result_parts`.
The progress bar is expected to be shown in a Jupyter Notebook cell.
Parameters
----------
result_parts : list of list of ray.ObjectRef
Objects which are being computed for which progress is requested.
line_no : int
Line number in the call stack which we're displaying progress for.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
from tqdm.autonotebook import tqdm as tqdm_notebook
except ImportError:
raise ImportError("Please pip install tqdm to use the progress bar")
from IPython import get_ipython
try:
cell_no = get_ipython().execution_count
# This happens if we are not in ipython or jupyter.
# No progress bar is supported in that case.
except AttributeError:
return
pbar_id = str(cell_no) + "-" + str(line_no)
futures = [x.list_of_blocks[0] for row in result_parts for x in row]
bar_format = (
"{l_bar}{bar}{r_bar}"
if "DEBUG_PROGRESS_BAR" in os.environ
and os.environ["DEBUG_PROGRESS_BAR"] == "True"
else "{desc}: {percentage:3.0f}%{bar} Elapsed time: {elapsed}, estimated remaining time: {remaining}"
)
bar_lock.acquire()
if pbar_id in progress_bars:
if hasattr(progress_bars[pbar_id], "container"):
if hasattr(progress_bars[pbar_id].container.children[0], "max"):
index = 0
else:
index = 1
progress_bars[pbar_id].container.children[index].max = progress_bars[
pbar_id
].container.children[index].max + len(futures)
progress_bars[pbar_id].total = progress_bars[pbar_id].total + len(futures)
progress_bars[pbar_id].refresh()
else:
progress_bars[pbar_id] = tqdm_notebook(
total=len(futures),
desc="Estimated completion of line " + str(line_no),
bar_format=bar_format,
)
bar_lock.release()
threading.Thread(target=_show_time_updates, args=(progress_bars[pbar_id],)).start()
for i in range(1, len(futures) + 1):
ray.wait(futures, num_returns=i)
progress_bars[pbar_id].update(1)
progress_bars[pbar_id].refresh()
if progress_bars[pbar_id].n == progress_bars[pbar_id].total:
progress_bars[pbar_id].close()
|
def call_progress_bar(result_parts, line_no):
"""
Attach a progress bar to given `result_parts`.
The progress bar is expected to be shown in a Jupyter Notebook cell.
Parameters
----------
result_parts : list of list of ray.ObjectRef
Objects which are being computed for which progress is requested.
line_no : int
Line number in the call stack which we're displaying progress for.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
from tqdm.autonotebook import tqdm as tqdm_notebook
except ImportError:
raise ImportError("Please pip install tqdm to use the progress bar")
from IPython import get_ipython
try:
cell_no = get_ipython().execution_count
# This happens if we are not in ipython or jupyter.
# No progress bar is supported in that case.
except AttributeError:
return
pbar_id = str(cell_no) + "-" + str(line_no)
futures = [block for row in result_parts for partition in row for block in partition.list_of_blocks]
bar_format = (
"{l_bar}{bar}{r_bar}"
if "DEBUG_PROGRESS_BAR" in os.environ
and os.environ["DEBUG_PROGRESS_BAR"] == "True"
else "{desc}: {percentage:3.0f}%{bar} Elapsed time: {elapsed}, estimated remaining time: {remaining}"
)
bar_lock.acquire()
if pbar_id in progress_bars:
if hasattr(progress_bars[pbar_id], "container"):
if hasattr(progress_bars[pbar_id].container.children[0], "max"):
index = 0
else:
index = 1
progress_bars[pbar_id].container.children[index].max = progress_bars[
pbar_id
].container.children[index].max + len(futures)
progress_bars[pbar_id].total = progress_bars[pbar_id].total + len(futures)
progress_bars[pbar_id].refresh()
else:
progress_bars[pbar_id] = tqdm_notebook(
total=len(futures),
desc="Estimated completion of line " + str(line_no),
bar_format=bar_format,
)
bar_lock.release()
threading.Thread(target=_show_time_updates, args=(progress_bars[pbar_id],)).start()
for i in range(1, len(futures) + 1):
ray.wait(futures, num_returns=i)
progress_bars[pbar_id].update(1)
progress_bars[pbar_id].refresh()
if progress_bars[pbar_id].n == progress_bars[pbar_id].total:
progress_bars[pbar_id].close()
|
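Outside of Ray and Jupyter, the manual update pattern used above reduces to a few tqdm calls; a minimal sketch assuming tqdm is installed:
from tqdm.auto import tqdm

tasks = range(5)
bar = tqdm(total=len(tasks), desc="Estimated completion")
for _ in tasks:
    # In the real code this waits on one more Ray future before each update.
    bar.update(1)
bar.close()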
31,088 |
def copy_notes_to_target_incident(args: Dict[str, Any]) -> CommandResults:
target_incident = args.get('target_incident', None)
if not target_incident:
raise ValueError('Target Incident ID not specified')
tags = argToList(args.get('tags'))
entries = demisto.executeCommand('getEntries', {'filter': {'tags': tags}})
note_entries: List = []
md: str = ''
if isinstance(entries, list) and len(entries) > 0:
for n in entries:
if 'Note' in n and n['Note'] is True:
note_entries.append(n)
if len(note_entries) > 0:
demisto.executeCommand("addEntries", {"id": target_incident, "entries": note_entries})
md = f'## {len(note_entries)} notes copied'
else:
md = '## No notes found'
else:
md = '## No notes found'
return CommandResults(readable_output=md)
|
def copy_notes_to_target_incident(args: Dict[str, Any]) -> CommandResults:
target_incident = args.get('target_incident', None)
if not target_incident:
raise ValueError('Target Incident ID not specified')
tags = argToList(args.get('tags'))
entries = demisto.executeCommand('getEntries', {'filter': {'tags': tags}})
note_entries: List = []
md: str = ''
if isinstance(entries, list) and len(entries) > 0:
for n in entries:
if n.get('Note') is True:
note_entries.append(n)
if len(note_entries) > 0:
demisto.executeCommand("addEntries", {"id": target_incident, "entries": note_entries})
md = f'## {len(note_entries)} notes copied'
else:
md = '## No notes found'
else:
md = '## No notes found'
return CommandResults(readable_output=md)
|
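A minimal, self-contained sketch of the membership check adopted in the modified version above; the entry dicts are made up and only stand in for XSOAR war-room entries, they are not the real getEntries payload.

entries = [
    {'ID': '1', 'Note': True, 'Contents': 'analyst note'},
    {'ID': '2', 'Note': False, 'Contents': 'war room output'},
    {'ID': '3', 'Contents': 'entry without a Note key'},
]

# dict.get avoids a KeyError on entries that have no 'Note' field at all.
note_entries = [e for e in entries if e.get('Note') is True]
assert [e['ID'] for e in note_entries] == ['1']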
56,841 |
def get_domain_for_ucr_table_name(table_name):
if table_name.startswith(UCR_TABLE_PREFIX):
return table_name.split('_')[1]
elif table_name.startswith(LEGACY_UCR_TABLE_PREFIX):
return table_name.split('_')[2]
else:
raise ValueError(f"Expected {table_name} to start with {UCR_TABLE_PREFIX} or {LEGACY_UCR_TABLE_PREFIX}")
|
def get_domain_for_ucr_table_name(table_name):
if table_name.startswith(UCR_TABLE_PREFIX):
return table_name[len(UCR_TABLE_PREFIX):]
if table_name.startswith(LEGACY_UCR_TABLE_PREFIX):
return table_name[len(LEGACY_UCR_TABLE_PREFIX):]
raise ValueError(f"Expected {table_name} to start with {UCR_TABLE_PREFIX} or {LEGACY_UCR_TABLE_PREFIX}")
|
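The behavioural difference in the pair above is easiest to see on a concrete name; the prefix value below is an assumption for illustration, not the project's real constant.

UCR_TABLE_PREFIX = 'ucr_'   # made-up value for illustration only

table_name = 'ucr_my_domain'
assert table_name.split('_')[1] == 'my'                    # original: first '_'-separated token only
assert table_name[len(UCR_TABLE_PREFIX):] == 'my_domain'   # modified: full remainder after the prefix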
30,619 |
def get_request_args(params):
limit = try_parse_integer(request.args.get('n', params.get('edl_size', 10000)), EDL_LIMIT_ERR_MSG)
offset = try_parse_integer(request.args.get('s', 0), EDL_OFFSET_ERR_MSG)
query = request.args.get('q', params.get('indicators_query'))
strip_port = request.args.get('sp', params.get('url_port_stripping', False))
drop_invalids = request.args.get('di', params.get('drop_invalids', False))
collapse_ips = request.args.get('tr', params.get('collapse_ips', DONT_COLLAPSE))
# handle flags
if drop_invalids is not None and drop_invalids == '':
drop_invalids = True
if strip_port is not None and strip_port == '':
strip_port = True
if collapse_ips is not None and collapse_ips not in [DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES]:
collapse_ips = try_parse_integer(collapse_ips, EDL_COLLAPSE_ERR_MSG)
if collapse_ips not in [0, 1, 2]:
raise DemistoException(EDL_COLLAPSE_ERR_MSG)
collapse_options = {
0: DONT_COLLAPSE,
1: COLLAPSE_TO_RANGES,
2: COLLAPSE_TO_CIDR
}
collapse_ips = collapse_options[collapse_ips]
return RequestArguments(query, limit, offset, strip_port, drop_invalids, collapse_ips)
|
def get_request_args(params):
limit = try_parse_integer(request.args.get('n', params.get('edl_size', 10000)), EDL_LIMIT_ERR_MSG)
offset = try_parse_integer(request.args.get('s', 0), EDL_OFFSET_ERR_MSG)
query = request.args.get('q', params.get('indicators_query'))
strip_port = request.args.get('sp', params.get('url_port_stripping', False))
drop_invalids = request.args.get('di', params.get('drop_invalids', False))
collapse_ips = request.args.get('tr', params.get('collapse_ips', DONT_COLLAPSE))
# handle flags
if drop_invalids == '':
drop_invalids = True
if strip_port is not None and strip_port == '':
strip_port = True
if collapse_ips is not None and collapse_ips not in [DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES]:
collapse_ips = try_parse_integer(collapse_ips, EDL_COLLAPSE_ERR_MSG)
if collapse_ips not in [0, 1, 2]:
raise DemistoException(EDL_COLLAPSE_ERR_MSG)
collapse_options = {
0: DONT_COLLAPSE,
1: COLLAPSE_TO_RANGES,
2: COLLAPSE_TO_CIDR
}
collapse_ips = collapse_options[collapse_ips]
return RequestArguments(query, limit, offset, strip_port, drop_invalids, collapse_ips)
|
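A standalone sketch of the flag handling above, using a plain dict in place of flask's request.args; the constant values are assumptions, not the integration's real strings. A flag passed with no value (e.g. "...?di&tr=2") arrives as an empty string and is treated as True, and a numeric 'tr' value is mapped onto a collapse mode.

DONT_COLLAPSE, COLLAPSE_TO_RANGES, COLLAPSE_TO_CIDR = "Don't Collapse", 'To Ranges', 'To CIDRs'

args = {'di': '', 'tr': '2'}          # e.g. from "...?di&tr=2"

drop_invalids = args.get('di', False)
if drop_invalids == '':               # flag present without a value
    drop_invalids = True

collapse_ips = args.get('tr', DONT_COLLAPSE)
if collapse_ips not in (DONT_COLLAPSE, COLLAPSE_TO_RANGES, COLLAPSE_TO_CIDR):
    collapse_ips = {0: DONT_COLLAPSE, 1: COLLAPSE_TO_RANGES, 2: COLLAPSE_TO_CIDR}[int(collapse_ips)]

assert drop_invalids is True and collapse_ips == COLLAPSE_TO_CIDR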
22,348 |
def set_log_handler(filename=None, stream=sys.stderr):
if filename:
handler = logging.FileHandler(filename)
else:
handler = logging.StreamHandler(stream=stream)
return handler
|
def set_log_handler(filename=None, stream=None):
if filename:
handler = logging.FileHandler(filename)
else:
handler = logging.StreamHandler(stream=stream)
return handler
|
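The only change in the pair above is the default stream. A quick check of why that is safe: logging.StreamHandler falls back to sys.stderr when stream is None, so the default behaviour is unchanged while callers (and tests) can now inject their own stream.

import io
import logging
import sys

# stream=None makes StreamHandler fall back to sys.stderr at construction time.
assert logging.StreamHandler(stream=None).stream is sys.stderr
buf = io.StringIO()
assert logging.StreamHandler(stream=buf).stream is buf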
42,624 |
def check_airdrops(
addresses: List[ChecksumEthAddress],
data_dir: Path,
) -> Dict[ChecksumEthAddress, Dict]:
"""Checks airdrop data for the given list of ethereum addresses
May raise:
- RemoteError if the remote request fails
"""
found_data: Dict[ChecksumEthAddress, Dict] = defaultdict(lambda: defaultdict(dict))
for protocol_name, airdrop_data in AIRDROPS.items():
data, csvfile = get_airdrop_data(protocol_name, data_dir)
for row in data:
if len(row) < 2:
raise InvalidData(f'Airdrop for {protocol_name} contains an invalid row {row}')
addr, amount, *_ = row
# not doing to_checksum_address() here since the file addresses are checksummed
# and doing to_checksum_address() so many times hits performance
if protocol_name in ('cornichon', 'tornado', 'grain', 'lido'):
amount = token_normalized_value_decimals(int(amount), 18)
if addr in addresses:
found_data[addr][protocol_name] = {
'amount': str(amount),
'asset': airdrop_data[1],
'link': airdrop_data[2],
}
csvfile.close()
# TODO: fix next line annotation
for protocol_name, airdrop_data in POAP_AIRDROPS.items(): # type: ignore
data_dict = get_poap_airdrop_data(protocol_name, data_dir)
for addr, assets in data_dict.items():
# not doing to_checksum_address() here since the file addresses are checksummed
# and doing to_checksum_address() so many times hits performance
if addr in addresses:
if 'poap' not in found_data[addr]:
found_data[addr]['poap'] = []
found_data[addr]['poap'].append({
'event': protocol_name,
'assets': assets,
'link': airdrop_data[1],
'name': airdrop_data[2],
})
return dict(found_data)
|
def check_airdrops(
addresses: List[ChecksumEthAddress],
data_dir: Path,
) -> Dict[ChecksumEthAddress, Dict]:
"""Checks airdrop data for the given list of ethereum addresses
May raise:
- RemoteError if the remote request fails
"""
found_data: Dict[ChecksumEthAddress, Dict] = defaultdict(lambda: defaultdict(dict))
for protocol_name, airdrop_data in AIRDROPS.items():
data, csvfile = get_airdrop_data(protocol_name, data_dir)
for row in data:
if len(row) < 2:
raise InvalidData(f'Airdrop CSV for {protocol_name} contains an invalid row: {row}')
addr, amount, *_ = row
# not doing to_checksum_address() here since the file addresses are checksummed
# and doing to_checksum_address() so many times hits performance
if protocol_name in ('cornichon', 'tornado', 'grain', 'lido'):
amount = token_normalized_value_decimals(int(amount), 18)
if addr in addresses:
found_data[addr][protocol_name] = {
'amount': str(amount),
'asset': airdrop_data[1],
'link': airdrop_data[2],
}
csvfile.close()
# TODO: fix next line annotation
for protocol_name, airdrop_data in POAP_AIRDROPS.items(): # type: ignore
data_dict = get_poap_airdrop_data(protocol_name, data_dir)
for addr, assets in data_dict.items():
# not doing to_checksum_address() here since the file addresses are checksummed
# and doing to_checksum_address() so many times hits performance
if addr in addresses:
if 'poap' not in found_data[addr]:
found_data[addr]['poap'] = []
found_data[addr]['poap'].append({
'event': protocol_name,
'assets': assets,
'link': airdrop_data[1],
'name': airdrop_data[2],
})
return dict(found_data)
|
14,843 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IO expander devices."""
global _PORT_VALUE
global _I2C_ADDR
global _BUS
invert_logic = config.get(CONF_INVERT_LOGIC)
binary_sensors = []
pins = config.get("pins")
bits = config.get(CONF_BITS)
_I2C_ADDR = config.get(CONF_I2CADDR)
# Make 8 bits (can be 2 or 4 bits, but should always pack into an 8-bit msg)
while bits % 8:
bits += 1
# Increase array size
_PORT_VALUE *= int(bits / 8)
# Set up I2C bus connectivity
_BUS = SMBus(config.get(CONF_I2CBUS))
# Write 1 to all pins to prepare them for reading
msg = i2c_msg.write(_I2C_ADDR, _PORT_VALUE)
if _BUS:
_BUS.i2c_rdwr(msg)
else:
_LOGGER.error("I2C bus %d not available!!", config.get(CONF_I2CBUS))
for pin_num, pin_name in pins.items():
binary_sensors.append(Pi4ioe5v9BinarySensor(pin_name, pin_num, invert_logic))
add_entities(binary_sensors, True)
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IO expander devices."""
global _PORT_VALUE
global _I2C_ADDR
global _BUS
invert_logic = config[CONF_INVERT_LOGIC]
binary_sensors = []
pins = config.get("pins")
bits = config.get(CONF_BITS)
_I2C_ADDR = config.get(CONF_I2CADDR)
# Make 8 bits (can be 2 or 4 bits, but should always pack into an 8-bit msg)
while bits % 8:
bits += 1
# Increase array size
_PORT_VALUE *= int(bits / 8)
# Set up I2C bus connectivity
_BUS = SMBus(config.get(CONF_I2CBUS))
# Write 1 to all pins to prepare them for reading
msg = i2c_msg.write(_I2C_ADDR, _PORT_VALUE)
if _BUS:
_BUS.i2c_rdwr(msg)
else:
_LOGGER.error("I2C bus %d not available!!", config.get(CONF_I2CBUS))
for pin_num, pin_name in pins.items():
binary_sensors.append(Pi4ioe5v9BinarySensor(pin_name, pin_num, invert_logic))
add_entities(binary_sensors, True)
|
7,197 |
def regionprops_table(label_image, intensity_image=None,
properties=('label', 'bbox'),
*,
cache=True, separator='-'):
"""Compute image properties and return them as a pandas-compatible table.
The table is a dictionary mapping column names to value arrays. See Notes
section below for details.
Parameters
----------
label_image : (N, M) ndarray
Labeled input image. Labels with value 0 are ignored.
intensity_image : (N, M) ndarray, optional
Intensity (i.e., input) image with same size as labeled image.
Default is None.
properties : tuple or list of str, optional
Properties that will be included in the resulting dictionary
For a list of available properties, please see :func:`regionprops`.
Users should remember to add "label" to keep track of region
identities.
cache : bool, optional
Determine whether to cache calculated properties. The computation is
much faster for cached properties, whereas the memory consumption
increases.
separator : str, optional
For non-scalar properties not listed in OBJECT_COLUMNS, each element
will appear in its own column, with the index of that element separated
from the property name by this separator. For example, the inertia
tensor of a 2D region will appear in four columns:
``inertia_tensor-0-0``, ``inertia_tensor-0-1``, ``inertia_tensor-1-0``,
and ``inertia_tensor-1-1`` (where the separator is ``-``).
Object columns are those that cannot be split in this way because the
number of columns would change depending on the object. For example,
``image`` and ``coords``.
Returns
-------
out_dict : dict
Dictionary mapping property names to an array of values of that
property, one value per region. This dictionary can be used as input to
pandas ``DataFrame`` to map property names to columns in the frame and
regions to rows. If the image has 0 regions, the properties are computed
internally on a dummy label image (an n-dimensional cube containing a
single hypercube region), and every returned array has zero elements but
the correct dtype.
Notes
-----
Each column contains either a scalar property, an object property, or an
element in a multidimensional array.
Properties with scalar values for each region, such as "eccentricity", will
appear as a float or int array with that property name as key.
Multidimensional properties *of fixed size* for a given image dimension,
such as "centroid" (every centroid will have three elements in a 3D image,
no matter the region size), will be split into that many columns, with the
name {property_name}{separator}{element_num} (for 1D properties),
{property_name}{separator}{elem_num0}{separator}{elem_num1} (for 2D
properties), and so on.
For multidimensional properties that don't have a fixed size, such as
"image" (the image of a region varies in size depending on the region
size), an object array will be used, with the corresponding property name
as the key.
Examples
--------
>>> from skimage import data, util, measure
>>> image = data.coins()
>>> label_image = measure.label(image > 110, connectivity=image.ndim)
>>> props = regionprops_table(label_image, image,
... properties=['label', 'inertia_tensor',
... 'inertia_tensor_eigvals'])
>>> props # doctest: +ELLIPSIS +SKIP
{'label': array([ 1, 2, ...]), ...
'inertia_tensor-0-0': array([ 4.012...e+03, 8.51..., ...]), ...
...,
'inertia_tensor_eigvals-1': array([ 2.67...e+02, 2.83..., ...])}
The resulting dictionary can be directly passed to pandas, if installed, to
obtain a clean DataFrame:
>>> import pandas as pd # doctest: +SKIP
>>> data = pd.DataFrame(props) # doctest: +SKIP
>>> data.head() # doctest: +SKIP
label inertia_tensor-0-0 ... inertia_tensor_eigvals-1
0 1 4012.909888 ... 267.065503
1 2 8.514739 ... 2.834806
2 3 0.666667 ... 0.000000
3 4 0.000000 ... 0.000000
4 5 0.222222 ... 0.111111
[5 rows x 7 columns]
"""
regions = regionprops(label_image, intensity_image=intensity_image,
cache=cache)
if len(regions) == 0:
label_image = np.zeros(tuple([3] * len(label_image.shape)), dtype=int)
label_image[tuple([1] * len(label_image.shape))] = 1
if intensity_image is not None:
intensity_image = label_image.copy()
regions = regionprops(label_image, intensity_image=intensity_image,
cache=cache)
out = {}
for prop in properties:
if np.isscalar(regions[0][prop]) or prop in OBJECT_COLUMNS:
out[prop] = np.empty(shape=(0,), dtype=COL_DTYPES[prop])
return out
return _props_to_dict(regions, properties=properties, separator=separator)
|
def regionprops_table(label_image, intensity_image=None,
properties=('label', 'bbox'),
*,
cache=True, separator='-'):
"""Compute image properties and return them as a pandas-compatible table.
The table is a dictionary mapping column names to value arrays. See Notes
section below for details.
Parameters
----------
label_image : (N, M) ndarray
Labeled input image. Labels with value 0 are ignored.
intensity_image : (N, M) ndarray, optional
Intensity (i.e., input) image with same size as labeled image.
Default is None.
properties : tuple or list of str, optional
Properties that will be included in the resulting dictionary
For a list of available properties, please see :func:`regionprops`.
Users should remember to add "label" to keep track of region
identities.
cache : bool, optional
Determine whether to cache calculated properties. The computation is
much faster for cached properties, whereas the memory consumption
increases.
separator : str, optional
For non-scalar properties not listed in OBJECT_COLUMNS, each element
will appear in its own column, with the index of that element separated
from the property name by this separator. For example, the inertia
tensor of a 2D region will appear in four columns:
``inertia_tensor-0-0``, ``inertia_tensor-0-1``, ``inertia_tensor-1-0``,
and ``inertia_tensor-1-1`` (where the separator is ``-``).
Object columns are those that cannot be split in this way because the
number of columns would change depending on the object. For example,
``image`` and ``coords``.
Returns
-------
out_dict : dict
Dictionary mapping property names to an array of values of that
property, one value per region. This dictionary can be used as input to
pandas ``DataFrame`` to map property names to columns in the frame and
regions to rows. If the image has 0 regions, the properties are computed
internally on a dummy label image (an n-dimensional cube containing a
single hypercube region), and every returned array has zero elements but
the correct dtype.
Notes
-----
Each column contains either a scalar property, an object property, or an
element in a multidimensional array.
Properties with scalar values for each region, such as "eccentricity", will
appear as a float or int array with that property name as key.
Multidimensional properties *of fixed size* for a given image dimension,
such as "centroid" (every centroid will have three elements in a 3D image,
no matter the region size), will be split into that many columns, with the
name {property_name}{separator}{element_num} (for 1D properties),
{property_name}{separator}{elem_num0}{separator}{elem_num1} (for 2D
properties), and so on.
For multidimensional properties that don't have a fixed size, such as
"image" (the image of a region varies in size depending on the region
size), an object array will be used, with the corresponding property name
as the key.
Examples
--------
>>> from skimage import data, util, measure
>>> image = data.coins()
>>> label_image = measure.label(image > 110, connectivity=image.ndim)
>>> props = regionprops_table(label_image, image,
... properties=['label', 'inertia_tensor',
... 'inertia_tensor_eigvals'])
>>> props # doctest: +ELLIPSIS +SKIP
{'label': array([ 1, 2, ...]), ...
'inertia_tensor-0-0': array([ 4.012...e+03, 8.51..., ...]), ...
...,
'inertia_tensor_eigvals-1': array([ 2.67...e+02, 2.83..., ...])}
The resulting dictionary can be directly passed to pandas, if installed, to
obtain a clean DataFrame:
>>> import pandas as pd # doctest: +SKIP
>>> data = pd.DataFrame(props) # doctest: +SKIP
>>> data.head() # doctest: +SKIP
label inertia_tensor-0-0 ... inertia_tensor_eigvals-1
0 1 4012.909888 ... 267.065503
1 2 8.514739 ... 2.834806
2 3 0.666667 ... 0.000000
3 4 0.000000 ... 0.000000
4 5 0.222222 ... 0.111111
[5 rows x 7 columns]
"""
regions = regionprops(label_image, intensity_image=intensity_image,
cache=cache)
if len(regions) == 0:
label_image = np.zeros(tuple([3] * len(label_image.shape)), dtype=int)
label_image[tuple([1] * len(label_image.shape))] = 1
if intensity_image is not None:
intensity_image = np.zeros(label_image.shape, dtype=intensity_image.dtype)
regions = regionprops(label_image, intensity_image=intensity_image,
cache=cache)
out = {}
for prop in properties:
if np.isscalar(regions[0][prop]) or prop in OBJECT_COLUMNS:
out[prop] = np.empty(shape=(0,), dtype=COL_DTYPES[prop])
return out
return _props_to_dict(regions, properties=properties, separator=separator)
|
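A hedged sketch of the zero-region branch exercised by the fix above (it assumes a scikit-image version that includes this code path): with an all-zero label image, every returned column should be an empty array of the correct dtype, including intensity-based columns.

import numpy as np
from skimage.measure import regionprops_table

labels = np.zeros((5, 5), dtype=int)                 # no labelled regions
intensity = np.random.rand(5, 5).astype(np.float32)

table = regionprops_table(labels, intensity,
                          properties=['label', 'mean_intensity'])
assert all(col.size == 0 for col in table.values())  # empty, correctly typed columns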
28,576 |
def plot_forest(
data,
kind="forestplot",
model_names=None,
var_names=None,
filter_vars=None,
transform=None,
coords=None,
combined=False,
hdi_prob=None,
rope=None,
quartiles=True,
ess=False,
r_hat=False,
colors="cycle",
textsize=None,
linewidth=None,
markersize=None,
legend=True,
labeller=None,
ridgeplot_alpha=None,
ridgeplot_overlap=2,
ridgeplot_kind="auto",
ridgeplot_truncate=True,
ridgeplot_quantiles=None,
figsize=None,
ax=None,
backend=None,
backend_config=None,
backend_kwargs=None,
show=None,
):
"""Forest plot to compare HDI intervals from a number of distributions.
Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.
Parameters
----------
data: obj or list[obj]
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
kind: str
Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot"
model_names: list[str], optional
List with names for the models in the list of data. Useful when plotting more than one
dataset.
var_names: list[str], optional
List of variables to plot (defaults to None, which results in all variables plotted)
Prefix the variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like", interpret
var_names as substrings of the real variables names. If "regex", interpret var_names as
regular expressions on the real variables names. A la ``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e.the identity function)
coords: dict, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`.
combined: bool
Flag for combining multiple chains into a single chain. If ``False``(default), chains will
be plotted separately.
hdi_prob: float, optional
Plots highest posterior density interval for chosen percentage of density.
Defaults to `0.94`.
rope: tuple or dictionary of tuples
Lower and upper values of the Region Of Practical Equivalence. If a list with one interval
only is provided, the **rope** will be displayed across the y-axis. If more than one
interval is provided the length of the list should match the number of variables.
quartiles: bool, optional
Flag for plotting the interquartile range, in addition to the hdi_prob intervals.
Defaults to ``True``.
r_hat: bool, optional
Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False
ess: bool, optional
Flag for plotting the effective sample size. Defaults to ``False``.
colors: list or string, optional
list with valid matplotlib colors, one color per model. Alternatively a string can be passed.
If the string is `cycle`, it will automatically choose a color per model from matplotlib's
cycle. If a single color is passed, e.g. 'k', 'C2', 'red', this color will be used for all
models. Defaults to 'cycle'.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``.
linewidth: int
Line width throughout. If None it will be autoscaled based on ``figsize``.
markersize: int
Markersize throughout. If None it will be autoscaled based on ``figsize``.
legend : bool, optional
Show a legend with the color encoded model information.
Defaults to ``True`` if there are multiple models
labeller : labeller instance, optional
Class providing the method ``make_model_label`` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ridgeplot_alpha: float
Transparency for ridgeplot fill. If **0**, border is colored by model, otherwise
a `black` outline is used.
ridgeplot_overlap: float
Overlap height for ridgeplots.
ridgeplot_kind: string
By default ("auto") continuous variables are plotted using KDEs and discrete ones using
histograms. To override this use "hist" to plot histograms and "density" for KDEs
ridgeplot_truncate: bool
Whether to truncate densities according to the value of hdi_prob. Defaults to ``True``.
ridgeplot_quantiles: list
Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
Defaults to ``None``.
figsize: tuple
Figure size. If ``None``, it will be defined automatically.
ax: axes, optional
:class:`matplotlib.axes` or :class:`bokeh.plotting.figure`.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``.
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
gridspec: matplotlib GridSpec or bokeh figures
See Also
--------
plot_posterior: Plot Posterior densities in the style of John K. Kruschke’s book.
plot_density: Generate KDE plots for continuous variables and histograms for discrete ones.
Examples
--------
Forestplot
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered_data = az.load_arviz_data('non_centered_eight')
>>> axes = az.plot_forest(non_centered_data,
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Forestplot with multiple datasets
.. plot::
:context: close-figs
>>> centered_data = az.load_arviz_data('centered_eight')
>>> axes = az.plot_forest([non_centered_data, centered_data],
>>> model_names = ["non centered eight", "centered eight"],
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools models')
Forestplot with ropes
.. plot::
:context: close-figs
>>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}
>>> axes = az.plot_forest(non_centered_data,
>>> rope=rope,
>>> var_names='~tau',
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_overlap=3,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot non-truncated and with quantiles
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_truncate=False,
>>> ridgeplot_quantiles=[.25, .5, .75],
>>> ridgeplot_overlap=0.7,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
"""
if not isinstance(data, (list, tuple)):
data = [data]
if len(data) == 1:
legend = False
if coords is None:
coords = {}
if labeller is None:
labeller = NoModelLabeller() if legend else BaseLabeller()
datasets = [convert_to_dataset(datum) for datum in reversed(data)]
if transform is not None:
datasets = [transform(dataset) for dataset in datasets]
datasets = get_coords(
datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords
)
var_names = _var_names(var_names, datasets, filter_vars)
ncols, width_ratios = 1, [3]
if ess:
ncols += 1
width_ratios.append(1)
if r_hat:
ncols += 1
width_ratios.append(1)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
plot_forest_kwargs = dict(
ax=ax,
datasets=datasets,
var_names=var_names,
model_names=model_names,
combined=combined,
colors=colors,
figsize=figsize,
width_ratios=width_ratios,
linewidth=linewidth,
markersize=markersize,
kind=kind,
ncols=ncols,
hdi_prob=hdi_prob,
quartiles=quartiles,
rope=rope,
ridgeplot_overlap=ridgeplot_overlap,
ridgeplot_alpha=ridgeplot_alpha,
ridgeplot_kind=ridgeplot_kind,
ridgeplot_truncate=ridgeplot_truncate,
ridgeplot_quantiles=ridgeplot_quantiles,
textsize=textsize,
legend=legend,
labeller=labeller,
ess=ess,
r_hat=r_hat,
backend_kwargs=backend_kwargs,
backend_config=backend_config,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_forest", "forestplot", backend)
axes = plot(**plot_forest_kwargs)
return axes
|
def plot_forest(
data,
kind="forestplot",
model_names=None,
var_names=None,
filter_vars=None,
transform=None,
coords=None,
combined=False,
hdi_prob=None,
rope=None,
quartiles=True,
ess=False,
r_hat=False,
colors="cycle",
textsize=None,
linewidth=None,
markersize=None,
legend=True,
labeller=None,
ridgeplot_alpha=None,
ridgeplot_overlap=2,
ridgeplot_kind="auto",
ridgeplot_truncate=True,
ridgeplot_quantiles=None,
figsize=None,
ax=None,
backend=None,
backend_config=None,
backend_kwargs=None,
show=None,
):
"""Forest plot to compare HDI intervals from a number of distributions.
Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.
Parameters
----------
data: obj or list[obj]
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
kind: str
Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot"
model_names: list[str], optional
List with names for the models in the list of data. Useful when plotting more than one
dataset.
var_names: list[str], optional
List of variables to plot (defaults to None, which results in all variables plotted)
Prefix the variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like", interpret
var_names as substrings of the real variables names. If "regex", interpret var_names as
regular expressions on the real variables names. A la ``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e.the identity function)
coords: dict, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`.
combined: bool
Flag for combining multiple chains into a single chain. If ``False``(default), chains will
be plotted separately.
hdi_prob: float, optional
Plots highest posterior density interval for chosen percentage of density.
Defaults to `0.94`.
rope: tuple or dictionary of tuples
Lower and upper values of the Region Of Practical Equivalence. If a list with one interval
only is provided, the **rope** will be displayed across the y-axis. If more than one
interval is provided the length of the list should match the number of variables.
quartiles: bool, optional
Flag for plotting the interquartile range, in addition to the ``hdi_prob`` intervals.
Defaults to ``True``.
r_hat: bool, optional
Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False
ess: bool, optional
Flag for plotting the effective sample size. Defaults to ``False``.
colors: list or string, optional
list with valid matplotlib colors, one color per model. Alternatively a string can be passed.
If the string is `cycle`, it will automatically choose a color per model from matplotlib's
cycle. If a single color is passed, e.g. 'k', 'C2', 'red', this color will be used for all
models. Defaults to 'cycle'.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``.
linewidth: int
Line width throughout. If None it will be autoscaled based on ``figsize``.
markersize: int
Markersize throughout. If None it will be autoscaled based on ``figsize``.
legend : bool, optional
Show a legend with the color encoded model information.
Defaults to ``True`` if there are multiple models
labeller : labeller instance, optional
Class providing the method ``make_model_label`` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ridgeplot_alpha: float
Transparency for ridgeplot fill. If **0**, border is colored by model, otherwise
a `black` outline is used.
ridgeplot_overlap: float
Overlap height for ridgeplots.
ridgeplot_kind: string
By default ("auto") continuous variables are plotted using KDEs and discrete ones using
histograms. To override this use "hist" to plot histograms and "density" for KDEs
ridgeplot_truncate: bool
Whether to truncate densities according to the value of hdi_prob. Defaults to ``True``.
ridgeplot_quantiles: list
Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
Defaults to ``None``.
figsize: tuple
Figure size. If ``None``, it will be defined automatically.
ax: axes, optional
:class:`matplotlib.axes` or :class:`bokeh.plotting.figure`.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``.
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
gridspec: matplotlib GridSpec or bokeh figures
See Also
--------
plot_posterior: Plot Posterior densities in the style of John K. Kruschke’s book.
plot_density: Generate KDE plots for continuous variables and histograms for discrete ones.
Examples
--------
Forestplot
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered_data = az.load_arviz_data('non_centered_eight')
>>> axes = az.plot_forest(non_centered_data,
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Forestplot with multiple datasets
.. plot::
:context: close-figs
>>> centered_data = az.load_arviz_data('centered_eight')
>>> axes = az.plot_forest([non_centered_data, centered_data],
>>> model_names = ["non centered eight", "centered eight"],
>>> kind='forestplot',
>>> var_names=["^the"],
>>> filter_vars="regex",
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools models')
Forestplot with ropes
.. plot::
:context: close-figs
>>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}
>>> axes = az.plot_forest(non_centered_data,
>>> rope=rope,
>>> var_names='~tau',
>>> combined=True,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_overlap=3,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot non-truncated and with quantiles
.. plot::
:context: close-figs
>>> axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_truncate=False,
>>> ridgeplot_quantiles=[.25, .5, .75],
>>> ridgeplot_overlap=0.7,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
"""
if not isinstance(data, (list, tuple)):
data = [data]
if len(data) == 1:
legend = False
if coords is None:
coords = {}
if labeller is None:
labeller = NoModelLabeller() if legend else BaseLabeller()
datasets = [convert_to_dataset(datum) for datum in reversed(data)]
if transform is not None:
datasets = [transform(dataset) for dataset in datasets]
datasets = get_coords(
datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords
)
var_names = _var_names(var_names, datasets, filter_vars)
ncols, width_ratios = 1, [3]
if ess:
ncols += 1
width_ratios.append(1)
if r_hat:
ncols += 1
width_ratios.append(1)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
plot_forest_kwargs = dict(
ax=ax,
datasets=datasets,
var_names=var_names,
model_names=model_names,
combined=combined,
colors=colors,
figsize=figsize,
width_ratios=width_ratios,
linewidth=linewidth,
markersize=markersize,
kind=kind,
ncols=ncols,
hdi_prob=hdi_prob,
quartiles=quartiles,
rope=rope,
ridgeplot_overlap=ridgeplot_overlap,
ridgeplot_alpha=ridgeplot_alpha,
ridgeplot_kind=ridgeplot_kind,
ridgeplot_truncate=ridgeplot_truncate,
ridgeplot_quantiles=ridgeplot_quantiles,
textsize=textsize,
legend=legend,
labeller=labeller,
ess=ess,
r_hat=r_hat,
backend_kwargs=backend_kwargs,
backend_config=backend_config,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_forest", "forestplot", backend)
axes = plot(**plot_forest_kwargs)
return axes
|
7,823 |
def test_photodat_only(tmpdir):
endf_data = os.environ['OPENMC_ENDF_DATA']
filename = str(tmpdir.join('tmp.h5'))
p_file = 'photoat-{:03}_{}_000.endf'.format(1, 'H')
p_path = os.path.join(endf_data, 'photoat', p_file)
data=openmc.data.IncidentPhoton.from_endf(p_path)
data.export_to_hdf5(filename, 'w')
|
def test_photoatomic_data_only(run_in_tmpdir):
endf_data = os.environ['OPENMC_ENDF_DATA']
filename = 'tmp.h5'
p_file = 'photoat-{:03}_{}_000.endf'.format(1, 'H')
p_path = os.path.join(endf_data, 'photoat', p_file)
data=openmc.data.IncidentPhoton.from_endf(p_path)
data.export_to_hdf5(filename, 'w')
|
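The modified test above relies on a run_in_tmpdir fixture; the sketch below shows one plausible implementation of such a fixture (an assumption for illustration, not OpenMC's actual conftest), which simply runs the test from a fresh temporary working directory so a bare relative filename is safe.

import os
import pytest

@pytest.fixture
def run_in_tmpdir(tmp_path):
    """Run the decorated test from a fresh temporary working directory."""
    orig = os.getcwd()
    os.chdir(tmp_path)
    try:
        yield tmp_path
    finally:
        os.chdir(orig)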
47,402 |
def processor_class_from_name(class_name: str):
for module_name, processors in PROCESSOR_MAPPING_NAMES.items():
if class_name in processors:
module_name = model_type_to_module_name(module_name)
module = importlib.import_module(f".{module_name}", "transformers.models")
return getattr(module, class_name)
for _, processor in PROCESSOR_MAPPING._extra_content.items():
if getattr(processor, "__name__", None) == class_name:
return processor
return None
|
def processor_class_from_name(class_name: str):
for module_name, processors in PROCESSOR_MAPPING_NAMES.items():
if class_name in processors:
module_name = model_type_to_module_name(module_name)
module = importlib.import_module(f".{module_name}", "transformers.models")
return getattr(module, class_name)
for processor in PROCESSOR_MAPPING._extra_content.values():
if getattr(processor, "__name__", None) == class_name:
return processor
return None
|
25,553 |
def exclude_routes_from_channels(
route_states: List[RouteState], channel_ids: List[ChannelID]
) -> List[RouteState]:
""" Keeps only routes whose forward_channel is not in the given list. """
return [route for route in route_states if route.forward_channel_id not in channel_ids]
|
def filter_routes_by_channel_ids(
route_states: List[RouteState], channel_ids: List[ChannelID]
) -> List[RouteState]:
""" Keeps only routes whose forward_channel is not in the given list. """
return [route for route in route_states if route.forward_channel_id not in channel_ids]
|
42,695 |
def test_eth2_equivalent_eth_balances(database: DBHandler):
"""Test that the balances of ETH and ETH2 are same."""
balances = [
DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488326400),
asset=A_ETH,
amount='10',
usd_value='4517.4',
),
DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488326400),
asset=A_ETH2,
amount='10',
usd_value='4517.4',
),
]
database.add_multiple_balances(balances)
# Test query_timed_balances for ETH
with patch(
'rotkehlchen.db.dbhandler.DBHandler.get_settings',
return_value=DBSettings(eth_equivalent_eth2=True),
):
eth_balances = database.query_timed_balances(A_ETH)
assert len(eth_balances) == 2
eth2_balances = database.query_timed_balances(A_ETH2)
assert len(eth2_balances) == 1
eth_balances = database.query_timed_balances(A_ETH)
assert len(eth_balances) == 1
|
def test_eth2_equivalent_eth_time_balances(database: DBHandler):
"""Test that the balances of ETH and ETH2 are same."""
balances = [
DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488326400),
asset=A_ETH,
amount='10',
usd_value='4517.4',
),
DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488326400),
asset=A_ETH2,
amount='10',
usd_value='4517.4',
),
]
database.add_multiple_balances(balances)
# Test query_timed_balances for ETH
with patch(
'rotkehlchen.db.dbhandler.DBHandler.get_settings',
return_value=DBSettings(eth_equivalent_eth2=True),
):
eth_balances = database.query_timed_balances(A_ETH)
assert len(eth_balances) == 2
eth2_balances = database.query_timed_balances(A_ETH2)
assert len(eth2_balances) == 1
eth_balances = database.query_timed_balances(A_ETH)
assert len(eth_balances) == 1
|
42,964 |
def energies(samples: np.ndarray, wp: np.ndarray) -> list:
r"""Computes the energy of GBS samples in :math:`\text{cm}^{-1}` unit.
**Example usage:**
>>> samples = np.array([[1, 1, 0], [1, 0, 2]])
>>> wp = np.array([700.0, 600.0, 500.0])
>>> energies(samples, wp)
[1300.0, 1700.0]
Args:
samples (array): GBS samples
wp (array): normal mode frequencies in :math:`\text{cm}^{-1}`
Returns:
E (list): list of GBS sample energies in :math:`\text{cm}^{-1}`
"""
E = []
for sample in samples:
e = sum(sample * wp)
E.append(e)
return E
|
def energies(samples: np.ndarray, wp: np.ndarray) -> list:
r"""Computes the energy of each GBS sample in units of :math:`\text{cm}^{-1}`.
**Example usage:**
>>> samples = np.array([[1, 1, 0], [1, 0, 2]])
>>> wp = np.array([700.0, 600.0, 500.0])
>>> energies(samples, wp)
[1300.0, 1700.0]
Args:
samples (array): GBS samples
wp (array): normal mode frequencies in :math:`\text{cm}^{-1}`
Returns:
E (list): list of GBS sample energies in :math:`\text{cm}^{-1}`
"""
E = []
for sample in samples:
e = sum(sample * wp)
E.append(e)
return E
|
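A worked instance of the energy sum described in the docstring above: each sample energy is the dot product of the occupation numbers with the mode frequencies.

import numpy as np

samples = np.array([[1, 1, 0], [1, 0, 2]])
wp = np.array([700.0, 600.0, 500.0])   # mode frequencies in cm^-1

# [1, 1, 0] -> 700 + 600 = 1300;  [1, 0, 2] -> 700 + 2 * 500 = 1700.
E = [float(np.dot(s, wp)) for s in samples]
assert E == [1300.0, 1700.0]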
7,424 |
def unsupervised_wiener(image, psf, reg=None, user_params=None, is_real=True,
clip=True, *, random_state=None):
"""Unsupervised Wiener-Hunt deconvolution.
Return the deconvolution with a Wiener-Hunt approach, where the
hyperparameters are automatically estimated. The algorithm is a
stochastic iterative process (Gibbs sampler) described in the
reference below. See also ``wiener`` function.
Parameters
----------
image : (M, N) ndarray
The input degraded image.
psf : ndarray
The impulse response (input image's space) or the transfer
function (Fourier space). Both are accepted. The transfer
function is automatically recognized as being complex
(``np.iscomplexobj(psf)``).
reg : ndarray, optional
The regularisation operator. The Laplacian by default. It can
be an impulse response or a transfer function, as for the psf.
user_params : dict, optional
Dictionary of parameters for the Gibbs sampler. See below.
clip : boolean, optional
True by default. If true, pixel values of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility.
random_state : {None, int, `numpy.random.Generator`}, optional
If `random_state` is None the `numpy.random.Generator` singleton is
used.
If `random_state` is an int, a new ``Generator`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` instance then that
instance is used.
.. versionadded:: 0.19
Returns
-------
x_postmean : (M, N) ndarray
The deconvolved image (the posterior mean).
chains : dict
The keys ``noise`` and ``prior`` contain the chain list of
noise and prior precision respectively.
Other parameters
----------------
The keys of ``user_params`` are:
threshold : float
The stopping criterion: the norm of the difference between two
successive approximated solutions (empirical mean of object
samples, see Notes section). 1e-4 by default.
burnin : int
The number of samples to ignore before starting computation of the
mean. 15 by default.
min_num_iter : int
The minimum number of iterations. 30 by default.
max_num_iter : int
The maximum number of iterations if ``threshold`` is not
satisfied. 200 by default.
callback : callable (None by default)
A user provided callable to which is passed, if the function
exists, the current image sample for whatever purpose. The user
can store the sample, or compute other moments than the
mean. It has no influence on the algorithm execution and is
only for inspection.
Examples
--------
>>> from skimage import color, data, restoration
>>> img = color.rgb2gray(data.astronaut())
>>> from scipy.signal import convolve2d
>>> psf = np.ones((5, 5)) / 25
>>> img = convolve2d(img, psf, 'same')
>>> rng = np.random.default_rng()
>>> img += 0.1 * img.std() * rng.standard_normal(img.shape)
>>> deconvolved_img = restoration.unsupervised_wiener(img, psf)
Notes
-----
The estimated image is designed as the posterior mean of a
probability law (from a Bayesian analysis). The mean is defined as
a sum over all possible images weighted by their respective
probability. Given the size of the problem, the exact sum is not
tractable. This algorithm uses MCMC to draw images under the
posterior law. The practical idea is to draw mostly highly probable
images, since they make the biggest contribution to the mean;
conversely, the less probable images are drawn less often since
their contribution is low. Finally, the empirical mean of these
samples gives an estimation of the mean, which would become exact
with an infinite sample set.
References
----------
.. [1] François Orieux, Jean-François Giovannelli, and Thomas
Rodet, "Bayesian estimation of regularization and point
spread function parameters for Wiener-Hunt deconvolution",
J. Opt. Soc. Am. A 27, 1593-1607 (2010)
https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593
https://hal.archives-ouvertes.fr/hal-00674508/document
"""
if user_params is not None:
for s in ('max', 'min'):
if (s + '_iter') in user_params:
warning_msg = (
f'`{s}_iter` is a deprecated key for `user_params`. '
f'It will be removed in version 1.0. '
f'Use `{s}_num_iter` instead.'
)
warnings.warn(warning_msg, FutureWarning)
user_params[s + '_num_iter'] = user_params.pop(s + '_iter')
params = {'threshold': 1e-4, 'max_num_iter': 200,
'min_num_iter': 30, 'burnin': 15, 'callback': None}
params.update(user_params or {})
if reg is None:
reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
if not np.iscomplexobj(reg):
reg = uft.ir2tf(reg, image.shape, is_real=is_real)
float_type = _supported_float_type(image.dtype)
image = image.astype(float_type, copy=False)
psf = psf.real.astype(float_type, copy=False)
reg = reg.real.astype(float_type, copy=False)
if psf.shape != reg.shape:
trans_fct = uft.ir2tf(psf, image.shape, is_real=is_real)
else:
trans_fct = psf
# The mean of the object
x_postmean = np.zeros(trans_fct.shape, dtype=float_type)
# The previous computed mean in the iterative loop
prev_x_postmean = np.zeros(trans_fct.shape, dtype=float_type)
# Difference between two successive mean
delta = np.NAN
# Initial state of the chain
gn_chain, gx_chain = [1], [1]
# The correlation of the object in Fourier space (if size is big,
# this can reduce computation time in the loop)
areg2 = np.abs(reg) ** 2
atf2 = np.abs(trans_fct) ** 2
# The Fourier transform may change the image.size attribute, so we
# store it.
if is_real:
data_spectrum = uft.urfft2(image)
else:
data_spectrum = uft.ufft2(image)
rng = np.random.default_rng(random_state)
# Gibbs sampling
for iteration in range(params['max_num_iter']):
# Sample of Eq. 27 p(circX^k | gn^k-1, gx^k-1, y).
# weighting (correlation in direct space)
precision = gn_chain[-1] * atf2 + gx_chain[-1] * areg2 # Eq. 29
# Note: Use astype instead of dtype argument to standard_normal to get
# similar random values across precisions, as needed for
# reference data used by test_unsupervised_wiener.
_rand1 = rng.standard_normal(data_spectrum.shape)
_rand1 = _rand1.astype(float_type, copy=False)
_rand2 = rng.standard_normal(data_spectrum.shape)
_rand2 = _rand2.astype(float_type, copy=False)
excursion = np.sqrt(0.5 / precision) * (_rand1 + 1j * _rand2)
# mean Eq. 30 (RLS for fixed gn, gamma0 and gamma1 ...)
wiener_filter = gn_chain[-1] * np.conj(trans_fct) / precision
# sample of X in Fourier space
x_sample = wiener_filter * data_spectrum + excursion
if params['callback']:
params['callback'](x_sample)
# sample of Eq. 31 p(gn | x^k, gx^k, y)
gn_chain.append(rng.gamma(image.size / 2,
2 / uft.image_quad_norm(data_spectrum
- x_sample
* trans_fct)))
# sample of Eq. 31 p(gx | x^k, gn^k-1, y)
gx_chain.append(rng.gamma((image.size - 1) / 2,
2 / uft.image_quad_norm(x_sample * reg)))
# current empirical average
if iteration > params['burnin']:
x_postmean = prev_x_postmean + x_sample
if iteration > (params['burnin'] + 1):
current = x_postmean / (iteration - params['burnin'])
previous = prev_x_postmean / (iteration - params['burnin'] - 1)
delta = (np.sum(np.abs(current - previous))
/ np.sum(np.abs(x_postmean))
/ (iteration - params['burnin']))
prev_x_postmean = x_postmean
# stop of the algorithm
if (
(iteration > params['min_num_iter'])
and (delta < params['threshold'])
):
break
# Empirical average \approx POSTMEAN Eq. 44
x_postmean = x_postmean / (iteration - params['burnin'])
if is_real:
x_postmean = uft.uirfft2(x_postmean, shape=image.shape)
else:
x_postmean = uft.uifft2(x_postmean)
if clip:
x_postmean[x_postmean > 1] = 1
x_postmean[x_postmean < -1] = -1
return (x_postmean, {'noise': gn_chain, 'prior': gx_chain})
|
def unsupervised_wiener(image, psf, reg=None, user_params=None, is_real=True,
clip=True, *, random_state=None):
"""Unsupervised Wiener-Hunt deconvolution.
Return the deconvolution with a Wiener-Hunt approach, where the
hyperparameters are automatically estimated. The algorithm is a
stochastic iterative process (Gibbs sampler) described in the
reference below. See also ``wiener`` function.
Parameters
----------
image : (M, N) ndarray
The input degraded image.
psf : ndarray
The impulse response (input image's space) or the transfer
function (Fourier space). Both are accepted. The transfer
function is automatically recognized as being complex
(``np.iscomplexobj(psf)``).
reg : ndarray, optional
The regularisation operator. The Laplacian by default. It can
be an impulse response or a transfer function, as for the psf.
user_params : dict, optional
Dictionary of parameters for the Gibbs sampler. See below.
clip : boolean, optional
True by default. If true, pixel values of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility.
random_state : {None, int, `numpy.random.Generator`}, optional
If `random_state` is None the `numpy.random.Generator` singleton is
used.
If `random_state` is an int, a new ``Generator`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` instance then that
instance is used.
.. versionadded:: 0.19
Returns
-------
x_postmean : (M, N) ndarray
The deconvolved image (the posterior mean).
chains : dict
The keys ``noise`` and ``prior`` contain the chain list of
noise and prior precision respectively.
Other parameters
----------------
The keys of ``user_params`` are:
threshold : float
The stopping criterion: the norm of the difference between two
successive approximated solutions (empirical mean of object
samples, see Notes section). 1e-4 by default.
burnin : int
The number of samples to ignore before starting computation of the
mean. 15 by default.
min_num_iter : int
The minimum number of iterations. 30 by default.
max_num_iter : int
The maximum number of iterations if ``threshold`` is not
satisfied. 200 by default.
callback : callable (None by default)
A user provided callable to which is passed, if the function
exists, the current image sample for whatever purpose. The user
can store the sample, or compute other moments than the
mean. It has no influence on the algorithm execution and is
only for inspection.
Examples
--------
>>> from skimage import color, data, restoration
>>> img = color.rgb2gray(data.astronaut())
>>> from scipy.signal import convolve2d
>>> psf = np.ones((5, 5)) / 25
>>> img = convolve2d(img, psf, 'same')
>>> rng = np.random.default_rng()
>>> img += 0.1 * img.std() * rng.standard_normal(img.shape)
>>> deconvolved_img = restoration.unsupervised_wiener(img, psf)
Notes
-----
The estimated image is designed as the posterior mean of a
probability law (from a Bayesian analysis). The mean is defined as
a sum over all possible images weighted by their respective
probability. Given the size of the problem, the exact sum is not
tractable. This algorithm uses MCMC to draw images under the
posterior law. The practical idea is to draw mostly highly probable
images, since they make the biggest contribution to the mean;
conversely, the less probable images are drawn less often since
their contribution is low. Finally, the empirical mean of these
samples gives an estimation of the mean, which would become exact
with an infinite sample set.
References
----------
.. [1] François Orieux, Jean-François Giovannelli, and Thomas
Rodet, "Bayesian estimation of regularization and point
spread function parameters for Wiener-Hunt deconvolution",
J. Opt. Soc. Am. A 27, 1593-1607 (2010)
https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593
https://hal.archives-ouvertes.fr/hal-00674508
"""
if user_params is not None:
for s in ('max', 'min'):
if (s + '_iter') in user_params:
warning_msg = (
f'`{s}_iter` is a deprecated key for `user_params`. '
f'It will be removed in version 1.0. '
f'Use `{s}_num_iter` instead.'
)
warnings.warn(warning_msg, FutureWarning)
user_params[s + '_num_iter'] = user_params.pop(s + '_iter')
params = {'threshold': 1e-4, 'max_num_iter': 200,
'min_num_iter': 30, 'burnin': 15, 'callback': None}
params.update(user_params or {})
if reg is None:
reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
if not np.iscomplexobj(reg):
reg = uft.ir2tf(reg, image.shape, is_real=is_real)
float_type = _supported_float_type(image.dtype)
image = image.astype(float_type, copy=False)
psf = psf.real.astype(float_type, copy=False)
reg = reg.real.astype(float_type, copy=False)
if psf.shape != reg.shape:
trans_fct = uft.ir2tf(psf, image.shape, is_real=is_real)
else:
trans_fct = psf
# The mean of the object
x_postmean = np.zeros(trans_fct.shape, dtype=float_type)
# The previous computed mean in the iterative loop
prev_x_postmean = np.zeros(trans_fct.shape, dtype=float_type)
# Difference between two successive mean
delta = np.NAN
# Initial state of the chain
gn_chain, gx_chain = [1], [1]
# The correlation of the object in Fourier space (if size is big,
# this can reduce computation time in the loop)
areg2 = np.abs(reg) ** 2
atf2 = np.abs(trans_fct) ** 2
# The Fourier transform may change the image.size attribute, so we
# store it.
if is_real:
data_spectrum = uft.urfft2(image)
else:
data_spectrum = uft.ufft2(image)
rng = np.random.default_rng(random_state)
# Gibbs sampling
for iteration in range(params['max_num_iter']):
# Sample of Eq. 27 p(circX^k | gn^k-1, gx^k-1, y).
# weighting (correlation in direct space)
precision = gn_chain[-1] * atf2 + gx_chain[-1] * areg2 # Eq. 29
# Note: Use astype instead of dtype argument to standard_normal to get
# similar random values across precisions, as needed for
# reference data used by test_unsupervised_wiener.
_rand1 = rng.standard_normal(data_spectrum.shape)
_rand1 = _rand1.astype(float_type, copy=False)
_rand2 = rng.standard_normal(data_spectrum.shape)
_rand2 = _rand2.astype(float_type, copy=False)
excursion = np.sqrt(0.5 / precision) * (_rand1 + 1j * _rand2)
# mean Eq. 30 (RLS for fixed gn, gamma0 and gamma1 ...)
wiener_filter = gn_chain[-1] * np.conj(trans_fct) / precision
# sample of X in Fourier space
x_sample = wiener_filter * data_spectrum + excursion
if params['callback']:
params['callback'](x_sample)
# sample of Eq. 31 p(gn | x^k, gx^k, y)
gn_chain.append(rng.gamma(image.size / 2,
2 / uft.image_quad_norm(data_spectrum
- x_sample
* trans_fct)))
# sample of Eq. 31 p(gx | x^k, gn^k-1, y)
gx_chain.append(rng.gamma((image.size - 1) / 2,
2 / uft.image_quad_norm(x_sample * reg)))
# current empirical average
if iteration > params['burnin']:
x_postmean = prev_x_postmean + x_sample
if iteration > (params['burnin'] + 1):
current = x_postmean / (iteration - params['burnin'])
previous = prev_x_postmean / (iteration - params['burnin'] - 1)
delta = (np.sum(np.abs(current - previous))
/ np.sum(np.abs(x_postmean))
/ (iteration - params['burnin']))
prev_x_postmean = x_postmean
# stop of the algorithm
if (
(iteration > params['min_num_iter'])
and (delta < params['threshold'])
):
break
# Empirical average \approx POSTMEAN Eq. 44
x_postmean = x_postmean / (iteration - params['burnin'])
if is_real:
x_postmean = uft.uirfft2(x_postmean, shape=image.shape)
else:
x_postmean = uft.uifft2(x_postmean)
if clip:
x_postmean[x_postmean > 1] = 1
x_postmean[x_postmean < -1] = -1
return (x_postmean, {'noise': gn_chain, 'prior': gx_chain})
|
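A standalone sketch of just the user_params deprecation shim at the top of the function above: an old max_iter key is renamed to max_num_iter and a FutureWarning is emitted. The full deconvolution is not re-run here; the helper name is local to this sketch.

import warnings

def _translate_iter_keys(user_params):
    # Mirror of the shim above: old *_iter keys become *_num_iter with a warning.
    for s in ('max', 'min'):
        if (s + '_iter') in user_params:
            warnings.warn(f'`{s}_iter` is deprecated; use `{s}_num_iter` instead.',
                          FutureWarning)
            user_params[s + '_num_iter'] = user_params.pop(s + '_iter')
    return user_params

params = {'threshold': 1e-4, 'max_num_iter': 200, 'min_num_iter': 30,
          'burnin': 15, 'callback': None}
params.update(_translate_iter_keys({'max_iter': 50}))
assert params['max_num_iter'] == 50 and 'max_iter' not in params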
55,630 |
def _solarize(input: torch.Tensor, thresholds: Union[float, torch.Tensor] = 0.5) -> torch.Tensor:
r""" For each pixel in the image, select the pixel if the value is less than the threshold.
Otherwise, invert it, i.e. replace the pixel value ``p`` with ``1.0 - p``.
Args:
input (torch.Tensor): image or batched images to solarize.
thresholds (float or torch.Tensor): solarize thresholds.
If int or one element tensor, input will be solarized across the whole batch.
If 1-d tensor, input will be solarized element-wise, len(thresholds) == len(input).
Returns:
torch.Tensor: Solarized images.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(thresholds, (float, torch.Tensor,)):
raise TypeError(f"The factor should be either a float or torch.Tensor. "
f"Got {type(thresholds)}")
if isinstance(thresholds, torch.Tensor) and len(thresholds.shape) != 0:
assert input.size(0) == len(thresholds) and len(thresholds.shape) == 1, \
f"thresholds must be a 1-d vector of shape ({input.size(0)},). Got {thresholds}"
# TODO: I am not happy about this line, but no easy to do batch-wise operation
thresholds = torch.stack([x.expand(*input.shape[1:]) for x in thresholds])
thresholds = thresholds.to(input.device).to(input.dtype)
return torch.where(input < thresholds, input, 1.0 - input)
|
def _solarize(input: torch.Tensor, thresholds: Union[float, torch.Tensor] = 0.5) -> torch.Tensor:
r""" For each pixel in the image, select the pixel if the value is less than the threshold.
Otherwise, invert it, i.e. replace the pixel value ``p`` with ``1.0 - p``.
Args:
input (torch.Tensor): image or batched images to solarize.
thresholds (float or torch.Tensor): solarize thresholds.
If int or one element tensor, input will be solarized across the whole batch.
If 1-d tensor, input will be solarized element-wise, len(thresholds) == len(input).
Returns:
torch.Tensor: Solarized images.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(thresholds, (float, torch.Tensor,)):
raise TypeError(f"The factor should be either a float or torch.Tensor. "
f"Got {type(thresholds)}")
if isinstance(thresholds, torch.Tensor) and len(thresholds.shape) != 0:
assert input.size(0) == len(thresholds) and len(thresholds.shape) == 1, \
f"thresholds must be a 1-d vector of shape ({input.size(0)},). Got {thresholds}"
# TODO: I am not happy about this line, but no easy to do batch-wise operation
thresholds = thresholds.to(input.device).to(input.dtype)
thresholds = torch.stack([x.expand(*input.shape[1:]) for x in thresholds])
return torch.where(input < thresholds, input, 1.0 - input)
|
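A minimal usage sketch of per-sample thresholds with the cast-then-expand order used in the modified version above; the helper is a local re-statement for a standalone run, not kornia's public API.

import torch

def solarize_sketch(images, thresholds=0.5):
    # Cast the 1-d thresholds first, then expand one threshold per sample,
    # matching the order used in the modified version above.
    if isinstance(thresholds, torch.Tensor) and thresholds.ndim != 0:
        thresholds = thresholds.to(images.device).to(images.dtype)
        thresholds = torch.stack([t.expand(*images.shape[1:]) for t in thresholds])
    return torch.where(images < thresholds, images, 1.0 - images)

images = torch.tensor([[[0.2, 0.8]], [[0.2, 0.8]]])   # batch of two 1x2 "images"
out = solarize_sketch(images, torch.tensor([0.5, 0.9]))
# Sample 0 inverts the 0.8 pixel (threshold 0.5); sample 1 keeps both (threshold 0.9).
assert torch.allclose(out, torch.tensor([[[0.2, 0.2]], [[0.2, 0.8]]]))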
6,972 |
def validate_google_sheets_url(url):
from urllib.parse import urlparse
u = urlparse(url)
if u.scheme != "https" and u.netloc != "docs.google.com" and "/spreadsheets/" not in u.path:
frappe.throw(
_('"{0}" is not a valid Google Sheets URL').format(url),
title=_("Invalid URL"),
)
|
def validate_google_sheets_url(url):
from urllib.parse import urlparse
u = urlparse(url)
if u.scheme != "https" or u.netloc != "docs.google.com" or "/spreadsheets/" not in u.path:
frappe.throw(
_('"{0}" is not a valid Google Sheets URL').format(url),
title=_("Invalid URL"),
)
|
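The validation above hinges on rejecting a URL as soon as any single component is wrong, i.e. the negated conditions must be joined with `or` (equivalently, `not (scheme ok and host ok and path ok)`). A small framework-free sketch, using a plain boolean return instead of frappe.throw purely for illustration, shows the check in isolation.

from urllib.parse import urlparse

def is_google_sheets_url(url):
    # Accept only when scheme, host and path are all correct.
    u = urlparse(url)
    return u.scheme == "https" and u.netloc == "docs.google.com" and "/spreadsheets/" in u.path

assert is_google_sheets_url("https://docs.google.com/spreadsheets/d/abc123/edit")
assert not is_google_sheets_url("http://docs.google.com/spreadsheets/d/abc123")   # wrong scheme
assert not is_google_sheets_url("https://example.com/spreadsheets/d/abc123")      # wrong host
assert not is_google_sheets_url("https://docs.google.com/documents/d/abc123")     # wrong path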
33,780 |
def start(detached: bool = False,
http_host: Optional[str] = DEFAULT_HTTP_HOST,
http_port: int = DEFAULT_HTTP_PORT,
http_middlewares: List[Any] = [],
http_options: Optional[Union[dict, HTTPOptions]] = None,
dedicated_cpu: bool = False) -> Client:
"""Initialize a serve instance.
By default, the instance will be scoped to the lifetime of the returned
Client object (or when the script exits). If detached is set to True, the
instance will instead persist until client.shutdown() is called and clients
to it can be connected using serve.connect(). This is only relevant if
connecting to a long-running Ray cluster (e.g., with address="auto").
Args:
        detached (bool): Whether or not the instance should be detached from this
script.
http_host (Optional[str]): Deprecated, use http_options instead.
http_port (int): Deprecated, use http_options instead.
http_middlewares (list): Deprecated, use http_options instead.
http_options (Optional[Dict, serve.HTTPOptions]): Configuration options
for HTTP proxy. You can pass in a dictionary or HTTPOptions object
with fields:
- host(str, None): Host for HTTP servers to listen on. Defaults to
"127.0.0.1". To expose Serve publicly, you probably want to set
this to "0.0.0.0".
- port(int): Port for HTTP server. Defaults to 8000.
- middlewares(list): A list of Starlette middlewares that will be
applied to the HTTP servers in the cluster. Defaults to [].
- location(str, serve.config.DeploymentMode): The deployment
location of HTTP servers:
- "HeadOnly": start one HTTP server on the head node. Serve
assumes the head node is the node you executed serve.start
on. This is the default.
- "EveryNode": start one HTTP server per node.
- "NoServer" or None: disable HTTP server.
- dedicated_cpu(bool): Whether to set `num_cpus=1` for each
internal HTTP proxy actor. Defaults to False (`num_cpus=0`).
dedicated_cpu (bool): Whether to set `num_cpus=1` for the internal
Serve controller actor. Defaults to False (`num_cpus=0`).
"""
if ((http_host != DEFAULT_HTTP_HOST) or (http_port != DEFAULT_HTTP_PORT)
or (len(http_middlewares) != 0)):
if http_options is not None:
raise ValueError(
"You cannot specify both `http_options` and any of the "
"`http_host`, `http_port`, and `http_middlewares` arguments. "
"`http_options` is preferred.")
else:
warn(
"`http_host`, `http_port`, `http_middlewares` are deprecated. "
"Please use serve.start(http_options={'host': ..., "
"'port': ..., middlewares': ...}) instead.",
DeprecationWarning,
)
# Initialize ray if needed.
if not ray.is_initialized():
ray.init()
register_custom_serializers()
# Try to get serve controller if it exists
if detached:
controller_name = SERVE_CONTROLLER_NAME
try:
ray.get_actor(controller_name)
raise RayServeException("Called serve.start(detached=True) but a "
"detached instance is already running. "
"Please use serve.connect() to connect to "
"the running instance instead.")
except ValueError:
pass
else:
controller_name = format_actor_name(SERVE_CONTROLLER_NAME,
get_random_letters())
if isinstance(http_options, dict):
http_options = HTTPOptions.parse_obj(http_options)
if http_options is None:
http_options = HTTPOptions(
host=http_host, port=http_port, middlewares=http_middlewares)
controller = ServeController.options(
num_cpus=(1 if dedicated_cpu else 0),
name=controller_name,
lifetime="detached" if detached else None,
max_restarts=-1,
max_task_retries=-1,
# Pin Serve controller on the head node.
resources={
get_current_node_resource_key(): 0.01
},
).remote(
controller_name,
http_options,
detached=detached,
)
proxy_handles = ray.get(controller.get_http_proxies.remote())
if len(proxy_handles) > 0:
try:
ray.get(
[handle.ready.remote() for handle in proxy_handles.values()],
timeout=HTTP_PROXY_TIMEOUT,
)
except ray.exceptions.GetTimeoutError:
raise TimeoutError(
"HTTP proxies not available after {HTTP_PROXY_TIMEOUT}s.")
client = Client(controller, controller_name, detached=detached)
_set_global_client(client)
return client
|
def start(detached: bool = False,
http_host: Optional[str] = DEFAULT_HTTP_HOST,
http_port: int = DEFAULT_HTTP_PORT,
http_middlewares: List[Any] = [],
http_options: Optional[Union[dict, HTTPOptions]] = None,
dedicated_cpu: bool = False,) -> Client:
"""Initialize a serve instance.
By default, the instance will be scoped to the lifetime of the returned
Client object (or when the script exits). If detached is set to True, the
instance will instead persist until client.shutdown() is called and clients
to it can be connected using serve.connect(). This is only relevant if
connecting to a long-running Ray cluster (e.g., with address="auto").
Args:
        detached (bool): Whether or not the instance should be detached from this
script.
http_host (Optional[str]): Deprecated, use http_options instead.
http_port (int): Deprecated, use http_options instead.
http_middlewares (list): Deprecated, use http_options instead.
http_options (Optional[Dict, serve.HTTPOptions]): Configuration options
for HTTP proxy. You can pass in a dictionary or HTTPOptions object
with fields:
- host(str, None): Host for HTTP servers to listen on. Defaults to
"127.0.0.1". To expose Serve publicly, you probably want to set
this to "0.0.0.0".
- port(int): Port for HTTP server. Defaults to 8000.
- middlewares(list): A list of Starlette middlewares that will be
applied to the HTTP servers in the cluster. Defaults to [].
- location(str, serve.config.DeploymentMode): The deployment
location of HTTP servers:
- "HeadOnly": start one HTTP server on the head node. Serve
assumes the head node is the node you executed serve.start
on. This is the default.
- "EveryNode": start one HTTP server per node.
- "NoServer" or None: disable HTTP server.
- dedicated_cpu(bool): Whether to set `num_cpus=1` for each
internal HTTP proxy actor. Defaults to False (`num_cpus=0`).
dedicated_cpu (bool): Whether to set `num_cpus=1` for the internal
Serve controller actor. Defaults to False (`num_cpus=0`).
"""
if ((http_host != DEFAULT_HTTP_HOST) or (http_port != DEFAULT_HTTP_PORT)
or (len(http_middlewares) != 0)):
if http_options is not None:
raise ValueError(
"You cannot specify both `http_options` and any of the "
"`http_host`, `http_port`, and `http_middlewares` arguments. "
"`http_options` is preferred.")
else:
warn(
"`http_host`, `http_port`, `http_middlewares` are deprecated. "
"Please use serve.start(http_options={'host': ..., "
"'port': ..., middlewares': ...}) instead.",
DeprecationWarning,
)
# Initialize ray if needed.
if not ray.is_initialized():
ray.init()
register_custom_serializers()
# Try to get serve controller if it exists
if detached:
controller_name = SERVE_CONTROLLER_NAME
try:
ray.get_actor(controller_name)
raise RayServeException("Called serve.start(detached=True) but a "
"detached instance is already running. "
"Please use serve.connect() to connect to "
"the running instance instead.")
except ValueError:
pass
else:
controller_name = format_actor_name(SERVE_CONTROLLER_NAME,
get_random_letters())
if isinstance(http_options, dict):
http_options = HTTPOptions.parse_obj(http_options)
if http_options is None:
http_options = HTTPOptions(
host=http_host, port=http_port, middlewares=http_middlewares)
controller = ServeController.options(
num_cpus=(1 if dedicated_cpu else 0),
name=controller_name,
lifetime="detached" if detached else None,
max_restarts=-1,
max_task_retries=-1,
# Pin Serve controller on the head node.
resources={
get_current_node_resource_key(): 0.01
},
).remote(
controller_name,
http_options,
detached=detached,
)
proxy_handles = ray.get(controller.get_http_proxies.remote())
if len(proxy_handles) > 0:
try:
ray.get(
[handle.ready.remote() for handle in proxy_handles.values()],
timeout=HTTP_PROXY_TIMEOUT,
)
except ray.exceptions.GetTimeoutError:
raise TimeoutError(
"HTTP proxies not available after {HTTP_PROXY_TIMEOUT}s.")
client = Client(controller, controller_name, detached=detached)
_set_global_client(client)
return client
|
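For the API version documented above, a typical call passes the HTTP configuration through http_options rather than the deprecated keyword arguments. The snippet below is a usage sketch only, with the host, port and location values taken from the docstring; it assumes a Ray installation exposing this Serve API.

from ray import serve

# Usage sketch for the signature documented above; serve.start initializes Ray if needed.
client = serve.start(
    detached=True,                  # persist until client.shutdown(); reconnect via serve.connect()
    dedicated_cpu=True,             # reserve one CPU for the Serve controller actor
    http_options={
        "host": "0.0.0.0",          # expose the HTTP proxy publicly
        "port": 8000,
        "location": "EveryNode",    # one HTTP server per node
    },
)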
54,986 |
def hf_state(n_electrons, n_spin_orbitals):
r"""Generates the occupation-number vector representing the Hartree-Fock state.
The many-particle wave function in the Hartree-Fock (HF) approximation is a `Slater determinant
<https://en.wikipedia.org/wiki/Slater_determinant>`_. In Fock space, a Slater determinant
for :math:`N` electrons is represented by the occupation-number vector:
    .. math::
        \vert {\bf n} \rangle = \vert n_1, n_2, \dots, n_\mathrm{orbs} \rangle,
        n_i = \left\lbrace \begin{array}{ll} 1 & i \leq N \\ 0 & i > N \end{array} \right.,
where :math:`n_i` indicates the occupation of the :math:`ith`-orbital.
Args:
n_electrons (int): Number of electrons. If an active space is defined, 'n_electrons'
is the number of active electrons.
n_spin_orbitals (int): Number of spin-orbitals. If an active space is defined,
'n_spin_orbitals' is the number of active spin-orbitals.
Returns:
array: NumPy array containing the vector :math:`\vert {\bf n} \rangle`
**Example**
>>> state = hf_state(2, 6)
>>> print(state)
[1 1 0 0 0 0]
"""
if n_electrons <= 0:
raise ValueError(
"The number of active electrons has to be larger than zero; got 'n_electrons' = {}".format(
n_electrons
)
)
if n_electrons > n_spin_orbitals:
raise ValueError(
"The number of active orbitals cannot be smaller than the number of active electrons;"
" got 'n_spin_orbitals'={} < 'n_electrons'={}".format(n_spin_orbitals, n_electrons)
)
state = np.where(np.arange(n_spin_orbitals) < n_electrons, 1, 0)
return np.array(state)
|
def hf_state(n_electrons, n_spin_orbitals):
r"""Generates the occupation-number vector representing the Hartree-Fock state.
The many-particle wave function in the Hartree-Fock (HF) approximation is a `Slater determinant
<https://en.wikipedia.org/wiki/Slater_determinant>`_. In Fock space, a Slater determinant
for :math:`N` electrons is represented by the occupation-number vector:
    .. math::
        \vert {\bf n} \rangle = \vert n_1, n_2, \dots, n_\mathrm{orbs} \rangle,
        n_i = \left\lbrace \begin{array}{ll} 1 & i \leq N \\ 0 & i > N \end{array} \right.,
where :math:`n_i` indicates the occupation of the :math:`ith`-orbital.
Args:
n_electrons (int): Number of electrons. If an active space is defined, this
is the number of active electrons.
n_spin_orbitals (int): Number of spin-orbitals. If an active space is defined,
this is the number of active spin-orbitals.
Returns:
array: NumPy array containing the vector :math:`\vert {\bf n} \rangle`
**Example**
>>> state = hf_state(2, 6)
>>> print(state)
[1 1 0 0 0 0]
"""
if n_electrons <= 0:
raise ValueError(
"The number of active electrons has to be larger than zero; got 'n_electrons' = {}".format(
n_electrons
)
)
if n_electrons > n_spin_orbitals:
raise ValueError(
"The number of active orbitals cannot be smaller than the number of active electrons;"
" got 'n_spin_orbitals'={} < 'n_electrons'={}".format(n_spin_orbitals, n_electrons)
)
state = np.where(np.arange(n_spin_orbitals) < n_electrons, 1, 0)
return np.array(state)
|
3,317 |
def get_performance_facets(
query,
params,
orderby=None,
aggregate_column="duration",
aggregate_function="avg",
limit=20,
referrer=None,
):
"""
High-level API for getting 'facet map' results for performance data
Performance facets are high frequency tags and the aggregate duration of
their most frequent values
query (str) Filter query string to create conditions from.
params (Dict[str, str]) Filtering parameters with start, end, project_id, environment
limit (int) The number of records to fetch.
referrer (str|None) A referrer string to help locate the origin of this query.
Returns Sequence[FacetResult]
"""
with sentry_sdk.start_span(
op="discover.discover", description="facets.filter_transform"
) as span:
span.set_data("query", query)
snuba_filter = get_filter(query, params)
# Resolve the public aliases into the discover dataset names.
snuba_filter, translated_columns = resolve_discover_aliases(snuba_filter)
# Exclude tracing tags as they are noisy and generally not helpful.
# TODO(markus): Tracing tags are no longer written but may still reside in DB.
excluded_tags = ["tags_key", "NOT IN", ["trace", "trace.ctx", "trace.span", "project"]]
# Sampling keys for multi-project results as we don't need accuracy
# with that much data.
sample = len(snuba_filter.filter_keys["project_id"]) > 2
with sentry_sdk.start_span(op="discover.discover", description="facets.frequent_tags"):
# Get the tag keys with the highest deviation
key_names = raw_query(
aggregations=[["count", None, "count"]],
start=snuba_filter.start,
end=snuba_filter.end,
conditions=snuba_filter.conditions,
filter_keys=snuba_filter.filter_keys,
orderby=["-count", "tags_key"],
groupby="tags_key",
# TODO(Kevan): Check using having vs where before mainlining
having=[excluded_tags],
dataset=Dataset.Discover,
limit=limit,
referrer=referrer,
turbo=sample,
)
top_tags = [r["tags_key"] for r in key_names["data"]]
if not top_tags:
return []
results = []
snuba_filter.conditions.append([aggregate_column, "IS NOT NULL", None])
# Only enable sampling if over 10000 values
sampling_enabled = True if (key_names["data"][0]["count"] > 10000) else False
options_sample_rate = options.get("discover2.tags_performance_facet_sample_rate") or 0.1
sample_rate = options_sample_rate if sampling_enabled else None
max_aggregate_tags = 20
aggregate_tags = []
for i, tag in enumerate(top_tags):
if i >= len(top_tags) - max_aggregate_tags:
aggregate_tags.append(tag)
if orderby is None:
orderby = []
if aggregate_tags:
with sentry_sdk.start_span(op="discover.discover", description="facets.aggregate_tags"):
conditions = snuba_filter.conditions
conditions.append(["tags_key", "IN", aggregate_tags])
tag_values = raw_query(
aggregations=[
[aggregate_function, aggregate_column, "aggregate"],
["count", None, "count"],
],
conditions=conditions,
start=snuba_filter.start,
end=snuba_filter.end,
filter_keys=snuba_filter.filter_keys,
orderby=orderby + ["tags_key"],
groupby=["tags_key", "tags_value"],
dataset=Dataset.Discover,
referrer=referrer,
sample=sample_rate,
turbo=sample_rate is not None,
limitby=[TOP_VALUES_DEFAULT_LIMIT, "tags_key"],
)
results.extend(
[
PerformanceFacetResult(
r["tags_key"], r["tags_value"], int(r["aggregate"]), int(r["count"])
)
for r in tag_values["data"]
]
)
return results
|
def get_performance_facets(
query,
params,
orderby=None,
aggregate_column="duration",
aggregate_function="avg",
limit=20,
referrer=None,
):
"""
High-level API for getting 'facet map' results for performance data
Performance facets are high frequency tags and the aggregate duration of
their most frequent values
query (str) Filter query string to create conditions from.
params (Dict[str, str]) Filtering parameters with start, end, project_id, environment
limit (int) The number of records to fetch.
referrer (str|None) A referrer string to help locate the origin of this query.
Returns Sequence[FacetResult]
"""
with sentry_sdk.start_span(
op="discover.discover", description="facets.filter_transform"
) as span:
span.set_data("query", query)
snuba_filter = get_filter(query, params)
# Resolve the public aliases into the discover dataset names.
snuba_filter, translated_columns = resolve_discover_aliases(snuba_filter)
# Exclude tracing tags as they are noisy and generally not helpful.
# TODO(markus): Tracing tags are no longer written but may still reside in DB.
excluded_tags = ["tags_key", "NOT IN", ["trace", "trace.ctx", "trace.span", "project"]]
# Sampling keys for multi-project results as we don't need accuracy
# with that much data.
sample = len(snuba_filter.filter_keys["project_id"]) > 2
with sentry_sdk.start_span(op="discover.discover", description="facets.frequent_tags"):
# Get the most relevant tag keys
key_names = raw_query(
aggregations=[["count", None, "count"]],
start=snuba_filter.start,
end=snuba_filter.end,
conditions=snuba_filter.conditions,
filter_keys=snuba_filter.filter_keys,
orderby=["-count", "tags_key"],
groupby="tags_key",
# TODO(Kevan): Check using having vs where before mainlining
having=[excluded_tags],
dataset=Dataset.Discover,
limit=limit,
referrer=referrer,
turbo=sample,
)
top_tags = [r["tags_key"] for r in key_names["data"]]
if not top_tags:
return []
results = []
snuba_filter.conditions.append([aggregate_column, "IS NOT NULL", None])
# Only enable sampling if over 10000 values
sampling_enabled = True if (key_names["data"][0]["count"] > 10000) else False
options_sample_rate = options.get("discover2.tags_performance_facet_sample_rate") or 0.1
sample_rate = options_sample_rate if sampling_enabled else None
max_aggregate_tags = 20
aggregate_tags = []
for i, tag in enumerate(top_tags):
if i >= len(top_tags) - max_aggregate_tags:
aggregate_tags.append(tag)
if orderby is None:
orderby = []
if aggregate_tags:
with sentry_sdk.start_span(op="discover.discover", description="facets.aggregate_tags"):
conditions = snuba_filter.conditions
conditions.append(["tags_key", "IN", aggregate_tags])
tag_values = raw_query(
aggregations=[
[aggregate_function, aggregate_column, "aggregate"],
["count", None, "count"],
],
conditions=conditions,
start=snuba_filter.start,
end=snuba_filter.end,
filter_keys=snuba_filter.filter_keys,
orderby=orderby + ["tags_key"],
groupby=["tags_key", "tags_value"],
dataset=Dataset.Discover,
referrer=referrer,
sample=sample_rate,
turbo=sample_rate is not None,
limitby=[TOP_VALUES_DEFAULT_LIMIT, "tags_key"],
)
results.extend(
[
PerformanceFacetResult(
r["tags_key"], r["tags_value"], int(r["aggregate"]), int(r["count"])
)
for r in tag_values["data"]
]
)
return results
|
32,468 |
def arguments_handler():
""" Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description='Linking GitHub PR to Jira Issue.')
parser.add_argument('-l', '--pr_link', help='The PR url.')
parser.add_argument('-n', '--pr_num', help='The PR number.')
parser.add_argument('-t', '--pr_title', help='The PR Title.')
parser.add_argument('-b', '--pr_body', help='the content of the PR description.')
parser.add_argument('-m', '--is_merged', help='boolean. Whether the PR was merged or not.')
parser.add_argument('-u', '--username', help='The instance username.')
parser.add_argument('-s', '--password', help='The instance password.')
parser.add_argument('-url', '--url', help='The instance url.')
return parser.parse_args()
|
def arguments_handler():
""" Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description='Linking GitHub PR to Jira Issue.')
parser.add_argument('-l', '--pr_link', help='The PR url.')
parser.add_argument('-n', '--pr_num', help='The PR number.')
parser.add_argument('-t', '--pr_title', help='The PR Title.')
parser.add_argument('-b', '--pr_body', help='The content of the PR description.')
parser.add_argument('-m', '--is_merged', help='boolean. Whether the PR was merged or not.')
parser.add_argument('-u', '--username', help='The instance username.')
parser.add_argument('-s', '--password', help='The instance password.')
parser.add_argument('-url', '--url', help='The instance url.')
return parser.parse_args()
|
31,538 |
def file_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
"""
Returns file's reputation
"""
files = argToList(args.get('file'))
since = arg_to_number(args.get('since'), arg_name='since')
until = arg_to_number(args.get('until'), arg_name='until')
limit = arg_to_number(args.get('limit'), arg_name='limit')
reliability = params.get('feedReliability')
results: List[CommandResults] = list()
for file in files:
if get_hash_type(file) not in ('sha256', 'sha1', 'md5'): # check file's validity
raise ValueError(f'Hash "{file}" is not of type SHA-256, SHA-1 or MD5')
try:
raw_response = client.file(file, since, until, limit)
except Exception as exception:
# If anything happens, just keep going
demisto.debug(f'Could not process file: "{file}"\n {str(exception)}')
continue
if data := raw_response.get('data'):
score = calculate_dbot_score(reputation_data=data, params=params)
for data_entry in data:
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability,
malicious_description=data_entry.get('description')
)
readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for file hash: {file}:', data_entry)
file_indicator = Common.File(
dbot_score=dbot_score,
file_type=data_entry.get('sample_type'),
size=data_entry.get('sample_size'),
md5=data_entry.get('md5'),
sha1=data_entry.get('sha1'),
sha256=data_entry.get('sha256'),
ssdeep=data_entry.get('ssdeep'),
tags=data_entry.get('tags')
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.File',
outputs_key_field='id',
outputs=data_entry,
indicator=file_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
else: # no data
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=VENDOR_NAME,
score=Common.DBotScore.NONE,
reliability=reliability
)
readable_output = f'{CONTEXT_PREFIX} does not have details about file: {file} \n'
file_indicator = Common.File(
dbot_score=dbot_score
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.File',
outputs_key_field='id',
outputs=data,
indicator=file_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
return results
|
def file_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
"""
Returns file's reputation
"""
files = argToList(args.get('file'))
since = arg_to_number(args.get('since'), arg_name='since')
until = arg_to_number(args.get('until'), arg_name='until')
limit = arg_to_number(args.get('limit'), arg_name='limit')
reliability = params.get('feedReliability')
results: List[CommandResults] = list()
for file in files:
if get_hash_type(file) not in ('sha256', 'sha1', 'md5'): # check file's validity
raise ValueError(f'Hash "{file}" is not of type SHA-256, SHA-1 or MD5')
try:
raw_response = client.file(file, since, until, limit)
except Exception as exception:
# If anything happens, just keep going
demisto.debug(f'Could not process file: "{file}"\n {str(exception)}')
continue
if data := raw_response.get('data'):
score = calculate_dbot_score(reputation_data=data, params=params)
for data_entry in data:
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability,
malicious_description=data_entry.get('description')
)
readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for file hash {file}', data_entry)
file_indicator = Common.File(
dbot_score=dbot_score,
file_type=data_entry.get('sample_type'),
size=data_entry.get('sample_size'),
md5=data_entry.get('md5'),
sha1=data_entry.get('sha1'),
sha256=data_entry.get('sha256'),
ssdeep=data_entry.get('ssdeep'),
tags=data_entry.get('tags')
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.File',
outputs_key_field='id',
outputs=data_entry,
indicator=file_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
else: # no data
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=VENDOR_NAME,
score=Common.DBotScore.NONE,
reliability=reliability
)
readable_output = f'{CONTEXT_PREFIX} does not have details about file: {file} \n'
file_indicator = Common.File(
dbot_score=dbot_score
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.File',
outputs_key_field='id',
outputs=data,
indicator=file_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
return results
|
11,802 |
def screen(image1, image2):
"""
Superimposes two inverted images on top of each other. At least one of the
images must be "1" mode.
.. code-block:: python
out = MAX - ((MAX - image1) * (MAX - image2) / MAX)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_screen(image2.im))
|
def screen(image1, image2):
"""
Superimposes two inverted images on top of each other. At least one of the
images must have mode "1".
.. code-block:: python
out = MAX - ((MAX - image1) * (MAX - image2) / MAX)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_screen(image2.im))
|
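The screen formula in the docstring above is easy to reproduce directly on 8-bit values. The NumPy sketch below uses arbitrary example pixels (it is not the PIL implementation) and evaluates out = MAX - ((MAX - image1) * (MAX - image2) / MAX) with MAX = 255.

import numpy as np

MAX = 255
image1 = np.array([[0, 128, 255]], dtype=np.uint16)   # uint16 to avoid overflow in the product
image2 = np.array([[64, 64, 64]], dtype=np.uint16)

out = MAX - ((MAX - image1) * (MAX - image2) / MAX)
print(out.astype(np.uint8))   # [[ 64 159 255]]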
20,228 |
def get_secondary_nav_items(request, current_page):
# If the parent page of the current page is a BrowsePage or a
# BrowseFilterablePage, then use that as the top-level page for the
# purposes of the navigation sidebar. Otherwise, treat the current page
# as top-level.
parent = current_page.get_parent().specific
if instanceOfBrowseOrFilterablePages(parent):
page = parent
else:
page = current_page
# If there's no appropriate page version (e.g. not published for a sharing
# request), then return no sidebar at all.
if not page:
return [], False
# Return a boolean about whether or not the current page has Browse
# children
has_children = False
if page.secondary_nav_exclude_sibling_pages:
pages = [page]
else:
pages = filter(
lambda p: instanceOfBrowseOrFilterablePages(p.specific),
page.get_appropriate_siblings()
)
if wagtail.VERSION > (2, 8):
site = Site.find_for_request(request)
else:
site = request.site
nav_items = []
for sibling in pages:
if page.id == sibling.id:
sibling = page
else:
sibling = sibling
item_selected = current_page.pk == sibling.pk
item = {
'title': sibling.title,
'slug': sibling.slug,
'url': sibling.relative_url(site),
'children': [],
'active': item_selected,
'expanded': item_selected,
}
if page.id == sibling.id:
visible_children = list(filter(
lambda c: (
instanceOfBrowseOrFilterablePages(c) and
(c.live)
),
sibling.get_children().specific()
))
if len(visible_children):
has_children = True
for child in visible_children:
child_selected = current_page.pk == child.pk
if child_selected:
item['expanded'] = True
item['children'].append({
'title': child.title,
'slug': child.slug,
'url': child.relative_url(site),
'active': child_selected,
})
nav_items.append(item)
# Add `/process/` segment to BAH journey page nav urls.
# TODO: Remove this when redirects for `/process/` urls
# are added after 2018 homebuying campaign.
journey_urls = (
'/owning-a-home/prepare',
'/owning-a-home/explore',
'/owning-a-home/compare',
'/owning-a-home/close',
'/owning-a-home/sources',
)
if current_page.relative_url(site).startswith(journey_urls):
for item in nav_items:
item['url'] = item['url'].replace(
'owning-a-home', 'owning-a-home/process')
for child in item['children']:
child['url'] = child['url'].replace(
'owning-a-home', 'owning-a-home/process')
# END TODO
return nav_items, has_children
|
def get_secondary_nav_items(request, current_page):
# If the parent page of the current page is a BrowsePage or a
# BrowseFilterablePage, then use that as the top-level page for the
# purposes of the navigation sidebar. Otherwise, treat the current page
# as top-level.
parent = current_page.get_parent().specific
if instanceOfBrowseOrFilterablePages(parent):
page = parent
else:
page = current_page
# If there's no appropriate page version (e.g. not published for a sharing
# request), then return no sidebar at all.
if not page:
return [], False
# Return a boolean about whether or not the current page has Browse
# children
has_children = False
if page.secondary_nav_exclude_sibling_pages:
pages = [page]
else:
pages = filter(
lambda p: instanceOfBrowseOrFilterablePages(p.specific),
page.get_appropriate_siblings()
)
if wagtail.VERSION >= (2, 9):
site = Site.find_for_request(request)
else:
site = request.site
nav_items = []
for sibling in pages:
if page.id == sibling.id:
sibling = page
else:
sibling = sibling
item_selected = current_page.pk == sibling.pk
item = {
'title': sibling.title,
'slug': sibling.slug,
'url': sibling.relative_url(site),
'children': [],
'active': item_selected,
'expanded': item_selected,
}
if page.id == sibling.id:
visible_children = list(filter(
lambda c: (
instanceOfBrowseOrFilterablePages(c) and
(c.live)
),
sibling.get_children().specific()
))
if len(visible_children):
has_children = True
for child in visible_children:
child_selected = current_page.pk == child.pk
if child_selected:
item['expanded'] = True
item['children'].append({
'title': child.title,
'slug': child.slug,
'url': child.relative_url(site),
'active': child_selected,
})
nav_items.append(item)
# Add `/process/` segment to BAH journey page nav urls.
# TODO: Remove this when redirects for `/process/` urls
# are added after 2018 homebuying campaign.
journey_urls = (
'/owning-a-home/prepare',
'/owning-a-home/explore',
'/owning-a-home/compare',
'/owning-a-home/close',
'/owning-a-home/sources',
)
if current_page.relative_url(site).startswith(journey_urls):
for item in nav_items:
item['url'] = item['url'].replace(
'owning-a-home', 'owning-a-home/process')
for child in item['children']:
child['url'] = child['url'].replace(
'owning-a-home', 'owning-a-home/process')
# END TODO
return nav_items, has_children
|
32,596 |
def main():
"""Parse params and runs command functions."""
params = demisto.params()
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
headers = {
"Authorization": params.get('api_key')
}
client = CyCognitoFeedClient(
base_url=BASE_URL,
headers=headers,
verify=verify_certificate,
proxy=proxy
)
if command == 'test-module':
return_results(test_module(client, params))
elif command == 'cycognito-get-indicators':
args = demisto.args()
remove_nulls_from_dictionary(trim_spaces_from_args(args))
return_results(get_indicators_command(client, args))
elif command == 'fetch-indicators':
last_run = demisto.getLastRun()
next_run, indicators = fetch_indicators_command(client, params, last_run)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
demisto.info(f"[+] FeedCyCognito: Last run object to be used for next set of indicators is: {next_run}")
demisto.setLastRun(next_run)
else:
raise NotImplementedError(f'Command {command} is not implemented')
except Exception as err:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {command} command.\nError:\n{str(err)}')
|
def main():
"""Parse params and runs command functions."""
params = demisto.params()
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
headers = {
"Authorization": params.get('api_key')
}
client = CyCognitoFeedClient(
base_url=BASE_URL,
headers=headers,
verify=verify_certificate,
proxy=proxy
)
if command == 'test-module':
return_results(test_module(client, params))
elif command == 'cycognito-get-indicators':
args = demisto.args()
remove_nulls_from_dictionary(trim_spaces_from_args(args))
return_results(get_indicators_command(client, args))
elif command == 'fetch-indicators':
last_run = demisto.getLastRun()
next_run, indicators = fetch_indicators_command(client, params, last_run)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
demisto.info(f"[+] FeedCyCognito: Last run object to be used for next set of indicators is: {next_run}")
demisto.setLastRun(next_run)
else:
raise NotImplementedError(f'Command {command} is not implemented')
except Exception as err:
return_error(f'Failed to execute {command} command.\nError:\n{err}', error=err)
|
27,267 |
def test_date_time_literals():
ibis.date(2022, 2, 4)
ibis.time(16, 20, 00)
ibis.timestamp(2022, 2, 4, 16, 20, 00)
|
def test_date_time_literals():
assert ibis.date(2022, 2, 4).type() == dt.date
ibis.time(16, 20, 00)
ibis.timestamp(2022, 2, 4, 16, 20, 00)
|
31,578 |
def create_standard_ip_context(ip_data):
command_results = CommandResults(
outputs_prefix=f"IP",
outputs_key_field="Address",
outputs=ip_data,
readable_output=tableToMarkdown(f"IP Address(es):", ip_data)
)
return_results(command_results)
|
def create_standard_ip_context(ip_data):
command_results = CommandResults(
outputs_prefix="IP",
outputs_key_field="Address",
outputs=ip_data,
readable_output=tableToMarkdown(f"IP Address(es):", ip_data)
)
return_results(command_results)
|
9,458 |
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(
net_id=dict(type='str'),
net_name=dict(type='str'),
name=dict(type='str'),
subnet=dict(type='str'),
gateway_ip=dict(type='str'),
state=dict(type='str', choices=['absent', 'query', 'present'], default='present'),
fixed_ip_assignments=dict(type='list'),
reserved_ip_ranges=dict(type='list'),
route_id=dict(type='str'),
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False,
)
meraki = MerakiModule(module, function='static_route')
module.params['follow_redirects'] = 'all'
payload = None
query_urls = {'static_route': '/networks/{net_id}/staticRoutes'}
query_one_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
create_urls = {'static_route': '/networks/{net_id}/staticRoutes/'}
update_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
delete_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
meraki.url_catalog['get_all'].update(query_urls)
meraki.url_catalog['get_one'].update(query_one_urls)
meraki.url_catalog['create'] = create_urls
meraki.url_catalog['update'] = update_urls
meraki.url_catalog['delete'] = delete_urls
if not meraki.params['org_name'] and not meraki.params['org_id']:
meraki.fail_json(msg='org_name or org_id parameters are required')
if not meraki.params['net_name'] and not meraki.params['net_id']:
meraki.fail_json(msg='net_name or net_id parameters are required')
if meraki.params['net_name'] and meraki.params['net_id']:
meraki.fail_json(msg='net_name and net_id are mutually exclusive')
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
return meraki.result
# Construct payload
if meraki.params['state'] == 'present':
payload = dict()
if meraki.params['net_name']:
payload['name'] = meraki.params['net_name']
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if not org_id:
org_id = meraki.get_org_id(meraki.params['org_name'])
nets = meraki.get_nets(org_id=org_id)
net_id = meraki.params['net_id']
if net_id is None:
net_id = meraki.get_net_id(net_name=meraki.params['net_name'], data=nets)
if meraki.params['state'] == 'query':
if meraki.params['route_id'] is not None:
meraki.result['data'] = get_static_route(meraki, net_id, meraki.params['route_id'])
else:
meraki.result['data'] = get_static_routes(meraki, net_id)
elif meraki.params['state'] == 'present':
payload = dict()
payload['name'] = meraki.params['name']
payload['subnet'] = meraki.params['subnet']
payload['gatewayIp'] = meraki.params['gateway_ip']
if meraki.params['fixed_ip_assignments'] is not None:
payload['fixedIpAssignments'] = meraki.params['fixed_ip_assignments']
if meraki.params['reserved_ip_ranges'] is not None:
payload['reserved_ip_ranges'] = meraki.params['reserved_ip_ranges']
if meraki.params['route_id']:
existing_route = get_static_route(meraki, net_id, meraki.params['route_id'])
if meraki.is_update_required(existing_route, payload, optional_ignore=['id']):
path = meraki.construct_path('update', net_id=net_id, custom={'route_id': meraki.params['route_id']})
meraki.result['data'] = meraki.request(path, method="PUT", payload=json.dumps(payload))
meraki.result['changed'] = True
else:
path = meraki.construct_path('create', net_id=net_id)
meraki.result['data'] = meraki.request(path, method="POST", payload=json.dumps(payload))
meraki.result['changed'] = True
elif meraki.params['state'] == 'absent':
path = meraki.construct_path('delete', net_id=net_id, custom={'route_id': meraki.params['route_id']})
meraki.result['data'] = meraki.request(path, method='DELETE')
meraki.result['changed'] = True
# in the event of a successful module execution, you will want to
# simple AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
|
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(
net_id=dict(type='str'),
net_name=dict(type='str'),
name=dict(type='str'),
subnet=dict(type='str'),
gateway_ip=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
fixed_ip_assignments=dict(type='list'),
reserved_ip_ranges=dict(type='list'),
route_id=dict(type='str'),
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False,
)
meraki = MerakiModule(module, function='static_route')
module.params['follow_redirects'] = 'all'
payload = None
query_urls = {'static_route': '/networks/{net_id}/staticRoutes'}
query_one_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
create_urls = {'static_route': '/networks/{net_id}/staticRoutes/'}
update_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
delete_urls = {'static_route': '/networks/{net_id}/staticRoutes/{route_id}'}
meraki.url_catalog['get_all'].update(query_urls)
meraki.url_catalog['get_one'].update(query_one_urls)
meraki.url_catalog['create'] = create_urls
meraki.url_catalog['update'] = update_urls
meraki.url_catalog['delete'] = delete_urls
if not meraki.params['org_name'] and not meraki.params['org_id']:
meraki.fail_json(msg='org_name or org_id parameters are required')
if not meraki.params['net_name'] and not meraki.params['net_id']:
meraki.fail_json(msg='net_name or net_id parameters are required')
if meraki.params['net_name'] and meraki.params['net_id']:
meraki.fail_json(msg='net_name and net_id are mutually exclusive')
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
return meraki.result
# Construct payload
if meraki.params['state'] == 'present':
payload = dict()
if meraki.params['net_name']:
payload['name'] = meraki.params['net_name']
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if not org_id:
org_id = meraki.get_org_id(meraki.params['org_name'])
nets = meraki.get_nets(org_id=org_id)
net_id = meraki.params['net_id']
if net_id is None:
net_id = meraki.get_net_id(net_name=meraki.params['net_name'], data=nets)
if meraki.params['state'] == 'query':
if meraki.params['route_id'] is not None:
meraki.result['data'] = get_static_route(meraki, net_id, meraki.params['route_id'])
else:
meraki.result['data'] = get_static_routes(meraki, net_id)
elif meraki.params['state'] == 'present':
payload = dict()
payload['name'] = meraki.params['name']
payload['subnet'] = meraki.params['subnet']
payload['gatewayIp'] = meraki.params['gateway_ip']
if meraki.params['fixed_ip_assignments'] is not None:
payload['fixedIpAssignments'] = meraki.params['fixed_ip_assignments']
if meraki.params['reserved_ip_ranges'] is not None:
payload['reserved_ip_ranges'] = meraki.params['reserved_ip_ranges']
if meraki.params['route_id']:
existing_route = get_static_route(meraki, net_id, meraki.params['route_id'])
if meraki.is_update_required(existing_route, payload, optional_ignore=['id']):
path = meraki.construct_path('update', net_id=net_id, custom={'route_id': meraki.params['route_id']})
meraki.result['data'] = meraki.request(path, method="PUT", payload=json.dumps(payload))
meraki.result['changed'] = True
else:
path = meraki.construct_path('create', net_id=net_id)
meraki.result['data'] = meraki.request(path, method="POST", payload=json.dumps(payload))
meraki.result['changed'] = True
elif meraki.params['state'] == 'absent':
path = meraki.construct_path('delete', net_id=net_id, custom={'route_id': meraki.params['route_id']})
meraki.result['data'] = meraki.request(path, method='DELETE')
meraki.result['changed'] = True
# in the event of a successful module execution, you will want to
# simple AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
|
29,823 |
def parse_args(argv) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Dumps information about locally running services."
)
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
return parser.parse_args(argv)
|
def parse_args(argv: Optional[Sequence[str]]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Dumps information about locally running services."
)
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
return parser.parse_args(argv)
|
51,956 |
def env_status_setup_parser(subparser):
"""print whether there is an active environment"""
subparser.add_argument(
'-x', '--exit_code', action='store_true', default=False,
help='Exit with nonzero status if no environment is active')
|
def env_status_setup_parser(subparser):
"""print whether there is an active environment"""
subparser.add_argument(
'-e', '--error', action='store_true', default=False,
help='Exit with non-zero status if no environment is active')
|
4,281 |
def vertex_depths(inst, info=None, picks=None, trans=None, mode='dist',
verbose=None):
"""Compute source depths as distances between vertices and nearest sensor.
Parameters
----------
inst : instance of Forward | instance of SourceSpaces
The object to select vertices from.
info : instance of Info | None
The info structure that contains information about the channels with
respect to which to compute distances.
picks : array-like of int | None
Indices of sensors to include in distance calculations. If `None``
(default) then only MEG channels are used.
trans : str | instance of Transform | None
Either the full path to the head<->MRI transform ``*-trans.fif`` file
produced during coregistration, or the Transformation itself. If trans
is None, an identity matrix is assumed. Only needed when ``inst`` is a
source space in MRI coordinates.
mode : str
How to compute source depth. 'dist' computes Euclidean distance
between vertices and nearest sensors.
verbose : bool | str | int | None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
    depth : array of shape (n_vertices,)
The depths of source space vertices with respect to sensors.
"""
from .forward import Forward
if isinstance(inst, Forward):
info = inst['info']
src = inst['src']
elif isinstance(inst, SourceSpaces):
src = inst
if info is None:
raise ValueError('You need to specify an Info object with '
'information about the channels.')
src = inst
# Load the head<->MRI transform if necessary
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
if trans is None:
raise ValueError('Source space is in MRI coordinates, but no '
'head<->MRI transform was given. Please specify'
'the full path to the appropriate *-trans.fif '
'file as the "trans" parameter.')
if isinstance(trans, string_types):
trans = read_trans(trans, return_all=True)
last_exp = None
for trans in trans: # we got at least 1
try:
trans = _ensure_trans(trans, 'head', 'mri')
except Exception as exp:
last_exp = exp
else:
break
else:
raise last_exp
src_trans = invert_transform(_ensure_trans(trans, 'head', 'mri'))
print('Transform!')
else:
src_trans = Transform('head', 'head') # Identity transform
dev_to_head = _ensure_trans(info['dev_head_t'], 'meg', 'head')
# Select channels to be used for distance calculations
if picks is None:
picks = pick_types(info, meg=True)
if len(picks) > 0:
logger.info('Using MEG channels')
else:
logger.info('Using EEG channels')
picks = pick_types(info, eeg=True)
# get vertex position in same coordinates as for sensors below
src_pos = np.vstack([
apply_trans(src_trans, s['rr'][s['inuse'].astype(np.bool)])
for s in src
])
# get sensor positions
sensor_pos = []
for ch in picks:
# MEG channels are in device coordinates, translate them to head
if channel_type(info, ch) in ['mag', 'grad']:
sensor_pos.append(apply_trans(dev_to_head,
info['chs'][ch]['loc'][:3]))
else:
sensor_pos.append(info['chs'][ch]['loc'][:3])
sensor_pos = np.array(sensor_pos)
# minimum distances per vertex
depths = distance.cdist(sensor_pos, src_pos).min(axis=0)
return depths
|
def vertex_depths(inst, info=None, picks=None, trans=None, mode='dist',
verbose=None):
"""Compute source depths as distances between vertices and nearest sensor.
Parameters
----------
inst : instance of Forward | instance of SourceSpaces
The object to select vertices from.
info : instance of Info | None
The info structure that contains information about the channels with
respect to which to compute distances.
picks : array-like of int | None
Indices of sensors to include in distance calculations. If `None``
(default) then only MEG channels are used.
trans : str | instance of Transform | None
Either the full path to the head<->MRI transform ``*-trans.fif`` file
produced during coregistration, or the Transformation itself. If trans
is None, an identity matrix is assumed. Only needed when ``inst`` is a
source space in MRI coordinates.
mode : str
How to compute source depth. 'dist' computes Euclidean distance
between vertices and nearest sensors.
verbose : bool | str | int | None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
    depth : array of shape (n_vertices,)
The depths of source space vertices with respect to sensors.
"""
from .forward import Forward
if isinstance(inst, Forward):
info = inst['info']
src = inst['src']
elif isinstance(inst, SourceSpaces):
src = inst
if info is None:
raise ValueError('You need to specify an Info object with '
'information about the channels.')
src = inst
# Load the head<->MRI transform if necessary
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
src_trans = _get_trans(trans, allow_none=False)
else:
src_trans = Transform('head', 'head') # Identity transform
dev_to_head = _ensure_trans(info['dev_head_t'], 'meg', 'head')
# Select channels to be used for distance calculations
if picks is None:
picks = pick_types(info, meg=True)
if len(picks) > 0:
logger.info('Using MEG channels')
else:
logger.info('Using EEG channels')
picks = pick_types(info, eeg=True)
# get vertex position in same coordinates as for sensors below
src_pos = np.vstack([
apply_trans(src_trans, s['rr'][s['inuse'].astype(np.bool)])
for s in src
])
# get sensor positions
sensor_pos = []
for ch in picks:
# MEG channels are in device coordinates, translate them to head
if channel_type(info, ch) in ['mag', 'grad']:
sensor_pos.append(apply_trans(dev_to_head,
info['chs'][ch]['loc'][:3]))
else:
sensor_pos.append(info['chs'][ch]['loc'][:3])
sensor_pos = np.array(sensor_pos)
# minimum distances per vertex
depths = distance.cdist(sensor_pos, src_pos).min(axis=0)
return depths
|
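The core of the depth computation above is a sensor-to-vertex distance matrix reduced along the sensor axis. The standalone sketch below uses random positions, purely for illustration, to show the same cdist(...).min(axis=0) step.

import numpy as np
from scipy.spatial import distance

rng = np.random.default_rng(42)
sensor_pos = rng.normal(scale=0.1, size=(30, 3))     # 30 made-up sensor positions (metres)
src_pos = rng.normal(scale=0.05, size=(1000, 3))     # 1000 made-up source-space vertices

# One distance per (sensor, vertex) pair, then keep the nearest sensor per vertex.
depths = distance.cdist(sensor_pos, src_pos).min(axis=0)
print(depths.shape)   # (1000,)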
42,218 |
def setup(bot: Bot) -> None:
"""Load `Source` cog."""
bot.add_cog(BotSource(bot))
|
def setup(bot: Bot) -> None:
"""Load the BotSource cog."""
bot.add_cog(BotSource(bot))
|
4,202 |
def _channel_frequencies(raw):
"""Return the light frequency for each channel."""
picks = _picks_to_idx(raw.info, 'fnirs_od')
freqs = np.empty_like(picks)
for ii in picks:
ch_name_info = re.match(r'S(\d+)-D(\d+) (\d+)',
raw.info['chs'][ii]['ch_name'])
freqs[ii] = ch_name_info.groups()[2]
return freqs
|
def _channel_frequencies(raw):
"""Return the light frequency for each channel."""
picks = _picks_to_idx(raw.info, 'fnirs_od')
freqs = np.empty(picks.size, int)
for ii in picks:
ch_name_info = re.match(r'S(\d+)-D(\d+) (\d+)',
raw.info['chs'][ii]['ch_name'])
freqs[ii] = ch_name_info.groups()[2]
return freqs
|
17,727 |
def RB_to_CHARMM(c0, c1, c2, c3, c4, c5):
"""Converts Ryckaert-Bellemans (RB) type dihedrals to CHARMM type
or
RB_torsions = c0 + c1*Cos[Psi] + c2*Cos[Psi]^2 + c3*CosPsi]^3 + c4*Cos[Psi]^4 + c5*Cos[5*Psi]^5
where Psi= t-Pi = t - 180 degress
Parameters
----------
c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol)
converts to:
CHARMM_torsions =
= K0 * (1 + Cos[n0*(t) - (d0)] ) + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
= K0 + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
Returns
-------
    K0, K1, K2, K3, K4, K5, n0, n1, n2, n3, n4, n5, d0, d1, d2, d3, d4, and d5 : Charmm coefficients (in kcal/mol)
CHARMM_ dihedral coeffs : np.matrix, shape=(6,3)
Array containing the CHARMM dihedral coeffs [[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3],
[K4, n4, d4], [K5, n5, d5]] (in kcal/mol)
"""
# see below or the long version is, K0 = (c0 + c2 / 2 + 3 / 8 * c4) - K1 - K2 - K3 - K4 - K5
K0 = (c0 -c1 - c3 - c4/4 - c5)
K1 = (+c1 + 3/4 * c3 + 5/8 * c5)
K2 = (+(1/2) * c2 + 1/2 * c4)
K3 = (+(1/4) * c3 + 5/16 * c5)
K4 = (+(1/8) * c4)
K5 = (+(1/16) * c5)
n0 = 0
n1 = 1
n2 = 2
n3 = 3
n4 = 4
n5 = 5
d0 = 90
d1 = 180
d2 = 0
d3 = 180
d4 = 0
d5 = 180
return np.matrix([[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]])
|
def RB_to_CHARMM(c0, c1, c2, c3, c4, c5):
"""Converts Ryckaert-Bellemans (RB) type dihedrals to CHARMM type
or
RB_torsions = c0 + c1*Cos[Psi] + c2*Cos[Psi]^2 + c3*CosPsi]^3 + c4*Cos[Psi]^4 + c5*Cos[5*Psi]^5
where Psi= t-Pi = t - 180 degrees
Parameters
----------
c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol)
converts to:
CHARMM_torsions =
= K0 * (1 + Cos[n0*(t) - (d0)] ) + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
= K0 + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
Returns
-------
    K0, K1, K2, K3, K4, K5, n0, n1, n2, n3, n4, n5, d0, d1, d2, d3, d4, and d5 : Charmm coefficients (in kcal/mol)
CHARMM_ dihedral coeffs : np.matrix, shape=(6,3)
Array containing the CHARMM dihedral coeffs [[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3],
[K4, n4, d4], [K5, n5, d5]] (in kcal/mol)
"""
# see below or the long version is, K0 = (c0 + c2 / 2 + 3 / 8 * c4) - K1 - K2 - K3 - K4 - K5
K0 = (c0 -c1 - c3 - c4/4 - c5)
K1 = (+c1 + 3/4 * c3 + 5/8 * c5)
K2 = (+(1/2) * c2 + 1/2 * c4)
K3 = (+(1/4) * c3 + 5/16 * c5)
K4 = (+(1/8) * c4)
K5 = (+(1/16) * c5)
n0 = 0
n1 = 1
n2 = 2
n3 = 3
n4 = 4
n5 = 5
d0 = 90
d1 = 180
d2 = 0
d3 = 180
d4 = 0
d5 = 180
return np.matrix([[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]])
|
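Calling the converter on a sample set of RB coefficients makes the K-coefficient formulas concrete: only the first column of the returned matrix depends on the input, while the multiplicities and phases are fixed. The sketch below assumes RB_to_CHARMM as defined above is in scope, and the coefficient values are arbitrary illustrative numbers, not a real force-field entry.

import numpy as np

# Illustrative RB coefficients in kcal/mol (not taken from any real force field).
c0, c1, c2, c3, c4, c5 = 2.0, -1.0, 0.5, -0.25, 0.1, 0.0

charmm = RB_to_CHARMM(c0, c1, c2, c3, c4, c5)
K = np.asarray(charmm)[:, 0]
# Same formulas as in the function body:
assert np.isclose(K[0], c0 - c1 - c3 - c4 / 4 - c5)
assert np.isclose(K[1], c1 + 3 / 4 * c3 + 5 / 8 * c5)
assert np.isclose(K[5], c5 / 16)
print(charmm)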
57,934 |
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, int],
first_fetch_time: int,
watcher_group_uids: Optional[str], severity: str, last_alert_uid: str
) -> Tuple[str, Dict[str, int], List[dict]]:
# Get the last fetch time, if exists
# last_run is a dict with a single key, called last_fetch
last_fetch: int = last_run.get('last_fetch', 0)
# Handle first fetch time
if last_fetch == 0:
# if missing, use what provided via first_fetch_time
last_fetch = first_fetch_time * 1000
else:
# otherwise use the stored last fetch
last_fetch = int(last_fetch)
# for type checking, making sure that latest_created_time is int
latest_created_time = cast(int, last_fetch)
# Initialize an empty list of incidents to return
# Each incident is a dict with a string as a key
incidents: List[Dict[str, Any]] = []
# Get the CSV list of severities from severity
# severity = ','.join(INTEL471_SEVERITIES[INTEL471_SEVERITIES.index(severity):])
alerts_wrapper: Dict = client.search_alerts(
watcher_group_uids=watcher_group_uids,
max_results=max_results,
start_time=last_fetch,
last_alert_uid=last_alert_uid
)
latest_alert_uid: str = ''
if alerts_wrapper.get('alerts'):
watcher_groups: List = []
if alerts_wrapper.get('watcherGroups'):
watcher_groups = alerts_wrapper.get('watcherGroups', [])
alerts: List = alerts_wrapper.get('alerts', [])
for alert in alerts:
            # If there is no created_time, fall back to epoch (0). We use time in ms so we must
# convert it from the Titan API response
incident_created_time = int(alert.get('foundTime', '0'))
# to prevent duplicates, we are only adding incidents with creation_time > last fetched incident
# if last_fetch:
# if incident_created_time <= last_fetch:
# continue
incident_name: str = compose_incident_title(alert)
titan_url: str = compose_titan_url(alert)
watcher_group_description, watcher_description = compose_incident_watcher_details(alert, watcher_groups)
incident_details: str = compose_incident_details(alert, watcher_groups)
incident = {
'name': incident_name,
'details': incident_details,
'occurred': timestamp_to_datestring(incident_created_time),
'rawJSON': json.dumps(alert),
'type': INCIDENT_TYPE, # Map to a specific XSOAR incident Type
'severity': convert_to_demisto_severity(alert.get('severity', 'Medium')),
'CustomFields': {
'intel471feedtitanurl': titan_url,
'intel471feedtitanwatchergroup': watcher_group_description,
'intel471feedtitanwatcher': watcher_description
}
}
incidents.append(incident)
latest_alert_uid = alert.get('uid', '')
# Update last run and add incident if the incident is newer than last fetch
if incident_created_time > latest_created_time:
latest_created_time = incident_created_time
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': latest_created_time}
return latest_alert_uid, next_run, incidents
|
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, int],
first_fetch_time: int,
watcher_group_uids: Optional[str], severity: str, last_alert_uid: str
) -> Tuple[str, Dict[str, int], List[dict]]:
# Get the last fetch time, if exists
# last_run is a dict with a single key, called last_fetch
last_fetch: int = last_run.get('last_fetch', 0)
# Handle first fetch time
if last_fetch == 0:
# if missing, use what provided via first_fetch_time
last_fetch = first_fetch_time * 1000
else:
# otherwise use the stored last fetch
last_fetch = int(last_fetch)
# for type checking, making sure that latest_created_time is int
latest_created_time = cast(int, last_fetch)
# Initialize an empty list of incidents to return
# Each incident is a dict with a string as a key
incidents: List[Dict[str, Any]] = []
# Get the CSV list of severities from severity
# severity = ','.join(INTEL471_SEVERITIES[INTEL471_SEVERITIES.index(severity):])
alerts_wrapper: Dict = client.search_alerts(
watcher_group_uids=watcher_group_uids,
max_results=max_results,
start_time=last_fetch,
last_alert_uid=last_alert_uid
)
latest_alert_uid: str = ''
if alerts_wrapper.get('alerts'):
watcher_groups: List = []
if alerts_wrapper.get('watcherGroups'):
watcher_groups = alerts_wrapper.get('watcherGroups', [])
alerts: List = alerts_wrapper.get('alerts', [])
for alert in alerts:
            # If there is no created_time, fall back to epoch (0). We use time in ms so we must
# convert it from the Titan API response
incident_created_time = int(alert.get('foundTime', '0'))
# to prevent duplicates, we are only adding incidents with creation_time > last fetched incident
# if last_fetch:
# if incident_created_time <= last_fetch:
# continue
incident_name: str = compose_incident_title(alert)
titan_url: str = compose_titan_url(alert)
watcher_group_description, watcher_description = compose_incident_watcher_details(alert, watcher_groups)
incident_details: str = compose_incident_details(alert, watcher_groups)
incident = {
'name': incident_name,
'details': incident_details,
'occurred': timestamp_to_datestring(incident_created_time),
'rawJSON': json.dumps(alert),
'type': INCIDENT_TYPE, # Map to a specific XSOAR incident Type
'severity': convert_to_demisto_severity(alert.get('severity', 'Medium')),
'CustomFields': {
'titanurl': titan_url,
'titanwatchergroup': watcher_group_description,
'titanwatcher': watcher_description
}
}
incidents.append(incident)
latest_alert_uid = alert.get('uid', '')
# Update last run and add incident if the incident is newer than last fetch
if incident_created_time > latest_created_time:
latest_created_time = incident_created_time
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': latest_created_time}
return latest_alert_uid, next_run, incidents
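The cursor bookkeeping above (last_fetch in milliseconds plus the uid of the last alert handled) is the part most prone to off-by-one mistakes. Below is a minimal, self-contained sketch of the same idea; no XSOAR helpers are assumed, and the field names foundTime/uid simply mirror the alert payload used above.

from typing import Any, Dict, List, Tuple

def compute_next_run(alerts: List[Dict[str, Any]], last_fetch_ms: int) -> Tuple[str, Dict[str, int]]:
    """Return the uid of the last alert processed and the next_run dict to persist."""
    latest_created_time = last_fetch_ms
    latest_alert_uid = ''
    for alert in alerts:
        created = int(alert.get('foundTime', '0'))
        latest_alert_uid = alert.get('uid', '')
        # only advance the cursor when this alert is newer than anything seen so far
        if created > latest_created_time:
            latest_created_time = created
    return latest_alert_uid, {'last_fetch': latest_created_time}

print(compute_next_run([{'uid': 'a', 'foundTime': '10'}, {'uid': 'b', 'foundTime': '30'}], 20))
# -> ('b', {'last_fetch': 30})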
|
32,042 |
def get_all_lists(args: dict, sg):
params = {}
pageSize = args.get('page_size')
if pageSize:
params['page_size'] = int(pageSize)
pageToken = args.get('page_token')
if pageToken:
params['page_token'] = pageToken
headers = args.get('headers')
response = sg.client.marketing.lists.get(query_params=params)
if response.status_code == 200:
rBody = response.body
body = json.loads(rBody.decode("utf-8"))
ec = {'Sendgrid.Lists.Result': body['result'], 'Sendgrid.Lists.Metadata': body['_metadata']}
if headers:
if isinstance(headers, str):
headers = headers.split(",")
md = tableToMarkdown('All Lists details fetched successfully: ', body['result'], headers)
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': body,
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'All lists fetch failed: ' + str(response.body)
|
def get_all_lists(args: dict, sg):
params = {}
pageSize = args.get('page_size')
if pageSize:
params['page_size'] = int(pageSize)
pageToken = args.get('page_token')
if pageToken:
params['page_token'] = pageToken
headers = args.get('headers')
response = sg.client.marketing.lists.get(query_params=params)
if response.status_code == 200:
rBody = response.body
body = json.loads(rBody.decode("utf-8"))
ec = {'Sendgrid.Lists.Result': body['result'], 'Sendgrid.Lists.Metadata': body['_metadata']}
if headers:
if isinstance(headers, str):
headers = headers.split(",")
md = tableToMarkdown('All Lists details fetched successfully: ', body['result'], headers)
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': body,
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'Failed to fetch lists information: ' + str(response.body)
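When more than one page of lists exists, the page_token has to be fed back into the request. A hedged sketch of that loop, reusing the same sg.client.marketing.lists.get call as above; the assumption (not shown in the snippet) is that the _metadata block exposes the next page as a URL whose query string carries page_token.

import json
from urllib.parse import urlparse, parse_qs

def iter_all_lists(sg, page_size=100):
    """Yield every marketing list, following page_token until no next page remains."""
    params = {'page_size': page_size}
    while True:
        response = sg.client.marketing.lists.get(query_params=params)
        body = json.loads(response.body.decode('utf-8'))
        yield from body.get('result', [])
        next_url = body.get('_metadata', {}).get('next')  # assumption: next-page URL lives here
        if not next_url:
            break
        params['page_token'] = parse_qs(urlparse(next_url).query)['page_token'][0]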
|
56,843 |
def test_domain_dump_sql_models():
dump_apps = _get_app_list(set(), set())
covered_models = set(itertools.chain.from_iterable(dump_apps.values()))
def _ignore_model(model):
if not model._meta.managed:
return True
if get_model_label(model) in IGNORE_MODELS:
return True
if model._meta.proxy:
return model._meta.concrete_model in covered_models
# Used in Couch to SQL migration tests
if model.__name__ == 'DummySQLModel':
return True
installed_models = {
model for model in apps.get_models() if not _ignore_model(model)
}
uncovered_models = [
get_model_label(model) for model in installed_models - covered_models
]
print('\n'.join(sorted(uncovered_models)))
eq(len(uncovered_models), 0, "Not all Django models are covered by domain dump.")
|
def test_domain_dump_sql_models():
dump_apps = _get_app_list(set(), set())
covered_models = set(itertools.chain.from_iterable(dump_apps.values()))
def _ignore_model(model):
if not model._meta.managed:
return True
if get_model_label(model) in IGNORE_MODELS:
return True
if model._meta.proxy:
return model._meta.concrete_model in covered_models
# Used in Couch to SQL migration tests
if model.__name__ == 'DummySQLModel':
return True
installed_models = {
model for model in apps.get_models() if not _ignore_model(model)
}
uncovered_models = [
get_model_label(model) for model in installed_models - covered_models
]
assert not uncovered_models, ("Not all Django models are covered by domain dump.\n"
+ '\n'.join(sorted(uncovered_models)))
|
52,626 |
def parse_server_name(server_name: str) -> Tuple[str, Optional[int]]:
"""Split a server name into host/port parts.
Args:
server_name: server name to parse
Returns:
host/port parts.
Raises:
ValueError if the server name could not be parsed.
"""
try:
if not server_name:
# Nothing provided
return "", None
elif server_name[-1] == "]":
# ipv6 literal, hopefully
return server_name, None
elif ":" not in server_name:
# hostname only
return server_name, None
domain, port = server_name.rsplit(":", 1)
return domain, int(port) if port else None
except Exception:
raise ValueError("Invalid server name '%s'" % server_name)
|
def parse_server_name(server_name: str) -> Tuple[str, Optional[int]]:
"""Split a server name into host/port parts.
Args:
server_name: server name to parse
Returns:
host/port parts.
Raises:
ValueError if the server name could not be parsed.
"""
try:
if not server_name:
# Nothing provided
return "", None
elif server_name[-1] == "]":
# ipv6 literal, hopefully
return server_name, None
elif ":" not in server_name:
# hostname only
return server_name, None
else:
domain, port = server_name.rsplit(":", 1)
return domain, int(port) if port else None
except Exception:
raise ValueError("Invalid server name '%s'" % server_name)
|
7,202 |
def perimeter(image, neighbourhood=4):
"""Calculate total perimeter of all objects in binary image.
Parameters
----------
image : (N, M) ndarray
2D binary image.
neighbourhood : 4 or 8, optional
Neighborhood connectivity for border pixel determination. It is used to
compute the contour. A higher neighbourhood widens the border on which
the perimeter is computed.
Returns
-------
perimeter : float
Total perimeter of all objects in binary image.
References
----------
.. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of
a Perimeter Estimator. The Queen's University of Belfast.
http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc
Examples
--------
>>> from skimage import data, util
>>> from skimage.measure import label
>>> # coins image (binary)
>>> img_coins = data.coins() > 110
>>> # total perimeter of all objects in the image
>>> perimeter(img_coins, neighbourhood=4) # doctest: +ELLIPSIS
7796.867...
>>> perimeter(img_coins, neighbourhood=8) # doctest: +ELLIPSIS
8806.268...
"""
if image.ndim != 2:
raise NotImplementedError('`perimeter` supports 2D images only')
if neighbourhood == 4:
strel = STREL_4
else:
strel = STREL_8
image = image.astype(np.uint8)
eroded_image = ndi.binary_erosion(image, strel, border_value=0)
border_image = image - eroded_image
perimeter_weights = np.zeros(50, dtype=np.double)
perimeter_weights[[5, 7, 15, 17, 25, 27]] = 1
perimeter_weights[[21, 33]] = sqrt(2)
perimeter_weights[[13, 23]] = (1 + sqrt(2)) / 2
perimeter_image = ndi.convolve(border_image, np.array([[10, 2, 10],
[2, 1, 2],
[10, 2, 10]]),
mode='constant', cval=0)
# You can also write
# return perimeter_weights[perimeter_image].sum()
# but that was measured as taking much longer than bincount + np.dot (5x
# as much time)
perimeter_histogram = np.bincount(perimeter_image.ravel(), minlength=50)
total_perimeter = perimeter_histogram @ perimeter_weights
return total_perimeter
|
def perimeter(image, neighbourhood=4):
"""Calculate total perimeter of all objects in binary image.
Parameters
----------
image : (N, M) ndarray
2D binary image.
neighbourhood : 4 or 8, optional
Neighborhood connectivity for border pixel determination. It is used to
compute the contour. A higher neighbourhood widens the border on which
the perimeter is computed.
Returns
-------
perimeter : float
Total perimeter of all objects in binary image.
References
----------
.. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of
a Perimeter Estimator. The Queen's University of Belfast.
http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc
Examples
--------
>>> from skimage import data, util
>>> from skimage.measure import label
>>> # coins image (binary)
>>> img_coins = data.coins() > 110
>>> # total perimeter of all objects in the image
>>> perimeter(img_coins, neighbourhood=4) # doctest: +ELLIPSIS
7796.867...
>>> perimeter(img_coins, neighbourhood=8) # doctest: +ELLIPSIS
8806.268...
"""
if image.ndim != 2:
raise NotImplementedError('`perimeter` supports 2D images only')
if neighbourhood == 4:
strel = STREL_4
else:
strel = STREL_8
image = image.astype(np.uint8)
eroded_image = ndi.binary_erosion(image, strel, border_value=0)
border_image = image - eroded_image
perimeter_weights = np.zeros(50, dtype=np.double)
perimeter_weights[[5, 7, 15, 17, 25, 27]] = 1
perimeter_weights[[21, 33]] = sqrt(2)
perimeter_weights[[13, 23]] = (1 + sqrt(2)) / 2
perimeter_image = ndi.convolve(border_image, np.array([[10, 2, 10],
[ 2, 1, 2],
[10, 2, 10]]),
mode='constant', cval=0)
# You can also write
# return perimeter_weights[perimeter_image].sum()
# but that was measured as taking much longer than bincount + np.dot (5x
# as much time)
perimeter_histogram = np.bincount(perimeter_image.ravel(), minlength=50)
total_perimeter = perimeter_histogram @ perimeter_weights
return total_perimeter
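The bincount-versus-fancy-indexing comment is easy to verify on a toy array: both expressions are the same weighted sum over the convolution codes (numpy only, codes made up for illustration).

import numpy as np

weights = np.zeros(50)
weights[[5, 7, 15, 17, 25, 27]] = 1.0
codes = np.array([[5, 0, 27], [0, 15, 0]])                   # stand-in for perimeter_image

fancy = weights[codes].sum()                                  # direct weighted lookup
histo = np.bincount(codes.ravel(), minlength=50) @ weights    # bincount + dot, as in the code
assert np.isclose(fancy, histo)                               # both evaluate to 3.0 here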
|
58,149 |
def main() -> None:
LOG('command is %s' % (demisto.command(),))
try:
handle_proxy()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
http_request('/domains/categorization/google.com?showLabels')
demisto.results('ok')
sys.exit(0)
elif demisto.command() == 'investigate-umbrella-domain-categorization' or demisto.command() == \
'umbrella-domain-categorization':
demisto.results(get_domain_categorization_command())
elif demisto.command() == 'investigate-umbrella-domain-search' or demisto.command() == 'umbrella-domain-search':
demisto.results(get_domain_search_command())
elif demisto.command() == 'investigate-umbrella-domain-co-occurrences' or demisto.command() == \
'umbrella-domain-co-occurrences':
demisto.results(get_domain_co_occurrences_command())
elif demisto.command() == 'investigate-umbrella-domain-related' or demisto.command() == 'umbrella-domain-related':
demisto.results(get_domain_related_command())
elif demisto.command() == 'investigate-umbrella-domain-security' or demisto.command() == 'umbrella-domain-security':
demisto.results(get_domain_security_command())
elif demisto.command() == 'investigate-umbrella-domain-dns-history' or demisto.command() == \
'umbrella-domain-dns-history':
demisto.results(get_domain_dns_history_command())
elif demisto.command() == 'investigate-umbrella-ip-dns-history' or demisto.command() == 'umbrella-ip-dns-history':
demisto.results(get_ip_dns_history_command())
elif demisto.command() == 'investigate-umbrella-ip-malicious-domains' or demisto.command() == \
'umbrella-ip-malicious-domains':
demisto.results(get_ip_malicious_domains_command())
# new-commands:
elif demisto.command() == 'domain':
return_results(get_domain_command())
elif demisto.command() == 'umbrella-get-related-domains':
demisto.results(get_related_domains_command())
elif demisto.command() == 'umbrella-get-domain-classifiers':
demisto.results(get_domain_classifiers_command())
elif demisto.command() == 'umbrella-get-domain-queryvolume':
demisto.results(get_domain_query_volume_command())
elif demisto.command() == 'umbrella-get-domain-details':
demisto.results(get_domain_details_command())
elif demisto.command() == 'umbrella-get-domains-for-email-registrar':
demisto.results(get_domains_for_email_registrar_command())
elif demisto.command() == 'umbrella-get-domains-for-nameserver':
demisto.results(get_domains_for_nameserver_command())
elif demisto.command() == 'umbrella-get-whois-for-domain':
return_results(get_whois_for_domain_command())
elif demisto.command() == 'umbrella-get-malicious-domains-for-ip':
demisto.results(get_malicious_domains_for_ip_command())
elif demisto.command() == 'umbrella-get-domains-using-regex':
demisto.results(get_domain_using_regex_command())
elif demisto.command() == 'umbrella-get-domain-timeline':
demisto.results(get_domain_timeline_command())
elif demisto.command() == 'umbrella-get-ip-timeline':
demisto.results(get_ip_timeline_command())
elif demisto.command() == 'umbrella-get-url-timeline':
demisto.results(get_url_timeline_command())
except HTTPError as e:
if e.args[0]:
return_error(e.args[0])
else:
return_error(f"HTTP error with code {e.response.status_code}")
except Exception as e:
LOG(str(e))
LOG.print_log()
return_error(str(e))
|
def main() -> None:
LOG('command is %s' % (demisto.command(),))
try:
handle_proxy()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
http_request('/domains/categorization/google.com?showLabels')
demisto.results('ok')
sys.exit(0)
elif demisto.command() == 'investigate-umbrella-domain-categorization' or demisto.command() == \
'umbrella-domain-categorization':
demisto.results(get_domain_categorization_command())
elif demisto.command() == 'investigate-umbrella-domain-search' or demisto.command() == 'umbrella-domain-search':
demisto.results(get_domain_search_command())
elif demisto.command() == 'investigate-umbrella-domain-co-occurrences' or demisto.command() == \
'umbrella-domain-co-occurrences':
demisto.results(get_domain_co_occurrences_command())
elif demisto.command() == 'investigate-umbrella-domain-related' or demisto.command() == 'umbrella-domain-related':
demisto.results(get_domain_related_command())
elif demisto.command() == 'investigate-umbrella-domain-security' or demisto.command() == 'umbrella-domain-security':
demisto.results(get_domain_security_command())
elif demisto.command() == 'investigate-umbrella-domain-dns-history' or demisto.command() == \
'umbrella-domain-dns-history':
demisto.results(get_domain_dns_history_command())
elif demisto.command() == 'investigate-umbrella-ip-dns-history' or demisto.command() == 'umbrella-ip-dns-history':
demisto.results(get_ip_dns_history_command())
elif demisto.command() == 'investigate-umbrella-ip-malicious-domains' or demisto.command() == \
'umbrella-ip-malicious-domains':
demisto.results(get_ip_malicious_domains_command())
# new-commands:
elif demisto.command() == 'domain':
return_results(get_domain_command())
elif demisto.command() == 'umbrella-get-related-domains':
demisto.results(get_related_domains_command())
elif demisto.command() == 'umbrella-get-domain-classifiers':
demisto.results(get_domain_classifiers_command())
elif demisto.command() == 'umbrella-get-domain-queryvolume':
demisto.results(get_domain_query_volume_command())
elif demisto.command() == 'umbrella-get-domain-details':
demisto.results(get_domain_details_command())
elif demisto.command() == 'umbrella-get-domains-for-email-registrar':
demisto.results(get_domains_for_email_registrar_command())
elif demisto.command() == 'umbrella-get-domains-for-nameserver':
demisto.results(get_domains_for_nameserver_command())
elif demisto.command() == 'umbrella-get-whois-for-domain':
return_results(get_whois_for_domain_command())
elif demisto.command() == 'umbrella-get-malicious-domains-for-ip':
demisto.results(get_malicious_domains_for_ip_command())
elif demisto.command() == 'umbrella-get-domains-using-regex':
demisto.results(get_domain_using_regex_command())
elif demisto.command() == 'umbrella-get-domain-timeline':
demisto.results(get_domain_timeline_command())
elif demisto.command() == 'umbrella-get-ip-timeline':
demisto.results(get_ip_timeline_command())
elif demisto.command() == 'umbrella-get-url-timeline':
demisto.results(get_url_timeline_command())
except HTTPError as e:
if e.args[0]:
return_error(e.args[0])
else:
return_error(f"HTTP error with code {e.response.status_code}")
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
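The elif ladder above can also be expressed as a dispatch dict. This is only a sketch using command names and handlers already present above; handlers that must keep going through demisto.results would need their own wrapper.

COMMANDS = {
    'domain': get_domain_command,
    'umbrella-get-whois-for-domain': get_whois_for_domain_command,
    'umbrella-domain-search': get_domain_search_command,
    'investigate-umbrella-domain-search': get_domain_search_command,
}

command = demisto.command()
if command in COMMANDS:
    return_results(COMMANDS[command]())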
|
30,099 |
def test_save_signatures_to_location_1_dirout(runtmp):
# save to sigfile.gz
sig2 = utils.get_test_data('2.fa.sig')
ss2 = sourmash.load_one_signature(sig2, ksize=31)
sig47 = utils.get_test_data('47.fa.sig')
ss47 = sourmash.load_one_signature(sig47, ksize=31)
outloc = runtmp.output('sigout/')
with sourmash_args.SaveSignaturesToLocation(outloc) as save_sig:
print(save_sig)
save_sig.add(ss2)
save_sig.add(ss47)
assert os.path.isdir(outloc)
saved = list(sourmash.load_file_as_signatures(outloc))
assert ss2 in saved
assert ss47 in saved
assert len(saved) == 2
|
def test_save_signatures_to_location_1_dirout(runtmp):
# save to sigout/ (directory)
sig2 = utils.get_test_data('2.fa.sig')
ss2 = sourmash.load_one_signature(sig2, ksize=31)
sig47 = utils.get_test_data('47.fa.sig')
ss47 = sourmash.load_one_signature(sig47, ksize=31)
outloc = runtmp.output('sigout/')
with sourmash_args.SaveSignaturesToLocation(outloc) as save_sig:
print(save_sig)
save_sig.add(ss2)
save_sig.add(ss47)
assert os.path.isdir(outloc)
saved = list(sourmash.load_file_as_signatures(outloc))
assert ss2 in saved
assert ss47 in saved
assert len(saved) == 2
|
46,233 |
def color_arrowheads(colors, num_segments):
if len(colors) == 2:
return np.concatenate(
[[colors[0]] * num_segments, [colors[1]] * num_segments], axis=0,
)
elif len(colors) == 3:
return np.concatenate(
[
[colors[0]] * num_segments,
[colors[1]] * num_segments,
[colors[2]] * num_segments,
],
axis=0,
)
else:
        raise ValueError(
'Either 2 or 3 colors ' f'must be provided, got f{len(colors)}.'
)
|
def color_arrowheads(colors, num_segments):
if len(colors) == 2:
return np.concatenate(
[[colors[0]] * num_segments, [colors[1]] * num_segments], axis=0,
)
elif len(colors) == 3:
return np.concatenate(
[
[colors[0]] * num_segments,
[colors[1]] * num_segments,
[colors[2]] * num_segments,
],
axis=0,
)
else:
        raise ValueError(
f'Either 2 or 3 colors must be provided, got {len(colors)}.'
)
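A quick shape check of the helper above; the RGBA rows are arbitrary values used only for illustration.

import numpy as np

red = [1.0, 0.0, 0.0, 1.0]
blue = [0.0, 0.0, 1.0, 1.0]

out = color_arrowheads([red, blue], num_segments=3)
print(out.shape)   # (6, 4): three segments per color, stacked in order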
|
45,699 |
def extrapolate(
precip,
velocity,
timesteps,
outval=np.nan,
xy_coords=None,
allow_nonfinite_values=False,
vel_timestep=1,
**kwargs,
):
"""Apply semi-Lagrangian backward extrapolation to a two-dimensional
precipitation field.
Parameters
----------
precip: array-like or None
Array of shape (m,n) containing the input precipitation field. All
values are required to be finite by default. If set to None, only the
displacement field is returned without interpolating the inputs. This
requires that return_displacement is set to True.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the m*n
advection field. All values are required to be finite by default.
timesteps: int or list
If timesteps is integer, it specifies the number of time steps to
extrapolate. If a list is given, each element is the desired
extrapolation time step from the current time. The elements of the list
are required to be in ascending order.
outval: float, optional
Optional argument for specifying the value for pixels advected from
outside the domain. If outval is set to 'min', the value is taken as
the minimum value of precip.
Default: np.nan
xy_coords: ndarray, optional
Array with the coordinates of the grid dimension (2, m, n ).
* xy_coords[0]: x coordinates
* xy_coords[1]: y coordinates
By default, the *xy_coords* are computed for each extrapolation.
allow_nonfinite_values: bool, optional
If True, allow non-finite values in the precipitation and advection
fields. This option is useful if the input fields contain a radar mask
(i.e. pixels with no observations are set to nan).
Other Parameters
----------------
displacement_prev: array-like
Optional initial displacement vector field of shape (2,m,n) for the
extrapolation.
Default: None
n_iter: int
Number of inner iterations in the semi-Lagrangian scheme. If n_iter > 0,
the integration is done using the midpoint rule. Otherwise, the advection
vectors are taken from the starting point of each interval.
Default: 1
return_displacement: bool
If True, return the displacement between the initial input field and
the one obtained by integrating along the advection field.
Default: False
vel_timestep: float
The time step of the velocity field. It is assumed to have the same
        unit as the timesteps argument. Applicable if timesteps is a list.
Default: 1.
interp_order: int
The order of interpolation to use. Default: 1 (linear). Setting this
to 0 (nearest neighbor) gives the best computational performance but
may produce visible artefacts. Setting this to 3 (cubic) gives the best
ability to reproduce small-scale variability but may significantly
increase the computation time.
Returns
-------
out: array or tuple
If return_displacement=False, return a time series extrapolated fields
of shape (num_timesteps,m,n). Otherwise, return a tuple containing the
extrapolated fields and the integrated trajectory (displacement) along
the advection field.
References
----------
:cite:`GZ2002`
"""
if precip is not None and len(precip.shape) != 2:
raise ValueError("precip must be a two-dimensional array")
if len(velocity.shape) != 3:
raise ValueError("velocity must be a three-dimensional array")
if precip is not None and not allow_nonfinite_values:
if np.any(~np.isfinite(precip)):
raise ValueError("precip contains non-finite values")
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
# defaults
verbose = kwargs.get("verbose", False)
displacement_prev = kwargs.get("displacement_prev", None)
n_iter = kwargs.get("n_iter", 1)
return_displacement = kwargs.get("return_displacement", False)
interp_order = kwargs.get("interp_order", 1)
if precip is None and not return_displacement:
raise ValueError("precip is None but return_displacement is False")
if "D_prev" in kwargs.keys():
warnings.warn(
"deprecated argument D_prev is ignored, use displacement_prev instead",
)
# if interp_order > 1, apply separate masking to preserve nan and
# non-precipitation values
if precip is not None and interp_order > 1:
minval = np.nanmin(precip)
mask_min = (precip > minval).astype(float)
if allow_nonfinite_values:
mask_finite = np.isfinite(precip)
precip = precip.copy()
precip[~mask_finite] = 0.0
mask_finite = mask_finite.astype(float)
prefilter = True if interp_order > 1 else False
if isinstance(timesteps, int):
timesteps = np.arange(1, timesteps + 1)
vel_timestep = 1.0
elif np.any(np.diff(timesteps) <= 0.0):
raise ValueError("the given timestep sequence is not monotonously increasing")
timestep_diff = np.hstack([[timesteps[0]], np.diff(timesteps)])
if verbose:
print("Computing the advection with the semi-lagrangian scheme.")
t0 = time.time()
if precip is not None and outval == "min":
outval = np.nanmin(precip)
if xy_coords is None:
x_values, y_values = np.meshgrid(
np.arange(velocity.shape[2]), np.arange(velocity.shape[1])
)
xy_coords = np.stack([x_values, y_values])
def interpolate_motion(displacement, velocity_inc, td):
coords_warped = xy_coords + displacement
coords_warped = [coords_warped[1, :, :], coords_warped[0, :, :]]
velocity_inc_x = ip.map_coordinates(
velocity[0, :, :],
coords_warped,
mode="nearest",
order=1,
prefilter=False,
)
velocity_inc_y = ip.map_coordinates(
velocity[1, :, :],
coords_warped,
mode="nearest",
order=1,
prefilter=False,
)
velocity_inc[0, :, :] = velocity_inc_x
velocity_inc[1, :, :] = velocity_inc_y
if n_iter > 1:
velocity_inc /= n_iter
velocity_inc *= td / vel_timestep
precip_extrap = []
if displacement_prev is None:
displacement = np.zeros((2, velocity.shape[1], velocity.shape[2]))
velocity_inc = velocity.copy() * timestep_diff[0] / vel_timestep
else:
displacement = displacement_prev.copy()
velocity_inc = np.empty(velocity.shape)
interpolate_motion(displacement, velocity_inc, timestep_diff[0])
for ti, td in enumerate(timestep_diff):
if n_iter > 0:
for k in range(n_iter):
interpolate_motion(displacement - velocity_inc / 2.0, velocity_inc, td)
displacement -= velocity_inc
interpolate_motion(displacement, velocity_inc, td)
else:
if ti > 0 or displacement_prev is not None:
interpolate_motion(displacement, velocity_inc, td)
displacement -= velocity_inc
coords_warped = xy_coords + displacement
coords_warped = [coords_warped[1, :, :], coords_warped[0, :, :]]
if precip is not None:
precip_warped = ip.map_coordinates(
precip,
coords_warped,
mode="constant",
cval=outval,
order=interp_order,
prefilter=prefilter,
)
if interp_order > 1:
mask_warped = ip.map_coordinates(
mask_min,
coords_warped,
mode="constant",
cval=0,
order=1,
prefilter=False,
)
precip_warped[mask_warped < 0.5] = minval
if allow_nonfinite_values:
mask_warped = ip.map_coordinates(
mask_finite,
coords_warped,
mode="constant",
cval=0,
order=1,
prefilter=False,
)
precip_warped[mask_warped < 0.5] = np.nan
precip_extrap.append(np.reshape(precip_warped, precip.shape))
if verbose:
print("--- %s seconds ---" % (time.time() - t0))
if precip is not None:
if not return_displacement:
return np.stack(precip_extrap)
else:
return np.stack(precip_extrap), displacement
else:
return None, displacement
|
def extrapolate(
precip,
velocity,
timesteps,
outval=np.nan,
xy_coords=None,
allow_nonfinite_values=False,
vel_timestep=1,
**kwargs,
):
"""Apply semi-Lagrangian backward extrapolation to a two-dimensional
precipitation field.
Parameters
----------
precip: array-like or None
Array of shape (m,n) containing the input precipitation field. All
values are required to be finite by default. If set to None, only the
displacement field is returned without interpolating the inputs. This
requires that return_displacement is set to True.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the m*n
advection field. All values are required to be finite by default.
timesteps: int or list
If timesteps is integer, it specifies the number of time steps to
extrapolate. If a list is given, each element is the desired
extrapolation time step from the current time. The elements of the list
are required to be in ascending order.
outval: float, optional
Optional argument for specifying the value for pixels advected from
outside the domain. If outval is set to 'min', the value is taken as
the minimum value of precip.
Default: np.nan
xy_coords: ndarray, optional
Array with the coordinates of the grid dimension (2, m, n ).
* xy_coords[0]: x coordinates
* xy_coords[1]: y coordinates
By default, the *xy_coords* are computed for each extrapolation.
allow_nonfinite_values: bool, optional
If True, allow non-finite values in the precipitation and advection
fields. This option is useful if the input fields contain a radar mask
(i.e. pixels with no observations are set to nan).
Other Parameters
----------------
displacement_prev: array-like
Optional initial displacement vector field of shape (2,m,n) for the
extrapolation.
Default: None
n_iter: int
Number of inner iterations in the semi-Lagrangian scheme. If n_iter > 0,
the integration is done using the midpoint rule. Otherwise, the advection
vectors are taken from the starting point of each interval.
Default: 1
return_displacement: bool
If True, return the displacement between the initial input field and
the one obtained by integrating along the advection field.
Default: False
vel_timestep: float
The time step of the velocity field. It is assumed to have the same
        unit as the timesteps argument. Applicable if timesteps is a list.
Default: 1.
interp_order: int
The order of interpolation to use. Default: 1 (linear). Setting this
to 0 (nearest neighbor) gives the best computational performance but
may produce visible artefacts. Setting this to 3 (cubic) gives the best
ability to reproduce small-scale variability but may significantly
increase the computation time.
Returns
-------
out: array or tuple
If return_displacement=False, return a time series extrapolated fields
of shape (num_timesteps,m,n). Otherwise, return a tuple containing the
extrapolated fields and the integrated trajectory (displacement) along
the advection field.
References
----------
:cite:`GZ2002`
"""
if precip is not None and len(precip.shape) != 2:
raise ValueError("precip must be a two-dimensional array")
if velocity.ndim != 3:
raise ValueError("velocity must be a three-dimensional array")
if precip is not None and not allow_nonfinite_values:
if np.any(~np.isfinite(precip)):
raise ValueError("precip contains non-finite values")
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
# defaults
verbose = kwargs.get("verbose", False)
displacement_prev = kwargs.get("displacement_prev", None)
n_iter = kwargs.get("n_iter", 1)
return_displacement = kwargs.get("return_displacement", False)
interp_order = kwargs.get("interp_order", 1)
if precip is None and not return_displacement:
raise ValueError("precip is None but return_displacement is False")
if "D_prev" in kwargs.keys():
warnings.warn(
"deprecated argument D_prev is ignored, use displacement_prev instead",
)
# if interp_order > 1, apply separate masking to preserve nan and
# non-precipitation values
if precip is not None and interp_order > 1:
minval = np.nanmin(precip)
mask_min = (precip > minval).astype(float)
if allow_nonfinite_values:
mask_finite = np.isfinite(precip)
precip = precip.copy()
precip[~mask_finite] = 0.0
mask_finite = mask_finite.astype(float)
prefilter = True if interp_order > 1 else False
if isinstance(timesteps, int):
timesteps = np.arange(1, timesteps + 1)
vel_timestep = 1.0
elif np.any(np.diff(timesteps) <= 0.0):
raise ValueError("the given timestep sequence is not monotonously increasing")
timestep_diff = np.hstack([[timesteps[0]], np.diff(timesteps)])
if verbose:
print("Computing the advection with the semi-lagrangian scheme.")
t0 = time.time()
if precip is not None and outval == "min":
outval = np.nanmin(precip)
if xy_coords is None:
x_values, y_values = np.meshgrid(
np.arange(velocity.shape[2]), np.arange(velocity.shape[1])
)
xy_coords = np.stack([x_values, y_values])
def interpolate_motion(displacement, velocity_inc, td):
coords_warped = xy_coords + displacement
coords_warped = [coords_warped[1, :, :], coords_warped[0, :, :]]
velocity_inc_x = ip.map_coordinates(
velocity[0, :, :],
coords_warped,
mode="nearest",
order=1,
prefilter=False,
)
velocity_inc_y = ip.map_coordinates(
velocity[1, :, :],
coords_warped,
mode="nearest",
order=1,
prefilter=False,
)
velocity_inc[0, :, :] = velocity_inc_x
velocity_inc[1, :, :] = velocity_inc_y
if n_iter > 1:
velocity_inc /= n_iter
velocity_inc *= td / vel_timestep
precip_extrap = []
if displacement_prev is None:
displacement = np.zeros((2, velocity.shape[1], velocity.shape[2]))
velocity_inc = velocity.copy() * timestep_diff[0] / vel_timestep
else:
displacement = displacement_prev.copy()
velocity_inc = np.empty(velocity.shape)
interpolate_motion(displacement, velocity_inc, timestep_diff[0])
for ti, td in enumerate(timestep_diff):
if n_iter > 0:
for k in range(n_iter):
interpolate_motion(displacement - velocity_inc / 2.0, velocity_inc, td)
displacement -= velocity_inc
interpolate_motion(displacement, velocity_inc, td)
else:
if ti > 0 or displacement_prev is not None:
interpolate_motion(displacement, velocity_inc, td)
displacement -= velocity_inc
coords_warped = xy_coords + displacement
coords_warped = [coords_warped[1, :, :], coords_warped[0, :, :]]
if precip is not None:
precip_warped = ip.map_coordinates(
precip,
coords_warped,
mode="constant",
cval=outval,
order=interp_order,
prefilter=prefilter,
)
if interp_order > 1:
mask_warped = ip.map_coordinates(
mask_min,
coords_warped,
mode="constant",
cval=0,
order=1,
prefilter=False,
)
precip_warped[mask_warped < 0.5] = minval
if allow_nonfinite_values:
mask_warped = ip.map_coordinates(
mask_finite,
coords_warped,
mode="constant",
cval=0,
order=1,
prefilter=False,
)
precip_warped[mask_warped < 0.5] = np.nan
precip_extrap.append(np.reshape(precip_warped, precip.shape))
if verbose:
print("--- %s seconds ---" % (time.time() - t0))
if precip is not None:
if not return_displacement:
return np.stack(precip_extrap)
else:
return np.stack(precip_extrap), displacement
else:
return None, displacement
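The heart of the scheme is simply "sample the field at x minus the accumulated displacement". A stripped-down sketch of one backward step with a constant advection field, using scipy only and ignoring the n_iter midpoint refinement and the masking handled above.

import numpy as np
from scipy.ndimage import map_coordinates

precip = np.zeros((5, 5))
precip[1, 1] = 1.0                        # a single rain pixel
velocity = np.ones((2, 5, 5))             # constant motion of (1, 1) pixels per step

y, x = np.meshgrid(np.arange(5), np.arange(5), indexing="ij")
dx, dy = velocity[0], velocity[1]         # one timestep, vel_timestep == 1

# backward scheme: the value at (y, x) is taken from (y - dy, x - dx)
advected = map_coordinates(precip, [y - dy, x - dx], order=1, mode="constant", cval=0.0)
print(np.argwhere(advected == 1.0))       # [[2 2]] -> the pixel moved one step down/right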
|
57,763 |
def mantis_get_issue_by_id_command(client, args):
"""
    Retrieves a Mantis issue by its id and renders it as a markdown table.
Args:
client (Client): Mantis client.
args (dict): all command arguments.
Returns:
Mantis
"""
_id = args.get('id')
resp = client.get_issue(_id).get('issues')[0]
issues = create_output_result(resp)
readable_output = tableToMarkdown("Mantis Issue Details", issues, headers=TABLE_HEADERS)
results = CommandResults(
readable_output=readable_output,
outputs_prefix="Mantis.issue",
outputs_key_field=TABLE_HEADERS,
outputs=issues
)
return_results(results)
|
def mantis_get_issue_by_id_command(client, args):
"""
    Retrieves a Mantis issue by its id and renders it as a markdown table.
Args:
client (Client): Mantis client.
args (dict): all command arguments.
Returns:
Mantis
"""
_id = args.get('id')
resp_issues = client.get_issue(_id).get('issues')
resp = {}
if len(resp_issues) > 0:
resp = resp_issues[0]
issues = create_output_result(resp)
readable_output = tableToMarkdown("Mantis Issue Details", issues, headers=TABLE_HEADERS)
results = CommandResults(
readable_output=readable_output,
outputs_prefix="Mantis.issue",
outputs_key_field=TABLE_HEADERS,
outputs=issues
)
return_results(results)
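The length guard added above can equivalently be written with next(); shown only as an alternative spelling, not as the integration's convention.

resp = next(iter(client.get_issue(_id).get('issues') or []), {})   # first issue, or {} if none matched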
|
17,536 |
def show_info(device, parent):
def format_boolean(x):
return _('yes') if x else _('no')
def format_rssi(rssi):
if rssi in [0x99, 0x7f]:
'invalid (0x{:02x})'.format(rssi)
else:
'{} dBm (0x{:02x})'.format(rssi, rssi)
def format_uuids(uuids):
return "\n".join([uuid + ' ' + ServiceUUID(uuid).name for uuid in uuids])
def on_accel_activated(group, dialog, key, flags):
if key != 99:
logging.warning("Ignoring key %s" % key)
return
store, paths = view_selection.get_selected_rows()
text = []
for path in paths:
row = store[path]
text.append(row[-1])
logging.info("\n".join(text))
clipboard.set_text("\n".join(text), -1)
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
dialog = Gtk.Dialog(icon_name="blueman", title="blueman")
dialog.set_transient_for(parent)
dialog_content_area = dialog.get_content_area()
label = Gtk.Label()
label.set_markup("<big>Select row(s) and use <i>Control + C</i> to copy</big>")
label.show()
dialog_content_area.pack_start(label, True, False, 0)
accelgroup = Gtk.AccelGroup()
dialog.add_accel_group(accelgroup)
key, mod = Gtk.accelerator_parse("<Control>C")
accelgroup.connect(key, mod, Gtk.AccelFlags.MASK, on_accel_activated)
store = Gtk.ListStore(str, str)
view = Gtk.TreeView(model=store, headers_visible=False)
view_selection = view.get_selection()
view_selection.set_mode(Gtk.SelectionMode.MULTIPLE)
for i in range(2):
column = Gtk.TreeViewColumn()
cell = Gtk.CellRendererText()
column.pack_start(cell, True)
column.add_attribute(cell, 'text', i)
view.append_column(column)
dialog_content_area.pack_start(view, True, False, 0)
view.show_all()
properties = (
('Address', None),
('AddressType', None),
('Name', None),
('Alias', None),
('Class', lambda x: "0x{:06x}".format(x)),
('Appearance', lambda x: "0x{:04x}".format(x)),
('Icon', None),
('Paired', format_boolean),
('Trusted', format_boolean),
('Blocked', format_boolean),
('LegacyPairing', format_boolean),
('RSSI', format_rssi),
('Connected', format_boolean),
('UUIDs', format_uuids),
('Modalias', None),
('Adapter', None),
# FIXME below 3 we need some sample data to decode and display properly
('ManufacturerData', lambda x: str(x)),
('ServiceData', lambda x: str(x)),
('AdvertisingData', lambda x: str(x))
)
for name, func in properties:
try:
if func is None:
store.append((name, device.get(name)))
else:
store.append((name, func(device.get(name))))
except BluezDBusException:
logging.info("Could not get property %s" % name)
pass
except ValueError:
logging.info("Could not add property %s" % name)
pass
dialog.run()
dialog.destroy()
|
def show_info(device, parent):
def format_boolean(x):
return _('yes') if x else _('no')
def format_rssi(rssi):
        if rssi in [0x99, 0x7f]:
            return 'invalid (0x{:02x})'.format(rssi)
        else:
            return '{} dBm (0x{:02x})'.format(rssi, rssi)
def format_uuids(uuids):
return "\n".join([uuid + ' ' + ServiceUUID(uuid).name for uuid in uuids])
def on_accel_activated(group, dialog, key, flags):
if key != 99:
logging.warning("Ignoring key %s" % key)
return
store, paths = view_selection.get_selected_rows()
text = []
for path in paths:
row = store[path]
text.append(row[-1])
logging.info("\n".join(text))
clipboard.set_text("\n".join(text), -1)
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
dialog = Gtk.Dialog(icon_name="blueman", title="blueman")
dialog.set_transient_for(parent)
dialog_content_area = dialog.get_content_area()
label = Gtk.Label()
label.set_markup("<big>Select row(s) and use <i>Control + C</i> to copy</big>")
label.show()
dialog_content_area.pack_start(label, True, False, 0)
accelgroup = Gtk.AccelGroup()
dialog.add_accel_group(accelgroup)
key, mod = Gtk.accelerator_parse("<Control>C")
accelgroup.connect(key, mod, Gtk.AccelFlags.MASK, on_accel_activated)
store = Gtk.ListStore(str, str)
view = Gtk.TreeView(model=store, headers_visible=False)
view_selection = view.get_selection()
view_selection.set_mode(Gtk.SelectionMode.MULTIPLE)
for i in range(2):
column = Gtk.TreeViewColumn()
cell = Gtk.CellRendererText()
column.pack_start(cell, True)
column.add_attribute(cell, 'text', i)
view.append_column(column)
dialog_content_area.pack_start(view, True, False, 0)
view.show_all()
properties = (
('Address', None),
('AddressType', None),
('Name', None),
('Alias', None),
('Class', lambda x: "0x{:06x}".format(x)),
('Appearance', lambda x: "0x{:04x}".format(x)),
('Icon', None),
('Paired', format_boolean),
('Trusted', format_boolean),
('Blocked', format_boolean),
('LegacyPairing', format_boolean),
('RSSI', format_rssi),
('Connected', format_boolean),
('UUIDs', format_uuids),
('Modalias', None),
('Adapter', None),
# FIXME below 3 we need some sample data to decode and display properly
('ManufacturerData', str),
('ServiceData', lambda x: str(x)),
('AdvertisingData', lambda x: str(x))
)
for name, func in properties:
try:
if func is None:
store.append((name, device.get(name)))
else:
store.append((name, func(device.get(name))))
except BluezDBusException:
logging.info("Could not get property %s" % name)
pass
except ValueError:
logging.info("Could not add property %s" % name)
pass
dialog.run()
dialog.destroy()
|
32,624 |
def rescan_status(client: Client, args: dict) -> CommandResults:
"""
Retrieves scan status for previously submitted domain
name or URL.
"""
domain = args["domain"]
if not domain:
raise ValueError('domain is missing')
rescan = client._api_request(domain=domain, request_type="PUT", operation="rescan")
readable_output = f'Rescan of {domain}'
return CommandResults(
outputs_prefix="QutteraWebsiteMalwareScanning.Status",
outputs_key_field="error",
outputs=rescan
)
|
def rescan_status(client: Client, args: dict) -> CommandResults:
"""
Retrieves scan status for previously submitted domain
name or URL.
"""
domain = args.get('domain')
if not domain:
raise ValueError('domain is missing')
rescan = client._api_request(domain=domain, request_type="PUT", operation="rescan")
readable_output = f'Rescan of {domain}'
return CommandResults(
outputs_prefix="QutteraWebsiteMalwareScanning.Status",
outputs_key_field="error",
outputs=rescan
)
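The switch from args["domain"] to args.get('domain') is what lets the friendly ValueError actually fire: subscripting a missing key raises KeyError before the guard runs, while .get returns None and falls through to it.

args = {}                      # no 'domain' supplied
args.get('domain')             # -> None, so the `if not domain` guard raises ValueError
# args["domain"]               # -> would raise KeyError first, skipping the guard entirely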
|
33,053 |
def make_reproducible(args: MkosiArgs, root: Path, do_run_build_script: bool, for_cache: bool) -> None:
verb = "build" if do_run_build_script else "final"
with complete_step("Attempting to make build reproducible…"):
env = collections.ChainMap(dict(BUILDROOT=str(root), OUTPUTDIR=str(output_dir(args))),
args.environment,
os.environ)
file = f"reproducible.{args.distribution}.sh"
if importlib.resources.is_resource("mkosi.resources", file):
with importlib.resources.path("mkosi.resources", file) as p:
run(["bash", p, verb], env=env)
else:
warn("Reproducible images is not support on {arge.distribution}")
# TODO: Support SOURCE_DATE_EPOCH
reset_timestamps = ["find", root, "-mindepth", "1", "-execdir", "touch", "-hcd", "@0", "{}", "+"]
run(reset_timestamps)
|
def make_reproducible(args: MkosiArgs, root: Path, do_run_build_script: bool, for_cache: bool) -> None:
verb = "build" if do_run_build_script else "final"
with complete_step("Attempting to make build reproducible…"):
env = collections.ChainMap(dict(BUILDROOT=str(root), OUTPUTDIR=str(output_dir(args))),
args.environment,
os.environ)
file = f"reproducible.{args.distribution}.sh"
if importlib.resources.is_resource("mkosi.resources", file):
with importlib.resources.path("mkosi.resources", file) as p:
run(["bash", p, verb], env=env)
else:
warn("Reproducible images is not support on {arge.distribution}")
# TODO: Support SOURCE_DATE_EPOCH
reset_timestamps = ["find", root, "-mindepth", "1", "-execdir", "touch", "--no-dereference", "--no-create", "--date=@0", "{}", "+"]
run(reset_timestamps)
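The long options spell out what the original -hcd flags did: don't follow symlinks, don't create missing files, set the timestamp to the epoch. If one preferred to stay in Python rather than shell out to find/touch, a hedged sketch (os.utime only changes symlink timestamps on platforms that support follow_symlinks=False):

import os
from pathlib import Path

def reset_timestamps_py(root: Path, epoch: int = 0) -> None:
    """Set atime/mtime of everything under root to the given epoch, like touch --date=@0."""
    for path in root.rglob("*"):
        try:
            os.utime(path, (epoch, epoch), follow_symlinks=False)
        except NotImplementedError:
            # this platform cannot retime a symlink without following it; skip it
            pass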
|
43,266 |
def test_unsupervised_embeddings_prediction():
#
# Tests for embedding generation using inductive GCNs
#
edge_ids_test = np.array([[1, 2], [2, 3], [1, 3]])
graph = example_graph_1(feature_size=2)
# base_model, keras_model, generator, train_gen
gnn_models = [create_Unsupervised_graphSAGE_model(graph)]
for gnn_model in gnn_models:
keras_model = gnn_model[1]
generator = gnn_model[2]
train_gen = gnn_model[3]
ens = Ensemble(keras_model, n_estimators=2, n_predictions=1)
ens.compile(
optimizer=Adam(), loss=binary_crossentropy, weighted_metrics=["acc"]
)
# Check that passing invalid parameters is handled correctly. We will not check error handling for those
# parameters that Keras will be responsible for.
with pytest.raises(ValueError):
ens.predict_generator(generator=generator, predict_data=edge_ids_test)
# We won't train the model instead use the initial random weights to test
# the evaluate_generator method.
ens.models = [
keras.Model(inputs=model.input, outputs=model.output[-1])
for model in ens.models
]
test_predictions = ens.predict_generator(train_gen, summarise=False)
print("test_predictions embeddings shape {}".format(test_predictions.shape))
assert test_predictions.shape[0] == ens.n_estimators
assert test_predictions.shape[1] == ens.n_predictions
assert (
test_predictions.shape[2] > 1
) # Embeddings dim is > than binary prediction
#
# Repeat for BaggingEnsemble
ens = BaggingEnsemble(keras_model, n_estimators=2, n_predictions=1)
ens.compile(
optimizer=Adam(), loss=binary_crossentropy, weighted_metrics=["acc"]
)
# Check that passing invalid parameters is handled correctly. We will not check error handling for those
# parameters that Keras will be responsible for.
with pytest.raises(ValueError):
ens.predict_generator(generator=train_gen, predict_data=edge_ids_test)
# We won't train the model instead use the initial random weights to test
# the evaluate_generator method.
test_predictions = ens.predict_generator(train_gen, summarise=False)
print("test_predictions shape {}".format(test_predictions.shape))
assert test_predictions.shape[1] == 1
test_predictions = ens.predict_generator(train_gen, summarise=False)
assert test_predictions.shape[0] == ens.n_estimators
assert test_predictions.shape[1] == ens.n_predictions
assert test_predictions.shape[2] > 1
|
def test_unsupervised_embeddings_prediction():
#
# Tests for embedding generation using inductive GCNs
#
edge_ids_test = np.array([[1, 2], [2, 3], [1, 3]])
graph = example_graph_1(feature_size=2)
# base_model, keras_model, generator, train_gen
gnn_models = [create_Unsupervised_graphSAGE_model(graph)]
for gnn_model in gnn_models:
keras_model = gnn_model[1]
generator = gnn_model[2]
train_gen = gnn_model[3]
ens = Ensemble(keras_model, n_estimators=2, n_predictions=1)
ens.compile(
optimizer=Adam(), loss=binary_crossentropy, weighted_metrics=["acc"]
)
# Check that passing invalid parameters is handled correctly. We will not check error handling for those
# parameters that Keras will be responsible for.
with pytest.raises(ValueError):
ens.predict_generator(generator=generator, predict_data=edge_ids_test)
# We won't train the model instead use the initial random weights to test
# the evaluate_generator method.
ens.models = [
Model(inputs=model.input, outputs=model.output[-1])
for model in ens.models
]
test_predictions = ens.predict_generator(train_gen, summarise=False)
print("test_predictions embeddings shape {}".format(test_predictions.shape))
assert test_predictions.shape[0] == ens.n_estimators
assert test_predictions.shape[1] == ens.n_predictions
assert (
test_predictions.shape[2] > 1
) # Embeddings dim is > than binary prediction
#
# Repeat for BaggingEnsemble
ens = BaggingEnsemble(keras_model, n_estimators=2, n_predictions=1)
ens.compile(
optimizer=Adam(), loss=binary_crossentropy, weighted_metrics=["acc"]
)
# Check that passing invalid parameters is handled correctly. We will not check error handling for those
# parameters that Keras will be responsible for.
with pytest.raises(ValueError):
ens.predict_generator(generator=train_gen, predict_data=edge_ids_test)
# We won't train the model instead use the initial random weights to test
# the evaluate_generator method.
test_predictions = ens.predict_generator(train_gen, summarise=False)
print("test_predictions shape {}".format(test_predictions.shape))
assert test_predictions.shape[1] == 1
test_predictions = ens.predict_generator(train_gen, summarise=False)
assert test_predictions.shape[0] == ens.n_estimators
assert test_predictions.shape[1] == ens.n_predictions
assert test_predictions.shape[2] > 1
|
44,136 |
def apply_to_measurement(func: Callable):
"""
Apply an arbitrary function to a `MeasurementValue` or set of `MeasurementValue`s.
(func should be a "pure" function)
Ex:
.. code-block:: python
m0 = qml.measure(0)
m0_sin = qml.apply_to_measurement(np.sin)(m0)
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
partial = MeasurementLeaf()
for arg in args:
if not isinstance(arg, MeasurementValue):
arg = MeasurementLeaf(arg)
partial = partial.merge(arg)
partial.transform_leaves_inplace(
lambda *unwrapped: func(*unwrapped, **kwargs) # pylint: disable=unnecessary-lambda
)
return partial
return wrapper
|
def apply_to_measurement(func: Callable):
"""
Apply an arbitrary function to a `MeasurementValue` or set of `MeasurementValue`s.
(func should be a "pure" function)
**Example:**
.. code-block:: python
m0 = qml.measure(0)
m0_sin = qml.apply_to_measurement(np.sin)(m0)
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
partial = MeasurementLeaf()
for arg in args:
if not isinstance(arg, MeasurementValue):
arg = MeasurementLeaf(arg)
partial = partial.merge(arg)
partial.transform_leaves_inplace(
lambda *unwrapped: func(*unwrapped, **kwargs) # pylint: disable=unnecessary-lambda
)
return partial
return wrapper
|
31,209 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
token = demisto.params().get('token')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/api/rest')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = demisto.params().get('fetch_time', '3 days').strip()
proxy = demisto.params().get('proxy', False)
headers = {
"Authorization": token
}
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers
)
args = demisto.args()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'mantis-get-issue-by-id':
mantis_get_issue_by_id_command(client, args)
elif demisto.command() == 'mantis-get-issues':
mantis_get_all_issues_command(client, args)
elif demisto.command() == 'mantis-create-issue':
mantis_create_issue_command(client, args)
elif demisto.command() == 'mantis-add-note':
matis_create_note_command(client, args)
elif demisto.command() == 'mantis-close-issue':
mantis_close_issue_command(client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
token = demisto.params().get('token')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/api/rest')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = demisto.params().get('fetch_time', '3 days').strip()
proxy = demisto.params().get('proxy', False)
headers = {
"Authorization": token
}
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers
)
args = demisto.args()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'mantis-get-issue-by-id':
mantis_get_issue_by_id_command(client, args)
elif demisto.command() == 'mantis-get-issues':
mantis_get_all_issues_command(client, args)
elif demisto.command() == 'mantis-create-issue':
return_results(mantis_create_issue_command(client, args))
elif demisto.command() == 'mantis-add-note':
matis_create_note_command(client, args)
elif demisto.command() == 'mantis-close-issue':
mantis_close_issue_command(client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
12,227 |
def execute_config(args, parser):
stdout_write = getLogger("conda.stdout").info
stderr_write = getLogger("conda.stderr").info
json_warnings = []
json_get = {}
if args.show_sources:
if context.json:
stdout_write(json.dumps(
context.collect_all(), sort_keys=True, indent=2, separators=(',', ': '),
cls=EntityEncoder
))
else:
lines = []
for source, reprs in context.collect_all().items():
lines.append("==> %s <==" % source)
lines.extend(format_dict(reprs))
lines.append('')
stdout_write('\n'.join(lines))
return
if args.show is not None:
if args.show:
paramater_names = args.show
all_names = context.list_parameters()
not_params = set(paramater_names) - set(all_names)
if not_params:
from ..exceptions import ArgumentError
from ..common.io import dashlist
raise ArgumentError("Invalid configuration parameters: %s" % dashlist(not_params))
else:
paramater_names = context.list_parameters()
d = {key: getattr(context, key) for key in paramater_names}
if context.json:
stdout_write(json.dumps(
d, sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
))
else:
# Add in custom formatting
if 'custom_channels' in d:
d['custom_channels'] = {
channel.name: "%s://%s" % (channel.scheme, channel.location)
for channel in d['custom_channels'].values()
}
if 'custom_multichannels' in d:
from ..common.io import dashlist
d['custom_multichannels'] = {
multichannel_name: dashlist(channels, indent=4)
for multichannel_name, channels in d['custom_multichannels'].items()
}
stdout_write('\n'.join(format_dict(d)))
context.validate_configuration()
return
if args.describe is not None:
if args.describe:
paramater_names = args.describe
all_names = context.list_parameters()
not_params = set(paramater_names) - set(all_names)
if not_params:
from ..exceptions import ArgumentError
from ..common.io import dashlist
raise ArgumentError("Invalid configuration parameters: %s" % dashlist(not_params))
if context.json:
stdout_write(json.dumps(
[context.describe_parameter(name) for name in paramater_names],
sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
))
else:
builder = []
builder.extend(concat(parameter_description_builder(name)
for name in paramater_names))
stdout_write('\n'.join(builder))
else:
if context.json:
skip_categories = ('CLI-only', 'Hidden and Undocumented')
paramater_names = sorted(concat(
parameter_names for category, parameter_names in context.category_map.items()
if category not in skip_categories
))
stdout_write(json.dumps(
[context.describe_parameter(name) for name in paramater_names],
sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
))
else:
stdout_write(describe_all_parameters())
return
if args.validate:
context.validate_all()
return
if args.system:
rc_path = sys_rc_path
elif args.env:
if 'CONDA_PREFIX' in os.environ:
rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')
else:
rc_path = user_rc_path
elif args.file:
rc_path = args.file
else:
rc_path = user_rc_path
if args.write_default:
if isfile(rc_path):
with open(rc_path) as fh:
data = fh.read().strip()
if data:
raise CondaError("The file '%s' "
"already contains configuration information.\n"
"Remove the file to proceed.\n"
"Use `conda config --describe` to display default configuration."
% rc_path)
with open(rc_path, 'w') as fh:
fh.write(describe_all_parameters())
return
# read existing condarc
if os.path.exists(rc_path):
with open(rc_path, 'r') as fh:
# round trip load required because... we need to round trip
rc_config = yaml_round_trip_load(fh) or {}
elif os.path.exists(sys_rc_path):
# In case the considered rc file doesn't exist, fall back to the system rc
with open(sys_rc_path, 'r') as fh:
rc_config = yaml_round_trip_load(fh) or {}
else:
rc_config = {}
grouped_paramaters = groupby(lambda p: context.describe_parameter(p)['parameter_type'],
context.list_parameters())
primitive_parameters = grouped_paramaters['primitive']
sequence_parameters = grouped_paramaters['sequence']
map_parameters = grouped_paramaters['map']
all_parameters = primitive_parameters + sequence_parameters + map_parameters
# Get
if args.get is not None:
context.validate_all()
if args.get == []:
args.get = sorted(rc_config.keys())
value_not_found = object()
for key in args.get:
key_parts = key.split(".")
if key_parts[0] not in all_parameters:
message = "unknown key %s" % key_parts[0]
if not context.json:
stderr_write(message)
else:
json_warnings.append(message)
continue
remaining_rc_config = rc_config
for k in key_parts:
if k in remaining_rc_config:
remaining_rc_config = remaining_rc_config[k]
else:
remaining_rc_config = value_not_found
break
if remaining_rc_config is value_not_found:
pass
elif context.json:
json_get[key] = remaining_rc_config
else:
print_config_item(key, remaining_rc_config)
if args.stdin:
content = timeout(5, sys.stdin.read)
if not content:
return
try:
# round trip load required because... we need to round trip
parsed = yaml_round_trip_load(content)
rc_config.update(parsed)
except Exception: # pragma: no cover
from ..exceptions import ParseError
raise ParseError("invalid yaml content:\n%s" % content)
# prepend, append, add
for arg, prepend in zip((args.prepend, args.append), (True, False)):
for key, item in arg:
key, subkey = key.split('.', 1) if '.' in key else (key, None)
if key == 'channels' and key not in rc_config:
rc_config[key] = ['defaults']
if key in sequence_parameters:
arglist = rc_config.setdefault(key, [])
elif key in map_parameters:
arglist = rc_config.setdefault(key, {}).setdefault(subkey, [])
else:
from ..exceptions import CondaValueError
raise CondaValueError("Key '%s' is not a known sequence parameter." % key)
if not (isinstance(arglist, Sequence) and not
isinstance(arglist, str)):
from ..exceptions import CouldntParseError
bad = rc_config[key].__class__.__name__
raise CouldntParseError("key %r should be a list, not %s." % (key, bad))
if item in arglist:
message_key = key + "." + subkey if subkey is not None else key
# Right now, all list keys should not contain duplicates
message = "Warning: '%s' already in '%s' list, moving to the %s" % (
item, message_key, "top" if prepend else "bottom")
if subkey is None:
arglist = rc_config[key] = [p for p in arglist if p != item]
else:
arglist = rc_config[key][subkey] = [p for p in arglist if p != item]
if not context.json:
stderr_write(message)
else:
json_warnings.append(message)
arglist.insert(0 if prepend else len(arglist), item)
# Set
for key, item in args.set:
key, subkey = key.split('.', 1) if '.' in key else (key, None)
if key in primitive_parameters:
value = context.typify_parameter(key, item, "--set parameter")
rc_config[key] = value
elif key in map_parameters:
argmap = rc_config.setdefault(key, {})
argmap[subkey] = item
else:
from ..exceptions import CondaValueError
raise CondaValueError("Key '%s' is not a known primitive parameter." % key)
# Remove
for key, item in args.remove:
key, subkey = key.split('.', 1) if '.' in key else (key, None)
if key not in rc_config:
if key != 'channels':
from ..exceptions import CondaKeyError
raise CondaKeyError(key, "key %r is not in the config file" % key)
rc_config[key] = ['defaults']
if item not in rc_config[key]:
from ..exceptions import CondaKeyError
raise CondaKeyError(key, "%r is not in the %r key of the config file" %
(item, key))
rc_config[key] = [i for i in rc_config[key] if i != item]
# Clear
for key, in args.clear:
key, subkey = key.split('.', 1) if '.' in key else (key, None)
if key == 'channels' and key not in rc_config:
rc_config[key] = ['defaults']
if key in sequence_parameters:
arglist = rc_config.setdefault(key, [])
elif key in map_parameters:
arglist = rc_config.setdefault(key, {}).setdefault(subkey, [])
else:
from ..exceptions import CondaValueError
raise CondaValueError("Key '%s' is not a known sequence parameter." % key)
if not (isinstance(arglist, Sequence) and not
isinstance(arglist, str)):
from ..exceptions import CouldntParseError
bad = rc_config[key].__class__.__name__
raise CouldntParseError("key %r should be a list, not %s." % (key, bad))
if key in sequence_parameters:
rc_config[key] = []
elif key in map_parameters:
argmap = rc_config.setdefault(key, {})
argmap[subkey] = []
# Remove Key
for key, in args.remove_key:
key, subkey = key.split('.', 1) if '.' in key else (key, None)
if key not in rc_config:
from ..exceptions import CondaKeyError
raise CondaKeyError(key, "key %r is not in the config file" %
key)
del rc_config[key]
# config.rc_keys
if not args.get:
# Add representers for enums.
# Because a representer cannot be added for the base Enum class (it must be added for
# each specific Enum subclass - and because of import rules), I don't know of a better
# location to do this.
def enum_representer(dumper, data):
return dumper.represent_str(str(data))
yaml.representer.RoundTripRepresenter.add_representer(SafetyChecks, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(PathConflict, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(DepsModifier, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(UpdateModifier, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(ChannelPriority, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(SatSolverChoice, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(
ExperimentalSolverChoice, enum_representer
)
try:
with open(rc_path, 'w') as rc:
rc.write(yaml_round_trip_dump(rc_config))
except (IOError, OSError) as e:
raise CondaError('Cannot write to condarc file at %s\n'
'Caused by %r' % (rc_path, e))
if context.json:
from .common import stdout_json_success
stdout_json_success(
rc_path=rc_path,
warnings=json_warnings,
get=json_get
)
return
|
def execute_config(args, parser):
stdout_write = getLogger("conda.stdout").info
stderr_write = getLogger("conda.stderr").info
json_warnings = []
json_get = {}
if args.show_sources:
if context.json:
stdout_write(json.dumps(
context.collect_all(), sort_keys=True, indent=2, separators=(',', ': '),
cls=EntityEncoder
))
else:
lines = []
for source, reprs in context.collect_all().items():
lines.append("==> %s <==" % source)
lines.extend(format_dict(reprs))
lines.append('')
stdout_write('\n'.join(lines))
return
if args.show is not None:
if args.show:
paramater_names = args.show
all_names = context.list_parameters()
not_params = set(paramater_names) - set(all_names)
if not_params:
from ..exceptions import ArgumentError
from ..common.io import dashlist
raise ArgumentError("Invalid configuration parameters: %s" % dashlist(not_params))
else:
paramater_names = context.list_parameters()
d = {key: getattr(context, key) for key in paramater_names}
if context.json:
stdout_write(json.dumps(
d, sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
))
else:
# Add in custom formatting
if 'custom_channels' in d:
d['custom_channels'] = {
channel.name: "%s://%s" % (channel.scheme, channel.location)
for channel in d['custom_channels'].values()
}
if 'custom_multichannels' in d:
from ..common.io import dashlist
d['custom_multichannels'] = {
multichannel_name: dashlist(channels, indent=4)
for multichannel_name, channels in d['custom_multichannels'].items()
}
stdout_write('\n'.join(format_dict(d)))
context.validate_configuration()
return
if args.describe is not None:
if args.describe:
paramater_names = args.describe
all_names = context.list_parameters()
not_params = set(paramater_names) - set(all_names)
if not_params:
from ..exceptions import ArgumentError
from ..common.io import dashlist
raise ArgumentError("Invalid configuration parameters: %s" % dashlist(not_params))
if context.json:
stdout_write(json.dumps(
[context.describe_parameter(name) for name in paramater_names],
sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
))
else:
builder = []
builder.extend(concat(parameter_description_builder(name)
for name in paramater_names))
stdout_write('\n'.join(builder))
else:
if context.json:
skip_categories = ('CLI-only', 'Hidden and Undocumented')
paramater_names = sorted(concat(
parameter_names for category, parameter_names in context.category_map.items()
if category not in skip_categories
))
stdout_write(json.dumps(
[context.describe_parameter(name) for name in paramater_names],
sort_keys=True, indent=2, separators=(',', ': '), cls=EntityEncoder
))
else:
stdout_write(describe_all_parameters())
return
if args.validate:
context.validate_all()
return
if args.system:
rc_path = sys_rc_path
elif args.env:
if 'CONDA_PREFIX' in os.environ:
rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')
else:
rc_path = user_rc_path
elif args.file:
rc_path = args.file
else:
rc_path = user_rc_path
if args.write_default:
if isfile(rc_path):
with open(rc_path) as fh:
data = fh.read().strip()
if data:
raise CondaError("The file '%s' "
"already contains configuration information.\n"
"Remove the file to proceed.\n"
"Use `conda config --describe` to display default configuration."
% rc_path)
with open(rc_path, 'w') as fh:
fh.write(describe_all_parameters())
return
# read existing condarc
if os.path.exists(rc_path):
with open(rc_path, 'r') as fh:
# round trip load required because... we need to round trip
rc_config = yaml_round_trip_load(fh) or {}
elif os.path.exists(sys_rc_path):
# In case the considered rc file doesn't exist, fall back to the system rc
with open(sys_rc_path, 'r') as fh:
rc_config = yaml_round_trip_load(fh) or {}
else:
rc_config = {}
grouped_paramaters = groupby(lambda p: context.describe_parameter(p)['parameter_type'],
context.list_parameters())
primitive_parameters = grouped_paramaters['primitive']
sequence_parameters = grouped_paramaters['sequence']
map_parameters = grouped_paramaters['map']
all_parameters = primitive_parameters + sequence_parameters + map_parameters
# Get
if args.get is not None:
context.validate_all()
if args.get == []:
args.get = sorted(rc_config.keys())
value_not_found = object()
for key in args.get:
key_parts = key.split(".")
if key_parts[0] not in all_parameters:
message = "unknown key %s" % key_parts[0]
if not context.json:
stderr_write(message)
else:
json_warnings.append(message)
continue
remaining_rc_config = rc_config
for k in key_parts:
if k in remaining_rc_config:
remaining_rc_config = remaining_rc_config[k]
else:
remaining_rc_config = value_not_found
break
if remaining_rc_config is value_not_found:
pass
elif context.json:
json_get[key] = remaining_rc_config
else:
print_config_item(key, remaining_rc_config)
if args.stdin:
content = timeout(5, sys.stdin.read)
if not content:
return
try:
# round trip load required because... we need to round trip
parsed = yaml_round_trip_load(content)
rc_config.update(parsed)
except Exception: # pragma: no cover
from ..exceptions import ParseError
raise ParseError("invalid yaml content:\n%s" % content)
# prepend, append, add
for arg, prepend in zip((args.prepend, args.append), (True, False)):
for key, item in arg:
key, subkey = key.split('.', 1) if '.' in key else (key, None)
if key == 'channels' and key not in rc_config:
rc_config[key] = ['defaults']
if key in sequence_parameters:
arglist = rc_config.setdefault(key, [])
elif key in map_parameters:
arglist = rc_config.setdefault(key, {}).setdefault(subkey, [])
else:
from ..exceptions import CondaValueError
raise CondaValueError("Key '%s' is not a known sequence parameter." % key)
if not (isinstance(arglist, Sequence) and not
isinstance(arglist, str)):
from ..exceptions import CouldntParseError
bad = rc_config[key].__class__.__name__
raise CouldntParseError("key %r should be a list, not %s." % (key, bad))
if item in arglist:
message_key = key + "." + subkey if subkey is not None else key
# Right now, all list keys should not contain duplicates
message = "Warning: '%s' already in '%s' list, moving to the %s" % (
item, message_key, "top" if prepend else "bottom")
if subkey is None:
arglist = rc_config[key] = [p for p in arglist if p != item]
else:
arglist = rc_config[key][subkey] = [p for p in arglist if p != item]
if not context.json:
stderr_write(message)
else:
json_warnings.append(message)
arglist.insert(0 if prepend else len(arglist), item)
# Set
for key, item in args.set:
key, subkey = key.split('.', 1) if '.' in key else (key, None)
if key in primitive_parameters:
value = context.typify_parameter(key, item, "--set parameter")
rc_config[key] = value
elif key in map_parameters:
argmap = rc_config.setdefault(key, {})
argmap[subkey] = item
else:
from ..exceptions import CondaValueError
raise CondaValueError("Key '%s' is not a known primitive parameter." % key)
# Remove
for key, item in args.remove:
key, subkey = key.split('.', 1) if '.' in key else (key, None)
if key not in rc_config:
if key != 'channels':
from ..exceptions import CondaKeyError
raise CondaKeyError(key, "key %r is not in the config file" % key)
rc_config[key] = ['defaults']
if item not in rc_config[key]:
from ..exceptions import CondaKeyError
raise CondaKeyError(key, "%r is not in the %r key of the config file" %
(item, key))
rc_config[key] = [i for i in rc_config[key] if i != item]
# Clear
for key in args.clear:
key, subkey = key.split('.', 1) if '.' in key else (key, None)
if key == 'channels' and key not in rc_config:
rc_config[key] = ['defaults']
if key in sequence_parameters:
arglist = rc_config.setdefault(key, [])
elif key in map_parameters:
arglist = rc_config.setdefault(key, {}).setdefault(subkey, [])
else:
from ..exceptions import CondaValueError
raise CondaValueError("Key '%s' is not a known sequence parameter." % key)
if not (isinstance(arglist, Sequence) and not
isinstance(arglist, str)):
from ..exceptions import CouldntParseError
bad = rc_config[key].__class__.__name__
raise CouldntParseError("key %r should be a list, not %s." % (key, bad))
if key in sequence_parameters:
rc_config[key] = []
elif key in map_parameters:
argmap = rc_config.setdefault(key, {})
argmap[subkey] = []
# Remove Key
for key, in args.remove_key:
key, subkey = key.split('.', 1) if '.' in key else (key, None)
if key not in rc_config:
from ..exceptions import CondaKeyError
raise CondaKeyError(key, "key %r is not in the config file" %
key)
del rc_config[key]
# config.rc_keys
if not args.get:
# Add representers for enums.
# Because a representer cannot be added for the base Enum class (it must be added for
# each specific Enum subclass - and because of import rules), I don't know of a better
# location to do this.
def enum_representer(dumper, data):
return dumper.represent_str(str(data))
yaml.representer.RoundTripRepresenter.add_representer(SafetyChecks, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(PathConflict, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(DepsModifier, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(UpdateModifier, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(ChannelPriority, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(SatSolverChoice, enum_representer)
yaml.representer.RoundTripRepresenter.add_representer(
ExperimentalSolverChoice, enum_representer
)
try:
with open(rc_path, 'w') as rc:
rc.write(yaml_round_trip_dump(rc_config))
except (IOError, OSError) as e:
raise CondaError('Cannot write to condarc file at %s\n'
'Caused by %r' % (rc_path, e))
if context.json:
from .common import stdout_json_success
stdout_json_success(
rc_path=rc_path,
warnings=json_warnings,
get=json_get
)
return
|
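The handler in the pair above is what backs the `conda config` subcommand. Below is a hedged sketch of invocations that exercise its main branches (run via subprocess, so the installed conda version and the location of the resulting .condarc are assumptions of this sketch, not guarantees of the code above).

import subprocess

for argv in (
    ["conda", "config", "--set", "changeps1", "false"],          # primitive parameter -> Set branch
    ["conda", "config", "--append", "channels", "conda-forge"],  # sequence parameter -> append branch
    ["conda", "config", "--get", "channels"],                     # Get branch
    ["conda", "config", "--remove", "channels", "conda-forge"],   # Remove branch
):
    subprocess.run(argv, check=False)   # check=False: keep going even if a key is absent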
31,726 |
def run_ssdeep_command(anchor_hash: str, hashes_to_compare: str):
with tempfile.NamedTemporaryFile() as hash1:
with tempfile.NamedTemporaryFile() as hash2:
hash1.write(bytes(anchor_hash, encoding='utf-8'))
hash1.flush()
hash2.write(bytes(hashes_to_compare, encoding='utf-8'))
hash2.flush()
stream = os.popen(f"ssdeep -k {hash1.name} {hash2.name} -c -a") # nosec
return stream.read().split('\n')
|
def run_ssdeep_command(anchor_hash: str, hashes_to_compare: str) -> list:
with tempfile.NamedTemporaryFile() as hash1:
with tempfile.NamedTemporaryFile() as hash2:
hash1.write(bytes(anchor_hash, encoding='utf-8'))
hash1.flush()
hash2.write(bytes(hashes_to_compare, encoding='utf-8'))
hash2.flush()
stream = os.popen(f"ssdeep -k {hash1.name} {hash2.name} -c -a") # nosec
return stream.read().split('\n')
|
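A minimal usage sketch for the pair above. It assumes the ssdeep CLI is installed and on PATH (the function shells out to it) and that both arguments are strings in ssdeep's signature-list format; the hash values below are placeholders, not real fuzzy hashes.

import os        # used by run_ssdeep_command above
import tempfile  # used by run_ssdeep_command above

ANCHOR = 'ssdeep,1.1--blocksize:hash:hash,filename\n96:placeholderHashA,"sample_a.bin"'
CANDIDATES = 'ssdeep,1.1--blocksize:hash:hash,filename\n96:placeholderHashB,"sample_b.bin"'

for line in run_ssdeep_command(ANCHOR, CANDIDATES):
    if line:  # ssdeep emits one "x matches y (score)" line per comparison hit
        print(line)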
13,594 |
def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
    Randomized QB decomposition.
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
        Distribution used for the random projection matrix Omega (`'normal'` or `'uniform'`).
    oversampling : int
        Oversampling parameter. Number of extra columns of the projection matrix.
    powerIterations : int
        Number of power iterations.
Returns
-------
Q :
        |VectorArray| containing an approximately optimal basis for the image of the input matrix A.
len(Q) = target_rank
B :
Numpy Array. Projection of the Input Matrix into the lower dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
|
def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
    Randomized QB decomposition.
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
        Distribution used for the random projection matrix Omega (`'normal'` or `'uniform'`).
    oversampling : int
        Oversampling parameter. Number of extra columns of the projection matrix.
    powerIterations : int
        Number of power iterations.
Returns
-------
Q :
        |VectorArray| containing an approximately optimal basis for the image of the input matrix A.
len(Q) = target_rank
B :
Numpy array. Projection of the input matrix into the lower dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
|
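A small, hedged usage sketch for the decomposition above. It assumes a pyMOR environment, i.e. that NumpyVectorSpace is importable and that the module already provides the names used inside rand_QB (np, spla, gram_schmidt, VectorArray); dimensions and ranks are illustrative only.

import numpy as np
from pymor.vectorarrays.numpy import NumpyVectorSpace

space = NumpyVectorSpace(200)      # vectors of dimension 200
A = space.random(30)               # |VectorArray| with 30 random vectors
Q, B = rand_QB(A, target_rank=5, oversampling=2, powerIterations=1)

print(len(Q))                      # 5 orthonormal basis vectors
print(B.shape)                     # (5, 30), the projection of A onto span(Q)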
32,205 |
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, str],
first_fetch_time: str, query: Optional[str], mirror_direction: str,
mirror_tag: List[str]) -> Tuple[Dict[str, str], List[dict]]:
"""This function retrieves new incidents every interval (default is 1 minute).
:type client: ``Client``
:param client: XSOAR client to use
:type max_results: ``int``
    :param max_results: Maximum number of incidents per fetch
:type last_run: ``Optional[Dict[str, str]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:type first_fetch_time: ``Optional[str]``
:param first_fetch_time:
If last_run is None (first time we are fetching), it contains
the timestamp in milliseconds on when to start fetching incidents
:type query: ``Optional[str]``
:param query:
query to fetch the relevant incidents
:type mirror_direction: ``str``
:param mirror_direction:
Mirror direction for the fetched incidents
:type mirror_tag: ``List[str]``
:param mirror_tag:
The tags that you will mirror out of the incident.
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, int], List[dict]]``
"""
last_fetch = last_run.get('last_fetch')
if not last_fetch:
last_fetch = first_fetch_time # type: ignore
latest_created_time = dateparser.parse(last_fetch)
incidents_result: List[Dict[str, Any]] = []
if query:
query += f' and created:>="{last_fetch}"'
else:
query = f'created:>="{last_fetch}"'
demisto.debug(f'Fetching incidents since last fetch: {last_fetch}')
incidents = client.search_incidents(
query=query,
max_results=max_results,
start_time=last_fetch
)
for incident in incidents:
incident_result: Dict[str, Any] = dict()
incident_result['dbotMirrorDirection'] = MIRROR_DIRECTION[mirror_direction] # type: ignore
incident['dbotMirrorInstance'] = demisto.integrationInstance()
incident_result['dbotMirrorTags'] = mirror_tag if mirror_tag else None # type: ignore
incident_result['dbotMirrorId'] = incident['id']
for key, value in incident.items():
if key in FIELDS_TO_COPY_FROM_REMOTE_INCIDENT:
incident_result[key] = value
incident_result['rawJSON'] = json.dumps(incident)
file_attachments = []
if incident.get('attachment') and len(incident.get('attachment', [])) > 0 and incident.get('investigationId'):
entries = client.get_incident_entries(
incident_id=incident['investigationId'], # type: ignore
from_date=0,
max_results=10,
categories=['attachments'],
tags=None,
tags_and_operator=False
)
for entry in entries:
if 'file' in entry and entry.get('file'):
file_entry_content = client.get_file_entry(entry.get('id')) # type: ignore
file_result = fileResult(entry['file'], file_entry_content)
if any(attachment.get('name') == entry['file'] for attachment in incident.get('attachment', [])):
if file_result['Type'] == EntryType.ERROR:
raise Exception(f"Error getting attachment: {str(file_result.get('Contents', ''))}")
file_attachments.append({
'path': file_result.get('FileID', ''),
'name': file_result.get('File', '')
})
incident_result['attachment'] = file_attachments
incidents_result.append(incident_result)
incident_created_time = dateparser.parse(incident.get('created')) # type: ignore
# Update last run and add incident if the incident is newer than last fetch
if incident_created_time > latest_created_time: # type: ignore
latest_created_time = incident_created_time
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': (latest_created_time + timedelta(microseconds=1)).strftime(XSOAR_DATE_FORMAT)} # type: ignore
return next_run, incidents_result
|
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, str],
first_fetch_time: str, query: Optional[str], mirror_direction: str,
mirror_tag: List[str]) -> Tuple[Dict[str, str], List[dict]]:
"""This function retrieves new incidents every interval (default is 1 minute).
:type client: ``Client``
:param client: XSOAR client to use
:type max_results: ``int``
    :param max_results: Maximum number of incidents per fetch
:type last_run: ``Optional[Dict[str, str]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:type first_fetch_time: ``Optional[str]``
:param first_fetch_time:
If last_run is None (first time we are fetching), it contains
the timestamp in milliseconds on when to start fetching incidents
:type query: ``Optional[str]``
:param query:
query to fetch the relevant incidents
:type mirror_direction: ``str``
:param mirror_direction:
Mirror direction for the fetched incidents
:type mirror_tag: ``List[str]``
:param mirror_tag:
The tags that you will mirror out of the incident.
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, int], List[dict]]``
"""
last_fetch = last_run.get('last_fetch')
if not last_fetch:
last_fetch = first_fetch_time # type: ignore
latest_created_time = dateparser.parse(last_fetch)
incidents_result: List[Dict[str, Any]] = []
if query:
query += f' and created:>="{last_fetch}"'
else:
query = f'created:>="{last_fetch}"'
demisto.debug(f'Fetching incidents since last fetch: {last_fetch}')
incidents = client.search_incidents(
query=query,
max_results=max_results,
start_time=last_fetch
)
for incident in incidents:
incident_result: Dict[str, Any] = dict()
incident_result['dbotMirrorDirection'] = MIRROR_DIRECTION[mirror_direction] # type: ignore
incident['dbotMirrorInstance'] = demisto.integrationInstance()
incident_result['dbotMirrorTags'] = mirror_tag if mirror_tag else None # type: ignore
incident_result['dbotMirrorId'] = incident['id']
for key, value in incident.items():
if key in FIELDS_TO_COPY_FROM_REMOTE_INCIDENT:
incident_result[key] = value
incident_result['rawJSON'] = json.dumps(incident)
file_attachments = []
if incident.get('attachment') and len(incident.get('attachment', [])) > 0 and incident.get('investigationId'):
entries = client.get_incident_entries(
incident_id=incident['investigationId'], # type: ignore
from_date=0,
max_results=10,
categories=['attachments'],
tags=None,
tags_and_operator=False
)
for entry in entries:
if 'file' in entry and entry.get('file'):
file_entry_content = client.get_file_entry(entry.get('id')) # type: ignore
file_result = fileResult(entry['file'], file_entry_content)
if any(attachment.get('name') == entry['file'] for attachment in incident.get('attachment', [])):
if file_result['Type'] == EntryType.ERROR:
raise Exception(f"Error getting attachment: {str(file_result.get('Contents', ''))}")
file_attachments.append({
'path': file_result.get('FileID', ''),
'name': file_result.get('File', '')
})
incident_result['attachment'] = file_attachments
incidents_result.append(incident_result)
incident_created_time = dateparser.parse(incident.get('created')) # type: ignore[arg-type]
# Update last run and add incident if the incident is newer than last fetch
if incident_created_time > latest_created_time: # type: ignore
latest_created_time = incident_created_time
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': (latest_created_time + timedelta(microseconds=1)).strftime(XSOAR_DATE_FORMAT)} # type: ignore
return next_run, incidents_result
|
30,622 |
def get_edl_ioc_values(on_demand: bool,
request_args: RequestArguments,
last_update_data={},
cache_refresh_rate=None) -> str:
"""
Get the ioc list to return in the edl
"""
if last_update_data is None:
last_update_data = {}
last_run = last_update_data.get('last_run')
last_query = last_update_data.get('last_query')
current_iocs = last_update_data.get('current_iocs')
# on_demand ignores cache
if on_demand:
if request_args.is_request_change(last_update_data):
values_str = get_ioc_values_str_from_context(request_args=request_args, iocs=current_iocs)
else:
values_str = get_ioc_values_str_from_context(request_args=request_args)
else:
if last_run:
cache_time, _ = parse_date_range(cache_refresh_rate, to_timestamp=True)
if last_run <= cache_time or request_args.is_request_change(last_update_data) or \
request_args.query != last_query:
values_str = refresh_edl_context(request_args)
else:
values_str = get_ioc_values_str_from_context(request_args=request_args)
else:
values_str = refresh_edl_context(request_args)
return values_str
|
def get_edl_ioc_values(on_demand: bool,
request_args: RequestArguments,
last_update_data,
cache_refresh_rate=None) -> str:
"""
Get the ioc list to return in the edl
"""
if last_update_data is None:
last_update_data = {}
last_run = last_update_data.get('last_run')
last_query = last_update_data.get('last_query')
current_iocs = last_update_data.get('current_iocs')
# on_demand ignores cache
if on_demand:
if request_args.is_request_change(last_update_data):
values_str = get_ioc_values_str_from_context(request_args=request_args, iocs=current_iocs)
else:
values_str = get_ioc_values_str_from_context(request_args=request_args)
else:
if last_run:
cache_time, _ = parse_date_range(cache_refresh_rate, to_timestamp=True)
if last_run <= cache_time or request_args.is_request_change(last_update_data) or \
request_args.query != last_query:
values_str = refresh_edl_context(request_args)
else:
values_str = get_ioc_values_str_from_context(request_args=request_args)
else:
values_str = refresh_edl_context(request_args)
return values_str
|
22,280 |
def unicodify(value, encoding=DEFAULT_ENCODING, error='replace', strip_null=False):
u"""
Returns a Unicode string or None.
>>> assert unicodify(None) is None
>>> assert unicodify('simple string') == u'simple string'
>>> assert unicodify(3) == u'3'
>>> assert unicodify(bytearray([115, 116, 114, 196, 169, 195, 177, 103])) == u'strĩñg'
>>> assert unicodify(Exception(u'strĩñg')) == u'strĩñg'
>>> assert unicodify('cómplǐcḁtëd strĩñg') == u'cómplǐcḁtëd strĩñg'
>>> s = u'cómplǐcḁtëd strĩñg'; assert unicodify(s) == s
>>> s = u'lâtín strìñg'; assert unicodify(s.encode('latin-1'), 'latin-1') == s
>>> s = u'lâtín strìñg'; assert unicodify(s.encode('latin-1')) == u'l\ufffdt\ufffdn str\ufffd\ufffdg'
>>> s = u'lâtín strìñg'; assert unicodify(s.encode('latin-1'), error='ignore') == u'ltn strg'
>>> assert unicodify(Exception(u'¼ cup of flour'.encode('latin-1')), error='ignore') == ' cup of flour'
"""
if value is None:
return value
try:
if isinstance(value, Exception):
# This is for backwards compatibility for python 2, but works on python 3 as well
if value.args:
value = value.args[0]
if isinstance(value, bytearray):
value = bytes(value)
elif not isinstance(value, string_types) and not isinstance(value, binary_type):
# In Python 2, value is not an instance of basestring (i.e. str or unicode)
# In Python 3, value is not an instance of bytes or str
value = text_type(value)
# Now in Python 2, value is an instance of basestring, but may be not unicode
# Now in Python 3, value is an instance of bytes or str
if not isinstance(value, text_type):
value = text_type(value, encoding, error)
except Exception:
msg = "Value '%s' could not be coerced to Unicode" % value
log.exception(msg)
raise Exception(msg)
if strip_null:
return value.replace('\0', '')
return value
|
def unicodify(value, encoding=DEFAULT_ENCODING, error='replace', strip_null=False):
u"""
Returns a Unicode string or None.
>>> assert unicodify(None) is None
>>> assert unicodify('simple string') == u'simple string'
>>> assert unicodify(3) == u'3'
>>> assert unicodify(bytearray([115, 116, 114, 196, 169, 195, 177, 103])) == u'strĩñg'
>>> assert unicodify(Exception(u'strĩñg')) == u'strĩñg'
>>> assert unicodify('cómplǐcḁtëd strĩñg') == u'cómplǐcḁtëd strĩñg'
>>> s = u'cómplǐcḁtëd strĩñg'; assert unicodify(s) == s
>>> s = u'lâtín strìñg'; assert unicodify(s.encode('latin-1'), 'latin-1') == s
>>> s = u'lâtín strìñg'; assert unicodify(s.encode('latin-1')) == u'l\ufffdt\ufffdn str\ufffd\ufffdg'
>>> s = u'lâtín strìñg'; assert unicodify(s.encode('latin-1'), error='ignore') == u'ltn strg'
>>> assert unicodify(Exception(u'¼ cup of flour'.encode('latin-1')), error='ignore') == ' cup of flour'
"""
if value is None:
return value
try:
if isinstance(value, Exception):
# This is for backwards compatibility for python 2, but works on python 3 as well
if len(value.args) == 1:
value = value.args[0]
if isinstance(value, bytearray):
value = bytes(value)
elif not isinstance(value, string_types) and not isinstance(value, binary_type):
# In Python 2, value is not an instance of basestring (i.e. str or unicode)
# In Python 3, value is not an instance of bytes or str
value = text_type(value)
# Now in Python 2, value is an instance of basestring, but may be not unicode
# Now in Python 3, value is an instance of bytes or str
if not isinstance(value, text_type):
value = text_type(value, encoding, error)
except Exception:
msg = "Value '%s' could not be coerced to Unicode" % value
log.exception(msg)
raise Exception(msg)
if strip_null:
return value.replace('\0', '')
return value
|
35,273 |
def parafac2(tensor_slices, rank, n_iter_max=100, init='random', svd='numpy_svd', normalize_factors=False,
tol=1e-8, random_state=None, verbose=False, return_errors=False, n_iter_parafac=5):
r"""PARAFAC2 decomposition [1]_ via alternating least squares (ALS)
Computes a rank-`rank` PARAFAC2 decomposition of the tensor defined by `tensor_slices`.
The decomposition is on the form :math:`(A [B_i] C)` such that the i-th frontal slice,
:math:`X_i`, of :math:`X` is given by
.. math::
X_i = B_i diag(a_i) C^T,
where :math:`diag(a_i)` is the diagonal matrix whose nonzero entries are equal to
the :math:`i`-th row of the :math:`I \times R` factor matrix :math:`A`, :math:`B_i`
is a :math:`J_i \times R` factor matrix such that the cross product matrix :math:`B_{i_1}^T B_{i_1}`
is constant for all :math:`i`, and :math:`C` is a :math:`K \times R` factor matrix.
To compute this decomposition, we reformulate the expression for :math:`B_i` such that
.. math::
B_i = P_i B,
where :math:`P_i` is a :math:`J_i \times R` orthogonal matrix and :math:`B` is a
:math:`R \times R` matrix.
An alternative formulation of the PARAFAC2 decomposition is that the tensor element
:math:`X_{ijk}` is given by
.. math::
X_{ijk} = \sum_{r=1}^R A_{ir} B_{ijr} C_{kr},
    where the same constraints hold for :math:`B_i` as above.
Parameters
----------
tensor_slices : ndarray or list of ndarrays
Either a third order tensor or a list of second order tensors that may have different number of rows.
rank : int
Number of components.
n_iter_max : int
        Maximum number of iterations
init : {'svd', 'random', KruskalTensor, Parafac2Tensor}
Type of factor matrix initialization. See `initialize_factors`.
svd : str, default is 'numpy_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
normalize_factors : if True, aggregate the weights of each factor in a 1D-tensor
of shape (rank, ), which will contain the norms of the factors
tol : float, optional
(Default: 1e-8) Relative reconstruction error tolerance. The
algorithm is considered to have found the global minimum when the
reconstruction error is less than `tol`.
random_state : {None, int, np.random.RandomState}
verbose : int, optional
Level of verbosity
return_errors : bool, optional
Activate return of iteration errors
n_iter_parafac: int, optional
Number of PARAFAC iterations to perform for each PARAFAC2 iteration
Returns
-------
Parafac2Tensor : (weight, factors, projection_matrices)
* weights : 1D array of shape (rank, )
all ones if normalize_factors is False (default),
weights of the (normalized) factors otherwise
* factors : List of factors of the CP decomposition element `i` is of shape
(tensor.shape[i], rank)
* projection_matrices : List of projection matrices used to create evolving
factors.
errors : list
A list of reconstruction errors at each iteration of the algorithms.
References
----------
.. [1] Kiers, H.A.L., ten Berge, J.M.F. and Bro, R. (1999),
PARAFAC2—Part I. A direct fitting algorithm for the PARAFAC2 model.
J. Chemometrics, 13: 275-294.
Notes
-----
This formulation of the PARAFAC2 decomposition is slightly different from the one in [1]_.
The difference lies in that here, the second mode changes over the first mode, whereas in
[1]_, the second mode changes over the third mode. We made this change since that means
    that the function accepts both lists of matrices and a single nd-array as input without
any reordering of the modes.
"""
epsilon = 10e-12
weights, factors, projections = initialize_decomposition(tensor_slices, rank, random_state=random_state)
rec_errors = []
norm_tensor = tl.sqrt(sum(tl.norm(tensor_slice, 2) for tensor_slice in tensor_slices))
svd_fun = _get_svd(svd)
projected_tensor = tl.zeros([factor.shape[0] for factor in factors])
for iteration in range(n_iter_max):
if verbose:
print("Starting iteration", iteration)
factors[1] *= weights.reshape(1, -1)
weights = T.ones(weights.shape, **tl.context(tensor_slices[0]))
projections = _compute_projections(tensor_slices, factors, svd_fun, out=projections)
projected_tensor = _project_tensor_slices(tensor_slices, projections, out=projected_tensor)
_, factors = parafac(projected_tensor, rank, n_iter_max=n_iter_parafac, init=(weights, factors),
svd=svd, orthogonalise=False, verbose=verbose, return_errors=False,
normalize_factors=False, mask=None, random_state=random_state, tol=1e-100)
if normalize_factors:
for factor in factors:
norms = T.norm(factor, axis=0)
weights *= norms
factor /= norms + epsilon
if tol:
rec_error = _parafac2_reconstruction_error(tensor_slices, (weights, factors, projections))
rec_errors.append(rec_error)
if iteration >= 1:
if verbose:
print('PARAFAC2 reconstruction error={}, variation={}.'.format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print('converged in {} iterations.'.format(iteration))
break
else:
if verbose:
print('PARAFAC2 reconstruction error={}'.format(rec_errors[-1]))
parafac2_tensor = Parafac2Tensor((weights, factors, projections))
if return_errors:
return parafac2_tensor, rec_errors
else:
return parafac2_tensor
|
def parafac2(tensor_slices, rank, n_iter_max=100, init='random', svd='numpy_svd', normalize_factors=False,
tol=1e-8, random_state=None, verbose=False, return_errors=False, n_iter_parafac=5):
r"""PARAFAC2 decomposition [1]_ via alternating least squares (ALS)
Computes a rank-`rank` PARAFAC2 decomposition of the tensor defined by `tensor_slices`.
The decomposition is on the form :math:`(A [B_i] C)` such that the i-th frontal slice,
:math:`X_i`, of :math:`X` is given by
.. math::
X_i = B_i diag(a_i) C^T,
where :math:`diag(a_i)` is the diagonal matrix whose nonzero entries are equal to
the :math:`i`-th row of the :math:`I \times R` factor matrix :math:`A`, :math:`B_i`
is a :math:`J_i \times R` factor matrix such that the cross product matrix :math:`B_{i_1}^T B_{i_1}`
is constant for all :math:`i`, and :math:`C` is a :math:`K \times R` factor matrix.
To compute this decomposition, we reformulate the expression for :math:`B_i` such that
.. math::
B_i = P_i B,
where :math:`P_i` is a :math:`J_i \times R` orthogonal matrix and :math:`B` is a
:math:`R \times R` matrix.
An alternative formulation of the PARAFAC2 decomposition is that the tensor element
:math:`X_{ijk}` is given by
.. math::
X_{ijk} = \sum_{r=1}^R A_{ir} B_{ijr} C_{kr},
    where the same constraints hold for :math:`B_i` as above.
Parameters
----------
tensor_slices : ndarray or list of ndarrays
Either a third order tensor or a list of second order tensors that may have different number of rows.
rank : int
Number of components.
n_iter_max : int
        Maximum number of iterations
init : {'svd', 'random', KruskalTensor, Parafac2Tensor}
Type of factor matrix initialization. See `initialize_factors`.
svd : str, default is 'numpy_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
normalize_factors : if True, aggregate the weights of each factor in a 1D-tensor
of shape (rank, ), which will contain the norms of the factors
tol : float, optional
(Default: 1e-8) Relative reconstruction error tolerance. The
algorithm is considered to have found the global minimum when the
reconstruction error is less than `tol`.
random_state : {None, int, np.random.RandomState}
verbose : int, optional
Level of verbosity
return_errors : bool, optional
Activate return of iteration errors
n_iter_parafac: int, optional
Number of PARAFAC iterations to perform for each PARAFAC2 iteration
Returns
-------
Parafac2Tensor : (weight, factors, projection_matrices)
* weights : 1D array of shape (rank, )
all ones if normalize_factors is False (default),
weights of the (normalized) factors otherwise
* factors : List of factors of the CP decomposition element `i` is of shape
(tensor.shape[i], rank)
* projection_matrices : List of projection matrices used to create evolving
factors.
errors : list
A list of reconstruction errors at each iteration of the algorithms.
References
----------
.. [1] Kiers, H.A.L., ten Berge, J.M.F. and Bro, R. (1999),
PARAFAC2—Part I. A direct fitting algorithm for the PARAFAC2 model.
J. Chemometrics, 13: 275-294.
Notes
-----
This formulation of the PARAFAC2 decomposition is slightly different from the one in [1]_.
The difference lies in that here, the second mode changes over the first mode, whereas in
[1]_, the second mode changes over the third mode. We made this change since that means
    that the function accepts both lists of matrices and a single nd-array as input without
any reordering of the modes.
"""
epsilon = 10e-12
weights, factors, projections = initialize_decomposition(tensor_slices, rank, random_state=random_state)
rec_errors = []
norm_tensor = tl.sqrt(sum(tl.norm(tensor_slice, 2) for tensor_slice in tensor_slices))
svd_fun = _get_svd(svd)
projected_tensor = tl.zeros([factor.shape[0] for factor in factors], **T.context(factors[0]))
for iteration in range(n_iter_max):
if verbose:
print("Starting iteration", iteration)
factors[1] *= weights.reshape(1, -1)
weights = T.ones(weights.shape, **tl.context(tensor_slices[0]))
projections = _compute_projections(tensor_slices, factors, svd_fun, out=projections)
projected_tensor = _project_tensor_slices(tensor_slices, projections, out=projected_tensor)
_, factors = parafac(projected_tensor, rank, n_iter_max=n_iter_parafac, init=(weights, factors),
svd=svd, orthogonalise=False, verbose=verbose, return_errors=False,
normalize_factors=False, mask=None, random_state=random_state, tol=1e-100)
if normalize_factors:
for factor in factors:
norms = T.norm(factor, axis=0)
weights *= norms
factor /= norms + epsilon
if tol:
rec_error = _parafac2_reconstruction_error(tensor_slices, (weights, factors, projections))
rec_errors.append(rec_error)
if iteration >= 1:
if verbose:
print('PARAFAC2 reconstruction error={}, variation={}.'.format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print('converged in {} iterations.'.format(iteration))
break
else:
if verbose:
print('PARAFAC2 reconstruction error={}'.format(rec_errors[-1]))
parafac2_tensor = Parafac2Tensor((weights, factors, projections))
if return_errors:
return parafac2_tensor, rec_errors
else:
return parafac2_tensor
|
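A brief, hedged usage sketch for the ALS routine above. It assumes the surrounding TensorLy module with the NumPy backend (so tl, T, parafac, Parafac2Tensor and the helper functions are in scope); slice sizes, rank and seed are arbitrary.

import numpy as np

rng = np.random.RandomState(0)
# three frontal slices with differing numbers of rows and a shared column dimension
tensor_slices = [rng.standard_normal((n_rows, 12)) for n_rows in (10, 15, 20)]

parafac2_tensor, errors = parafac2(tensor_slices, rank=3, n_iter_max=50,
                                   random_state=0, return_errors=True)
weights, factors, projections = parafac2_tensor    # Parafac2Tensor unpacks into its parts
print([f.shape for f in factors])                  # [(3, 3), (3, 3), (12, 3)]
print(errors[-1])                                  # final reconstruction error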
27,705 |
def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]:
"""Return the explanation for the diff between text.
Unless --verbose is used this will skip leading and trailing
characters which are identical to keep the diff minimal.
"""
from difflib import ndiff
from wcwidth import wcswidth
explanation = [] # type: List[str]
if verbose < 1:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [
"Skipping %s identical leading characters in diff, use -v to show" % i
]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [
"Skipping {} identical trailing "
"characters in diff, use -v to show".format(i)
]
left = left[:-i]
right = right[:-i]
keepends = True
if left.isspace() or right.isspace():
left = repr(str(left))
right = repr(str(right))
explanation += ["Strings contain only whitespace, escaping them using repr()"]
left_lines = left.splitlines(keepends)
right_lines = right.splitlines(keepends)
if any(wcswidth(x) == -1 for x in left_lines + right_lines):
left_lines = [repr(x) for x in left_lines]
right_lines = [repr(x) for x in right_lines]
explanation += [
"Strings contain non-printable/escape characters, escaping them using repr()"
]
explanation += [line.strip("\n") for line in ndiff(left_lines, right_lines)]
return explanation
|
def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]:
"""Return the explanation for the diff between text.
Unless --verbose is used this will skip leading and trailing
characters which are identical to keep the diff minimal.
"""
from difflib import ndiff
from wcwidth import wcswidth
explanation = [] # type: List[str]
if verbose < 1:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [
"Skipping %s identical leading characters in diff, use -v to show" % i
]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [
"Skipping {} identical trailing "
"characters in diff, use -v to show".format(i)
]
left = left[:-i]
right = right[:-i]
keepends = True
if left.isspace() or right.isspace():
left = repr(str(left))
right = repr(str(right))
explanation += ["Strings contain only whitespace, escaping them using repr()"]
left_lines = left.splitlines(keepends)
right_lines = right.splitlines(keepends)
if any(wcswidth(x) == -1 for x in left_lines + right_lines):
left_lines = [repr(x) for x in left_lines]
right_lines = [repr(x) for x in right_lines]
explanation += [
"Strings contain non-printable/escape characters. Escaping them using repr()."
]
explanation += [line.strip("\n") for line in ndiff(left_lines, right_lines)]
return explanation
|
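A short illustration of the assertion helper above with made-up strings; it only needs difflib (standard library) and the wcwidth package, both imported lazily inside the function.

for line in _diff_text("user_id = 1001", "user_id = 1002", verbose=1):
    print(line)   # ndiff-style output: "- user_id = 1001", "+ user_id = 1002", "?" guide lines
# With verbose=0, an identical leading or trailing run longer than 42 characters
# would be trimmed and replaced by a "Skipping N identical ... characters" note.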
30,008 |
def build_categorical(param):
if param.weights is not None:
raise ValueError('The pcs format does not support categorical hyperparameters with '
'assigend weights (for hyperparameter %s)' % param.name)
cat_template = "%s {%s} [%s]"
return cat_template % (param.name,
", ".join([str(value) for value in param.choices]),
str(param.default_value))
|
def build_categorical(param):
if param.weights is not None:
raise ValueError('The pcs format does not support categorical hyperparameters with '
'assigned weights (for hyperparameter %s)' % param.name)
cat_template = "%s {%s} [%s]"
return cat_template % (param.name,
", ".join([str(value) for value in param.choices]),
str(param.default_value))
|
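A hedged example for the helper above. It assumes ConfigSpace's CategoricalHyperparameter (name, choices, default_value); weights is left unset, since the function rejects weighted categoricals.

from ConfigSpace.hyperparameters import CategoricalHyperparameter

param = CategoricalHyperparameter("optimizer", ["adam", "sgd", "lbfgs"],
                                  default_value="sgd")
print(build_categorical(param))   # optimizer {adam, sgd, lbfgs} [sgd]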
3,783 |
def vector_norm(x: Array, /, *, axis: Tuple[int, int] = (-2, -1), keepdims: bool = False, ord: Optional[Union[int, float, Literal[np.inf, -np.inf, 'fro', 'nuc']]] = 'fro') -> Array:
"""
Array API compatible wrapper for :py:func:`np.linalg.norm <numpy.linalg.norm>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.norm.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in norm')
return Array._new(np.linalg.norm(x._array, axis=axis, keepdims=keepdims, ord=ord))
|
def vector_norm(x: Array, /, *, axis: Tuple[int, int] = (-2, -1), keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> Array:
"""
Array API compatible wrapper for :py:func:`np.linalg.norm <numpy.linalg.norm>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.norm.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in norm')
return Array._new(np.linalg.norm(x._array, axis=axis, keepdims=keepdims, ord=ord))
|
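A sketch of how the wrapper above behaves, assuming it lives in NumPy's numpy.array_api namespace (which defines Array and _floating_dtypes); arrays are therefore created through that namespace.

import numpy.array_api as xp   # NumPy >= 1.22; importing it warns that the API is experimental

x = xp.asarray([[3.0, 4.0], [0.0, 0.0]])
# The defaults axis=(-2, -1), ord='fro' reduce over both axes, i.e. the
# Frobenius norm of the 2-D array: sqrt(3**2 + 4**2) = 5.0
print(vector_norm(x))
# Row-wise 1-norms, keeping the reduced axis:
print(vector_norm(x, axis=-1, ord=1, keepdims=True))   # [[7.0], [0.0]]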
45,663 |
def layout():
return html.Div(id='oncoprint-body', children=[
dash_bio.OncoPrint(
id='oncoprint-chart',
height=550,
data=[]
),
html.Div(id='oncoprint-control-tabs', children=[
dcc.Tabs(
id='oncoprint-tabs',
children=[
dcc.Tab(
label='About',
value='what-is',
children=html.Div(className='oncoprint-tab', children=[
html.H4(
"What is OncoPrint?"
),
html.P(
"""
The OncoPrint component is used to view multiple genetic
alteration events through an interactive and zoomable
heatmap. It is a React/Dash port of the popular
oncoPrint() function from the BioConductor R
package. Under the hood, the rendering is done using
Plotly.js built upon D3. Plotly's interactivity allows
the user to bind clicks and hovers to genetic events,
allowing the user to create complex bioinformatic apps
or workflows that rely on crossfiltering.
"""
),
html.P(
"""
Read more about the component here:
https://github.com/plotly/react-oncoprint
"""
)
])
),
dcc.Tab(
label='Data',
value='data',
children=html.Div(className='oncoprint-tab', children=[
html.Div([
html.Div(
className='oncoprint-option-name',
children='Select dataset'
),
dcc.Dropdown(
id='oncoprint-dropdown',
className='oncoprint-select',
options=[
{
'label': '{}.json'.format(ds),
'value': ds
}
for ds in DATASETS
],
value='cBioPortalData',
),
]),
html.Hr(
className='oncoprint-separator'
),
html.Div([
html.H4('Hover, click, or event data'),
html.Div(
id='oncoprint-events'
),
])
])
),
dcc.Tab(
label='View',
value='view',
children=html.Div(className='oncoprint-tab', children=[
html.H4('Layout'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Overview'
),
daq.ToggleSwitch(
id='oncoprint-show-overview',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Legend'
),
daq.ToggleSwitch(
id='oncoprint-show-legend',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Padding'
),
dcc.Slider(
className='oncoprint-slider',
id='oncoprint-padding-input',
value=0.05,
min=0,
max=0.1,
step=0.01,
marks={
'0': '0',
'0.02': '0.02',
'0.04': '0.04',
'0.06': '0.06',
'0.08': '0.08',
'0.1': '0.1',
},
),
html.Br(),
html.Div(
'Adjust the padding (as percentage) '
'between two tracks.'
),
],
),
html.Hr(className='oncoprint-separator'),
html.Div([
html.H4('Colors'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Track color'
),
html.P(
'Change the default background '
'color for the tracks.'
),
daq.ColorPicker(
id='oncoprint-tracks-color',
value={'hex': '#AAAAAA'}
),
],
),
html.Hr(className='oncoprint-separator'),
html.H6("Mutation colors"),
html.P(
"Select a mutation type and a color "
"to customize its look."
),
html.Div(children=[
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation type'
),
dcc.Dropdown(
id='oncoprint-colorscale-mutation-dropdown',
options=[
{'label': mut_type, 'value': mut_type}
for mut_type in COLORSCALE_MUTATIONS_OPT
],
value=COLORSCALE_MUTATIONS_OPT[0],
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation color'
),
daq.ColorPicker(
id='oncoprint-mutation-color',
value={'hex': COLORSCALE_COLORS_OPT[0]}
)
],
),
])
])
])
)
]
)
]),
dcc.Store(id='oncoprint-store'),
]),
|
def layout():
return html.Div(id='oncoprint-body', children=[
dash_bio.OncoPrint(
id='oncoprint-chart',
height=550,
data=[]
),
html.Div(id='oncoprint-control-tabs', children=[
dcc.Tabs(
id='oncoprint-tabs',
children=[
dcc.Tab(
label='About',
value='what-is',
children=html.Div(className='oncoprint-tab', children=[
html.H4(
"What is OncoPrint?"
),
html.P(
"""
The OncoPrint component is used to view multiple genetic
alteration events through an interactive and zoomable
heatmap. It is a React/Dash port of the popular
oncoPrint() function from the BioConductor R
package. Under the hood, the rendering is done using
Plotly.js built upon D3. Plotly's interactivity allows
the user to bind clicks and hovers to genetic events,
allowing the user to create complex bioinformatic apps
or workflows that rely on crossfiltering.
"""
),
html.P(
"""
Read more about the component here:
https://github.com/plotly/react-oncoprint
"""
)
])
),
dcc.Tab(
label='Data',
value='data',
children=html.Div(className='oncoprint-tab', children=[
html.Div([
html.Div(
className='oncoprint-option-name',
children='Select dataset'
),
dcc.Dropdown(
id='oncoprint-dropdown',
className='oncoprint-select',
options=[
{
'label': '{}.json'.format(ds),
'value': ds
}
for ds in DATASETS
],
value='cBioPortalData',
),
]),
html.Hr(
className='oncoprint-separator'
),
html.Div([
html.H4('Hover, click, or event data'),
html.Div(
id='oncoprint-events'
),
])
])
),
dcc.Tab(
label='View',
value='view',
children=html.Div(className='oncoprint-tab', children=[
html.H4('Layout'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Overview'
),
daq.ToggleSwitch(
id='oncoprint-show-overview',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Legend'
),
daq.ToggleSwitch(
id='oncoprint-show-legend',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Padding'
),
dcc.Slider(
className='oncoprint-slider',
id='oncoprint-padding-input',
value=0.05,
min=0,
max=0.1,
step=0.01,
marks={
'0': '0',
'0.02': '0.02',
'0.04': '0.04',
'0.06': '0.06',
'0.08': '0.08',
'0.1': '0.1',
},
),
html.Br(),
html.Div(
'Adjust padding (as percentage) '
'between two tracks.'
),
],
),
html.Hr(className='oncoprint-separator'),
html.Div([
html.H4('Colors'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Track color'
),
html.P(
'Change the default background '
'color for the tracks.'
),
daq.ColorPicker(
id='oncoprint-tracks-color',
value={'hex': '#AAAAAA'}
),
],
),
html.Hr(className='oncoprint-separator'),
html.H6("Mutation colors"),
html.P(
"Select a mutation type and a color "
"to customize its look."
),
html.Div(children=[
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation type'
),
dcc.Dropdown(
id='oncoprint-colorscale-mutation-dropdown',
options=[
{'label': mut_type, 'value': mut_type}
for mut_type in COLORSCALE_MUTATIONS_OPT
],
value=COLORSCALE_MUTATIONS_OPT[0],
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation color'
),
daq.ColorPicker(
id='oncoprint-mutation-color',
value={'hex': COLORSCALE_COLORS_OPT[0]}
)
],
),
])
])
])
)
]
)
]),
dcc.Store(id='oncoprint-store'),
]),
|
32,045 |
def delete_list(args: dict, sg):
listID = args.get('list_id')
params = {}
deleteContacts = args.get('delete_contacts')
if deleteContacts:
params['delete_contacts'] = False if deleteContacts == 'False' else True
response = sg.client.marketing.lists._(listID).delete(query_params=params)
if response.status_code == 200:
rBody = response.body
body = json.loads(rBody.decode("utf-8"))
ec = {'Sendgrid.DeleteListJobId': body['job_id']}
md = tableToMarkdown('The delete has been accepted and is processing. \
You can check the status using the Job ID: ', body['job_id'])
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': body,
'HumanReadable': md,
'EntryContext': ec
}
elif response.status_code == 204:
return 'The delete has been successful '
else:
return 'List delete has been failed: ' + str(response.body)
|
def delete_list(args: dict, sg):
listID = args.get('list_id')
params = {}
deleteContacts = args.get('delete_contacts')
if deleteContacts:
params['delete_contacts'] = False if deleteContacts == 'False' else True
response = sg.client.marketing.lists._(listID).delete(query_params=params)
if response.status_code == 200:
rBody = response.body
body = json.loads(rBody.decode("utf-8"))
ec = {'Sendgrid.DeleteListJobId': body['job_id']}
md = tableToMarkdown('The delete has been accepted and is processing. \
You can check the status using the Job ID: ', body['job_id'])
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': body,
'HumanReadable': md,
'EntryContext': ec
}
elif response.status_code == 204:
return 'Deletion completed successfully'
else:
return 'List delete has been failed: ' + str(response.body)
|
32,575 |
def get_html_from_response(response):
text = response.text
open_tag = text.find('<html')
close_tag = text.find('</html>')
return text[open_tag: close_tag + len('</html>')]
|
def get_html_from_response(response):
text = response.text
open_tag = text.lower().find('<html')
close_tag = text.find('</html>')
return text[open_tag: close_tag + len('</html>')]
|
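A quick illustration of the pair above, using a stand-in for requests.Response (only a .text attribute is needed); the HTML fragment is made up.

from types import SimpleNamespace

response = SimpleNamespace(
    text='noise before <HTML lang="en"><body>hello</body></html> noise after')
print(get_html_from_response(response))
# modified version -> '<HTML lang="en"><body>hello</body></html>'
# original version misses the uppercase "<HTML" (find returns -1), so the slice
# starts at the last character and usually yields an empty string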
24,865 |
def is_inside_lambda(node: nodes.NodeNG) -> bool:
"""Return whether the given node is inside a lambda"""
warnings.warn(
"utils.is_inside_lambda will be removed in favour of calling "
"utils.get_node_first_ancestor_of_type(x, nodes.Lambda)",
DeprecationWarning,
)
return any(isinstance(parent, nodes.Lambda) for parent in node.node_ancestors())
|
def is_inside_lambda(node: nodes.NodeNG) -> bool:
"""Return whether the given node is inside a lambda"""
warnings.warn(
"utils.is_inside_lambda will be removed in favour of calling "
"utils.get_node_first_ancestor_of_type(x, nodes.Lambda) in pylint 3.0",
DeprecationWarning,
)
return any(isinstance(parent, nodes.Lambda) for parent in node.node_ancestors())
|
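A small sketch for the helper above using astroid, the AST library pylint is built on; the parsed source is arbitrary, and each call triggers the DeprecationWarning declared in the function body (whether it is displayed depends on the warning filters).

import astroid

module = astroid.parse("handler = lambda exc: str(exc)")
lambda_node = next(module.nodes_of_class(astroid.nodes.Lambda))
inner_call = lambda_node.body              # the str(exc) Call node inside the lambda

print(is_inside_lambda(inner_call))        # True: a Lambda is among its ancestors
print(is_inside_lambda(lambda_node))       # False: the lambda itself is not nested in one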
31,882 |
def test_malwarebazaar_comment_add(requests_mock):
"""
Given:
- Hash of file and comment to add to MalwareBazaar db about this file
When:
- Running a comment add command
Then:
- Display a success message or raise exception when it fails
"""
mock_response = {
'query_status': 'success'
}
readable_output = 'Comment added to 094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d malware ' \
'sample successfully'
outputs = {
'sha256_hash': '094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d',
'comment': 'test'
}
requests_mock.post(BASE_URL, json=mock_response)
client = create_client()
args = {"sha256_hash": "094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d",
"comment": "test"}
response = MalwareBazaar.malwarebazaar_comment_add_command(client, args)
assert response.readable_output == readable_output
assert response.outputs_key_field == "sha256_hash"
assert response.outputs == outputs
assert response.outputs_prefix == 'MalwareBazaar.MalwarebazaarCommentAdd'
|
def test_malwarebazaar_comment_add(requests_mock):
"""
Given:
- Hash of file and comment to add to MalwareBazaar db about this file
When:
- Running a comment add command
Then:
- Make sure a success message is returned.
"""
mock_response = {
'query_status': 'success'
}
readable_output = 'Comment added to 094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d malware ' \
'sample successfully'
outputs = {
'sha256_hash': '094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d',
'comment': 'test'
}
requests_mock.post(BASE_URL, json=mock_response)
client = create_client()
args = {"sha256_hash": "094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d",
"comment": "test"}
response = MalwareBazaar.malwarebazaar_comment_add_command(client, args)
assert response.readable_output == readable_output
assert response.outputs_key_field == "sha256_hash"
assert response.outputs == outputs
assert response.outputs_prefix == 'MalwareBazaar.MalwarebazaarCommentAdd'
|
32,323 |
def get_push_status(
topology: Topology,
match_job_id: List[str] = None,
) -> List[PushStatus]:
"""
Returns the status of the push (commit-all) jobs from Panorama.
:param topology: `Topology` instance !no-auto-argument
:param match_job_id: job ID or list of Job IDs to return.
"""
return PanoramaCommand.get_push_status(topology, match_job_id)
|
def get_push_status(
topology: Topology,
match_job_id: Optional[List[str]] = None,
) -> List[PushStatus]:
"""
Returns the status of the push (commit-all) jobs from Panorama.
:param topology: `Topology` instance !no-auto-argument
:param match_job_id: job ID or list of Job IDs to return.
"""
return PanoramaCommand.get_push_status(topology, match_job_id)
|
34,134 |
def evaluate_entities(
targets, predictions, tokens, extractors, report_folder, ner_filename
): # pragma: no cover
"""Creates summary statistics for each entity extractor.
Logs precision, recall, and F1 per entity type for each extractor."""
aligned_predictions = align_all_entity_predictions(
targets, predictions, tokens, extractors
)
merged_targets = merge_labels(aligned_predictions)
merged_targets = substitute_labels(merged_targets, "O", "no_entity")
result = {}
for extractor in extractors:
merged_predictions = merge_labels(aligned_predictions, extractor)
merged_predictions = substitute_labels(merged_predictions, "O", "no_entity")
logger.info("Evaluation for entity extractor: {} ".format(extractor))
if report_folder:
report, precision, f1, accuracy = get_evaluation_metrics(
merged_targets, merged_predictions, output_dict=True
)
report_filename = extractor + "_report.json"
extractor_report = os.path.join(report_folder, report_filename)
save_json(report, extractor_report)
logger.info(
"Classification report for '{}' saved to '{}'."
"".format(extractor, extractor_report)
)
else:
report, precision, f1, accuracy = get_evaluation_metrics(
merged_targets, merged_predictions
)
log_evaluation_table(report, precision, f1, accuracy)
if "ner_crf" == extractor:
# save classified samples to file for debugging
collect_ner_results(targets, predictions, ner_filename)
result[extractor] = {
"report": report,
"precision": precision,
"f1_score": f1,
"accuracy": accuracy,
}
return result
|
def evaluate_entities(
targets, predictions, tokens, extractors, report_folder, ner_filename
): # pragma: no cover
"""Creates summary statistics for each entity extractor.
Logs precision, recall, and F1 per entity type for each extractor."""
aligned_predictions = align_all_entity_predictions(
targets, predictions, tokens, extractors
)
merged_targets = merge_labels(aligned_predictions)
merged_targets = substitute_labels(merged_targets, "O", "no_entity")
result = {}
for extractor in extractors:
merged_predictions = merge_labels(aligned_predictions, extractor)
merged_predictions = substitute_labels(merged_predictions, "O", "no_entity")
logger.info("Evaluation for entity extractor: {} ".format(extractor))
if report_folder:
report, precision, f1, accuracy = get_evaluation_metrics(
merged_targets, merged_predictions, output_dict=True
)
report_filename = extractor + "_report.json"
extractor_report = os.path.join(report_folder, report_filename)
save_json(report, extractor_report)
logger.info(
"Classification report for '{}' saved to '{}'."
"".format(extractor, extractor_report)
)
else:
report, precision, f1, accuracy = get_evaluation_metrics(
merged_targets, merged_predictions
)
log_evaluation_table(report, precision, f1, accuracy)
if extractor == "CRFEntityExtractor":
# save classified samples to file for debugging
collect_ner_results(targets, predictions, ner_filename)
result[extractor] = {
"report": report,
"precision": precision,
"f1_score": f1,
"accuracy": accuracy,
}
return result
|
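The record above changes only the extractor name checked before dumping CRF debug output. For a sense of what the per-extractor report looks like, here is a standalone sketch assuming `get_evaluation_metrics` wraps scikit-learn's `classification_report` (an assumption; the labels below are made up):

from sklearn.metrics import classification_report

merged_targets = ["no_entity", "city", "city", "no_entity"]
merged_predictions = ["no_entity", "city", "no_entity", "no_entity"]

report = classification_report(merged_targets, merged_predictions, output_dict=True)
# Per-label precision/recall, ready to be saved as <extractor>_report.json.
print(report["city"]["precision"], report["city"]["recall"])  # 1.0 0.5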
14,465 |
def rules_as_rich(rules: RulesCollection) -> str:
"""Print documentation for a list of rules, returns empty string."""
con = console.Console()
for d in rules:
table = Table(show_header=True, header_style="bold magenta", box=box.MINIMAL)
table.add_column(d.id, style="dim", width=16)
table.add_column(Markdown(d.shortdesc))
table.add_row("description", Markdown(d.description))
table.add_row("version_added", d.version_added)
table.add_row("tags", ", ".join(d.tags))
table.add_row("severity", d.severity)
con.print(table)
return ""
|
def rules_as_rich(rules: RulesCollection) -> str:
"""Print documentation for a list of rules, returns empty string."""
con = console.Console()
for d in rules:
table = Table(show_header=True, header_style="bold yellow", box=box.MINIMAL)
table.add_column(d.id, style="dim", width=16)
table.add_column(Markdown(d.shortdesc))
table.add_row("description", Markdown(d.description))
table.add_row("version_added", d.version_added)
table.add_row("tags", ", ".join(d.tags))
table.add_row("severity", d.severity)
con.print(table)
return ""
|
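Only the header colour changes in this pair. A minimal standalone table in the same shape, using the same `rich` calls (the rule data below is invented):

from rich import box
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table

table = Table(show_header=True, header_style="bold yellow", box=box.MINIMAL)
table.add_column("demo-rule-001", style="dim", width=16)
table.add_column(Markdown("Lines should not be too long"))
table.add_row("description", Markdown("Keep lines under the configured limit."))
table.add_row("tags", "formatting")
table.add_row("severity", "LOW")
Console().print(table)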
36,302 |
def search_form(content, cgiuser, cgipassword):
"""Search for a HTML form in the given HTML content that has the given
CGI fields. If no form is found return None.
"""
soup = htmlsoup.make_soup(content)
cginames = {x for x in (cgiuser, cgipassword) if x is not None}
for form_element in soup.find_all("form", action=True):
form = Form(form_element["action"])
for input_element in form_element.find_all("input",
attrs={"name": True}):
form.add_value(
input_element["name"], input_element.attrs.get("value"))
if cginames <= set(form.data):
log.debug(LOG_CHECK, "Found form %s", form)
return form
# not found
log.warn(LOG_CHECK, "Form with fields %s not found", ",".join(cginames))
return None
|
def search_form(content, cgiuser, cgipassword):
"""Search for a HTML form in the given HTML content that has the given
CGI fields. If no form is found return None.
"""
soup = htmlsoup.make_soup(content)
cginames = {cgiuser, cgipassword} - {None}
for form_element in soup.find_all("form", action=True):
form = Form(form_element["action"])
for input_element in form_element.find_all("input",
attrs={"name": True}):
form.add_value(
input_element["name"], input_element.attrs.get("value"))
if cginames <= set(form.data):
log.debug(LOG_CHECK, "Found form %s", form)
return form
# not found
log.warn(LOG_CHECK, "Form with fields %s not found", ",".join(cginames))
return None
|
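The diff above swaps a set comprehension for set difference; both drop a missing field name. A quick standalone check:

cgiuser, cgipassword = "login", None

via_comprehension = {x for x in (cgiuser, cgipassword) if x is not None}
via_difference = {cgiuser, cgipassword} - {None}

assert via_comprehension == via_difference == {"login"}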
25,596 |
def update_monitoring_service_from_balance_proof(
raiden: "RaidenService",
chain_state: ChainState,
new_balance_proof: BalanceProofSignedState,
non_closing_participant: Address,
) -> None:
if raiden.config.services.monitoring_enabled is False:
return
msg = "Monitoring is enabled but the default monitoring service address is None."
assert raiden.default_msc_address is not None, msg
channel_state = views.get_channelstate_by_canonical_identifier(
chain_state=chain_state, canonical_identifier=new_balance_proof.canonical_identifier
)
msg = (
f"Failed to update monitoring service due to inability to find "
f"channel: {new_balance_proof.channel_identifier} "
f"token_network_address: {to_checksum_address(new_balance_proof.token_network_address)}."
)
assert channel_state, msg
msg = "Monitoring is enabled but the `UserDeposit` contract is None."
assert raiden.default_user_deposit is not None, msg
rei_balance = raiden.default_user_deposit.effective_balance(raiden.address, BLOCK_ID_LATEST)
if rei_balance < MONITORING_REWARD:
rdn_balance = to_rdn(rei_balance)
rdn_reward = to_rdn(MONITORING_REWARD)
log.warning(
f"Skipping update to Monitoring service. "
f"Your deposit balance {rdn_balance} is less than "
f"the required monitoring service reward of {rdn_reward}"
)
return
# In production there should be no MonitoringRequest if
# channel balance is below a certain threshold. This is
# a naive approach that needs to be worked on in the future
if raiden.config.environment_type == Environment.PRODUCTION:
message = (
"Skipping update to Monitoring service. "
"Your channel balance {channel_balance} is less than "
"the required minimum balance of {min_balance} "
"that you have set before sending the MonitorRequest,"
" token address {token_address}"
)
dai_token_network_address = views.get_token_network_address_by_token_address(
chain_state=chain_state,
token_network_registry_address=raiden.default_registry.address,
token_address=DAI_TOKEN_ADDRESS,
)
weth_token_network_address = views.get_token_network_address_by_token_address(
chain_state=chain_state,
token_network_registry_address=raiden.default_registry.address,
token_address=WETH_TOKEN_ADDRESS,
)
channel_balance = get_balance(
sender=channel_state.our_state, receiver=channel_state.partner_state,
)
if channel_state.canonical_identifier.token_network_address == dai_token_network_address:
if channel_balance < MIN_MONITORING_AMOUNT_DAI:
log.warning(
message.format(
channel_balance=channel_balance,
min_balance=MIN_MONITORING_AMOUNT_DAI,
token_address=to_checksum_address(DAI_TOKEN_ADDRESS)
)
)
return
if channel_state.canonical_identifier.token_network_address == weth_token_network_address:
if channel_balance < MIN_MONITORING_AMOUNT_WETH:
log.warning(
message.format(
channel_balance=channel_balance,
min_balance=MIN_MONITORING_AMOUNT_WETH,
token_address=to_checksum_address(WETH_TOKEN_ADDRESS)
)
)
return
log.info(
"Received new balance proof, creating message for Monitoring Service.",
node=to_checksum_address(raiden.address),
balance_proof=new_balance_proof,
)
monitoring_message = RequestMonitoring.from_balance_proof_signed_state(
balance_proof=new_balance_proof,
non_closing_participant=non_closing_participant,
reward_amount=MONITORING_REWARD,
monitoring_service_contract_address=raiden.default_msc_address,
)
monitoring_message.sign(raiden.signer)
raiden.transport.broadcast(constants.MONITORING_BROADCASTING_ROOM, monitoring_message)
|
def update_monitoring_service_from_balance_proof(
raiden: "RaidenService",
chain_state: ChainState,
new_balance_proof: BalanceProofSignedState,
non_closing_participant: Address,
) -> None:
if raiden.config.services.monitoring_enabled is False:
return
msg = "Monitoring is enabled but the default monitoring service address is None."
assert raiden.default_msc_address is not None, msg
channel_state = views.get_channelstate_by_canonical_identifier(
chain_state=chain_state, canonical_identifier=new_balance_proof.canonical_identifier
)
msg = (
f"Failed to update monitoring service due to inability to find "
f"channel: {new_balance_proof.channel_identifier} "
f"token_network_address: {to_checksum_address(new_balance_proof.token_network_address)}."
)
assert channel_state, msg
msg = "Monitoring is enabled but the `UserDeposit` contract is None."
assert raiden.default_user_deposit is not None, msg
rei_balance = raiden.default_user_deposit.effective_balance(raiden.address, BLOCK_ID_LATEST)
if rei_balance < MONITORING_REWARD:
rdn_balance = to_rdn(rei_balance)
rdn_reward = to_rdn(MONITORING_REWARD)
log.warning(
f"Skipping update to Monitoring service. "
f"Your deposit balance {rdn_balance} is less than "
f"the required monitoring service reward of {rdn_reward}"
)
return
# In production there should be no MonitoringRequest if
# channel balance is below a certain threshold. This is
# a naive approach that needs to be worked on in the future
if raiden.config.environment_type == Environment.PRODUCTION:
message = (
"Skipping update to Monitoring service. "
"Your channel balance {channel_balance} is less than "
"the required minimum balance of {min_balance} "
"that you have set before sending the MonitorRequest,"
" token address {token_address}"
)
dai_token_network_address = views.get_token_network_address_by_token_address(
chain_state=chain_state,
token_network_registry_address=raiden.default_registry.address,
token_address=DAI_TOKEN_ADDRESS,
)
weth_token_network_address = views.get_token_network_address_by_token_address(
chain_state=chain_state,
token_network_registry_address=raiden.default_registry.address,
token_address=WETH_TOKEN_ADDRESS,
)
channel_balance = get_balance(
sender=channel_state.our_state, receiver=channel_state.partner_state,
)
if channel_state.canonical_identifier.token_network_address == dai_token_network_address:
if channel_balance < MIN_MONITORING_AMOUNT_DAI:
log.warning(
message.format(
channel_balance=channel_balance,
min_balance=MIN_MONITORING_AMOUNT_DAI,
token_address=to_checksum_address(DAI_TOKEN_ADDRESS),
)
)
return
if channel_state.canonical_identifier.token_network_address == weth_token_network_address:
if channel_balance < MIN_MONITORING_AMOUNT_WETH:
log.warning(
message.format(
channel_balance=channel_balance,
min_balance=MIN_MONITORING_AMOUNT_WETH,
token_address=to_checksum_address(WETH_TOKEN_ADDRESS)
)
)
return
log.info(
"Received new balance proof, creating message for Monitoring Service.",
node=to_checksum_address(raiden.address),
balance_proof=new_balance_proof,
)
monitoring_message = RequestMonitoring.from_balance_proof_signed_state(
balance_proof=new_balance_proof,
non_closing_participant=non_closing_participant,
reward_amount=MONITORING_REWARD,
monitoring_service_contract_address=raiden.default_msc_address,
)
monitoring_message.sign(raiden.signer)
raiden.transport.broadcast(constants.MONITORING_BROADCASTING_ROOM, monitoring_message)
|
25,350 |
def italics(text: str, escape_formatting: bool = True) -> str:
"""Get the given text in italics.
Note: This escapes text prior to italicising
Parameters
----------
text : str
The text to be marked up.
escape_formatting : `bool`, optional
Set to :code:`False` to not escape markdown formatting in the text.
Returns
-------
str
The marked up text.
"""
text = escape(text, formatting=escape_formatting)
return "*{}*".format(text)
|
def italics(text: str, escape_formatting: bool = True) -> str:
"""Get the given text in italics.
Note: By default, this function will escape ``text`` prior to italicising.
Parameters
----------
text : str
The text to be marked up.
escape_formatting : `bool`, optional
Set to :code:`False` to not escape markdown formatting in the text.
Returns
-------
str
The marked up text.
"""
text = escape(text, formatting=escape_formatting)
return "*{}*".format(text)
|
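Both versions escape before wrapping in asterisks. A self-contained sketch of that behaviour, where `escape` is a simplified stand-in for the project's own helper:

def escape(text: str, formatting: bool = True) -> str:
    # Simplified stand-in: backslash-escape common markdown control characters.
    return "".join("\\" + c if formatting and c in "*_`~" else c for c in text)

def italics(text: str, escape_formatting: bool = True) -> str:
    return "*{}*".format(escape(text, formatting=escape_formatting))

print(italics("2*3=6"))                           # *2\*3=6*
print(italics("2*3=6", escape_formatting=False))  # *2*3=6*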
47,868 |
def get_non_provider_id(name):
"""
Not all resources have an ID and some services allow the use of "." in names, which break's Scout2's
recursion scheme if name is used as an ID. Use SHA1(name) instead.
:param name: Name of the resource to
:return: SHA1(name)
"""
m = sha1()
m.update(name.encode('utf-8'))
return m.hexdigest()
|
def get_non_provider_id(name):
"""
Not all resources have an ID and some services allow the use of "." in names, which breaks Scout's
recursion scheme if name is used as an ID. Use SHA1(name) instead.
:param name: Name of the resource to
:return: SHA1(name)
"""
m = sha1()
m.update(name.encode('utf-8'))
return m.hexdigest()
|
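Both versions hash the name so that dots in resource names cannot break the recursion over keys. The same idea in isolation:

from hashlib import sha1

def name_to_id(name: str) -> str:
    # A SHA1 hex digest contains only [0-9a-f], so it is safe to use as a key.
    return sha1(name.encode("utf-8")).hexdigest()

print(name_to_id("my.bucket.name"))  # 40-character hex string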
55,432 |
def merge_run_task_kwargs(opts1: dict, opts2: dict) -> dict:
"""Merge two `run_task_kwargs` dicts, given precedence to `opts2`.
Values are merged with the following heuristics:
- Anything outside of `overrides.containerOverrides` is merged directly,
with precedence given to `opts2`
- Dicts in the `overrides.containerOverrides` list are matched on their
`"name"` fields, then merged directly (with precedence given to `opts2`).
Args:
- opts1 (dict): A dict of kwargs for `run_task`
- opts2 (dict): A second dict of kwargs for `run_task`.
Returns:
- dict: A merged dict of kwargs
"""
out = deepcopy(opts1)
# Everything except 'overrides' merge directly
for k, v in opts2.items():
if k != "overrides":
out[k] = v
# Everything in `overrides` except `containerOverrides` merge directly
overrides = opts2.get("overrides", {})
if overrides:
out_overrides = out.setdefault("overrides", {})
for k, v in overrides.items():
if k != "containerOverrides":
out_overrides[k] = v
# Entries in `containerOverrides` are paired by name, and then merged
container_overrides = overrides.get("containerOverrides")
if container_overrides:
out_container_overrides = out_overrides.setdefault("containerOverrides", [])
for entry in container_overrides:
for out_entry in out_container_overrides:
if out_entry.get("name") == entry.get("name"):
out_entry.update(entry)
break
else:
out_container_overrides.append(entry)
return out
|
def merge_run_task_kwargs(opts1: dict, opts2: dict) -> dict:
"""Merge two `run_task_kwargs` dicts, given precedence to `opts2`.
Values are merged with the following heuristics:
- Anything outside of `overrides.containerOverrides` is merged directly,
with precedence given to `opts2`
- Dicts in the `overrides.containerOverrides` list are matched on their
`"name"` fields, then merged directly (with precedence given to `opts2`).
Args:
- opts1 (dict): A dict of kwargs for `run_task`
- opts2 (dict): A second dict of kwargs for `run_task`.
Returns:
- dict: A merged dict of kwargs
"""
# Everything except 'overrides' merge directly
overrides = opts2.pop("overrides", {})
out = {**opts1, **opts2}
out = deepcopy(out)
# Everything in `overrides` except `containerOverrides` merge directly
if overrides:
out_overrides = out.setdefault("overrides", {})
for k, v in overrides.items():
if k != "containerOverrides":
out_overrides[k] = v
# Entries in `containerOverrides` are paired by name, and then merged
container_overrides = overrides.get("containerOverrides")
if container_overrides:
out_container_overrides = out_overrides.setdefault("containerOverrides", [])
for entry in container_overrides:
for out_entry in out_container_overrides:
if out_entry.get("name") == entry.get("name"):
out_entry.update(entry)
break
else:
out_container_overrides.append(entry)
return out
|
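A small usage sketch of the merge heuristic described in the docstring, assuming either version of `merge_run_task_kwargs` above (and its `deepcopy` import) is in scope; the kwargs are invented:

base = {
    "cluster": "default",
    "overrides": {"containerOverrides": [{"name": "flow", "cpu": 256}]},
}
extra = {
    "cluster": "prod",
    "overrides": {"containerOverrides": [{"name": "flow", "memory": 512}]},
}

merged = merge_run_task_kwargs(base, extra)
# merged["cluster"] == "prod"; the "flow" entry carries both cpu=256 and memory=512.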
49,642 |
def validate_html_logo(app: Sphinx, config: Config) -> None:
"""Check html_logo setting."""
if config.html_logo and not path.isfile(path.join(app.confdir, config.html_logo)) \
and not urlparse(config.html_logo).scheme:
logger.warning(__('logo file %r does not exist'), config.html_logo)
config.html_logo = None # type: ignore
|
def validate_html_logo(app: Sphinx, config: Config) -> None:
"""Check html_logo setting."""
if (config.html_logo and not path.isfile(path.join(app.confdir, config.html_logo)) and
not urlparse(config.html_logo).scheme):
logger.warning(__('logo file %r does not exist'), config.html_logo)
config.html_logo = None # type: ignore
|
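The wrapping change above keeps the same logic: remote logos (anything with a URL scheme) skip the file-existence check. A quick check of the `urlparse` part:

from urllib.parse import urlparse

print(urlparse("https://example.com/logo.png").scheme)  # 'https' -> treated as remote
print(urlparse("_static/logo.png").scheme)              # ''      -> must exist under confdir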