id (int64, 11–59.9k) | original (string, 33–150k chars) | modified (string, 37–150k chars)
---|---|---|
6,219 |
def getCPUTime(cpuNormalizationFactor):
""" Trying to get CPUTime left for execution (in seconds).
It will first try to get the work left from the batch system information using the TimeLeft utility.
If it succeeds, it will convert it to real seconds and return it.
If it fails, it tries to get it from the static info found in CS.
If it fails, it returns the default, which is a large 9999999, that we may consider as "Infinite".
This is a generic method, independent from the middleware of the resource if TimeLeft doesn't return a value
args:
cpuNormalizationFactor (float): the CPU power of the current Worker Node.
If not passed in, it is taken from the local configuration
returns:
cpuTimeLeft (int): the CPU time left, in seconds
"""
cpuTimeLeft = 0.
cpuWorkLeft = gConfig.getValue('/LocalSite/CPUTimeLeft', 0)
if not cpuWorkLeft:
# Try and get the information from the CPU left utility
result = TimeLeft().getTimeLeft()
if result['OK']:
cpuWorkLeft = result['Value']
if cpuWorkLeft > 0:
# This is in HS06 * seconds (HS06s)
# We need to convert it to real seconds
if not cpuNormalizationFactor: # if cpuNormalizationFactor passed in is 0, try get it from the local cfg
cpuNormalizationFactor = gConfig.getValue('/LocalSite/CPUNormalizationFactor', 0.0)
if cpuNormalizationFactor:
cpuTimeLeft = cpuWorkLeft / cpuNormalizationFactor
if not cpuTimeLeft:
# now we know that we have to find the CPUTimeLeft by looking in the CS
# this is not guaranteed to be correct as the CS units may not be real seconds
gridCE = gConfig.getValue('/LocalSite/GridCE')
ceQueue = gConfig.getValue('/LocalSite/CEQueue')
if not ceQueue:
# we have to look for a ceQueue in the CS
# A bit hacky. We should better profit from something generic
gLogger.warn("No CEQueue in local configuration, looking to find one in CS")
siteName = siteName()
queueSection = '/Resources/Sites/%s/%s/CEs/%s/Queues' % (siteName.split('.')[0], siteName, gridCE)
res = gConfig.getSections(queueSection)
if not res['OK']:
raise RuntimeError(res['Message'])
queues = res['Value']
cpuTimes = [gConfig.getValue(queueSection + '/' + queue + '/maxCPUTime', 9999999.) for queue in queues]
# These are (real, wall clock) minutes - damn BDII!
cpuTimeLeft = min(cpuTimes) * 60
else:
queueInfo = getQueueInfo('%s/%s' % (gridCE, ceQueue))
cpuTimeLeft = 9999999.
if not queueInfo['OK'] or not queueInfo['Value']:
gLogger.warn("Can't find a CE/queue, defaulting CPUTime to %d" % cpuTimeLeft)
else:
queueCSSection = queueInfo['Value']['QueueCSSection']
# These are (real, wall clock) minutes - damn BDII!
cpuTimeInMinutes = gConfig.getValue('%s/maxCPUTime' % queueCSSection, 0.)
if cpuTimeInMinutes:
cpuTimeLeft = cpuTimeInMinutes * 60.
gLogger.info("CPUTime for %s: %f" % (queueCSSection, cpuTimeLeft))
else:
gLogger.warn("Can't find maxCPUTime for %s, defaulting CPUTime to %f" % (queueCSSection, cpuTimeLeft))
return int(cpuTimeLeft)
|
def getCPUTime(cpuNormalizationFactor):
""" Trying to get CPUTime left for execution (in seconds).
It will first try to get the work left from the batch system information using the TimeLeft utility.
If it succeeds, it will convert it to real seconds and return it.
If it fails, it tries to get it from the static info found in CS.
If it fails, it returns the default, which is a large 9999999, that we may consider as "Infinite".
This is a generic method, independent from the middleware of the resource if TimeLeft doesn't return a value
args:
cpuNormalizationFactor (float): the CPU power of the current Worker Node.
If not passed in, it is taken from the local configuration
returns:
cpuTimeLeft (int): the CPU time left, in seconds
"""
cpuTimeLeft = 0.
cpuWorkLeft = gConfig.getValue('/LocalSite/CPUTimeLeft', 0)
if not cpuWorkLeft:
# Try and get the information from the CPU left utility
result = TimeLeft().getTimeLeft()
if result['OK']:
cpuWorkLeft = result['Value']
if cpuWorkLeft > 0:
# This is in HS06 * seconds (HS06s)
# We need to convert it to real seconds
if not cpuNormalizationFactor: # if cpuNormalizationFactor passed in is 0, try get it from the local cfg
cpuNormalizationFactor = gConfig.getValue('/LocalSite/CPUNormalizationFactor', 0.0)
if cpuNormalizationFactor:
cpuTimeLeft = cpuWorkLeft / cpuNormalizationFactor
if not cpuTimeLeft:
# now we know that we have to find the CPUTimeLeft by looking in the CS
# this is not guaranteed to be correct as the CS units may not be real seconds
gridCE = gConfig.getValue('/LocalSite/GridCE')
ceQueue = gConfig.getValue('/LocalSite/CEQueue')
if not ceQueue:
# we have to look for a ceQueue in the CS
# A bit hacky. We should better profit from something generic
gLogger.warn("No CEQueue in local configuration, looking to find one in CS")
siteName = DIRAC.siteName()
queueSection = '/Resources/Sites/%s/%s/CEs/%s/Queues' % (siteName.split('.')[0], siteName, gridCE)
res = gConfig.getSections(queueSection)
if not res['OK']:
raise RuntimeError(res['Message'])
queues = res['Value']
cpuTimes = [gConfig.getValue(queueSection + '/' + queue + '/maxCPUTime', 9999999.) for queue in queues]
# These are (real, wall clock) minutes - damn BDII!
cpuTimeLeft = min(cpuTimes) * 60
else:
queueInfo = getQueueInfo('%s/%s' % (gridCE, ceQueue))
cpuTimeLeft = 9999999.
if not queueInfo['OK'] or not queueInfo['Value']:
gLogger.warn("Can't find a CE/queue, defaulting CPUTime to %d" % cpuTimeLeft)
else:
queueCSSection = queueInfo['Value']['QueueCSSection']
# These are (real, wall clock) minutes - damn BDII!
cpuTimeInMinutes = gConfig.getValue('%s/maxCPUTime' % queueCSSection, 0.)
if cpuTimeInMinutes:
cpuTimeLeft = cpuTimeInMinutes * 60.
gLogger.info("CPUTime for %s: %f" % (queueCSSection, cpuTimeLeft))
else:
gLogger.warn("Can't find maxCPUTime for %s, defaulting CPUTime to %f" % (queueCSSection, cpuTimeLeft))
return int(cpuTimeLeft)
|
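The key arithmetic in this pair is the normalization step: the batch system reports remaining work in HS06 * seconds, and dividing by the worker node's HS06 power yields wall-clock seconds. A minimal standalone sketch of that conversion (hypothetical numbers, not DIRAC code):

```python
def work_to_wallclock_seconds(cpu_work_left_hs06s, cpu_normalization_factor):
    """Convert remaining work in HS06 * seconds into real (wall-clock) seconds."""
    if cpu_normalization_factor <= 0:
        raise ValueError("cpuNormalizationFactor must be positive")
    return int(cpu_work_left_hs06s / cpu_normalization_factor)

# e.g. 72000 HS06*s of work left on a 10 HS06 worker node -> 7200 real seconds
print(work_to_wallclock_seconds(72000, 10.0))  # 7200
```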
53,914 |
def gen_vnet_config(mg_facts, num_vnet, num_routes, num_endpoints):
"""
@summary: Generates and stores the VNET configuration
@param mg_facts: Minigraph facts
@param num_vnet: Number of VNETs
@param num_routes: Number of routes
@param num_endpoints: Number of endpoints
"""
logger.info("Generating VNet configuration")
vc.VNET_CONFIG = yaml.safe_load(Template(open("templates/vnet_config.j2").read())
.render(mg_facts, ipv6_vxlan_test=vc.IPV6_VXLAN_TEST,
num_vnet=num_vnet, num_routes=num_routes, num_endpoints=num_endpoints))
|
def gen_vnet_config(mg_facts, num_vnet, num_routes, num_endpoints):
"""
@summary: Generates and stores the VNET configuration
@param mg_facts: Minigraph facts
@param num_vnet: Number of VNETs
@param num_routes: Number of routes
@param num_endpoints: Number of endpoints
"""
logger.info("Generating VNet configuration")
vc.VNET_CONFIG = yaml.safe_load(Template(open("templates/vnet_config.j2").read())
.render(mg_facts, ipv6_vxlan_test=vc.IPV6_VXLAN_TEST,
num_vnet=num_vnet, num_routes=num_routes, num_endpoints=num_endpoints))
|
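The function above is a render-then-parse pattern: a Jinja2 template produces YAML text, which yaml.safe_load turns into Python data. A self-contained sketch of the same pattern with an inline template (the template body is illustrative, not the real vnet_config.j2):

```python
import yaml
from jinja2 import Template

template_text = """
vnets:
{% for i in range(num_vnet) %}
  - name: Vnet{{ i + 1 }}
    routes: {{ num_routes }}
{% endfor %}
"""

# Render the template to YAML text, then parse it into a dict.
config = yaml.safe_load(Template(template_text).render(num_vnet=2, num_routes=4))
print(config)  # {'vnets': [{'name': 'Vnet1', 'routes': 4}, {'name': 'Vnet2', 'routes': 4}]}
```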
8,297 |
def relation(obj, attribute, restricted=True):
"""Get related object.
Only valid if the attribute is the name of a relationChoice field on the object.
"""
if not attribute:
raise RuntimeError(u'Missing parameter "attribute"')
check_for_relationchoice(obj, attribute)
items = get_relations(obj, attribute=attribute, restricted=restricted)
if items:
return items[0]
|
def relation(obj, attribute, restricted=True):
"""Get related object.
Only valid if the attribute is the name of a relationChoice field on the object.
"""
if not attribute:
raise ValueError('Missing parameter "attribute"')
check_for_relationchoice(obj, attribute)
items = get_relations(obj, attribute=attribute, restricted=restricted)
if items:
return items[0]
|
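The change above swaps RuntimeError for ValueError, the conventional exception for a bad or missing argument. A minimal sketch of the same guard in isolation (illustrative names, not the real relation helpers):

```python
def first_relation(items, attribute):
    """Return the first related item, insisting that `attribute` is provided."""
    if not attribute:
        raise ValueError('Missing parameter "attribute"')
    return items[0] if items else None

print(first_relation(["doc-a", "doc-b"], "relatedItems"))  # doc-a
```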
3,870 |
def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0):
r"""Compute the eigenvector centrality for the graph G.
Eigenvector centrality computes the centrality for a node based on the
centrality of its neighbors. The eigenvector centrality for node $i$ is
.. math::
Ax = \lambda x
where $A$ is the adjacency matrix of the graph G with eigenvalue $\lambda$.
By virtue of the Perron–Frobenius theorem, there is a unique and positive
solution if $\lambda$ is the largest eigenvalue associated with the
eigenvector of the adjacency matrix $A$ ([2]_).
Parameters
----------
G : graph
A networkx graph
weight : None or string, optional (default=None)
The name of the edge attribute used as weight.
If None, all edge weights are considered equal.
In this measure the weight is considered as the connection strenth.
max_iter : integer, optional (default=100)
Maximum number of iterations in power method.
tol : float, optional (default=1.0e-6)
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Returns
-------
nodes : dictionary
Dictionary of nodes with eigenvector centrality as the value.
Examples
--------
>>> G = nx.path_graph(4)
>>> centrality = nx.eigenvector_centrality_numpy(G)
>>> print([f"{node} {centrality[node]:0.2f}" for node in centrality])
['0 0.37', '1 0.60', '2 0.60', '3 0.37']
See Also
--------
eigenvector_centrality
pagerank
hits
Notes
-----
The measure was introduced by [1]_.
This algorithm uses the SciPy sparse eigenvalue solver (ARPACK) to
find the largest eigenvalue/eigenvector pair.
For directed graphs this is "left" eigenvector centrality which corresponds
to the in-edges in the graph. For out-edges eigenvector centrality
first reverse the graph with ``G.reverse()``.
Raises
------
NetworkXPointlessConcept
If the graph ``G`` is the null graph.
References
----------
.. [1] Phillip Bonacich:
Power and Centrality: A Family of Measures.
American Journal of Sociology 92(5):1170–1182, 1986
http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf
.. [2] Mark E. J. Newman:
Networks: An Introduction.
Oxford University Press, USA, 2010, pp. 169.
"""
import numpy as np
import scipy as sp
import scipy.sparse.linalg # call as sp.sparse.linalg
if len(G) == 0:
raise nx.NetworkXPointlessConcept(
"cannot compute centrality for the null graph"
)
M = nx.to_scipy_sparse_matrix(G, nodelist=list(G), weight=weight, dtype=float)
eigenvalue, eigenvector = sp.sparse.linalg.eigs(
M.T, k=1, which="LR", maxiter=max_iter, tol=tol
)
largest = eigenvector.flatten().real
norm = np.sign(largest.sum()) * sp.linalg.norm(largest)
return dict(zip(G, largest / norm))
|
def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0):
r"""Compute the eigenvector centrality for the graph G.
Eigenvector centrality computes the centrality for a node based on the
centrality of its neighbors. The eigenvector centrality for node $i$ is
.. math::
Ax = \lambda x
where $A$ is the adjacency matrix of the graph G with eigenvalue $\lambda$.
By virtue of the Perron–Frobenius theorem, there is a unique and positive
solution if $\lambda$ is the largest eigenvalue associated with the
eigenvector of the adjacency matrix $A$ ([2]_).
Parameters
----------
G : graph
A networkx graph
weight : None or string, optional (default=None)
The name of the edge attribute used as weight.
If None, all edge weights are considered equal.
In this measure the weight is interpreted as the connection strength.
max_iter : integer, optional (default=100)
Maximum number of iterations in power method.
tol : float, optional (default=1.0e-6)
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Returns
-------
nodes : dictionary
Dictionary of nodes with eigenvector centrality as the value.
Examples
--------
>>> G = nx.path_graph(4)
>>> centrality = nx.eigenvector_centrality_numpy(G)
>>> print([f"{node} {centrality[node]:0.2f}" for node in centrality])
['0 0.37', '1 0.60', '2 0.60', '3 0.37']
See Also
--------
eigenvector_centrality
pagerank
hits
Notes
-----
The measure was introduced by [1]_.
This algorithm uses the SciPy sparse eigenvalue solver (ARPACK) to
find the largest eigenvalue/eigenvector pair.
For directed graphs this is "left" eigenvector centrality which corresponds
to the in-edges in the graph. For out-edges eigenvector centrality
first reverse the graph with ``G.reverse()``.
Raises
------
NetworkXPointlessConcept
If the graph ``G`` is the null graph.
References
----------
.. [1] Phillip Bonacich:
Power and Centrality: A Family of Measures.
American Journal of Sociology 92(5):1170–1182, 1986
http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf
.. [2] Mark E. J. Newman:
Networks: An Introduction.
Oxford University Press, USA, 2010, pp. 169.
"""
import numpy as np
import scipy as sp
import scipy.sparse.linalg # call as sp.sparse.linalg
if len(G) == 0:
raise nx.NetworkXPointlessConcept(
"cannot compute centrality for the null graph"
)
M = nx.to_scipy_sparse_matrix(G, nodelist=list(G), weight=weight, dtype=float)
eigenvalue, eigenvector = sp.sparse.linalg.eigs(
M.T, k=1, which="LR", maxiter=max_iter, tol=tol
)
largest = eigenvector.flatten().real
norm = np.sign(largest.sum()) * sp.linalg.norm(largest)
return dict(zip(G, largest / norm))
|
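For reference, the ARPACK step both versions rely on can be reproduced on its own: find the dominant eigenvector of a small adjacency matrix with scipy.sparse.linalg.eigs and normalize its sign, as the function does. A standalone sketch for the 4-node path graph (assuming numpy and scipy are installed):

```python
import numpy as np
import scipy as sp
import scipy.sparse          # call as sp.sparse
import scipy.sparse.linalg   # call as sp.sparse.linalg

# adjacency matrix of the path graph 0-1-2-3
A = sp.sparse.csr_matrix([
    [0, 1, 0, 0],
    [1, 0, 1, 0],
    [0, 1, 0, 1],
    [0, 0, 1, 0],
], dtype=float)

eigenvalue, eigenvector = sp.sparse.linalg.eigs(A.T, k=1, which="LR", maxiter=50, tol=0)
largest = eigenvector.flatten().real
centrality = largest / (np.sign(largest.sum()) * np.linalg.norm(largest))
print(np.round(centrality, 2))  # approximately [0.37 0.6 0.6 0.37], matching the docstring
```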
30,892 |
def test_function(client, _):
"""
Performs basic GET request to check if the API is reachable and authentication is successful.
Returns ok if successful.
"""
if demisto.params().get('self_deployed', False):
if demisto.command() == 'test-module':
# cannot use test module due to the lack of ability to set refresh token to integration context
# for self deployed app
raise Exception("When using a self-deployed configuration, Please use !msgraph-user-test instead")
if not demisto.params().get('auth_code'):
raise Exception("You must enter an authorization code in a self-deployed configuration.")
client.ms_client.http_request(method='GET', url_suffix='users/')
return 'ok', None, None
|
def test_function(client, _):
"""
Performs basic GET request to check if the API is reachable and authentication is successful.
Returns ok if successful.
"""
if demisto.params().get('self_deployed', False):
if demisto.command() == 'test-module':
# cannot use test module due to the lack of ability to set refresh token to integration context
# for self deployed app
raise Exception("When using a self-deployed configuration, Please enable the integration and use !msgraph-user-test in order to test it")
if not demisto.params().get('auth_code'):
raise Exception("You must enter an authorization code in a self-deployed configuration.")
client.ms_client.http_request(method='GET', url_suffix='users/')
return 'ok', None, None
|
41,204 |
def _Sdg(q, args, operations, qubits):
# Apply the tableau with S^+, so the inverse of operation is S
args.axes = [q]
protocols.act_on(ops.ZPowGate() ** 1.5, args, allow_decompose=False)
operations.append(ops.S(qubits[q]))
|
def _Sdg(q, args, operations, qubits):
# Apply the tableau with S^+, so the inverse of operation is S
args.axes = [q]
protocols.act_on(cirq.S ** (-1), args, allow_decompose=False)
operations.append(ops.S(qubits[q]))
|
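The edit replaces ops.ZPowGate() ** 1.5 with cirq.S ** (-1); both expressions denote the same unitary (S-dagger), which a quick numpy comparison confirms (assuming cirq is installed):

```python
import numpy as np
import cirq

u_zpow = cirq.unitary(cirq.ZPowGate() ** 1.5)  # Z^1.5 == diag(1, -1j)
u_sdg = cirq.unitary(cirq.S ** -1)             # S-dagger == diag(1, -1j)
print(np.allclose(u_zpow, u_sdg))  # True
```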
34,431 |
def is_conversation_test_file(file_path: Text) -> bool:
"""Checks if a file is a Rasa conversation test file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a conversation test file, otherwise `False`.
"""
if not file_path.endswith(".md"):
return False
try:
dirname = os.path.dirname(file_path)
return is_story_file(file_path) and DEFAULT_E2E_TESTS_PATH in dirname
except Exception as e:
# catch-all because we might be loading files we are not expecting to load
logger.error(
f"Tried to check if '{file_path}' is a conversation test file, but failed "
f"to read it. If this file contains conversation test data, you should "
f"investigate this error, otherwise it is probably best to "
f"move the file to a different location. "
f"Error: {e}"
)
return False
|
def is_conversation_test_file(file_path: Text) -> bool:
"""Checks if a file is a Rasa conversation test file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a conversation test file, otherwise `False`.
"""
if not file_path.endswith(".md"):
return False
try:
dirname = os.path.dirname(file_path)
return DEFAULT_E2E_TESTS_PATH in dirname and is_story_file(file_path)
except Exception as e:
# catch-all because we might be loading files we are not expecting to load
logger.error(
f"Tried to check if '{file_path}' is a conversation test file, but failed "
f"to read it. If this file contains conversation test data, you should "
f"investigate this error, otherwise it is probably best to "
f"move the file to a different location. "
f"Error: {e}"
)
return False
|
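The only change is the order of the conjunction: because `and` short-circuits, checking the cheap directory condition first avoids running the expensive is_story_file parse on files that cannot be e2e tests. A tiny self-contained illustration (the parser here is a hypothetical stand-in):

```python
import os

DEFAULT_E2E_TESTS_PATH = "tests/e2e"

def expensive_parse(file_path: str) -> bool:
    print(f"parsing {file_path} ...")  # stands in for is_story_file()
    return True

def is_conversation_test(file_path: str) -> bool:
    dirname = os.path.dirname(file_path)
    return DEFAULT_E2E_TESTS_PATH in dirname and expensive_parse(file_path)

print(is_conversation_test("data/stories.md"))          # False, parser never runs
print(is_conversation_test("tests/e2e/happy_path.md"))  # parser runs, then True
```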
34,616 |
def test_incomplete_rules_due_to_slots():
some_action = "some_action"
some_slot = "some_slot"
domain = Domain.from_yaml(
f"""
intents:
- {GREET_INTENT_NAME}
actions:
- {some_action}
slots:
{some_slot}:
type: text
"""
)
policy = RulePolicy()
complete_rule = TrackerWithCachedStates.from_events(
"complete_rule",
domain=domain,
slots=domain.slots,
evts=[
ActionExecuted(RULE_SNIPPET_ACTION_NAME),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(intent={"name": GREET_INTENT_NAME}),
ActionExecuted(some_action),
SlotSet(some_slot, "bla"),
ActionExecuted(ACTION_LISTEN_NAME),
],
is_rule_tracker=True,
)
incomplete_rule = TrackerWithCachedStates.from_events(
"incomplete_rule",
domain=domain,
slots=domain.slots,
evts=[
ActionExecuted(RULE_SNIPPET_ACTION_NAME),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(intent={"name": GREET_INTENT_NAME}),
ActionExecuted(some_action),
ActionExecuted(ACTION_LISTEN_NAME),
],
is_rule_tracker=True,
)
with pytest.raises(InvalidRule) as execinfo:
policy.train([complete_rule, incomplete_rule], domain, RegexInterpreter())
assert all(
name in execinfo.value.message
for name in {
some_action,
incomplete_rule.sender_id,
}
)
fixed_incomplete_rule = TrackerWithCachedStates.from_events(
"fixed_incomplete_rule",
domain=domain,
slots=domain.slots,
evts=[
ActionExecuted(RULE_SNIPPET_ACTION_NAME),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(intent={"name": GREET_INTENT_NAME}),
ActionExecuted(some_action),
ActionExecuted(RULE_SNIPPET_ACTION_NAME),
ActionExecuted(ACTION_LISTEN_NAME),
],
is_rule_tracker=True,
)
policy.train([complete_rule, fixed_incomplete_rule], domain, RegexInterpreter())
|
def test_incomplete_rules_due_to_slots():
some_action = "some_action"
some_slot = "some_slot"
domain = Domain.from_yaml(
f"""
intents:
- {GREET_INTENT_NAME}
actions:
- {some_action}
slots:
{some_slot}:
type: text
"""
)
policy = RulePolicy()
complete_rule = TrackerWithCachedStates.from_events(
"complete_rule",
domain=domain,
slots=domain.slots,
evts=[
ActionExecuted(RULE_SNIPPET_ACTION_NAME),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(intent={"name": GREET_INTENT_NAME}),
ActionExecuted(some_action),
SlotSet(some_slot, "bla"),
ActionExecuted(ACTION_LISTEN_NAME),
],
is_rule_tracker=True,
)
incomplete_rule = TrackerWithCachedStates.from_events(
"incomplete_rule",
domain=domain,
slots=domain.slots,
evts=[
ActionExecuted(RULE_SNIPPET_ACTION_NAME),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(intent={"name": GREET_INTENT_NAME}),
ActionExecuted(some_action),
ActionExecuted(ACTION_LISTEN_NAME),
],
is_rule_tracker=True,
)
with pytest.raises(InvalidRule) as execinfo:
policy.train([complete_rule, incomplete_rule], domain, RegexInterpreter())
assert all(
name in execinfo.value.message
for name in {some_action, incomplete_rule.sender_id}
)
fixed_incomplete_rule = TrackerWithCachedStates.from_events(
"fixed_incomplete_rule",
domain=domain,
slots=domain.slots,
evts=[
ActionExecuted(RULE_SNIPPET_ACTION_NAME),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(intent={"name": GREET_INTENT_NAME}),
ActionExecuted(some_action),
ActionExecuted(RULE_SNIPPET_ACTION_NAME),
ActionExecuted(ACTION_LISTEN_NAME),
],
is_rule_tracker=True,
)
policy.train([complete_rule, fixed_incomplete_rule], domain, RegexInterpreter())
|
55,034 |
def test_analytic_deprecation():
"""Tests if the kwarg `analytic` is used and displays error message.
"""
msg = "The attribute `analytic` has been replaced by `shots=None`. "
msg += "Please use `shots=None` instead of `analytic=True`."
with pytest.raises(
DeviceError,
match=msg,
):
qml.device("default.qubit", wires=1, shots=1, analytic=True)
|
def test_analytic_deprecation():
"""Tests if the kwarg `analytic` is used and displays error message.
"""
msg = "The analytic argument has been replaced by shots=None. "
msg += "Please use shots=None instead of analytic=True."
with pytest.raises(
DeviceError,
match=msg,
):
qml.device("default.qubit", wires=1, shots=1, analytic=True)
|
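Worth remembering when reading the change above: pytest.raises(match=...) treats the string as a regular expression applied with re.search, so literal punctuation in an expected message is safest wrapped in re.escape. A minimal sketch:

```python
import re
import pytest

def raise_shots_error():
    raise ValueError("The analytic argument has been replaced by shots=None.")

def test_error_message_matches():
    # re.escape keeps the trailing "." and "=" from being treated as regex syntax
    with pytest.raises(ValueError, match=re.escape("replaced by shots=None.")):
        raise_shots_error()
```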
6,347 |
def set_trace(paused=True, tty=None):
"""
Start the debugger
If paused=False (the default is True), the debugger will not stop here
(same as immediately pressing 'c' to continue).
tty- Allow the user to control the debugger from a separate terminal given in tty
"""
import sys
dbg = _get_debugger(tty=tty)
import threading
if isinstance(threading.current_thread(), threading._MainThread):
set_interrupt_handler()
dbg.set_trace(sys._getframe().f_back, paused=paused)
|
def set_trace(paused=True, tty=None):
"""
Start the debugger
If paused=False (the default is True), the debugger will not stop here
(same as immediately pressing 'c' to continue).
:arg tty: Allow the user to control the debugger from a separate terminal given in tty
"""
import sys
dbg = _get_debugger(tty=tty)
import threading
if isinstance(threading.current_thread(), threading._MainThread):
set_interrupt_handler()
dbg.set_trace(sys._getframe().f_back, paused=paused)
|
25,836 |
def get_interface_row_class(record):
if not record.enabled:
return 'danger'
elif not record.is_connectable:
return 'primary'
else:
return get_cabletermination_row_class(record)
return ''
|
def get_interface_row_class(record):
if not record.enabled:
return 'danger'
elif not record.is_connectable:
return 'primary'
return get_cabletermination_row_class(record)
|
47,988 |
def main():
all_passed = True
index_file_paths = (
OMZ_ROOT / 'models/intel/index.md',
OMZ_ROOT / 'models/public/index.md',
OMZ_ROOT / 'demos/README.md',
)
all_md_files = tuple(find_md_files())
def complain(message):
nonlocal all_passed
all_passed = False
print(message, file=sys.stderr)
index_child_md_links = {}
for index_file_path in index_file_paths:
if not index_file_path.exists():
complain(f'{index_file_path}: file not found')
continue
required_md_links = []
for md_file in all_md_files:
if md_file.name == "README.md" and md_file.parent != index_file_path.parent:
try:
md_rel_path = md_file.relative_to(index_file_path.parent)
except ValueError:
continue
md_intermediate_parents = list(md_rel_path.parents)[1:-1] # removed root and first parent dirs
if not any((index_file_path.parent / parent_dir / 'README.md').exists()
for parent_dir in md_intermediate_parents):
required_md_links.append(md_file)
index_child_md_links[index_file_path] = sorted(required_md_links)
for md_path in sorted(all_md_files):
referenced_md_files = set()
md_path_rel = md_path.relative_to(OMZ_ROOT)
doc_page = omzdocs.DocumentationPage(md_path.read_text(encoding='UTF-8'))
# check local link validity
for url in sorted([ref.url for ref in doc_page.external_references()]):
try:
components = urllib.parse.urlparse(url)
except ValueError:
complain(f'{md_path_rel}: invalid URL reference {url!r}')
continue
if components.scheme: # non-local URLs
continue
if components.netloc or components.path.startswith('/'):
complain(f'{md_path_rel}: non-relative local URL reference "{url}"')
continue
if not components.path: # self-link
continue
target_path = (md_path.parent / urllib.request.url2pathname(components.path)).resolve()
if OMZ_ROOT not in target_path.parents:
complain(f'{md_path_rel}: URL reference "{url}" points outside the OMZ directory')
continue
if not target_path.is_file():
complain(f'{md_path_rel}: URL reference "{url}" target'
' does not exist or is not a file')
continue
if md_path in index_child_md_links:
referenced_md_files.add(target_path)
# check <omz_dir> link validity
for link in sorted([link for link in doc_page.omz_references() if link.startswith('<omz_dir>')]):
file_path = Path(link.replace('<omz_dir>', str(OMZ_ROOT)))
try:
file_relative_path = file_path.relative_to(OMZ_ROOT)
except ValueError:
complain(f'{md_path_rel}: invalid OMZ reference {file_path!r}')
continue
if str(file_relative_path) == md_path_rel: # self-link
continue
if not (file_path.is_file() or file_path.is_dir()):
complain(f'{md_path_rel}: OMZ reference "{file_relative_path}" target'
' does not exist')
# check for existence of links to README.md files of models and demos
if md_path in index_child_md_links:
for md_file in index_child_md_links[md_path]:
if md_file not in referenced_md_files:
complain(f"{md_path_rel}: {md_file.relative_to(OMZ_ROOT)} is not referenced")
# check for HTML fragments that are unsupported by Doxygen
for html_fragment in doc_page.html_fragments():
match = HTML_FRAGMENT_RE.match(html_fragment)
if not match:
complain(f'{md_path_rel}: cannot parse HTML fragment {html_fragment!r}')
continue
if match.group(1).lower() not in ALLOWED_HTML_ELEMENTS:
complain(f'{md_path_rel}: unknown/disallowed HTML element in {html_fragment!r}')
continue
sys.exit(0 if all_passed else 1)
|
def main():
all_passed = True
index_file_paths = (
OMZ_ROOT / 'models/intel/index.md',
OMZ_ROOT / 'models/public/index.md',
OMZ_ROOT / 'demos/README.md',
)
all_md_files = tuple(find_md_files())
def complain(message):
nonlocal all_passed
all_passed = False
print(message, file=sys.stderr)
index_child_md_links = {}
for index_file_path in index_file_paths:
if not index_file_path.exists():
complain(f'{index_file_path}: file not found')
continue
required_md_links = []
for md_file in all_md_files:
if md_file.name == "README.md" and md_file.parent != index_file_path.parent:
try:
md_rel_path = md_file.relative_to(index_file_path.parent)
except ValueError:
continue
md_intermediate_parents = list(md_rel_path.parents)[1:-1] # removed root and first parent dirs
if not any((index_file_path.parent / parent_dir / 'README.md').exists()
for parent_dir in md_intermediate_parents):
required_md_links.append(md_file)
index_child_md_links[index_file_path] = sorted(required_md_links)
for md_path in sorted(all_md_files):
referenced_md_files = set()
md_path_rel = md_path.relative_to(OMZ_ROOT)
doc_page = omzdocs.DocumentationPage(md_path.read_text(encoding='UTF-8'))
# check local link validity
for url in sorted([ref.url for ref in doc_page.external_references()]):
try:
components = urllib.parse.urlparse(url)
except ValueError:
complain(f'{md_path_rel}: invalid URL reference {url!r}')
continue
if components.scheme: # non-local URLs
continue
if components.netloc or components.path.startswith('/'):
complain(f'{md_path_rel}: non-relative local URL reference "{url}"')
continue
if not components.path: # self-link
continue
target_path = (md_path.parent / urllib.request.url2pathname(components.path)).resolve()
if OMZ_ROOT not in target_path.parents:
complain(f'{md_path_rel}: URL reference "{url}" points outside the OMZ directory')
continue
if not target_path.is_file():
complain(f'{md_path_rel}: URL reference "{url}" target'
' does not exist or is not a file')
continue
if md_path in index_child_md_links:
referenced_md_files.add(target_path)
# check <omz_dir> link validity
for link in sorted([link for link in doc_page.omz_references() if link.startswith('<omz_dir>')]):
file_path = Path(link.replace('<omz_dir>', str(OMZ_ROOT)))
try:
file_relative_path = file_path.relative_to(OMZ_ROOT)
except ValueError:
complain(f'{md_path_rel}: invalid OMZ reference {file_path!r}')
continue
if str(file_relative_path) == md_path_rel: # self-link
continue
if not (file_path.is_file() or file_path.is_dir()):
complain(f'{md_path_rel}: OMZ reference "{link}" target'
' does not exist')
# check for existence of links to README.md files of models and demos
if md_path in index_child_md_links:
for md_file in index_child_md_links[md_path]:
if md_file not in referenced_md_files:
complain(f"{md_path_rel}: {md_file.relative_to(OMZ_ROOT)} is not referenced")
# check for HTML fragments that are unsupported by Doxygen
for html_fragment in doc_page.html_fragments():
match = HTML_FRAGMENT_RE.match(html_fragment)
if not match:
complain(f'{md_path_rel}: cannot parse HTML fragment {html_fragment!r}')
continue
if match.group(1).lower() not in ALLOWED_HTML_ELEMENTS:
complain(f'{md_path_rel}: unknown/disallowed HTML element in {html_fragment!r}')
continue
sys.exit(0 if all_passed else 1)
|
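The URL handling in the loop above boils down to a small classification with urllib.parse.urlparse: external links (with a scheme) are skipped, absolute local links are flagged, empty paths are self-links, and only relative paths get resolved against the Markdown file. The same logic in isolation:

```python
import urllib.parse

def classify(url: str) -> str:
    components = urllib.parse.urlparse(url)
    if components.scheme:
        return "external"
    if components.netloc or components.path.startswith('/'):
        return "non-relative local"
    if not components.path:
        return "self-link"
    return "relative local"

for url in ("https://example.com/x", "/docs/index.md", "../README.md", "#anchor"):
    print(url, "->", classify(url))
```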
5,359 |
def _uninstall(
action="remove",
name=None,
version=None,
pkgs=None,
normalize=True,
ignore_epoch=None,
**kwargs
):
"""
Common function for package removal
"""
if action not in ("remove", "purge"):
return {
"name": name,
"changes": {},
"result": False,
"comment": "Invalid action '{}'. " "This is probably a bug.".format(action),
}
try:
pkg_params = __salt__["pkg_resource.parse_targets"](
name, pkgs, normalize=normalize
)[0]
except MinionError as exc:
return {
"name": name,
"changes": {},
"result": False,
"comment": "An error was encountered while parsing targets: "
"{}".format(exc),
}
targets = _find_remove_targets(
name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs
)
if isinstance(targets, dict) and "result" in targets:
return targets
elif not isinstance(targets, list):
return {
"name": name,
"changes": {},
"result": False,
"comment": "An error was encountered while checking targets: "
"{}".format(targets),
}
if action == "purge":
old_removed = __salt__["pkg.list_pkgs"](
versions_as_list=True, removed=True, **kwargs
)
targets.extend([x for x in pkg_params if x in old_removed])
targets.sort()
if not targets:
return {
"name": name,
"changes": {},
"result": True,
"comment": "None of the targeted packages are installed"
"{}".format(" or partially installed" if action == "purge" else ""),
}
if __opts__["test"]:
return {
"name": name,
"changes": {},
"result": None,
"comment": "The following packages will be {}d: "
"{}.".format(action, ", ".join(targets)),
}
changes = __salt__["pkg.{}".format(action)](
name, pkgs=pkgs, version=version, **kwargs
)
new = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs)
failed = []
for param in pkg_params:
if __grains__["os_family"] in ["Suse", "RedHat"]:
# Check if the package version set to be removed is actually removed:
if param in new and not pkg_params[param]:
failed.append(param)
elif param in new and pkg_params[param] in new[param]:
failed.append(param + "-" + pkg_params[param])
elif param in new:
failed.append(param)
if action == "purge":
new_removed = __salt__["pkg.list_pkgs"](
versions_as_list=True, removed=True, **kwargs
)
failed.extend([x for x in pkg_params if x in new_removed])
failed.sort()
if failed:
return {
"name": name,
"changes": changes,
"result": False,
"comment": "The following packages failed to {}: "
"{}.".format(action, ", ".join(failed)),
}
comments = []
not_installed = sorted([x for x in pkg_params if x not in targets])
if not_installed:
comments.append(
"The following packages were not installed: "
"{}".format(", ".join(not_installed))
)
comments.append(
"The following packages were {}d: " "{}.".format(action, ", ".join(targets))
)
else:
comments.append("All targeted packages were {}d.".format(action))
return {
"name": name,
"changes": changes,
"result": True,
"comment": " ".join(comments),
}
|
def _uninstall(
action="remove",
name=None,
version=None,
pkgs=None,
normalize=True,
ignore_epoch=None,
**kwargs
):
"""
Common function for package removal
"""
if action not in ("remove", "purge"):
return {
"name": name,
"changes": {},
"result": False,
"comment": "Invalid action '{}'. " "This is probably a bug.".format(action),
}
try:
pkg_params = __salt__["pkg_resource.parse_targets"](
name, pkgs, normalize=normalize
)[0]
except MinionError as exc:
return {
"name": name,
"changes": {},
"result": False,
"comment": "An error was encountered while parsing targets: "
"{}".format(exc),
}
targets = _find_remove_targets(
name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs
)
if isinstance(targets, dict) and "result" in targets:
return targets
elif not isinstance(targets, list):
return {
"name": name,
"changes": {},
"result": False,
"comment": "An error was encountered while checking targets: "
"{}".format(targets),
}
if action == "purge":
old_removed = __salt__["pkg.list_pkgs"](
versions_as_list=True, removed=True, **kwargs
)
targets.extend([x for x in pkg_params if x in old_removed])
targets.sort()
if not targets:
return {
"name": name,
"changes": {},
"result": True,
"comment": "None of the targeted packages are installed"
"{}".format(" or partially installed" if action == "purge" else ""),
}
if __opts__["test"]:
return {
"name": name,
"changes": {},
"result": None,
"comment": "The following packages will be {}d: "
"{}.".format(action, ", ".join(targets)),
}
changes = __salt__["pkg.{}".format(action)](
name, pkgs=pkgs, version=version, **kwargs
)
new = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs)
failed = []
for param in pkg_params:
if __grains__["os_family"] in ["Suse", "RedHat"]:
# Check if the package version set to be removed is actually removed:
if param in new and not pkg_params[param]:
failed.append(param)
elif param in new and pkg_params[param] in new[param]:
failed.append(param + "-" + pkg_params[param])
elif param in new:
failed.append(param)
if action == "purge":
new_removed = __salt__["pkg.list_pkgs"](
versions_as_list=True, removed=True, **kwargs
)
failed.extend([x for x in pkg_params if x in new_removed])
failed.sort()
if failed:
return {
"name": name,
"changes": changes,
"result": False,
"comment": "The following packages failed to {}: "
"{}.".format(action, ", ".join(failed)),
}
comments = []
not_installed = sorted([x for x in pkg_params if x not in targets])
if not_installed:
comments.append(
"The following packages were not installed: "
"{}".format(", ".join(not_installed))
)
comments.append(
"The following packages were {}d: {}.".format(action, ", ".join(targets))
)
else:
comments.append("All targeted packages were {}d.".format(action))
return {
"name": name,
"changes": changes,
"result": True,
"comment": " ".join(comments),
}
|
54,502 |
def test_plot_parallel_coordinate_unique_hyper_param() -> None:
# Test case when one unique value is suggested during the optimization.
study_categorical_params = create_study()
study_categorical_params.add_trial(
create_trial(
value=0.0,
params={"category_a": "preferred", "param_b": 30},
distributions={
"category_a": CategoricalDistribution(("preferred", "opt")),
"param_b": LogUniformDistribution(1, 1000),
},
)
)
# both hyperparameters contain unique values
figure = plot_parallel_coordinate(study_categorical_params)
assert len(figure.get_lines()) == 0
study_categorical_params.add_trial(
create_trial(
value=2.0,
params={"category_a": "preferred", "param_b": 20},
distributions={
"category_a": CategoricalDistribution(("preferred", "opt")),
"param_b": LogUniformDistribution(1, 1000),
},
)
)
# still "category_a" contains unique suggested value during the optimization.
figure = plot_parallel_coordinate(study_categorical_params)
assert len(figure.get_lines()) == 0
|
def test_plot_parallel_coordinate_unique_hyper_param() -> None:
# Test case when one unique value is suggested during the optimization.
study_categorical_params = create_study()
study_categorical_params.add_trial(
create_trial(
value=0.0,
params={"category_a": "preferred", "param_b": 30},
distributions={
"category_a": CategoricalDistribution(("preferred", "opt")),
"param_b": LogUniformDistribution(1, 1000),
},
)
)
# both hyperparameters contain unique values
figure = plot_parallel_coordinate(study_categorical_params)
assert len(figure.data[0]["dimensions"]) == 3
assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
assert figure.data[0]["dimensions"][0]["range"] == (0.0, 0.0)
assert figure.data[0]["dimensions"][0]["values"] == (0.0,)
assert figure.data[0]["dimensions"][1]["label"] == "category_a"
assert figure.data[0]["dimensions"][1]["range"] == (0, 0)
assert figure.data[0]["dimensions"][1]["values"] == (0.0,)
assert figure.data[0]["dimensions"][1]["ticktext"] == ("preferred",)
assert figure.data[0]["dimensions"][2]["label"] == "param_b"
assert figure.data[0]["dimensions"][2]["range"] == (math.log10(30), math.log10(30))
assert figure.data[0]["dimensions"][2]["values"] == (math.log10(30),)
assert figure.data[0]["dimensions"][2]["ticktext"] == ("30",)
assert figure.data[0]["dimensions"][2]["tickvals"] == (math.log10(30),)
study_categorical_params.add_trial(
create_trial(
value=2.0,
params={"category_a": "preferred", "param_b": 20},
distributions={
"category_a": CategoricalDistribution(("preferred", "opt")),
"param_b": LogUniformDistribution(1, 1000),
},
)
)
# still "category_a" contains unique suggested value during the optimization.
figure = plot_parallel_coordinate(study_categorical_params)
assert len(figure.get_lines()) == 0
|
31,511 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials').get('identifier')
password = params.get('credentials').get('password')
proxy = params.get('proxy', False)
verify_certificate = not params.get('insecure', False)
url = str(params.get("url"))
if url[-1] == "/":
base_url = url + "api/v2/"
else:
base_url = url + "/api/v2/"
indicator_collections = params.get('indicator_collections', [])
indicators_first_fetch = params.get('indicators_first_fetch', '3 days').strip()
requests_count = int(params.get('requests_count', 2))
args = demisto.args()
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
headers={"Accept": "*/*"})
commands = {'gibtia-get-indicators': get_indicators_command}
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif command == 'fetch-indicators':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, indicators = fetch_indicators_command(client=client, last_run=demisto.getIntegrationContext(),
first_fetch_time=indicators_first_fetch,
indicator_collections=indicator_collections,
requests_count=requests_count)
demisto.setIntegrationContext(next_run)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
else:
return_results(commands[command](client, args))
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials').get('identifier')
password = params.get('credentials').get('password')
proxy = params.get('proxy', False)
verify_certificate = not params.get('insecure', False)
url = str(params.get("url"))
if url[-1] == "/":
base_url = url + "api/v2/"
else:
base_url = url + "/api/v2/"
indicator_collections = params.get('indicator_collections', [])
indicators_first_fetch = params.get('indicators_first_fetch', '3 days').strip()
requests_count = int(params.get('requests_count', 2))
args = demisto.args()
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
headers={"Accept": "*/*"})
commands = {'gibtia-get-indicators': get_indicators_command}
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif command == 'fetch-indicators':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, indicators = fetch_indicators_command(client=client, last_run=demisto.getIntegrationContext(),
first_fetch_time=indicators_first_fetch,
indicator_collections=indicator_collections,
requests_count=requests_count)
set_integration_context(next_run)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
else:
return_results(commands[command](client, args))
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
31,172 |
def create_endpoint_context(audit_logs):
endpoints = []
for log in audit_logs:
endpoint_details = {
'ID': log.get('ENDPOINTID'),
'Hostname': log.get('ENDPOINTNAME'),
'Domain': log.get('DOMAIN')
}
remove_nulls_from_dictionary(endpoint_details)
if endpoint_details:
endpoints.append(endpoint_details)
return endpoints
|
def create_endpoint_context(audit_logs):
endpoints = []
for log in audit_logs:
endpoint_details = {
'ID': log.get('ENDPOINTID'),
'Hostname': log.get('ENDPOINTNAME'),
'Domain': log.get('DOMAIN'),
}
remove_nulls_from_dictionary(endpoint_details)
if endpoint_details:
endpoints.append(endpoint_details)
return endpoints
|
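remove_nulls_from_dictionary comes from the Demisto common server utilities; a minimal stand-alone equivalent (an assumption about its behaviour, not the real implementation) shows the pattern the function relies on, dropping empty fields in place before the dict is appended:

```python
def remove_nulls_from_dictionary(data: dict) -> None:
    """Delete keys whose values are None or empty, modifying the dict in place."""
    for key in list(data):
        if data[key] in (None, '', [], {}):
            del data[key]

endpoint_details = {'ID': 'ep-1', 'Hostname': None, 'Domain': ''}
remove_nulls_from_dictionary(endpoint_details)
print(endpoint_details)  # {'ID': 'ep-1'}
```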
14,485 |
def _run_cli_entrypoint() -> None:
"""Invoke the main entrypoint with current CLI args.
This function also processes the runtime exceptions.
"""
try:
sys.exit(main(sys.argv))
except IOError as exc:
# NOTE: Only "broken pipe" is acceptable to ignore
if exc.errno != errno.EPIPE:
raise
except KeyboardInterrupt:
sys.exit(EXIT_CRTL_C)
except RuntimeError as e:
raise SystemExit(str(e))
|
def _run_cli_entrypoint() -> None:
"""Invoke the main entrypoint with current CLI args.
This function also processes the runtime exceptions.
"""
try:
sys.exit(main(sys.argv))
except IOError as exc:
# NOTE: Only "broken pipe" is acceptable to ignore
if exc.errno != errno.EPIPE:
raise
except KeyboardInterrupt:
sys.exit(EXIT_CTRL_C)
except RuntimeError as e:
raise SystemExit(str(e))
|
31,116 |
def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any,
build_number: str, private_packs: list, current_commit_hash: str,
index_generation: int, is_private: bool = False, force_upload: bool = False,
previous_commit_hash: str = None):
"""
Upload updated index zip to cloud storage.
:param index_folder_path: index folder full path.
:param extract_destination_path: extract folder full path.
:param index_blob: google cloud storage object that represents index.zip blob.
:param build_number: circleCI build number, used as an index revision.
:param private_packs: List of private packs and their price.
:param current_commit_hash: last commit hash of head.
:param index_generation: downloaded index generation.
:param is_private: Indicates if upload is private.
:param force_upload: Indicates if force upload or not.
:param previous_commit_hash: The previous commit hash to diff with.
:returns None.
"""
if force_upload:
# If we force upload we don't want to update the commit in the index.json file,
# this is to be able to identify all changed packs in the next upload
commit = previous_commit_hash
logging.info('Force upload flow - Index commit hash should not be changed')
logging.debug(f'commit hash is: {commit}')
else:
# Otherwise, update the index with the current commit hash (the commit of the upload)
commit = current_commit_hash
logging.info('Updating production index commit hash to master last commit hash')
logging.debug('commit hash is: {commit}')
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json"), "w+") as index_file:
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT),
'packs': private_packs,
'commit': commit
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(index_folder_path)
index_zip_path = shutil.make_archive(base_name=index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
index_blob.reload()
current_index_generation = index_blob.generation
index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
if is_private or current_index_generation == index_generation:
index_blob.upload_from_filename(index_zip_path)
logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.")
else:
logging.critical(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation")
logging.critical(f"Downloaded index generation: {index_generation}")
logging.critical(f"Current index generation: {current_index_generation}")
sys.exit(0)
except Exception:
logging.exception(f"Failed in uploading {GCPConfig.INDEX_NAME}.")
sys.exit(1)
finally:
shutil.rmtree(index_folder_path)
|
def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any,
build_number: str, private_packs: list, current_commit_hash: str,
index_generation: int, is_private: bool = False, force_upload: bool = False,
previous_commit_hash: str = None):
"""
Upload updated index zip to cloud storage.
:param index_folder_path: index folder full path.
:param extract_destination_path: extract folder full path.
:param index_blob: google cloud storage object that represents index.zip blob.
:param build_number: circleCI build number, used as an index revision.
:param private_packs: List of private packs and their price.
:param current_commit_hash: last commit hash of head.
:param index_generation: downloaded index generation.
:param is_private: Indicates if upload is private.
:param force_upload: Indicates if force upload or not.
:param previous_commit_hash: The previous commit hash to diff with.
:returns None.
"""
if force_upload:
# If we force upload we don't want to update the commit in the index.json file,
# this is to be able to identify all changed packs in the next upload
commit = previous_commit_hash
logging.info('Force upload flow - Index commit hash should not be changed')
logging.debug(f'commit hash is: {commit}')
else:
# Otherwise, update the index with the current commit hash (the commit of the upload)
commit = current_commit_hash
logging.info('Updating production index commit hash to master last commit hash')
logging.debug(f'commit hash is: {commit}')
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json"), "w+") as index_file:
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT),
'packs': private_packs,
'commit': commit
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(index_folder_path)
index_zip_path = shutil.make_archive(base_name=index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
index_blob.reload()
current_index_generation = index_blob.generation
index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
if is_private or current_index_generation == index_generation:
index_blob.upload_from_filename(index_zip_path)
logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.")
else:
logging.critical(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation")
logging.critical(f"Downloaded index generation: {index_generation}")
logging.critical(f"Current index generation: {current_index_generation}")
sys.exit(0)
except Exception:
logging.exception(f"Failed in uploading {GCPConfig.INDEX_NAME}.")
sys.exit(1)
finally:
shutil.rmtree(index_folder_path)
|
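The one-character fix in this pair (the f prefix added to the second logging.debug call) changes what actually gets logged; a tiny demonstration:

```python
import logging

logging.basicConfig(level=logging.DEBUG, format="%(message)s")
commit = "abc123"
logging.debug('commit hash is: {commit}')   # logs the literal text "{commit}"
logging.debug(f'commit hash is: {commit}')  # logs "commit hash is: abc123"
```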
84 |
def load(rec, account_key: str = None):
"""Given a record, tries to add/match that edition in the system.
Record is a dictionary containing all the metadata of the edition.
The following fields are mandatory:
* title: str
* source_records: list
:param dict rec: Edition record to add
:rtype: dict
:return: a dict to be converted into a JSON HTTP response, same as load_data()
"""
required_fields = ['title', 'source_records'] # ['authors', 'publishers', 'publish_date']
for field in required_fields:
if not rec.get(field):
raise RequiredField(field)
if not isinstance(rec['source_records'], list):
rec['source_records'] = [rec['source_records']]
# Split subtitle if required and not already present
if ':' in rec.get('title') and not rec.get('subtitle'):
title, subtitle = split_subtitle(rec.get('title'))
if subtitle:
rec['title'] = title
rec['subtitle'] = subtitle
rec = normalize_record_isbns(rec)
edition_pool = build_pool(rec)
if not edition_pool:
# No match candidates found, add edition
return load_data(rec, account_key=account_key)
match = early_exit(rec)
if not match:
match = find_exact_match(rec, edition_pool)
if not match:
rec['full_title'] = rec['title']
if rec.get('subtitle'):
rec['full_title'] += ' ' + rec['subtitle']
e1 = build_marc(rec)
add_db_name(e1)
match = find_match(e1, edition_pool)
if not match:
# No match found, add edition
return load_data(rec, account_key=account_key)
# We have an edition match at this point
need_work_save = need_edition_save = False
w = None
e = web.ctx.site.get(match)
# check for, and resolve, author redirects
for a in e.authors:
while is_redirect(a):
if a in e.authors:
e.authors.remove(a)
a = web.ctx.site.get(a.location)
if not is_redirect(a):
e.authors.append(a)
if e.get('works'):
w = e.works[0].dict()
work_created = False
else:
# Found an edition without a work
work_created = need_work_save = need_edition_save = True
w = new_work(e.dict(), rec)
e.works = [{'key': w['key']}]
# Add subjects to work, if not already present
if 'subjects' in rec:
work_subjects = list(w.get('subjects', []))
for s in rec['subjects']:
if s not in work_subjects:
work_subjects.append(s)
need_work_save = True
if need_work_save and work_subjects:
w['subjects'] = work_subjects
# Add cover to edition
if 'cover' in rec and not e.get_covers():
cover_url = rec['cover']
cover_id = add_cover(cover_url, e.key, account_key=account_key)
if cover_id:
e['covers'] = [cover_id]
need_edition_save = True
# Add cover to work, if needed
if not w.get('covers') and e.get_covers():
w['covers'] = [e['covers'][0]]
need_work_save = True
# Add description to work, if needed
if not w.get('description') and e.get('description'):
w['description'] = e['description']
need_work_save = True
# Add authors to work, if needed
if not w.get('authors'):
authors = [import_author(a) for a in rec.get('authors', [])]
w['authors'] = [{'type':{'key': '/type/author_role'}, 'author': a.key} for a in authors if a.get('key')]
if w.get('authors'):
need_work_save = True
# Add ocaid to edition (str), if needed
if 'ocaid' in rec and not e.ocaid:
e['ocaid'] = rec['ocaid']
need_edition_save = True
# Add list fields to edition as needed
edition_fields = [
'local_id',
'lccn',
'lc_classifications',
'source_records',
]
for f in edition_fields:
if f not in rec:
continue
# ensure values is a list
values = rec[f] if isinstance(rec[f], list) else [rec[f]]
if f in e:
# get values from rec that are not currently on the edition
to_add = [v for v in values if v not in e[f]]
e[f] += to_add
else:
e[f] = to_add = values
if to_add:
need_edition_save = True
edits = []
reply = {
'success': True,
'edition': {'key': match, 'status': 'matched'},
'work': {'key': w['key'], 'status': 'matched'},
}
if need_edition_save:
reply['edition']['status'] = 'modified'
edits.append(e.dict())
if need_work_save:
reply['work']['status'] = 'created' if work_created else 'modified'
edits.append(w)
if edits:
web.ctx.site.save_many(edits, comment='import existing book', action='edit-book')
if 'ocaid' in rec:
update_ia_metadata_for_ol_edition(match.split('/')[-1])
return reply
|
def load(rec, account_key=None):
"""Given a record, tries to add/match that edition in the system.
Record is a dictionary containing all the metadata of the edition.
The following fields are mandatory:
* title: str
* source_records: list
:param dict rec: Edition record to add
:rtype: dict
:return: a dict to be converted into a JSON HTTP response, same as load_data()
"""
required_fields = ['title', 'source_records'] # ['authors', 'publishers', 'publish_date']
for field in required_fields:
if not rec.get(field):
raise RequiredField(field)
if not isinstance(rec['source_records'], list):
rec['source_records'] = [rec['source_records']]
# Split subtitle if required and not already present
if ':' in rec.get('title') and not rec.get('subtitle'):
title, subtitle = split_subtitle(rec.get('title'))
if subtitle:
rec['title'] = title
rec['subtitle'] = subtitle
rec = normalize_record_isbns(rec)
edition_pool = build_pool(rec)
if not edition_pool:
# No match candidates found, add edition
return load_data(rec, account_key=account_key)
match = early_exit(rec)
if not match:
match = find_exact_match(rec, edition_pool)
if not match:
rec['full_title'] = rec['title']
if rec.get('subtitle'):
rec['full_title'] += ' ' + rec['subtitle']
e1 = build_marc(rec)
add_db_name(e1)
match = find_match(e1, edition_pool)
if not match:
# No match found, add edition
return load_data(rec, account_key=account_key)
# We have an edition match at this point
need_work_save = need_edition_save = False
w = None
e = web.ctx.site.get(match)
# check for, and resolve, author redirects
for a in e.authors:
while is_redirect(a):
if a in e.authors:
e.authors.remove(a)
a = web.ctx.site.get(a.location)
if not is_redirect(a):
e.authors.append(a)
if e.get('works'):
w = e.works[0].dict()
work_created = False
else:
# Found an edition without a work
work_created = need_work_save = need_edition_save = True
w = new_work(e.dict(), rec)
e.works = [{'key': w['key']}]
# Add subjects to work, if not already present
if 'subjects' in rec:
work_subjects = list(w.get('subjects', []))
for s in rec['subjects']:
if s not in work_subjects:
work_subjects.append(s)
need_work_save = True
if need_work_save and work_subjects:
w['subjects'] = work_subjects
# Add cover to edition
if 'cover' in rec and not e.get_covers():
cover_url = rec['cover']
cover_id = add_cover(cover_url, e.key, account_key=account_key)
if cover_id:
e['covers'] = [cover_id]
need_edition_save = True
# Add cover to work, if needed
if not w.get('covers') and e.get_covers():
w['covers'] = [e['covers'][0]]
need_work_save = True
# Add description to work, if needed
if not w.get('description') and e.get('description'):
w['description'] = e['description']
need_work_save = True
# Add authors to work, if needed
if not w.get('authors'):
authors = [import_author(a) for a in rec.get('authors', [])]
w['authors'] = [{'type':{'key': '/type/author_role'}, 'author': a.key} for a in authors if a.get('key')]
if w.get('authors'):
need_work_save = True
# Add ocaid to edition (str), if needed
if 'ocaid' in rec and not e.ocaid:
e['ocaid'] = rec['ocaid']
need_edition_save = True
# Add list fields to edition as needed
edition_fields = [
'local_id',
'lccn',
'lc_classifications',
'source_records',
]
for f in edition_fields:
if f not in rec:
continue
# ensure values is a list
values = rec[f] if isinstance(rec[f], list) else [rec[f]]
if f in e:
# get values from rec that are not currently on the edition
to_add = [v for v in values if v not in e[f]]
e[f] += to_add
else:
e[f] = to_add = values
if to_add:
need_edition_save = True
edits = []
reply = {
'success': True,
'edition': {'key': match, 'status': 'matched'},
'work': {'key': w['key'], 'status': 'matched'},
}
if need_edition_save:
reply['edition']['status'] = 'modified'
edits.append(e.dict())
if need_work_save:
reply['work']['status'] = 'created' if work_created else 'modified'
edits.append(w)
if edits:
web.ctx.site.save_many(edits, comment='import existing book', action='edit-book')
if 'ocaid' in rec:
update_ia_metadata_for_ol_edition(match.split('/')[-1])
return reply
|
3,912 |
def nodes(G):
"""Returns an iterator over the graph nodes.
Examples
--------
>>> G = nx.Graph()
>>> G.add_nodes_from([1, 2, 3, 4, 5])
>>> nx.nodes(G)
NodeView((1, 2, 3, 4, 5))
"""
return G.nodes()
|
def nodes(G):
"""Returns an iterator over the graph nodes.
Examples
--------
>>> G = nx.path_graph(5)
>>> nx.nodes(G)
NodeView((0, 1, 2, 3, 4))
"""
return G.nodes()
|
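A quick check of the doctest in the modified version (assuming networkx is installed): nx.path_graph labels its nodes with integers starting at 0, so that is what nx.nodes reports:

```python
import networkx as nx

G = nx.path_graph(5)
print(list(nx.nodes(G)))  # [0, 1, 2, 3, 4]
```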
36,367 |
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
if sys.platform == "darwin":
# Perform first-time customization of compiler-related
# config vars on OS X now that we know we need a compiler.
# This is primarily to support Pythons from binary
# installers. The kind and paths to build tools on
# the user system may vary significantly from the system
# that Python itself was built on. Also the user OS
# version and build tools may not support the same set
# of CPU architectures for universal builds.
global _config_vars
# Use get_config_var() to ensure _config_vars is initialized.
if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
import _osx_support
_osx_support.customize_compiler(_config_vars)
_config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
(cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
if 'CC' in os.environ:
newcc = os.environ['CC']
if (sys.platform == 'darwin'
and 'LDSHARED' not in os.environ
and ldshared.startswith(cc)):
# On OS X, if CC is overridden, use that as the default
# command for LDSHARED as well
ldshared = newcc + ldshared[len(cc):]
cc = newcc
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = cflags + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
if 'AR' in os.environ:
ar = os.environ['AR']
if 'ARFLAGS' in os.environ:
archiver = ar + ' ' + os.environ['ARFLAGS']
else:
archiver = ar + ' ' + ar_flags
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc,
archiver=archiver)
compiler.shared_lib_extension = shlib_suffix
|
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
if sys.platform == "darwin":
# Perform first-time customization of compiler-related
# config vars on OS X now that we know we need a compiler.
# This is primarily to support Pythons from binary
# installers. The kind and paths to build tools on
# the user system may vary significantly from the system
# that Python itself was built on. Also the user OS
# version and build tools may not support the same set
# of CPU architectures for universal builds.
global _config_vars
# Use get_config_var() to ensure _config_vars is initialized.
if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
import _osx_support
_osx_support.customize_compiler(_config_vars)
_config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
(cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
if 'CC' in os.environ:
newcc = os.environ['CC']
if (sys.platform == 'darwin'
and 'LDSHARED' not in os.environ
and ldshared.startswith(cc)):
# On OS X, if CC is overridden, use that as the default
# command for LDSHARED as well
ldshared = newcc + ldshared[len(cc):]
cc = newcc
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = opt + ' ' + cflags + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
if 'AR' in os.environ:
ar = os.environ['AR']
if 'ARFLAGS' in os.environ:
archiver = ar + ' ' + os.environ['ARFLAGS']
else:
archiver = ar + ' ' + ar_flags
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc,
archiver=archiver)
compiler.shared_lib_extension = shlib_suffix
|
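Outside distutils, the environment-override logic above reduces to: start from configured defaults, let CC replace the compiler, and let CFLAGS/CPPFLAGS extend the flag string (the modified version additionally keeps the configured OPT flags in front). A small illustrative sketch with made-up defaults, not the real sysconfig values:

import os

def build_cc_cmd(cc='gcc', opt='-O2', cflags='-fPIC'):
    cc = os.environ.get('CC', cc)               # CC overrides the compiler
    if 'CFLAGS' in os.environ:                  # CFLAGS extends; OPT stays in front
        cflags = opt + ' ' + cflags + ' ' + os.environ['CFLAGS']
    if 'CPPFLAGS' in os.environ:
        cflags = cflags + ' ' + os.environ['CPPFLAGS']
    return cc + ' ' + cflags

os.environ['CFLAGS'] = '-g'
print(build_cc_cmd())   # e.g. 'gcc -O2 -fPIC -g'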
23,062 |
def _sample_map_partitions(population, k=1, replace=True):
"""
Map function used on the sample and choices functions.
Parameters
----------
population : list
List of elements to sample.
k : int, optional
Number of elements to sample. Default is a single value is returned.
replace : boolean, optional
Whether the sample is with or without replacement
Returns a tuple, composed of:
- list of k samples;
- total number of elements from where the sample was drawn;
- total number of elements to be sampled;
- boolean which is True if the sample was drawn with replacement.
"""
lx = len(population)
real_k = k if k <= lx else lx
sample_fun = random.choices if replace else random.sample
# because otherwise it raises IndexError:
sampled = [] if real_k == 0 else sample_fun(population, k=real_k)
return sampled, lx, k, replace
|
def _sample_map_partitions(population, k=1, replace=True):
"""
Map function used on the sample and choices functions.
Parameters
----------
population : list
List of elements to sample.
k : int, optional
Number of elements to sample. Default is 1.
replace : boolean, optional
Whether the sample is with or without replacement
Returns a tuple, composed of:
- list of k samples;
- total number of elements from where the sample was drawn;
- total number of elements to be sampled;
- boolean which is True if the sample was drawn with replacement.
"""
lx = len(population)
real_k = k if k <= lx else lx
sample_fun = random.choices if replace else random.sample
# because otherwise it raises IndexError:
sampled = [] if real_k == 0 else sample_fun(population, k=real_k)
return sampled, lx, k, replace
|
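The helper above leans on two standard-library calls: random.choices for sampling with replacement and random.sample without, with k clamped to the population size so an oversized request cannot raise. A self-contained illustration of that core:

import random

population = ['a', 'b', 'c']
k, replace = 5, False
real_k = k if k <= len(population) else len(population)
sample_fun = random.choices if replace else random.sample
sampled = [] if real_k == 0 else sample_fun(population, k=real_k)
print(sampled)   # a permutation of ['a', 'b', 'c']; never more than 3 items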
55,473 |
def test_from_parquet_pandas_index():
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 100_000, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
pandas_df.set_index("idx").to_parquet("tmp.parquet")
# read the same parquet using modin.pandas
df_equals(pd.read_parquet("tmp.parquet"), pandas.read_parquet("tmp.parquet"))
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 100_000, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
pandas_df.set_index(["idx", "A"]).to_parquet("tmp.parquet")
df_equals(pd.read_parquet("tmp.parquet"), pandas.read_parquet("tmp.parquet"))
|
def test_from_parquet_pandas_index():
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 100_000, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
pandas_df.set_index("idx").to_parquet("tmp.parquet")
# read the same parquet using modin.pandas
df_equals(pd.read_parquet("tmp.parquet"), pandas.read_parquet("tmp.parquet"))
pandas_df.set_index(["idx", "A"]).to_parquet("tmp.parquet")
df_equals(pd.read_parquet("tmp.parquet"), pandas.read_parquet("tmp.parquet"))
|
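The test above rests on plain pandas preserving a non-default index through a parquet round trip; a minimal version of that round trip (assumes pandas and a parquet engine such as pyarrow are installed; the file name is illustrative):

import numpy as np
import pandas

df = pandas.DataFrame({"idx": np.arange(10), "A": np.random.randint(0, 100, size=10)})
df.set_index("idx").to_parquet("tmp_example.parquet")
roundtrip = pandas.read_parquet("tmp_example.parquet")
print(roundtrip.index.name)   # 'idx'
print(roundtrip.shape)        # (10, 1)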
32,359 |
def main():
demisto.info(f'Command is {demisto.command()}')
try:
args = demisto.args()
if demisto.command() == 'test-module':
demisto.results(test())
if demisto.command() == 'bt-get-tree':
demisto.results(get_tree(args))
except Exception as e:
demisto.error(e)
raise
|
def main():
demisto.info(f'Command is {demisto.command()}')
try:
args = demisto.args()
if demisto.command() == 'test-module':
return_results(test())
if demisto.command() == 'bt-get-tree':
demisto.results(get_tree(args))
except Exception as e:
demisto.error(e)
raise
|
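Stripped of the XSOAR runtime, the routing above is a dictionary dispatch. A generic sketch with stand-in handlers (demisto itself is deliberately not imported here):

def dispatch(command, args, handlers):
    if command not in handlers:
        raise NotImplementedError(f'Unknown command: {command}')
    return handlers[command](args)

handlers = {
    'test-module': lambda args: 'ok',
    'bt-get-tree': lambda args: {'tree-for': args.get('root')},
}
print(dispatch('test-module', {}, handlers))                  # 'ok'
print(dispatch('bt-get-tree', {'root': 'node-1'}, handlers))  # {'tree-for': 'node-1'}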
39,110 |
def run(argv):
"""Main entry point; defines and runs the wordcount pipeline."""
opts = PipelineOptions()
opts.view_as(SetupOptions).save_main_session = True
o = opts.view_as(MyOptions)
with beam.Pipeline(options=opts) as p:
# Read the text file[pattern] into a PCollection.
(p | 'read' >> ReadFromText(o.input)
| 'Get DLP Findings' >> beam.ParDo(DlpFindingDoFn(o.project_id))
| 'Determine if client SSN' >> beam.ParDo(ExistingSSNsDoFn(o.project_id, o.salt, o.secret_name, o.collection_name))
| 'Count findings' >> beam.combiners.Count.Globally()
| 'Write to Pubsub' >> beam.ParDo(WriteToPubsub(o.project_id, o.topic, o.input)))
|
def run(argv):
"""Main entry point; defines and runs the wordcount pipeline."""
opts = MyOptions() # or your improved options class name :)
opts.view_as(SetupOptions).save_main_session = True
o = opts.view_as(MyOptions)
with beam.Pipeline(options=opts) as p:
# Read the text file[pattern] into a PCollection.
(p | 'read' >> ReadFromText(o.input)
| 'Get DLP Findings' >> beam.ParDo(DlpFindingDoFn(o.project_id))
| 'Determine if client SSN' >> beam.ParDo(ExistingSSNsDoFn(o.project_id, o.salt, o.secret_name, o.collection_name))
| 'Count findings' >> beam.combiners.Count.Globally()
| 'Write to Pubsub' >> beam.ParDo(WriteToPubsub(o.project_id, o.topic, o.input)))
|
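The definition of MyOptions is not shown in either version; in Beam, a custom options class like it is normally declared by subclassing PipelineOptions and registering flags in _add_argparse_args, after which both MyOptions() and opts.view_as(MyOptions) expose them as attributes. A hedged sketch using flag names taken from the usage above (assumes apache_beam is installed; the remaining flags are omitted for brevity):

from apache_beam.options.pipeline_options import PipelineOptions

class MyOptions(PipelineOptions):
    @classmethod
    def _add_argparse_args(cls, parser):
        parser.add_argument('--input', help='Text file or pattern to read')
        parser.add_argument('--project_id', help='GCP project owning the DLP and Pub/Sub resources')
        parser.add_argument('--topic', help='Pub/Sub topic that receives the findings count')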
57,609 |
def fetch_exchange(zone_key1='CR', zone_key2='PA', session=None,
target_datetime=None, logger=logging.getLogger(__name__)) -> dict:
"""
Requests the last known power exchange (in MW) between two countries.
"""
if target_datetime:
raise NotImplementedError(
'This parser is not yet able to parse past dates')
sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
r = session or requests.session()
url = EXCHANGE_URL
response = r.get(url)
assert response.status_code == 200
df = pd.read_html(response.text)[0]
# A positive value on website indicates a flow from country specified to PA.
net_flow_cr = round(float(df[4][1]) + float(df[4][3]) + float(df[4][5]) + float(df[1][8]) + float(df[1][10]), 2)
net_flow_ni = round(float(df[4][8]) + float(df[4][10]) + float(df[4][13]) + float(df[4][15]), 2)
net_flow_hn = round(float(df[1][13]) + float(df[1][15]) + float(df[1][18]) + float(df[1][20]) + float(df[1][23]), 2)
net_flow_sv = round(float(df[4][18]) + float(df[4][20]) + float(df[1][26]) + float(df[1][28]), 2)
net_flow_gt = round(float(df[4][23]) + float(df[4][26]) + float(df[4][28]) + float(df[1][31]), 2)
net_flows = {
"CR->PA": net_flow_cr, # Costa Rica to Panama
"GT->PA": net_flow_gt, # Guatemala to Panama
"HN->PA": net_flow_hn, # Honduras to Panama
"NI->PA": net_flow_ni, # Nicaragua to Panama
"PA->SV": net_flow_sv, # Panama to El Salvador
}
# Invert sign of flows to account for correct flow direction
net_flows["PA->SV"] = -1 * net_flows["PA->SV"]
if sorted_zone_keys not in net_flows:
raise NotImplementedError(
'This exchange pair is not implemented: {}'.format(sorted_zone_keys))
data = {
'datetime': arrow.now(TIMEZONE).datetime,
'netFlow': net_flows[sorted_zone_keys],
'sortedZoneKeys': sorted_zone_keys,
'source': url
}
return data
|
def fetch_exchange(zone_key1='CR', zone_key2='PA', session=None,
target_datetime=None, logger=logging.getLogger(__name__)) -> dict:
"""
Requests the last known power exchange (in MW) between two countries.
"""
if target_datetime:
raise NotImplementedError(
'This parser is not yet able to parse past dates')
sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
r = session or requests.session()
url = EXCHANGE_URL
response = r.get(url)
assert response.status_code == 200
df = pd.read_html(response.text)[0]
# A positive value on website indicates a flow from country specified to PA.
net_flow_cr = round(float(df[4][1]) + float(df[4][3]) + float(df[4][5]) + float(df[1][8]) + float(df[1][10]), 2)
net_flow_ni = round(float(df[4][8]) + float(df[4][10]) + float(df[4][13]) + float(df[4][15]), 2)
net_flow_hn = round(float(df[1][13]) + float(df[1][15]) + float(df[1][18]) + float(df[1][20]) + float(df[1][23]), 2)
net_flow_sv = round(float(df[4][18]) + float(df[4][20]) + float(df[1][26]) + float(df[1][28]), 2)
net_flow_gt = round(float(df[4][23]) + float(df[4][26]) + float(df[4][28]) + float(df[1][31]), 2)
net_flows = {
"CR->PA": net_flow_cr, # Costa Rica to Panama
"GT->PA": net_flow_gt, # Guatemala to Panama
"HN->PA": net_flow_hn, # Honduras to Panama
"NI->PA": net_flow_ni, # Nicaragua to Panama
"PA->SV": net_flow_sv, # Panama to El Salvador
}
# Invert sign of flows to account for correct flow direction
net_flows["PA->SV"] = -1 * net_flows["PA->SV"]
if sorted_zone_keys not in net_flows:
raise NotImplementedError(f"This exchange pair is not implemented: {sorted_zone_keys}")
data = {
'datetime': arrow.now(TIMEZONE).datetime,
'netFlow': net_flows[sorted_zone_keys],
'sortedZoneKeys': sorted_zone_keys,
'source': url
}
return data
|
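A standalone illustration of the zone-key convention used above: the pair is sorted alphabetically into 'A->B', and a positive netFlow means power flowing from A to B, which is why the PA->SV reading has its sign inverted.

def exchange_key(zone_key1, zone_key2):
    return '->'.join(sorted([zone_key1, zone_key2]))

print(exchange_key('PA', 'CR'))   # 'CR->PA' -- positive flow = CR exporting to PA
print(exchange_key('SV', 'PA'))   # 'PA->SV' -- positive flow = PA exporting to SV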
58,032 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
command_list_noarg = {
'opnsense-interfaces-list': interfaces_list_command,
'opnsense-alias-apply': alias_apply_command,
'opnsense-alias-list': alias_list_command,
'opnsense-category-list': category_list_command,
'opnsense-rule-list': fw_rule_list_command,
'opnsense-rule-savepoint': fw_rule_savepoint_command,
'opnsense-firmware-info': firmware_info_command,
'opnsense-firmware-status': firmware_status_command,
'opnsense-firmware-upgradestatus': firmware_upgradestatus_command,
'opnsense-firmware-update': firmware_update_command,
'opnsense-firmware-upgrade': firmware_upgrade_command,
'opnsense-device-reboot': device_reboot_command,
}
command_list = {
'opnsense-alias-add': alias_add_command,
'opnsense-alias-del': alias_del_command,
'opnsense-alias-mod': alias_mod_command,
'opnsense-alias-mod-additem': alias_mod_additem_command,
'opnsense-alias-mod-delitem': alias_mod_delitem_command,
'opnsense-alias-get': alias_get_command,
'opnsense-alias-get-uuid': alias_getuuid_command,
'opnsense-category-add': category_add_command,
'opnsense-category-del': category_del_command,
'opnsense-category-get': category_get_command,
'opnsense-category-mod': category_mod_command,
'opnsense-rule-apply': fw_rule_apply_command,
'opnsense-rule-revert': fw_rule_revert_command,
'opnsense-rule-get': fw_rule_get_command,
'opnsense-rule-del': fw_rule_del_command,
'opnsense-rule-add': fw_rule_add_command,
'opnsense-rule-mod': fw_rule_mod_command,
'opnsense-logs-search': logs_search_command,
'opnsense-states-search': states_search_command,
'opnsense-state-del': state_del_command,
}
params = {
'base_url': urljoin(demisto.params()['url'], '/api'),
'auth': (
demisto.params().get('apikey'),
demisto.params().get('apisecret')),
'verify_cert': not demisto.params().get('insecure', False),
'proxy': demisto.params().get('proxy', False),
'timeout': 60
}
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(params)
cmd = demisto.command()
if cmd == 'test-module':
return_results(test_module(client))
elif cmd in command_list_noarg.keys():
return_results(command_list_noarg[cmd](client))
elif cmd in command_list.keys():
return_results(command_list[cmd](client, demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
command_list_noarg = {
'opnsense-interfaces-list': interfaces_list_command,
'opnsense-alias-apply': alias_apply_command,
'opnsense-alias-list': alias_list_command,
'opnsense-category-list': category_list_command,
'opnsense-rule-list': fw_rule_list_command,
'opnsense-rule-savepoint': fw_rule_savepoint_command,
'opnsense-firmware-info': firmware_info_command,
'opnsense-firmware-status': firmware_status_command,
'opnsense-firmware-upgradestatus': firmware_upgradestatus_command,
'opnsense-firmware-update': firmware_update_command,
'opnsense-firmware-upgrade': firmware_upgrade_command,
'opnsense-device-reboot': device_reboot_command,
}
command_list = {
'opnsense-alias-add': alias_add_command,
'opnsense-alias-del': alias_del_command,
'opnsense-alias-mod': alias_mod_command,
'opnsense-alias-mod-additem': alias_mod_additem_command,
'opnsense-alias-mod-delitem': alias_mod_delitem_command,
'opnsense-alias-get': alias_get_command,
'opnsense-alias-get-uuid': alias_getuuid_command,
'opnsense-category-add': category_add_command,
'opnsense-category-del': category_del_command,
'opnsense-category-get': category_get_command,
'opnsense-category-mod': category_mod_command,
'opnsense-rule-apply': fw_rule_apply_command,
'opnsense-rule-revert': fw_rule_revert_command,
'opnsense-rule-get': fw_rule_get_command,
'opnsense-rule-del': fw_rule_del_command,
'opnsense-rule-add': fw_rule_add_command,
'opnsense-rule-mod': fw_rule_mod_command,
'opnsense-logs-search': logs_search_command,
'opnsense-states-search': states_search_command,
'opnsense-state-del': state_del_command,
}
params = {
'base_url': urljoin(demisto.params()['url'], '/api'),
'auth': (
demisto.params().get('apikey'),
demisto.params().get('apisecret')),
'verify_cert': not demisto.params().get('insecure', False),
'proxy': demisto.params().get('proxy', False),
'timeout': 60
}
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(params)
cmd = demisto.command()
if cmd == 'test-module':
return_results(test_module(client))
elif cmd in command_list_noarg:
return_results(command_list_noarg[cmd](client))
elif cmd in command_list:
return_results(command_list[cmd](client, demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
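The two command tables above separate handlers by call signature; a minimal stand-alone router in the same spirit, with placeholder handlers rather than the real OPNsense client:

def route(cmd, args, client, no_arg_handlers, arg_handlers):
    if cmd in no_arg_handlers:
        return no_arg_handlers[cmd](client)
    if cmd in arg_handlers:
        return arg_handlers[cmd](client, args)
    raise NotImplementedError(f'Command not implemented: {cmd}')

no_arg = {'opnsense-interfaces-list': lambda client: ['em0', 'em1']}
with_arg = {'opnsense-alias-get': lambda client, args: {'alias': args.get('name')}}
print(route('opnsense-interfaces-list', {}, None, no_arg, with_arg))               # ['em0', 'em1']
print(route('opnsense-alias-get', {'name': 'blocklist'}, None, no_arg, with_arg))  # {'alias': 'blocklist'}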
9,517 |
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
delete=dict(type='bool', default=False),
route=dict(type='bool', default=False),
insert=dict(type='str'),
rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
interface=dict(type='str', aliases=['if']),
log=dict(type='bool', default=False),
from_ip=dict(type='str', default='any', aliases=['from', 'src']),
from_port=dict(type='str'),
to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
to_port=dict(type='str', aliases=['port']),
proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp']),
app=dict(type='str', aliases=['name']),
comment=dict(type='str'),
),
supports_check_mode=True,
mutually_exclusive=[
['app', 'proto', 'logging']
],
)
cmds = []
ipv4_regexp = compile_ipv4_regexp()
ipv6_regexp = compile_ipv6_regexp()
def filter_line_that_not_start_with(pattern, content):
return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
def filter_line_that_contains(pattern, content):
return [line for line in content.splitlines(True) if pattern in line]
def filter_line_that_not_contains(pattern, content):
return ''.join([line for line in content.splitlines(True) if pattern not in line])
def filter_line_that_match_func(match_func, content):
return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
def filter_line_that_contains_ipv4(content):
return filter_line_that_match_func(ipv4_regexp.search, content)
def filter_line_that_contains_ipv6(content):
return filter_line_that_match_func(ipv6_regexp.search, content)
def is_starting_by_ipv4(ip):
return ipv4_regexp.match(ip) is not None
def is_starting_by_ipv6(ip):
return ipv6_regexp.match(ip) is not None
def execute(cmd, ignore_error=False):
cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
cmds.append(cmd)
(rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
if rc != 0 and not ignore_error:
module.fail_json(msg=err or out, commands=cmds)
return out
def get_current_rules():
user_rules_files = ["/lib/ufw/user.rules",
"/lib/ufw/user6.rules",
"/etc/ufw/user.rules",
"/etc/ufw/user6.rules",
"/var/lib/ufw/user.rules",
"/var/lib/ufw/user6.rules"]
cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
cmd.extend([[f] for f in user_rules_files])
return execute(cmd, ignore_error=True)
def ufw_version():
"""
Returns the major and minor version of ufw installed on the system.
"""
out = execute([[ufw_bin], ["--version"]])
lines = [x for x in out.split('\n') if x.strip() != '']
if len(lines) == 0:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
if matches is None:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
# Convert version to numbers
major = int(matches.group(1))
minor = int(matches.group(2))
rev = 0
if matches.group(3) is not None:
rev = int(matches.group(3))
return major, minor, rev
params = module.params
# Ensure at least one of the command arguments are given
command_keys = ['state', 'default', 'rule', 'logging']
commands = dict((key, params[key]) for key in command_keys if params[key])
if len(commands) < 1:
module.fail_json(msg="Not any of the command arguments %s given" % commands)
if (params['interface'] is not None and params['direction'] is None):
module.fail_json(msg="Direction must be specified when creating a rule on an interface")
# Ensure ufw is available
ufw_bin = module.get_bin_path('ufw', True)
grep_bin = module.get_bin_path('grep', True)
# Save the pre state and rules in order to recognize changes
pre_state = execute([[ufw_bin], ['status verbose']])
pre_rules = get_current_rules()
changed = False
# Execute filter
for (command, value) in commands.items():
cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
if command == 'state':
states = {'enabled': 'enable', 'disabled': 'disable',
'reloaded': 'reload', 'reset': 'reset'}
if value in ['reloaded', 'reset']:
changed = True
if module.check_mode:
# "active" would also match "inactive", hence the space
ufw_enabled = pre_state.find(" active") != -1
if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
changed = True
else:
execute(cmd + [['-f'], [states[value]]])
elif command == 'logging':
extract = re.search(r'Logging: (on|off) \(([a-z]+)\)', pre_state)
if extract:
current_level = extract.group(2)
current_on_off_value = extract.group(1)
if value != "off":
if value != "on" and (value != current_level or current_on_off_value == "off"):
changed = True
elif current_on_off_value != "off":
changed = True
else:
changed = True
if not module.check_mode:
execute(cmd + [[command], [value]])
elif command == 'default':
if params['direction'] in ['in', 'out', None]:
module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed".')
if module.check_mode:
regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
extract = re.search(regexp, pre_state)
if extract is not None:
current_default_values = {}
current_default_values["incoming"] = extract.group(1)
current_default_values["outgoing"] = extract.group(2)
current_default_values["routed"] = extract.group(3)
if current_default_values[params['direction']] != value:
changed = True
else:
changed = True
else:
execute(cmd + [[command], [value], [params['direction']]])
elif command == 'rule':
if params['direction'] in ['outgoing', 'incoming', 'routed']:
module.fail_json(msg='For rules, direction must be one of "in" and "out".')
# Rules are constructed according to the long format
#
# ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
# [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
# [proto protocol] [app application] [comment COMMENT]
cmd.append([module.boolean(params['route']), 'route'])
cmd.append([module.boolean(params['delete']), 'delete'])
cmd.append([params['insert'], "insert %s" % params['insert']])
cmd.append([value])
cmd.append([params['direction'], "%s" % params['direction']])
cmd.append([params['interface'], "on %s" % params['interface']])
cmd.append([module.boolean(params['log']), 'log'])
for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
('to_ip', "to %s"), ('to_port', "port %s"),
('proto', "proto %s"), ('app', "app '%s'")]:
value = params[key]
cmd.append([value, template % (value)])
ufw_major, ufw_minor, _ = ufw_version()
# comment is supported only in ufw version after 0.35
if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
cmd.append([params['comment'], "comment '%s'" % params['comment']])
rules_dry = execute(cmd)
if module.check_mode:
nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
# ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules
if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
changed = True
elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
changed = True
elif pre_rules != rules_dry:
changed = True
# Get the new state
if module.check_mode:
return module.exit_json(changed=changed, commands=cmds)
else:
post_state = execute([[ufw_bin], ['status'], ['verbose']])
if not changed:
post_rules = get_current_rules()
changed = (pre_state != post_state) or (pre_rules != post_rules)
return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
|
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
delete=dict(type='bool', default=False),
route=dict(type='bool', default=False),
insert=dict(type='str'),
rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
interface=dict(type='str', aliases=['if']),
log=dict(type='bool', default=False),
from_ip=dict(type='str', default='any', aliases=['from', 'src']),
from_port=dict(type='str'),
to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
to_port=dict(type='str', aliases=['port']),
proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp']),
app=dict(type='str', aliases=['name']),
comment=dict(type='str'),
),
supports_check_mode=True,
mutually_exclusive=[
['app', 'proto', 'logging']
],
)
cmds = []
ipv4_regexp = compile_ipv4_regexp()
ipv6_regexp = compile_ipv6_regexp()
def filter_line_that_not_start_with(pattern, content):
return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
def filter_line_that_contains(pattern, content):
return [line for line in content.splitlines(True) if pattern in line]
def filter_line_that_not_contains(pattern, content):
return ''.join([line for line in content.splitlines(True) if pattern not in line])
def filter_line_that_match_func(match_func, content):
return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
def filter_line_that_contains_ipv4(content):
return filter_line_that_match_func(ipv4_regexp.search, content)
def filter_line_that_contains_ipv6(content):
return filter_line_that_match_func(ipv6_regexp.search, content)
def is_starting_by_ipv4(ip):
return ipv4_regexp.match(ip) is not None
def is_starting_by_ipv6(ip):
return ipv6_regexp.match(ip) is not None
def execute(cmd, ignore_error=False):
cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
cmds.append(cmd)
(rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
if rc != 0 and not ignore_error:
module.fail_json(msg=err or out, commands=cmds)
return out
def get_current_rules():
user_rules_files = ["/lib/ufw/user.rules",
"/lib/ufw/user6.rules",
"/etc/ufw/user.rules",
"/etc/ufw/user6.rules",
"/var/lib/ufw/user.rules",
"/var/lib/ufw/user6.rules"]
cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
cmd.extend([[f] for f in user_rules_files])
return execute(cmd, ignore_error=True)
def ufw_version():
"""
Returns the major and minor version of ufw installed on the system.
"""
out = execute([[ufw_bin], ["--version"]])
lines = [x for x in out.split('\n') if x.strip() != '']
if len(lines) == 0:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
if matches is None:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
# Convert version to numbers
major = int(matches.group(1))
minor = int(matches.group(2))
rev = 0
if matches.group(3) is not None:
rev = int(matches.group(3))
return major, minor, rev
params = module.params
# Ensure at least one of the command arguments are given
command_keys = ['state', 'default', 'rule', 'logging']
commands = dict((key, params[key]) for key in command_keys if params[key])
if len(commands) < 1:
module.fail_json(msg="Not any of the command arguments %s given" % commands)
if (params['interface'] is not None and params['direction'] is None):
module.fail_json(msg="Direction must be specified when creating a rule on an interface")
# Ensure ufw is available
ufw_bin = module.get_bin_path('ufw', True)
grep_bin = module.get_bin_path('grep', True)
# Save the pre state and rules in order to recognize changes
pre_state = execute([[ufw_bin], ['status verbose']])
pre_rules = get_current_rules()
changed = False
# Execute filter
for (command, value) in commands.items():
cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
if command == 'state':
states = {'enabled': 'enable', 'disabled': 'disable',
'reloaded': 'reload', 'reset': 'reset'}
if value in ['reloaded', 'reset']:
changed = True
if module.check_mode:
# "active" would also match "inactive", hence the space
ufw_enabled = pre_state.find(" active") != -1
if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
changed = True
else:
execute(cmd + [['-f'], [states[value]]])
elif command == 'logging':
extract = re.search(r'Logging: (on|off) \(([a-z]+)\)', pre_state)
if extract:
current_level = extract.group(2)
current_on_off_value = extract.group(1)
if value != "off":
if value != "on" and (value != current_level or current_on_off_value == "off"):
changed = True
elif current_on_off_value != "off":
changed = True
else:
changed = True
if not module.check_mode:
execute(cmd + [[command], [value]])
elif command == 'default':
if params['direction'] in ['in', 'out', None]:
module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed".')
if module.check_mode:
regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
extract = re.search(regexp, pre_state)
if extract is not None:
current_default_values = {}
current_default_values["incoming"] = extract.group(1)
current_default_values["outgoing"] = extract.group(2)
current_default_values["routed"] = extract.group(3)
if current_default_values[params['direction']] != value:
changed = True
else:
changed = True
else:
execute(cmd + [[command], [value], [params['direction']]])
elif command == 'rule':
if params['direction'] not in ['in', 'out']:
module.fail_json(msg='For rules, direction must be one of "in" and "out".')
# Rules are constructed according to the long format
#
# ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
# [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
# [proto protocol] [app application] [comment COMMENT]
cmd.append([module.boolean(params['route']), 'route'])
cmd.append([module.boolean(params['delete']), 'delete'])
cmd.append([params['insert'], "insert %s" % params['insert']])
cmd.append([value])
cmd.append([params['direction'], "%s" % params['direction']])
cmd.append([params['interface'], "on %s" % params['interface']])
cmd.append([module.boolean(params['log']), 'log'])
for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
('to_ip', "to %s"), ('to_port', "port %s"),
('proto', "proto %s"), ('app', "app '%s'")]:
value = params[key]
cmd.append([value, template % (value)])
ufw_major, ufw_minor, _ = ufw_version()
# comment is supported only in ufw version after 0.35
if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
cmd.append([params['comment'], "comment '%s'" % params['comment']])
rules_dry = execute(cmd)
if module.check_mode:
nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
# ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules
if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
changed = True
elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
changed = True
elif pre_rules != rules_dry:
changed = True
# Get the new state
if module.check_mode:
return module.exit_json(changed=changed, commands=cmds)
else:
post_state = execute([[ufw_bin], ['status'], ['verbose']])
if not changed:
post_rules = get_current_rules()
changed = (pre_state != post_state) or (pre_rules != post_rules)
return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
|
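The execute() helper above builds shell commands from fragments whose last element is the text to emit and whose first element doubles as an on/off flag, so filter(itemgetter(0), ...) silently drops disabled pieces before joining. A standalone illustration of just that trick:

from operator import itemgetter

def build_cmd(fragments):
    return ' '.join(map(itemgetter(-1), filter(itemgetter(0), fragments)))

cmd = [['/usr/sbin/ufw'], [False, '--dry-run'], [True, 'allow'], ['in'], ['on eth0']]
print(build_cmd(cmd))   # '/usr/sbin/ufw allow in on eth0'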
43,007 |
def get_api_section_safely(loaded_config, filepath):
"""Gets the API section from the loaded configuration.
Args:
loaded_config (dict): the configuration that was loaded from the toml
file
filepath (str): path to the configuration file
Returns:
dict[str, Union[str, bool, int]]: the api section of the configuration
Raises:
ConfigurationError: if the api section was not defined in the
configuration
"""
try:
return loaded_config["api"]
except KeyError:
log = create_logger(__name__)
log.error(
"The configuration from the %s file does not 'contain an \"api\" section.'", filepath
)
raise ConfigurationError()
|
def get_api_section_safely(loaded_config, filepath):
"""Gets the API section from the loaded configuration.
Args:
loaded_config (dict): the configuration that was loaded from the TOML config
file
filepath (str): path to the configuration file
Returns:
dict[str, Union[str, bool, int]]: the api section of the configuration
Raises:
ConfigurationError: if the api section was not defined in the
configuration
"""
try:
return loaded_config["api"]
except KeyError:
log = create_logger(__name__)
log.error(
"The configuration from the %s file does not 'contain an \"api\" section.'", filepath
)
raise ConfigurationError()
|
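A self-contained variant of the lookup above, generalized to any section name and using raise ... from err so the original KeyError stays attached to the traceback; ConfigurationError here is a local stand-in for the project's class, not its real definition:

import logging

class ConfigurationError(Exception):
    """Stand-in for the project's configuration error."""

def get_section(loaded_config, section, filepath):
    try:
        return loaded_config[section]
    except KeyError as err:
        logging.getLogger(__name__).error(
            'The configuration from the %s file does not contain an "%s" section.',
            filepath, section)
        raise ConfigurationError() from err

print(get_section({"api": {"host": "127.0.0.1", "port": 8080}}, "api", "settings.toml"))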
20,402 |
def tools_migrations_list(pending=False, done=False):
"""
List existing migrations
"""
# Check for option conflict
if pending and done:
raise YunohostError("migrations_list_conflict_pending_done")
# Get all migrations
migrations = _get_migrations_list()
# Reduce to dictionaries
migrations = [{"id": migration.id,
"number": migration.number,
"name": migration.name,
"mode": migration.mode,
"state": migration.state,
"description": migration.description,
"disclaimer": migration.disclaimer} for migration in migrations]
# If asked, filter pending or done migrations
if pending or done:
if done:
migrations = [m for m in migrations if m["state"] != "pending"]
if pending:
migrations = [m for m in migrations if m["state"] == "pending"]
return {"migrations": migrations}
|
def tools_migrations_list(pending=False, done=False):
"""
List existing migrations
"""
# Check for option conflict
if pending and done:
raise YunohostError("migrations_list_conflict_pending_done")
# Get all migrations
migrations = _get_migrations_list()
# Reduce to dictionaries
migrations = [{"id": migration.id,
"number": migration.number,
"name": migration.name,
"mode": migration.mode,
"state": migration.state,
"description": migration.description,
"disclaimer": migration.disclaimer} for migration in migrations]
# If asked, filter pending or done migrations
if pending or done:
if done:
migrations = [m for m in migrations if m["state"] != "done"]
if pending:
migrations = [m for m in migrations if m["state"] == "pending"]
return {"migrations": migrations}
|
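For reference, a neutral standalone sketch of filtering migration summaries by state with plain list comprehensions (the state values here are illustrative):

migrations = [{"id": "0001_example", "state": "done"},
              {"id": "0002_example", "state": "pending"},
              {"id": "0003_example", "state": "skipped"}]
pending = [m for m in migrations if m["state"] == "pending"]
not_pending = [m for m in migrations if m["state"] != "pending"]
print([m["id"] for m in pending])       # ['0002_example']
print([m["id"] for m in not_pending])   # ['0001_example', '0003_example']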
39,359 |
def process_opacity(mesh, opacity, preference, n_colors, scalars, use_transparency):
"""Process opacity.
This function accepts an opacity string or array and always
returns an array that can be applied to a dataset for plotting.
Parameters
----------
mesh : pyvista.DataSet
Dataset to process the opacity for.
opacity : str, numpy.ndarray
String or array. If string, must be a cell or point data array.
preference : str, optional
When ``mesh.n_points == mesh.n_cells``, this parameter
sets how the scalars will be mapped to the mesh. Default
``'points'``, causes the scalars will be associated with
the mesh points. Can be either ``'points'`` or
``'cells'``.
n_colors : int, optional
Number of colors to use when displaying the opacity.
scalars : numpy.ndarray, optional
Dataset scalars.
use_transparency : bool, optional
Invert the opacity mappings and make the values correspond
to transparency.
Returns
-------
_custom_opac : bool
If using custom opacity.
opacity : numpy.ndarray
Array containing the opacity.
"""
_custom_opac = False
if isinstance(opacity, str):
try:
# Get array from mesh
opacity = get_array(mesh, opacity,
preference=preference, err=True)
if np.any(opacity > 1):
warnings.warn("Opacity scalars contain values over 1")
if np.any(opacity < 0):
warnings.warn("Opacity scalars contain values less than 0")
_custom_opac = True
except:
# Or get opacity transfer function
opacity = opacity_transfer_function(opacity, n_colors)
else:
if scalars.shape[0] != opacity.shape[0]:
raise ValueError(
"Opacity array and scalars array must have the same number "
"of elements."
)
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
if scalars.shape[0] == opacity.shape[0]:
# User could pass an array of opacities for every point/cell
_custom_opac = True
else:
opacity = opacity_transfer_function(opacity, n_colors)
if use_transparency and np.max(opacity) <= 1.0:
opacity = 1 - opacity
elif use_transparency and isinstance(opacity, np.ndarray):
opacity = 255 - opacity
return _custom_opac, opacity
|
def process_opacity(mesh, opacity, preference, n_colors, scalars, use_transparency):
"""Process opacity.
This function accepts an opacity string or array and always
returns an array that can be applied to a dataset for plotting.
Parameters
----------
mesh : pyvista.DataSet
Dataset to process the opacity for.
opacity : str, numpy.ndarray
String or array. If string, must be a cell or point data array.
preference : str, optional
When ``mesh.n_points == mesh.n_cells``, this parameter
sets how the scalars will be mapped to the mesh. Default
``'points'``, causes the scalars will be associated with
the mesh points. Can be either ``'points'`` or
``'cells'``.
n_colors : int, optional
Number of colors to use when displaying the opacity.
scalars : numpy.ndarray, optional
Dataset scalars.
use_transparency : bool, optional
Invert the opacity mappings and make the values correspond
to transparency.
Returns
-------
_custom_opac : bool
If using custom opacity.
opacity : numpy.ndarray
Array containing the opacity.
"""
_custom_opac = False
if isinstance(opacity, str):
try:
# Get array from mesh
opacity = get_array(mesh, opacity,
preference=preference, err=True)
if np.any(opacity > 1):
warnings.warn("Opacity scalars contain values over 1")
if np.any(opacity < 0):
warnings.warn("Opacity scalars contain values less than 0")
_custom_opac = True
except:
# Or get opacity transfer function
opacity = opacity_transfer_function(opacity, n_colors)
else:
if scalars.shape[0] != opacity.shape[0]:
raise ValueError(
"Opacity array and scalars array must have the same number "
"of elements."
)
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
if scalars.shape[0] == opacity.shape[0]:
# User could pass an array of opacities for every point/cell
_custom_opac = True
else:
opacity = opacity_transfer_function(opacity, n_colors)
if use_transparency and np.max(opacity) <= 1.0:
opacity = 1 - opacity
elif use_transparency and isinstance(opacity, np.ndarray):
opacity = 255 - opacity
return _custom_opac, opacity
|
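A tiny standalone sketch of the transparency inversion at the end of process_opacity: opacities already normalized to [0, 1] invert as 1 - o, while 8-bit arrays invert as 255 - o.

import numpy as np

def to_transparency(opacity):
    opacity = np.asarray(opacity)
    if opacity.max() <= 1.0:
        return 1 - opacity
    return 255 - opacity

print(to_transparency([0.0, 0.25, 1.0]))   # [1.   0.75 0.  ]
print(to_transparency([0, 128, 255]))      # [255 127   0]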
24,313 |
def update_link_metadata(checks, core_workflow=True):
root = get_root()
ensure_dir_exists(path_join(root, LINK_DIR))
# Sign only what affects each wheel
products = []
for check in checks:
products.append(path_join(check, 'datadog_checks'))
products.append(path_join(check, 'setup.py'))
if core_workflow:
key_id = get_key_id(GPG_COMMAND)
# Find this latest signed link metadata file on disk.
# NOTE: in-toto currently uses the first 8 characters of the signing keyId.
key_id_prefix = key_id[:8].lower()
tag_link = f'{STEP_NAME}.{key_id_prefix}.link'
options = {'gpg_keyid': key_id}
else:
signing_key_path = os.getenv('IN_TOTO_SIGNING_KEY_PATH', '')
signing_key = util.import_rsa_key_from_file(signing_key_path, os.getenv('IN_TOTO_SIGNING_KEY_PASSWORD'))
# NOTE: in-toto currently uses the first 8 characters of the signing keyId.
key_id_prefix = signing_key_path[:8].lower()
tag_link = f'{STEP_NAME}.{key_id_prefix}.link'
options = {'signing_key': signing_key}
# Final location of metadata file.
metadata_file = path_join(LINK_DIR, tag_link)
with chdir(root):
# We should ignore products untracked and ignored by git.
run_in_toto(products, **options)
# Check whether each signed product is being tracked AND ignored by git.
# NOTE: We have to check now *AFTER* signing the tag link file, so that
# we can check against the actual complete list of products.
with open(tag_link) as tag_json:
tag = json.load(tag_json)
products = tag['signed']['products']
for product in products:
# If NOT tracked...
if not tracked_by_git(product):
# First, delete the tag link off disk so as not to pollute.
os.remove(tag_link)
# AND NOT ignored, then it most likely means the developer
# forgot to add the file to git.
if not ignored_by_git(product):
raise NeitherTrackedNorIgnoredFileException(product)
# AND ignored, then it most likely means that incorrectly
# recorded with in-toto files ignored by git.
else:
raise UntrackedButIgnoredFileException(product)
# Move it to the expected location.
shutil.move(tag_link, metadata_file)
return (metadata_file,)
|
def update_link_metadata(checks, core_workflow=True):
root = get_root()
ensure_dir_exists(path_join(root, LINK_DIR))
# Sign only what affects each wheel
products = []
for check in checks:
products.append(path_join(check, 'datadog_checks'))
products.append(path_join(check, 'setup.py'))
if core_workflow:
key_id = get_key_id(GPG_COMMAND)
# Find this latest signed link metadata file on disk.
# NOTE: in-toto currently uses the first 8 characters of the signing keyId.
key_id_prefix = key_id[:8].lower()
tag_link = f'{STEP_NAME}.{key_id_prefix}.link'
options = {'gpg_keyid': key_id}
else:
signing_key_path = os.getenv('IN_TOTO_SIGNING_KEY_PATH', '')
signing_key = util.import_rsa_key_from_file(signing_key_path, os.getenv('IN_TOTO_SIGNING_KEY_PASSWORD'))
# NOTE: in-toto currently uses the first 8 characters of the signing keyID, the latter of which we assume is the key filename.
key_id_prefix = signing_key_path[:8].lower()
tag_link = f'{STEP_NAME}.{key_id_prefix}.link'
options = {'signing_key': signing_key}
# Final location of metadata file.
metadata_file = path_join(LINK_DIR, tag_link)
with chdir(root):
# We should ignore products untracked and ignored by git.
run_in_toto(products, **options)
# Check whether each signed product is being tracked AND ignored by git.
# NOTE: We have to check now *AFTER* signing the tag link file, so that
# we can check against the actual complete list of products.
with open(tag_link) as tag_json:
tag = json.load(tag_json)
products = tag['signed']['products']
for product in products:
# If NOT tracked...
if not tracked_by_git(product):
# First, delete the tag link off disk so as not to pollute.
os.remove(tag_link)
# AND NOT ignored, then it most likely means the developer
# forgot to add the file to git.
if not ignored_by_git(product):
raise NeitherTrackedNorIgnoredFileException(product)
# AND ignored, then it most likely means that incorrectly
# recorded with in-toto files ignored by git.
else:
raise UntrackedButIgnoredFileException(product)
# Move it to the expected location.
shutil.move(tag_link, metadata_file)
return (metadata_file,)
|
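The git bookkeeping above reduces to a three-way classification of each signed product; a hedged sketch with stand-in sets in place of the real tracked_by_git/ignored_by_git helpers:

def classify_product(product, tracked, ignored):
    if product in tracked:
        return 'ok'                           # signed and tracked by git
    if product in ignored:
        return 'untracked-but-ignored'        # signed although git ignores it
    return 'neither-tracked-nor-ignored'      # most likely a forgotten `git add`

tracked = {'my_check/setup.py', 'my_check/datadog_checks'}
print(classify_product('my_check/setup.py', tracked, ignored=set()))                        # 'ok'
print(classify_product('my_check/build/tmp.o', tracked, ignored={'my_check/build/tmp.o'}))  # 'untracked-but-ignored'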
23,077 |
def order(dsk, dependencies=None):
"""Order nodes in dask graph
This produces an ordering over our tasks that we use to break ties when
executing. We do this ahead of time to reduce a bit of stress on the
scheduler and also to assist in static analysis.
This currently traverses the graph as a single-threaded scheduler would
traverse it. It breaks ties in the following ways:
1. Begin at a leaf node that is a dependency of a root node that has the
largest subgraph (start hard things first)
2. Prefer tall branches with few dependents (start hard things first and
try to avoid memory usage)
3. Prefer dependents that are dependencies of root nodes that have
the smallest subgraph (do small goals that can terminate quickly)
Examples
--------
>>> dsk = {'a': 1, 'b': 2, 'c': (inc, 'a'), 'd': (add, 'b', 'c')}
>>> order(dsk)
{'a': 0, 'c': 1, 'b': 2, 'd': 3}
"""
if not dsk:
return {}
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k) for k in dsk}
dependents = reverse_dict(dependencies)
num_needed, total_dependencies = ndependencies(dependencies, dependents)
metrics = graph_metrics(dependencies, dependents, total_dependencies)
if len(metrics) != len(dsk):
cycle = getcycle(dsk, None)
raise RuntimeError(
"Cycle detected between the following keys:\n -> %s"
% "\n -> ".join(str(x) for x in cycle)
)
# Single root nodes that depend on everything. These cause issues for
# the current ordering algorithm, since we often hit the root node
# and fell back to the key tie-breaker to choose which immediate dependency
# to finish next, rather than finishing off subtrees.
# So under the special case of a single root node that depends on the entire
# tree, we skip processing it normally.
# See https://github.com/dask/dask/issues/6745
root_nodes = {k for k in metrics if metrics[k][1] == total_dependencies[k]}
skip_root_node = len(root_nodes) == 1 and len(dsk) > 1
# Leaf nodes. We choose one--the initial node--for each weakly connected subgraph.
# Let's calculate the `initial_stack_key` as we determine `init_stack` set.
init_stack = {
# First prioritize large, tall groups, then prioritize the same as ``dependents_key``.
key: (
# at a high-level, work towards a large goal (and prefer tall and narrow)
-max_dependencies,
num_dependents - max_heights,
# tactically, finish small connected jobs first
min_dependencies,
num_dependents - min_heights, # prefer tall and narrow
-total_dependents, # take a big step
# try to be memory efficient
num_dependents,
# tie-breaker
StrComparable(key),
)
for key, num_dependents, (
total_dependents,
min_dependencies,
max_dependencies,
min_heights,
max_heights,
) in (
(key, len(dependents[key]), metrics[key])
for key, val in dependencies.items()
if not val
)
}
# `initial_stack_key` chooses which task to run at the very beginning.
# This value is static, so we pre-compute as the value of this dict.
initial_stack_key = init_stack.__getitem__
def dependents_key(x):
"""Choose a path from our starting task to our tactical goal
This path is connected to a large goal, but focuses on completing
a small goal and being memory efficient.
"""
return (
# Focus on being memory-efficient
len(dependents[x]) - len(dependencies[x]) + num_needed[x],
-metrics[x][3], # min_heights
# tie-breaker
StrComparable(x),
)
def dependencies_key(x):
"""Choose which dependency to run as part of a reverse DFS
This is very similar to both ``initial_stack_key``.
"""
num_dependents = len(dependents[x])
(
total_dependents,
min_dependencies,
max_dependencies,
min_heights,
max_heights,
) = metrics[x]
# Prefer short and narrow instead of tall in narrow, because we're going in
# reverse along dependencies.
return (
# at a high-level, work towards a large goal (and prefer short and narrow)
-max_dependencies,
num_dependents + max_heights,
# tactically, finish small connected jobs first
min_dependencies,
num_dependents + min_heights, # prefer short and narrow
-total_dependencies[x], # go where the work is
# try to be memory efficient
num_dependents - len(dependencies[x]) + num_needed[x],
num_dependents,
total_dependents, # already found work, so don't add more
# tie-breaker
StrComparable(x),
)
def finish_now_key(x):
""" Determine the order of dependents that are ready to run and be released"""
return (-len(dependencies[x]), StrComparable(x))
# Computing this for all keys can sometimes be relatively expensive :(
partition_keys = {
key: (
(min_dependencies - total_dependencies[key] + 1)
* (total_dependents - min_heights)
)
for key, (
total_dependents,
min_dependencies,
_,
min_heights,
_,
) in metrics.items()
}
result = {}
i = 0
# `inner_stask` is used to perform a DFS along dependencies. Once emptied
# (when traversing dependencies), this continue down a path along dependents
# until a root node is reached.
#
# Sometimes, a better path along a dependent is discovered (i.e., something
# that is easier to compute and doesn't require holding too much in memory).
# In this case, the current `inner_stack` is appended to `inner_stacks` and
# we begin a new DFS from the better node.
#
# A "better path" is determined by comparing `partition_keys`.
inner_stacks = [[min(init_stack, key=initial_stack_key)]]
inner_stacks_append = inner_stacks.append
inner_stacks_extend = inner_stacks.extend
inner_stacks_pop = inner_stacks.pop
# Okay, now we get to the data structures used for fancy behavior.
#
# As we traverse nodes in the DFS along dependencies, we partition the dependents
# via `partition_key`. A dependent goes to:
# 1) `inner_stack` if it's better than our current target,
# 2) `next_nodes` if the partition key is lower than its parent,
# 3) `later_nodes` otherwise.
# When the inner stacks are depleted, we process `next_nodes`. If `next_nodes` is
# empty (and `outer_stacks` is empty`), then we process `later_nodes` the same way.
# These dicts use `partition_keys` as keys. We process them by placing the values
# in `outer_stack` so that the smallest keys will be processed first.
next_nodes = defaultdict(list)
later_nodes = defaultdict(list)
# `outer_stack` is used to populate `inner_stacks`. From the time we partition the
# dependents of a node, we group them: one list per partition key per parent node.
# This likely results in many small lists. We do this to avoid sorting many larger
# lists (i.e., to avoid n*log(n) behavior). So, we have many small lists that we
# partitioned, and we keep them in the order that we saw them (we will process them
# in a FIFO manner). By delaying sorting for as long as we can, we can first filter
# out nodes that have already been computed. All this complexity is worth it!
outer_stack = []
outer_stack_extend = outer_stack.extend
outer_stack_pop = outer_stack.pop
# Keep track of nodes that are in `inner_stack` or `inner_stacks` so we don't
# process them again.
seen = set() # seen in an inner_stack (and has dependencies)
seen_update = seen.update
seen_add = seen.add
# alias for speed
set_difference = set.difference
is_init_sorted = False
while True:
while inner_stacks:
inner_stack = inner_stacks_pop()
inner_stack_pop = inner_stack.pop
while inner_stack:
# Perform a DFS along dependencies until we complete our tactical goal
item = inner_stack_pop()
if item in result:
continue
if skip_root_node and item in root_nodes:
continue
if num_needed[item]:
inner_stack.append(item)
deps = set_difference(dependencies[item], result)
if 1 < len(deps) < 1000:
inner_stack.extend(
sorted(deps, key=dependencies_key, reverse=True)
)
else:
inner_stack.extend(deps)
seen_update(deps)
continue
result[item] = i
i += 1
deps = dependents[item]
# If inner_stack is empty, then we typically add the best dependent to it.
# However, we don't add to it if we complete a node early via "finish_now" below
# or if a dependent is already on an inner_stack. In this case, we add the
# dependents (not in an inner_stack) to next_nodes or later_nodes to handle later.
# This serves three purposes:
# 1. shrink `deps` so that it can be processed faster,
# 2. make sure we don't process the same dependency repeatedly, and
# 3. make sure we don't accidentally continue down an expensive-to-compute path.
add_to_inner_stack = True
if metrics[item][3] == 1: # min_height
# Don't leave any dangling single nodes! Finish all dependents that are
# ready and are also root nodes.
finish_now = {
dep
for dep in deps
if not dependents[dep] and num_needed[dep] == 1
}
if finish_now:
deps -= finish_now # Safe to mutate
if len(finish_now) > 1:
finish_now = sorted(finish_now, key=finish_now_key)
for dep in finish_now:
result[dep] = i
i += 1
add_to_inner_stack = False
if deps:
for dep in deps:
num_needed[dep] -= 1
already_seen = deps & seen
if already_seen:
if len(deps) == len(already_seen):
continue
add_to_inner_stack = False
deps -= already_seen
if len(deps) == 1:
# Fast path! We trim down `deps` above hoping to reach here.
(dep,) = deps
if not inner_stack:
if add_to_inner_stack:
inner_stack = [dep]
inner_stack_pop = inner_stack.pop
seen_add(dep)
continue
key = partition_keys[dep]
else:
key = partition_keys[dep]
if key < partition_keys[inner_stack[0]]:
# Run before `inner_stack` (change tactical goal!)
inner_stacks_append(inner_stack)
inner_stack = [dep]
inner_stack_pop = inner_stack.pop
seen_add(dep)
continue
if key < partition_keys[item]:
next_nodes[key].append(deps)
else:
later_nodes[key].append(deps)
else:
# Slow path :(. This requires grouping by partition_key.
dep_pools = defaultdict(list)
for dep in deps:
dep_pools[partition_keys[dep]].append(dep)
item_key = partition_keys[item]
if inner_stack:
# If we have an inner_stack, we need to look for a "better" path
prev_key = partition_keys[inner_stack[0]]
now_keys = [] # < inner_stack[0]
for key, vals in dep_pools.items():
if key < prev_key:
now_keys.append(key)
elif key < item_key:
next_nodes[key].append(vals)
else:
later_nodes[key].append(vals)
if now_keys:
# Run before `inner_stack` (change tactical goal!)
inner_stacks_append(inner_stack)
if 1 < len(now_keys):
now_keys.sort(reverse=True)
for key in now_keys:
pool = dep_pools[key]
if 1 < len(pool) < 100:
pool.sort(key=dependents_key, reverse=True)
inner_stacks_extend([dep] for dep in pool)
seen_update(pool)
inner_stack = inner_stacks_pop()
inner_stack_pop = inner_stack.pop
else:
# If we don't have an inner_stack, then we don't need to look
# for a "better" path, but we do need traverse along dependents.
if add_to_inner_stack:
min_key = min(dep_pools)
min_pool = dep_pools.pop(min_key)
if len(min_pool) == 1:
inner_stack = min_pool
seen_update(inner_stack)
elif (
10 * item_key
> 11 * len(min_pool) * len(min_pool) * min_key
):
# Put all items in min_pool onto inner_stacks.
# I know this is a weird comparison. Hear me out.
# Although it is often beneficial to put all of the items in `min_pool`
# onto `inner_stacks` to process next, it is very easy to be overzealous.
# Sometimes it is actually better to defer until `next_nodes` is handled.
# We should only put items onto `inner_stacks` that we're reasonably
# confident about. The above formula is a best effort heuristic given
# what we have easily available. It is obviously very specific to our
# choice of partition_key. Dask tests take this route about 40%.
if len(min_pool) < 100:
min_pool.sort(key=dependents_key, reverse=True)
inner_stacks_extend([dep] for dep in min_pool)
inner_stack = inner_stacks_pop()
seen_update(min_pool)
else:
# Put one item in min_pool onto inner_stack and the rest into next_nodes.
if len(min_pool) < 100:
inner_stack = [
min(min_pool, key=dependents_key)
]
else:
inner_stack = [min_pool.pop()]
next_nodes[min_key].append(min_pool)
seen_update(inner_stack)
inner_stack_pop = inner_stack.pop
for key, vals in dep_pools.items():
if key < item_key:
next_nodes[key].append(vals)
else:
later_nodes[key].append(vals)
if len(dependencies) == len(result):
break # all done!
if next_nodes:
for key in sorted(next_nodes, reverse=True):
# `outer_stacks` may not be empty here--it has data from previous `next_nodes`.
# Since we pop things off of it (onto `inner_nodes`), this means we handle
# multiple `next_nodes` in a LIFO manner.
outer_stack_extend(reversed(next_nodes[key]))
next_nodes = defaultdict(list)
while outer_stack:
# Try to add a few items to `inner_stacks`
deps = [x for x in outer_stack_pop() if x not in result]
if deps:
if 1 < len(deps) < 100:
deps.sort(key=dependents_key, reverse=True)
inner_stacks_extend([dep] for dep in deps)
seen_update(deps)
break
if inner_stacks:
continue
if later_nodes:
# You know all those dependents with large keys we've been hanging onto to run "later"?
# Well, "later" has finally come.
next_nodes, later_nodes = later_nodes, next_nodes
continue
# We just finished computing a connected group.
# Let's choose the first `item` in the next group to compute.
# If we have few large groups left, then it's best to find `item` by taking a minimum.
# If we have many small groups left, then it's best to sort.
# If we have many tiny groups left, then it's best to simply iterate.
if not is_init_sorted:
prev_len = len(init_stack)
if type(init_stack) is dict:
init_stack = set(init_stack)
init_stack = set_difference(init_stack, result)
N = len(init_stack)
m = prev_len - N
# is `min` likely better than `sort`?
if m >= N or N + (N - m) * log(N - m) < N * log(N):
item = min(init_stack, key=initial_stack_key)
continue
if len(init_stack) < 10000:
init_stack = sorted(init_stack, key=initial_stack_key, reverse=True)
else:
init_stack = list(init_stack)
init_stack_pop = init_stack.pop
is_init_sorted = True
while item in result:
item = init_stack_pop()
inner_stacks_append([item])
return result
|
def order(dsk, dependencies=None):
"""Order nodes in dask graph
This produces an ordering over our tasks that we use to break ties when
executing. We do this ahead of time to reduce a bit of stress on the
scheduler and also to assist in static analysis.
This currently traverses the graph as a single-threaded scheduler would
traverse it. It breaks ties in the following ways:
1. Begin at a leaf node that is a dependency of a root node that has the
largest subgraph (start hard things first)
2. Prefer tall branches with few dependents (start hard things first and
try to avoid memory usage)
3. Prefer dependents that are dependencies of root nodes that have
the smallest subgraph (do small goals that can terminate quickly)
Examples
--------
>>> dsk = {'a': 1, 'b': 2, 'c': (inc, 'a'), 'd': (add, 'b', 'c')}
>>> order(dsk)
{'a': 0, 'c': 1, 'b': 2, 'd': 3}
"""
if not dsk:
return {}
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k) for k in dsk}
dependents = reverse_dict(dependencies)
num_needed, total_dependencies = ndependencies(dependencies, dependents)
metrics = graph_metrics(dependencies, dependents, total_dependencies)
if len(metrics) != len(dsk):
cycle = getcycle(dsk, None)
raise RuntimeError(
"Cycle detected between the following keys:\n -> %s"
% "\n -> ".join(str(x) for x in cycle)
)
# Single root nodes that depend on everything. These cause issues for
# the current ordering algorithm, since we often hit the root node
# and fall back to the key tie-breaker to choose which immediate dependency
# to finish next, rather than finishing off subtrees.
# So under the special case of a single root node that depends on the entire
# tree, we skip processing it normally.
# See https://github.com/dask/dask/issues/6745
root_nodes = {k for k, v in dependents.items() if not v}
skip_root_node = len(root_nodes) == 1 and len(dsk) > 1
# Leaf nodes. We choose one--the initial node--for each weakly connected subgraph.
# Let's calculate the `initial_stack_key` as we determine `init_stack` set.
init_stack = {
# First prioritize large, tall groups, then prioritize the same as ``dependents_key``.
key: (
# at a high-level, work towards a large goal (and prefer tall and narrow)
-max_dependencies,
num_dependents - max_heights,
# tactically, finish small connected jobs first
min_dependencies,
num_dependents - min_heights, # prefer tall and narrow
-total_dependents, # take a big step
# try to be memory efficient
num_dependents,
# tie-breaker
StrComparable(key),
)
for key, num_dependents, (
total_dependents,
min_dependencies,
max_dependencies,
min_heights,
max_heights,
) in (
(key, len(dependents[key]), metrics[key])
for key, val in dependencies.items()
if not val
)
}
# `initial_stack_key` chooses which task to run at the very beginning.
# This value is static, so we pre-compute as the value of this dict.
initial_stack_key = init_stack.__getitem__
def dependents_key(x):
"""Choose a path from our starting task to our tactical goal
This path is connected to a large goal, but focuses on completing
a small goal and being memory efficient.
"""
return (
# Focus on being memory-efficient
len(dependents[x]) - len(dependencies[x]) + num_needed[x],
-metrics[x][3], # min_heights
# tie-breaker
StrComparable(x),
)
def dependencies_key(x):
"""Choose which dependency to run as part of a reverse DFS
This is very similar to ``initial_stack_key``.
"""
num_dependents = len(dependents[x])
(
total_dependents,
min_dependencies,
max_dependencies,
min_heights,
max_heights,
) = metrics[x]
# Prefer short and narrow instead of tall and narrow, because we're going in
# reverse along dependencies.
return (
# at a high-level, work towards a large goal (and prefer short and narrow)
-max_dependencies,
num_dependents + max_heights,
# tactically, finish small connected jobs first
min_dependencies,
num_dependents + min_heights, # prefer short and narrow
-total_dependencies[x], # go where the work is
# try to be memory efficient
num_dependents - len(dependencies[x]) + num_needed[x],
num_dependents,
total_dependents, # already found work, so don't add more
# tie-breaker
StrComparable(x),
)
def finish_now_key(x):
""" Determine the order of dependents that are ready to run and be released"""
return (-len(dependencies[x]), StrComparable(x))
# Computing this for all keys can sometimes be relatively expensive :(
partition_keys = {
key: (
(min_dependencies - total_dependencies[key] + 1)
* (total_dependents - min_heights)
)
for key, (
total_dependents,
min_dependencies,
_,
min_heights,
_,
) in metrics.items()
}
result = {}
i = 0
# `inner_stack` is used to perform a DFS along dependencies. Once emptied
# (when traversing dependencies), this continues down a path along dependents
# until a root node is reached.
#
# Sometimes, a better path along a dependent is discovered (i.e., something
# that is easier to compute and doesn't require holding too much in memory).
# In this case, the current `inner_stack` is appended to `inner_stacks` and
# we begin a new DFS from the better node.
#
# A "better path" is determined by comparing `partition_keys`.
inner_stacks = [[min(init_stack, key=initial_stack_key)]]
inner_stacks_append = inner_stacks.append
inner_stacks_extend = inner_stacks.extend
inner_stacks_pop = inner_stacks.pop
# Okay, now we get to the data structures used for fancy behavior.
#
# As we traverse nodes in the DFS along dependencies, we partition the dependents
# via `partition_key`. A dependent goes to:
# 1) `inner_stack` if it's better than our current target,
# 2) `next_nodes` if the partition key is lower than its parent,
# 3) `later_nodes` otherwise.
# When the inner stacks are depleted, we process `next_nodes`. If `next_nodes` is
# empty (and `outer_stack` is empty), then we process `later_nodes` the same way.
# These dicts use `partition_keys` as keys. We process them by placing the values
# in `outer_stack` so that the smallest keys will be processed first.
next_nodes = defaultdict(list)
later_nodes = defaultdict(list)
# `outer_stack` is used to populate `inner_stacks`. From the time we partition the
# dependents of a node, we group them: one list per partition key per parent node.
# This likely results in many small lists. We do this to avoid sorting many larger
# lists (i.e., to avoid n*log(n) behavior). So, we have many small lists that we
# partitioned, and we keep them in the order that we saw them (we will process them
# in a FIFO manner). By delaying sorting for as long as we can, we can first filter
# out nodes that have already been computed. All this complexity is worth it!
outer_stack = []
outer_stack_extend = outer_stack.extend
outer_stack_pop = outer_stack.pop
# Keep track of nodes that are in `inner_stack` or `inner_stacks` so we don't
# process them again.
seen = set() # seen in an inner_stack (and has dependencies)
seen_update = seen.update
seen_add = seen.add
# alias for speed
set_difference = set.difference
is_init_sorted = False
while True:
while inner_stacks:
inner_stack = inner_stacks_pop()
inner_stack_pop = inner_stack.pop
while inner_stack:
# Perform a DFS along dependencies until we complete our tactical goal
item = inner_stack_pop()
if item in result:
continue
if skip_root_node and item in root_nodes:
continue
if num_needed[item]:
inner_stack.append(item)
deps = set_difference(dependencies[item], result)
if 1 < len(deps) < 1000:
inner_stack.extend(
sorted(deps, key=dependencies_key, reverse=True)
)
else:
inner_stack.extend(deps)
seen_update(deps)
continue
result[item] = i
i += 1
deps = dependents[item]
# If inner_stack is empty, then we typically add the best dependent to it.
# However, we don't add to it if we complete a node early via "finish_now" below
# or if a dependent is already on an inner_stack. In this case, we add the
# dependents (not in an inner_stack) to next_nodes or later_nodes to handle later.
# This serves three purposes:
# 1. shrink `deps` so that it can be processed faster,
# 2. make sure we don't process the same dependency repeatedly, and
# 3. make sure we don't accidentally continue down an expensive-to-compute path.
add_to_inner_stack = True
if metrics[item][3] == 1: # min_height
# Don't leave any dangling single nodes! Finish all dependents that are
# ready and are also root nodes.
finish_now = {
dep
for dep in deps
if not dependents[dep] and num_needed[dep] == 1
}
if finish_now:
deps -= finish_now # Safe to mutate
if len(finish_now) > 1:
finish_now = sorted(finish_now, key=finish_now_key)
for dep in finish_now:
result[dep] = i
i += 1
add_to_inner_stack = False
if deps:
for dep in deps:
num_needed[dep] -= 1
already_seen = deps & seen
if already_seen:
if len(deps) == len(already_seen):
continue
add_to_inner_stack = False
deps -= already_seen
if len(deps) == 1:
# Fast path! We trim down `deps` above hoping to reach here.
(dep,) = deps
if not inner_stack:
if add_to_inner_stack:
inner_stack = [dep]
inner_stack_pop = inner_stack.pop
seen_add(dep)
continue
key = partition_keys[dep]
else:
key = partition_keys[dep]
if key < partition_keys[inner_stack[0]]:
# Run before `inner_stack` (change tactical goal!)
inner_stacks_append(inner_stack)
inner_stack = [dep]
inner_stack_pop = inner_stack.pop
seen_add(dep)
continue
if key < partition_keys[item]:
next_nodes[key].append(deps)
else:
later_nodes[key].append(deps)
else:
# Slow path :(. This requires grouping by partition_key.
dep_pools = defaultdict(list)
for dep in deps:
dep_pools[partition_keys[dep]].append(dep)
item_key = partition_keys[item]
if inner_stack:
# If we have an inner_stack, we need to look for a "better" path
prev_key = partition_keys[inner_stack[0]]
now_keys = [] # < inner_stack[0]
for key, vals in dep_pools.items():
if key < prev_key:
now_keys.append(key)
elif key < item_key:
next_nodes[key].append(vals)
else:
later_nodes[key].append(vals)
if now_keys:
# Run before `inner_stack` (change tactical goal!)
inner_stacks_append(inner_stack)
if 1 < len(now_keys):
now_keys.sort(reverse=True)
for key in now_keys:
pool = dep_pools[key]
if 1 < len(pool) < 100:
pool.sort(key=dependents_key, reverse=True)
inner_stacks_extend([dep] for dep in pool)
seen_update(pool)
inner_stack = inner_stacks_pop()
inner_stack_pop = inner_stack.pop
else:
# If we don't have an inner_stack, then we don't need to look
# for a "better" path, but we do need traverse along dependents.
if add_to_inner_stack:
min_key = min(dep_pools)
min_pool = dep_pools.pop(min_key)
if len(min_pool) == 1:
inner_stack = min_pool
seen_update(inner_stack)
elif (
10 * item_key
> 11 * len(min_pool) * len(min_pool) * min_key
):
# Put all items in min_pool onto inner_stacks.
# I know this is a weird comparison. Hear me out.
# Although it is often beneficial to put all of the items in `min_pool`
# onto `inner_stacks` to process next, it is very easy to be overzealous.
# Sometimes it is actually better to defer until `next_nodes` is handled.
# We should only put items onto `inner_stacks` that we're reasonably
# confident about. The above formula is a best effort heuristic given
# what we have easily available. It is obviously very specific to our
# choice of partition_key. Dask tests take this route about 40% of the time.
if len(min_pool) < 100:
min_pool.sort(key=dependents_key, reverse=True)
inner_stacks_extend([dep] for dep in min_pool)
inner_stack = inner_stacks_pop()
seen_update(min_pool)
else:
# Put one item in min_pool onto inner_stack and the rest into next_nodes.
if len(min_pool) < 100:
inner_stack = [
min(min_pool, key=dependents_key)
]
else:
inner_stack = [min_pool.pop()]
next_nodes[min_key].append(min_pool)
seen_update(inner_stack)
inner_stack_pop = inner_stack.pop
for key, vals in dep_pools.items():
if key < item_key:
next_nodes[key].append(vals)
else:
later_nodes[key].append(vals)
if len(dependencies) == len(result):
break # all done!
if next_nodes:
for key in sorted(next_nodes, reverse=True):
# `outer_stack` may not be empty here--it has data from previous `next_nodes`.
# Since we pop things off of it (onto `inner_stacks`), this means we handle
# multiple `next_nodes` in a LIFO manner.
outer_stack_extend(reversed(next_nodes[key]))
next_nodes = defaultdict(list)
while outer_stack:
# Try to add a few items to `inner_stacks`
deps = [x for x in outer_stack_pop() if x not in result]
if deps:
if 1 < len(deps) < 100:
deps.sort(key=dependents_key, reverse=True)
inner_stacks_extend([dep] for dep in deps)
seen_update(deps)
break
if inner_stacks:
continue
if later_nodes:
# You know all those dependents with large keys we've been hanging onto to run "later"?
# Well, "later" has finally come.
next_nodes, later_nodes = later_nodes, next_nodes
continue
# We just finished computing a connected group.
# Let's choose the first `item` in the next group to compute.
# If we have few large groups left, then it's best to find `item` by taking a minimum.
# If we have many small groups left, then it's best to sort.
# If we have many tiny groups left, then it's best to simply iterate.
if not is_init_sorted:
prev_len = len(init_stack)
if type(init_stack) is dict:
init_stack = set(init_stack)
init_stack = set_difference(init_stack, result)
N = len(init_stack)
m = prev_len - N
# is `min` likely better than `sort`?
if m >= N or N + (N - m) * log(N - m) < N * log(N):
item = min(init_stack, key=initial_stack_key)
continue
if len(init_stack) < 10000:
init_stack = sorted(init_stack, key=initial_stack_key, reverse=True)
else:
init_stack = list(init_stack)
init_stack_pop = init_stack.pop
is_init_sorted = True
while item in result:
item = init_stack_pop()
inner_stacks_append([item])
return result
|
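The comments in the `order` code above describe inverting `dependencies` into `dependents` (via `reverse_dict`) and then pooling the dependents of a finished task by `partition_keys`. The following is a minimal, self-contained sketch of that bookkeeping; the four-task graph and `toy_partition_key` are made up for illustration and are not dask's metric-based key.
from collections import defaultdict
# Toy graph: task -> set of tasks it depends on.
dependencies = {
    "a": set(), "b": set(),
    "c": {"a"}, "d": {"b", "c"},
}
# Invert dependencies into dependents (the role `reverse_dict` plays above).
dependents = {task: set() for task in dependencies}
for task, deps in dependencies.items():
    for dep in deps:
        dependents[dep].add(task)
def toy_partition_key(task):
    # Stand-in for the metric-based `partition_keys`: fewer dependents -> smaller key.
    return len(dependents[task])
# Pool the dependents of one finished task by key, mirroring `dep_pools` above.
item = "a"
dep_pools = defaultdict(list)
for dep in dependents[item]:
    dep_pools[toy_partition_key(dep)].append(dep)
print(dependents)       # {'a': {'c'}, 'b': {'d'}, 'c': {'d'}, 'd': set()}
print(dict(dep_pools))  # {1: ['c']}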
23,059 |
def test_map_overlap_multiarray_block_broadcast():
def func(x, y):
# Return result with expected padding
z = x.size + y.size
return np.ones((3, 3)) * z
# Chunks in trailing dimension will be unified to two chunks of size 6
# and block broadcast will allow chunks from x to repeat
x = da.ones((12,), chunks=12) # numblocks = (1,) -> (2, 2) after broadcast
y = da.ones((16, 12), chunks=(8, 6)) # numblocks = (2, 2)
z = da.map_overlap(func, x, y, chunks=(3, 3), depth=1, trim=True)
assert_eq(z.compute().shape, z.shape)
assert_eq(z.shape, (2, 2))
# func call will receive (8,) and (10, 8) arrays for each of 4 blocks
assert_eq(z.sum().compute(), 4 * (10 * 8 + 8))
|
def test_map_overlap_multiarray_block_broadcast():
def func(x, y):
# Return result with expected padding
z = x.size + y.size
return np.ones((3, 3)) * z
# Chunks in trailing dimension will be unified to two chunks of size 6
# and block broadcast will allow chunks from x to repeat
x = da.ones((12,), chunks=12) # numblocks = (1,) -> (2, 2) after broadcast
y = da.ones((16, 12), chunks=(8, 6)) # numblocks = (2, 2)
z = da.map_overlap(func, x, y, chunks=(3, 3), depth=1, trim=True)
assert_eq(z.compute().shape, z.shape)
assert_eq(z.shape, (2, 2))
# func call will receive (8,) and (10, 8) arrays for each of 4 blocks
assert_eq(z.sum(), 4 * (10 * 8 + 8))
|
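The comments in the test above state that with depth=1 each call to func receives an (8,) slice of x and a (10, 8) slice of y, across four blocks. A quick plain-Python check of that arithmetic, using only the chunk sizes given in the test's own comments:
depth = 1
x_chunk = 6                                       # trailing chunks unified to size 6
y_chunk = (8, 6)                                  # chunks of the (16, 12) array
x_padded = x_chunk + 2 * depth                    # 6 -> 8
y_padded = tuple(c + 2 * depth for c in y_chunk)  # (8, 6) -> (10, 8)
per_block = y_padded[0] * y_padded[1] + x_padded  # x.size + y.size seen by func
assert (x_padded, y_padded) == (8, (10, 8))
assert 4 * per_block == 4 * (10 * 8 + 8)          # the value asserted in the test
print(per_block, 4 * per_block)                   # 88 352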
44,400 |
def binary_encoding(
shape: Union[DiscreteHilbert, Tuple[int, ...]],
x: Array,
*,
max_bits: Optional[int] = None,
doubled: bool = False,
) -> Array:
"""
Encodes the array `x` into a set of binary-encoded variables described by shape.
"""
if isinstance(shape, DiscreteHilbert):
shape = shape.shape
jax.core.concrete_or_error(None, shape, "Shape must be known statically")
max_bits, output_idx = _get_output_idx(shape, max_bits, doubled)
return _binarise(x, max_bits, output_idx)
|
def binary_encoding(
shape: Union[DiscreteHilbert, Tuple[int, ...]],
x: Array,
*,
max_bits: Optional[int] = None,
doubled: bool = False,
) -> Array:
"""
Encodes the array `x` into a set of binary-encoded variables described by shape.
"""
if isinstance(shape, DiscreteHilbert):
shape = shape.shape
jax.core.concrete_or_error(None, shape, "Shape must be known statically")
output_idx, max_bit = _get_output_idx(shape, max_bits, doubled)
return _binarise(x, max_bits, output_idx)
|
33,520 |
def import_api_from_openapi_spec(
rest_api: apigateway_models.RestAPI, body: Dict, query_params: Dict
) -> apigateway_models.RestAPI:
"""Import an API from an OpenAPI spec document"""
resolved_schema = resolve_references(body)
# XXX for some reason this makes cf tests fail, that's why it is commented.
# test_cfn_handle_serverless_api_resource
# rest_api.name = resolved_schema.get("info", {}).get("title")
rest_api.description = resolved_schema.get("info", {}).get("description")
# Remove default root, then add paths from API spec
rest_api.resources = {}
def get_or_create_path(path):
parts = path.rstrip("/").replace("//", "/").split("/")
parent_id = ""
if len(parts) > 1:
parent_path = "/".join(parts[:-1])
parent = get_or_create_path(parent_path)
parent_id = parent.id
if existing := [
r
for r in rest_api.resources.values()
if r.path_part == (parts[-1] or "/") and (r.parent_id or "") == (parent_id or "")
]:
return existing[0]
return add_path(path, parts, parent_id=parent_id)
def add_path(path, parts, parent_id=""):
child_id = create_resource_id()
path = path or "/"
child = apigateway_models.Resource(
resource_id=child_id,
region_name=rest_api.region_name,
api_id=rest_api.id,
path_part=parts[-1] or "/",
parent_id=parent_id,
)
for method, method_schema in resolved_schema["paths"].get(path, {}).items():
method = method.upper()
method_resource = child.add_method(method, None, None)
method_integration = method_schema.get("x-amazon-apigateway-integration", {})
responses = method_schema.get("responses", {})
for status_code in responses:
response_model = None
if model_schema := responses.get(status_code, {}).get("schema", {}):
response_model = {APPLICATION_JSON: model_schema}
response_parameters = (
method_integration.get("responses", {})
.get("default", {})
.get("responseParameters")
)
method_resource.create_response(
status_code,
response_model,
response_parameters,
)
integration = apigateway_models.Integration(
http_method=method,
uri=method_integration.get("uri"),
integration_type=method_integration["type"],
passthrough_behavior=method_integration.get("passthroughBehavior"),
request_templates=method_integration.get("requestTemplates") or {},
)
integration.create_integration_response(
status_code=method_integration.get("default", {}).get("statusCode", 200),
selection_pattern=None,
response_templates=method_integration.get("default", {}).get(
"responseTemplates", None
),
content_handling=None,
)
child.resource_methods[method]["methodIntegration"] = integration
rest_api.resources[child_id] = child
return child
if definitions := resolved_schema.get("definitions"):
for name, model in definitions.items():
rest_api.add_model(name=name, schema=model, content_type=APPLICATION_JSON)
basepath_mode = (query_params.get("basepath") or ["prepend"])[0]
base_path = (resolved_schema.get("basePath") or "") if basepath_mode == "prepend" else ""
for path in resolved_schema.get("paths", {}):
get_or_create_path(base_path + path)
policy = resolved_schema.get("x-amazon-apigateway-policy")
if policy:
policy = json.dumps(policy) if isinstance(policy, dict) else str(policy)
rest_api.policy = policy
minimum_compression_size = resolved_schema.get("x-amazon-apigateway-minimum-compression-size")
if minimum_compression_size is not None:
rest_api.minimum_compression_size = int(minimum_compression_size)
endpoint_config = resolved_schema.get("x-amazon-apigateway-endpoint-configuration")
if endpoint_config:
if endpoint_config.get("vpcEndpointIds"):
endpoint_config.setdefault("types", ["PRIVATE"])
rest_api.endpoint_configuration = endpoint_config
return rest_api
|
def import_api_from_openapi_spec(
rest_api: apigateway_models.RestAPI, body: Dict, query_params: Dict
) -> apigateway_models.RestAPI:
"""Import an API from an OpenAPI spec document"""
resolved_schema = resolve_references(body)
# XXX for some reason this makes cf tests fail, that's why it is commented.
# test_cfn_handle_serverless_api_resource
# rest_api.name = resolved_schema.get("info", {}).get("title")
rest_api.description = resolved_schema.get("info", {}).get("description")
# Remove default root, then add paths from API spec
rest_api.resources = {}
def get_or_create_path(path):
parts = path.rstrip("/").replace("//", "/").split("/")
parent_id = ""
if len(parts) > 1:
parent_path = "/".join(parts[:-1])
parent = get_or_create_path(parent_path)
parent_id = parent.id
if existing := [
r
for r in rest_api.resources.values()
if r.path_part == (parts[-1] or "/") and (r.parent_id or "") == (parent_id or "")
]:
return existing[0]
return add_path(path, parts, parent_id=parent_id)
def add_path(path, parts, parent_id=""):
child_id = create_resource_id()
path = path or "/"
child = apigateway_models.Resource(
resource_id=child_id,
region_name=rest_api.region_name,
api_id=rest_api.id,
path_part=parts[-1] or "/",
parent_id=parent_id,
)
for method, method_schema in resolved_schema["paths"].get(path, {}).items():
method = method.upper()
method_resource = child.add_method(method, None, None)
method_integration = method_schema.get("x-amazon-apigateway-integration", {})
responses = method_schema.get("responses", {})
for status_code in responses:
response_model = None
if model_schema := responses.get(status_code, {}).get("schema", {}):
response_model = {APPLICATION_JSON: model_schema}
response_parameters = (
method_integration.get("responses", {})
.get("default", {})
.get("responseParameters")
)
method_resource.create_response(
status_code,
response_model,
response_parameters,
)
integration = apigateway_models.Integration(
http_method=method,
uri=method_integration.get("uri"),
integration_type=method_integration["type"],
passthrough_behavior=method_integration.get("passthroughBehavior"),
request_templates=method_integration.get("requestTemplates") or {},
)
integration.create_integration_response(
status_code=method_integration.get("default", {}).get("statusCode", 200),
selection_pattern=None,
response_templates=method_integration.get("default", {}).get(
"responseTemplates", None
),
content_handling=None,
)
child.resource_methods[method]["methodIntegration"] = integration
rest_api.resources[child_id] = child
return child
if definitions := resolved_schema.get("definitions", {}):
for name, model in definitions.items():
rest_api.add_model(name=name, schema=model, content_type=APPLICATION_JSON)
basepath_mode = (query_params.get("basepath") or ["prepend"])[0]
base_path = (resolved_schema.get("basePath") or "") if basepath_mode == "prepend" else ""
for path in resolved_schema.get("paths", {}):
get_or_create_path(base_path + path)
policy = resolved_schema.get("x-amazon-apigateway-policy")
if policy:
policy = json.dumps(policy) if isinstance(policy, dict) else str(policy)
rest_api.policy = policy
minimum_compression_size = resolved_schema.get("x-amazon-apigateway-minimum-compression-size")
if minimum_compression_size is not None:
rest_api.minimum_compression_size = int(minimum_compression_size)
endpoint_config = resolved_schema.get("x-amazon-apigateway-endpoint-configuration")
if endpoint_config:
if endpoint_config.get("vpcEndpointIds"):
endpoint_config.setdefault("types", ["PRIVATE"])
rest_api.endpoint_configuration = endpoint_config
return rest_api
|
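The `get_or_create_path` helper above walks an OpenAPI path from the root, creating one resource per segment and wiring each child to its parent. Below is a standalone sketch of that decomposition using a plain dict instead of the apigateway resource models; the example paths are made up.
created = {}  # (parent_path, path_part) -> full path
def get_or_create(path):
    parts = path.rstrip("/").replace("//", "/").split("/")
    parent_path = ""
    if len(parts) > 1:
        parent_path = "/".join(parts[:-1])
        get_or_create(parent_path)      # make sure ancestors exist first
    key = (parent_path, parts[-1] or "/")
    created.setdefault(key, path or "/")
    return created[key]
for p in ["/pets", "/pets/{petId}", "/pets/{petId}/toys"]:
    get_or_create(p)
print(sorted(created.values()))  # ['/', '/pets', '/pets/{petId}', '/pets/{petId}/toys']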
36,474 |
def _compile_charset(charset, flags, code):
# compile charset subprogram
emit = code.append
for op, av in charset:
emit(op)
if op is NEGATE:
pass
elif op is LITERAL:
emit(av)
elif op is RANGE or op is RANGE_UNI_IGNORE:
emit(av[0])
emit(av[1])
elif op is CHARSET:
code.extend(av)
elif op is BIGCHARSET:
code.extend(av)
elif op is CATEGORY:
if flags & SRE_FLAG_LOCALE:
emit(CH_LOCALE[av])
elif flags & SRE_FLAG_UNICODE:
emit(CH_UNICODE[av])
else:
emit(av)
else:
raise ReCompileError("internal: unsupported set operator %r" % (op,))
emit(FAILURE)
|
def _compile_charset(charset, flags, code):
# compile charset subprogram
emit = code.append
for op, av in charset:
emit(op)
if op is NEGATE:
pass
elif op is LITERAL:
emit(av)
elif op is RANGE or op is RANGE_UNI_IGNORE:
emit(av[0])
emit(av[1])
elif op is CHARSET:
code.extend(av)
elif op is BIGCHARSET:
code.extend(av)
elif op is CATEGORY:
if flags & SRE_FLAG_LOCALE:
emit(CH_LOCALE[av])
elif flags & SRE_FLAG_UNICODE:
emit(CH_UNICODE[av])
else:
emit(av)
else:
raise ReCompileError(f"internal: unsupported set operator {op!r}")
emit(FAILURE)
|
43,672 |
def _qubit_operator_to_terms(qubit_operator, wires=None):
r"""Converts OpenFermion ``QubitOperator`` to a 2-tuple of coefficients and
PennyLane Pauli observables.
**Example usage:**
>>> q_op = 0.1*QubitOperator('X0') + 0.2*QubitOperator('Y0 Z2')
>>> q_op
0.1 [X0] +
0.2 [Y0 Z2]
>>> _qubit_operator_to_terms(q_op, wires=['w0','w1','w2','extra_wire'])
(array([0.1, 0.2]), [Tensor(PauliX(wires=['w0'])), Tensor(PauliY(wires=['w0']), PauliZ(wires=['w2']))])
Args:
qubit_operator (QubitOperator): Fermionic-to-qubit transformed operator in terms of
Pauli matrices
wires (Wires, list, tuple, dict): Custom wire mapping for connecting to PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identiy map. Defaults to None.
Returns:
tuple[array[float], Iterable[pennylane.operation.Observable]]: coefficients and their
corresponding PennyLane observables in the Pauli basis
"""
n_wires = (
1 + max([max([i for i, _ in t]) if t else 1 for t in qubit_operator.terms])
if qubit_operator.terms
else 1
)
wires = _proc_wires(wires, n_wires=n_wires)
if not qubit_operator.terms: # added since can't unpack empty zip to (coeffs, ops) below
return np.array([0.0]), [qml.operation.Tensor(qml.Identity(wires[0]))]
xyz2pauli = {"X": qml.PauliX, "Y": qml.PauliY, "Z": qml.PauliZ}
coeffs, ops = zip(
*[
(
coef,
qml.operation.Tensor(*[xyz2pauli[q[1]](wires=wires[q[0]]) for q in term])
if term
else qml.operation.Tensor(qml.Identity(wires[0]))
# example term: ((0,'X'), (2,'Z'), (3,'Y'))
)
for term, coef in qubit_operator.terms.items()
]
)
return np.real(np.array(coeffs)), list(ops)
|
def _qubit_operator_to_terms(qubit_operator, wires=None):
r"""Converts OpenFermion ``QubitOperator`` to a 2-tuple of coefficients and
PennyLane Pauli observables.
**Example usage:**
>>> q_op = 0.1*QubitOperator('X0') + 0.2*QubitOperator('Y0 Z2')
>>> q_op
0.1 [X0] +
0.2 [Y0 Z2]
>>> _qubit_operator_to_terms(q_op, wires=['w0','w1','w2','extra_wire'])
(array([0.1, 0.2]), [Tensor(PauliX(wires=['w0'])), Tensor(PauliY(wires=['w0']), PauliZ(wires=['w2']))])
Args:
qubit_operator (QubitOperator): Fermionic-to-qubit transformed operator in terms of
Pauli matrices
wires (Wires, list, tuple, dict): Custom wire mapping for connecting to PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map.
Returns:
tuple[array[float], Iterable[pennylane.operation.Observable]]: coefficients and their
corresponding PennyLane observables in the Pauli basis
"""
n_wires = (
1 + max([max([i for i, _ in t]) if t else 1 for t in qubit_operator.terms])
if qubit_operator.terms
else 1
)
wires = _proc_wires(wires, n_wires=n_wires)
if not qubit_operator.terms: # added since can't unpack empty zip to (coeffs, ops) below
return np.array([0.0]), [qml.operation.Tensor(qml.Identity(wires[0]))]
xyz2pauli = {"X": qml.PauliX, "Y": qml.PauliY, "Z": qml.PauliZ}
coeffs, ops = zip(
*[
(
coef,
qml.operation.Tensor(*[xyz2pauli[q[1]](wires=wires[q[0]]) for q in term])
if term
else qml.operation.Tensor(qml.Identity(wires[0]))
# example term: ((0,'X'), (2,'Z'), (3,'Y'))
)
for term, coef in qubit_operator.terms.items()
]
)
return np.real(np.array(coeffs)), list(ops)
|
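In the conversion above, each OpenFermion term, a tuple like ((0, 'X'), (2, 'Z')), is mapped through `xyz2pauli` onto wire labels, with the empty term falling back to an identity on the first wire. A sketch of that mapping using plain strings in place of PennyLane operations; the `terms` dict below is made up but mimics the layout of `QubitOperator.terms`.
terms = {((0, "X"),): 0.1, ((0, "Y"), (2, "Z")): 0.2, (): 0.5}
wires = ["w0", "w1", "w2"]
def to_label(term):
    if not term:                        # empty term -> identity on the first wire
        return f"Identity({wires[0]})"
    return " @ ".join(f"Pauli{p}({wires[q]})" for q, p in term)
coeffs, ops = zip(*[(coef, to_label(term)) for term, coef in terms.items()])
print(coeffs)  # (0.1, 0.2, 0.5)
print(ops)     # ('PauliX(w0)', 'PauliY(w0) @ PauliZ(w2)', 'Identity(w0)')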
59,852 |
def config_dir():
conf_dir = os.environ.get(
"XDG_CONFIG_HOME", os.path.join(os.path.expanduser("~"), ".config", "yt")
)
if not os.path.exists(conf_dir):
try:
os.makedirs(conf_dir)
except OSError:
warnings.warn("unable to create yt config directory")
return conf_dir
|
def config_dir():
XDG = os.environ.get("XDG_CONFIG_HOME", os.path.join(os.path.expanduser("~"), ".config"))
conf_dir = os.path.join(os.path.expanduser(XDG, "yt")
)
if not os.path.exists(conf_dir):
try:
os.makedirs(conf_dir)
except OSError:
warnings.warn("unable to create yt config directory")
return conf_dir
|
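Both versions above resolve a per-application directory under XDG_CONFIG_HOME, falling back to ~/.config when the variable is unset. A minimal generic sketch of that resolution, stdlib only and not a drop-in for either version:
import os
def xdg_config_dir(app_name):
    base = os.environ.get(
        "XDG_CONFIG_HOME", os.path.join(os.path.expanduser("~"), ".config")
    )
    return os.path.join(base, app_name)
print(xdg_config_dir("yt"))  # e.g. /home/user/.config/yt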
54,090 |
def _set_electrical_parameters_links(links):
if links.empty: return links
p_max_pu = snakemake.config['links'].get('p_max_pu', 1.)
links['p_max_pu'] = p_max_pu
links['p_min_pu'] = -p_max_pu
links_p_nom = pd.read_csv(snakemake.input.links_p_nom)
#Filter links that are not in operation anymore
links_p_nom['not_in_operation']=links_p_nom.Remarks.str.contains('Shut down', na=False) | links_p_nom.Remarks.str.contains('Replaced', na=False)
links_p_nom = links_p_nom[links_p_nom.not_in_operation == False]
#add column for closest link id
links_p_nom["j"] = np.nan
#find closest link for all links in links_p_nom
for index, row in links_p_nom.iterrows():
links_p_nom.loc[[index],'j'] = _find_closest_links(links, links_p_nom.loc[[index]])
links_p_nom = links_p_nom.groupby(['j'], as_index=False).agg({'Power (MW)': 'sum'})
p_nom = links_p_nom.dropna(subset=["j"]).set_index("j")["Power (MW)"]
# Don't update p_nom if it's already set
p_nom_unset = p_nom.drop(links.index[links.p_nom.notnull()], errors='ignore') if "p_nom" in links else p_nom
links.loc[p_nom_unset.index, "p_nom"] = p_nom_unset
return links
|
def _set_electrical_parameters_links(links):
if links.empty: return links
p_max_pu = snakemake.config['links'].get('p_max_pu', 1.)
links['p_max_pu'] = p_max_pu
links['p_min_pu'] = -p_max_pu
links_p_nom = pd.read_csv(snakemake.input.links_p_nom)
#Filter links that are not in operation anymore
removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False)
links_p_nom = links_p_nom[~removed_b]
#add column for closest link id
links_p_nom["j"] = np.nan
#find closest link for all links in links_p_nom
for index, row in links_p_nom.iterrows():
links_p_nom.loc[[index],'j'] = _find_closest_links(links, links_p_nom.loc[[index]])
links_p_nom = links_p_nom.groupby(['j'], as_index=False).agg({'Power (MW)': 'sum'})
p_nom = links_p_nom.dropna(subset=["j"]).set_index("j")["Power (MW)"]
# Don't update p_nom if it's already set
p_nom_unset = p_nom.drop(links.index[links.p_nom.notnull()], errors='ignore') if "p_nom" in links else p_nom
links.loc[p_nom_unset.index, "p_nom"] = p_nom_unset
return links
|
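The modified version above folds the two `str.contains` checks into one regex alternation. A tiny pandas sketch, with made-up remarks, showing the two forms select the same rows, including the NaN handling via na=False:
import pandas as pd
remarks = pd.Series(["Shut down in 2015", "Replaced by link 42", "In operation", None])
two_checks = (remarks.str.contains("Shut down", na=False)
              | remarks.str.contains("Replaced", na=False))
one_regex = remarks.str.contains("Shut down|Replaced", na=False)
assert two_checks.equals(one_regex)
print(one_regex.tolist())  # [True, True, False, False]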
45,555 |
def update_auth(c, config):
"""
Set auth related configuration from YAML config file.
As an example, this function should update the following TLJH auth
configuration:
```yaml
auth:
type: oauthenticator.github.GitHubOAuthenticator
GitHubOAuthenticator:
client_id: "..."
client_secret: "..."
oauth_callback_url: "..."
ArbitraryKey:
arbitrary_key: "..."
arbitrary_key_with_none_value:
```
by applying the following configuration:
```python
c.JupyterHub.authenticator_class = "oauthenticator.github.GitHubOAuthenticator"
c.GitHubOAuthenticator.client_id = "..."
c.GitHubOAuthenticator.client_secret = "..."
c.GitHubOAuthenticator.oauth_callback_url = "..."
c.ArbitraryKey.arbitrary_key = "..."
```
Note that "auth.type" and "auth.ArbitraryKey.arbitrary_key_with_none_value"
are treated a bit differently. auth.type will always map to
c.JupyterHub.authenticator_class and any configured value being None won't
be set.
"""
tljh_auth_config = config['auth']
c.JupyterHub.authenticator_class = tljh_auth_config['type']
for auth_key, auth_value in tljh_auth_config.items():
if auth_key == "type":
continue
traitlet_class_name = auth_key
traitlet_class_config = auth_value
traitlet_class_instance = getattr(c, traitlet_class_name)
for config_name, config_value in traitlet_class_config.items():
set_if_not_none(traitlet_class_instance, config_name, config_value)
|
def update_auth(c, config):
"""
Set auth related configuration from YAML config file.
As an example, this function should update the following TLJH auth
configuration:
```yaml
auth:
type: oauthenticator.github.GitHubOAuthenticator
GitHubOAuthenticator:
client_id: "..."
client_secret: "..."
oauth_callback_url: "..."
ClassName:
arbitrary_key: "..."
arbitrary_key_with_none_value:
```
by applying the following configuration:
```python
c.JupyterHub.authenticator_class = "oauthenticator.github.GitHubOAuthenticator"
c.GitHubOAuthenticator.client_id = "..."
c.GitHubOAuthenticator.client_secret = "..."
c.GitHubOAuthenticator.oauth_callback_url = "..."
c.ArbitraryKey.arbitrary_key = "..."
```
Note that "auth.type" and "auth.ArbitraryKey.arbitrary_key_with_none_value"
are treated a bit differently. auth.type will always map to
c.JupyterHub.authenticator_class and any configured value being None won't
be set.
"""
tljh_auth_config = config['auth']
c.JupyterHub.authenticator_class = tljh_auth_config['type']
for auth_key, auth_value in tljh_auth_config.items():
if auth_key == "type":
continue
traitlet_class_name = auth_key
traitlet_class_config = auth_value
traitlet_class_instance = getattr(c, traitlet_class_name)
for config_name, config_value in traitlet_class_config.items():
set_if_not_none(traitlet_class_instance, config_name, config_value)
|
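The loop above sets every non-None value from the YAML section onto the matching traitlets class. A small sketch of that pattern with stand-ins for the parts the record does not define: `set_if_not_none` and the config object below are hypothetical (a real TLJH setup passes a traitlets Config, not a SimpleNamespace).
from types import SimpleNamespace
def set_if_not_none(obj, name, value):
    if value is not None:
        setattr(obj, name, value)
c = SimpleNamespace(JupyterHub=SimpleNamespace(), GitHubOAuthenticator=SimpleNamespace())
yaml_auth = {
    "type": "oauthenticator.github.GitHubOAuthenticator",
    "GitHubOAuthenticator": {"client_id": "abc", "client_secret": None},
}
c.JupyterHub.authenticator_class = yaml_auth["type"]
for class_name, class_config in yaml_auth.items():
    if class_name == "type":
        continue
    instance = getattr(c, class_name)
    for key, value in class_config.items():
        set_if_not_none(instance, key, value)
print(c.GitHubOAuthenticator.client_id)                  # abc
print(hasattr(c.GitHubOAuthenticator, "client_secret"))  # False, the None value was skipped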
48,593 |
def numpy_to_texture(image):
"""Convert a NumPy image array to a vtk.vtkTexture.
Parameters
----------
image : numpy.ndarray
Numpy image array. Texture datatype expected to be ``np.uint8``.
Returns
-------
pyvista.Texture
PyVista texture.
Examples
--------
Create an all white texture.
>>> import pyvista as pv
>>> import numpy as np
>>> tex_arr = np.ones((1024, 1024, 3), dtype=np.uint8) * 255
>>> tex = pv.numpy_to_texture(tex_im)
"""
if image.dtype != np.uint8:
image = image.astype(np.uint8)
warnings.warn(
'Expected `image` dtype to be ``np.uint8``. `image` has been copied '
'and converted to np.uint8.',
UserWarning,
)
return pyvista.Texture(image)
|
def numpy_to_texture(image):
"""Convert a NumPy image array to a vtk.vtkTexture.
Parameters
----------
image : numpy.ndarray
Numpy image array. Texture datatype expected to be ``np.uint8``.
Returns
-------
pyvista.Texture
PyVista texture.
Examples
--------
Create an all white texture.
>>> import pyvista as pv
>>> import numpy as np
>>> tex_arr = np.ones((1024, 1024, 3), dtype=np.uint8) * 255
>>> tex = pv.numpy_to_texture(tex_arr)
"""
if image.dtype != np.uint8:
image = image.astype(np.uint8)
warnings.warn(
'Expected `image` dtype to be ``np.uint8``. `image` has been copied '
'and converted to np.uint8.',
UserWarning,
)
return pyvista.Texture(image)
|
21,960 |
def get_parser():
"""Instantiate the command line argument parser."""
parser = argparse.ArgumentParser(prog=PROG, description=__doc__)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
# Sub-command management
sp = parser.add_subparsers(title="sub-commands", dest='func')
# processing
get_smallbaseline_parser(sp)
get_geocode_parser(sp)
get_multilook_parser(sp)
get_spatial_average_parser(sp)
get_spatial_filter_parser(sp)
get_temporal_average_parser(sp)
get_temporal_derivative_parser(sp)
get_temporal_filter_parser(sp)
# pre-processing
get_prep_aria_parser(sp)
get_prep_cosicorr_parser(sp)
get_prep_fringe_parser(sp)
get_prep_gamma_parser(sp)
get_prep_gmtsar_parser(sp)
get_prep_hyp3_parser(sp)
get_prep_isce_parser(sp)
get_prep_roipac_parser(sp)
get_prep_snap_parser(sp)
# I/O
get_load_data_parser(sp)
get_load_gbis_parser(sp)
get_save_gbis_parser(sp)
get_save_gdal_parser(sp)
get_save_gmt_parser(sp)
get_save_hdfeos5_parser(sp)
get_save_kite_parser(sp)
get_save_kmz_timeseries_parser(sp)
get_save_kmz_parser(sp)
get_save_qgis_parser(sp)
get_save_roipac_parser(sp)
# display
get_info_parser(sp)
get_plot_coherence_matrix_parser(sp)
get_plot_network_parser(sp)
get_plot_transection_parser(sp)
get_tsview_parser(sp)
get_view_parser(sp)
# image operations
get_add_parser(sp)
get_diff_parser(sp)
get_image_math_parser(sp)
get_image_stitch_parser(sp)
get_subset_parser(sp)
# DEM
get_dem_error_parser(sp)
get_dem_gsi_parser(sp)
# mask
get_generate_mask_parser(sp)
get_mask_parser(sp)
# ionosphere
get_iono_tec_parser(sp)
# troposphere
get_tropo_gacos_parser(sp)
get_tropo_phase_elevation_parser(sp)
get_tropo_pyaps3_parser(sp)
# geodesy
try:
get_bulk_plate_motion_parser(sp)
except ImportError:
pass
get_solid_earth_tides_parser(sp)
# phase
get_closure_phase_bias_parser(sp)
get_remove_ramp_parser(sp)
get_unwrap_error_bridging_parser(sp)
get_unwrap_error_phase_closure_parser(sp)
# misc
get_asc_desc2horz_vert_parser(sp)
get_ifgram_inversion_parser(sp)
get_lookup_geo2radar_parser(sp)
get_modify_network_parser(sp)
get_reference_date_parser(sp)
get_reference_point_parser(sp)
get_s1ab_range_bias_parser(sp)
get_timeseries_rms_parser(sp)
get_timeseries2velocity_parser(sp)
_autocomplete(parser)
return parser
|
def get_parser():
"""Instantiate the command line argument parser."""
parser = argparse.ArgumentParser(prog=PROG, description=__doc__)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
# Sub-command management
sp = parser.add_subparsers(title="sub-commands", dest='func', required=True)
# processing
get_smallbaseline_parser(sp)
get_geocode_parser(sp)
get_multilook_parser(sp)
get_spatial_average_parser(sp)
get_spatial_filter_parser(sp)
get_temporal_average_parser(sp)
get_temporal_derivative_parser(sp)
get_temporal_filter_parser(sp)
# pre-processing
get_prep_aria_parser(sp)
get_prep_cosicorr_parser(sp)
get_prep_fringe_parser(sp)
get_prep_gamma_parser(sp)
get_prep_gmtsar_parser(sp)
get_prep_hyp3_parser(sp)
get_prep_isce_parser(sp)
get_prep_roipac_parser(sp)
get_prep_snap_parser(sp)
# I/O
get_load_data_parser(sp)
get_load_gbis_parser(sp)
get_save_gbis_parser(sp)
get_save_gdal_parser(sp)
get_save_gmt_parser(sp)
get_save_hdfeos5_parser(sp)
get_save_kite_parser(sp)
get_save_kmz_timeseries_parser(sp)
get_save_kmz_parser(sp)
get_save_qgis_parser(sp)
get_save_roipac_parser(sp)
# display
get_info_parser(sp)
get_plot_coherence_matrix_parser(sp)
get_plot_network_parser(sp)
get_plot_transection_parser(sp)
get_tsview_parser(sp)
get_view_parser(sp)
# image operations
get_add_parser(sp)
get_diff_parser(sp)
get_image_math_parser(sp)
get_image_stitch_parser(sp)
get_subset_parser(sp)
# DEM
get_dem_error_parser(sp)
get_dem_gsi_parser(sp)
# mask
get_generate_mask_parser(sp)
get_mask_parser(sp)
# ionosphere
get_iono_tec_parser(sp)
# troposphere
get_tropo_gacos_parser(sp)
get_tropo_phase_elevation_parser(sp)
get_tropo_pyaps3_parser(sp)
# geodesy
try:
get_bulk_plate_motion_parser(sp)
except ImportError:
pass
get_solid_earth_tides_parser(sp)
# phase
get_closure_phase_bias_parser(sp)
get_remove_ramp_parser(sp)
get_unwrap_error_bridging_parser(sp)
get_unwrap_error_phase_closure_parser(sp)
# misc
get_asc_desc2horz_vert_parser(sp)
get_ifgram_inversion_parser(sp)
get_lookup_geo2radar_parser(sp)
get_modify_network_parser(sp)
get_reference_date_parser(sp)
get_reference_point_parser(sp)
get_s1ab_range_bias_parser(sp)
get_timeseries_rms_parser(sp)
get_timeseries2velocity_parser(sp)
_autocomplete(parser)
return parser
|
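The only change in the modified version above is `required=True` on `add_subparsers`. A minimal argparse sketch of what that flag changes: without it an empty command line parses with `func=None`, with it argparse exits with an error.
import argparse
def build(required):
    parser = argparse.ArgumentParser(prog="demo")
    sp = parser.add_subparsers(title="sub-commands", dest="func", required=required)
    sp.add_parser("view")
    return parser
print(build(required=False).parse_args([]))    # Namespace(func=None)
try:
    build(required=True).parse_args([])        # argparse prints an error and exits
except SystemExit as exc:
    print("missing sub-command ->", exc.code)  # missing sub-command -> 2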
30,784 |
def map_changes_to_existing_user(existing_user, new_json):
# if existing_user is not None:
for k, v in new_json.items():
if type(v) == list:
# handle in specific way
# as of now only emails need to be handled
if k == 'emails':
existing_email_list = existing_user.get(k)
# update
for i in v:
for j in existing_email_list:
if j.get('type') == i.get('type'):
if j.get('value') != i.get('value'):
j['value'] = i.get('value')
if i.get('primary', None) is not None:
j['primary'] = i.get('primary')
else:
if j.get('primary', None) is not None:
j['primary'] = j.get('primary')
break
# add
new_email_list = []
for i in v:
exist = False
for j in existing_email_list:
if i.get('type') == j.get('type', ''):
exist = True
break
if not exist:
new_email = {'type': i.get('type'),
'value': i.get('value')}
if i.get('primary', None) is not None:
new_email.update({'primary': i.get('primary')})
new_email_list.append(new_email)
existing_email_list.extend(new_email_list)
elif type(v) == dict:
if k != SCIM_EXTENSION_SCHEMA:
map_changes_to_existing_user(existing_user.get(k), v)
else:
existing_user[k] = v
|
def map_changes_to_existing_user(existing_user, new_json):
# if existing_user is not None:
for k, v in new_json.items():
if type(v) == list:
# handle in specific way
# as of now only emails need to be handled
if k == 'emails':
existing_email_list = existing_user.get(k)
# update
for i in v:
for j in existing_email_list:
if j.get('type') == i.get('type'):
if j.get('value') != i.get('value'):
j['value'] = i.get('value')
if i.get('primary', None) is not None:
j['primary'] = i.get('primary')
else:
if j.get('primary', None) is not None:
j['primary'] = j.get('primary')
break
# add
new_email_list = []
for new_json_email in v:
exist = False
for j in existing_email_list:
if i.get('type') == j.get('type', ''):
exist = True
break
if not exist:
new_email = {'type': i.get('type'),
'value': i.get('value')}
if i.get('primary', None) is not None:
new_email.update({'primary': i.get('primary')})
new_email_list.append(new_email)
existing_email_list.extend(new_email_list)
elif type(v) == dict:
if k != SCIM_EXTENSION_SCHEMA:
map_changes_to_existing_user(existing_user.get(k), v)
else:
existing_user[k] = v
|
5,887 |
def _generate_metadata_legacy(install_req):
# type: (InstallRequirement) -> None
req_details_str = install_req.name or "from {}".format(install_req.link)
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
install_req.setup_py_path, req_details_str,
)
# Compose arguments for subprocess call
base_cmd = make_setuptools_shim_args(install_req.setup_py_path)
if install_req.isolated:
base_cmd += ["--no-user-cfg"]
# For non-editable installed, don't put the .egg-info files at the root,
# to avoid confusion due to the source code being considered an installed
# egg.
egg_base_option = [] # type: List[str]
if not install_req.editable:
egg_info_dir = os.path.join(install_req.setup_py_dir, 'pip-egg-info')
egg_base_option = ['--egg-base', egg_info_dir]
# setuptools complains if the target directory does not exist.
ensure_dir(egg_info_dir)
with install_req.build_env:
call_subprocess(
base_cmd + ["egg_info"] + egg_base_option,
cwd=install_req.setup_py_dir,
command_desc='python setup.py egg_info',
)
|
def _generate_metadata_legacy(install_req):
# type: (InstallRequirement) -> None
req_details_str = install_req.name or "from {}".format(install_req.link)
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
install_req.setup_py_path, req_details_str,
)
# Compose arguments for subprocess call
base_cmd = make_setuptools_shim_args(install_req.setup_py_path)
if install_req.isolated:
base_cmd += ["--no-user-cfg"]
# For non-editable installs, don't put the .egg-info files at the root,
# to avoid confusion due to the source code being considered an installed
# egg.
egg_base_option = [] # type: List[str]
if not install_req.editable:
egg_info_dir = os.path.join(install_req.setup_py_dir, 'pip-egg-info')
egg_base_option = ['--egg-base', egg_info_dir]
# setuptools complains if the target directory does not exist.
ensure_dir(egg_info_dir)
with install_req.build_env:
call_subprocess(
base_cmd + ["egg_info"] + egg_base_option,
cwd=install_req.setup_py_dir,
command_desc='python setup.py egg_info',
)
|
20,097 |
def load_icons():
if not os.path.exists(STAGE_ICONS_PATH):
sys.stderr.write('No stage icons found, aborting icon copy.\n')
return
with setup_flask_app().app_context():
config.instance.load_configuration()
fs_blueprints_path = os.path.join(config.instance.file_server_root,
FILE_SERVER_BLUEPRINTS_FOLDER)
existing_blueprints = {}
for tenant in os.listdir(fs_blueprints_path):
tenant_path = os.path.join(fs_blueprints_path, tenant)
for blueprint in os.listdir(tenant_path):
if blueprint in existing_blueprints:
existing_blueprints[blueprint].append(tenant)
else:
existing_blueprints[blueprint] = [tenant]
icon_blueprints = os.listdir(STAGE_ICONS_PATH)
for blueprint in icon_blueprints:
icon_path = os.path.join(STAGE_ICONS_PATH, blueprint, 'icon.png')
if blueprint in existing_blueprints:
for tenant in existing_blueprints[blueprint]:
dest_path = os.path.join(fs_blueprints_path,
tenant,
blueprint,
BLUEPRINT_ICON_FILENAME)
shutil.copy(icon_path, dest_path)
# We're not deleting because of file ownership issues,
# but even a relatively large number of icons will not
# be especially large in size, so this shouldn't be a
# big problem (and it'll only apply if icons are
# heavily used, and only on the manager doing the
# upgrade).
else:
sys.stderr.write(
f'Found icon for blueprints named {blueprint}, but no '
f'blueprints of that name. Icon is in {icon_path}\n')
|
def load_icons():
if not os.path.exists(STAGE_ICONS_PATH):
sys.stderr.write('No stage icons found, aborting icon copy.\n')
return
with setup_flask_app().app_context():
config.instance.load_configuration()
fs_blueprints_path = os.path.join(config.instance.file_server_root,
FILE_SERVER_BLUEPRINTS_FOLDER)
existing_blueprints = {}
for tenant in os.listdir(fs_blueprints_path):
tenant_path = os.path.join(fs_blueprints_path, tenant)
for blueprint in os.listdir(tenant_path):
existing_blueprints.setdefault(blueprint, []).append(tenant)
icon_blueprints = os.listdir(STAGE_ICONS_PATH)
for blueprint in icon_blueprints:
icon_path = os.path.join(STAGE_ICONS_PATH, blueprint, 'icon.png')
if blueprint in existing_blueprints:
for tenant in existing_blueprints[blueprint]:
dest_path = os.path.join(fs_blueprints_path,
tenant,
blueprint,
BLUEPRINT_ICON_FILENAME)
shutil.copy(icon_path, dest_path)
# We're not deleting because of file ownership issues,
# but even a relatively large number of icons will not
# be especially large in size, so this shouldn't be a
# big problem (and it'll only apply if icons are
# heavily used, and only on the manager doing the
# upgrade).
else:
sys.stderr.write(
f'Found icon for blueprints named {blueprint}, but no '
f'blueprints of that name. Icon is in {icon_path}\n')
|
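The modified version above replaces the explicit membership check with `dict.setdefault`. A two-loop sketch, with made-up tenant and blueprint names, showing both spellings build the same tenant list per blueprint:
pairs = [("default_tenant", "bp1"), ("other_tenant", "bp1"), ("default_tenant", "bp2")]
grouped_ifelse = {}
for tenant, blueprint in pairs:
    if blueprint in grouped_ifelse:
        grouped_ifelse[blueprint].append(tenant)
    else:
        grouped_ifelse[blueprint] = [tenant]
grouped_setdefault = {}
for tenant, blueprint in pairs:
    grouped_setdefault.setdefault(blueprint, []).append(tenant)
assert grouped_ifelse == grouped_setdefault
print(grouped_setdefault)  # {'bp1': ['default_tenant', 'other_tenant'], 'bp2': ['default_tenant']}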
39,446 |
def test_array_volume_rendering(uniform):
arr = uniform["Spatial Point Data"].reshape(uniform.dimensions)
pyvista.plot(arr, volume=True, opacity='linear')
|
def test_array_volume_rendering(uniform):
arr = uniform["Spatial Point Data"].reshape(uniform.dimensions)
pyvista.plot(arr, volume=True, opacity='linear', before_close_callback=verify_cache_image)
|
13,018 |
def _remove_token_from_checkout(checkout):
checkout_data = json.loads(checkout)
checkout_data[0]["token"] = str(uuid.UUID(**{"int": 1}))
return json.dumps(checkout_data)
|
def _remove_token_from_checkout(checkout):
checkout_data = json.loads(checkout)
checkout_data[0]["token"] = str(uuid.UUID(int=1))
return json.dumps(checkout_data)
|
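The change above is purely cosmetic: unpacking {"int": 1} and passing int=1 directly construct the same UUID. A stdlib-only check:
import uuid
assert uuid.UUID(**{"int": 1}) == uuid.UUID(int=1)
print(uuid.UUID(int=1))  # 00000000-0000-0000-0000-000000000001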
87 |
def solr8_update(
reqs: List[Union[str, UpdateRequest, DeleteRequest]],
commit_within=60_000,
skip_id_check=False,
solr_base_url: str = None,
) -> None:
"""This will replace solr_update once we're fully on Solr 8.7+"""
req_strs = (r if type(r) == str else r.toxml() for r in reqs if r) # type: ignore
# .toxml() can return None :/
content = f"<update>{''.join(s for s in req_strs if s)}</update>" # type: ignore
solr_base_url = solr_base_url or get_solr_base_url()
params = {}
if commit_within is not None:
params['commitWithin'] = commit_within
if skip_id_check:
params['overwrite'] = 'false'
logger.debug(f"POSTing update to {solr_base_url}/update {params}")
try:
resp = httpx.post(
f'{solr_base_url}/update',
timeout=30, # The default timeout is silly short
params=params,
headers={'Content-Type': 'application/xml'},
content=content)
resp.raise_for_status()
except HTTPError:
logger.error('Error with solr8 POST update')
|
def solr8_update(
reqs: List[Union[str, UpdateRequest, DeleteRequest]],
commit_within=60_000,
skip_id_check=False,
solr_base_url: str = None,
) -> None:
"""This will replace solr_update once we're fully on Solr 8.7+"""
req_strs = (r if type(r) == str else r.toxml() for r in reqs if r) # type: ignore
# .toxml() can return None :/
content = f"<update>{''.join(s for s in req_strs if s)}</update>" # type: ignore
req_strs = (r if type(r) is str else r.toxml() for r in reqs if r) # type: ignore
params = {}
if commit_within is not None:
params['commitWithin'] = commit_within
if skip_id_check:
params['overwrite'] = 'false'
logger.debug(f"POSTing update to {solr_base_url}/update {params}")
try:
resp = httpx.post(
f'{solr_base_url}/update',
timeout=30, # The default timeout is silly short
params=params,
headers={'Content-Type': 'application/xml'},
content=content)
resp.raise_for_status()
except HTTPError:
logger.error('Error with solr8 POST update')
|
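Both versions above join the surviving request fragments into a single <update> envelope, filtering out falsy entries because .toxml() may return None. A tiny sketch of that assembly with plain strings standing in for UpdateRequest/DeleteRequest objects; the fragments are made up:
reqs = ["<add><doc/></add>", None, "<delete><id>abc</id></delete>", ""]
req_strs = (r for r in reqs if r)                  # drop None and empty fragments
content = f"<update>{''.join(req_strs)}</update>"
print(content)  # <update><add><doc/></add><delete><id>abc</id></delete></update>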
31,358 |
def list_operators_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""Get operators list from TOPdesk"""
operators = client.get_list_with_query(list_type="operators",
start=args.get('start', None),
page_size=args.get('page_size', None),
query=args.get('query', None))
if len(operators) == 0:
return CommandResults(readable_output='No operators found')
headers = ['id', 'name', 'telephone', 'job title', 'department',
'city', 'branch name', 'login name']
readable_operators = []
for operator in operators:
readable_operators.append({
'id': operator.get('id', None),
'name': operator.get('dynamicName', None),
'telephone': operator.get('phoneNumber', None),
'job title': operator.get('jobTitle', None),
'department': operator.get('department', None),
'city': operator.get('city', None),
'branch name': replace_none(operator.get('branch', {}), {}).get('name', None),
'login name': operator.get('tasLoginName', None),
})
readable_output = tableToMarkdown(f'{INTEGRATION_NAME} operators',
readable_operators,
headers=headers,
removeNull=True)
return CommandResults(
readable_output=readable_output,
outputs_prefix=f'{INTEGRATION_NAME}.operator',
outputs_key_field='id',
outputs=operators
)
|
def list_operators_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""Get operators list from TOPdesk"""
operators = client.get_list_with_query(list_type="operators",
start=args.get('start', None),
page_size=args.get('page_size', None),
query=args.get('query', None))
if len(operators) == 0:
return CommandResults(readable_output='No operators found')
headers = ['id', 'name', 'telephone', 'job title', 'department',
'city', 'branch name', 'login name']
readable_operators = []
for operator in operators:
readable_operators.append({
'id': operator.get('id', None),
'name': operator.get('dynamicName', None),
'telephone': operator.get('phoneNumber', None),
'job title': operator.get('jobTitle', None),
'department': operator.get('department', None),
'city': operator.get('city', None),
'branch name': replace_none(operator.get('branch', {}), {}).get('name', None),
'login name': operator.get('tasLoginName', None),
})
readable_output = tableToMarkdown(f'{INTEGRATION_NAME} operators',
readable_operators,
headers=headers,
removeNull=True)
return CommandResults(
readable_output=readable_output,
outputs_prefix=f'{INTEGRATION_NAME}.Operator',
outputs_key_field='id',
outputs=operators
)
|
45,131 |
def create_if_missing(
flag_name: str,
is_enabled: bool = False,
client_data: Optional[dict] = None,
bucketer: Optional[AbstractBucketer] = None,
conditions: Optional[Iterable[Condition]] = None,
client: FeatureFlagClient = None,
) -> Optional[FeatureFlag]:
"""
Create a feature flag if a flag matching the given name does not
already exist.
Args:
flag_name: the name of the feature flag
is_enabled: the initial enabled/disabled state of the flag if
this function creates it
client_data: arbitrary data that we should store with the flag
bucketer: an optional bucketer from the flipper.bucketing module, e.g.
PercentageBucketer, to use when determining if the flag
is enabled
conditions: an optional iterable of Conditions against which we will
check input data to determine if a flag is enabled
client: The FeatureFlagClient instance to use. Defaults to a client
configured to look at an in-memory feature store.
Returns:
FeatureFlag or None: Returns a created or existing FeatureFlag, or None
if feature flagging is disabled.
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return
if not client:
client = get_features_client()
# If the flag exists in the feature flag store, we'll consider the
# enabled state, bucketer, and conditions currently saved in the
# feature flag store as canonical.
if client.exists(flag_name):
return client.get(flag_name)
flag = client.create(flag_name, is_enabled=is_enabled, client_data=client_data)
if bucketer:
flag.set_bucketer(bucketer)
if conditions:
flag.set_conditions(conditions)
return flag
|
def create_if_missing(
flag_name: str,
is_enabled: bool = False,
client_data: Optional[dict] = None,
bucketer: Optional[AbstractBucketer] = None,
conditions: Optional[Iterable[Condition]] = None,
client: FeatureFlagClient = None,
) -> Optional[FeatureFlag]:
"""
Create a feature flag if a flag matching the given name does not
already exist.
Args:
flag_name: The name of the feature flag.
is_enabled: the initial enabled/disabled state of the flag if
this function creates it
client_data: arbitrary data that we should store with the flag
bucketer: an optional bucketer from the flipper.bucketing module, e.g.
PercentageBucketer, to use when determining if the flag
is enabled
conditions: an optional iterable of Conditions against which we will
check input data to determine if a flag is enabled
client: The FeatureFlagClient instance to use. Defaults to a client
configured to look at an in-memory feature store.
Returns:
FeatureFlag or None: Returns a created or existing FeatureFlag, or None
if feature flagging is disabled.
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return
if not client:
client = get_features_client()
# If the flag exists in the feature flag store, we'll consider the
# enabled state, bucketer, and conditions currently saved in the
# feature flag store as canonical.
if client.exists(flag_name):
return client.get(flag_name)
flag = client.create(flag_name, is_enabled=is_enabled, client_data=client_data)
if bucketer:
flag.set_bucketer(bucketer)
if conditions:
flag.set_conditions(conditions)
return flag
|
50,037 |
def _run(handle_data,
initialize,
before_trading_start,
analyze,
algofile,
algotext,
defines,
data_frequency,
capital_base,
bundle,
bundle_timestamp,
start,
end,
output,
trading_calendar,
print_algo,
metrics_set,
local_namespace,
environ,
blotter,
benchmark_spec):
"""Run a backtest for the given algorithm.
This is shared between the cli and :func:`zipline.run_algo`.
"""
bundle_data = bundles.load(
bundle,
environ,
bundle_timestamp,
)
if trading_calendar is None:
trading_calendar = get_calendar('XNYS')
# date parameter validation
if trading_calendar.session_distance(start, end) < 1:
raise _RunAlgoError(
'There are no trading days between %s and %s' % (
start.date(),
end.date(),
),
)
benchmark_sid, benchmark_returns = benchmark_spec.resolve(
asset_finder=bundle_data.asset_finder,
start_date=start,
end_date=end,
)
if algotext is not None:
if local_namespace:
ip = get_ipython() # noqa
namespace = ip.user_ns
else:
namespace = {}
for assign in defines:
try:
name, value = assign.split('=', 2)
except ValueError:
raise ValueError(
'invalid define %r, should be of the form name=value' %
assign,
)
try:
# evaluate in the same namespace so names may refer to
# each other
namespace[name] = eval(value, namespace)
except Exception as e:
raise ValueError(
'failed to execute definition for name %r: %s' % (name, e),
)
elif defines:
raise _RunAlgoError(
'cannot pass define without `algotext`',
"cannot pass '-D' / '--define' without '-t' / '--algotext'",
)
else:
namespace = {}
if algofile is not None:
algotext = algofile.read()
if print_algo:
if PYGMENTS:
highlight(
algotext,
PythonLexer(),
TerminalFormatter(),
outfile=sys.stdout,
)
else:
click.echo(algotext)
first_trading_day = \
bundle_data.equity_minute_bar_reader.first_trading_day
data = DataPortal(
bundle_data.asset_finder,
trading_calendar=trading_calendar,
first_trading_day=first_trading_day,
equity_minute_reader=bundle_data.equity_minute_bar_reader,
equity_daily_reader=bundle_data.equity_daily_bar_reader,
adjustment_reader=bundle_data.adjustment_reader,
)
pipeline_loader = USEquityPricingLoader.without_fx(
bundle_data.equity_daily_bar_reader,
bundle_data.adjustment_reader,
)
def choose_loader(column):
if column in USEquityPricing.columns:
return pipeline_loader
raise ValueError(
"No PipelineLoader registered for column %s." % column
)
if isinstance(metrics_set, six.string_types):
try:
metrics_set = metrics.load(metrics_set)
except ValueError as e:
raise _RunAlgoError(str(e))
if isinstance(blotter, six.string_types):
try:
blotter = load(Blotter, blotter)
except ValueError as e:
raise _RunAlgoError(str(e))
try:
perf = TradingAlgorithm(
namespace=namespace,
data_portal=data,
get_pipeline_loader=choose_loader,
trading_calendar=trading_calendar,
sim_params=SimulationParameters(
start_session=start,
end_session=end,
trading_calendar=trading_calendar,
capital_base=capital_base,
data_frequency=data_frequency,
),
metrics_set=metrics_set,
blotter=blotter,
benchmark_returns=benchmark_returns,
benchmark_sid=benchmark_sid,
**{
'initialize': initialize,
'handle_data': handle_data,
'before_trading_start': before_trading_start,
'analyze': analyze,
} if algotext is None else {
'algo_filename': getattr(algofile, 'name', '<algorithm>'),
'script': algotext,
}
).run()
except NoBenchmark:
raise _RunAlgoError(
(
'no ``benchmark_spec`` was provided and'
' ``zipline.api.set_benchmark`` was not called in'
' ``initialize``'
),
(
"neither '--benchmark-symbol' nor '--benchmark-sid' was"
" provided and ``zipline.api.set_benchmark`` was not called"
" in ``initialize``, did you mean to pass '--no-benchmark'"
),
)
if output == '-':
click.echo(str(perf))
elif output != os.devnull: # make the zipline magic not write any data
perf.to_pickle(output)
return perf
|
def _run(handle_data,
initialize,
before_trading_start,
analyze,
algofile,
algotext,
defines,
data_frequency,
capital_base,
bundle,
bundle_timestamp,
start,
end,
output,
trading_calendar,
print_algo,
metrics_set,
local_namespace,
environ,
blotter,
benchmark_spec):
"""Run a backtest for the given algorithm.
This is shared between the cli and :func:`zipline.run_algo`.
"""
bundle_data = bundles.load(
bundle,
environ,
bundle_timestamp,
)
if trading_calendar is None:
trading_calendar = get_calendar('XNYS')
# date parameter validation
if trading_calendar.session_distance(start, end) < 1:
raise _RunAlgoError(
'There are no trading days between %s and %s' % (
start.date(),
end.date(),
),
)
benchmark_sid, benchmark_returns = benchmark_spec.resolve(
asset_finder=bundle_data.asset_finder,
start_date=start,
end_date=end,
)
if algotext is not None:
if local_namespace:
ip = get_ipython() # noqa
namespace = ip.user_ns
else:
namespace = {}
for assign in defines:
try:
name, value = assign.split('=', 2)
except ValueError:
raise ValueError(
'invalid define %r, should be of the form name=value' %
assign,
)
try:
# evaluate in the same namespace so names may refer to
# each other
namespace[name] = eval(value, namespace)
except Exception as e:
raise ValueError(
'failed to execute definition for name %r: %s' % (name, e),
)
elif defines:
raise _RunAlgoError(
'cannot pass define without `algotext`',
"cannot pass '-D' / '--define' without '-t' / '--algotext'",
)
else:
namespace = {}
if algofile is not None:
algotext = algofile.read()
if print_algo:
if PYGMENTS:
highlight(
algotext,
PythonLexer(),
TerminalFormatter(),
outfile=sys.stdout,
)
else:
click.echo(algotext)
first_trading_day = \
bundle_data.equity_minute_bar_reader.first_trading_day
data = DataPortal(
bundle_data.asset_finder,
trading_calendar=trading_calendar,
first_trading_day=first_trading_day,
equity_minute_reader=bundle_data.equity_minute_bar_reader,
equity_daily_reader=bundle_data.equity_daily_bar_reader,
adjustment_reader=bundle_data.adjustment_reader,
)
pipeline_loader = USEquityPricingLoader.without_fx(
bundle_data.equity_daily_bar_reader,
bundle_data.adjustment_reader,
)
def choose_loader(column):
if column in USEquityPricing.columns:
return pipeline_loader
raise ValueError(
"No PipelineLoader registered for column %s." % column
)
if isinstance(metrics_set, six.string_types):
try:
metrics_set = metrics.load(metrics_set)
except ValueError as e:
raise _RunAlgoError(str(e))
if isinstance(blotter, six.string_types):
try:
blotter = load(Blotter, blotter)
except ValueError as e:
raise _RunAlgoError(str(e))
try:
perf = TradingAlgorithm(
namespace=namespace,
data_portal=data,
get_pipeline_loader=choose_loader,
trading_calendar=trading_calendar,
sim_params=SimulationParameters(
start_session=start,
end_session=end,
trading_calendar=trading_calendar,
capital_base=capital_base,
data_frequency=data_frequency,
),
metrics_set=metrics_set,
blotter=blotter,
benchmark_returns=benchmark_returns,
benchmark_sid=benchmark_sid,
**{
'initialize': initialize,
'handle_data': handle_data,
'before_trading_start': before_trading_start,
'analyze': analyze,
} if algotext is None else {
'algo_filename': getattr(algofile, 'name', '<algorithm>'),
'script': algotext,
}
).run()
except NoBenchmark:
raise _RunAlgoError(
(
'No ``benchmark_spec`` was provided, and'
' ``zipline.api.set_benchmark`` was not called in'
' ``initialize``.'
),
(
"neither '--benchmark-symbol' nor '--benchmark-sid' was"
" provided and ``zipline.api.set_benchmark`` was not called"
" in ``initialize``, did you mean to pass '--no-benchmark'"
),
)
if output == '-':
click.echo(str(perf))
elif output != os.devnull: # make the zipline magic not write any data
perf.to_pickle(output)
return perf
|
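As a side note, the '-D name=value' handling inside _run can be exercised on its own. Below is a minimal, self-contained sketch of that loop; the define strings are made up for illustration and are not real CLI input.

# Sketch of how '-D name=value' defines are evaluated into a shared namespace,
# mirroring the loop in _run above. Later defines can refer to earlier ones
# because every expression is evaluated against the same dict.
defines = ["window=20", "threshold=window * 2"]   # illustrative values only

namespace = {}
for assign in defines:
    try:
        name, value = assign.split('=', 2)
    except ValueError:
        raise ValueError('invalid define %r, should be of the form name=value' % assign)
    # evaluate in the shared namespace so 'threshold' can see 'window'
    namespace[name] = eval(value, namespace)

print(namespace['window'], namespace['threshold'])   # 20 40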
30,553 |
def describe_vpcs_command(args):
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
obj = vars(client._client_config)
kwargs = {}
data = []
if args.get('filters') is not None:
kwargs.update({'Filters': parse_filter_field(args.get('filters'))})
if args.get('vpcIds') is not None:
kwargs.update({'VpcIds': parse_resource_ids(args.get('vpcIds'))})
response = client.describe_vpcs(**kwargs)
if len(response['Vpcs'] == 0):
demisto.results('No VPCs were found.')
return
for i, vpc in enumerate(response['Vpcs']):
data.append({
'CidrBlock': vpc['CidrBlock'],
'DhcpOptionsId': vpc['DhcpOptionsId'],
'State': vpc['State'],
'VpcId': vpc['VpcId'],
'InstanceTenancy': vpc['InstanceTenancy'],
'IsDefault': vpc['IsDefault'],
'Region': obj['_user_provided_options']['region_name'],
})
if 'Tags' in vpc:
for tag in vpc['Tags']:
data[i].update({
tag['Key']: tag['Value']
})
try:
output = json.dumps(response['Vpcs'], cls=DatetimeEncoder)
raw = json.loads(output)
raw[0].update({'Region': obj['_user_provided_options']['region_name']})
except ValueError as e:
return_error('Could not decode/encode the raw response - {err_msg}'.format(err_msg=e))
ec = {'AWS.EC2.Vpcs(val.VpcId === obj.VpcId)': raw}
human_readable = tableToMarkdown('AWS EC2 Vpcs', data)
return_outputs(human_readable, ec)
|
def describe_vpcs_command(args):
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
obj = vars(client._client_config)
kwargs = {}
data = []
if args.get('filters') is not None:
kwargs.update({'Filters': parse_filter_field(args.get('filters'))})
if args.get('vpcIds') is not None:
kwargs.update({'VpcIds': parse_resource_ids(args.get('vpcIds'))})
response = client.describe_vpcs(**kwargs)
if len(response['Vpcs']) == 0:
demisto.results('No VPCs were found.')
return
for i, vpc in enumerate(response['Vpcs']):
data.append({
'CidrBlock': vpc['CidrBlock'],
'DhcpOptionsId': vpc['DhcpOptionsId'],
'State': vpc['State'],
'VpcId': vpc['VpcId'],
'InstanceTenancy': vpc['InstanceTenancy'],
'IsDefault': vpc['IsDefault'],
'Region': obj['_user_provided_options']['region_name'],
})
if 'Tags' in vpc:
for tag in vpc['Tags']:
data[i].update({
tag['Key']: tag['Value']
})
try:
output = json.dumps(response['Vpcs'], cls=DatetimeEncoder)
raw = json.loads(output)
raw[0].update({'Region': obj['_user_provided_options']['region_name']})
except ValueError as e:
return_error('Could not decode/encode the raw response - {err_msg}'.format(err_msg=e))
ec = {'AWS.EC2.Vpcs(val.VpcId === obj.VpcId)': raw}
human_readable = tableToMarkdown('AWS EC2 Vpcs', data)
return_outputs(human_readable, ec)
|
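The pair above exists to fix a misplaced parenthesis in the emptiness check. The difference is easy to reproduce in isolation; the response dict below is a stand-in, not real API output.

# len(x == 0) compares the list with 0 first, yielding a bool that has no len().
response = {'Vpcs': []}   # stand-in for a boto3 describe_vpcs() result

print(response['Vpcs'] == 0)        # False -- a list never equals an int
print(len(response['Vpcs']) == 0)   # True  -- the corrected emptiness check
try:
    len(response['Vpcs'] == 0)      # the original expression
except TypeError as err:
    print(err)                      # object of type 'bool' has no len()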
6,669 |
def execute():
if "education" in frappe.get_installed_apps():
return
frappe.delete_doc("Workspace", "Education", ignore_missing=True, force=True)
pages = frappe.get_all("Page", {"module": "education"}, pluck="name")
for page in pages:
frappe.delete_doc("Page", page, ignore_missing=True, force=True)
reports = frappe.get_all("Report", {"module": "education", "is_standard": "Yes"}, pluck="name")
for report in reports:
frappe.delete_doc("Report", report, ignore_missing=True, force=True)
print_formats = frappe.get_all(
"Print Format", {"module": "education", "standard": "Yes"}, pluck="name"
)
for print_format in print_formats:
frappe.delete_doc("Print Format", print_format, ignore_missing=True, force=True)
frappe.reload_doc("website", "doctype", "website_settings")
forms = frappe.get_all("Web Form", {"module": "education", "is_standard": 1}, pluck="name")
for form in forms:
frappe.delete_doc("Web Form", form, ignore_missing=True, force=True)
dashboards = frappe.get_all("Dashboard", {"module": "education", "is_standard": 1}, pluck="name")
for dashboard in dashboards:
frappe.delete_doc("Dashboard", dashboard, ignore_missing=True, force=True)
dashboards = frappe.get_all(
"Dashboard Chart", {"module": "education", "is_standard": 1}, pluck="name"
)
for dashboard in dashboards:
frappe.delete_doc("Dashboard Chart", dashboard, ignore_missing=True, force=True)
frappe.reload_doc("desk", "doctype", "number_card")
cards = frappe.get_all("Number Card", {"module": "education", "is_standard": 1}, pluck="name")
for card in cards:
frappe.delete_doc("Number Card", card, ignore_missing=True, force=True)
doctypes = frappe.get_all("DocType", {"module": "education", "custom": 0}, pluck="name")
for doctype in doctypes:
frappe.delete_doc("DocType", doctype, ignore_missing=True)
frappe.delete_doc("Module Def", "Education", ignore_missing=True, force=True)
click.secho(
"Education Module is moved to a separate app"
"Please install the app to continue using the module: https://github.com/frappe/healthcare",
fg="yellow",
)
|
def execute():
if "education" in frappe.get_installed_apps():
return
frappe.delete_doc("Workspace", "Education", ignore_missing=True, force=True)
pages = frappe.get_all("Page", {"module": "education"}, pluck="name")
for page in pages:
frappe.delete_doc("Page", page, ignore_missing=True, force=True)
reports = frappe.get_all("Report", {"module": "education", "is_standard": "Yes"}, pluck="name")
for report in reports:
frappe.delete_doc("Report", report, ignore_missing=True, force=True)
print_formats = frappe.get_all(
"Print Format", {"module": "education", "standard": "Yes"}, pluck="name"
)
for print_format in print_formats:
frappe.delete_doc("Print Format", print_format, ignore_missing=True, force=True)
frappe.reload_doc("website", "doctype", "website_settings")
forms = frappe.get_all("Web Form", {"module": "education", "is_standard": 1}, pluck="name")
for form in forms:
frappe.delete_doc("Web Form", form, ignore_missing=True, force=True)
dashboards = frappe.get_all("Dashboard", {"module": "education", "is_standard": 1}, pluck="name")
for dashboard in dashboards:
frappe.delete_doc("Dashboard", dashboard, ignore_missing=True, force=True)
dashboards = frappe.get_all(
"Dashboard Chart", {"module": "education", "is_standard": 1}, pluck="name"
)
for dashboard in dashboards:
frappe.delete_doc("Dashboard Chart", dashboard, ignore_missing=True, force=True)
frappe.reload_doc("desk", "doctype", "number_card")
cards = frappe.get_all("Number Card", {"module": "education", "is_standard": 1}, pluck="name")
for card in cards:
frappe.delete_doc("Number Card", card, ignore_missing=True, force=True)
doctypes = frappe.get_all("DocType", {"module": "education", "custom": 0}, pluck="name")
for doctype in doctypes:
frappe.delete_doc("DocType", doctype, ignore_missing=True)
frappe.delete_doc("Module Def", "Education", ignore_missing=True, force=True)
click.secho(
"Education Module is moved to a separate app"
"Please install the app to continue using the module: https://github.com/frappe/education",
fg="yellow",
)
|
7,359 |
def moments_hu(nu):
"""Calculate Hu's set of image moments (2D-only).
Note that this set of moments is translation, scale and
rotation invariant.
Parameters
----------
nu : (M, M) array
Normalized central image moments, where M must be >= 4.
Returns
-------
nu : (7,) array
Hu's set of image moments.
References
----------
.. [1] M. K. Hu, "Visual Pattern Recognition by Moment Invariants",
IRE Trans. Info. Theory, vol. IT-8, pp. 179-187, 1962
.. [2] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
Core Algorithms. Springer-Verlag, London, 2009.
.. [3] B. Jähne. Digital Image Processing. Springer-Verlag,
Berlin-Heidelberg, 6. edition, 2005.
.. [4] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
Features, from Lecture notes in computer science, p. 676. Springer,
Berlin, 1993.
.. [5] https://en.wikipedia.org/wiki/Image_moment
Examples
--------
>>> image = np.zeros((20, 20), dtype=np.double)
>>> image[13:17, 13:17] = 0.5
>>> image[10:12, 10:12] = 1
>>> mu = moments_central(image)
>>> nu = moments_normalized(mu)
>>> moments_hu(nu)
array([7.45370370e-01, 3.51165981e-01, 1.04049179e-01, 4.06442107e-02,
2.64312299e-03, 2.40854582e-02, 4.33680869e-19])
"""
dtype = np.float32 if nu.dtype == 'float32' else np.float64
return _moments_cy.moments_hu(nu.astype(dtype, copy=False))
|
def moments_hu(nu):
"""Calculate Hu's set of image moments (2D-only).
Note that this set of moments is proved to be translation, scale and
rotation invariant.
Parameters
----------
nu : (M, M) array
Normalized central image moments, where M must be >= 4.
Returns
-------
nu : (7,) array
Hu's set of image moments.
References
----------
.. [1] M. K. Hu, "Visual Pattern Recognition by Moment Invariants",
IRE Trans. Info. Theory, vol. IT-8, pp. 179-187, 1962
.. [2] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
Core Algorithms. Springer-Verlag, London, 2009.
.. [3] B. Jähne. Digital Image Processing. Springer-Verlag,
Berlin-Heidelberg, 6. edition, 2005.
.. [4] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
Features, from Lecture notes in computer science, p. 676. Springer,
Berlin, 1993.
.. [5] https://en.wikipedia.org/wiki/Image_moment
Examples
--------
>>> image = np.zeros((20, 20), dtype=np.double)
>>> image[13:17, 13:17] = 0.5
>>> image[10:12, 10:12] = 1
>>> mu = moments_central(image)
>>> nu = moments_normalized(mu)
>>> moments_hu(nu)
array([7.45370370e-01, 3.51165981e-01, 1.04049179e-01, 4.06442107e-02,
2.64312299e-03, 2.40854582e-02, 4.33680869e-19])
"""
dtype = np.float32 if nu.dtype == 'float32' else np.float64
return _moments_cy.moments_hu(nu.astype(dtype, copy=False))
|
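As a quick illustration of the invariance claim in the docstring, the sketch below is not part of the library; the blob shape and rotation angle are arbitrary, and it assumes scikit-image and NumPy are installed.

# Rotating a blob and recomputing Hu moments should give nearly the same
# seven values; residual differences come from resampling on the pixel grid.
import numpy as np
from skimage.measure import moments_central, moments_hu, moments_normalized
from skimage.transform import rotate

image = np.zeros((50, 50), dtype=np.double)
image[20:30, 22:27] = 1.0
rotated = rotate(image, angle=30)

hu_original = moments_hu(moments_normalized(moments_central(image)))
hu_rotated = moments_hu(moments_normalized(moments_central(rotated)))
print(np.round(hu_original, 4))
print(np.round(hu_rotated, 4))   # close to the values above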
51,465 |
def remove_duplicates(entrypoints):
# sort and group entrypoints by name
entrypoints = sorted(entrypoints, key=lambda ep: ep.name)
entrypoints_grouped = itertools.groupby(entrypoints, key=lambda ep: ep.name)
# check if there are multiple entrypoints for the same name
unique_entrypoints = []
for name, matches in entrypoints_grouped:
matches = list(matches)
unique_entrypoints.append(matches[0])
matches_len = len(matches)
if matches_len > 1:
selected_module_name = matches[0].name
all_module_names = [e.name for e in matches]
warnings.warn(
f"Found {matches_len} entrypoints for the engine name {name}:"
f"\n {all_module_names}.\n It will be used: {selected_module_name}.",
RuntimeWarning,
)
return unique_entrypoints
|
def remove_duplicates(entrypoints):
# sort and group entrypoints by name
entrypoints = sorted(entrypoints, key=lambda ep: ep.name)
entrypoints_grouped = itertools.groupby(entrypoints, key=lambda ep: ep.name)
# check if there are multiple entrypoints for the same name
unique_entrypoints = []
for name, matches in entrypoints_grouped:
matches = list(matches)
unique_entrypoints.append(matches[0])
matches_len = len(matches)
if matches_len > 1:
selected_module_name = matches[0].value
all_module_names = [e.value for e in matches]
warnings.warn(
f"Found {matches_len} entrypoints for the engine name {name}:"
f"\n {all_module_names}.\n It will be used: {selected_module_name}.",
RuntimeWarning,
)
return unique_entrypoints
|
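A small sketch of the behaviour changed above, assuming remove_duplicates as defined is in scope; the engine name, module paths, and group string are made up for illustration.

# Two fictional packages register the same engine name; only the first match
# is kept, and the warning now lists the module paths (ep.value), which is
# more useful than repeating the identical engine names (ep.name).
import itertools  # used by remove_duplicates above
import warnings
from importlib.metadata import EntryPoint

entrypoints = [
    EntryPoint("netcdf4", "pkg_a.backend:Entrypoint", "xarray.backends"),
    EntryPoint("netcdf4", "pkg_b.backend:Entrypoint", "xarray.backends"),
]

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    unique = remove_duplicates(entrypoints)

print(len(unique))          # 1 -- only the first match is kept
print(caught[0].message)    # mentions both module paths, pkg_a wins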
30,085 |
def test_describe_bug_905():
# Explicitly test https://github.com/dib-lab/sourmash/issues/905
with utils.TempDirectory() as location:
testdata1 = utils.get_test_data('protein_905.sig')
status, out, err = utils.runscript('sourmash',
['signature', 'describe',
testdata1],
in_directory=location)
expected_output == """== This is sourmash version 3.2.2. ==
== Please cite Brown and Irber (2016), doi:10.21105/joss.00027. ==
loaded 3 signatures total.
---
signature filename: test.sig
signature: test.prot
source file: test.prot
md5: 57ae47c24a08a24b630dae80f0c6e256
k=11 molecule=protein num=0 scaled=20 seed=42 track_abundance=1
size: 34
signature license: CC0
---
signature filename: test.sig
signature: test.prot
source file: test.prot
md5: 407a774f7a2e13c650ff67255d2056f8
k=11 molecule=dayhoff num=0 scaled=20 seed=42 track_abundance=1
size: 4
signature license: CC0
---
signature filename: test.sig
signature: test.prot
source file: test.prot
md5: 72560192a7f4c23f0b4d71b3eaaa44db
k=11 molecule=hp num=0 scaled=20 seed=42 track_abundance=1
size: 2
signature license: CC0"""
print(out)
print(err)
assert status == 0
# Add final trailing slash for this OS
testdata_dirname = os.path.dirname(testdata1) + os.sep
location = location + os.sep
for line in out.splitlines():
cleaned_line = line.strip().replace(
testdata_dirname, '').replace(location, '')
assert cleaned_line in expected_output
|
def test_describe_bug_905():
# Explicitly test https://github.com/dib-lab/sourmash/issues/905
with utils.TempDirectory() as location:
testdata1 = utils.get_test_data('protein_905.sig')
status, out, err = utils.runscript('sourmash',
['signature', 'describe',
testdata1],
in_directory=location)
expected_output = """
== Please cite Brown and Irber (2016), doi:10.21105/joss.00027. ==
loaded 3 signatures total.
---
signature filename: test.sig
signature: test.prot
source file: test.prot
md5: 57ae47c24a08a24b630dae80f0c6e256
k=11 molecule=protein num=0 scaled=20 seed=42 track_abundance=1
size: 34
signature license: CC0
---
signature filename: test.sig
signature: test.prot
source file: test.prot
md5: 407a774f7a2e13c650ff67255d2056f8
k=11 molecule=dayhoff num=0 scaled=20 seed=42 track_abundance=1
size: 4
signature license: CC0
---
signature filename: test.sig
signature: test.prot
source file: test.prot
md5: 72560192a7f4c23f0b4d71b3eaaa44db
k=11 molecule=hp num=0 scaled=20 seed=42 track_abundance=1
size: 2
signature license: CC0"""
print(out)
print(err)
assert status == 0
# Add final trailing slash for this OS
testdata_dirname = os.path.dirname(testdata1) + os.sep
location = location + os.sep
for line in out.splitlines():
cleaned_line = line.strip().replace(
testdata_dirname, '').replace(location, '')
assert cleaned_line in expected_output
|
31,181 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# get Acalvio API Server url
base_url = demisto.params()['url']
# get Acalvio API Key
apikey = demisto.params()['apikey']
# check if SSL is to be verified
verify_certificate = demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
# set the headers
headers = {
'api_key': apikey,
'content-type': 'application/json'
}
demisto.log(f'Command being called is \'{demisto.command()}\'')
result = None
acalerror = AcalError()
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button
result, acalerror = do_test_connection(client)
elif demisto.command() == 'acalvio-is-deception-host':
result, acalerror = \
do_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-file':
result, acalerror = \
do_deception_file_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-user':
result, acalerror = \
do_deception_user_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-host':
result, acalerror = \
do_mute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-host':
result, acalerror = \
do_unmute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-on-endpoint':
result, acalerror = \
do_mute_deception_ep_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-on-endpoint':
result, acalerror = \
do_unmute_deception_ep_command(client, demisto.args())
# Log exceptions
except Exception as e:
acalerror = AcalError(message=f'Failed to execute \'{demisto.command()}\' command. Error: {str(e)}')
finally:
if result is not None:
return_results(result)
else:
if acalerror is None:
acalerror = AcalError()
return_error(message=acalerror.message,
error=acalerror.error,
outputs=acalerror.outputs)
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# get Acalvio API Server url
base_url = demisto.params()['url']
# get Acalvio API Key
apikey = demisto.params()['apikey']
# check if SSL is to be verified
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
# set the headers
headers = {
'api_key': apikey,
'content-type': 'application/json'
}
demisto.log(f'Command being called is \'{demisto.command()}\'')
result = None
acalerror = AcalError()
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button
result, acalerror = do_test_connection(client)
elif demisto.command() == 'acalvio-is-deception-host':
result, acalerror = \
do_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-file':
result, acalerror = \
do_deception_file_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-user':
result, acalerror = \
do_deception_user_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-host':
result, acalerror = \
do_mute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-host':
result, acalerror = \
do_unmute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-on-endpoint':
result, acalerror = \
do_mute_deception_ep_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-on-endpoint':
result, acalerror = \
do_unmute_deception_ep_command(client, demisto.args())
# Log exceptions
except Exception as e:
acalerror = AcalError(message=f'Failed to execute \'{demisto.command()}\' command. Error: {str(e)}')
finally:
if result is not None:
return_results(result)
else:
if acalerror is None:
acalerror = AcalError()
return_error(message=acalerror.message,
error=acalerror.error,
outputs=acalerror.outputs)
|
40,051 |
def get_context(
project_config: Optional[Union["DataContextConfig", dict]] = None,
context_root_dir: Optional[str] = None,
runtime_environment: Optional[dict] = None,
ge_cloud_base_url: Optional[str] = None,
ge_cloud_access_token: Optional[str] = None,
ge_cloud_organization_id: Optional[str] = None,
ge_cloud_mode: Optional[bool] = False,
) -> Union["DataContext", "BaseDataContext", "CloudDataContext"]:
"""
Method to return the appropriate DataContext depending on parameters and environment.
Usage:
import great_expectations as gx
my_context = gx.get_context([parameters])
1. If gx.get_context() is run in a filesystem where `great_expectations init` has been run, then it will return a
DataContext
2. If gx.get_context() is passed in a `context_root_dir` (which contains great_expectations.yml) then it will return
a DataContext
3. If gx.get_context() is passed in an in-memory `project_config` then it will return BaseDataContext.
`context_root_dir` can also be passed in, but the configurations from the in-memory config will override the
configurations in the `great_expectations.yml` file.
4. If GX is being run in the cloud, and the information needed for ge_cloud_config (ie ge_cloud_base_url,
ge_cloud_access_token, ge_cloud_organization_id) are passed in as parameters to get_context(), configured as
environment variables, or in a .conf file, then get_context() will return a CloudDataContext.
TODO: This method will eventually return FileDataContext and EphemeralDataContext, rather than DataContext and BaseDataContext
Args:
project_config (dict or DataContextConfig): In-memory configuration for DataContext.
context_root_dir (str): Path to directory that contains great_expectations.yml file
runtime_environment (dict): A dictionary of values that can be passed to a DataContext when it is instantiated.
These values will override both values from the config variables file and
from environment variables.
The following parameters are relevant when running ge_cloud
ge_cloud_base_url (str): url for ge_cloud endpoint.
ge_cloud_access_token (str): access_token for ge_cloud account.
ge_cloud_organization_id (str): org_id for ge_cloud account.
ge_cloud_mode (bool): bool flag to specify whether to run GE in cloud mode (default is False).
Returns:
DataContext. Either a DataContext, BaseDataContext, or CloudDataContext depending on environment and/or
parameters
"""
from great_expectations.data_context.data_context import (
BaseDataContext,
CloudDataContext,
DataContext,
)
if (
CloudDataContext.is_ge_cloud_config_available(
ge_cloud_base_url=ge_cloud_base_url,
ge_cloud_access_token=ge_cloud_access_token,
ge_cloud_organization_id=ge_cloud_organization_id,
)
or ge_cloud_mode
):
return CloudDataContext(
project_config=project_config,
runtime_environment=runtime_environment,
context_root_dir=context_root_dir,
ge_cloud_base_url=ge_cloud_base_url,
ge_cloud_access_token=ge_cloud_access_token,
ge_cloud_organization_id=ge_cloud_organization_id,
)
elif project_config is not None:
return BaseDataContext(
project_config=project_config,
context_root_dir=context_root_dir,
runtime_environment=runtime_environment,
)
else:
return DataContext(
context_root_dir=context_root_dir,
runtime_environment=runtime_environment,
)
|
def get_context(
project_config: Optional[Union["DataContextConfig", dict]] = None,
context_root_dir: Optional[str] = None,
runtime_environment: Optional[dict] = None,
ge_cloud_base_url: Optional[str] = None,
ge_cloud_access_token: Optional[str] = None,
ge_cloud_organization_id: Optional[str] = None,
ge_cloud_mode: bool = False,
) -> Union["DataContext", "BaseDataContext", "CloudDataContext"]:
"""
Method to return the appropriate DataContext depending on parameters and environment.
Usage:
import great_expectations as gx
my_context = gx.get_context([parameters])
1. If gx.get_context() is run in a filesystem where `great_expectations init` has been run, then it will return a
DataContext
2. If gx.get_context() is passed in a `context_root_dir` (which contains great_expectations.yml) then it will return
a DataContext
3. If gx.get_context() is passed in an in-memory `project_config` then it will return BaseDataContext.
`context_root_dir` can also be passed in, but the configurations from the in-memory config will override the
configurations in the `great_expectations.yml` file.
4. If GX is being run in the cloud, and the information needed for ge_cloud_config (ie ge_cloud_base_url,
ge_cloud_access_token, ge_cloud_organization_id) are passed in as parameters to get_context(), configured as
environment variables, or in a .conf file, then get_context() will return a CloudDataContext.
TODO: This method will eventually return FileDataContext and EphemeralDataContext, rather than DataContext and BaseDataContext
Args:
project_config (dict or DataContextConfig): In-memory configuration for DataContext.
context_root_dir (str): Path to directory that contains great_expectations.yml file
runtime_environment (dict): A dictionary of values that can be passed to a DataContext when it is instantiated.
These values will override both values from the config variables file and
from environment variables.
The following parameters are relevant when running ge_cloud
ge_cloud_base_url (str): url for ge_cloud endpoint.
ge_cloud_access_token (str): access_token for ge_cloud account.
ge_cloud_organization_id (str): org_id for ge_cloud account.
ge_cloud_mode (bool): bool flag to specify whether to run GE in cloud mode (default is False).
Returns:
DataContext. Either a DataContext, BaseDataContext, or CloudDataContext depending on environment and/or
parameters
"""
from great_expectations.data_context.data_context import (
BaseDataContext,
CloudDataContext,
DataContext,
)
if (
CloudDataContext.is_ge_cloud_config_available(
ge_cloud_base_url=ge_cloud_base_url,
ge_cloud_access_token=ge_cloud_access_token,
ge_cloud_organization_id=ge_cloud_organization_id,
)
or ge_cloud_mode
):
return CloudDataContext(
project_config=project_config,
runtime_environment=runtime_environment,
context_root_dir=context_root_dir,
ge_cloud_base_url=ge_cloud_base_url,
ge_cloud_access_token=ge_cloud_access_token,
ge_cloud_organization_id=ge_cloud_organization_id,
)
elif project_config is not None:
return BaseDataContext(
project_config=project_config,
context_root_dir=context_root_dir,
runtime_environment=runtime_environment,
)
else:
return DataContext(
context_root_dir=context_root_dir,
runtime_environment=runtime_environment,
)
|
45,867 |
def tiltProjection(taux: torch.Tensor, tauy: torch.Tensor, inv: bool = False) -> torch.Tensor:
r"""Estimate the tilt projection matrix or the inverse tilt projection matrix
Args:
taux (torch.Tensor): Rotation angle in radians around the :math:`x`-axis with shape :math:`(*, 1)`.
tauy (torch.Tensor): Rotation angle in radians around the :math:`y`-axis with shape :math:`(*, 1)`.
inv (bool): False to obtain the tilt projection matrix. True for the inverse matrix.
Returns:
torch.Tensor: Tilt projection matrix, or its inverse, with shape :math:`(*, 3, 3)`.
"""
assert taux.dim() == tauy.dim()
assert taux.numel() == tauy.numel()
ndim = taux.dim()
taux = taux.reshape(-1)
tauy = tauy.reshape(-1)
cTx = torch.cos(taux)
sTx = torch.sin(taux)
cTy = torch.cos(tauy)
sTy = torch.sin(tauy)
zero = torch.zeros_like(cTx)
one = torch.ones_like(cTx)
Rx = torch.stack([one, zero, zero, zero, cTx, sTx, zero, -sTx, cTx], -1).reshape(-1, 3, 3)
Ry = torch.stack([cTy, zero, -sTy, zero, one, zero, sTy, zero, cTy], -1).reshape(-1, 3, 3)
R = Ry @ Rx
if inv:
invR22 = 1 / R[..., 2, 2]
invPz = torch.stack(
[invR22, zero, R[..., 0, 2] * invR22,
zero, invR22, R[..., 1, 2] * invR22,
zero, zero, one], -1
).reshape(-1, 3, 3)
invTilt = R.transpose(-1, -2) @ invPz
if ndim == 0:
invTilt = torch.squeeze(invTilt)
return invTilt
else:
Pz = torch.stack(
[R[..., 2, 2], zero, -R[..., 0, 2],
zero, R[..., 2, 2], -R[..., 1, 2],
zero, zero, one], -1
).reshape(-1, 3, 3)
tilt = Pz @ R.transpose(-1, -2)
if ndim == 0:
tilt = torch.squeeze(tilt)
return tilt
|
def tilt_projection(taux: torch.Tensor, tauy: torch.Tensor, return_inverse: bool = False) -> torch.Tensor:
r"""Estimate the tilt projection matrix or the inverse tilt projection matrix
Args:
taux (torch.Tensor): Rotation angle in radians around the :math:`x`-axis with shape :math:`(*, 1)`.
tauy (torch.Tensor): Rotation angle in radians around the :math:`y`-axis with shape :math:`(*, 1)`.
return_inverse (bool): False to obtain the tilt projection matrix. True for the inverse matrix.
Returns:
torch.Tensor: Tilt projection matrix, or its inverse, with shape :math:`(*, 3, 3)`.
"""
assert taux.dim() == tauy.dim()
assert taux.numel() == tauy.numel()
ndim = taux.dim()
taux = taux.reshape(-1)
tauy = tauy.reshape(-1)
cTx = torch.cos(taux)
sTx = torch.sin(taux)
cTy = torch.cos(tauy)
sTy = torch.sin(tauy)
zero = torch.zeros_like(cTx)
one = torch.ones_like(cTx)
Rx = torch.stack([one, zero, zero, zero, cTx, sTx, zero, -sTx, cTx], -1).reshape(-1, 3, 3)
Ry = torch.stack([cTy, zero, -sTy, zero, one, zero, sTy, zero, cTy], -1).reshape(-1, 3, 3)
R = Ry @ Rx
if return_inverse:
invR22 = 1 / R[..., 2, 2]
invPz = torch.stack(
[invR22, zero, R[..., 0, 2] * invR22,
zero, invR22, R[..., 1, 2] * invR22,
zero, zero, one], -1
).reshape(-1, 3, 3)
invTilt = R.transpose(-1, -2) @ invPz
if ndim == 0:
invTilt = torch.squeeze(invTilt)
return invTilt
else:
Pz = torch.stack(
[R[..., 2, 2], zero, -R[..., 0, 2],
zero, R[..., 2, 2], -R[..., 1, 2],
zero, zero, one], -1
).reshape(-1, 3, 3)
tilt = Pz @ R.transpose(-1, -2)
if ndim == 0:
tilt = torch.squeeze(tilt)
return tilt
|
34,802 |
def test_default_predict__excludes_rejected_action(
default_ensemble: DefaultPolicyPredictionEnsemble,
):
domain = Domain.load("data/test_domains/default.yml")
excluded_action = domain.action_names_or_texts[0]
tracker = DialogueStateTracker.from_events(
sender_id="arbitrary",
evts=[
UserUttered("hi"),
ActionExecuted(excluded_action),
ActionExecutionRejected(excluded_action), # not "Rejection"
],
)
num_actions = len(domain.action_names_or_texts)
predictions = [
PolicyPrediction2(
policy_name=str(idx), probabilities=[1.0] * num_actions, policy_priority=idx
)
for idx in range(2)
]
index_of_excluded_action = domain.index_for_action(excluded_action)
prediction = default_ensemble.predict(
predictions=predictions, domain=domain, tracker=tracker
)
assert prediction.probabilities[index_of_excluded_action] == 0.0
|
def test_default_predict_excludes_rejected_action(
default_ensemble: DefaultPolicyPredictionEnsemble,
):
domain = Domain.load("data/test_domains/default.yml")
excluded_action = domain.action_names_or_texts[0]
tracker = DialogueStateTracker.from_events(
sender_id="arbitrary",
evts=[
UserUttered("hi"),
ActionExecuted(excluded_action),
ActionExecutionRejected(excluded_action), # not "Rejection"
],
)
num_actions = len(domain.action_names_or_texts)
predictions = [
PolicyPrediction2(
policy_name=str(idx), probabilities=[1.0] * num_actions, policy_priority=idx
)
for idx in range(2)
]
index_of_excluded_action = domain.index_for_action(excluded_action)
prediction = default_ensemble.predict(
predictions=predictions, domain=domain, tracker=tracker
)
assert prediction.probabilities[index_of_excluded_action] == 0.0
|
10,414 |
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
operation=dict(required=True, choices=['run', 'start', 'stop']),
cluster=dict(required=False, type='str'), # R S P
task_definition=dict(required=False, type='str'), # R* S*
overrides=dict(required=False, type='dict'), # R S
count=dict(required=False, type='int'), # R
task=dict(required=False, type='str'), # P*
container_instances=dict(required=False, type='list'), # S*
started_by=dict(required=False, type='str'), # R S
network_configuration=dict(required=False, type='dict', options=dict(
subnets=dict(type='list'),
security_groups=dict(type='list'),
assign_public_ip=dict(type='bool')
)),
launch_type=dict(required=False, choices=['EC2', 'FARGATE'])
))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
required_if=[('launch_type', 'FARGATE', ['network_configuration'])])
# Validate Inputs
if module.params['operation'] == 'run':
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To run a task, a task_definition must be specified")
task_to_list = module.params['task_definition']
status_type = "RUNNING"
if module.params['operation'] == 'start':
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To start a task, a task_definition must be specified")
if 'container_instances' not in module.params and module.params['container_instances'] is None:
module.fail_json(msg="To start a task, container instances must be specified")
task_to_list = module.params['task']
status_type = "RUNNING"
if module.params['operation'] == 'stop':
if 'task' not in module.params and module.params['task'] is None:
module.fail_json(msg="To stop a task, a task must be specified")
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To stop a task, a task definition must be specified")
task_to_list = module.params['task_definition']
status_type = "STOPPED"
service_mgr = EcsExecManager(module)
if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')
existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
results = dict(changed=False)
if module.params['operation'] == 'run':
if existing:
# TBD - validate the rest of the details
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.run_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['count'],
module.params['started_by'],
module.params['launch_type'])
results['changed'] = True
elif module.params['operation'] == 'start':
if existing:
# TBD - validate the rest of the details
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.start_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['container_instances'],
module.params['started_by']
)
results['changed'] = True
elif module.params['operation'] == 'stop':
if existing:
results['task'] = existing
else:
if not module.check_mode:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
results['task'] = service_mgr.stop_task(
module.params['cluster'],
module.params['task']
)
results['changed'] = True
module.exit_json(**results)
|
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
operation=dict(required=True, choices=['run', 'start', 'stop']),
cluster=dict(required=False, type='str'), # R S P
task_definition=dict(required=False, type='str'), # R* S*
overrides=dict(required=False, type='dict'), # R S
count=dict(required=False, type='int'), # R
task=dict(required=False, type='str'), # P*
container_instances=dict(required=False, type='list'), # S*
started_by=dict(required=False, type='str'), # R S
network_configuration=dict(required=False, type='dict', options=dict(
subnets=dict(type='list'),
security_groups=dict(type='list', elements='str'),
assign_public_ip=dict(type='bool')
)),
launch_type=dict(required=False, choices=['EC2', 'FARGATE'])
))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
required_if=[('launch_type', 'FARGATE', ['network_configuration'])])
# Validate Inputs
if module.params['operation'] == 'run':
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To run a task, a task_definition must be specified")
task_to_list = module.params['task_definition']
status_type = "RUNNING"
if module.params['operation'] == 'start':
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To start a task, a task_definition must be specified")
if 'container_instances' not in module.params and module.params['container_instances'] is None:
module.fail_json(msg="To start a task, container instances must be specified")
task_to_list = module.params['task']
status_type = "RUNNING"
if module.params['operation'] == 'stop':
if 'task' not in module.params and module.params['task'] is None:
module.fail_json(msg="To stop a task, a task must be specified")
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To stop a task, a task definition must be specified")
task_to_list = module.params['task_definition']
status_type = "STOPPED"
service_mgr = EcsExecManager(module)
if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')
existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
results = dict(changed=False)
if module.params['operation'] == 'run':
if existing:
# TBD - validate the rest of the details
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.run_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['count'],
module.params['started_by'],
module.params['launch_type'])
results['changed'] = True
elif module.params['operation'] == 'start':
if existing:
# TBD - validate the rest of the details
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.start_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['container_instances'],
module.params['started_by']
)
results['changed'] = True
elif module.params['operation'] == 'stop':
if existing:
results['task'] = existing
else:
if not module.check_mode:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
results['task'] = service_mgr.stop_task(
module.params['cluster'],
module.params['task']
)
results['changed'] = True
module.exit_json(**results)
|
33,561 |
def _do_policy_eval(tf_sess, to_eval, policies, active_episodes, clip_actions):
"""Call compute actions on observation batches to get next actions.
Returns:
eval_results: dict of policy to compute_action() outputs.
"""
eval_results = {}
if tf_sess:
builder = TFRunBuilder(tf_sess, "policy_eval")
pending_fetches = {}
else:
builder = None
for policy_id, eval_data in to_eval.items():
rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])
policy = _get_or_raise(policies, policy_id)
if builder and (policy.compute_actions.__code__ is
TFPolicyGraph.compute_actions.__code__):
pending_fetches[policy_id] = policy.build_compute_actions(
builder, [t.obs for t in eval_data],
rnn_in_cols,
prev_action_batch=[t.prev_action for t in eval_data],
prev_reward_batch=[t.prev_reward for t in eval_data])
else:
eval_results[policy_id] = policy.compute_actions(
[t.obs for t in eval_data],
rnn_in_cols,
prev_action_batch=[t.prev_action for t in eval_data],
prev_reward_batch=[t.prev_reward for t in eval_data],
episodes=[active_episodes[t.env_id] for t in eval_data])
if builder:
for k, v in pending_fetches.items():
eval_results[k] = builder.get(v)
if clip_actions:
for policy_id, actions in eval_results.items():
policy = _get_or_raise(policies, policy_id)
actions, rnn_out_cols, pi_info_cols = eval_results[policy_id]
eval_results[policy_id] = (_clip_actions(
actions, policy.action_space), rnn_out_cols, pi_info_cols)
return eval_results
|
def _do_policy_eval(tf_sess, to_eval, policies, active_episodes, clip_actions):
"""Call compute actions on observation batches to get next actions.
Returns:
eval_results: dict of policy to compute_action() outputs.
"""
eval_results = {}
if tf_sess:
builder = TFRunBuilder(tf_sess, "policy_eval")
pending_fetches = {}
else:
builder = None
for policy_id, eval_data in to_eval.items():
rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])
policy = _get_or_raise(policies, policy_id)
if builder and (policy.compute_actions.__code__ is
TFPolicyGraph.compute_actions.__code__):
pending_fetches[policy_id] = policy.build_compute_actions(
builder, [t.obs for t in eval_data],
rnn_in_cols,
prev_action_batch=[t.prev_action for t in eval_data],
prev_reward_batch=[t.prev_reward for t in eval_data])
else:
eval_results[policy_id] = policy.compute_actions(
[t.obs for t in eval_data],
rnn_in_cols,
prev_action_batch=[t.prev_action for t in eval_data],
prev_reward_batch=[t.prev_reward for t in eval_data],
episodes=[active_episodes[t.env_id] for t in eval_data])
if builder:
for k, v in pending_fetches.items():
eval_results[k] = builder.get(v)
if clip_actions:
for policy_id, results in eval_results.items():
policy = _get_or_raise(policies, policy_id)
actions, rnn_out_cols, pi_info_cols = eval_results[policy_id]
eval_results[policy_id] = (_clip_actions(
actions, policy.action_space), rnn_out_cols, pi_info_cols)
return eval_results
|
50,484 |
def main(args=None):
parser = create_argument_parser()
cli_options = parser.parse_args(args=args)
# load the config
cfg_name = find_config_name(cli_options)
cfg_options = {}
if cfg_name is not None:
with io.open(cfg_name, encoding='UTF-8') as cfg_file:
cfg_options = parse_config_into_dict(
parse_config_file(cfg_file, filename=cfg_name))
options_dict = merge_options_and_set_defaults(
[cfg_options, cli_options.__dict__])
options = Options(**options_dict)
logger = Logger(options.verbose)
if sys.version_info < (3, 8) and options.use_canonical_paths:
logger.warn("--use_canonical_paths will be ignored due to incompatible Python version.")
options.use_canonical_paths = None
if cli_options.version:
logger.msg(
"gcovr {version}\n"
"\n"
"{copyright}",
version=__version__, copyright=COPYRIGHT)
sys.exit(0)
if options.html_title == '':
logger.error(
"an empty --html_title= is not allowed.")
sys.exit(1)
if options.html_medium_threshold == 0:
logger.error(
"value of --html-medium-threshold= should not be zero.")
sys.exit(1)
if options.html_medium_threshold > options.html_high_threshold:
logger.error(
"value of --html-medium-threshold={} should be\n"
"lower than or equal to the value of --html-high-threshold={}.",
options.html_medium_threshold, options.html_high_threshold)
sys.exit(1)
if options.html_tab_size < 1:
logger.error(
"value of --html-tab-size= should be greater 0.")
sys.exit(1)
potential_html_output = (
(options.html and options.html.value)
or (options.html_details and options.html_details.value)
or (options.output and options.output.value))
if options.html_details and not potential_html_output:
logger.error(
"a named output must be given, if the option --html-details\n"
"is used.")
sys.exit(1)
if options.html_self_contained is False and not potential_html_output:
logger.error(
"can only disable --html-self-contained when a named output is given.")
sys.exit(1)
if options.objdir is not None:
if not options.objdir:
logger.error(
"empty --object-directory option.\n"
"\tThis option specifies the path to the object file "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
tmp = options.objdir.replace('/', os.sep).replace('\\', os.sep)
while os.sep + os.sep in tmp:
tmp = tmp.replace(os.sep + os.sep, os.sep)
if normpath(options.objdir) != tmp:
logger.warn(
"relative referencing in --object-directory.\n"
"\tthis could cause strange errors when gcovr attempts to\n"
"\tidentify the original gcc working directory.")
if not os.path.exists(normpath(options.objdir)):
logger.error(
"Bad --object-directory option.\n"
"\tThe specified directory does not exist.")
sys.exit(1)
if options.use_canonical_paths:
canonical_objdir = os.path.realpath(options.objdir)
if canonical_objdir != options.objdir:
options.objdir = canonical_objdir
logger.msg(f"--object-directory has been normalized to {options.objdir}.")
options.starting_dir = os.path.abspath(os.getcwd())
if options.use_canonical_paths:
canonical_starting_dir = os.path.realpath(options.starting_dir)
if canonical_starting_dir != options.starting_dir:
options.starting_dir = canonical_starting_dir
logger.msg(f"starting_dir has been normalized to {options.starting_dir}.")
if not options.root:
logger.error(
"empty --root option.\n"
"\tRoot specifies the path to the root "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
options.root_dir = os.path.abspath(options.root)
if options.use_canonical_paths:
canonical_root = os.path.realpath(options.root)
if canonical_root != options.root:
options.root = canonical_root
logger.msg(f"--root has been normalized to {options.root}.")
canonical_rootdir = os.path.realpath(options.root_dir)
if canonical_rootdir != options.root_dir:
options.root_dir = canonical_rootdir
logger.msg(f"root_dir has been normalized to {options.root_dir}.")
#
# Setup filters
#
# The root filter isn't technically a filter,
# but is used to turn absolute paths into relative paths
options.root_filter = re.compile('^' + re.escape(options.root_dir + os.sep))
if options.exclude_dirs is not None:
options.exclude_dirs = [
f.build_filter(logger, options.use_canonical_paths) for f in options.exclude_dirs]
options.exclude = [f.build_filter(logger, options.use_canonical_paths) for f in options.exclude]
options.filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.filter]
if not options.filter:
options.filter = [DirectoryPrefixFilter(options.root_dir)]
options.gcov_exclude = [
f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_exclude]
options.gcov_filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_filter]
if not options.gcov_filter:
options.gcov_filter = [AlwaysMatchFilter()]
# Output the filters for debugging
for name, filters in [
('--root', [options.root_filter]),
('--filter', options.filter),
('--exclude', options.exclude),
('--gcov-filter', options.gcov_filter),
('--gcov-exclude', options.gcov_exclude),
('--exclude-directories', options.exclude_dirs),
]:
logger.verbose_msg('Filters for {}: ({})', name, len(filters))
for f in filters:
logger.verbose_msg('- {}', f)
if options.exclude_lines_by_pattern:
try:
re.compile(options.exclude_lines_by_pattern)
except re.error as e:
logger.error(
"--exclude-lines-by-pattern: "
"Invalid regular expression: {}, error: {}",
repr(options.exclude_lines_by_pattern), e)
sys.exit(1)
covdata = dict()
if options.add_tracefile:
collect_coverage_from_tracefiles(covdata, options, logger)
else:
collect_coverage_from_gcov(covdata, options, logger)
logger.verbose_msg("Gathered coveraged data for {} files", len(covdata))
# Print reports
error_occurred = print_reports(covdata, options, logger)
if error_occurred:
logger.error(
"Error occurred while printing reports"
)
sys.exit(7)
if options.fail_under_line > 0.0 or options.fail_under_branch > 0.0:
fail_under(covdata, options.fail_under_line, options.fail_under_branch, logger)
|
def main(args=None):
parser = create_argument_parser()
cli_options = parser.parse_args(args=args)
# load the config
cfg_name = find_config_name(cli_options)
cfg_options = {}
if cfg_name is not None:
with io.open(cfg_name, encoding='UTF-8') as cfg_file:
cfg_options = parse_config_into_dict(
parse_config_file(cfg_file, filename=cfg_name))
options_dict = merge_options_and_set_defaults(
[cfg_options, cli_options.__dict__])
options = Options(**options_dict)
logger = Logger(options.verbose)
if sys.version_info < (3, 8) and options.use_canonical_paths:
logger.warn("--use_canonical_paths will be ignored due to incompatible Python version.")
options.use_canonical_paths = None
if cli_options.version:
logger.msg(
"gcovr {version}\n"
"\n"
"{copyright}",
version=__version__, copyright=COPYRIGHT)
sys.exit(0)
if options.html_title == '':
logger.error(
"an empty --html_title= is not allowed.")
sys.exit(1)
if options.html_medium_threshold == 0:
logger.error(
"value of --html-medium-threshold= should not be zero.")
sys.exit(1)
if options.html_medium_threshold > options.html_high_threshold:
logger.error(
"value of --html-medium-threshold={} should be\n"
"lower than or equal to the value of --html-high-threshold={}.",
options.html_medium_threshold, options.html_high_threshold)
sys.exit(1)
if options.html_tab_size < 1:
logger.error(
"value of --html-tab-size= should be greater 0.")
sys.exit(1)
potential_html_output = (
(options.html and options.html.value)
or (options.html_details and options.html_details.value)
or (options.output and options.output.value))
if options.html_details and not potential_html_output:
logger.error(
"a named output must be given, if the option --html-details\n"
"is used.")
sys.exit(1)
if options.html_self_contained is False and not potential_html_output:
logger.error(
"can only disable --html-self-contained when a named output is given.")
sys.exit(1)
if options.objdir is not None:
if not options.objdir:
logger.error(
"empty --object-directory option.\n"
"\tThis option specifies the path to the object file "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
tmp = options.objdir.replace('/', os.sep).replace('\\', os.sep)
while os.sep + os.sep in tmp:
tmp = tmp.replace(os.sep + os.sep, os.sep)
if normpath(options.objdir) != tmp:
logger.warn(
"relative referencing in --object-directory.\n"
"\tthis could cause strange errors when gcovr attempts to\n"
"\tidentify the original gcc working directory.")
if not os.path.exists(normpath(options.objdir)):
logger.error(
"Bad --object-directory option.\n"
"\tThe specified directory does not exist.")
sys.exit(1)
if canonical_path:
options.objdir = canonical_path(options.objdir, "--object-dir")
options.starting_dir = os.path.abspath(os.getcwd())
if options.use_canonical_paths:
canonical_starting_dir = os.path.realpath(options.starting_dir)
if canonical_starting_dir != options.starting_dir:
options.starting_dir = canonical_starting_dir
logger.msg(f"starting_dir has been normalized to {options.starting_dir}.")
if not options.root:
logger.error(
"empty --root option.\n"
"\tRoot specifies the path to the root "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
options.root_dir = os.path.abspath(options.root)
if options.use_canonical_paths:
canonical_root = os.path.realpath(options.root)
if canonical_root != options.root:
options.root = canonical_root
logger.msg(f"--root has been normalized to {options.root}.")
canonical_rootdir = os.path.realpath(options.root_dir)
if canonical_rootdir != options.root_dir:
options.root_dir = canonical_rootdir
logger.msg(f"root_dir has been normalized to {options.root_dir}.")
#
# Setup filters
#
# The root filter isn't technically a filter,
# but is used to turn absolute paths into relative paths
options.root_filter = re.compile('^' + re.escape(options.root_dir + os.sep))
if options.exclude_dirs is not None:
options.exclude_dirs = [
f.build_filter(logger, options.use_canonical_paths) for f in options.exclude_dirs]
options.exclude = [f.build_filter(logger, options.use_canonical_paths) for f in options.exclude]
options.filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.filter]
if not options.filter:
options.filter = [DirectoryPrefixFilter(options.root_dir)]
options.gcov_exclude = [
f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_exclude]
options.gcov_filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_filter]
if not options.gcov_filter:
options.gcov_filter = [AlwaysMatchFilter()]
# Output the filters for debugging
for name, filters in [
('--root', [options.root_filter]),
('--filter', options.filter),
('--exclude', options.exclude),
('--gcov-filter', options.gcov_filter),
('--gcov-exclude', options.gcov_exclude),
('--exclude-directories', options.exclude_dirs),
]:
logger.verbose_msg('Filters for {}: ({})', name, len(filters))
for f in filters:
logger.verbose_msg('- {}', f)
if options.exclude_lines_by_pattern:
try:
re.compile(options.exclude_lines_by_pattern)
except re.error as e:
logger.error(
"--exclude-lines-by-pattern: "
"Invalid regular expression: {}, error: {}",
repr(options.exclude_lines_by_pattern), e)
sys.exit(1)
covdata = dict()
if options.add_tracefile:
collect_coverage_from_tracefiles(covdata, options, logger)
else:
collect_coverage_from_gcov(covdata, options, logger)
logger.verbose_msg("Gathered coveraged data for {} files", len(covdata))
# Print reports
error_occurred = print_reports(covdata, options, logger)
if error_occurred:
logger.error(
"Error occurred while printing reports"
)
sys.exit(7)
if options.fail_under_line > 0.0 or options.fail_under_branch > 0.0:
fail_under(covdata, options.fail_under_line, options.fail_under_branch, logger)
|
7,154 |
def _handle_input(image, selem, out, mask, out_dtype=None, pixel_size=1):
"""Preprocess and verify input for filters.rank methods.
Parameters
----------
image : 2-D array (integer, float or boolean)
Input image.
selem : 2-D array (integer, float or boolean)
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (integer, float or boolean)
If None, a new array is allocated.
mask : ndarray (integer, float or boolean)
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
out_dtype : data-type
Desired output data-type. Default is None, which means we cast output
in input dtype.
pixel_size : int
Dimension of each pixel. Default value is 1.
Returns
-------
image : 2-D array (np.uint8 or np.uint16)
selem : 2-D array (np.uint8)
The neighborhood expressed as a binary 2-D array.
out : 3-D array (same dtype out_dtype or as input)
Output array. The two first dimensions are the spatial ones, the third
one is the pixel vector (length 1 by default).
mask : 2-D array (np.uint8)
Mask array that defines (>0) area of the image included in the local
neighborhood.
n_bins : int
Number of histogram bins.
out_dtype : data-type
Output data-type.
"""
assert_nD(image, 2)
if image.dtype not in (np.uint8, np.uint16, np.bool_):
message = ('Possible precision loss converting image of type {} to '
'uint8 as required by rank filters. Convert manually using '
'skimage.util.img_as_ubyte to silence this warning.'
.format(image.dtype))
warn(message, stacklevel=2)
image = img_as_ubyte(image)
if out_dtype is None:
out_dtype = image.dtype
selem = np.ascontiguousarray(img_as_ubyte(selem > 0))
image = np.ascontiguousarray(image)
if mask is None:
mask = np.ones(image.shape, dtype=np.uint8)
else:
mask = img_as_ubyte(mask)
mask = np.ascontiguousarray(mask)
if image is out:
raise NotImplementedError("Cannot perform rank operation in place.")
if out is None:
out = np.empty(image.shape + (pixel_size,), dtype=out_dtype)
else:
if len(out.shape) == 2:
out = out.reshape(out.shape+(pixel_size,))
if image.dtype in (np.uint8, np.int8):
n_bins = 256
else:
# Convert to a Python int to avoid the potential overflow when we add
# 1 to the maximum of the image.
n_bins = int(max(3, image.max())) + 1
if n_bins > 2**10:
warn("Bad rank filter performance is expected due to a "
"large number of bins ({}), equivalent to an approximate "
"bitdepth of {:.1f}.".format(n_bins, np.log2(n_bins)),
stacklevel=2)
return image, selem, out, mask, n_bins, out_dtype
|
def _handle_input(image, selem, out, mask, out_dtype=None, pixel_size=1):
"""Preprocess and verify input for filters.rank methods.
Parameters
----------
image : 2-D array (integer, float or boolean)
Input image.
selem : 2-D array (integer, float or boolean)
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (integer, float or boolean)
If None, a new array is allocated.
mask : ndarray (integer, float or boolean)
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
out_dtype : data-type, optional
Desired output data-type. Default is None, which means the output is
cast to the input dtype.
pixel_size : int
Dimension of each pixel. Default value is 1.
Returns
-------
image : 2-D array (np.uint8 or np.uint16)
selem : 2-D array (np.uint8)
The neighborhood expressed as a binary 2-D array.
out : 3-D array (same dtype out_dtype or as input)
Output array. The two first dimensions are the spatial ones, the third
one is the pixel vector (length 1 by default).
mask : 2-D array (np.uint8)
Mask array that defines (>0) area of the image included in the local
neighborhood.
n_bins : int
Number of histogram bins.
out_dtype : data-type
Output data-type.
"""
assert_nD(image, 2)
if image.dtype not in (np.uint8, np.uint16, np.bool_):
message = ('Possible precision loss converting image of type {} to '
'uint8 as required by rank filters. Convert manually using '
'skimage.util.img_as_ubyte to silence this warning.'
.format(image.dtype))
warn(message, stacklevel=2)
image = img_as_ubyte(image)
if out_dtype is None:
out_dtype = image.dtype
selem = np.ascontiguousarray(img_as_ubyte(selem > 0))
image = np.ascontiguousarray(image)
if mask is None:
mask = np.ones(image.shape, dtype=np.uint8)
else:
mask = img_as_ubyte(mask)
mask = np.ascontiguousarray(mask)
if image is out:
raise NotImplementedError("Cannot perform rank operation in place.")
if out is None:
out = np.empty(image.shape + (pixel_size,), dtype=out_dtype)
else:
if len(out.shape) == 2:
out = out.reshape(out.shape+(pixel_size,))
if image.dtype in (np.uint8, np.int8):
n_bins = 256
else:
# Convert to a Python int to avoid the potential overflow when we add
# 1 to the maximum of the image.
n_bins = int(max(3, image.max())) + 1
if n_bins > 2**10:
warn("Bad rank filter performance is expected due to a "
"large number of bins ({}), equivalent to an approximate "
"bitdepth of {:.1f}.".format(n_bins, np.log2(n_bins)),
stacklevel=2)
return image, selem, out, mask, n_bins, out_dtype
|
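The bin-count rule at the end of _handle_input is what triggers the performance warning; a standalone sketch of just that rule, using only NumPy (it mirrors the snippet above and is not an official skimage helper):

import numpy as np

def n_bins_for(image):
    # 8-bit images always get 256 bins
    if image.dtype in (np.uint8, np.int8):
        return 256
    # cast to a Python int to avoid overflow when adding 1 to the image maximum
    return int(max(3, image.max())) + 1

print(n_bins_for(np.zeros((4, 4), dtype=np.uint8)))        # 256
print(n_bins_for(np.full((4, 4), 4000, dtype=np.uint16)))  # 4001 -> would trigger the warning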
31,157 |
def transform_object_list(object_type: str, object_list=None):
"""
Transform a list of objects, i.e. replace the SCIM URI with a compressed object name.
This is done because PAN XSOAR is unable to process JSON keys containing symbols such as '.' or ':'.
:type object_type: ``str``
:param object_type: Type of IdentityIQ object.
:type object_list: ``JSON``
:param object_list: List of Identity resources objects.
:return: Transformed list object.
"""
if object_list is None:
return None
transformed_list = []
for object in object_list:
transformed_list.append(transform_object(object_type, object))
return transformed_list
|
def transform_object_list(object_type: str, object_list=None):
"""
Transform a list of objects, i.e. replace the SCIM URI with a compressed object name.
This is done because PAN XSOAR is unable to process JSON keys containing symbols such as '.' or ':'.
:type object_type: ``str``
:param object_type: Type of IdentityIQ object.
:type object_list: ``JSON``
:param object_list: List of Identity resources objects.
:return: Transformed list object.
"""
if not isinstance(object_list, list):
return None
transformed_list = []
for object in object_list:
transformed_list.append(transform_object(object_type, object))
return transformed_list
|
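transform_object itself is not shown in this snippet, so the following stand-in only illustrates the key compression the docstring describes; the SCIM URI and alias are made up for the example:

# Hedged illustration of replacing a SCIM URI key with a short alias so the
# resulting JSON keys contain no '.' or ':' characters.
SCIM_URI = 'urn:ietf:params:scim:schemas:sailpoint:1.0:User'

def compress_keys(obj, alias='sailpoint'):
    return {alias if key == SCIM_URI else key: value for key, value in obj.items()}

print(compress_keys({SCIM_URI: {'department': 'IT'}, 'userName': 'jdoe'}))
# {'sailpoint': {'department': 'IT'}, 'userName': 'jdoe'}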
52,279 |
def get_parser():
parser = argparse.ArgumentParser(
description="Segment an anatomical structure or pathologies according to the specified deep learning model.",
add_help=None,
formatter_class=SmartFormatter,
prog=os.path.basename(__file__).strip(".py"))
input_output = parser.add_argument_group("\nINPUT/OUTPUT")
input_output.add_argument(
"-i",
nargs="+",
help="Image to segment.",
metavar=Metavar.file)
input_output.add_argument(
"-o",
help="Output file name. In case of multi-class segmentation, class-specific suffixes will be added. By default,"
"suffix '_seg' will be added and output extension will be .nii.gz.",
metavar=Metavar.str)
seg = parser.add_argument_group('\nTASKS')
seg.add_argument(
"-task",
help="Task to perform. It could either be a pre-installed task, task that could be installed, or a custom task."
" To list available tasks, run: sct_deepseg -list-tasks",
metavar=Metavar.str)
seg.add_argument(
"-list-tasks",
action='store_true',
help="Display a list of tasks that can be achieved.")
seg.add_argument(
"-install-task",
help="Install models that are required for specified task.",
choices=list(deepseg.models.TASKS.keys()))
misc = parser.add_argument_group('\nPARAMETERS')
misc.add_argument(
"-thr",
type=float,
help="Binarize segmentation with specified threshold. Set to 0 for no thresholding (i.e., soft segmentation). "
"Default value is model-specific and was set during optimization "
"(more info at https://github.com/sct-pipeline/deepseg-threshold).",
metavar=Metavar.float,
default=0.9)
misc.add_argument(
"-r",
type=int,
help="Whether to remove temporary files. 0 = no, 1 = yes (default: 1)",
default=1)
misc.add_argument(
"-largest",
type=int,
help="Keep the largest connected-objects from the output segmentation. Specify the number of objects to keep."
"To keep all objects, set to 0",
default=0)
misc.add_argument(
"-fill-holes",
type=int,
help="Fill small holes in the segmentation.",
choices=(0, 1),
default=0)
misc.add_argument(
"-remove-small",
type=str,
help="Minimal object size to keep with unit (mm3 or vox). Example: 1mm3, 5vox.",
default='0vox')
misc = parser.add_argument_group('\nMISC')
misc.add_argument(
"-v",
type=int,
help="Verbose: 0 = no verbosity, 1 = verbose.",
choices=(0, 1),
default=1)
misc.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
return parser
|
def get_parser():
parser = argparse.ArgumentParser(
description="Segment an anatomical structure or pathologies according to the specified deep learning model.",
add_help=None,
formatter_class=SmartFormatter,
prog=os.path.basename(__file__).strip(".py"))
input_output = parser.add_argument_group("\nINPUT/OUTPUT")
input_output.add_argument(
"-i",
nargs="+",
help="Image to segment.",
metavar=Metavar.file)
input_output.add_argument(
"-o",
help="Output file name. In case of multi-class segmentation, class-specific suffixes will be added. By default,"
"suffix '_seg' will be added and output extension will be .nii.gz.",
metavar=Metavar.str)
seg = parser.add_argument_group('\nTASKS')
seg.add_argument(
"-task",
help="Task to perform. It could either be a pre-installed task, task that could be installed, or a custom task."
" To list available tasks, run: sct_deepseg -list-tasks",
metavar=Metavar.str)
seg.add_argument(
"-list-tasks",
action='store_true',
help="Display a list of tasks that can be achieved.")
seg.add_argument(
"-install-task",
help="Install models that are required for specified task.",
choices=list(deepseg.models.TASKS.keys()))
misc = parser.add_argument_group('\nPARAMETERS')
misc.add_argument(
"-thr",
type=float,
help="Binarize segmentation with specified threshold. Set to 0 for no thresholding (i.e., soft segmentation). "
"Default value is model-specific and was set during optimization "
"(more info at https://github.com/sct-pipeline/deepseg-threshold).",
metavar=Metavar.float,
default=0.9)
misc.add_argument(
"-r",
type=int,
help="Remove temporary files.",
choices=(0, 1),
default=1)
misc.add_argument(
"-largest",
type=int,
help="Keep the largest connected-objects from the output segmentation. Specify the number of objects to keep."
"To keep all objects, set to 0",
default=0)
misc.add_argument(
"-fill-holes",
type=int,
help="Fill small holes in the segmentation.",
choices=(0, 1),
default=0)
misc.add_argument(
"-remove-small",
type=str,
help="Minimal object size to keep with unit (mm3 or vox). Example: 1mm3, 5vox.",
default='0vox')
misc = parser.add_argument_group('\nMISC')
misc.add_argument(
"-v",
type=int,
help="Verbose: 0 = no verbosity, 1 = verbose.",
choices=(0, 1),
default=1)
misc.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
return parser
|
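The -thr option above either keeps the soft segmentation (threshold 0) or binarizes it; a hedged NumPy-only sketch of that behaviour, separate from the actual sct_deepseg pipeline:

import numpy as np

def apply_threshold(soft_seg, thr):
    # thr == 0 means no thresholding: the soft segmentation is returned unchanged
    if thr == 0:
        return soft_seg
    return (soft_seg >= thr).astype(np.uint8)

soft = np.array([0.1, 0.5, 0.95])
print(apply_threshold(soft, 0.9))  # [0 0 1]
print(apply_threshold(soft, 0))    # [0.1  0.5  0.95]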
10,540 |
def main():
"""
Main program function used to isolate globals from imported code.
Changes to globals in imported modules on Python 2.x will overwrite our own globals.
"""
import ansible
import contextlib
import datetime
import json
import os
import re
import runpy
import subprocess
import sys
import traceback
import types
import warnings
ansible_path = os.path.dirname(os.path.dirname(ansible.__file__))
temp_path = os.environ['SANITY_TEMP_PATH'] + os.path.sep
external_python = os.environ.get('SANITY_EXTERNAL_PYTHON') or sys.executable
collection_full_name = os.environ.get('SANITY_COLLECTION_FULL_NAME')
collection_root = os.environ.get('ANSIBLE_COLLECTIONS_PATH')
try:
# noinspection PyCompatibility
from importlib import import_module
except ImportError:
def import_module(name):
__import__(name)
return sys.modules[name]
try:
# noinspection PyCompatibility
from StringIO import StringIO
except ImportError:
from io import StringIO
if collection_full_name:
# allow importing code from collections when testing a collection
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native, text_type
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
from ansible.utils.collection_loader import _collection_finder
yaml_to_json_path = os.path.join(os.path.dirname(__file__), 'yaml_to_json.py')
yaml_to_dict_cache = {}
# unique ISO date marker matching the one present in yaml_to_json.py
iso_date_marker = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:'
iso_date_re = re.compile('^%s([0-9]{4})-([0-9]{2})-([0-9]{2})$' % iso_date_marker)
def parse_value(value):
"""Custom value parser for JSON deserialization that recognizes our internal ISO date format."""
if isinstance(value, text_type):
match = iso_date_re.search(value)
if match:
value = datetime.date(int(match.group(1)), int(match.group(2)), int(match.group(3)))
return value
def object_hook(data):
"""Object hook for custom ISO date deserialization from JSON."""
return dict((key, parse_value(value)) for key, value in data.items())
def yaml_to_dict(yaml, content_id):
"""
Return a Python dict version of the provided YAML.
Conversion is done in a subprocess since the current Python interpreter does not have access to PyYAML.
"""
if content_id in yaml_to_dict_cache:
return yaml_to_dict_cache[content_id]
try:
cmd = [external_python, yaml_to_json_path]
proc = subprocess.Popen([to_bytes(c) for c in cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_bytes, stderr_bytes = proc.communicate(to_bytes(yaml))
if proc.returncode != 0:
raise Exception('command %s failed with return code %d: %s' % ([to_native(c) for c in cmd], proc.returncode, to_native(stderr_bytes)))
data = yaml_to_dict_cache[content_id] = json.loads(to_text(stdout_bytes), object_hook=object_hook)
return data
except Exception as ex:
raise Exception('internal importer error - failed to parse yaml: %s' % to_native(ex))
_collection_finder._meta_yml_to_dict = yaml_to_dict # pylint: disable=protected-access
collection_loader = _AnsibleCollectionFinder(paths=[collection_root])
# noinspection PyProtectedMember
collection_loader._install() # pylint: disable=protected-access
else:
# do not support collection loading when not testing a collection
collection_loader = None
# remove all modules under the ansible package
list(map(sys.modules.pop, [m for m in sys.modules if m.partition('.')[0] == ansible.__name__]))
args = sys.argv[1:]
import_type = 'module'
try:
type_index = args.index('--type')
import_type = args[type_index + 1]
args = args[:type_index] + args[type_index + 2:]
except ValueError:
pass
if import_type == 'module':
# pre-load an empty ansible package to prevent unwanted code in __init__.py from loading
# this more accurately reflects the environment that AnsiballZ runs modules under
# it also avoids issues with imports in the ansible package that are not allowed
ansible_module = types.ModuleType(ansible.__name__)
ansible_module.__file__ = ansible.__file__
ansible_module.__path__ = ansible.__path__
ansible_module.__package__ = ansible.__package__
sys.modules[ansible.__name__] = ansible_module
class ImporterAnsibleModuleException(Exception):
"""Exception thrown during initialization of ImporterAnsibleModule."""
class ImporterAnsibleModule:
"""Replacement for AnsibleModule to support import testing."""
def __init__(self, *args, **kwargs):
raise ImporterAnsibleModuleException()
class RestrictedModuleLoader:
"""Python module loader that restricts inappropriate imports."""
def __init__(self, path, name, restrict_to_module_paths):
self.path = path
self.name = name
self.loaded_modules = set()
self.restrict_to_module_paths = restrict_to_module_paths
def find_module(self, fullname, path=None):
"""Return self if the given fullname is restricted, otherwise return None.
:param fullname: str
:param path: str
:return: RestrictedModuleLoader | None
"""
if fullname in self.loaded_modules:
return None # ignore modules that are already being loaded
if is_name_in_namepace(fullname, ['ansible']):
if not self.restrict_to_module_paths:
return None # for non-modules, everything in the ansible namespace is allowed
if fullname in ('ansible.module_utils.basic', 'ansible.module_utils.common.removed'):
return self # intercept loading so we can modify the result
if is_name_in_namepace(fullname, ['ansible.module_utils', self.name]):
return None # module_utils and module under test are always allowed
if any(os.path.exists(candidate_path) for candidate_path in convert_ansible_name_to_absolute_paths(fullname)):
return self # restrict access to ansible files that exist
return None # ansible file does not exist, do not restrict access
if is_name_in_namepace(fullname, ['ansible_collections']):
if not collection_loader:
return self # restrict access to collections when we are not testing a collection
if not self.restrict_to_module_paths:
return None # for non-modules, everything in the ansible namespace is allowed
if is_name_in_namepace(fullname, ['ansible_collections...plugins.module_utils', self.name]):
return None # module_utils and module under test are always allowed
if collection_loader.find_module(fullname, path):
return self # restrict access to collection files that exist
return None # collection file does not exist, do not restrict access
# not a namespace we care about
return None
def load_module(self, fullname):
"""Raise an ImportError.
:type fullname: str
"""
if fullname == 'ansible.module_utils.basic':
module = self.__load_module(fullname)
# stop Ansible module execution during AnsibleModule instantiation
module.AnsibleModule = ImporterAnsibleModule
# no-op for _load_params since it may be called before instantiating AnsibleModule
module._load_params = lambda *args, **kwargs: {} # pylint: disable=protected-access
return module
if fullname == 'ansible.module_utils.common.removed':
module = self.__load_module(fullname)
# no-op for removed_module since it is called in place of AnsibleModule instantiation
module.removed_module = lambda *args, **kwargs: None
return module
raise ImportError('import of "%s" is not allowed in this context' % fullname)
def __load_module(self, fullname):
"""Load the requested module while avoiding infinite recursion.
:type fullname: str
:rtype: module
"""
self.loaded_modules.add(fullname)
return import_module(fullname)
def run(args):
"""Main program function."""
base_dir = os.getcwd()
messages = set()
if import_type == 'module':
for path in args or sys.stdin.read().splitlines():
name = convert_relative_path_to_name(path)
test_python_module(path, name, base_dir, messages, True)
elif import_type == 'plugin':
for path in args or sys.stdin.read().splitlines():
name = convert_relative_path_to_name(path)
test_python_module(path, name, base_dir, messages, False)
else:
print('Invalid import type!')
sys.exit(1)
if messages:
sys.exit(10)
def test_python_module(path, name, base_dir, messages, restrict_to_module_paths):
"""Test the given python module by importing it.
:type path: str
:type name: str
:type base_dir: str
:type messages: set[str]
:type restrict_to_module_paths: bool
"""
if name in sys.modules:
return # cannot be tested because it has already been loaded
is_ansible_module = (path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/')) and os.path.basename(path) != '__init__.py'
run_main = is_ansible_module
if path == 'lib/ansible/modules/async_wrapper.py':
# async_wrapper is a non-standard Ansible module (does not use AnsibleModule) so we cannot test the main function
run_main = False
capture_normal = Capture()
capture_main = Capture()
try:
with monitor_sys_modules(path, messages):
with restrict_imports(path, name, messages, restrict_to_module_paths):
with capture_output(capture_normal):
import_module(name)
if run_main:
with monitor_sys_modules(path, messages):
with restrict_imports(path, name, messages, restrict_to_module_paths):
with capture_output(capture_main):
runpy.run_module(name, run_name='__main__', alter_sys=True)
except ImporterAnsibleModuleException:
# module instantiated AnsibleModule without raising an exception
pass
except BaseException as ex: # pylint: disable=locally-disabled, broad-except
# intentionally catch all exceptions, including calls to sys.exit
exc_type, _exc, exc_tb = sys.exc_info()
message = str(ex)
results = list(reversed(traceback.extract_tb(exc_tb)))
line = 0
offset = 0
full_path = os.path.join(base_dir, path)
base_path = base_dir + os.path.sep
source = None
# avoid line wraps in messages
message = re.sub(r'\n *', ': ', message)
for result in results:
if result[0] == full_path:
# save the line number for the file under test
line = result[1] or 0
if not source and result[0].startswith(base_path) and not result[0].startswith(temp_path):
# save the first path and line number in the traceback which is in our source tree
source = (os.path.relpath(result[0], base_path), result[1] or 0, 0)
if isinstance(ex, SyntaxError):
# SyntaxError has better information than the traceback
if ex.filename == full_path: # pylint: disable=locally-disabled, no-member
# syntax error was reported in the file under test
line = ex.lineno or 0 # pylint: disable=locally-disabled, no-member
offset = ex.offset or 0 # pylint: disable=locally-disabled, no-member
elif ex.filename.startswith(base_path) and not ex.filename.startswith(temp_path): # pylint: disable=locally-disabled, no-member
# syntax error was reported in our source tree
source = (os.path.relpath(ex.filename, base_path), ex.lineno or 0, ex.offset or 0) # pylint: disable=locally-disabled, no-member
# remove the filename and line number from the message
# either it was extracted above, or it's not really useful information
message = re.sub(r' \(.*?, line [0-9]+\)$', '', message)
if source and source[0] != path:
message += ' (at %s:%d:%d)' % (source[0], source[1], source[2])
report_message(path, line, offset, 'traceback', '%s: %s' % (exc_type.__name__, message), messages)
finally:
capture_report(path, capture_normal, messages)
capture_report(path, capture_main, messages)
def is_name_in_namepace(name, namespaces):
"""Returns True if the given name is one of the given namespaces, otherwise returns False."""
name_parts = name.split('.')
for namespace in namespaces:
namespace_parts = namespace.split('.')
length = min(len(name_parts), len(namespace_parts))
truncated_name = name_parts[0:length]
truncated_namespace = namespace_parts[0:length]
# empty parts in the namespace are treated as wildcards
# to simplify the comparison, use those empty parts to indicate the positions in the name to be empty as well
for idx, part in enumerate(truncated_namespace):
if not part:
truncated_name[idx] = part
# example: name=ansible, allowed_name=ansible.module_utils
# example: name=ansible.module_utils.system.ping, allowed_name=ansible.module_utils
if truncated_name == truncated_namespace:
return True
return False
def check_sys_modules(path, before, messages):
"""Check for unwanted changes to sys.modules.
:type path: str
:type before: dict[str, module]
:type messages: set[str]
"""
after = sys.modules
removed = set(before.keys()) - set(after.keys())
changed = set(key for key, value in before.items() if key in after and value != after[key])
# additions are checked by our custom PEP 302 loader, so we don't need to check them again here
for module in sorted(removed):
report_message(path, 0, 0, 'unload', 'unloading of "%s" in sys.modules is not supported' % module, messages)
for module in sorted(changed):
report_message(path, 0, 0, 'reload', 'reloading of "%s" in sys.modules is not supported' % module, messages)
def convert_ansible_name_to_absolute_paths(name):
"""Calculate the module path from the given name.
:type name: str
:rtype: list[str]
"""
return [
os.path.join(ansible_path, name.replace('.', os.path.sep)),
os.path.join(ansible_path, name.replace('.', os.path.sep)) + '.py',
]
def convert_relative_path_to_name(path):
"""Calculate the module name from the given path.
:type path: str
:rtype: str
"""
if path.endswith('/__init__.py'):
clean_path = os.path.dirname(path)
else:
clean_path = path
clean_path = os.path.splitext(clean_path)[0]
name = clean_path.replace(os.path.sep, '.')
if collection_loader:
# when testing collections the relative paths (and names) being tested are within the collection under test
name = 'ansible_collections.%s.%s' % (collection_full_name, name)
else:
# when testing ansible all files being imported reside under the lib directory
name = name[len('lib/'):]
return name
class Capture:
"""Captured output and/or exception."""
def __init__(self):
self.stdout = StringIO()
self.stderr = StringIO()
def capture_report(path, capture, messages):
"""Report on captured output.
:type path: str
:type capture: Capture
:type messages: set[str]
"""
if capture.stdout.getvalue():
first = capture.stdout.getvalue().strip().splitlines()[0].strip()
report_message(path, 0, 0, 'stdout', first, messages)
if capture.stderr.getvalue():
first = capture.stderr.getvalue().strip().splitlines()[0].strip()
report_message(path, 0, 0, 'stderr', first, messages)
def report_message(path, line, column, code, message, messages):
"""Report message if not already reported.
:type path: str
:type line: int
:type column: int
:type code: str
:type message: str
:type messages: set[str]
"""
message = '%s:%d:%d: %s: %s' % (path, line, column, code, message)
if message not in messages:
messages.add(message)
print(message)
@contextlib.contextmanager
def restrict_imports(path, name, messages, restrict_to_module_paths):
"""Restrict available imports.
:type path: str
:type name: str
:type messages: set[str]
:type restrict_to_module_paths: bool
"""
restricted_loader = RestrictedModuleLoader(path, name, restrict_to_module_paths)
# noinspection PyTypeChecker
sys.meta_path.insert(0, restricted_loader)
sys.path_importer_cache.clear()
try:
yield
finally:
if import_type == 'plugin':
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
_AnsibleCollectionFinder._remove() # pylint: disable=protected-access
if sys.meta_path[0] != restricted_loader:
report_message(path, 0, 0, 'metapath', 'changes to sys.meta_path[0] are not permitted', messages)
while restricted_loader in sys.meta_path:
# noinspection PyTypeChecker
sys.meta_path.remove(restricted_loader)
sys.path_importer_cache.clear()
@contextlib.contextmanager
def monitor_sys_modules(path, messages):
"""Monitor sys.modules for unwanted changes, reverting any additions made to our own namespaces."""
snapshot = sys.modules.copy()
try:
yield
finally:
check_sys_modules(path, snapshot, messages)
for key in set(sys.modules.keys()) - set(snapshot.keys()):
if is_name_in_namepace(key, ('ansible', 'ansible_collections')):
del sys.modules[key] # only unload our own code since we know it's native Python
@contextlib.contextmanager
def capture_output(capture):
"""Capture sys.stdout and sys.stderr.
:type capture: Capture
"""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = capture.stdout
sys.stderr = capture.stderr
# clear all warnings registries to make all warnings available
for module in sys.modules.values():
try:
# noinspection PyUnresolvedReferences
module.__warningregistry__.clear()
except AttributeError:
pass
with warnings.catch_warnings():
warnings.simplefilter('error')
if sys.version_info[0] == 2:
warnings.filterwarnings(
"ignore",
"Python 2 is no longer supported by the Python core team. Support for it is now deprecated in cryptography,"
" and will be removed in a future release.")
warnings.filterwarnings(
"ignore",
"Python 2 is no longer supported by the Python core team. Support for it is now deprecated in cryptography,"
" and will be removed in the next release.")
if sys.version_info[:2] == (3, 5):
warnings.filterwarnings(
"ignore", "Python 3.5 support will be dropped in the next release ofcryptography. Please upgrade your Python.")
warnings.filterwarnings(
"ignore", "Python 3.5 support will be dropped in the next release of cryptography. Please upgrade your Python.")
try:
yield
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
run(args)
|
def main():
"""
Main program function used to isolate globals from imported code.
Changes to globals in imported modules on Python 2.x will overwrite our own globals.
"""
import ansible
import contextlib
import datetime
import json
import os
import re
import runpy
import subprocess
import sys
import traceback
import types
import warnings
ansible_path = os.path.dirname(os.path.dirname(ansible.__file__))
temp_path = os.environ['SANITY_TEMP_PATH'] + os.path.sep
external_python = os.environ.get('SANITY_EXTERNAL_PYTHON') or sys.executable
collection_full_name = os.environ.get('SANITY_COLLECTION_FULL_NAME')
collection_root = os.environ.get('ANSIBLE_COLLECTIONS_PATH')
try:
# noinspection PyCompatibility
from importlib import import_module
except ImportError:
def import_module(name):
__import__(name)
return sys.modules[name]
try:
# noinspection PyCompatibility
from StringIO import StringIO
except ImportError:
from io import StringIO
if collection_full_name:
# allow importing code from collections when testing a collection
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native, text_type
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
from ansible.utils.collection_loader import _collection_finder
yaml_to_json_path = os.path.join(os.path.dirname(__file__), 'yaml_to_json.py')
yaml_to_dict_cache = {}
# unique ISO date marker matching the one present in yaml_to_json.py
iso_date_marker = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:'
iso_date_re = re.compile('^%s([0-9]{4})-([0-9]{2})-([0-9]{2})$' % iso_date_marker)
def parse_value(value):
"""Custom value parser for JSON deserialization that recognizes our internal ISO date format."""
if isinstance(value, text_type):
match = iso_date_re.search(value)
if match:
value = datetime.date(int(match.group(1)), int(match.group(2)), int(match.group(3)))
return value
def object_hook(data):
"""Object hook for custom ISO date deserialization from JSON."""
return dict((key, parse_value(value)) for key, value in data.items())
def yaml_to_dict(yaml, content_id):
"""
Return a Python dict version of the provided YAML.
Conversion is done in a subprocess since the current Python interpreter does not have access to PyYAML.
"""
if content_id in yaml_to_dict_cache:
return yaml_to_dict_cache[content_id]
try:
cmd = [external_python, yaml_to_json_path]
proc = subprocess.Popen([to_bytes(c) for c in cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_bytes, stderr_bytes = proc.communicate(to_bytes(yaml))
if proc.returncode != 0:
raise Exception('command %s failed with return code %d: %s' % ([to_native(c) for c in cmd], proc.returncode, to_native(stderr_bytes)))
data = yaml_to_dict_cache[content_id] = json.loads(to_text(stdout_bytes), object_hook=object_hook)
return data
except Exception as ex:
raise Exception('internal importer error - failed to parse yaml: %s' % to_native(ex))
_collection_finder._meta_yml_to_dict = yaml_to_dict # pylint: disable=protected-access
collection_loader = _AnsibleCollectionFinder(paths=[collection_root])
# noinspection PyProtectedMember
collection_loader._install() # pylint: disable=protected-access
else:
# do not support collection loading when not testing a collection
collection_loader = None
# remove all modules under the ansible package
list(map(sys.modules.pop, [m for m in sys.modules if m.partition('.')[0] == ansible.__name__]))
args = sys.argv[1:]
import_type = 'module'
try:
type_index = args.index('--type')
import_type = args[type_index + 1]
args = args[:type_index] + args[type_index + 2:]
except ValueError:
pass
if import_type == 'module':
# pre-load an empty ansible package to prevent unwanted code in __init__.py from loading
# this more accurately reflects the environment that AnsiballZ runs modules under
# it also avoids issues with imports in the ansible package that are not allowed
ansible_module = types.ModuleType(ansible.__name__)
ansible_module.__file__ = ansible.__file__
ansible_module.__path__ = ansible.__path__
ansible_module.__package__ = ansible.__package__
sys.modules[ansible.__name__] = ansible_module
class ImporterAnsibleModuleException(Exception):
"""Exception thrown during initialization of ImporterAnsibleModule."""
class ImporterAnsibleModule:
"""Replacement for AnsibleModule to support import testing."""
def __init__(self, *args, **kwargs):
raise ImporterAnsibleModuleException()
class RestrictedModuleLoader:
"""Python module loader that restricts inappropriate imports."""
def __init__(self, path, name, restrict_to_module_paths):
self.path = path
self.name = name
self.loaded_modules = set()
self.restrict_to_module_paths = restrict_to_module_paths
def find_module(self, fullname, path=None):
"""Return self if the given fullname is restricted, otherwise return None.
:param fullname: str
:param path: str
:return: RestrictedModuleLoader | None
"""
if fullname in self.loaded_modules:
return None # ignore modules that are already being loaded
if is_name_in_namepace(fullname, ['ansible']):
if not self.restrict_to_module_paths:
return None # for non-modules, everything in the ansible namespace is allowed
if fullname in ('ansible.module_utils.basic', 'ansible.module_utils.common.removed'):
return self # intercept loading so we can modify the result
if is_name_in_namepace(fullname, ['ansible.module_utils', self.name]):
return None # module_utils and module under test are always allowed
if any(os.path.exists(candidate_path) for candidate_path in convert_ansible_name_to_absolute_paths(fullname)):
return self # restrict access to ansible files that exist
return None # ansible file does not exist, do not restrict access
if is_name_in_namepace(fullname, ['ansible_collections']):
if not collection_loader:
return self # restrict access to collections when we are not testing a collection
if not self.restrict_to_module_paths:
return None # for non-modules, everything in the ansible namespace is allowed
if is_name_in_namepace(fullname, ['ansible_collections...plugins.module_utils', self.name]):
return None # module_utils and module under test are always allowed
if collection_loader.find_module(fullname, path):
return self # restrict access to collection files that exist
return None # collection file does not exist, do not restrict access
# not a namespace we care about
return None
def load_module(self, fullname):
"""Raise an ImportError.
:type fullname: str
"""
if fullname == 'ansible.module_utils.basic':
module = self.__load_module(fullname)
# stop Ansible module execution during AnsibleModule instantiation
module.AnsibleModule = ImporterAnsibleModule
# no-op for _load_params since it may be called before instantiating AnsibleModule
module._load_params = lambda *args, **kwargs: {} # pylint: disable=protected-access
return module
if fullname == 'ansible.module_utils.common.removed':
module = self.__load_module(fullname)
# no-op for removed_module since it is called in place of AnsibleModule instantiation
module.removed_module = lambda *args, **kwargs: None
return module
raise ImportError('import of "%s" is not allowed in this context' % fullname)
def __load_module(self, fullname):
"""Load the requested module while avoiding infinite recursion.
:type fullname: str
:rtype: module
"""
self.loaded_modules.add(fullname)
return import_module(fullname)
def run(args):
"""Main program function."""
base_dir = os.getcwd()
messages = set()
if import_type == 'module':
for path in args or sys.stdin.read().splitlines():
name = convert_relative_path_to_name(path)
test_python_module(path, name, base_dir, messages, True)
elif import_type == 'plugin':
for path in args or sys.stdin.read().splitlines():
name = convert_relative_path_to_name(path)
test_python_module(path, name, base_dir, messages, False)
else:
print('Invalid import type!')
sys.exit(1)
if messages:
sys.exit(10)
def test_python_module(path, name, base_dir, messages, restrict_to_module_paths):
"""Test the given python module by importing it.
:type path: str
:type name: str
:type base_dir: str
:type messages: set[str]
:type restrict_to_module_paths: bool
"""
if name in sys.modules:
return # cannot be tested because it has already been loaded
is_ansible_module = (path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/')) and os.path.basename(path) != '__init__.py'
run_main = is_ansible_module
if path == 'lib/ansible/modules/async_wrapper.py':
# async_wrapper is a non-standard Ansible module (does not use AnsibleModule) so we cannot test the main function
run_main = False
capture_normal = Capture()
capture_main = Capture()
try:
with monitor_sys_modules(path, messages):
with restrict_imports(path, name, messages, restrict_to_module_paths):
with capture_output(capture_normal):
import_module(name)
if run_main:
with monitor_sys_modules(path, messages):
with restrict_imports(path, name, messages, restrict_to_module_paths):
with capture_output(capture_main):
runpy.run_module(name, run_name='__main__', alter_sys=True)
except ImporterAnsibleModuleException:
# module instantiated AnsibleModule without raising an exception
pass
except BaseException as ex: # pylint: disable=locally-disabled, broad-except
# intentionally catch all exceptions, including calls to sys.exit
exc_type, _exc, exc_tb = sys.exc_info()
message = str(ex)
results = list(reversed(traceback.extract_tb(exc_tb)))
line = 0
offset = 0
full_path = os.path.join(base_dir, path)
base_path = base_dir + os.path.sep
source = None
# avoid line wraps in messages
message = re.sub(r'\n *', ': ', message)
for result in results:
if result[0] == full_path:
# save the line number for the file under test
line = result[1] or 0
if not source and result[0].startswith(base_path) and not result[0].startswith(temp_path):
# save the first path and line number in the traceback which is in our source tree
source = (os.path.relpath(result[0], base_path), result[1] or 0, 0)
if isinstance(ex, SyntaxError):
# SyntaxError has better information than the traceback
if ex.filename == full_path: # pylint: disable=locally-disabled, no-member
# syntax error was reported in the file under test
line = ex.lineno or 0 # pylint: disable=locally-disabled, no-member
offset = ex.offset or 0 # pylint: disable=locally-disabled, no-member
elif ex.filename.startswith(base_path) and not ex.filename.startswith(temp_path): # pylint: disable=locally-disabled, no-member
# syntax error was reported in our source tree
source = (os.path.relpath(ex.filename, base_path), ex.lineno or 0, ex.offset or 0) # pylint: disable=locally-disabled, no-member
# remove the filename and line number from the message
# either it was extracted above, or it's not really useful information
message = re.sub(r' \(.*?, line [0-9]+\)$', '', message)
if source and source[0] != path:
message += ' (at %s:%d:%d)' % (source[0], source[1], source[2])
report_message(path, line, offset, 'traceback', '%s: %s' % (exc_type.__name__, message), messages)
finally:
capture_report(path, capture_normal, messages)
capture_report(path, capture_main, messages)
def is_name_in_namepace(name, namespaces):
"""Returns True if the given name is one of the given namespaces, otherwise returns False."""
name_parts = name.split('.')
for namespace in namespaces:
namespace_parts = namespace.split('.')
length = min(len(name_parts), len(namespace_parts))
truncated_name = name_parts[0:length]
truncated_namespace = namespace_parts[0:length]
# empty parts in the namespace are treated as wildcards
# to simplify the comparison, use those empty parts to indicate the positions in the name to be empty as well
for idx, part in enumerate(truncated_namespace):
if not part:
truncated_name[idx] = part
# example: name=ansible, allowed_name=ansible.module_utils
# example: name=ansible.module_utils.system.ping, allowed_name=ansible.module_utils
if truncated_name == truncated_namespace:
return True
return False
def check_sys_modules(path, before, messages):
"""Check for unwanted changes to sys.modules.
:type path: str
:type before: dict[str, module]
:type messages: set[str]
"""
after = sys.modules
removed = set(before.keys()) - set(after.keys())
changed = set(key for key, value in before.items() if key in after and value != after[key])
# additions are checked by our custom PEP 302 loader, so we don't need to check them again here
for module in sorted(removed):
report_message(path, 0, 0, 'unload', 'unloading of "%s" in sys.modules is not supported' % module, messages)
for module in sorted(changed):
report_message(path, 0, 0, 'reload', 'reloading of "%s" in sys.modules is not supported' % module, messages)
def convert_ansible_name_to_absolute_paths(name):
"""Calculate the module path from the given name.
:type name: str
:rtype: list[str]
"""
return [
os.path.join(ansible_path, name.replace('.', os.path.sep)),
os.path.join(ansible_path, name.replace('.', os.path.sep)) + '.py',
]
def convert_relative_path_to_name(path):
"""Calculate the module name from the given path.
:type path: str
:rtype: str
"""
if path.endswith('/__init__.py'):
clean_path = os.path.dirname(path)
else:
clean_path = path
clean_path = os.path.splitext(clean_path)[0]
name = clean_path.replace(os.path.sep, '.')
if collection_loader:
# when testing collections the relative paths (and names) being tested are within the collection under test
name = 'ansible_collections.%s.%s' % (collection_full_name, name)
else:
# when testing ansible all files being imported reside under the lib directory
name = name[len('lib/'):]
return name
class Capture:
"""Captured output and/or exception."""
def __init__(self):
self.stdout = StringIO()
self.stderr = StringIO()
def capture_report(path, capture, messages):
"""Report on captured output.
:type path: str
:type capture: Capture
:type messages: set[str]
"""
if capture.stdout.getvalue():
first = capture.stdout.getvalue().strip().splitlines()[0].strip()
report_message(path, 0, 0, 'stdout', first, messages)
if capture.stderr.getvalue():
first = capture.stderr.getvalue().strip().splitlines()[0].strip()
report_message(path, 0, 0, 'stderr', first, messages)
def report_message(path, line, column, code, message, messages):
"""Report message if not already reported.
:type path: str
:type line: int
:type column: int
:type code: str
:type message: str
:type messages: set[str]
"""
message = '%s:%d:%d: %s: %s' % (path, line, column, code, message)
if message not in messages:
messages.add(message)
print(message)
@contextlib.contextmanager
def restrict_imports(path, name, messages, restrict_to_module_paths):
"""Restrict available imports.
:type path: str
:type name: str
:type messages: set[str]
:type restrict_to_module_paths: bool
"""
restricted_loader = RestrictedModuleLoader(path, name, restrict_to_module_paths)
# noinspection PyTypeChecker
sys.meta_path.insert(0, restricted_loader)
sys.path_importer_cache.clear()
try:
yield
finally:
if import_type == 'plugin':
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
_AnsibleCollectionFinder._remove() # pylint: disable=protected-access
if sys.meta_path[0] != restricted_loader:
report_message(path, 0, 0, 'metapath', 'changes to sys.meta_path[0] are not permitted', messages)
while restricted_loader in sys.meta_path:
# noinspection PyTypeChecker
sys.meta_path.remove(restricted_loader)
sys.path_importer_cache.clear()
@contextlib.contextmanager
def monitor_sys_modules(path, messages):
"""Monitor sys.modules for unwanted changes, reverting any additions made to our own namespaces."""
snapshot = sys.modules.copy()
try:
yield
finally:
check_sys_modules(path, snapshot, messages)
for key in set(sys.modules.keys()) - set(snapshot.keys()):
if is_name_in_namepace(key, ('ansible', 'ansible_collections')):
del sys.modules[key] # only unload our own code since we know it's native Python
@contextlib.contextmanager
def capture_output(capture):
"""Capture sys.stdout and sys.stderr.
:type capture: Capture
"""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = capture.stdout
sys.stderr = capture.stderr
# clear all warnings registries to make all warnings available
for module in sys.modules.values():
try:
# noinspection PyUnresolvedReferences
module.__warningregistry__.clear()
except AttributeError:
pass
with warnings.catch_warnings():
warnings.simplefilter('error')
if sys.version_info[0] == 2:
warnings.filterwarnings(
"ignore",
"Python 2 is no longer supported by the Python core team. Support for it is now deprecated in cryptography,"
" and will be removed in a future release.")
warnings.filterwarnings(
"ignore",
"Python 2 is no longer supported by the Python core team. Support for it is now deprecated in cryptography,"
" and will be removed in the next release.")
if sys.version_info[:2] == (3, 5):
warnings.filterwarnings(
"ignore", "Python 3.5 support will be dropped in the next release ofcryptography. Please upgrade your Python.")
warnings.filterwarnings(
"ignore", "Python 3.5 support will be dropped in the next release of cryptography. Please upgrade your Python.")
try:
yield
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
run(args)
|
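The empty-part wildcard matching used by is_name_in_namepace above (for example 'ansible_collections...plugins.module_utils') can be exercised on its own; this is a copy of that logic for illustration, not an ansible-test API:

def name_in_namespace(name, namespaces):
    name_parts = name.split('.')
    for namespace in namespaces:
        namespace_parts = namespace.split('.')
        length = min(len(name_parts), len(namespace_parts))
        truncated_name = name_parts[:length]
        truncated_namespace = namespace_parts[:length]
        for idx, part in enumerate(truncated_namespace):
            if not part:
                truncated_name[idx] = part  # empty parts act as wildcards
        if truncated_name == truncated_namespace:
            return True
    return False

print(name_in_namespace('ansible.module_utils.basic', ['ansible.module_utils']))  # True
print(name_in_namespace('ansible_collections.ns.col.plugins.module_utils.x',
                        ['ansible_collections...plugins.module_utils']))          # True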
6,387 |
def create_appointments(number):
for i in range(1, number):
frappe.get_doc({
'doctype': 'Appointment',
'scheduled_time': datetime.datetime.min,
'customer_name': 'Test Customer'+str(i),
'customer_phone_number': '8088',
'customer_skype': 'test'+str(i),
})
|
def create_appointments(number):
for i in range(1, number):
frappe.get_doc({
'doctype': 'Appointment',
'scheduled_time': datetime.datetime.min,
'customer_name': 'Test Customer'+str(i),
'customer_phone_number': '8088',
'customer_skype': 'test' + str(i),
})
|
28,005 |
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('input',
type=str,
nargs='+',
metavar='file/folder',
help="The analysis result files and/or folders "
"containing analysis results which should be "
"parsed and printed.")
parser.add_argument('--config',
dest='config_file',
required=False,
help="R|Allow the configuration from an "
"explicit JSON based configuration file. "
"The value of the 'parse' key in the "
"config file will be emplaced as command "
"line arguments. The format of "
"configuration file is:\n"
"{\n"
" \"parse\": [\n"
" \"--trim-path-prefix\",\n"
" \"/home/workspace\"\n"
" ]\n"
"}")
parser.add_argument('-t', '--type', '--input-format',
dest="input_format",
required=False,
choices=['plist'],
default='plist',
help="Specify the format the analysis results were "
"created as.")
output_opts = parser.add_argument_group("export arguments")
output_opts.add_argument('-e', '--export',
dest="export",
required=False,
choices=['html', 'json', 'codeclimate'],
help="R|Specify extra output format type.\n"
"'codeclimate' format can be used for "
"Code Climate and for GitLab integration. "
"For more information see:\n"
"https://github.com/codeclimate/platform/"
"blob/master/spec/analyzers/SPEC.md"
"#data-types")
output_opts.add_argument('-o', '--output',
dest="output_path",
default=argparse.SUPPRESS,
help="Store the output in the given folder.")
parser.add_argument('--suppress',
type=str,
dest="suppress",
default=argparse.SUPPRESS,
required=False,
help="Path of the suppress file to use. Records in "
"the suppress file are used to suppress the "
"display of certain results when parsing the "
"analyses' report. (Reports to an analysis "
"result can also be suppressed in the source "
"code -- please consult the manual on how to "
"do so.) NOTE: The suppress file relies on the "
"\"bug identifier\" generated by the analyzers "
"which is experimental, take care when relying "
"on it.")
parser.add_argument('--export-source-suppress',
dest="create_suppress",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Write suppress data from the suppression "
"annotations found in the source files that were "
"analyzed earlier that created the results. "
"The suppression information will be written "
"to the parameter of '--suppress'.")
parser.add_argument('--print-steps',
dest="print_steps",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Print the steps the analyzers took in finding "
"the reported defect.")
parser.add_argument('-i', '--ignore', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which project "
"files should be omitted from analysis. Please "
"consult the User guide on how a Skipfile "
"should be laid out.")
parser.add_argument('--trim-path-prefix',
type=str,
nargs='*',
dest="trim_path_prefix",
required=False,
default=argparse.SUPPRESS,
help="Removes leading path from files which will be "
"printed. So if you have /a/b/c/x.cpp and "
"/a/b/c/y.cpp then by removing \"/a/b/\" prefix "
"will print files like c/x.cpp and c/y.cpp. "
"If multiple prefix is given, the longest match "
"will be removed.")
parser.add_argument('--review-status',
nargs='*',
dest="review_status",
metavar='REVIEW_STATUS',
choices=REVIEW_STATUS_VALUES,
default=["confirmed", "unreviewed"],
help="Filter results by review statuses. Valid "
"values are: {0}".format(
', '.join(REVIEW_STATUS_VALUES)))
logger.add_verbose_arguments(parser)
parser.set_defaults(
func=main, func_process_config_file=cmd_config.process_config_file)
|
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('input',
type=str,
nargs='+',
metavar='file/folder',
help="The analysis result files and/or folders "
"containing analysis results which should be "
"parsed and printed.")
parser.add_argument('--config',
dest='config_file',
required=False,
help="R|Allow the configuration from an "
"explicit JSON based configuration file. "
"The value of the 'parse' key in the "
"config file will be emplaced as command "
"line arguments. The format of "
"configuration file is:\n"
"{\n"
" \"parse\": [\n"
" \"--trim-path-prefix\",\n"
" \"$HOME/workspace\"\n"
" ]\n"
"}")
parser.add_argument('-t', '--type', '--input-format',
dest="input_format",
required=False,
choices=['plist'],
default='plist',
help="Specify the format the analysis results were "
"created as.")
output_opts = parser.add_argument_group("export arguments")
output_opts.add_argument('-e', '--export',
dest="export",
required=False,
choices=['html', 'json', 'codeclimate'],
help="R|Specify extra output format type.\n"
"'codeclimate' format can be used for "
"Code Climate and for GitLab integration. "
"For more information see:\n"
"https://github.com/codeclimate/platform/"
"blob/master/spec/analyzers/SPEC.md"
"#data-types")
output_opts.add_argument('-o', '--output',
dest="output_path",
default=argparse.SUPPRESS,
help="Store the output in the given folder.")
parser.add_argument('--suppress',
type=str,
dest="suppress",
default=argparse.SUPPRESS,
required=False,
help="Path of the suppress file to use. Records in "
"the suppress file are used to suppress the "
"display of certain results when parsing the "
"analyses' report. (Reports to an analysis "
"result can also be suppressed in the source "
"code -- please consult the manual on how to "
"do so.) NOTE: The suppress file relies on the "
"\"bug identifier\" generated by the analyzers "
"which is experimental, take care when relying "
"on it.")
parser.add_argument('--export-source-suppress',
dest="create_suppress",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Write suppress data from the suppression "
"annotations found in the source files that were "
"analyzed earlier that created the results. "
"The suppression information will be written "
"to the parameter of '--suppress'.")
parser.add_argument('--print-steps',
dest="print_steps",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Print the steps the analyzers took in finding "
"the reported defect.")
parser.add_argument('-i', '--ignore', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which project "
"files should be omitted from analysis. Please "
"consult the User guide on how a Skipfile "
"should be laid out.")
parser.add_argument('--trim-path-prefix',
type=str,
nargs='*',
dest="trim_path_prefix",
required=False,
default=argparse.SUPPRESS,
help="Removes leading path from files which will be "
"printed. So if you have /a/b/c/x.cpp and "
"/a/b/c/y.cpp then by removing \"/a/b/\" prefix "
"will print files like c/x.cpp and c/y.cpp. "
"If multiple prefix is given, the longest match "
"will be removed.")
parser.add_argument('--review-status',
nargs='*',
dest="review_status",
metavar='REVIEW_STATUS',
choices=REVIEW_STATUS_VALUES,
default=["confirmed", "unreviewed"],
help="Filter results by review statuses. Valid "
"values are: {0}".format(
', '.join(REVIEW_STATUS_VALUES)))
logger.add_verbose_arguments(parser)
parser.set_defaults(
func=main, func_process_config_file=cmd_config.process_config_file)
|
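The --config help above describes a JSON file whose 'parse' list is spliced into the command line; a minimal sketch of that expansion with illustrative file content (this is not CodeChecker's actual config loader):

import json

config_text = '{"parse": ["--trim-path-prefix", "/home/workspace"]}'
extra_args = json.loads(config_text).get("parse", [])
argv = ["my_results_dir"] + extra_args
print(argv)  # ['my_results_dir', '--trim-path-prefix', '/home/workspace']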
47,308 |
def write_model_card(
hf_model_name: str,
repo_root=DEFAULT_REPO,
save_dir=Path("marian_converted"),
dry_run=False,
extra_metadata={},
) -> str:
"""
Copy the most recent model's readme section from opus, and add metadata. upload command: aws s3 sync model_card_dir
s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun
"""
import pandas as pd
hf_model_name = remove_prefix(hf_model_name, ORG_NAME)
opus_name: str = convert_hf_name_to_opus_name(hf_model_name)
if not (repo_root in ("OPUS-MT-train", "Tatoeba-Challenge")):
raise ValueError(f"Repos root is {repo_root}. Expected either OPUS-MT-train or Tatoeba-Challenge")
opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md")
if not (opus_readme_path.exists()):
raise ValueError(f"Readme file {opus_readme_path} not found")
opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")]
readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md"
s, t = ",".join(opus_src), ",".join(opus_tgt)
metadata = {
"hf_name": hf_model_name,
"source_languages": s,
"target_languages": t,
"opus_readme_url": readme_url,
"original_repo": repo_root,
"tags": ["translation"],
}
metadata.update(extra_metadata)
metadata.update(get_system_metadata(repo_root))
# combine with opus markdown
extra_markdown = (
f"### {hf_model_name}\n\n* source group: {metadata['src_name']} \n* target group: "
f"{metadata['tgt_name']} \n* OPUS readme: [{opus_name}]({readme_url})\n"
)
content = opus_readme_path.open().read()
content = content.split("\n# ")[-1] # Get the lowest level 1 header in the README -- the most recent model.
splat = content.split("*")[2:]
print(splat[3])
content = "*".join(splat)
content = (
FRONT_MATTER_TEMPLATE.format(metadata["src_alpha2"])
+ extra_markdown
+ "\n* "
+ content.replace("download", "download original weights")
)
items = "\n\n".join([f"- {k}: {v}" for k, v in metadata.items()])
sec3 = "\n### System Info: \n" + items
content += sec3
if dry_run:
return content, metadata
sub_dir = save_dir / f"opus-mt-{hf_model_name}"
sub_dir.mkdir(exist_ok=True)
dest = sub_dir / "README.md"
dest.open("w").write(content)
pd.Series(metadata).to_json(sub_dir / "metadata.json")
# if dry_run:
return content, metadata
|
def write_model_card(
hf_model_name: str,
repo_root=DEFAULT_REPO,
save_dir=Path("marian_converted"),
dry_run=False,
extra_metadata={},
) -> str:
"""
Copy the most recent model's readme section from opus, and add metadata. upload command: aws s3 sync model_card_dir
s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun
"""
import pandas as pd
hf_model_name = remove_prefix(hf_model_name, ORG_NAME)
opus_name: str = convert_hf_name_to_opus_name(hf_model_name)
if repo_root not in ("OPUS-MT-train", "Tatoeba-Challenge"):
raise ValueError(f"Repos root is {repo_root}. Expected either OPUS-MT-train or Tatoeba-Challenge")
opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md")
if not (opus_readme_path.exists()):
raise ValueError(f"Readme file {opus_readme_path} not found")
opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")]
readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md"
s, t = ",".join(opus_src), ",".join(opus_tgt)
metadata = {
"hf_name": hf_model_name,
"source_languages": s,
"target_languages": t,
"opus_readme_url": readme_url,
"original_repo": repo_root,
"tags": ["translation"],
}
metadata.update(extra_metadata)
metadata.update(get_system_metadata(repo_root))
# combine with opus markdown
extra_markdown = (
f"### {hf_model_name}\n\n* source group: {metadata['src_name']} \n* target group: "
f"{metadata['tgt_name']} \n* OPUS readme: [{opus_name}]({readme_url})\n"
)
content = opus_readme_path.open().read()
content = content.split("\n# ")[-1] # Get the lowest level 1 header in the README -- the most recent model.
splat = content.split("*")[2:]
print(splat[3])
content = "*".join(splat)
content = (
FRONT_MATTER_TEMPLATE.format(metadata["src_alpha2"])
+ extra_markdown
+ "\n* "
+ content.replace("download", "download original weights")
)
items = "\n\n".join([f"- {k}: {v}" for k, v in metadata.items()])
sec3 = "\n### System Info: \n" + items
content += sec3
if dry_run:
return content, metadata
sub_dir = save_dir / f"opus-mt-{hf_model_name}"
sub_dir.mkdir(exist_ok=True)
dest = sub_dir / "README.md"
dest.open("w").write(content)
pd.Series(metadata).to_json(sub_dir / "metadata.json")
# if dry_run:
return content, metadata
|
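The README handling above keeps only the last level-1 section of the OPUS readme, i.e. the most recent model entry. A tiny sketch of that slicing with made-up text:

readme = "# old model\nold details\n# newest model\n* download: ...\n* test set scores: ..."
latest_section = readme.split("\n# ")[-1]  # lowest level-1 header wins
print(latest_section.splitlines()[0])      # newest model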
13,878 |
def test_build(compiled, format, available_targets, update_reference):
name = compiled
scrub = SCRUBBERS[format]
output_pattern = OUTPUT_PATTERN[format]
assert_equals = ASSERT_EQUALS.get(format, None)
encoding = 'utf8'
if format == 'html' and name.startswith('html-encoding-'):
encoding = re.match('^html-encoding-(.*)$', name).group(1)
os.chdir(os.path.join(basedir, name))
assert run(["make", format])
if GENERATE_REFERENCE: # pragma: no cover
for generated_file in glob.glob(output_pattern):
reference_file = os.path.join('reference', generated_file)
if os.path.isfile(reference_file):
continue
else:
try:
os.makedirs('reference')
except FileExistsError:
# directory already exists
pass
print('copying %s to %s' % (generated_file, reference_file))
shutil.copyfile(generated_file, reference_file)
for coverage_file, reference_file in find_reference_files(output_pattern):
with io.open(coverage_file, encoding=encoding) as f:
coverage_raw = f.read()
coverage = scrub(coverage_raw)
with io.open(reference_file, encoding=encoding) as f:
reference = scrub(f.read())
if assert_equals is not None:
assert_equals(coverage, reference)
else:
diff_out = list(difflib.unified_diff(reference.splitlines(keepends=True), coverage.splitlines(keepends=True), fromfile=reference_file, tofile=coverage_file))
len_diff_is_zero = True if len(diff_out) == 0 else False
if not len_diff_is_zero and update_reference:
with io.open(reference_file, mode="w", encoding=encoding) as f:
f.write(coverage_raw)
assert len_diff_is_zero, "Unified diff output:\n" + "".join(diff_out)
# some tests require additional cleanup after each test
if 'clean-each' in available_targets:
assert run(['make', 'clean-each'])
os.chdir(basedir)
|
def test_build(compiled, format, available_targets, update_reference):
name = compiled
scrub = SCRUBBERS[format]
output_pattern = OUTPUT_PATTERN[format]
assert_equals = ASSERT_EQUALS.get(format, None)
encoding = 'utf8'
if format == 'html' and name.startswith('html-encoding-'):
encoding = re.match('^html-encoding-(.*)$', name).group(1)
os.chdir(os.path.join(basedir, name))
assert run(["make", format])
if GENERATE_REFERENCE: # pragma: no cover
for generated_file in glob.glob(output_pattern):
reference_file = os.path.join('reference', generated_file)
if os.path.isfile(reference_file):
continue
else:
try:
os.makedirs('reference')
except FileExistsError:
# directory already exists
pass
print('copying %s to %s' % (generated_file, reference_file))
shutil.copyfile(generated_file, reference_file)
for coverage_file, reference_file in find_reference_files(output_pattern):
with io.open(coverage_file, encoding=encoding) as f:
coverage_raw = f.read()
coverage = scrub(coverage_raw)
with io.open(reference_file, encoding=encoding) as f:
reference = scrub(f.read())
if assert_equals is not None:
assert_equals(coverage, reference)
else:
diff_out = list(difflib.unified_diff(reference.splitlines(keepends=True), coverage.splitlines(keepends=True), fromfile=reference_file, tofile=coverage_file))
diff_is_empty = len(diff_out) == 0
            if not diff_is_empty and update_reference:
                with io.open(reference_file, mode="w", encoding=encoding) as f:
                    f.write(coverage_raw)
            assert diff_is_empty, "Unified diff output:\n" + "".join(diff_out)
# some tests require additional cleanup after each test
if 'clean-each' in available_targets:
assert run(['make', 'clean-each'])
os.chdir(basedir)
|
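A small, self-contained sketch of the unified-diff comparison pattern used in test_build above; the file names and contents are invented.
# Minimal sketch of the diff-and-assert pattern from test_build (contents invented).
import difflib

reference = "line 1\nline 2\n"
generated = "line 1\nline 2 changed\n"
diff_out = list(difflib.unified_diff(
    reference.splitlines(keepends=True),
    generated.splitlines(keepends=True),
    fromfile="reference/coverage.xml",
    tofile="coverage.xml",
))
diff_is_empty = len(diff_out) == 0
if not diff_is_empty:
    print("Unified diff output:\n" + "".join(diff_out))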
55,043 |
def net_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `net flow constraint <https://doi.org/10.1080/0020739X.2010.526248>`__
Hamiltonian.
The net-zero flow constraint is, for all :math:`i`:
.. math:: \sum_{j, (i, j) \in E} x_{ij} = \sum_{j, (j, i) \in E} x_{ji},
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`V` are the graph vertices, :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has a net zero flow.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the net-flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "in_edges") or not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_net_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
def net_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `net flow constraint <https://doi.org/10.1080/0020739X.2010.526248>`__
Hamiltonian.
Given a subset of edges in a directed graph, the net-flow constraint imposes that the number of
edges leaving any given node is equal to the number of edges entering the node, i.e.,
.. math:: \sum_{j, (i, j) \in E} x_{ij} = \sum_{j, (j, i) \in E} x_{ji},
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`V` are the graph vertices, :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has a net zero flow.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the net-flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "in_edges") or not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_net_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
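A hedged, purely classical sketch of the net-flow constraint described in the docstring above, using networkx only; the graph and the edge selection are invented. The quantum Hamiltonian is minimized exactly when this classical penalty is zero.
# Classical sketch of the net-flow penalty (graph and selection invented).
import networkx as nx

graph = nx.DiGraph([(0, 1), (1, 2), (2, 0), (0, 2)])
selected = {(0, 1), (1, 2), (2, 0)}  # a directed cycle -> net flow is zero everywhere

penalty = 0
for node in graph.nodes:
    out_flow = sum(1 for edge in graph.out_edges(node) if edge in selected)
    in_flow = sum(1 for edge in graph.in_edges(node) if edge in selected)
    penalty += (out_flow - in_flow) ** 2
print(penalty)  # 0 for this selection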
47,752 |
def test__templater_dbt_handle_database_connection_failure(
project_dir, dbt_templater # noqa: F811
):
"""Test the result of a failed database connection."""
from dbt.adapters.factory import get_adapter
src_fpath = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/exception_connect_database.sql"
target_fpath = os.path.abspath(
os.path.join(
project_dir, "models/my_new_project/exception_connect_database.sql"
)
)
# We move the file that throws an error in and out of the project directory
# as dbt throws an error if a node fails to parse while computing the DAG
os.rename(src_fpath, target_fpath)
try:
_, violations = dbt_templater.process(
in_str="",
fname=target_fpath,
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
except Exception as e:
if DBT_VERSION_TUPLE == (1, 0):
# In dbt 1.0.0, connection failures raise an exception
assert str(e).startswith(
"Runtime Error\n connection never acquired for thread"
)
else:
raise (e)
finally:
get_adapter(dbt_templater.dbt_config).connections.release()
os.rename(target_fpath, src_fpath)
if DBT_VERSION_TUPLE != (1, 0):
assert violations
        # NB: Replace slashes to deal with different platform paths being returned.
assert (
violations[0]
.desc()
.replace("\\", "/")
.startswith("dbt tried to connect to the database")
)
|
def test__templater_dbt_handle_database_connection_failure(
project_dir, dbt_templater # noqa: F811
):
"""Test the result of a failed database connection."""
from dbt.adapters.factory import get_adapter
src_fpath = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/exception_connect_database.sql"
target_fpath = os.path.abspath(
os.path.join(
project_dir, "models/my_new_project/exception_connect_database.sql"
)
)
# We move the file that throws an error in and out of the project directory
# as dbt throws an error if a node fails to parse while computing the DAG
os.rename(src_fpath, target_fpath)
try:
_, violations = dbt_templater.process(
in_str="",
fname=target_fpath,
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
except Exception as e:
if DBT_VERSION_TUPLE >= (1, 0):
# In dbt 1.0.0, connection failures raise an exception
assert str(e).startswith(
"Runtime Error\n connection never acquired for thread"
)
else:
raise (e)
finally:
get_adapter(dbt_templater.dbt_config).connections.release()
os.rename(target_fpath, src_fpath)
if DBT_VERSION_TUPLE != (1, 0):
assert violations
        # NB: Replace slashes to deal with different platform paths being returned.
assert (
violations[0]
.desc()
.replace("\\", "/")
.startswith("dbt tried to connect to the database")
)
|
7,616 |
def _parse_formats(*cosmos: object, format: _FormatsT) -> ndarray:
"""Parse Cosmology-like to |Cosmology|, using provided formats.
`format` is broadcast to match the shape of the cosmology arguments.
Note that the cosmology arguments are not broadcast against ``format``,
so it cannot determine the output shape.
Parameters
----------
*cosmos : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as
specified by the corresponding `format`.
Raises
------
TypeError
If any in 'cosmos' is not a |Cosmology| and the corresponding 'format'
equals `numpy.False_`.
"""
formats = np.broadcast_to(np.array(format, dtype=object), len(cosmos))
# parse each cosmo & format
# Have to deal with things that do not broadcast well.
# astropy.row cannot be used in an array, even if dtype=object
# and will raise a segfault when used in a ufunc.
towrap = (isinstance(cosmo, _CosmologyWrapper._cantbroadcast) for cosmo in cosmos)
wcosmos = [(c if not wrap else _CosmologyWrapper(c)) for c, wrap in zip(cosmos, towrap)]
return _parse_format(wcosmos, formats)
|
def _parse_formats(*cosmos: object, format: _FormatsT) -> ndarray:
"""Parse Cosmology-like to |Cosmology|, using provided formats.
`format` is broadcast to match the shape of the cosmology arguments.
Note that the cosmology arguments are not broadcast against ``format``,
so it cannot determine the output shape.
Parameters
----------
*cosmos : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as
specified by the corresponding `format`.
Raises
------
TypeError
If any in 'cosmos' is not a |Cosmology| and the corresponding 'format'
equals `numpy.False_`.
"""
formats = np.broadcast_to(np.array(format, dtype=object), len(cosmos))
# parse each cosmo & format
# Have to deal with things that do not broadcast well.
# astropy.row cannot be used in an array, even if dtype=object
# and will raise a segfault when used in a ufunc.
towrap = (isinstance(cosmo, _CosmologyWrapper._cantbroadcast) for cosmo in cosmos)
    wcosmos = [c if not wrap else _CosmologyWrapper(c) for c, wrap in zip(cosmos, towrap)]
    return _parse_format(wcosmos, formats)
|
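A small sketch of the format-broadcasting step above; the placeholder cosmology names and the "mapping" format value are invented.
# Sketch of the format broadcasting used above (values invented): a scalar format
# is repeated once per cosmology argument via numpy object broadcasting.
import numpy as np

cosmos = ("cosmo1", "cosmo2", "cosmo3")      # stand-ins for Cosmology-like inputs
formats = np.broadcast_to(np.array("mapping", dtype=object), len(cosmos))
print(list(formats))                         # ['mapping', 'mapping', 'mapping']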
59,147 |
def _process_image(adata, data_points, img_key, crop_coord, scale_spot, bw=False):
offset = 100
cmap_img = None
img = adata.uns[img_key]
scalef_key = f"tissue_{img_key}_scalef"
spot_size = adata.uns[scalef_key] * data_points[0].max() * scale_spot
if crop_coord is not None:
img_coord = [crop_coord[0], crop_coord[1], np.ceil(img.shape[0]-crop_coord[2]), np.ceil(img.shape[0]-crop_coord[3])]
else:
img_coord = [
data_points[0][:, 0].min() - offset,
data_points[0][:, 0].max() + offset,
data_points[0][:, 1].min() - offset,
data_points[0][:, 1].max() + offset,
]
if bw:
img = np.dot(img[..., :3], [0.2989, 0.5870, 0.1140])
cmap_img = "gray"
return img, img_coord, spot_size, cmap_img
|
def _process_image(adata, data_points, img_key, crop_coord, scale_spot, bw=False):
offset = 100
cmap_img = None
img = adata.uns[img_key]
scalef_key = f"tissue_{img_key}_scalef"
spot_size = adata.uns[scalef_key] * data_points[0].max() * scale_spot
if crop_coord is not None:
crop_coord = np.asarray(crop_coord)
if len(crop_coord) != 4:
            raise ValueError(f"Invalid crop_coord of length {len(crop_coord)} (!=4)")
img_coord = (*crop_coord[:2], *np.ceil(img.shape[0] - crop_coord[2:4]).astype(int))
else:
img_coord = [
data_points[0][:, 0].min() - offset,
data_points[0][:, 0].max() + offset,
data_points[0][:, 1].min() - offset,
data_points[0][:, 1].max() + offset,
]
if bw:
img = np.dot(img[..., :3], [0.2989, 0.5870, 0.1140])
cmap_img = "gray"
return img, img_coord, spot_size, cmap_img
|
2,554 |
def assert_all_finite(
X,
*,
allow_nan=False,
estimator_name=None,
input_name="",
):
"""Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : {ndarray, sparse matrix}
The input data.
allow_nan : bool, default=False
Do not throw an error if X contains Nan.
estimator_name : str, default=None
The estimator name, used to construct the error message.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
"""
_assert_all_finite(
X.data if sp.issparse(X) else X,
allow_nan=allow_nan,
estimator_name=estimator_name,
input_name=input_name,
)
|
def assert_all_finite(
X,
*,
allow_nan=False,
estimator_name=None,
input_name="",
):
"""Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : {ndarray, sparse matrix}
The input data.
allow_nan : bool, default=False
        If True, do not throw an error when X contains NaN.
estimator_name : str, default=None
The estimator name, used to construct the error message.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
"""
_assert_all_finite(
X.data if sp.issparse(X) else X,
allow_nan=allow_nan,
estimator_name=estimator_name,
input_name=input_name,
)
|
34,243 |
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
interactive_parser = subparsers.add_parser(
"interactive",
conflict_handler="resolve",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Starts an interactive learning session to create new training data for a "
"Rasa model by chatting.",
)
interactive_parser.set_defaults(func=interactive)
interactive_parser.add_argument(
"--e2e", action="store_true", help="save file in e2e format"
)
interactive_subparsers = interactive_parser.add_subparsers()
interactive_core_parser = interactive_subparsers.add_parser(
"core",
conflict_handler="resolve",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Starts an interactive learning session model to create new training data "
"for a Rasa Core model by chatting. Uses the 'RegexInterpreter', i.e. "
"`/<intent>` input format.",
)
interactive_core_parser.set_defaults(func=interactive_core)
arguments.set_interactive_arguments(interactive_parser)
arguments.set_interactive_core_arguments(interactive_core_parser)
|
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
interactive_parser = subparsers.add_parser(
"interactive",
conflict_handler="resolve",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Starts an interactive learning session to create new training data for a "
"Rasa model by chatting.",
)
interactive_parser.set_defaults(func=interactive)
interactive_parser.add_argument(
"--e2e", action="store_true", help="Save story files in e2e format. In this format user messages will be included in the stories."
)
interactive_subparsers = interactive_parser.add_subparsers()
interactive_core_parser = interactive_subparsers.add_parser(
"core",
conflict_handler="resolve",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Starts an interactive learning session model to create new training data "
"for a Rasa Core model by chatting. Uses the 'RegexInterpreter', i.e. "
"`/<intent>` input format.",
)
interactive_core_parser.set_defaults(func=interactive_core)
arguments.set_interactive_arguments(interactive_parser)
arguments.set_interactive_core_arguments(interactive_core_parser)
|
43,788 |
def pauli_group(n_qubits, wire_map=None):
"""Generate the :math:`n`-qubit Pauli group.
This function enables the construction of the :math:`n`-qubit Pauli group with no
storage involved. The :math:`n`-qubit Pauli group has size :math:`4^n`,
thus it may not be desirable to construct it in full and store.
The order of iteration is based on the binary symplectic representation of
the Pauli group as :math:`2n`-bit strings. Ordering is done by converting
the integers :math:`0` to :math:`2^{2n}` to binary strings, and converting those
strings to Pauli operators using the ``binary_to_pauli`` method.
Args:
n_qubits (int): The number of qubits for which to create the group.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels
used in the Pauli word as keys, and unique integer labels as their values.
If no wire map is provided, wires will be labeled by integers between 0 and ``n_qubits``.
Returns:
.Operation: The next Pauli word in the group.
**Example**
The ``pauli_group`` generator can be used to loop over the Pauli group as follows:
.. code-block:: python
from pennylane.pauli import pauli_group
n_qubits = 3
for p in pauli_group(n_qubits):
print(p)
The Pauli group in full can be obtained in full like so:
.. code-block:: python
full_pg = list(pauli_group(n_qubits))
The group can also be created using a custom wire map (if no map is
specified, a default map of label :math:`i` to wire ``i`` will be created).
.. code-block:: python
n_qubits = 3
wire_map = {'a' : 0, 'b' : 1, 'c' : 2}
for p in pauli_group(n_qubits, wire_map=wire_map):
print(p)
"""
if not isinstance(n_qubits, int):
        raise TypeError("Must specify an integer number of qubits to construct the Pauli group.")
if n_qubits <= 0:
raise ValueError("Number of qubits must be at least 1 to construct Pauli group.")
return _pauli_group_generator(n_qubits, wire_map=wire_map)
|
def pauli_group(n_qubits, wire_map=None):
"""Generate the :math:`n`-qubit Pauli group.
This function enables the construction of the :math:`n`-qubit Pauli group with no
storage involved. The :math:`n`-qubit Pauli group has size :math:`4^n`,
thus it may not be desirable to construct it in full and store.
The order of iteration is based on the binary symplectic representation of
the Pauli group as :math:`2n`-bit strings. Ordering is done by converting
the integers :math:`0` to :math:`2^{2n}` to binary strings, and converting those
    strings to Pauli operators using the :func:`~.binary_to_pauli` method.
Args:
n_qubits (int): The number of qubits for which to create the group.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels
used in the Pauli word as keys, and unique integer labels as their values.
If no wire map is provided, wires will be labeled by integers between 0 and ``n_qubits``.
Returns:
.Operation: The next Pauli word in the group.
**Example**
The ``pauli_group`` generator can be used to loop over the Pauli group as follows:
.. code-block:: python
from pennylane.pauli import pauli_group
n_qubits = 3
for p in pauli_group(n_qubits):
print(p)
The Pauli group in full can be obtained in full like so:
.. code-block:: python
full_pg = list(pauli_group(n_qubits))
The group can also be created using a custom wire map (if no map is
specified, a default map of label :math:`i` to wire ``i`` will be created).
.. code-block:: python
n_qubits = 3
wire_map = {'a' : 0, 'b' : 1, 'c' : 2}
for p in pauli_group(n_qubits, wire_map=wire_map):
print(p)
"""
if not isinstance(n_qubits, int):
        raise TypeError("Must specify an integer number of qubits to construct the Pauli group.")
if n_qubits <= 0:
raise ValueError("Number of qubits must be at least 1 to construct Pauli group.")
return _pauli_group_generator(n_qubits, wire_map=wire_map)
|
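A pure-Python sketch of the binary-symplectic enumeration the docstring describes, for n = 2; the bit-pair-to-letter mapping below is a simplified stand-in and not necessarily the exact convention implemented by binary_to_pauli.
# Hedged sketch of the binary-symplectic ordering described above (n = 2).
n_qubits = 2
letter = {(0, 0): "I", (1, 0): "X", (1, 1): "Y", (0, 1): "Z"}  # illustrative mapping only
for integer in range(4 ** n_qubits):
    bits = [int(b) for b in format(integer, f"0{2 * n_qubits}b")]
    x_part, z_part = bits[:n_qubits], bits[n_qubits:]
    word = "".join(letter[(x, z)] for x, z in zip(x_part, z_part))
    print(integer, word)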
41,978 |
def _encode_param(value: Any) -> str:
if value is None:
return _NONE
return str(value)
|
def _encode_param(value: Any) -> str:
return _NONE if value is None else str(value)
|
24,329 |
def _varbind_value_to_float(s):
# type: (Any) -> float
"""
Sanitize varbind values
"""
if not isinstance(s, OctetString):
return s
s = s.asOctets()
s = to_native_string(s)
found = s.find('\x00')
if found >= 0:
s = s[:found]
return float(s.strip())
|
def _varbind_value_to_float(s):
# type: (Any) -> float
"""
Sanitize varbind values
"""
if not isinstance(s, OctetString):
return s
s = to_native_string(str(s))
found = s.find('\x00')
if found >= 0:
s = s[:found]
return float(s.strip())
|
2,973 |
def _maybe_wrap_formatter(formatter, na_rep: Optional[str]):
if isinstance(formatter, str):
formatter_func = lambda x: formatter.format(x)
elif callable(formatter):
formatter_func = formatter
else:
msg = f"Expected a template string or callable, got {formatter} " "instead"
raise TypeError(msg)
if na_rep is None:
return formatter_func
elif isinstance(na_rep, str):
return lambda x: na_rep if pd.isna(x) else formatter_func(x)
else:
msg = f"Expected a string, got {na_rep} instead"
raise TypeError(msg)
|
def _maybe_wrap_formatter(formatter, na_rep: Optional[str]):
if isinstance(formatter, str):
formatter_func = lambda x: formatter.format(x)
elif callable(formatter):
formatter_func = formatter
else:
msg = f"Expected a template string or callable, got {formatter} instead"
raise TypeError(msg)
if na_rep is None:
return formatter_func
elif isinstance(na_rep, str):
return lambda x: na_rep if pd.isna(x) else formatter_func(x)
else:
msg = f"Expected a string, got {na_rep} instead"
raise TypeError(msg)
|
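A usage sketch of the wrapped formatter returned above; it assumes pandas and numpy are available, and the template string and values are invented.
# Usage sketch for the formatter wrapping above (values invented).
import numpy as np
import pandas as pd

formatter_func = lambda x: "{:.2f}".format(x)
na_rep = "--"
wrapped = lambda x: na_rep if pd.isna(x) else formatter_func(x)

print(wrapped(3.14159))  # '3.14'
print(wrapped(np.nan))   # '--'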
313 |
def _is_one_d(dist_shape):
if hasattr(dist_shape, "dshape") and dist_shape.dshape in {()}:
return True
elif hasattr(dist_shape, "shape") and dist_shape.shape in {()}:
return True
return False
|
def _is_one_d(dist_shape):
if hasattr(dist_shape, "dshape") and dist_shape.dshape in {()}:
return True
elif hasattr(dist_shape, "shape") and dist_shape.shape == ():
return True
return False
|
40,180 |
def _check_anndata_setup_equivalence(
adata_source: Union[AnnData, dict], adata_target: AnnData
) -> bool:
"""
Checks if target setup is equivalent to source.
Parameters
----------
adata_source
Either AnnData already setup or scvi_setup_dict as the source
adata_target
Target AnnData to check setup equivalence
Returns
-------
Whether the adata_target should be run through `transfer_anndata_setup`
"""
if isinstance(adata_source, anndata.AnnData):
_scvi_dict = adata_source.uns["_scvi"]
else:
_scvi_dict = adata_source
adata = adata_target
stats = _scvi_dict["summary_stats"]
target_n_vars = adata.shape[1]
error_msg = (
"Number of {} in anndata different from initial anndata used for training."
)
if target_n_vars != stats["n_vars"]:
raise ValueError(error_msg.format("vars"))
error_msg = (
"There are more {} categories in the data than were originally registered. "
+ "Please check your {} categories as well as adata.uns['_scvi']['categorical_mappings']."
)
self_categoricals = _scvi_dict["categorical_mappings"]
self_batch_mapping = self_categoricals["_scvi_batch"]["mapping"]
adata_categoricals = adata.uns["_scvi"]["categorical_mappings"]
adata_batch_mapping = adata_categoricals["_scvi_batch"]["mapping"]
# check if mappings are equal or needs transfer
transfer_setup = _needs_transfer(self_batch_mapping, adata_batch_mapping, "batch")
self_labels_mapping = self_categoricals["_scvi_labels"]["mapping"]
adata_labels_mapping = adata_categoricals["_scvi_labels"]["mapping"]
transfer_setup = transfer_setup or _needs_transfer(
self_labels_mapping, adata_labels_mapping, "label"
)
# validate any extra categoricals
error_msg = (
"Registered categorical key order mismatch between "
+ "the anndata used to train and the anndata passed in."
+ "Expected categories & order {}. Received {}.\n"
)
if "extra_categoricals" in _scvi_dict.keys():
target_dict = adata.uns["_scvi"]["extra_categoricals"]
source_dict = _scvi_dict["extra_categoricals"]
# check that order of keys setup is same
if not np.array_equal(target_dict["keys"], source_dict["keys"]):
raise ValueError(error_msg.format(source_dict["keys"], target_dict["keys"]))
# check mappings are equivalent
target_extra_cat_maps = adata.uns["_scvi"]["extra_categoricals"]["mappings"]
for key, val in source_dict["mappings"].items():
target_map = target_extra_cat_maps[key]
transfer_setup = transfer_setup or _needs_transfer(val, target_map, key)
# validate any extra continuous covs
if "extra_continuous_keys" in _scvi_dict.keys():
if "extra_continuous_keys" not in adata.uns["_scvi"].keys():
raise ValueError('extra_continuous_keys not in adata.uns["_scvi"]')
target_cont_keys = adata.uns["_scvi"]["extra_continuous_keys"]
source_cont_keys = _scvi_dict["extra_continuous_keys"]
# check that order of keys setup is same
if not np.array_equal(target_cont_keys["keys"], source_cont_keys["keys"]):
raise ValueError(
error_msg.format(source_cont_keys["keys"], target_cont_keys["keys"])
)
return transfer_setup
|
def _check_anndata_setup_equivalence(
adata_source: Union[AnnData, dict], adata_target: AnnData
) -> bool:
"""
Checks if target setup is equivalent to source.
Parameters
----------
adata_source
Either AnnData already setup or scvi_setup_dict as the source
adata_target
Target AnnData to check setup equivalence
Returns
-------
Whether the adata_target should be run through `transfer_anndata_setup`
"""
if isinstance(adata_source, anndata.AnnData):
_scvi_dict = adata_source.uns["_scvi"]
else:
_scvi_dict = adata_source
adata = adata_target
stats = _scvi_dict["summary_stats"]
target_n_vars = adata.shape[1]
error_msg = (
"Number of {} in anndata different from initial anndata used for training."
)
if target_n_vars != stats["n_vars"]:
raise ValueError(error_msg.format("vars"))
error_msg = (
"There are more {} categories in the data than were originally registered. "
+ "Please check your {} categories as well as adata.uns['_scvi']['categorical_mappings']."
)
self_categoricals = _scvi_dict["categorical_mappings"]
self_batch_mapping = self_categoricals["_scvi_batch"]["mapping"]
adata_categoricals = adata.uns["_scvi"]["categorical_mappings"]
adata_batch_mapping = adata_categoricals["_scvi_batch"]["mapping"]
# check if mappings are equal or needs transfer
transfer_setup = _needs_transfer(self_batch_mapping, adata_batch_mapping, "batch")
self_labels_mapping = self_categoricals["_scvi_labels"]["mapping"]
adata_labels_mapping = adata_categoricals["_scvi_labels"]["mapping"]
transfer_setup = transfer_setup or _needs_transfer(
self_labels_mapping, adata_labels_mapping, "label"
)
# validate any extra categoricals
error_msg = (
"Registered categorical key order mismatch between "
+ "the anndata used to train and the anndata passed in."
+ "Expected categories & order {}. Received {}.\n"
)
if "extra_categoricals" in _scvi_dict.keys():
target_dict = adata.uns["_scvi"]["extra_categoricals"]
source_dict = _scvi_dict["extra_categoricals"]
# check that order of keys setup is same
if not np.array_equal(target_dict["keys"], source_dict["keys"]):
raise ValueError(error_msg.format(source_dict["keys"], target_dict["keys"]))
# check mappings are equivalent
target_extra_cat_maps = adata.uns["_scvi"]["extra_categoricals"]["mappings"]
for key, val in source_dict["mappings"].items():
target_map = target_extra_cat_maps[key]
transfer_setup = transfer_setup or _needs_transfer(val, target_map, key)
# validate any extra continuous covs
if "extra_continuous_keys" in _scvi_dict.keys():
if "extra_continuous_keys" not in adata.uns["_scvi"].keys():
raise ValueError('extra_continuous_keys not in adata.uns["_scvi"]')
target_cont_keys = adata.uns["_scvi"]["extra_continuous_keys"]
source_cont_keys = _scvi_dict["extra_continuous_keys"]
# check that order of keys setup is same
if not np.array_equal(target_cont_keys["keys"], source_cont_keys["keys"]):
raise ValueError(
error_msg.format(source_cont_keys, target_cont_keys)
)
return transfer_setup
|
1,761 |
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Normalized input X.
norms : array, shape (n_samples, ) if axis=1 else (n_features, )
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
|
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Normalized input X.
norms : ndarray of shape (n_samples, ) if `axis=1` else (n_features, )
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
|
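A numpy-only sketch of the dense 'l2', axis=1 branch of normalize above, with invented data; zero rows are left untouched, in the spirit of _handle_zeros_in_scale.
# Dense-path sketch of normalize(..., norm='l2', axis=1) from above (data invented).
import numpy as np

X = np.array([[3.0, 4.0], [0.0, 0.0], [1.0, 1.0]])
norms = np.sqrt((X ** 2).sum(axis=1))
norms[norms == 0.0] = 1.0          # avoid dividing a zero row by zero
X_normalized = X / norms[:, np.newaxis]
print(X_normalized)                # rows have unit l2 norm (zero rows stay zero)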
29,789 |
def _init_authorization_state(provider_config, db_uri, sub_hash_salt, mirror_public):
if db_uri:
authz_code_db = StorageBase.from_uri(
db_uri,
db_name="satosa",
collection="authz_codes",
ttl=provider_config.get("authorization_code_lifetime", 600),
)
access_token_db = StorageBase.from_uri(
db_uri,
db_name="satosa",
collection="access_tokens",
ttl=provider_config.get("access_token_lifetime", 3600),
)
refresh_token_db = StorageBase.from_uri(
db_uri,
db_name="satosa",
collection="refresh_tokens",
ttl=provider_config.get("refresh_token_lifetime", None),
)
sub_db = StorageBase.from_uri(
db_uri, db_name="satosa", collection="subject_identifiers", ttl=None
)
else:
authz_code_db = None
access_token_db = None
refresh_token_db = None
sub_db = None
token_lifetimes = {
k: provider_config[k]
for k in [
"authorization_code_lifetime",
"access_token_lifetime",
"refresh_token_lifetime",
"refresh_token_threshold",
]
if k in provider_config
}
subject_id_factory = (
MirrorPublicSubjectIdentifierFactory(sub_hash_salt)
if mirror_public
else HashBasedSubjectIdentifierFactory(sub_hash_salt)
)
return AuthorizationState(
subject_id_factory,
authz_code_db,
access_token_db,
refresh_token_db,
sub_db,
**token_lifetimes,
)
|
def _init_authorization_state(provider_config, db_uri, sub_hash_salt, mirror_public=False):
if db_uri:
authz_code_db = StorageBase.from_uri(
db_uri,
db_name="satosa",
collection="authz_codes",
ttl=provider_config.get("authorization_code_lifetime", 600),
)
access_token_db = StorageBase.from_uri(
db_uri,
db_name="satosa",
collection="access_tokens",
ttl=provider_config.get("access_token_lifetime", 3600),
)
refresh_token_db = StorageBase.from_uri(
db_uri,
db_name="satosa",
collection="refresh_tokens",
ttl=provider_config.get("refresh_token_lifetime", None),
)
sub_db = StorageBase.from_uri(
db_uri, db_name="satosa", collection="subject_identifiers", ttl=None
)
else:
authz_code_db = None
access_token_db = None
refresh_token_db = None
sub_db = None
token_lifetimes = {
k: provider_config[k]
for k in [
"authorization_code_lifetime",
"access_token_lifetime",
"refresh_token_lifetime",
"refresh_token_threshold",
]
if k in provider_config
}
subject_id_factory = (
MirrorPublicSubjectIdentifierFactory(sub_hash_salt)
if mirror_public
else HashBasedSubjectIdentifierFactory(sub_hash_salt)
)
return AuthorizationState(
subject_id_factory,
authz_code_db,
access_token_db,
refresh_token_db,
sub_db,
**token_lifetimes,
)
|
40,083 |
def log_config(config: dict):
"""
Initialize some logging for the cli ops
:param config:
:return:
"""
global _log_config
if config['debug']:
logging.basicConfig(level=logging.DEBUG)
_log_config.update(config)
try:
log_level = 'INFO'
if config['debug']:
log_level = 'DEBUG'
enable_json_logging = os.getenv('ANCHORE_JSON_LOGGING_ENABLED') == 'true'
log_beginner = LogBeginner(LogPublisher(), sys.stderr, sys, warnings)
logger.configure_logging(log_level, enable_json_logging=enable_json_logging, log_beginner=log_beginner)
except Exception as err:
logger.error(format_error_output(config, 'service', {}, err))
sys.exit(ExitCode.failed.value)
|
def log_config(config: dict):
"""
Initialize some logging for the cli ops
:param config:
:return:
"""
global _log_config
if config['debug']:
logging.basicConfig(level=logging.DEBUG)
_log_config.update(config)
try:
log_level = 'INFO'
if config['debug']:
log_level = 'DEBUG'
enable_json_logging = os.getenv('ANCHORE_JSON_LOGGING_ENABLED', "") == 'true'
log_beginner = LogBeginner(LogPublisher(), sys.stderr, sys, warnings)
logger.configure_logging(log_level, enable_json_logging=enable_json_logging, log_beginner=log_beginner)
except Exception as err:
logger.error(format_error_output(config, 'service', {}, err))
sys.exit(ExitCode.failed.value)
|
53,809 |
def main():
"""Entry point for MRIQC's CLI."""
import gc
import os
import sys
from tempfile import mktemp
from mriqc import config, messages
from mriqc.cli.parser import parse_args
# Run parser
parse_args()
_plugin = config.nipype.get_plugin()
if config.nipype.plugin in ("MultiProc", "LegacyMultiProc"):
from importlib import import_module
from multiprocessing import set_start_method
from contextlib import suppress
with suppress(RuntimeError):
set_start_method("forkserver")
Plugin = getattr(
import_module(f"nipype.pipeline.plugins.{config.nipype.plugin.lower()}"),
f"{config.nipype.plugin}Plugin",
)
_plugin = {
"plugin": Plugin(plugin_args=config.nipype.plugin_args),
}
gc.collect()
if config.execution.pdb:
from mriqc.utils.debug import setup_exceptionhook
setup_exceptionhook()
# CRITICAL Save the config to a file. This is necessary because the execution graph
# is built as a separate process to keep the memory footprint low. The most
# straightforward way to communicate with the child process is via the filesystem.
# The config file name needs to be unique, otherwise multiple mriqc instances
# will create write conflicts.
config_file = mktemp(
dir=config.execution.work_dir, prefix=".mriqc.", suffix=".toml"
)
config.to_filename(config_file)
# Set up participant level
if "participant" in config.workflow.analysis_level:
from mriqc.workflows.core import init_mriqc_wf
start_message = messages.PARTICIPANT_START.format(
version=config.environment.version,
bids_dir=config.execution.bids_dir,
output_dir=config.execution.output_dir,
analysis_level=config.workflow.analysis_level,
)
config.loggers.cli.log(25, start_message)
mriqc_wf = init_mriqc_wf()
if mriqc_wf is None:
sys.exit(os.EX_SOFTWARE)
if mriqc_wf and config.execution.write_graph:
mriqc_wf.write_graph(graph2use="colored", format="svg", simple_form=True)
if not config.execution.dry_run:
# Warn about submitting measures BEFORE
if not config.execution.no_sub:
config.loggers.cli.warning(config.DSA_MESSAGE)
# Clean up master process before running workflow, which may create forks
gc.collect()
# run MRIQC
mriqc_wf.run(**_plugin)
# Warn about submitting measures AFTER
if not config.execution.no_sub:
config.loggers.cli.warning(config.DSA_MESSAGE)
config.loggers.cli.log(25, messages.PARTICIPANT_FINISHED)
# Set up group level
if "group" in config.workflow.analysis_level:
from ..reports import group_html
from ..utils.bids import DEFAULT_TYPES
from ..utils.misc import generate_tsv # , generate_pred
config.loggers.cli.info(messages.GROUP_START)
# Generate reports
mod_group_reports = []
for mod in config.execution.modalities or DEFAULT_TYPES:
output_dir = config.execution.output_dir
dataframe, out_tsv = generate_tsv(output_dir, mod)
# If there are no iqm.json files, nothing to do.
if dataframe is None:
continue
tsv_message = messages.TSV_GENERATED.format(modality=mod, path=out_tsv)
config.loggers.cli.info(tsv_message)
# out_pred = generate_pred(derivatives_dir, settings['output_dir'], mod)
# if out_pred is not None:
# log.info('Predicted QA CSV table for the %s data generated (%s)',
# mod, out_pred)
out_html = output_dir / f"group_{mod}.html"
group_html(
out_tsv,
mod,
csv_failed=output_dir / f"group_variant-failed_{mod}.csv",
out_file=out_html,
)
report_message = messages.GROUP_REPORT_GENERATED.format(
modality=mod, path=out_html
)
config.loggers.cli.info(report_message)
mod_group_reports.append(mod)
if not mod_group_reports:
raise Exception(messages.GROUP_NO_DATA)
config.loggers.cli.info(messages.GROUP_FINISHED)
from mriqc.utils.bids import write_bidsignore, write_derivative_description
config.loggers.cli.info(messages.BIDS_META)
write_derivative_description(config.execution.bids_dir, config.execution.output_dir)
write_bidsignore(config.execution.output_dir)
config.loggers.cli.info(messages.RUN_FINISHED)
|
def main():
"""Entry point for MRIQC's CLI."""
import gc
import os
import sys
from tempfile import mktemp
from mriqc import config, messages
from mriqc.cli.parser import parse_args
# Run parser
parse_args()
_plugin = config.nipype.get_plugin()
if config.nipype.plugin in ("MultiProc", "LegacyMultiProc"):
from multiprocessing import set_start_method
from contextlib import suppress
from nipype.pipeline import plugins
with suppress(RuntimeError):
set_start_method("forkserver")
Plugin = getattr(plugins, f"{config.nipype.plugin}Plugin")
_plugin = {
"plugin": Plugin(plugin_args=config.nipype.plugin_args),
}
gc.collect()
if config.execution.pdb:
from mriqc.utils.debug import setup_exceptionhook
setup_exceptionhook()
# CRITICAL Save the config to a file. This is necessary because the execution graph
# is built as a separate process to keep the memory footprint low. The most
# straightforward way to communicate with the child process is via the filesystem.
# The config file name needs to be unique, otherwise multiple mriqc instances
# will create write conflicts.
config_file = mktemp(
dir=config.execution.work_dir, prefix=".mriqc.", suffix=".toml"
)
config.to_filename(config_file)
# Set up participant level
if "participant" in config.workflow.analysis_level:
from mriqc.workflows.core import init_mriqc_wf
start_message = messages.PARTICIPANT_START.format(
version=config.environment.version,
bids_dir=config.execution.bids_dir,
output_dir=config.execution.output_dir,
analysis_level=config.workflow.analysis_level,
)
config.loggers.cli.log(25, start_message)
mriqc_wf = init_mriqc_wf()
if mriqc_wf is None:
sys.exit(os.EX_SOFTWARE)
if mriqc_wf and config.execution.write_graph:
mriqc_wf.write_graph(graph2use="colored", format="svg", simple_form=True)
if not config.execution.dry_run:
# Warn about submitting measures BEFORE
if not config.execution.no_sub:
config.loggers.cli.warning(config.DSA_MESSAGE)
# Clean up master process before running workflow, which may create forks
gc.collect()
# run MRIQC
mriqc_wf.run(**_plugin)
# Warn about submitting measures AFTER
if not config.execution.no_sub:
config.loggers.cli.warning(config.DSA_MESSAGE)
config.loggers.cli.log(25, messages.PARTICIPANT_FINISHED)
# Set up group level
if "group" in config.workflow.analysis_level:
from ..reports import group_html
from ..utils.bids import DEFAULT_TYPES
from ..utils.misc import generate_tsv # , generate_pred
config.loggers.cli.info(messages.GROUP_START)
# Generate reports
mod_group_reports = []
for mod in config.execution.modalities or DEFAULT_TYPES:
output_dir = config.execution.output_dir
dataframe, out_tsv = generate_tsv(output_dir, mod)
# If there are no iqm.json files, nothing to do.
if dataframe is None:
continue
tsv_message = messages.TSV_GENERATED.format(modality=mod, path=out_tsv)
config.loggers.cli.info(tsv_message)
# out_pred = generate_pred(derivatives_dir, settings['output_dir'], mod)
# if out_pred is not None:
# log.info('Predicted QA CSV table for the %s data generated (%s)',
# mod, out_pred)
out_html = output_dir / f"group_{mod}.html"
group_html(
out_tsv,
mod,
csv_failed=output_dir / f"group_variant-failed_{mod}.csv",
out_file=out_html,
)
report_message = messages.GROUP_REPORT_GENERATED.format(
modality=mod, path=out_html
)
config.loggers.cli.info(report_message)
mod_group_reports.append(mod)
if not mod_group_reports:
raise Exception(messages.GROUP_NO_DATA)
config.loggers.cli.info(messages.GROUP_FINISHED)
from mriqc.utils.bids import write_bidsignore, write_derivative_description
config.loggers.cli.info(messages.BIDS_META)
write_derivative_description(config.execution.bids_dir, config.execution.output_dir)
write_bidsignore(config.execution.output_dir)
config.loggers.cli.info(messages.RUN_FINISHED)
|
46,450 |
def plot_acf(ts: TimeSeries,
m: Optional[int] = None,
max_lag: int = 24,
alpha: float = 0.05,
bartlett_confint: bool = True,
fig_size: Tuple[int, int] = (10, 5),
axis: Optional[plt.axis] = None) -> None:
"""
Plots the ACF of `ts`, highlighting it at lag `m`, with corresponding significance interval.
Parameters
----------
ts
The TimeSeries whose ACF should be plotted.
m
Optionally, a time lag to highlight on the plot.
max_lag
The maximal lag order to consider.
alpha
The confidence interval to display.
bartlett_confint
The boolean value indicating whether the confidence interval should be
calculated using Bartlett's formula. If set to True, the confidence interval
can be used in the model identification stage for fitting ARIMA models.
If set to False, the confidence interval can be used to test for randomness
(i.e. there is no time dependence in the data) of the data.
fig_size
The size of the figure to be displayed.
axis
Optionally, an axis object to plot the ACF on.
"""
ts._assert_univariate()
raise_if(max_lag is None or not (1 <= max_lag < len(ts)),
'max_lag must be greater than or equal to 1 and less than len(ts).')
raise_if(m is not None and not (0 <= m <= max_lag),
'm must be greater than or equal to 0 and less than or equal to max_lag.')
raise_if(alpha is None or not (0 < alpha < 1), 'alpha must be greater than 0 and less than 1.')
r, confint = acf(ts.values(), nlags=max_lag, fft=False, alpha=alpha, bartlett_confint=bartlett_confint)
if axis is None:
plt.figure(figsize=fig_size)
axis = plt
for i in range(len(r)):
axis.plot((i, i),
(0, r[i]),
color=('#b512b8' if m is not None and i == m else 'black'),
lw=(1 if m is not None and i == m else .5))
# Adjusts the upper band of the confidence interval to center it on the x axis.
upp_band = [confint[lag][1] - r[lag] for lag in range(1, max_lag + 1)]
axis.fill_between(np.arange(1, max_lag + 1), upp_band, [-x for x in upp_band], color='#003DFD', alpha=.25)
axis.plot((0, max_lag + 1), (0, 0), color='black')
|
def plot_acf(ts: TimeSeries,
m: Optional[int] = None,
max_lag: int = 24,
alpha: float = 0.05,
bartlett_confint: bool = True,
fig_size: Tuple[int, int] = (10, 5),
axis: Optional[plt.axis] = None) -> None:
"""
Plots the ACF of `ts`, highlighting it at lag `m`, with corresponding significance interval.
Parameters
----------
ts
The TimeSeries whose ACF should be plotted.
m
Optionally, a time lag to highlight on the plot.
max_lag
The maximal lag order to consider.
alpha
The confidence interval to display.
bartlett_confint
The boolean value indicating whether the confidence interval should be
calculated using Bartlett's formula. If set to True, the confidence interval
can be used in the model identification stage for fitting ARIMA models.
If set to False, the confidence interval can be used to test for randomness
(i.e. there is no time dependence in the data) of the data. Default: True
fig_size
The size of the figure to be displayed.
axis
Optionally, an axis object to plot the ACF on.
"""
ts._assert_univariate()
raise_if(max_lag is None or not (1 <= max_lag < len(ts)),
'max_lag must be greater than or equal to 1 and less than len(ts).')
raise_if(m is not None and not (0 <= m <= max_lag),
'm must be greater than or equal to 0 and less than or equal to max_lag.')
raise_if(alpha is None or not (0 < alpha < 1), 'alpha must be greater than 0 and less than 1.')
r, confint = acf(ts.values(), nlags=max_lag, fft=False, alpha=alpha, bartlett_confint=bartlett_confint)
if axis is None:
plt.figure(figsize=fig_size)
axis = plt
for i in range(len(r)):
axis.plot((i, i),
(0, r[i]),
color=('#b512b8' if m is not None and i == m else 'black'),
lw=(1 if m is not None and i == m else .5))
# Adjusts the upper band of the confidence interval to center it on the x axis.
upp_band = [confint[lag][1] - r[lag] for lag in range(1, max_lag + 1)]
axis.fill_between(np.arange(1, max_lag + 1), upp_band, [-x for x in upp_band], color='#003DFD', alpha=.25)
axis.plot((0, max_lag + 1), (0, 0), color='black')
|
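A small sketch of the confidence-band recentering step in plot_acf above; the ACF values and interval bounds are invented stand-ins for the statsmodels output.
# Sketch of the band recentering done in plot_acf (arrays invented):
# confint[lag] brackets r[lag], so subtracting r centers the band on the x axis.
import numpy as np

r = np.array([1.0, 0.6, 0.3, 0.1])                 # pretend ACF values, lags 0..3
confint = np.stack([r - 0.25, r + 0.25], axis=1)   # pretend (lower, upper) bounds
max_lag = 3
upp_band = [confint[lag][1] - r[lag] for lag in range(1, max_lag + 1)]
print(upp_band)                                    # [0.25, 0.25, 0.25]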
893 |
def test_NormalInverseGamma():
ng = NormalInverseGamma('G', 1, 2, 3, 4)
assert density(ng)(1, 1) == 32.0*exp(-4)/sqrt(pi)
assert density(ng)(7, 2) == 1.0*sqrt(2)*exp(-20)/sqrt(pi)
assert ng.pspace.distribution.set == ProductSet(S.Reals, Interval(0, oo))
raises(ValueError, lambda:NormalGamma('G', 1, 2, 3, -1))
raises(ValueError, lambda:NormalGamma('G', 1, 2, -3, 1))
raises(ValueError, lambda:NormalGamma('G', 1, -2, 3, 1))
|
def test_NormalInverseGamma():
ng = NormalInverseGamma('G', 1, 2, 3, 4)
assert density(ng)(1, 1) == 32.0*exp(-4)/sqrt(pi)
assert density(ng)(7, 2) == 1.0*sqrt(2)*exp(-20)/sqrt(pi)
assert ng.pspace.distribution.set == ProductSet(S.Reals, Interval(0, oo))
raises(ValueError, lambda: NormalGamma('G', 1, 2, 3, -1))
raises(ValueError, lambda:NormalGamma('G', 1, 2, -3, 1))
raises(ValueError, lambda:NormalGamma('G', 1, -2, 3, 1))
|
4,479 |
def distance_to_bem(
pos: np.array, bem: ConductorModel, trans: Transform = None
) -> np.array:
    """Calculate the distance of positions to the inner skull surface.
Parameters
----------
pos : array, shape = (3,) | (n, 3)
Position(s) in m, in head coordinates.
bem : instance of ConductorModel
Conductor model.
trans : array, shape = (4, 4) | instance of Transform
Transform matrix.
Returns
-------
distances : array, shape = (3,) | (n, 3)
"""
if pos.size == 3:
single_pos = True
pos = np.expand_dims(pos, axis=0)
else:
single_pos = False
n = pos.shape[0]
distance = np.zeros((n,))
if bem["is_sphere"]:
center = bem["r0"]
if trans:
center = apply_trans(trans, center, move=True)
radius = bem["layers"][0]["rad"]
for i in range(n):
distance[i] = radius - np.linalg.norm(pos[i, :] - center)
else: # is BEM
surface_points = bem["surfs"][0]["rr"]
if trans:
surface_points = apply_trans(
trans, surface_points, move=True
)
for i in range(n):
distance[i] = np.min(np.linalg.norm(surface_points - pos[i, :], axis=1))
if single_pos:
distance = np.squeeze(distance)[()]
return distance
|
def distance_to_bem(
pos: np.array, bem: ConductorModel, trans: Transform = None
) -> np.array:
    """Calculate the distance of positions to the inner skull surface.
Parameters
----------
pos : array, shape = (3,) | (n, 3)
Position(s) in m, in head coordinates.
bem : instance of ConductorModel
Conductor model.
trans : array, shape = (4, 4) | instance of Transform
Transform matrix.
Returns
-------
distances : array, shape = (3,) | (n, 3)
"""
if pos.size == 3:
single_pos = True
pos = np.expand_dims(pos, axis=0)
else:
single_pos = False
n = pos.shape[0]
distance = np.zeros((n,))
if bem["is_sphere"]:
center = bem["r0"]
if trans:
center = apply_trans(trans, center, move=True)
radius = bem["layers"][0]["rad"]
for i in range(n):
distance[i] = radius - np.linalg.norm(pos[i, :] - center)
else: # is BEM
surface_points = bem["surfs"][0]["rr"]
if trans:
surface_points = apply_trans(
trans, surface_points, move=True
)
for i in range(n):
distance[i] = np.min(np.linalg.norm(surface_points - pos[i, :], axis=1))
if single_pos:
distance = np.squeeze(distance)[()]
return distance
|
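A numpy-only sketch of the spherical-model branch of distance_to_bem above; the center, radius, and positions are invented.
# Sketch of the spherical-model branch (numbers invented): distance to the inner
# skull is the sphere radius minus the distance from the position to its center.
import numpy as np

center = np.array([0.0, 0.0, 0.04])   # sphere origin in head coordinates (m)
radius = 0.09                          # inner skull radius (m)
pos = np.array([[0.0, 0.0, 0.08], [0.05, 0.0, 0.04]])
distance = radius - np.linalg.norm(pos - center, axis=1)
print(distance)                        # positive values lie inside the sphere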
31,702 |
def get_whitelist_helper(raw_response: dict) -> tuple[list, list]:
"""
Prepares the data for human readable format and context data
Args:
raw_response: dict -> The raw response returned by the API call
Returns:
Tuple[entry_context, human_readable_data]
"""
entry_context: list = []
human_readable_data: list = []
if raw_response:
# Extract the meta information
base_meta: dict = raw_response.get("meta")
limit: int = base_meta.get("limit")
# "next" returns the endpoint as /api/v1/orgwhitelist/?username=devops
# For calling http_request, we need to remove /api/
next_endpoint: str = base_meta.get("next")
next_endpoint = re.sub("^/api/", "", next_endpoint)
previous_endpoint: str = base_meta.get("previous")
offset: int = base_meta.get("offset")
total_count: int = base_meta.get("total_count")
total_count_more_than_limit: bool = total_count > limit
if total_count > 0:
while(next_endpoint is not None):
                # passing the required arguments to prepare human readable and context data lists
# Offset -> Starting index to add
# end_index -> End index to add
prepare_whitelist_human_and_context(entry_context, human_readable_data, raw_response.get("objects"))
# Make the new next requests to get required information
# Since, the API returns the next endpoint to hit, no need to call build_params() function.
raw_response = http_request("GET", next_endpoint, headers=HEADERS)
next_endpoint = raw_response.get("meta").get("next")
if next_endpoint:
next_endpoint = re.sub("^/api/", "", next_endpoint)
if total_count_more_than_limit:
prepare_whitelist_human_and_context(entry_context, human_readable_data, raw_response.get("objects"))
return (entry_context, human_readable_data)
|
def get_whitelist_helper(raw_response: dict) -> tuple[list, list]:
"""
Prepares the data for human readable format and context data
Args:
raw_response: dict -> The raw response returned by the API call
Returns:
Tuple[entry_context, human_readable_data]
"""
entry_context: list = []
human_readable_data: list = []
if raw_response:
# Extract the meta information
base_meta: dict = raw_response.get("meta")
limit: int = base_meta.get("limit")
# "next" returns the endpoint as /api/v1/orgwhitelist/?username=devops
# For calling http_request, we need to remove /api/
next_endpoint: str = base_meta.get("next")
next_endpoint = re.sub("^/api/", "", next_endpoint)
previous_endpoint: str = base_meta.get("previous")
offset: int = base_meta.get("offset")
total_count: int = base_meta.get("total_count")
total_count_more_than_limit: bool = total_count > limit
if total_count > 0:
while next_endpoint:
                # passing the required arguments to prepare human readable and context data lists
# Offset -> Starting index to add
# end_index -> End index to add
prepare_whitelist_human_and_context(entry_context, human_readable_data, raw_response.get("objects"))
# Make the new next requests to get required information
# Since, the API returns the next endpoint to hit, no need to call build_params() function.
raw_response = http_request("GET", next_endpoint, headers=HEADERS)
next_endpoint = raw_response.get("meta").get("next")
if next_endpoint:
next_endpoint = re.sub("^/api/", "", next_endpoint)
if total_count_more_than_limit:
prepare_whitelist_human_and_context(entry_context, human_readable_data, raw_response.get("objects"))
return (entry_context, human_readable_data)
|
55,930 |
def _collate_batch(examples, tokenizer):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
# Check if padding is necessary.
length_of_first = examples[0].size(0)
are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
if are_tensors_same_length:
return torch.stack(examples, dim=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have one."
)
# Creating the full tensor and filling it with our data.
max_length = max(x.size(0) for x in examples)
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
|
def _collate_batch(examples, tokenizer):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
# Check if padding is necessary.
length_of_first = examples[0].size(0)
are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
if are_tensors_same_length:
return torch.stack(examples, dim=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(x.size(0) for x in examples)
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
|
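For context, a minimal usage sketch of the _collate_batch collator above, assuming the torch and transformers packages are installed; the checkpoint name and the example sentences are illustrative, not part of the dataset row.

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
examples = [
    tokenizer("a short sentence")["input_ids"],
    tokenizer("a noticeably longer sentence that forces padding")["input_ids"],
]
# Lists are tensorized, then padded on the tokenizer's padding side with tokenizer.pad_token_id.
batch = _collate_batch(examples, tokenizer)
print(batch.shape)  # (2, max_length)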
30,652 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials', {}).get('identifier', '')
password = params.get('credentials', {}).get('password', '')
base_url = params['url'][:-1] if (params['url'] and params['url'].endswith('/')) else params['url']
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
ok_codes=(200, 201, 204),
headers={'accept': "application/json"}
)
if demisto.command() == 'test-module':
result = test_module(client)
return_outputs(result)
elif demisto.command() == 'guardian-search':
result = search_by_query(client, demisto.args())
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-list-all-assets':
result = list_all_assets(client)
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-find-ip-by-mac':
result = find_ip_by_mac(client, demisto.args())
return_outputs(result[0], result[1], result[2])
except Exception as e:
return_error(str(f'Failed to execute {demisto.command()} command. Error: {str(e)}'))
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials', {}).get('identifier', '')
password = params.get('credentials', {}).get('password', '')
base_url = params['url'][:-1] if (params['url'] and params['url'].endswith('/')) else params['url']
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
ok_codes=(200, 201, 204),
headers={'accept': "application/json"}
)
if demisto.command() == 'test-module':
result = test_module(client)
return_outputs(result)
elif demisto.command() == 'guardian-search':
result = search_by_query(client, demisto.args())
return_outputs(*result)
elif demisto.command() == 'guardian-list-all-assets':
result = list_all_assets(client)
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-find-ip-by-mac':
result = find_ip_by_mac(client, demisto.args())
return_outputs(result[0], result[1], result[2])
except Exception as e:
return_error(str(f'Failed to execute {demisto.command()} command. Error: {str(e)}'))
|
57,905 |
def list_apps_command(
client: ShiftLeftClient, org_id: str, args: Dict[str, Any]
) -> CommandResults:
result = client.list_apps(org_id)
apps: Any = result.get("response") if result.get("ok") else []
for a in apps:
if a.get("tags"):
a["labels"] = "\n".join(
[f'`{t.get("key")}`: {t.get("value")}' for t in a.get("tags")]
)
if apps:
markdown = f"### Apps List ({len(apps)})\n"
markdown += tableToMarkdown(
"",
apps,
headers=[
"id",
"name",
"labels",
],
)
return CommandResults(
readable_output=markdown,
outputs_prefix="ShiftLeft.Apps",
outputs_key_field="id",
outputs=apps,
)
else:
return CommandResults(
outputs_prefix="ShiftLeft.Apps",
outputs_key_field="",
outputs={},
)
|
def list_apps_command(
client: ShiftLeftClient, org_id: str) -> CommandResults:
result = client.list_apps(org_id)
apps: Any = result.get("response") if result.get("ok") else []
for a in apps:
if a.get("tags"):
a["labels"] = "\n".join(
[f'`{t.get("key")}`: {t.get("value")}' for t in a.get("tags")]
)
if apps:
markdown = f"### Apps List ({len(apps)})\n"
markdown += tableToMarkdown(
"",
apps,
headers=[
"id",
"name",
"labels",
],
)
return CommandResults(
readable_output=markdown,
outputs_prefix="ShiftLeft.Apps",
outputs_key_field="id",
outputs=apps,
)
else:
return CommandResults(
outputs_prefix="ShiftLeft.Apps",
outputs_key_field="",
outputs={},
)
|
15,040 |
def build_resources(
translation_cache: Dict[str, Dict[str, Any]],
components: Set[str],
category: Optional[str],
) -> Dict[str, Dict[str, Any]]:
"""Build the resources response for the given components."""
# Build response
resources: Dict[str, Dict[str, Any]] = {}
for component in components:
if "." not in component:
domain = component
else:
domain = component.split(".", 1)[0]
domain_resources = resources.setdefault(domain, {})
# Add the translations for this component to the domain resources.
# Since clients cannot determine which platform an entity belongs to,
# all translations for a domain will be returned together.
if category is None:
domain_resources.update(translation_cache[component])
continue
new_value = translation_cache[component].get(category)
if new_value is None:
continue
if isinstance(new_value, dict):
domain_resources.setdefault(category, {}).update(
translation_cache[component][category]
)
else:
domain_resources[category] = translation_cache[component][category]
return {"component": resources}
|
def build_resources(
translation_cache: Dict[str, Dict[str, Any]],
components: Set[str],
category: Optional[str],
) -> Dict[str, Dict[str, Any]]:
"""Build the resources response for the given components."""
# Build response
resources: Dict[str, Dict[str, Any]] = {}
for component in components:
if "." not in component:
domain = component
else:
domain = component.split(".", 1)[0]
domain_resources = resources.setdefault(domain, {})
# Add the translations for this component to the domain resources.
# Since clients cannot determine which platform an entity belongs to,
# all translations for a domain will be returned together.
if category is None:
domain_resources.update(translation_cache[component])
continue
new_value = translation_cache[component].get(category)
if new_value is None:
continue
if isinstance(new_value, dict):
domain_resources.setdefault(category, {}).update(
new_value
)
else:
domain_resources[category] = translation_cache[component][category]
return {"component": resources}
|
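A small illustrative call of build_resources above; the component names and translation strings are made up for the sketch, and it assumes the typing imports used in the signature are in scope.

translation_cache = {
    "light": {"state": {"on": "On", "off": "Off"}, "title": "Light"},
    "sensor.acme": {"state": {"ok": "OK"}},
}
# Only the requested category is copied, and platform entries ("sensor.acme")
# are folded into their domain ("sensor").
resources = build_resources(translation_cache, {"light", "sensor.acme"}, "state")
# {'component': {'light': {'state': {'on': 'On', 'off': 'Off'}},
#                'sensor': {'state': {'ok': 'OK'}}}}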
47,076 |
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/transformers")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
if (
not issue.assignees
and (dt.utcnow() - issue.updated_at).days > 21
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
print("Closing", issue)
# issue.create_comment(
|
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/transformers")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
if (
not issue.assignees
and (dt.utcnow() - issue.updated_at).days > 21
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
print("Closing 🤗", issue)
# issue.create_comment(
|
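A hedged sketch of the surrounding setup the stale-issue loop above assumes; the import path is PyGithub's, while the exempt label names are purely illustrative.

import os
from datetime import datetime as dt  # the snippet calls dt.utcnow()

from github import Github  # PyGithub

# Illustrative values only; the real LABELS_TO_EXEMPT is defined elsewhere in the script.
LABELS_TO_EXEMPT = ["good first issue", "feature request", "wip"]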
39,403 |
def test_polygon():
geom = pyvista.Polygon()
assert np.any(geom.points)
geom1 = pyvista.Polygon(generate_polygon=True)
assert geom.n_cells == 2
geom2 = pyvista.Polygon(generate_polygon=False)
assert geom.n_cells == 1
|
def test_polygon():
geom = pyvista.Polygon()
assert np.any(geom.points)
geom1 = pyvista.Polygon(generate_polygon=True)
assert geom.n_cells == 2
geom2 = pyvista.Polygon(generate_polygon=False)
assert geom2.n_cells == 1
|
8,469 |
def pytest_addoption(parser):
"""Add our command line options to pytest"""
parser.addoption("--use-default-hook",
action="store_true",
default=False,
help="Run tests with the '--use-default-hook option'")
parser.addoption("--skip-onefile",
action="store_true",
default=False,
help="Skip 'onefile' tests and only run 'onedir'")
|
def pytest_addoption(parser):
"""Add our command line options to pytest"""
parser.addoption("--use-default-hook",
action="store_true",
default=False,
help="Run tests with the '--use-default-hook' option")
parser.addoption("--skip-onefile",
action="store_true",
default=False,
help="Skip 'onefile' tests and only run 'onedir'")
|
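As a companion sketch, this is how such a flag is typically read back in a fixture or test; the fixture name is illustrative and not part of the original conftest.

import pytest

@pytest.fixture
def use_default_hook(request):
    # Returns the value registered by pytest_addoption above (False unless the flag is passed).
    return request.config.getoption("--use-default-hook")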
58,762 |
def sparsereshape(sparse_indices, sparse_values, prev_shape, new_shape):
"""
Reshape a Sparse Tensor
Parameters
----------
inputs : List[relay.Expr]
Input tensor and indices.
The first tensor is input data and rests are indices.
Returns
-------
result: relay.Expr
Output tensor.
Examples
--------
.. code-block:: python
sparse_indices = [[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 2, 3]]
sparse_values = [7, 5, 6, 3, 9]
prev_shape = [2, 3, 4]
new_shape = [9, -1]
relay.sparsereshape(sparse_indices,
sparse_values,
prev_shape,
new_shape)
= [[0, 0],
[0, 1],
[1, 2],
[4, 2],
[8, 1]]
"""
return _make.sparsereshape(sparse_indices, sparse_values, prev_shape, new_shape)
|
def sparsereshape(sparse_indices, sparse_values, prev_shape, new_shape):
"""
Reshape a Sparse Tensor
Parameters
----------
inputs : List[relay.Expr]
Input tensor and indices.
The first tensor is input data and rests are indices.
Returns
-------
result: relay.Expr
Output tensor.
Examples
--------
.. code-block:: python
sparse_indices = [[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 2, 3]]
sparse_values = [7, 5, 6, 3, 9]
prev_shape = [2, 3, 4]
new_shape = [9, -1]
relay.sparsereshape(sparse_indices,
sparse_values,
prev_shape,
new_shape)
= [[0, 0],
[0, 1],
[1, 2],
[4, 2],
[8, 1]]
"""
return _make.sparsereshape(sparse_indices, sparse_values, prev_shape, new_shape)
|
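To illustrate the semantics only, here is a hedged NumPy sketch (not TVM relay code, and with a fresh, self-consistent example rather than the docstring's numbers): every sparse index is routed through its flat position, so the stored values stay aligned with their indices.

import numpy as np

prev_shape = (2, 3, 6)
new_shape = (9, 4)  # a -1 entry would be resolved so that element counts match
sparse_indices = np.array([[0, 0, 0], [0, 1, 0], [1, 2, 3]])

# Convert each multi-index to a flat offset in prev_shape, then back out in new_shape.
flat = np.ravel_multi_index(sparse_indices.T, prev_shape)
new_indices = np.stack(np.unravel_index(flat, new_shape), axis=1)
print(new_indices)  # [[0 0], [1 2], [8 1]]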