id (int64, 11–59.9k) | original (string, 33–150k chars) | modified (string, 37–150k chars) |
---|---|---|
32,385 |
def edit_status(issue_id, status, issue):
    # check for all authorized transitions available for this user
    # if the requested transition is available, execute it.
    if issue is None:
        issue = {}
    j_res = list_transitions_data_for_issue(issue_id)
    transitions = [transition.get('name') for transition in j_res.get('transitions')]
    for i, transition in enumerate(transitions):
        if transition.lower() == status.lower():
            url = f'rest/api/latest/issue/{issue_id}/transitions?expand=transitions.fields'
            issue['transition'] = {"id": str(j_res.get('transitions')[i].get('id'))}
            return jira_req('POST', url, json.dumps(issue))
    return_error(f'Status "{status}" not found. \nValid transitions are: {transitions} \n')
|
def edit_status(issue_id, status, issue):
    # check for all authorized transitions available for this user
    # if the requested transition is available, execute it.
    if not issue:
        issue = {}
    j_res = list_transitions_data_for_issue(issue_id)
    transitions = [transition.get('name') for transition in j_res.get('transitions')]
    for i, transition in enumerate(transitions):
        if transition.lower() == status.lower():
            url = f'rest/api/latest/issue/{issue_id}/transitions?expand=transitions.fields'
            issue['transition'] = {"id": str(j_res.get('transitions')[i].get('id'))}
            return jira_req('POST', url, json.dumps(issue))
    return_error(f'Status "{status}" not found. \nValid transitions are: {transitions} \n')
|
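The only difference in the pair above is the guard on `issue`: `if issue is None` replaces only a missing argument, while `if not issue` also replaces an empty dict (or any other falsy value) with a fresh one. A minimal sketch of that behavioural difference, independent of the Jira helpers used above:

```python
def normalize_none_only(issue):
    # original-style guard: only None is replaced
    if issue is None:
        issue = {}
    return issue


def normalize_falsy(issue):
    # modified-style guard: None, {} and other falsy values are all replaced
    if not issue:
        issue = {}
    return issue


shared = {}
assert normalize_none_only(shared) is shared    # empty dict passes through unchanged
assert normalize_falsy(shared) is not shared    # empty dict is swapped for a new one
```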
20,430 |
def firewall_upnp(action="status", no_refresh=False):
"""
Manage port forwarding using UPnP
Note: 'reload' action is deprecated and will be removed in the near
future. You should use 'status' instead - which retrieve UPnP status
and automatically refresh port forwarding if 'no_refresh' is False.
Keyword argument:
action -- Action to perform
no_refresh -- Do not refresh port forwarding
"""
firewall = firewall_list(raw=True)
if action == "status":
enabled = firewall["uPnP"]["enabled"]
elif action == "enable":
# Add cron job
with open(UPNP_CRON_JOB, "w+") as f:
f.write(
"*/10 * * * * root "
"/usr/bin/yunohost firewall upnp status >>/dev/null\n"
)
enabled = True
elif action == "disable":
try:
# Remove cron job
os.remove(UPNP_CRON_JOB)
except Exception:
pass
enabled = False
else:
raise YunohostValidationError("action_invalid", action=action)
# Refresh port mapping
refresh_success = True
if not no_refresh:
# Open port to receive discovery message
process.run_commands(
["iptables -w -A INPUT -p udp --dport %d -j ACCEPT" % SSDP_CLIENT_PORT],
callback=_on_rule_command_error,
)
upnpc = miniupnpc.UPnP(localport=SSDP_CLIENT_PORT)
upnpc.discoverdelay = 3000
# Discover UPnP device(s)
logger.debug("discovering UPnP devices...")
nb_dev = upnpc.discover()
logger.debug("found %d UPnP device(s)", int(nb_dev))
# Close discovery port
process.run_commands(
["iptables -w -D INPUT -p udp --dport %d -j ACCEPT" % SSDP_CLIENT_PORT],
callback=_on_rule_command_error,
)
if nb_dev < 1:
logger.error(m18n.n("upnp_dev_not_found"))
refresh_success = False
else:
try:
# Select UPnP device
upnpc.selectigd()
except Exception:
logger.debug("unable to select UPnP device", exc_info=1)
refresh_success = False
else:
# Iterate over ports
for protocol in ["TCP", "UDP"]:
if protocol + "_TO_CLOSE" in firewall["uPnP"]:
for port in firewall["uPnP"][protocol + "_TO_CLOSE"]:
# Clean the mapping of this port
if upnpc.getspecificportmapping(port, protocol):
try:
upnpc.deleteportmapping(port, protocol)
except Exception:
pass
del firewall["uPnP"][protocol + "_TO_CLOSE"]
for port in firewall["uPnP"][protocol]:
# Clean the mapping of this port
if upnpc.getspecificportmapping(port, protocol):
try:
upnpc.deleteportmapping(port, protocol)
except Exception:
pass
if not enabled:
continue
try:
# Add new port mapping
upnpc.addportmapping(
port,
protocol,
upnpc.lanaddr,
port,
"yunohost firewall: port %d" % port,
"",
)
except Exception:
logger.debug(
"unable to add port %d using UPnP", port, exc_info=1
)
refresh_success = False
if refresh_success:
logger.debug("UPnP port refresh successful")
if action == "enable":
logger.success(m18n.n("upnp_enabled"))
elif action == "disable":
logger.success(m18n.n("upnp_disabled"))
# Save state always (note that refreshing can change the "TO_CLOSE" states)
firewall["uPnP"]["enabled"] = enabled
_update_firewall_file(firewall)
return {"enabled": enabled}
|
def firewall_upnp(action="status", no_refresh=False):
"""
Manage port forwarding using UPnP
Note: 'reload' action is deprecated and will be removed in the near
future. You should use 'status' instead - which retrieve UPnP status
and automatically refresh port forwarding if 'no_refresh' is False.
Keyword argument:
action -- Action to perform
no_refresh -- Do not refresh port forwarding
"""
firewall = firewall_list(raw=True)
if action == "status":
enabled = firewall["uPnP"]["enabled"]
elif action == "enable":
# Add cron job
with open(UPNP_CRON_JOB, "w+") as f:
f.write(
"*/10 * * * * root "
"/usr/bin/yunohost firewall upnp status >>/dev/null\n"
)
enabled = True
elif action == "disable":
try:
# Remove cron job
os.remove(UPNP_CRON_JOB)
except Exception:
pass
enabled = False
else:
raise YunohostValidationError("action_invalid", action=action)
# Refresh port mapping
refresh_success = True
if not no_refresh:
# Open port to receive discovery message
process.run_commands(
["iptables -w -A INPUT -p udp --dport %d -j ACCEPT" % SSDP_CLIENT_PORT],
callback=_on_rule_command_error,
)
upnpc = miniupnpc.UPnP(localport=SSDP_CLIENT_PORT)
upnpc.discoverdelay = 3000
# Discover UPnP device(s)
logger.debug("discovering UPnP devices...")
nb_dev = upnpc.discover()
logger.debug("found %d UPnP device(s)", int(nb_dev))
# Close discovery port
process.run_commands(
["iptables -w -D INPUT -p udp --dport %d -j DENY" % SSDP_CLIENT_PORT],
callback=_on_rule_command_error,
)
if nb_dev < 1:
logger.error(m18n.n("upnp_dev_not_found"))
refresh_success = False
else:
try:
# Select UPnP device
upnpc.selectigd()
except Exception:
logger.debug("unable to select UPnP device", exc_info=1)
refresh_success = False
else:
# Iterate over ports
for protocol in ["TCP", "UDP"]:
if protocol + "_TO_CLOSE" in firewall["uPnP"]:
for port in firewall["uPnP"][protocol + "_TO_CLOSE"]:
# Clean the mapping of this port
if upnpc.getspecificportmapping(port, protocol):
try:
upnpc.deleteportmapping(port, protocol)
except Exception:
pass
del firewall["uPnP"][protocol + "_TO_CLOSE"]
for port in firewall["uPnP"][protocol]:
# Clean the mapping of this port
if upnpc.getspecificportmapping(port, protocol):
try:
upnpc.deleteportmapping(port, protocol)
except Exception:
pass
if not enabled:
continue
try:
# Add new port mapping
upnpc.addportmapping(
port,
protocol,
upnpc.lanaddr,
port,
"yunohost firewall: port %d" % port,
"",
)
except Exception:
logger.debug(
"unable to add port %d using UPnP", port, exc_info=1
)
refresh_success = False
if refresh_success:
logger.debug("UPnP port refresh successful")
if action == "enable":
logger.success(m18n.n("upnp_enabled"))
elif action == "disable":
logger.success(m18n.n("upnp_disabled"))
# Save state always (note that refreshing can change the "TO_CLOSE" states)
firewall["uPnP"]["enabled"] = enabled
_update_firewall_file(firewall)
return {"enabled": enabled}
|
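The pair above differs only in the target named in the cleanup rule (`-j ACCEPT` vs `-j DENY`). For reference, `iptables -D` deletes a rule only when the given specification matches the rule that was added, and `DENY` is not a built-in iptables target, so an open/close pair is normally written symmetrically. A minimal sketch of such a symmetric pair using `subprocess` (the port value is a placeholder, not the module's own `SSDP_CLIENT_PORT`):

```python
import subprocess

SSDP_PORT = 1900  # placeholder; the real module defines its own SSDP_CLIENT_PORT

RULE = ["INPUT", "-p", "udp", "--dport", str(SSDP_PORT), "-j", "ACCEPT"]


def open_discovery_port():
    # append a temporary ACCEPT rule for UPnP discovery replies
    subprocess.run(["iptables", "-w", "-A", *RULE], check=True)


def close_discovery_port():
    # -D must repeat the exact specification used with -A, including the target
    subprocess.run(["iptables", "-w", "-D", *RULE], check=True)
```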
44,498 |
def _resolve_graph_task(
component_spec: ComponentSpec,
arguments: Mapping[str, Any],
component_ref: ComponentReference = None,
) -> TaskSpec:
from ..components import ComponentStore
component_store = ComponentStore.default_store
graph = component_spec.implementation.graph
graph_input_arguments = {input.name: input.default for input in component_spec.inputs or [] if input.default is not None}
graph_input_arguments.update(arguments)
outputs_of_tasks = {}
def resolve_argument(argument):
if isinstance(argument, (str, int, float, bool)):
return argument
elif isinstance(argument, GraphInputArgument):
return graph_input_arguments[argument.graph_input.input_name]
elif isinstance(argument, TaskOutputArgument):
upstream_task_output_ref = argument.task_output
upstream_task_outputs = outputs_of_tasks[upstream_task_output_ref.task_id]
upstream_task_output = upstream_task_outputs[upstream_task_output_ref.output_name]
return upstream_task_output
else:
raise TypeError('Argument for input has unexpected type "{}".'.format(type(argument)))
for task_id, task_spec in graph._toposorted_tasks.items(): # Cannot use graph.tasks here since they might be listed not in dependency order. Especially on python <3.6 where the dicts do not preserve ordering
task_factory = component_store._load_component_from_ref(task_spec.component_ref)
# TODO: Handle the case when optional graph component input is passed to optional task component input
task_arguments = {input_name: resolve_argument(argument) for input_name, argument in task_spec.arguments.items()}
task_component_spec = task_factory.component_spec
input_name_to_pythonic = generate_unique_name_conversion_table([input.name for input in task_component_spec.inputs or []], _sanitize_python_function_name)
output_name_to_pythonic = generate_unique_name_conversion_table([output.name for output in task_component_spec.outputs or []], _sanitize_python_function_name)
pythonic_output_name_to_original = {pythonic_name: original_name for original_name, pythonic_name in output_name_to_pythonic.items()}
pythonic_task_arguments = {input_name_to_pythonic[input_name]: argument for input_name, argument in task_arguments.items()}
task_obj = task_factory(**pythonic_task_arguments)
task_outputs_with_pythonic_names = task_obj.outputs
task_outputs_with_original_names = {
# component_bridge generates outputs under both pythonic and original name,
# so half of them are absent from pythonic_output_name_to_original
pythonic_output_name_to_original.get(pythonic_output_name, pythonic_output_name): output_value
for pythonic_output_name, output_value in task_outputs_with_pythonic_names.items()
}
outputs_of_tasks[task_id] = task_outputs_with_original_names
resolved_graph_outputs = OrderedDict([(output_name, resolve_argument(argument)) for output_name, argument in graph.output_values.items()])
# For resolved graph component tasks task.outputs point to the actual tasks that originally produced the output that is later returned from the graph
graph_task = _ResolvedGraphTask(
component_ref=component_ref,
component_spec=component_spec,
outputs = resolved_graph_outputs,
task_arguments=arguments,
)
return graph_task
|
def _resolve_graph_task(
component_spec: ComponentSpec,
arguments: Mapping[str, Any],
component_ref: ComponentReference = None,
) -> TaskSpec:
from ..components import ComponentStore
component_store = ComponentStore.default_store
graph = component_spec.implementation.graph
graph_input_arguments = {input.name: input.default for input in component_spec.inputs or [] if input.default is not None}
graph_input_arguments.update(arguments)
outputs_of_tasks = {}
def resolve_argument(argument):
if isinstance(argument, (str, int, float, bool)):
return argument
elif isinstance(argument, GraphInputArgument):
return graph_input_arguments[argument.graph_input.input_name]
elif isinstance(argument, TaskOutputArgument):
upstream_task_output_ref = argument.task_output
upstream_task_outputs = outputs_of_tasks[upstream_task_output_ref.task_id]
upstream_task_output = upstream_task_outputs[upstream_task_output_ref.output_name]
return upstream_task_output
else:
raise TypeError('Argument for input has unexpected type "{}".'.format(type(argument)))
for task_id, task_spec in graph._toposorted_tasks.items(): # Cannot use graph.tasks here since they might be listed not in dependency order. Especially on python <3.6 where the dicts do not preserve ordering
task_factory = component_store._load_component_from_ref(task_spec.component_ref)
# TODO: Handle the case when optional graph component input is passed to optional task component input
task_arguments = {input_name: resolve_argument(argument) for input_name, argument in task_spec.arguments.items()}
task_component_spec = task_factory.component_spec
input_name_to_pythonic = generate_unique_name_conversion_table([input.name for input in task_component_spec.inputs or []], _sanitize_python_function_name)
output_name_to_pythonic = generate_unique_name_conversion_table([output.name for output in task_component_spec.outputs or []], _sanitize_python_function_name)
pythonic_output_name_to_original = {pythonic_name: original_name for original_name, pythonic_name in output_name_to_pythonic.items()}
pythonic_task_arguments = {input_name_to_pythonic[input_name]: argument for input_name, argument in task_arguments.items()}
task_obj = task_factory(**pythonic_task_arguments)
task_outputs_with_pythonic_names = task_obj.outputs
task_outputs_with_original_names = {output.name: task_obj.outputs[output.name] for output in task_component_spec.outputs or []}
outputs_of_tasks[task_id] = task_outputs_with_original_names
resolved_graph_outputs = OrderedDict([(output_name, resolve_argument(argument)) for output_name, argument in graph.output_values.items()])
# For resolved graph component tasks task.outputs point to the actual tasks that originally produced the output that is later returned from the graph
graph_task = _ResolvedGraphTask(
component_ref=component_ref,
component_spec=component_spec,
outputs = resolved_graph_outputs,
task_arguments=arguments,
)
return graph_task
|
14,704 |
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up access to Netatmo cameras."""
    home = config.get(CONF_HOME)
    verify_ssl = config.get(CONF_VERIFY_SSL, True)
    quality = config.get(CONF_QUALITY, DEFAULT_QUALITY)
    import pyatmo
    conf = hass.data.get(DATA_NETATMO_CONFIG, {})
    try:
        data = CameraData(hass, conf, home)
        for camera_name in data.get_camera_names():
            camera_type = data.get_camera_type(camera=camera_name, home=home)
            if CONF_CAMERAS in config:
                if config[CONF_CAMERAS] != [] and \
                        camera_name not in config[CONF_CAMERAS]:
                    continue
            add_entities([NetatmoCamera(data, camera_name, home,
                                        camera_type, verify_ssl, quality)])
        data.get_persons()
    except pyatmo.NoDevice:
        return None
|
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up access to Netatmo cameras."""
    home = config.get(CONF_HOME)
    verify_ssl = config.get(CONF_VERIFY_SSL, True)
    quality = config.get(CONF_QUALITY, DEFAULT_QUALITY)
    import pyatmo
    auth = hass.data[DATA_NETATMO_AUTH]
    try:
        data = CameraData(hass, conf, home)
        for camera_name in data.get_camera_names():
            camera_type = data.get_camera_type(camera=camera_name, home=home)
            if CONF_CAMERAS in config:
                if config[CONF_CAMERAS] != [] and \
                        camera_name not in config[CONF_CAMERAS]:
                    continue
            add_entities([NetatmoCamera(data, camera_name, home,
                                        camera_type, verify_ssl, quality)])
        data.get_persons()
    except pyatmo.NoDevice:
        return None
|
7,528 |
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, maxiters=5,
cenfunc='median', stdfunc='std', axis=None, masked=True,
return_bounds=False, copy=True, grow=False):
"""
Perform sigma-clipping on the provided data.
The data will be iterated over, each time rejecting values that are
less or more than a specified number of standard deviations from a
center value.
Clipped (rejected) pixels are those where::
data < cenfunc(data [,axis=int]) - (sigma_lower * stdfunc(data [,axis=int]))
data > cenfunc(data [,axis=int]) + (sigma_upper * stdfunc(data [,axis=int]))
Invalid data values (i.e., NaN or inf) are automatically clipped.
For an object-oriented interface to sigma clipping, see
:class:`SigmaClip`.
.. note::
`scipy.stats.sigmaclip
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
provides a subset of the functionality in this class. Also, its
input data cannot be a masked array and it does not handle data
that contains invalid values (i.e., NaN or inf). Also note that
it uses the mean as the centering function.
If your data is a `~numpy.ndarray` with no invalid values and
you want to use the mean as the centering function with
``axis=None`` and iterate to convergence, then
`scipy.stats.sigmaclip` is ~25-30% faster than the equivalent
settings here (``sigma_clip(data, cenfunc='mean', maxiters=None,
axis=None)``).
Parameters
----------
data : array_like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is
3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or `None`, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute the
center value for the clipping. If set to ``'median'`` or
``'mean'`` then having the optional `bottleneck`_ package
installed will result in the best performance. If using a
callable function/object and the ``axis`` keyword is used, then
it must be callable that can ignore NaNs (e.g., `numpy.nanmean`)
and has an ``axis`` keyword to return an array with axis
dimension(s) removed. The default is ``'median'``.
.. _bottleneck: https://github.com/pydata/bottleneck
stdfunc : {'std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If set to ``'std'``
then having the optional `bottleneck`_ package installed will
result in the best performance. If using a callable
function/object and the ``axis`` keyword is used, then it must
be callable that can ignore NaNs (e.g., `numpy.nanstd`) and has
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
axis : `None` or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where the
mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` and the minimum and maximum clipping thresholds
are returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are also
returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If `False`
and ``masked=True``, then the returned masked array data will
contain the same array as the input ``data`` (if ``data`` is a
`~numpy.ndarray` or `~numpy.ma.MaskedArray`). The default is
`True`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those that
fall outwith the clipping limits (only applied along `axis`, if
specified). A value of 1 will mask the nearest pixels in a cross
pattern around each deviant pixel, while 1.5 will also reject the
nearest diagonal neighbours and so on.
Returns
-------
result : flexible
If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned,
where the mask is `True` for clipped values and where the input
mask was `True`.
If ``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the masked array
or array above, the minimum and maximum clipping bounds are
returned.
If ``masked=False`` and ``axis=None``, then the output array is
a flattened 1D `~numpy.ndarray` where the clipped values have
been removed. If ``return_bounds=True`` then the returned
minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the output
`~numpy.ndarray` will have the same shape as the input ``data``
and contain ``np.nan`` where values were clipped. If the input
``data`` was a masked array, then the output `~numpy.ndarray`
will also contain ``np.nan`` where the input mask was `True`.
If ``return_bounds=True`` then the returned minimum and maximum
clipping thresholds will be be `~numpy.ndarray`\\s.
See Also
--------
SigmaClip, sigma_clipped_stats
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5)
This example clips all points that are more than 3 sigma relative to
the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and does not copy the data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
... cenfunc=mean, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, maxiters=maxiters,
cenfunc=cenfunc, stdfunc=stdfunc, grow=grow)
return sigclip(data, axis=axis, masked=masked,
return_bounds=return_bounds, copy=copy)
|
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, maxiters=5,
cenfunc='median', stdfunc='std', axis=None, masked=True,
return_bounds=False, copy=True, grow=False):
"""
Perform sigma-clipping on the provided data.
The data will be iterated over, each time rejecting values that are
less or more than a specified number of standard deviations from a
center value.
Clipped (rejected) pixels are those where::
data < cenfunc(data [,axis=int]) - (sigma_lower * stdfunc(data [,axis=int]))
data > cenfunc(data [,axis=int]) + (sigma_upper * stdfunc(data [,axis=int]))
Invalid data values (i.e., NaN or inf) are automatically clipped.
For an object-oriented interface to sigma clipping, see
:class:`SigmaClip`.
.. note::
`scipy.stats.sigmaclip
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
provides a subset of the functionality in this class. Also, its
input data cannot be a masked array and it does not handle data
that contains invalid values (i.e., NaN or inf). Also note that
it uses the mean as the centering function.
If your data is a `~numpy.ndarray` with no invalid values and
you want to use the mean as the centering function with
``axis=None`` and iterate to convergence, then
`scipy.stats.sigmaclip` is ~25-30% faster than the equivalent
settings here (``sigma_clip(data, cenfunc='mean', maxiters=None,
axis=None)``).
Parameters
----------
data : array_like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is
3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or `None`, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute the
center value for the clipping. If set to ``'median'`` or
``'mean'`` then having the optional `bottleneck`_ package
installed will result in the best performance. If using a
callable function/object and the ``axis`` keyword is used, then
it must be callable that can ignore NaNs (e.g., `numpy.nanmean`)
and has an ``axis`` keyword to return an array with axis
dimension(s) removed. The default is ``'median'``.
.. _bottleneck: https://github.com/pydata/bottleneck
stdfunc : {'std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If set to ``'std'``
then having the optional `bottleneck`_ package installed will
result in the best performance. If using a callable
function/object and the ``axis`` keyword is used, then it must
be callable that can ignore NaNs (e.g., `numpy.nanstd`) and has
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
axis : `None` or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where the
mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` and the minimum and maximum clipping thresholds
are returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are also
returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If `False`
and ``masked=True``, then the returned masked array data will
contain the same array as the input ``data`` (if ``data`` is a
`~numpy.ndarray` or `~numpy.ma.MaskedArray`). The default is
`True`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those that
fall outwith the clipping limits (only applied along ``axis``, if
specified). A value of 1 will mask the nearest pixels in a cross
pattern around each deviant pixel, while 1.5 will also reject the
nearest diagonal neighbours and so on.
Returns
-------
result : flexible
If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned,
where the mask is `True` for clipped values and where the input
mask was `True`.
If ``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the masked array
or array above, the minimum and maximum clipping bounds are
returned.
If ``masked=False`` and ``axis=None``, then the output array is
a flattened 1D `~numpy.ndarray` where the clipped values have
been removed. If ``return_bounds=True`` then the returned
minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the output
`~numpy.ndarray` will have the same shape as the input ``data``
and contain ``np.nan`` where values were clipped. If the input
``data`` was a masked array, then the output `~numpy.ndarray`
will also contain ``np.nan`` where the input mask was `True`.
If ``return_bounds=True`` then the returned minimum and maximum
clipping thresholds will be be `~numpy.ndarray`\\s.
See Also
--------
SigmaClip, sigma_clipped_stats
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5)
This example clips all points that are more than 3 sigma relative to
the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and does not copy the data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
... cenfunc=mean, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, maxiters=maxiters,
cenfunc=cenfunc, stdfunc=stdfunc, grow=grow)
return sigclip(data, axis=axis, masked=masked,
return_bounds=return_bounds, copy=copy)
|
2,285 |
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray of shape (n_samples, n_outputs)
Target values
Xy : array-like of shape (n_features, n_outputs), default=None
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float, default=1.0
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not
supported) ``For l1_ratio = 1`` it is an L1 penalty. For
``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
fit_intercept : bool, default=True
Whether to fit an intercept or not
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
"""
if l1_ratio == 0:
raise ValueError("Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument.")
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, accept_sparse='csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
|
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Target values
Xy : array-like of shape (n_features, n_outputs), default=None
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float, default=1.0
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not
supported) ``For l1_ratio = 1`` it is an L1 penalty. For
``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
fit_intercept : bool, default=True
Whether to fit an intercept or not
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
"""
if l1_ratio == 0:
raise ValueError("Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument.")
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, accept_sparse='csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
|
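Stripped of the sparse-matrix and preprocessing branches, the core of `_alpha_grid` is: `alpha_max` is the smallest penalty at which all coefficients are zero, computed from `X.T @ y`, and the grid is `n_alphas` log-spaced values running from `alpha_max` down to `eps * alpha_max`. A simplified dense sketch with made-up data (not the scikit-learn implementation itself):

```python
import numpy as np


def toy_alpha_grid(X, y, l1_ratio=1.0, eps=1e-3, n_alphas=100):
    # assumes X and y are already centered, single-output, dense
    n_samples = len(y)
    Xy = X.T @ y                                   # shape (n_features,)
    alpha_max = np.abs(Xy).max() / (n_samples * l1_ratio)
    # log-spaced grid, returned from largest to smallest like the function above
    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]


rng = np.random.default_rng(0)
X = rng.normal(size=(50, 4))
y = rng.normal(size=50)
alphas = toy_alpha_grid(X - X.mean(axis=0), y - y.mean())
assert alphas[0] > alphas[-1]                      # descending grid
```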
17,620 |
def create_pep_zero(app: Sphinx, env: BuildEnvironment, docnames: list[str]) -> None:
    # Read from root directory
    path = Path(".")
    pep_zero_filename = "pep-0000"
    peps: list[parser.PEP] = []
    pep_pat = re.compile(r"pep-\d{4}") # Path.match() doesn't support regular expressions
    # AUTHOR_OVERRIDES.csv is an exception file for PEP0 name parsing
    with open("AUTHOR_OVERRIDES.csv", encoding="utf-8") as f:
        authors_overrides = {}
        for line in csv.DictReader(f):
            full_name = line.pop("Overridden Name")
            authors_overrides[full_name] = line
    for file_path in path.iterdir():
        if not file_path.is_file():
            continue # Skip directories etc.
        if file_path.match("pep-0000*"):
            continue # Skip pre-existing PEP 0 files
        if pep_pat.match(str(file_path)) and file_path.suffix in {".txt", ".rst"}:
            pep = parser.PEP(path.joinpath(file_path).absolute(), authors_overrides)
            peps.append(pep)
    pep0_text = writer.PEPZeroWriter().write_pep0(sorted(peps))
    Path(f"{pep_zero_filename}.rst").write_text(pep0_text, encoding="utf-8")
    # Add to files for builder
    docnames.insert(1, pep_zero_filename)
    # Add to files for writer
    env.found_docs.add(pep_zero_filename)
    # Create peps.json
    create_pep_json(peps, app.outdir)
|
def create_pep_zero(app: Sphinx, env: BuildEnvironment, docnames: list[str]) -> None:
    # Read from root directory
    path = Path(".")
    pep_zero_filename = "pep-0000"
    peps: list[parser.PEP] = []
    pep_pat = re.compile(r"pep-\d{4}") # Path.match() doesn't support regular expressions
    # AUTHOR_OVERRIDES.csv is an exception file for PEP0 name parsing
    with open("AUTHOR_OVERRIDES.csv", "r", encoding="utf-8") as f:
        authors_overrides = {}
        for line in csv.DictReader(f):
            full_name = line.pop("Overridden Name")
            authors_overrides[full_name] = line
    for file_path in path.iterdir():
        if not file_path.is_file():
            continue # Skip directories etc.
        if file_path.match("pep-0000*"):
            continue # Skip pre-existing PEP 0 files
        if pep_pat.match(str(file_path)) and file_path.suffix in {".txt", ".rst"}:
            pep = parser.PEP(path.joinpath(file_path).absolute(), authors_overrides)
            peps.append(pep)
    pep0_text = writer.PEPZeroWriter().write_pep0(sorted(peps))
    Path(f"{pep_zero_filename}.rst").write_text(pep0_text, encoding="utf-8")
    # Add to files for builder
    docnames.insert(1, pep_zero_filename)
    # Add to files for writer
    env.found_docs.add(pep_zero_filename)
    # Create peps.json
    create_pep_json(peps, app.outdir)
|
19,830 |
def count_sobjects(sf: Salesforce, objs: T.Sequence[str]) -> ObjectCount:
    """Quickly count SObjects using SOQL and Parallelization"""
    with CompositeParallelSalesforce(sf, max_workers=8, chunk_size=5) as cpsf:
        responses, transport_errors = cpsf.do_composite_requests(
            (
                {
                    "method": "GET",
                    "url": f"/services/data/v{sf.sf_version}/query/?q=select count() from {obj}",
                    "referenceId": f"ref{obj}",
                }
                for obj in objs
            )
        )
    salesforce_errors, successes = partition(
        lambda response: response["httpStatusCode"] == 200, responses
    )
    transport_errors = tuple(error._asdict() for error in transport_errors)
    successes = list(successes)
    def removeprefix(mainstr: str, prefix: str):
        # Until Python 3.9 is minimum supported
        a, b = mainstr[0 : len(prefix)], mainstr[len(prefix) :]
        assert a == prefix
        return b
    ret = {
        removeprefix(response["referenceId"], "ref"): response["body"]["totalSize"]
        for response in successes
    }
    return ObjectCount(ret, transport_errors, tuple(salesforce_errors))
|
def count_sobjects(sf: Salesforce, objs: T.Sequence[str]) -> ObjectCount:
    """Quickly count SObjects using SOQL and Parallelization"""
    with CompositeParallelSalesforce(sf, max_workers=8, chunk_size=5) as cpsf:
        responses, transport_errors = cpsf.do_composite_requests(
            (
                {
                    "method": "GET",
                    "url": f"/services/data/v{sf.sf_version}/query/?q=select count() from {obj}",
                    "referenceId": f"ref{obj}",
                }
                for obj in objs
            )
        )
    salesforce_errors, successes = partition(
        lambda response: response["httpStatusCode"] == 200, responses
    )
    transport_errors = tuple({"exception": error.exception, "request": error.request} for error in transport_errors)
    successes = list(successes)
    def removeprefix(mainstr: str, prefix: str):
        # Until Python 3.9 is minimum supported
        a, b = mainstr[0 : len(prefix)], mainstr[len(prefix) :]
        assert a == prefix
        return b
    ret = {
        removeprefix(response["referenceId"], "ref"): response["body"]["totalSize"]
        for response in successes
    }
    return ObjectCount(ret, transport_errors, tuple(salesforce_errors))
|
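The inner `removeprefix` helper in both versions exists only because `str.removeprefix` requires Python 3.9. A small illustration of the equivalence, and of the one behavioural difference (the builtin silently returns the string unchanged when the prefix is absent, rather than asserting):

```python
def removeprefix_compat(mainstr: str, prefix: str) -> str:
    # same idea as the helper above: slice off the prefix and insist it was there
    head, tail = mainstr[: len(prefix)], mainstr[len(prefix):]
    assert head == prefix
    return tail


assert removeprefix_compat("refAccount", "ref") == "Account"
assert "refAccount".removeprefix("ref") == "Account"   # Python 3.9+ builtin
assert "Account".removeprefix("ref") == "Account"      # builtin: missing prefix is a no-op
```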
31,506 |
def http_request(method, url_suffix, params=None, files=None, ignore_errors=False):
""" General HTTP request.
Args:
ignore_errors (bool):
method: (str) 'GET', 'POST', 'DELETE' 'PUT'
url_suffix: (str)
params: (dict)
files: (tuple, dict)
Returns:
dict: response json
"""
def find_error(may_be_error_inside):
"""Function will search for dict with 'errors' or 'error_msg' key
Args:
may_be_error_inside: object, any object
Returns:
None if no error presents
Errors list/string if errors inside.
"""
if isinstance(may_be_error_inside, list):
for obj in may_be_error_inside:
ans = find_error(obj)
if ans:
return ans
return None
if isinstance(may_be_error_inside, dict):
if 'error_msg' in may_be_error_inside:
return may_be_error_inside['error_msg']
if 'errors' in may_be_error_inside and may_be_error_inside.get('errors'):
return may_be_error_inside['errors']
for value in may_be_error_inside.values():
err_r = find_error(value)
if err_r:
return err_r
return None
url = SERVER + url_suffix
r = requests.request(
method, url, params=params, headers=HEADERS, files=files, verify=USE_SSL, proxies=PROXIES
)
# Handle errors
try:
if r.status_code in {405, 401}:
return_error(ERROR_FORMAT.format(r.status_code, 'Token may be invalid'))
elif not is_json(r):
raise ValueError
response = r.json()
if r.status_code not in {200, 201, 202, 204} and not ignore_errors:
err = find_error(response)
if not err:
err = r.text
return_error(ERROR_FORMAT.format(r.status_code, err))
err = find_error(response)
if err:
if "no jobs were created" in build_errors_string(err):
err_message = err[0].get("error_msg") + ' Please try using the command with reanalyze=true.'
err[0]['error_msg'] = err_message
return_error(ERROR_FORMAT.format(r.status_code, err))
return response
except ValueError:
# If no JSON is present, must be an error that can't be ignored
return_error(ERROR_FORMAT.format(r.status_code, r.text))
|
def http_request(method, url_suffix, params=None, files=None, ignore_errors=False):
""" General HTTP request.
Args:
ignore_errors (bool):
method: (str) 'GET', 'POST', 'DELETE' 'PUT'
url_suffix: (str)
params: (dict)
files: (tuple, dict)
Returns:
dict: response json
"""
def find_error(may_be_error_inside):
"""Function will search for dict with 'errors' or 'error_msg' key
Args:
may_be_error_inside: object, any object
Returns:
None if no error presents
Errors list/string if errors inside.
"""
if isinstance(may_be_error_inside, list):
for obj in may_be_error_inside:
ans = find_error(obj)
if ans:
return ans
return None
if isinstance(may_be_error_inside, dict):
if 'error_msg' in may_be_error_inside:
return may_be_error_inside['error_msg']
if 'errors' in may_be_error_inside and may_be_error_inside.get('errors'):
return may_be_error_inside['errors']
for value in may_be_error_inside.values():
err_r = find_error(value)
if err_r:
return err_r
return None
url = SERVER + url_suffix
r = requests.request(
method, url, params=params, headers=HEADERS, files=files, verify=USE_SSL, proxies=PROXIES
)
# Handle errors
try:
if r.status_code in {405, 401}:
return_error(ERROR_FORMAT.format(r.status_code, 'Token may be invalid'))
elif not is_json(r):
raise ValueError
response = r.json()
if r.status_code not in {200, 201, 202, 204} and not ignore_errors:
err = find_error(response)
if not err:
err = r.text
return_error(ERROR_FORMAT.format(r.status_code, err))
err = find_error(response)
if err:
if "no jobs were created" in build_errors_string(err):
err_message = err[0].get("error_msg") + ' \nThere is a possibility this file has been analyzed before. Please try using the command with the argument: reanalyze=true.'
err[0]['error_msg'] = err_message
return_error(ERROR_FORMAT.format(r.status_code, err))
return response
except ValueError:
# If no JSON is present, must be an error that can't be ignored
return_error(ERROR_FORMAT.format(r.status_code, r.text))
|
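The nested `find_error` helper walks arbitrarily nested lists and dicts and returns the first `error_msg` or non-empty `errors` value it encounters. A standalone copy run against a small made-up payload shows the traversal:

```python
def find_error(may_be_error_inside):
    # standalone copy of the helper defined inside http_request above
    if isinstance(may_be_error_inside, list):
        for obj in may_be_error_inside:
            ans = find_error(obj)
            if ans:
                return ans
        return None
    if isinstance(may_be_error_inside, dict):
        if 'error_msg' in may_be_error_inside:
            return may_be_error_inside['error_msg']
        if 'errors' in may_be_error_inside and may_be_error_inside.get('errors'):
            return may_be_error_inside['errors']
        for value in may_be_error_inside.values():
            err_r = find_error(value)
            if err_r:
                return err_r
    return None


payload = {"data": [{"ok": True}, {"details": {"error_msg": "quota exceeded"}}]}
assert find_error(payload) == "quota exceeded"
```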
23,671 |
def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None):
"""
Smooth out short-term model transience using the Prilliman model [1]_.
The Prilliman et al. model applies an exponential moving average to
the output of a steady-state cell temperature model to account for a
module's thermal inertia and smooth out the cell temperature's response
to changing weather conditions.
.. warning::
This implementation requires the time series inputs to be regularly
sampled in time. Data with irregular time steps should be resampled
prior to using this function.
Parameters
----------
temp_cell : pandas Series
Cell temperature modeled with steady-state assumptions [C]
wind_speed : pandas Series
Wind speed, adjusted to correspond to array height [m/s]
unit_mass : float, default 11.1
Total mass of module divided by its one-sided surface area [kg/m^2]
coefficients : 4-element list-like, optional
Values for coefficients a_0–a_3 from [1]_
Returns
-------
temp_cell : pandas Series
Smoothed version of the input cell temperature [C]
Notes
-----
This smoothing model was developed and validated using the SAPM
model for the steady-state input.
References
----------
.. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani,
"Transient Weighted Moving-Average Model of Photovoltaic Module
Back-Surface Temperature," IEEE Journal of Photovoltaics, 2020.
:doi:`10.1109/JPHOTOV.2020.2992351`
"""
# TODO: check inputs to ensure regular spacing?
time_step = (temp_cell.index[1] - temp_cell.index[0]).total_seconds()
if time_step >= 1200:
# too coarsely sampled for smoothing to be relevant
return temp_cell
window = min(int(1200 / time_step), # time series > 20 minutes
len(temp_cell)) # time series < 20 minutes
# prefix with NaNs so that the rolling window is "full",
# even for the first actual value:
prefix = np.full(window, np.nan)
temp_cell_prefixed = np.append(prefix, temp_cell.values)
# get one row per 20-minute window
H = scipy.linalg.hankel(np.arange(window),
np.arange(window - 1, len(temp_cell_prefixed)))
subsets = temp_cell_prefixed[H].T
# calculate weights for the values in each window
if coefficients is not None:
a = coefficients
else:
# values from [1], Table II
a = [0.0046, 0.00046, -0.00023, -1.6e-5]
wind_speed = wind_speed.values
P = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass
timedeltas = np.arange(window, 0, -1) * time_step
weights = np.exp(-P[:, np.newaxis] * timedeltas)
# set weights corresponding to the prefix values to zero; otherwise the
# denominator of the weighted average below would be wrong
mask_idx = np.triu_indices(window)
np.fliplr(weights)[mask_idx] = 0
# change the first row of weights from zero to nan -- this is a
# trick to prevent div by zero warning when dividing by summed weights
weights[0, :] = np.nan
# finally, take the weighted average of each window
numerator = np.nansum(subsets[:-1] * weights, axis=1)
denominator = np.sum(weights, axis=1)
smoothed = numerator / denominator
smoothed[0] = temp_cell.values[0]
smoothed = pd.Series(smoothed, index=temp_cell.index)
return smoothed
|
def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None):
"""
Smooth out short-term model transience using the Prilliman model [1]_.
The Prilliman et al. model applies an exponential moving average to
the output of a steady-state cell temperature model to account for a
module's thermal inertia and smooth out the cell temperature's response
to changing weather conditions.
.. warning::
This implementation requires the time series inputs to be regularly
sampled in time. Data with irregular time steps should be resampled
prior to using this function.
Parameters
----------
temp_cell : pandas Series
Cell temperature modeled with steady-state assumptions [C]
wind_speed : pandas Series
Wind speed, adjusted to correspond to array height [m/s]
unit_mass : float, default 11.1
Total mass of module divided by its one-sided surface area [kg/m^2]
coefficients : 4-element list-like, optional
Values for coefficients a_0–a_3 from [1]_
Returns
-------
temp_cell : pandas Series
Smoothed version of the input cell temperature [C]
Notes
-----
This smoothing model was developed and validated using the SAPM
cell temperature model for the steady-state input.
References
----------
.. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani,
"Transient Weighted Moving-Average Model of Photovoltaic Module
Back-Surface Temperature," IEEE Journal of Photovoltaics, 2020.
:doi:`10.1109/JPHOTOV.2020.2992351`
"""
# TODO: check inputs to ensure regular spacing?
time_step = (temp_cell.index[1] - temp_cell.index[0]).total_seconds()
if time_step >= 1200:
# too coarsely sampled for smoothing to be relevant
return temp_cell
window = min(int(1200 / time_step), # time series > 20 minutes
len(temp_cell)) # time series < 20 minutes
# prefix with NaNs so that the rolling window is "full",
# even for the first actual value:
prefix = np.full(window, np.nan)
temp_cell_prefixed = np.append(prefix, temp_cell.values)
# get one row per 20-minute window
H = scipy.linalg.hankel(np.arange(window),
np.arange(window - 1, len(temp_cell_prefixed)))
subsets = temp_cell_prefixed[H].T
# calculate weights for the values in each window
if coefficients is not None:
a = coefficients
else:
# values from [1], Table II
a = [0.0046, 0.00046, -0.00023, -1.6e-5]
wind_speed = wind_speed.values
P = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass
timedeltas = np.arange(window, 0, -1) * time_step
weights = np.exp(-P[:, np.newaxis] * timedeltas)
# set weights corresponding to the prefix values to zero; otherwise the
# denominator of the weighted average below would be wrong
mask_idx = np.triu_indices(window)
np.fliplr(weights)[mask_idx] = 0
# change the first row of weights from zero to nan -- this is a
# trick to prevent div by zero warning when dividing by summed weights
weights[0, :] = np.nan
# finally, take the weighted average of each window
numerator = np.nansum(subsets[:-1] * weights, axis=1)
denominator = np.sum(weights, axis=1)
smoothed = numerator / denominator
smoothed[0] = temp_cell.values[0]
smoothed = pd.Series(smoothed, index=temp_cell.index)
return smoothed
|
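A usage sketch for the `prilliman` smoother defined above (assuming its module-level imports, numpy, pandas and scipy, are available, and using synthetic data): a step change in the steady-state temperature gets lagged by the exponentially weighted 20-minute window, while the very first sample is passed through unchanged.

```python
import numpy as np
import pandas as pd

# 30 minutes of 1-minute samples: steady-state temperature steps from 40 C to 50 C,
# with a constant 2 m/s wind speed (illustrative values only)
times = pd.date_range("2021-06-01 12:00", periods=30, freq="1min")
temp_steady = pd.Series(np.where(np.arange(30) < 10, 40.0, 50.0), index=times)
wind = pd.Series(2.0, index=times)

smoothed = prilliman(temp_steady, wind)

assert smoothed.iloc[0] == temp_steady.iloc[0]   # first sample is copied through
assert 40.0 < smoothed.iloc[15] < 50.0           # the moving average lags the step
```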
17,412 |
def assert_identical(a, b):
    if hasattr(a, "identical"):
        msg = f"not identical:\n{a!r}\n{b!r}"
        assert_identical(a, b), msg
    else:
        assert_array_equal(a, b)
|
def assert_identical(a, b):
    if hasattr(a, "identical"):
        msg = f"not identical:\n{a!r}\n{b!r}"
        assert_identical(a, b)
    else:
        assert_array_equal(a, b)
|
59,717 |
def test_grdhisteq_outgrid(grid):
    """
    Test the azimuth and direction parameters for grdhisteq with a set outgrid.
    """
    with GMTTempFile(suffix=".nc") as tmpfile:
        result = grdhisteq(grid=grid, outgrid=tmpfile.name)
        assert result is None # return value is None
        assert os.path.exists(path=tmpfile.name) # check that outgrid exists
        result = (
            grdinfo(grid=tmpfile.name, force_scan="a", per_column="n").strip().split()
        )
|
def test_grdhisteq_outgrid(grid):
    """
    Test the gaussian parameter of grdhisteq with a set outgrid.
    """
    with GMTTempFile(suffix=".nc") as tmpfile:
        result = grdhisteq(grid=grid, gaussian=True, outgrid=tmpfile.name)
        assert result is None # return value is None
        assert os.path.exists(path=tmpfile.name) # check that outgrid exists
        result = (
            grdinfo(grid=tmpfile.name, force_scan="a", per_column="n").strip().split()
        )
|
27,803 |
def main():
    init(autoreset=True)
    parser = argparse.ArgumentParser()
    parser.add_argument("version", help="Release version")
    parser.add_argument("template_name")
    parser.add_argument("--skip-check-links", action="store_true", default=False)
    options = parser.parse_args()
    pre_release(
        options.version,
        options.template_name,
        skip_check_links=options.skip_check_links,
    )
|
def main():
    init(autoreset=True)
    parser = argparse.ArgumentParser()
    parser.add_argument("version", help="Release version")
    parser.add_argument("template_name", help="Name of template file to use for release announcement")
    parser.add_argument("--skip-check-links", action="store_true", default=False)
    options = parser.parse_args()
    pre_release(
        options.version,
        options.template_name,
        skip_check_links=options.skip_check_links,
    )
|
30,166 |
def valid_substitution(original, subs_key):
    """
    Check if the substitution is valid.
    E.g., if original is 24018,0,0,0,-9,24027,876,1341,1884,100,988.415,603,1260,0,462.550,0,477.262,0,100.0100.19.02
    and subs_key 8,0
    make sure that the comma does not get replaced by a dot at index 5.
    """
    if not subs_key in original:
        return False
    starting_idx = original.index(subs_key)
    if (starting_idx - 1) >= 0 and original[starting_idx - 1].isdigit():
        return False
    ending_idx = starting_idx + len(subs_key)
    if (ending_idx + 1) < len(original) and original[ending_idx].isdigit():
        return False
    return True
|
def valid_substitution(original: str, subs_key: str):
    """
    Check if the substitution is valid.
    E.g., if original is 24018,0,0,0,-9,24027,876,1341,1884,100,988.415,603,1260,0,462.550,0,477.262,0,100.0100.19.02
    and subs_key 8,0
    make sure that the comma does not get replaced by a dot at index 5.
    """
    if not subs_key in original:
        return False
    starting_idx = original.index(subs_key)
    if (starting_idx - 1) >= 0 and original[starting_idx - 1].isdigit():
        return False
    ending_idx = starting_idx + len(subs_key)
    if (ending_idx + 1) < len(original) and original[ending_idx].isdigit():
        return False
    return True
|
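Concrete checks against `valid_substitution` as defined above (the long row string is taken from its docstring; the short string is a made-up illustration): the candidate `8,0` first occurs inside `24018,0`, immediately after the digit `1`, so it is rejected, whereas a candidate bounded by non-digits is accepted.

```python
row = "24018,0,0,0,-9,24027,876,1341,1884,100,988.415,603,1260,0,462.550,0,477.262,0,100.0100.19.02"

# "8,0" is found inside "24018,0"; replacing its comma would corrupt the number 24018
assert valid_substitution(row, "8,0") is False

# bounded by ";" on both sides, so the substitution site is safe (hypothetical string)
assert valid_substitution("speed;8,0;rpm", "8,0") is True
```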
2,529 |
def test_check_dataframe_with_boolean():
    """Check that dataframe with boolean return a float array with dtype=None"""
    pd = importorskip("pandas", minversion="1.0")
    df = pd.DataFrame({"bool": pd.Series([True, False, True], dtype="boolean")})
    array = check_array(df, dtype=None)
    assert array.dtype == np.float64
    assert_array_equal(array, [[True], [False], [True]])
|
def test_check_dataframe_with_only_boolean():
    """Check that dataframe with boolean return a float array with dtype=None"""
    pd = importorskip("pandas", minversion="1.0")
    df = pd.DataFrame({"bool": pd.Series([True, False, True], dtype="boolean")})
    array = check_array(df, dtype=None)
    assert array.dtype == np.float64
    assert_array_equal(array, [[True], [False], [True]])
|
30,472 |
def fetch_indicators(client: Client, limit: int = -1) -> Tuple[List[Dict], List]:
    """Fetches indicators from the feed to the indicators tab.
    Args:
        client (Client): Client object configured according to instance arguments.
        limit (int): Maximum number of indicators to return.
    Returns:
        Tuple of:
            str. Information to be printed to war room.
            Dict. Data to be entered to context.
            Dict. The raw data of the indicators.
    """
    iterator = client.build_iterator()
    indicators = []
    raw_response = []
    if limit != -1:
        iterator = iterator[:limit]
    for indicator in iterator:
        raw_data = {
            'Value': indicator['value'],
            'Type': indicator['type'],
            'Azure_group_name': indicator['azure_name'],
            'Azure_group_id': indicator['azure_id'],
            'Azure_region': indicator['azure_region'],
            'Azure_platform': indicator['azure_platform'],
            'Azure_system_service': indicator['azure_system_service']
        }
        indicators.append({
            'Value': indicator['value'],
            'Type': indicator['type'],
            'rawJSON': raw_data
        })
        raw_response.append(raw_data)
    return indicators, raw_response
|
def fetch_indicators(client: Client, limit: int = -1) -> Tuple[List[Dict], List]:
    """Fetches indicators from the feed to the indicators tab.
    Args:
        client (Client): Client object configured according to instance arguments.
        limit (int): Maximum number of indicators to return.
    Returns:
        Tuple of:
            str. Information to be printed to war room.
            Dict. Data to be entered to context.
            Dict. The raw data of the indicators.
    """
    iterator = client.build_iterator()
    indicators = []
    raw_response = []
    if limit != -1:
        iterator = iterator[:limit]
    for indicator in iterator:
        raw_data = {
            'Value': indicator['value'],
            'Type': indicator['type'],
            'Azure_group_name': indicator['azure_name'],
            'Azure_group_id': indicator['azure_id'],
            'Azure_region': indicator['azure_region'],
            'Azure_platform': indicator['azure_platform'],
            'Azure_system_service': indicator['azure_system_service']
        }
        indicators.append({
            'Value': indicator['value'],
            'Type': indicator['type'],
            'rawJSON': indicator
        })
        raw_response.append(raw_data)
    return indicators, raw_response
|
43,383 |
def about():
    """
    Prints the information for pennylane installation.
    """
    print("PennyLane is a cross-platform Python library for")
    print("quantum machine learning, automatic differentiation, and")
    print("optimization of hybrid quantum-classical computations.")
    print("")
    print("Copyright 2018 Xanadu Quantum Technologies Inc.")
    print("")
    print("PennyLane Version: {}".format(pennylane.__version__))
    print("Python Version: {0}.{1}.{2}".format(*sys.version_info[0:3]))
    print("Platform Info: {}{}".format(platform.system(), platform.machine()))
    pennylane_install_path = os.path.dirname(inspect.getsourcefile(pennylane))
    print("Installation path: {}".format(pennylane_install_path))
    print("Numpy Version: {}".format(numpy.__version__))
    print("Scipy Version: {}".format(scipy.__version__))
|
def about():
    """
    Prints the information for pennylane installation.
    """
    print("PennyLane is a cross-platform Python library for")
    print("quantum machine learning, automatic differentiation, and")
    print("optimization of hybrid quantum-classical computations.")
    print("")
    print("Copyright 2019 Xanadu Quantum Technologies Inc.")
    print("")
    print("PennyLane Version: {}".format(pennylane.__version__))
    print("Python Version: {0}.{1}.{2}".format(*sys.version_info[0:3]))
    print("Platform Info: {}{}".format(platform.system(), platform.machine()))
    pennylane_install_path = os.path.dirname(inspect.getsourcefile(pennylane))
    print("Installation path: {}".format(pennylane_install_path))
    print("Numpy Version: {}".format(numpy.__version__))
    print("Scipy Version: {}".format(scipy.__version__))
|
6,956 |
def get_context(context):
redirect_to = frappe.local.request.args.get("redirect-to")
if frappe.session.user != "Guest":
if not redirect_to:
if frappe.session.data.user_type=="Website User":
redirect_to = get_home_page()
else:
redirect_to = "/app"
if redirect_to != 'login':
frappe.local.flags.redirect_location = redirect_to
raise frappe.Redirect
# get settings from site config
context.no_header = True
context.for_test = 'login.html'
context["title"] = "Login"
context["provider_logins"] = []
context["disable_signup"] = frappe.utils.cint(frappe.db.get_single_value("Website Settings", "disable_signup"))
context["logo"] = (frappe.db.get_single_value('Website Settings', 'app_logo') or
frappe.get_hooks("app_logo_url")[-1])
context["app_name"] = (frappe.db.get_single_value('Website Settings', 'app_name') or
frappe.get_system_settings("app_name") or _("Frappe"))
custom_signup = frappe.get_hooks("custom_signup_form")
if custom_signup and len(custom_signup) and custom_signup[0]:
context["custom_signup_form"] = frappe.get_template(custom_signup[0]).render()
providers = [i.name for i in frappe.get_all("Social Login Key", filters={"enable_social_login":1}, order_by="name")]
for provider in providers:
client_id, base_url = frappe.get_value("Social Login Key", provider, ["client_id", "base_url"])
client_secret = get_decrypted_password("Social Login Key", provider, "client_secret")
provider_name = frappe.get_value("Social Login Key", provider, "provider_name")
icon = None
icon_url = frappe.get_value("Social Login Key", provider, "icon")
if icon_url:
if provider_name != "Custom":
icon = "<img src='{0}' alt={1}>".format(icon_url, provider_name)
else:
icon = get_icon_html(icon_url, small=True)
if (get_oauth_keys(provider) and client_secret and client_id and base_url):
context.provider_logins.append({
"name": provider,
"provider_name": provider_name,
"auth_url": get_oauth2_authorize_url(provider, redirect_to),
"icon": icon
})
context["social_login"] = True
ldap_settings = LDAPSettings.get_ldap_client_settings()
context["ldap_settings"] = ldap_settings
login_label = [_("Email")]
if frappe.utils.cint(frappe.get_system_settings("allow_login_using_mobile_number")):
login_label.append(_("Mobile"))
if frappe.utils.cint(frappe.get_system_settings("allow_login_using_user_name")):
login_label.append(_("Username"))
context['login_label'] = ' {0} '.format(_('or')).join(login_label)
return context
|
def get_context(context):
redirect_to = frappe.local.request.args.get("redirect-to")
if frappe.session.user != "Guest":
if not redirect_to:
if frappe.session.data.user_type=="Website User":
redirect_to = get_home_page()
else:
redirect_to = "/app"
if redirect_to != 'login':
frappe.local.flags.redirect_location = redirect_to
raise frappe.Redirect
# get settings from site config
context.no_header = True
context.for_test = 'login.html'
context["title"] = "Login"
context["provider_logins"] = []
context["disable_signup"] = frappe.utils.cint(frappe.db.get_single_value("Website Settings", "disable_signup"))
context["logo"] = (frappe.db.get_single_value('Website Settings', 'app_logo') or
frappe.get_hooks("app_logo_url")[-1])
context["app_name"] = (frappe.db.get_single_value('Website Settings', 'app_name') or
frappe.get_system_settings("app_name") or _("Frappe"))
signup_form_template = frappe.get_hooks("signup_form_template")
	if signup_form_template and len(signup_form_template) and signup_form_template[0]:
		context["custom_signup_form"] = frappe.get_template(signup_form_template[0]).render()
providers = [i.name for i in frappe.get_all("Social Login Key", filters={"enable_social_login":1}, order_by="name")]
for provider in providers:
client_id, base_url = frappe.get_value("Social Login Key", provider, ["client_id", "base_url"])
client_secret = get_decrypted_password("Social Login Key", provider, "client_secret")
provider_name = frappe.get_value("Social Login Key", provider, "provider_name")
icon = None
icon_url = frappe.get_value("Social Login Key", provider, "icon")
if icon_url:
if provider_name != "Custom":
icon = "<img src='{0}' alt={1}>".format(icon_url, provider_name)
else:
icon = get_icon_html(icon_url, small=True)
if (get_oauth_keys(provider) and client_secret and client_id and base_url):
context.provider_logins.append({
"name": provider,
"provider_name": provider_name,
"auth_url": get_oauth2_authorize_url(provider, redirect_to),
"icon": icon
})
context["social_login"] = True
ldap_settings = LDAPSettings.get_ldap_client_settings()
context["ldap_settings"] = ldap_settings
login_label = [_("Email")]
if frappe.utils.cint(frappe.get_system_settings("allow_login_using_mobile_number")):
login_label.append(_("Mobile"))
if frappe.utils.cint(frappe.get_system_settings("allow_login_using_user_name")):
login_label.append(_("Username"))
context['login_label'] = ' {0} '.format(_('or')).join(login_label)
return context
|
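Between the two get_context() versions, the custom signup template is read from the "signup_form_template" hook instead of "custom_signup_form". A hedged sketch of how a custom app's hooks.py might declare it; the template path is hypothetical:

# hooks.py of a hypothetical custom Frappe app.
# The login page collects this hook via frappe.get_hooks("signup_form_template")
# and renders the referenced template into context["custom_signup_form"].
signup_form_template = "templates/includes/custom_signup.html"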
31,363 |
def deescalation_reasons_command(client: Client) -> CommandResults:
"""Get deescalation reasons list from TOPdesk"""
deescalation_reasons = client.get_list("/incidents/deescalation-reasons")
return command_with_all_fields_readable_list(results=deescalation_reasons,
result_name='deescalation reasons',
output_prefix='deescalation_reason',
outputs_key_field='id')
|
def deescalation_reasons_command(client: Client) -> CommandResults:
"""Get deescalation reasons list from TOPdesk"""
deescalation_reasons = client.get_list("/incidents/deescalation-reasons")
return command_with_all_fields_readable_list(results=deescalation_reasons,
result_name='deescalation reasons',
output_prefix='DeescalationReason',
outputs_key_field='id')
|
57,736 |
def api_test(client: Client):
"""
    Returning 'ok' indicates that the integration works as expected and that the connection to the service is successful.
:param client: Cymptom client
"""
try:
results = client.api_test()
# TODO: Validate api key
except DemistoException as e:
return_error(
f"There was an error in testing connection to URL: {client._base_url}, API Key: {client._headers['Authorization'].split()[-1]}. "
f"Please make sure that the API key is valid and has the right permissions, and that the URL is in the correct form.")
except Exception as e:
return_error(
f"There was an error in testing connection to URL: {client._base_url}, API Key: {client._headers['Authorization'].split()[-1]}. "
f"Please make sure that the API key is valid and has the right permissions, and that the URL is in the correct form.")
if results and results.get("status") == "ok":
return return_results('ok')
else:
return return_error("There was an error")
|
def api_test(client: Client):
"""
    Returning 'ok' indicates that the integration works as expected and that the connection to the service is successful.
:param client: Cymptom client
"""
try:
results = client.api_test()
# TODO: Validate api key
except Exception as e:
return_error(
f"There was an error in testing connection to URL: {client._base_url}, API Key: {client._headers['Authorization'].split()[-1]}. "
f"Please make sure that the API key is valid and has the right permissions, and that the URL is in the correct form.")
if results and results.get("status") == "ok":
return return_results('ok')
else:
return return_error("There was an error")
|
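The second api_test() keeps a single except Exception handler because the dedicated DemistoException clause ran an identical body; assuming DemistoException derives from Exception (as library exceptions normally do), the broader handler already covers it. A generic illustration of that consolidation, with a stand-in exception class:

class ApiError(Exception):
    """Stand-in for a library-specific exception such as DemistoException."""


def call_api():
    raise ApiError("simulated failure")


try:
    call_api()
except Exception as exc:
    # The base-class handler also catches the subclass, so a separate
    # `except ApiError` block with the same body would be redundant.
    print(f"handled: {exc}")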
1,408 |
def _validate_bad_defaults(obj):
if not hasattr(obj, "_bad_defaults"):
return
obj_values = {param: getattr(obj, param) for param in obj._bad_defaults}
bad_params = [param for param, value in obj_values.items()
if value == 'warn']
if bad_params:
msg = ("There is no good default value for the following "
"parameters in {}. Please consult the documentation "
"on how to set them for your data.\n\t".format(
obj.__class__.__name__))
msg += '\n\t'.join(["'{}' - using default value: {}".format(
param, obj._bad_defaults[param]) for param in bad_params])
warnings.warn(msg, UserWarning)
for param in bad_params:
setattr(obj, param, obj._bad_defaults[param])
|
def _validate_bad_defaults(obj):
if not hasattr(obj, "_bad_defaults"):
return
obj_values = {param: getattr(obj, param) for param in obj._bad_defaults}
bad_params = [param for param, value in obj_values.items()
if value == 'warn']
if bad_params:
msg = ("There is no good default value for the following "
"parameters in {}. Please consult the documentation "
"on how to set them for your data.\n\t".format(
obj.__class__.__name__))
msg += '\n\t'.join(["'{}' - using default value: {!r}".format(
param, obj._bad_defaults[param]) for param in bad_params])
warnings.warn(msg, UserWarning)
for param in bad_params:
setattr(obj, param, obj._bad_defaults[param])
|
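The only change in _validate_bad_defaults() is the format spec: {!r} renders each default with repr() instead of str(), which keeps strings quoted and so distinguishes the string 'euclidean' from a bare number in the warning text. A two-line demonstration (the parameter name is a placeholder):

for default in ("euclidean", 10, None):
    print("'some_param' - using default value: {}".format(default))    # euclidean / 10 / None
    print("'some_param' - using default value: {!r}".format(default))  # 'euclidean' / 10 / None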
59,293 |
def requires_limited_api(test):
try:
import _testcapi
except ImportError:
return unittest.skipIf(True, 'needs _testcapi module')(test)
return unittest.skipIf(
not _testcapi.LIMITED_API_AVAILABLE, 'needs Limited API support')(test)
|
def requires_limited_api(test):
try:
import _testcapi
except ImportError:
return unittest.skip('needs _testcapi module')(test)
return unittest.skipUnless(
_testcapi.LIMITED_API_AVAILABLE, 'needs Limited API support')(test)
|
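The rewritten decorators are behaviourally equivalent: unittest.skip(...) replaces skipIf(True, ...), and skipUnless(cond, ...) replaces skipIf(not cond, ...); the new forms simply state the intent directly. A self-contained sketch, where LIMITED_API_AVAILABLE is a stand-in flag for the real _testcapi attribute:

import unittest

LIMITED_API_AVAILABLE = False  # stand-in for _testcapi.LIMITED_API_AVAILABLE


class Demo(unittest.TestCase):
    @unittest.skip("needs _testcapi module")
    def test_always_skipped(self):
        self.fail("never runs")

    @unittest.skipUnless(LIMITED_API_AVAILABLE, "needs Limited API support")
    def test_needs_limited_api(self):
        self.assertTrue(LIMITED_API_AVAILABLE)


if __name__ == "__main__":
    unittest.main()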
11,919 |
def grab(bbox=None, include_layered_windows=False, all_screens=False, xdisplay=None):
if xdisplay is None:
if sys.platform == "darwin":
fh, filepath = tempfile.mkstemp(".png")
os.close(fh)
args = ["screencapture"]
if bbox:
left, top, right, bottom = bbox
args += ["-R", f"{left},{top},{right-left},{bottom-top}"]
subprocess.call(args + ["-x", filepath])
im = Image.open(filepath)
im.load()
os.unlink(filepath)
if bbox:
im_resized = im.resize((right - left, bottom - top))
im.close()
return im_resized
return im
elif sys.platform == "win32":
offset, size, data = Image.core.grabscreen_win32(
include_layered_windows, all_screens
)
im = Image.frombytes(
"RGB",
size,
data,
# RGB, 32-bit line padding, origin lower left corner
"raw",
"BGR",
(size[0] * 3 + 3) & -4,
-1,
)
if bbox:
x0, y0 = offset
left, top, right, bottom = bbox
im = im.crop((left - x0, top - y0, right - x0, bottom - y0))
return im
elif not Image.core.HAVE_XCB and shutil.which("gnome-screenshot"):
fh, filepath = tempfile.mkstemp(".png")
os.close(fh)
subprocess.call(["gnome-screenshot", "-f", filepath])
im = Image.open(filepath)
im.load()
os.unlink(filepath)
if bbox:
im_cropped = im.crop(bbox)
im.close()
return im_cropped
return im
# use xdisplay=None for default display on non-win32/macOS systems
if not Image.core.HAVE_XCB:
raise OSError("Pillow was built without XCB support")
size, data = Image.core.grabscreen_x11(xdisplay)
im = Image.frombytes("RGB", size, data, "raw", "BGRX", size[0] * 4, 1)
if bbox:
im = im.crop(bbox)
return im
|
def grab(bbox=None, include_layered_windows=False, all_screens=False, xdisplay=None):
if xdisplay is None:
if sys.platform == "darwin":
fh, filepath = tempfile.mkstemp(".png")
os.close(fh)
args = ["screencapture"]
if bbox:
left, top, right, bottom = bbox
args += ["-R", f"{left},{top},{right-left},{bottom-top}"]
subprocess.call(args + ["-x", filepath])
im = Image.open(filepath)
im.load()
os.unlink(filepath)
if bbox:
im_resized = im.resize((right - left, bottom - top))
im.close()
return im_resized
return im
elif sys.platform == "win32":
offset, size, data = Image.core.grabscreen_win32(
include_layered_windows, all_screens
)
im = Image.frombytes(
"RGB",
size,
data,
# RGB, 32-bit line padding, origin lower left corner
"raw",
"BGR",
(size[0] * 3 + 3) & -4,
-1,
)
if bbox:
x0, y0 = offset
left, top, right, bottom = bbox
im = im.crop((left - x0, top - y0, right - x0, bottom - y0))
return im
elif shutil.which("gnome-screenshot"):
fh, filepath = tempfile.mkstemp(".png")
os.close(fh)
subprocess.call(["gnome-screenshot", "-f", filepath])
im = Image.open(filepath)
im.load()
os.unlink(filepath)
if bbox:
im_cropped = im.crop(bbox)
im.close()
return im_cropped
return im
# use xdisplay=None for default display on non-win32/macOS systems
if not Image.core.HAVE_XCB:
raise OSError("Pillow was built without XCB support")
size, data = Image.core.grabscreen_x11(xdisplay)
im = Image.frombytes("RGB", size, data, "raw", "BGRX", size[0] * 4, 1)
if bbox:
im = im.crop(bbox)
return im
|
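In the second grab(), the gnome-screenshot branch no longer checks Image.core.HAVE_XCB, so the external tool is preferred whenever it is on PATH. Typical usage is unchanged; a short example, assuming Pillow is installed and one of the platform backends is available:

from PIL import ImageGrab

# bbox is (left, top, right, bottom); omit it to capture the full screen.
im = ImageGrab.grab(bbox=(0, 0, 200, 100))
im.save("region.png")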
57,525 |
def get_current_account_id():
"""Returns account ID based on used AWS credentials."""
sts_client = boto3.client("sts")
caller_identity = sts_client.get_caller_identity()
if "Account" in caller_identity:
return caller_identity["Account"]
# Ideally sts_client itself will throw an exception if called with no credentials
return None
|
def get_current_account_id():
"""Returns account ID based on used AWS credentials."""
sts_client = boto3.client("sts")
caller_identity = sts_client.get_caller_identity()
# Ideally sts_client itself will throw an exception if called with no credentials
return caller_identity.get("Account")
|
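The simplified get_current_account_id() relies on dict.get, which already returns None for a missing key, so the membership check and the explicit fallback return collapse into one line. An equivalence sketch with a hypothetical response that lacks the key:

caller_identity = {"UserId": "AIDAEXAMPLE"}  # hypothetical STS response without "Account"

# Before: explicit membership check with a None fallback.
account_before = caller_identity["Account"] if "Account" in caller_identity else None
# After: dict.get defaults to None for a missing key.
account_after = caller_identity.get("Account")

assert account_before is None and account_after is None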
43,350 |
def main():
"""Main function"""
if sys.version_info[:2] == (3, 4):
warnings.warn('Python 3.4 will reach the end of its life on March 18th, 2019. '
'cfn-lint will end support for python 3.4 on July 1st, 2020.', Warning, stacklevel=3)
elif sys.version_info[:2] == (2, 7):
warnings.warn('Python 2.7 will reach the end of its life on January 1st, 2020. '
'cfn-lint will end support for python 2.7 on December 31st, 2020.', Warning, stacklevel=3)
try:
(args, filenames, formatter) = cfnlint.core.get_args_filenames(sys.argv[1:])
matches = []
for filename in filenames:
LOGGER.debug('Begin linting of file: %s', str(filename))
(template, rules, template_matches) = cfnlint.core.get_template_rules(filename, args)
if not template_matches:
matches.extend(
cfnlint.core.run_cli(
filename, template, rules,
args.regions, args.override_spec, args.mandatory_checks))
else:
matches.extend(template_matches)
LOGGER.debug('Completed linting of file: %s', str(filename))
matches_output = formatter.print_matches(matches)
if matches_output:
print(matches_output)
return cfnlint.core.get_exit_code(matches)
except cfnlint.core.CfnLintExitException as e:
LOGGER.error(str(e))
return e.exit_code
|
def main():
"""Main function"""
if sys.version_info[:2] == (3, 4):
warnings.warn('Python 3.4 has reached end of life. '
'cfn-lint will end support for python 3.4 on July 1st, 2020.', Warning, stacklevel=3)
elif sys.version_info[:2] == (2, 7):
warnings.warn('Python 2.7 will reach the end of its life on January 1st, 2020. '
'cfn-lint will end support for python 2.7 on December 31st, 2020.', Warning, stacklevel=3)
try:
(args, filenames, formatter) = cfnlint.core.get_args_filenames(sys.argv[1:])
matches = []
for filename in filenames:
LOGGER.debug('Begin linting of file: %s', str(filename))
(template, rules, template_matches) = cfnlint.core.get_template_rules(filename, args)
if not template_matches:
matches.extend(
cfnlint.core.run_cli(
filename, template, rules,
args.regions, args.override_spec, args.mandatory_checks))
else:
matches.extend(template_matches)
LOGGER.debug('Completed linting of file: %s', str(filename))
matches_output = formatter.print_matches(matches)
if matches_output:
print(matches_output)
return cfnlint.core.get_exit_code(matches)
except cfnlint.core.CfnLintExitException as e:
LOGGER.error(str(e))
return e.exit_code
|
3,166 |
def to_datetime(
arg: DatetimeScalarOrArrayConvertible | DictConvertible,
errors: DateTimeErrorChoices = "raise",
dayfirst: bool = False,
yearfirst: bool = False,
utc: bool | None = None,
format: str | None = None,
exact: bool = True,
unit: str | None = None,
infer_datetime_format: bool = False,
origin="unix",
cache: bool = True,
) -> DatetimeIndex | Series | DatetimeScalar | NaTType | None:
"""
Convert argument to datetime.
This function converts a scalar, array-like, :class:`Series` or
:class:`DataFrame`/dict-like to a pandas datetime object.
Parameters
----------
arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like
The object to convert to a datetime. If a :class:`DataFrame` is provided, the
method expects minimally the following columns: :const:`"year"`,
:const:`"month"`, :const:`"day"`.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If :const:`'raise'`, then invalid parsing will raise an exception.
- If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`.
- If :const:`'ignore'`, then invalid parsing will return the input.
dayfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
If :const:`True`, parses dates with the day first, e.g. :const:`"10/11/12"`
is parsed as :const:`2012-11-10`.
.. warning::
``dayfirst=True`` is not strict, but will prefer to parse
with day first. If a delimited date string cannot be parsed in
accordance with the given `dayfirst` option, e.g.
``to_datetime(['31-12-2021'])``, then a warning will be shown.
yearfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
- If :const:`True` parses dates with the year first, e.g.
:const:`"10/11/12"` is parsed as :const:`2010-11-12`.
- If both `dayfirst` and `yearfirst` are :const:`True`, `yearfirst` is
preceded (same as :mod:`dateutil`).
.. warning::
``yearfirst=True`` is not strict, but will prefer to parse
with year first.
utc : bool, default None
Control timezone-related parsing, localization and conversion.
- If :const:`True`, the function *always* returns a timezone-aware
UTC-localized :class:`Timestamp`, :class:`Series` or
:class:`DatetimeIndex`. To do this, timezone-naive inputs are
*localized* as UTC, while timezone-aware inputs are *converted* to UTC.
- If :const:`False` (default), inputs will not be coerced to UTC.
Timezone-naive inputs will remain naive, while timezone-aware ones
will keep their time offsets. Limitations exist for mixed
offsets (typically, daylight savings), see :ref:`Examples
<to_datetime_tz_examples>` section for details.
See also: pandas general documentation about `timezone conversion and
localization
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#time-zone-handling>`_.
format : str, default None
The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See
`strftime documentation
<https://docs.python.org/3/library/datetime.html
#strftime-and-strptime-behavior>`_ for more information on choices.
- Note that :const:`"%f"` will parse all the way
up to nanoseconds.
- Note that :const:`"%S"` without :const:`"%f"` will capture all the way
up to nanoseconds if present as decimal places, but will also handle
the case where the number of seconds is an integer.
exact : bool, default True
Control how `format` is used:
- If :const:`True`, require an exact `format` match.
- If :const:`False`, allow the `format` to match anywhere in the target
string.
unit : str, default 'ns'
The unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with ``unit='ms'`` and ``origin='unix'``, this would calculate
the number of milliseconds to the unix epoch start.
infer_datetime_format : bool, default False
If :const:`True` and no `format` is given, attempt to infer the format
of the datetime strings based on the first non-NaN element,
and if it can be inferred, switch to a faster method of parsing them.
In some cases this can increase the parsing speed by ~5-10x.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If :const:`'unix'` (or POSIX) time; origin is set to 1970-01-01.
- If :const:`'julian'`, unit must be :const:`'D'`, and origin is set to
beginning of Julian Calendar. Julian day number :const:`0` is assigned
to the day starting at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
cache : bool, default True
If :const:`True`, use a cache of unique, converted dates to apply the
datetime conversion. May produce significant speed-up when parsing
duplicate date strings, especially ones with timezone offsets. The cache
is only used when there are at least 50 values. The presence of
out-of-bounds values will render the cache unusable and may slow down
parsing.
.. versionchanged:: 0.25.0
changed default value from :const:`False` to :const:`True`.
Returns
-------
datetime
If parsing succeeded.
Return type depends on input (types in parenthesis correspond to
fallback in case of unsuccessful timezone or out-of-range timestamp
parsing):
- scalar: :class:`Timestamp` (or :class:`datetime.datetime`)
- array-like: :class:`DatetimeIndex` (or :class:`Series` with
:class:`object` dtype containing :class:`datetime.datetime`)
- Series: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
- DataFrame: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
Raises
------
ParserError
When parsing a date from string fails.
ValueError
When another datetime conversion error happens. For example when one
of 'year', 'month', day' columns is missing in a :class:`DataFrame`, or
when a Timezone-aware :class:`datetime.datetime` is found in an array-like
of mixed time offsets, and ``utc=False``.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_timedelta : Convert argument to timedelta.
convert_dtypes : Convert dtypes.
Notes
-----
Many input types are supported, and lead to different output types:
- **scalars** can be int, float, str, datetime object (from stdlib :mod:`datetime`
module or :mod:`numpy`). They are converted to :class:`Timestamp` when
possible, otherwise they are converted to :class:`datetime.datetime`.
None/NaN/null scalars are converted to :const:`NaT`.
- **array-like** can contain int, float, str, datetime objects. They are
converted to :class:`DatetimeIndex` when possible, otherwise they are
converted to :class:`Index` with :class:`object` dtype, containing
:class:`datetime.datetime`. None/NaN/null entries are converted to
:const:`NaT` in both cases.
- **Series** are converted to :class:`Series` with :class:`datetime64`
dtype when possible, otherwise they are converted to :class:`Series` with
:class:`object` dtype, containing :class:`datetime.datetime`. None/NaN/null
entries are converted to :const:`NaT` in both cases.
- **DataFrame/dict-like** are converted to :class:`Series` with
:class:`datetime64` dtype. For each row a datetime is created from assembling
the various dataframe columns. Column keys can be common abbreviations
    like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns']) or
plurals of the same.
The following causes are responsible for :class:`datetime.datetime` objects
being returned (possibly inside an :class:`Index` or a :class:`Series` with
:class:`object` dtype) instead of a proper pandas designated type
(:class:`Timestamp`, :class:`DatetimeIndex` or :class:`Series`
with :class:`datetime64` dtype):
- when any input element is before :const:`Timestamp.min` or after
:const:`Timestamp.max`, see `timestamp limitations
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_.
- when ``utc=False`` (default) and the input is an array-like or
:class:`Series` containing mixed naive/aware datetime, or aware with mixed
time offsets. Note that this happens in the (quite frequent) situation when
the timezone has a daylight savings policy. In that case you may wish to
use ``utc=True``.
Examples
--------
**Handling various input formats**
Assembling a datetime from multiple columns of a :class:`DataFrame`. The keys
can be common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns']) or plurals of the same
>>> df = pd.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
Passing ``infer_datetime_format=True`` can often-times speedup a parsing
if its not an ISO8601 format exactly, but in a regular format.
>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> %timeit pd.to_datetime(s, infer_datetime_format=True) # doctest: +SKIP
100 loops, best of 3: 10.4 ms per loop
>>> %timeit pd.to_datetime(s, infer_datetime_format=False) # doctest: +SKIP
1 loop, best of 3: 471 ms per loop
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
... origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'],
dtype='datetime64[ns]', freq=None)
**Exceptions to strptime behavior**
:const:`"%f"` will parse all the way up to nanoseconds.
>>> pd.to_datetime('2018-10-26 12:00:00.0000000011',
... format='%Y-%m-%d %H:%M:%S.%f')
Timestamp('2018-10-26 12:00:00.000000001')
:const:`"%S"` without :const:`"%f"` will capture all the way
up to nanoseconds if present as decimal places.
>>> pd.to_datetime('2017-03-22 15:16:45.433502912',
... format='%Y-%m-%d %H:%M:%S')
Timestamp('2017-03-22 15:16:45.433502912')
**Non-convertible date/times**
If a date does not meet the `timestamp limitations
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_, passing ``errors='ignore'``
will return the original input instead of raising any exception.
Passing ``errors='coerce'`` will force an out-of-bounds date to :const:`NaT`,
in addition to forcing non-dates (or non-parseable dates) to :const:`NaT`.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
.. _to_datetime_tz_examples:
**Timezones and time offsets**
The default behaviour (``utc=False``) is as follows:
- Timezone-naive inputs are converted to timezone-naive :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00:15'])
DatetimeIndex(['2018-10-26 12:00:00', '2018-10-26 13:00:15'],
dtype='datetime64[ns]', freq=None)
- Timezone-aware inputs *with constant time offset* are converted to
timezone-aware :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00 -0500', '2018-10-26 13:00 -0500'])
DatetimeIndex(['2018-10-26 12:00:00-05:00', '2018-10-26 13:00:00-05:00'],
dtype='datetime64[ns, pytz.FixedOffset(-300)]', freq=None)
- However, timezone-aware inputs *with mixed time offsets* (for example
issued from a timezone with daylight savings, such as Europe/Paris)
are **not successfully converted** to a :class:`DatetimeIndex`. Instead a
simple :class:`Index` containing :class:`datetime.datetime` objects is
returned:
>>> pd.to_datetime(['2020-10-25 02:00 +0200', '2020-10-25 04:00 +0100'])
Index([2020-10-25 02:00:00+02:00, 2020-10-25 04:00:00+01:00],
dtype='object')
- A mix of timezone-aware and timezone-naive inputs is converted to
a timezone-aware :class:`DatetimeIndex` if the offsets of the timezone-aware
are constant:
>>> from datetime import datetime
>>> pd.to_datetime(["2020-01-01 01:00 -01:00", datetime(2020, 1, 1, 3, 0)])
DatetimeIndex(['2020-01-01 01:00:00-01:00', '2020-01-01 02:00:00-01:00'],
dtype='datetime64[ns, pytz.FixedOffset(-60)]', freq=None)
|
Setting ``utc=True`` solves most of the above issues:
- Timezone-naive inputs are *localized* as UTC
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'], utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 13:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Timezone-aware inputs are *converted* to UTC (the output represents the
exact same datetime, but viewed from the UTC time offset `+00:00`).
>>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'],
... utc=True)
DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Inputs can contain both naive and aware, string or datetime, the above
rules still apply
>>> from datetime import timezone, timedelta
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 12:00 -0530',
... datetime(2020, 1, 1, 18),
... datetime(2020, 1, 1, 18,
... tzinfo=timezone(-timedelta(hours=1)))],
... utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 17:30:00+00:00',
'2020-01-01 18:00:00+00:00', '2020-01-01 19:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
"""
if arg is None:
return None
if origin != "unix":
arg = _adjust_to_origin(arg, origin, unit)
tz = "utc" if utc else None
convert_listlike = partial(
_convert_listlike_datetimes,
tz=tz,
unit=unit,
dayfirst=dayfirst,
yearfirst=yearfirst,
errors=errors,
exact=exact,
infer_datetime_format=infer_datetime_format,
)
result: Timestamp | NaTType | Series | Index
if isinstance(arg, Timestamp):
result = arg
if tz is not None:
if arg.tz is not None:
result = arg.tz_convert(tz)
else:
result = arg.tz_localize(tz)
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = arg.map(cache_array)
else:
values = convert_listlike(arg._values, format)
result = arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors, tz)
elif isinstance(arg, Index):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, name=arg.name)
else:
result = convert_listlike(arg, format, name=arg.name)
elif is_list_like(arg):
try:
# error: Argument 1 to "_maybe_cache" has incompatible type
# "Union[float, str, datetime, List[Any], Tuple[Any, ...], ExtensionArray,
# ndarray[Any, Any], Series]"; expected "Union[List[Any], Tuple[Any, ...],
# Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series], Series]"
argc = cast(
Union[list, tuple, ExtensionArray, np.ndarray, "Series", Index], arg
)
cache_array = _maybe_cache(argc, format, cache, convert_listlike)
except OutOfBoundsDatetime:
# caching attempts to create a DatetimeIndex, which may raise
# an OOB. If that's the desired behavior, then just reraise...
if errors == "raise":
raise
# ... otherwise, continue without the cache.
from pandas import Series
cache_array = Series([], dtype=object) # just an empty array
if not cache_array.empty:
result = _convert_and_box_cache(argc, cache_array)
else:
result = convert_listlike(argc, format)
else:
result = convert_listlike(np.array([arg]), format)[0]
if isinstance(arg, bool) and isinstance(result, np.bool_):
result = bool(result) # TODO: avoid this kludge.
# error: Incompatible return value type (got "Union[Timestamp, NaTType,
# Series, Index]", expected "Union[DatetimeIndex, Series, float, str,
# NaTType, None]")
return result # type: ignore[return-value]
|
def to_datetime(
arg: DatetimeScalarOrArrayConvertible | DictConvertible,
errors: DateTimeErrorChoices = "raise",
dayfirst: bool = False,
yearfirst: bool = False,
utc: bool | None = None,
format: str | None = None,
exact: bool = True,
unit: str | None = None,
infer_datetime_format: bool = False,
origin="unix",
cache: bool = True,
) -> DatetimeIndex | Series | DatetimeScalar | NaTType | None:
"""
Convert argument to datetime.
This function converts a scalar, array-like, :class:`Series` or
:class:`DataFrame`/dict-like to a pandas datetime object.
Parameters
----------
arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like
The object to convert to a datetime. If a :class:`DataFrame` is provided, the
method expects minimally the following columns: :const:`"year"`,
:const:`"month"`, :const:`"day"`.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If :const:`'raise'`, then invalid parsing will raise an exception.
- If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`.
- If :const:`'ignore'`, then invalid parsing will return the input.
dayfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
If :const:`True`, parses dates with the day first, e.g. :const:`"10/11/12"`
is parsed as :const:`2012-11-10`.
.. warning::
``dayfirst=True`` is not strict, but will prefer to parse
with day first. If a delimited date string cannot be parsed in
accordance with the given `dayfirst` option, e.g.
``to_datetime(['31-12-2021'])``, then a warning will be shown.
yearfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
- If :const:`True` parses dates with the year first, e.g.
:const:`"10/11/12"` is parsed as :const:`2010-11-12`.
- If both `dayfirst` and `yearfirst` are :const:`True`, `yearfirst` is
preceded (same as :mod:`dateutil`).
.. warning::
``yearfirst=True`` is not strict, but will prefer to parse
with year first.
utc : bool, default None
Control timezone-related parsing, localization and conversion.
- If :const:`True`, the function *always* returns a timezone-aware
UTC-localized :class:`Timestamp`, :class:`Series` or
:class:`DatetimeIndex`. To do this, timezone-naive inputs are
*localized* as UTC, while timezone-aware inputs are *converted* to UTC.
- If :const:`False` (default), inputs will not be coerced to UTC.
Timezone-naive inputs will remain naive, while timezone-aware ones
will keep their time offsets. Limitations exist for mixed
offsets (typically, daylight savings), see :ref:`Examples
<to_datetime_tz_examples>` section for details.
See also: pandas general documentation about `timezone conversion and
localization
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#time-zone-handling>`_.
format : str, default None
The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See
`strftime documentation
<https://docs.python.org/3/library/datetime.html
#strftime-and-strptime-behavior>`_ for more information on choices.
- Note that :const:`"%f"` will parse all the way
up to nanoseconds.
- Note that :const:`"%S"` without :const:`"%f"` will capture all the way
up to nanoseconds if present as decimal places, but will also handle
the case where the number of seconds is an integer.
exact : bool, default True
Control how `format` is used:
- If :const:`True`, require an exact `format` match.
- If :const:`False`, allow the `format` to match anywhere in the target
string.
unit : str, default 'ns'
The unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with ``unit='ms'`` and ``origin='unix'``, this would calculate
the number of milliseconds to the unix epoch start.
infer_datetime_format : bool, default False
If :const:`True` and no `format` is given, attempt to infer the format
of the datetime strings based on the first non-NaN element,
and if it can be inferred, switch to a faster method of parsing them.
In some cases this can increase the parsing speed by ~5-10x.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If :const:`'unix'` (or POSIX) time; origin is set to 1970-01-01.
- If :const:`'julian'`, unit must be :const:`'D'`, and origin is set to
beginning of Julian Calendar. Julian day number :const:`0` is assigned
to the day starting at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
cache : bool, default True
If :const:`True`, use a cache of unique, converted dates to apply the
datetime conversion. May produce significant speed-up when parsing
duplicate date strings, especially ones with timezone offsets. The cache
is only used when there are at least 50 values. The presence of
out-of-bounds values will render the cache unusable and may slow down
parsing.
.. versionchanged:: 0.25.0
changed default value from :const:`False` to :const:`True`.
Returns
-------
datetime
If parsing succeeded.
Return type depends on input (types in parenthesis correspond to
fallback in case of unsuccessful timezone or out-of-range timestamp
parsing):
- scalar: :class:`Timestamp` (or :class:`datetime.datetime`)
- array-like: :class:`DatetimeIndex` (or :class:`Series` with
:class:`object` dtype containing :class:`datetime.datetime`)
- Series: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
- DataFrame: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
Raises
------
ParserError
When parsing a date from string fails.
ValueError
When another datetime conversion error happens. For example when one
of 'year', 'month', day' columns is missing in a :class:`DataFrame`, or
when a Timezone-aware :class:`datetime.datetime` is found in an array-like
of mixed time offsets, and ``utc=False``.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_timedelta : Convert argument to timedelta.
convert_dtypes : Convert dtypes.
Notes
-----
Many input types are supported, and lead to different output types:
- **scalars** can be int, float, str, datetime object (from stdlib :mod:`datetime`
module or :mod:`numpy`). They are converted to :class:`Timestamp` when
possible, otherwise they are converted to :class:`datetime.datetime`.
None/NaN/null scalars are converted to :const:`NaT`.
- **array-like** can contain int, float, str, datetime objects. They are
converted to :class:`DatetimeIndex` when possible, otherwise they are
converted to :class:`Index` with :class:`object` dtype, containing
:class:`datetime.datetime`. None/NaN/null entries are converted to
:const:`NaT` in both cases.
- **Series** are converted to :class:`Series` with :class:`datetime64`
dtype when possible, otherwise they are converted to :class:`Series` with
:class:`object` dtype, containing :class:`datetime.datetime`. None/NaN/null
entries are converted to :const:`NaT` in both cases.
- **DataFrame/dict-like** are converted to :class:`Series` with
:class:`datetime64` dtype. For each row a datetime is created from assembling
the various dataframe columns. Column keys can be common abbreviations
    like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns']) or
plurals of the same.
The following causes are responsible for :class:`datetime.datetime` objects
being returned (possibly inside an :class:`Index` or a :class:`Series` with
:class:`object` dtype) instead of a proper pandas designated type
(:class:`Timestamp`, :class:`DatetimeIndex` or :class:`Series`
with :class:`datetime64` dtype):
- when any input element is before :const:`Timestamp.min` or after
:const:`Timestamp.max`, see `timestamp limitations
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_.
- when ``utc=False`` (default) and the input is an array-like or
:class:`Series` containing mixed naive/aware datetime, or aware with mixed
time offsets. Note that this happens in the (quite frequent) situation when
the timezone has a daylight savings policy. In that case you may wish to
use ``utc=True``.
Examples
--------
**Handling various input formats**
Assembling a datetime from multiple columns of a :class:`DataFrame`. The keys
can be common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns']) or plurals of the same
>>> df = pd.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
Passing ``infer_datetime_format=True`` can often-times speedup a parsing
if its not an ISO8601 format exactly, but in a regular format.
>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> %timeit pd.to_datetime(s, infer_datetime_format=True) # doctest: +SKIP
100 loops, best of 3: 10.4 ms per loop
>>> %timeit pd.to_datetime(s, infer_datetime_format=False) # doctest: +SKIP
1 loop, best of 3: 471 ms per loop
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
... origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'],
dtype='datetime64[ns]', freq=None)
**Differences with strptime behavior**
:const:`"%f"` will parse all the way up to nanoseconds.
>>> pd.to_datetime('2018-10-26 12:00:00.0000000011',
... format='%Y-%m-%d %H:%M:%S.%f')
Timestamp('2018-10-26 12:00:00.000000001')
:const:`"%S"` without :const:`"%f"` will capture all the way
up to nanoseconds if present as decimal places.
>>> pd.to_datetime('2017-03-22 15:16:45.433502912',
... format='%Y-%m-%d %H:%M:%S')
Timestamp('2017-03-22 15:16:45.433502912')
**Non-convertible date/times**
If a date does not meet the `timestamp limitations
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_, passing ``errors='ignore'``
will return the original input instead of raising any exception.
Passing ``errors='coerce'`` will force an out-of-bounds date to :const:`NaT`,
in addition to forcing non-dates (or non-parseable dates) to :const:`NaT`.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
.. _to_datetime_tz_examples:
**Timezones and time offsets**
The default behaviour (``utc=False``) is as follows:
- Timezone-naive inputs are converted to timezone-naive :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00:15'])
DatetimeIndex(['2018-10-26 12:00:00', '2018-10-26 13:00:15'],
dtype='datetime64[ns]', freq=None)
- Timezone-aware inputs *with constant time offset* are converted to
timezone-aware :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00 -0500', '2018-10-26 13:00 -0500'])
DatetimeIndex(['2018-10-26 12:00:00-05:00', '2018-10-26 13:00:00-05:00'],
dtype='datetime64[ns, pytz.FixedOffset(-300)]', freq=None)
- However, timezone-aware inputs *with mixed time offsets* (for example
issued from a timezone with daylight savings, such as Europe/Paris)
are **not successfully converted** to a :class:`DatetimeIndex`. Instead a
simple :class:`Index` containing :class:`datetime.datetime` objects is
returned:
>>> pd.to_datetime(['2020-10-25 02:00 +0200', '2020-10-25 04:00 +0100'])
Index([2020-10-25 02:00:00+02:00, 2020-10-25 04:00:00+01:00],
dtype='object')
- A mix of timezone-aware and timezone-naive inputs is converted to
a timezone-aware :class:`DatetimeIndex` if the offsets of the timezone-aware
are constant:
>>> from datetime import datetime
>>> pd.to_datetime(["2020-01-01 01:00 -01:00", datetime(2020, 1, 1, 3, 0)])
DatetimeIndex(['2020-01-01 01:00:00-01:00', '2020-01-01 02:00:00-01:00'],
dtype='datetime64[ns, pytz.FixedOffset(-60)]', freq=None)
|
Setting ``utc=True`` solves most of the above issues:
- Timezone-naive inputs are *localized* as UTC
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'], utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 13:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Timezone-aware inputs are *converted* to UTC (the output represents the
exact same datetime, but viewed from the UTC time offset `+00:00`).
>>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'],
... utc=True)
DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Inputs can contain both naive and aware, string or datetime, the above
rules still apply
>>> from datetime import timezone, timedelta
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 12:00 -0530',
... datetime(2020, 1, 1, 18),
... datetime(2020, 1, 1, 18,
... tzinfo=timezone(-timedelta(hours=1)))],
... utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 17:30:00+00:00',
'2020-01-01 18:00:00+00:00', '2020-01-01 19:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
"""
if arg is None:
return None
if origin != "unix":
arg = _adjust_to_origin(arg, origin, unit)
tz = "utc" if utc else None
convert_listlike = partial(
_convert_listlike_datetimes,
tz=tz,
unit=unit,
dayfirst=dayfirst,
yearfirst=yearfirst,
errors=errors,
exact=exact,
infer_datetime_format=infer_datetime_format,
)
result: Timestamp | NaTType | Series | Index
if isinstance(arg, Timestamp):
result = arg
if tz is not None:
if arg.tz is not None:
result = arg.tz_convert(tz)
else:
result = arg.tz_localize(tz)
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = arg.map(cache_array)
else:
values = convert_listlike(arg._values, format)
result = arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors, tz)
elif isinstance(arg, Index):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, name=arg.name)
else:
result = convert_listlike(arg, format, name=arg.name)
elif is_list_like(arg):
try:
# error: Argument 1 to "_maybe_cache" has incompatible type
# "Union[float, str, datetime, List[Any], Tuple[Any, ...], ExtensionArray,
# ndarray[Any, Any], Series]"; expected "Union[List[Any], Tuple[Any, ...],
# Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series], Series]"
argc = cast(
Union[list, tuple, ExtensionArray, np.ndarray, "Series", Index], arg
)
cache_array = _maybe_cache(argc, format, cache, convert_listlike)
except OutOfBoundsDatetime:
# caching attempts to create a DatetimeIndex, which may raise
# an OOB. If that's the desired behavior, then just reraise...
if errors == "raise":
raise
# ... otherwise, continue without the cache.
from pandas import Series
cache_array = Series([], dtype=object) # just an empty array
if not cache_array.empty:
result = _convert_and_box_cache(argc, cache_array)
else:
result = convert_listlike(argc, format)
else:
result = convert_listlike(np.array([arg]), format)[0]
if isinstance(arg, bool) and isinstance(result, np.bool_):
result = bool(result) # TODO: avoid this kludge.
# error: Incompatible return value type (got "Union[Timestamp, NaTType,
# Series, Index]", expected "Union[DatetimeIndex, Series, float, str,
# NaTType, None]")
return result # type: ignore[return-value]
|
40,435 |
def unbatch_edge_index(edge_index: Tensor, batch: Tensor) -> List[Tensor]:
r"""Splits the :obj:`edge_index` according to a :obj:`batch` vector.
Args:
edge_index (Tensor): The edge_index tensor.
batch (LongTensor): The batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. Must be ordered, consecutive, and
starts with 0.
:rtype: :class:`List[Tensor]`
"""
boundary = torch.cumsum(degree(batch), dim=0)
inc = torch.cat([boundary.new_tensor([0]), boundary[:-1]], dim=0)
edge_assignments = torch.bucketize(edge_index, boundary, right=True)
out = [(edge_index[edge_assignments == batch_idx].view(2, -1) -
inc[batch_idx]).to(torch.int64)
for batch_idx in range(batch.max().item() + 1)]
return out
|
def unbatch_edge_index(edge_index: Tensor, batch: Tensor) -> List[Tensor]:
r"""Splits the :obj:`edge_index` according to a :obj:`batch` vector.
Args:
edge_index (Tensor): The edge_index tensor.
batch (LongTensor): The batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
            node to a specific example. Must be ordered and start with 0.
:rtype: :class:`List[Tensor]`
"""
boundary = torch.cumsum(degree(batch), dim=0)
inc = torch.cat([boundary.new_tensor([0]), boundary[:-1]], dim=0)
edge_assignments = torch.bucketize(edge_index, boundary, right=True)
out = [(edge_index[edge_assignments == batch_idx].view(2, -1) -
inc[batch_idx]).to(torch.int64)
for batch_idx in range(batch.max().item() + 1)]
return out
|
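The two unbatch_edge_index() listings differ only in the docstring's description of the batch requirement; the implementation is identical. A small usage sketch, assuming PyTorch and PyTorch Geometric are installed and that the function is importable from torch_geometric.utils:

import torch
from torch_geometric.utils import unbatch_edge_index

# Two graphs of two nodes each: nodes 0-1 belong to graph 0, nodes 2-3 to graph 1.
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]])
batch = torch.tensor([0, 0, 1, 1])

print(unbatch_edge_index(edge_index, batch))
# expected: [tensor([[0, 1], [1, 0]]), tensor([[0, 1], [1, 0]])]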
12,031 |
def recombine_regions(
full_mesh_cube: Cube,
region_cubes: Iterable[Cube],
index_coord_name: AnyStr = "i_mesh_index",
) -> Cube:
"""
Put data from regional sub-meshes back onto the original full mesh.
The result is a region_cube identical to 'full_mesh_cube', but with its data
replaced by a combination of data from the provided 'region_cubes'.
The result metadata, including name and units, are also replaced by those
of the 'region_cubes' (which must all be the same).
Args:
* full_mesh_cube
Describes the full mesh and mesh-location to which the region data
refers, and acts as a template for the result.
Must have a :class:`~iris.experimental.ugrid.mesh.Mesh`.
* region_cubes
Contain data on a subset of the 'full_mesh_cube' mesh locations.
The region cubes do not need to have a mesh. There must be at least
1 of them, to determine the result phenomenon.
Their shapes and dimension-coords must all match those of
'full_mesh_cube', except in the mesh dimension, which can have
different sizes between the regions, and from the 'full_mesh_cube'.
The mesh dimension of each region cube must have a 1-D coord named by
'index_coord_name'. Although these region index coords can vary in
length, they must all have matching metadata (names, units and
attributes), and must also match the coord of that name in the
'full_mesh_cube', if there is one.
The ".points" values of the region index coords specify, for each
datapoint, its location in the original mesh -- i.e. they are indices
into the relevant mesh-location dimension.
* index_coord_name
Coord name of the index coords in each region cubes, containing the
mesh location indices.
Result:
* result_cube
An unstructured region_cube identical to 'full_mesh_cube', and with the
same mesh and location, but with its data and ".metadata" replaced by
that from the 'region_cubes'.
Where regions overlap, the result data comes from the last-listed of the
original region cubes which contain that location.
Where no region contains a datapoint, it will be masked in the result.
HINT: alternatively, values covered by no region can be taken from the
original 'full_mesh_cube' data, if 'full_mesh_cube' is *also* passed
as the first of the 'region_cubes'.
"""
if not region_cubes:
raise ValueError("'region_cubes' must be non-empty.")
mesh_dim = full_mesh_cube.mesh_dim()
if mesh_dim is None:
raise ValueError("'full_mesh_cube' has no \".mesh\".")
# Check the basic required properties of the input.
mesh_dim_coords = full_mesh_cube.coords(
dim_coords=True, dimensions=(mesh_dim,)
)
if not mesh_dim_coords:
err = (
"'full_mesh_cube' has no dim-coord on the mesh dimension, "
f"(dimension {mesh_dim})."
)
raise ValueError(err)
#
# Perform consistency checks on all the region-cubes.
#
def metadata_no_varname(cube_or_coord):
# Get a metadata object but omit any var_name.
metadata = cube_or_coord.metadata
fields = metadata._asdict()
fields["var_name"] = None
result = metadata.__class__(**fields)
return result
n_regions = len(region_cubes)
n_dims = full_mesh_cube.ndim
regioncube_metadata = None
indexcoord_metadata = None
for i_region, region_cube in enumerate(region_cubes):
reg_cube_str = (
f'Region cube #{i_region}/{n_regions}, "{region_cube.name()}"'
)
reg_ndims = region_cube.ndim
# Check dimensionality.
if reg_ndims != n_dims:
err = (
f"{reg_cube_str} has {reg_ndims} dimensions, but "
f"'full_mesh_cube' has {n_dims}."
)
raise ValueError(err)
# Get region_cube metadata, which will apply to the result..
region_cube_metadata = metadata_no_varname(region_cube)
if regioncube_metadata is None:
# Store the first region-cube metadata as a reference
regioncube_metadata = region_cube_metadata
elif region_cube_metadata != regioncube_metadata:
# Check subsequent region-cubes metadata against the first.
err = (
f"{reg_cube_str} has metadata {region_cube_metadata}, "
"which does not match that of the first region region_cube, "
f'"{region_cubes[0].name()}", '
f"which is {regioncube_metadata}."
)
raise ValueError(err)
# For each dim, check that coords match other regions, and full-cube.
for i_dim in range(full_mesh_cube.ndim):
if i_dim == mesh_dim:
# mesh dim : look for index coords (by name).
fulldim = full_mesh_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
regdim = region_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
else:
# non-mesh dims : look for dim-coords (only)
fulldim = full_mesh_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
regdim = region_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
if fulldim:
(fulldim,) = fulldim
full_dimname = fulldim.name()
fulldim_metadata = metadata_no_varname(fulldim)
if regdim:
(regdim,) = regdim
reg_dimname = regdim.name()
regdim_metadata = metadata_no_varname(regdim)
err = None
# N.B. checks for mesh- and non-mesh-dims are different.
if i_dim != mesh_dim:
# i_dim == mesh_dim : checks for non-mesh dims.
if fulldim and not regdim:
err = (
f"{reg_cube_str} has no dim-coord for dimension "
"{i_dim}, to match the 'full_mesh_cube' dimension "
f'"{full_dimname}".'
)
elif regdim and not fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, but 'full_mesh_cube' has none."
)
elif regdim != fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, which does not match that "
f"of 'full_mesh_cube', \"{full_dimname}\"."
)
else:
# i_dim == mesh_dim : different rules for this one
if not regdim:
# Must have an index coord on the mesh dimension
err = (
f'{reg_cube_str} has no "{index_coord_name}" coord on '
f"the mesh dimension (dimension {mesh_dim})."
)
elif fulldim and regdim_metadata != fulldim_metadata:
# May *not* have full-cube index, but if so it must match
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
"match that on 'full_mesh_cube' : "
f"{regdim_metadata} != {fulldim_metadata}."
)
# At this point, we know we *have* an index coord, and it does not
# conflict with the one on 'full_mesh_cube' (if any).
# Now check for matches between the region cubes.
if indexcoord_metadata is None:
# Store first occurrence (from first region-cube)
indexcoord_metadata = regdim_metadata
elif regdim_metadata != indexcoord_metadata:
# Compare subsequent occurences (from other region-cubes)
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
f"match that of the first region-cube : "
f"{regdim_metadata} != {indexcoord_metadata}."
)
if err:
raise ValueError(err)
# Use the mesh_dim to transpose inputs + outputs, if required, as it is
# simpler for all the array operations to always have the mesh dim *last*.
if mesh_dim == full_mesh_cube.ndim - 1:
# Mesh dim is already the last one : no tranposes required
untranspose_dims = None
else:
dim_range = np.arange(full_mesh_cube.ndim, dtype=int)
# Transpose all inputs to mesh-last order.
tranpose_dims = [i_dim for i_dim in dim_range if i_dim != mesh_dim] + [
mesh_dim
] # chop out mesh_dim + put it at the end.
def transposed_copy(cube, dim_order):
cube = cube.copy()
cube.transpose()
return cube
full_mesh_cube = transposed_copy(full_mesh_cube, tranpose_dims)
region_cubes = [
transposed_copy(region_cube, tranpose_dims)
for region_cube in region_cubes
]
# Also prepare for transforming the output back to the original order.
untranspose_dims = dim_range.copy()
# Neat trick to produce the reverse operation.
untranspose_dims[tranpose_dims] = dim_range
#
# Here's the core operation..
#
def fill_region(target, regiondata, regioninds):
if not target.flags.writeable:
# The initial input can be a section of a da.zeros(), which has no
# real array "behind" it. This means that real arrays created in
# memory are only chunk-sized, but it also means that 'target' may
# not be writeable. So take a copy to fix that, where needed.
target = target.copy()
# N.B. Indices are basically 1D, but may have leading *1 dims for
# alignment, to satisfy da.map_blocks
assert all(size == 1 for size in regioninds.shape[:-1])
inds = regioninds.flatten()
# Assign blocks with indexing on the last dim only.
target[..., inds] = regiondata
return target
# Create an initially 'empty' (all-masked) dask array matching the input.
# N.B. this does not use the full_mesh_cube.lazy_data() array, but only its
# shape and dtype, since the data itself is not used in the calculation.
# N.B. chunking matches the input cube, allowing performance control.
input_data = full_mesh_cube.lazy_data()
result_array = da.ma.masked_array(
da.zeros(
input_data.shape,
dtype=input_data.dtype,
chunks=input_data.chunksize,
),
True,
)
# Wrap this repeatedly with a lazy operation to assign each region.
# It is done this way because we couldn't get map_blocks to correctly wrap
# a function which does all regions in a single operation.
# TODO: replace with a single-stage solution: Probably better, if possible.
# Notes on resultant calculation properties:
# 1. map_blocks is chunk-mapped, so it is parallelisable and space-saving
# 2. However, fetching less than a whole chunk is not efficient
for region_cube in region_cubes:
# Lazy data array from the region cube
datarr = region_cube.lazy_data()
# Lazy indices from the mesh-dim coord.
mesh_dimcoord = region_cube.coord(
name_or_coord=index_coord_name, dimensions=region_cube.ndim - 1
)
indarr = mesh_dimcoord.lazy_points()
# Extend indarr dimensions to align it with the 'target' array dims.
assert indarr.ndim == 1
shape = (1,) * (region_cube.ndim - 1) + indarr.shape
indarr = indarr.reshape(shape)
# Apply the operation to paste from one region into the target.
# N.B. replacing 'result_array' each time around the loop.
result_array = da.map_blocks(
fill_region,
result_array,
datarr,
indarr,
dtype=result_array.dtype,
meta=np.ndarray,
)
# Construct the result cube.
result_cube = full_mesh_cube.copy()
result_cube.data = result_array
# Copy names, units + attributes from region data (N.B. but not var_name)
result_cube.metadata = regioncube_metadata
if untranspose_dims:
# Re-order dims as in the original input.
result_cube.transpose(untranspose_dims)
return result_cube
|
def recombine_regions(
full_mesh_cube: Cube,
region_cubes: Iterable[Cube],
index_coord_name: AnyStr = "i_mesh_index",
) -> Cube:
"""
Put data from regional sub-meshes back onto the original full mesh.
    The result is a cube identical to 'full_mesh_cube', but with its data
replaced by a combination of data from the provided 'region_cubes'.
The result metadata, including name and units, are also replaced by those
of the 'region_cubes' (which must all be the same).
Args:
* full_mesh_cube
Describes the full mesh and mesh-location to which the region data
refers, and acts as a template for the result.
Must have a :class:`~iris.experimental.ugrid.mesh.Mesh`.
* region_cubes
Contain data on a subset of the 'full_mesh_cube' mesh locations.
The region cubes do not need to have a mesh. There must be at least
1 of them, to determine the result phenomenon.
Their shapes and dimension-coords must all match those of
'full_mesh_cube', except in the mesh dimension, which can have
different sizes between the regions, and from the 'full_mesh_cube'.
The mesh dimension of each region cube must have a 1-D coord named by
'index_coord_name'. Although these region index coords can vary in
length, they must all have matching metadata (names, units and
attributes), and must also match the coord of that name in the
'full_mesh_cube', if there is one.
The ".points" values of the region index coords specify, for each
datapoint, its location in the original mesh -- i.e. they are indices
into the relevant mesh-location dimension.
* index_coord_name
        Coord name of the index coords in each region cube, containing the
mesh location indices.
Result:
* result_cube
        An unstructured cube identical to 'full_mesh_cube', and with the
same mesh and location, but with its data and ".metadata" replaced by
that from the 'region_cubes'.
Where regions overlap, the result data comes from the last-listed of the
original region cubes which contain that location.
Where no region contains a datapoint, it will be masked in the result.
HINT: alternatively, values covered by no region can be taken from the
original 'full_mesh_cube' data, if 'full_mesh_cube' is *also* passed
as the first of the 'region_cubes'.
"""
if not region_cubes:
raise ValueError("'region_cubes' must be non-empty.")
mesh_dim = full_mesh_cube.mesh_dim()
if mesh_dim is None:
raise ValueError("'full_mesh_cube' has no \".mesh\".")
# Check the basic required properties of the input.
mesh_dim_coords = full_mesh_cube.coords(
dim_coords=True, dimensions=(mesh_dim,)
)
if not mesh_dim_coords:
err = (
"'full_mesh_cube' has no dim-coord on the mesh dimension, "
f"(dimension {mesh_dim})."
)
raise ValueError(err)
#
# Perform consistency checks on all the region-cubes.
#
def metadata_no_varname(cube_or_coord):
# Get a metadata object but omit any var_name.
metadata = cube_or_coord.metadata
fields = metadata._asdict()
fields["var_name"] = None
result = metadata.__class__(**fields)
return result
n_regions = len(region_cubes)
n_dims = full_mesh_cube.ndim
regioncube_metadata = None
indexcoord_metadata = None
for i_region, region_cube in enumerate(region_cubes):
reg_cube_str = (
f'Region cube #{i_region}/{n_regions}, "{region_cube.name()}"'
)
reg_ndims = region_cube.ndim
# Check dimensionality.
if reg_ndims != n_dims:
err = (
f"{reg_cube_str} has {reg_ndims} dimensions, but "
f"'full_mesh_cube' has {n_dims}."
)
raise ValueError(err)
# Get region_cube metadata, which will apply to the result.
region_cube_metadata = metadata_no_varname(region_cube)
if regioncube_metadata is None:
# Store the first region-cube metadata as a reference
regioncube_metadata = region_cube_metadata
elif region_cube_metadata != regioncube_metadata:
# Check subsequent region-cubes metadata against the first.
err = (
f"{reg_cube_str} has metadata {region_cube_metadata}, "
"which does not match that of the first region region_cube, "
f'"{region_cubes[0].name()}", '
f"which is {regioncube_metadata}."
)
raise ValueError(err)
# For each dim, check that coords match other regions, and full-cube.
for i_dim in range(full_mesh_cube.ndim):
if i_dim == mesh_dim:
# mesh dim : look for index coords (by name).
fulldim = full_mesh_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
regdim = region_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
else:
# non-mesh dims : look for dim-coords (only)
fulldim = full_mesh_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
regdim = region_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
if fulldim:
(fulldim,) = fulldim
full_dimname = fulldim.name()
fulldim_metadata = metadata_no_varname(fulldim)
if regdim:
(regdim,) = regdim
reg_dimname = regdim.name()
regdim_metadata = metadata_no_varname(regdim)
err = None
# N.B. checks for mesh- and non-mesh-dims are different.
if i_dim != mesh_dim:
                # i_dim != mesh_dim : checks for non-mesh dims.
if fulldim and not regdim:
err = (
f"{reg_cube_str} has no dim-coord for dimension "
"{i_dim}, to match the 'full_mesh_cube' dimension "
f'"{full_dimname}".'
)
elif regdim and not fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, but 'full_mesh_cube' has none."
)
elif regdim != fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, which does not match that "
f"of 'full_mesh_cube', \"{full_dimname}\"."
)
else:
# i_dim == mesh_dim : different rules for this one
if not regdim:
# Must have an index coord on the mesh dimension
err = (
f'{reg_cube_str} has no "{index_coord_name}" coord on '
f"the mesh dimension (dimension {mesh_dim})."
)
elif fulldim and regdim_metadata != fulldim_metadata:
# May *not* have full-cube index, but if so it must match
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
"match that on 'full_mesh_cube' : "
f"{regdim_metadata} != {fulldim_metadata}."
)
# At this point, we know we *have* an index coord, and it does not
# conflict with the one on 'full_mesh_cube' (if any).
# Now check for matches between the region cubes.
if indexcoord_metadata is None:
# Store first occurrence (from first region-cube)
indexcoord_metadata = regdim_metadata
elif regdim_metadata != indexcoord_metadata:
                    # Compare subsequent occurrences (from other region-cubes)
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
f"match that of the first region-cube : "
f"{regdim_metadata} != {indexcoord_metadata}."
)
if err:
raise ValueError(err)
# Use the mesh_dim to transpose inputs + outputs, if required, as it is
# simpler for all the array operations to always have the mesh dim *last*.
if mesh_dim == full_mesh_cube.ndim - 1:
        # Mesh dim is already the last one : no transposes required
untranspose_dims = None
else:
dim_range = np.arange(full_mesh_cube.ndim, dtype=int)
# Transpose all inputs to mesh-last order.
        transpose_dims = [i_dim for i_dim in dim_range if i_dim != mesh_dim] + [
            mesh_dim
        ]  # chop out mesh_dim + put it at the end.
        def transposed_copy(cube, dim_order):
            cube = cube.copy()
            cube.transpose(dim_order)
            return cube
        full_mesh_cube = transposed_copy(full_mesh_cube, transpose_dims)
        region_cubes = [
            transposed_copy(region_cube, transpose_dims)
            for region_cube in region_cubes
        ]
        # Also prepare for transforming the output back to the original order.
        untranspose_dims = dim_range.copy()
        # Neat trick to produce the reverse operation.
        untranspose_dims[transpose_dims] = dim_range
#
# Here's the core operation..
#
def fill_region(target, regiondata, regioninds):
if not target.flags.writeable:
# The initial input can be a section of a da.zeros(), which has no
# real array "behind" it. This means that real arrays created in
# memory are only chunk-sized, but it also means that 'target' may
# not be writeable. So take a copy to fix that, where needed.
target = target.copy()
# N.B. Indices are basically 1D, but may have leading *1 dims for
# alignment, to satisfy da.map_blocks
assert all(size == 1 for size in regioninds.shape[:-1])
inds = regioninds.flatten()
# Assign blocks with indexing on the last dim only.
target[..., inds] = regiondata
return target
# Create an initially 'empty' (all-masked) dask array matching the input.
# N.B. this does not use the full_mesh_cube.lazy_data() array, but only its
# shape and dtype, since the data itself is not used in the calculation.
# N.B. chunking matches the input cube, allowing performance control.
input_data = full_mesh_cube.lazy_data()
result_array = da.ma.masked_array(
da.zeros(
input_data.shape,
dtype=input_data.dtype,
chunks=input_data.chunksize,
),
True,
)
# Wrap this repeatedly with a lazy operation to assign each region.
# It is done this way because we couldn't get map_blocks to correctly wrap
# a function which does all regions in a single operation.
# TODO: replace with a single-stage solution: Probably better, if possible.
# Notes on resultant calculation properties:
# 1. map_blocks is chunk-mapped, so it is parallelisable and space-saving
# 2. However, fetching less than a whole chunk is not efficient
for region_cube in region_cubes:
# Lazy data array from the region cube
datarr = region_cube.lazy_data()
# Lazy indices from the mesh-dim coord.
mesh_dimcoord = region_cube.coord(
name_or_coord=index_coord_name, dimensions=region_cube.ndim - 1
)
indarr = mesh_dimcoord.lazy_points()
# Extend indarr dimensions to align it with the 'target' array dims.
assert indarr.ndim == 1
shape = (1,) * (region_cube.ndim - 1) + indarr.shape
indarr = indarr.reshape(shape)
# Apply the operation to paste from one region into the target.
# N.B. replacing 'result_array' each time around the loop.
result_array = da.map_blocks(
fill_region,
result_array,
datarr,
indarr,
dtype=result_array.dtype,
meta=np.ndarray,
)
# Construct the result cube.
result_cube = full_mesh_cube.copy()
result_cube.data = result_array
# Copy names, units + attributes from region data (N.B. but not var_name)
result_cube.metadata = regioncube_metadata
if untranspose_dims:
# Re-order dims as in the original input.
result_cube.transpose(untranspose_dims)
return result_cube
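# --- Illustrative sketch (standalone NumPy/Dask, not part of the dataset row
# above): demonstrates the two array tricks used by 'recombine_regions' --
# the inverse-permutation "untranspose" trick, and pasting region data into
# an all-masked lazy array with da.map_blocks. Shapes and values are made up.
import dask.array as da
import numpy as np
# (1) Inverse permutation: move 'mesh_dim' to the end, then undo it exactly.
ndim, mesh_dim = 3, 0
dim_range = np.arange(ndim, dtype=int)
transpose_dims = [d for d in dim_range if d != mesh_dim] + [mesh_dim]
untranspose_dims = dim_range.copy()
untranspose_dims[transpose_dims] = dim_range  # inverse of the permutation
arr = np.random.rand(4, 5, 3)
assert np.array_equal(arr, arr.transpose(transpose_dims).transpose(untranspose_dims))
# (2) Region pasting: start from an all-masked lazy array and fill the last
# (mesh) dimension at the given indices, one region at a time.
def fill_region(target, regiondata, regioninds):
    if not target.flags.writeable:
        target = target.copy()
    target[..., regioninds.flatten()] = regiondata
    return target
result = da.ma.masked_array(da.zeros((2, 8), chunks=(2, 8)), True)
region_data = da.from_array(np.arange(6.0).reshape(2, 3), chunks=(2, 3))
region_inds = da.from_array(np.array([[1, 4, 6]]), chunks=(1, 3))
result = da.map_blocks(fill_region, result, region_data, region_inds,
                       dtype=result.dtype, meta=np.ndarray)
print(result.compute())  # masked everywhere except columns 1, 4 and 6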
|
8,314 |
def has_packages_for_distro(sha1, os_type, os_version, flavor,
package_versions=None):
"""
Checks to see if gitbuilder has packages for the given sha1, os_type and
flavor.
See above for package_versions description.
:param sha1: The sha1 hash of the ceph version.
:param os_type: The distro we want to get packages for, given
the ceph sha1. Ex. 'ubuntu', 'rhel', etc.
:param flavor: The distro flavor
:param package_versions: Use this optionally to use cached results of
previous calls to gitbuilder.
:returns: True, if packages are found. False otherwise.
"""
os_type = str(os_type)
if package_versions is None:
package_versions = get_package_versions(
sha1, os_type, os_version, flavor)
flavors = package_versions.get(sha1, dict()).get(
os_type, dict()).get(
os_version, dict())
# we want to return a boolean here, not the actual package versions
return bool(flavors.get(flavor, None))
|
def has_packages_for_distro(sha1, os_type, os_version, flavor,
package_versions=None):
"""
Checks to see if gitbuilder has packages for the given sha1, os_type and
flavor.
See above for package_versions description.
:param sha1: The sha1 hash of the ceph version.
:param os_type: The distro we want to get packages for, given
the ceph sha1. Ex. 'ubuntu', 'rhel', etc.
:param flavor: The ceph packages shaman flavor
:param package_versions: Use this optionally to use cached results of
previous calls to gitbuilder.
:returns: True, if packages are found. False otherwise.
"""
os_type = str(os_type)
if package_versions is None:
package_versions = get_package_versions(
sha1, os_type, os_version, flavor)
flavors = package_versions.get(sha1, dict()).get(
os_type, dict()).get(
os_version, dict())
# we want to return a boolean here, not the actual package versions
return bool(flavors.get(flavor, None))
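# --- Illustrative sketch (made-up data, not from the dataset row above): the
# cached 'package_versions' mapping is assumed to be nested as
# sha1 -> os_type -> os_version -> flavor -> version string, matching the
# chained .get() lookups in has_packages_for_distro.
package_versions = {
    "abc123": {"ubuntu": {"20.04": {"default": "16.2.7-1focal"}}},
}
print(has_packages_for_distro("abc123", "ubuntu", "20.04", "default",
                              package_versions))  # True
print(has_packages_for_distro("abc123", "ubuntu", "20.04", "crimson",
                              package_versions))  # False -- no such flavor cached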
|
5,631 |
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba', for backwards
compatibility, but 'sos' should be used for general-purpose filtering.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Design an analog filter and plot its frequency response, showing the
critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
>>> t = np.linspace(0, 1, 1000, False) # 1 second
>>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
>>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
>>> ax1.plot(t, sig)
>>> ax1.set_title('10 Hz and 20 Hz sinusoids')
>>> ax1.axis([0, 1, -2, 2])
Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and
apply it to the signal. (It's recommended to use second-order sections
format when filtering, to avoid numerical error with transfer function
(``ba``) format):
>>> sos = signal.cheby1(10, 1, 15, 'hp', fs=1000, output='sos')
>>> filtered = signal.sosfilt(sos, sig)
>>> ax2.plot(t, filtered)
>>> ax2.set_title('After 15 Hz high-pass filter')
>>> ax2.axis([0, 1, -2, 2])
>>> ax2.set_xlabel('Time [seconds]')
>>> plt.tight_layout()
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1', fs=fs)
|
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba' for backwards
compatibility, but 'sos' should be used for general-purpose filtering.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Design an analog filter and plot its frequency response, showing the
critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
>>> t = np.linspace(0, 1, 1000, False) # 1 second
>>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
>>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
>>> ax1.plot(t, sig)
>>> ax1.set_title('10 Hz and 20 Hz sinusoids')
>>> ax1.axis([0, 1, -2, 2])
Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and
apply it to the signal. (It's recommended to use second-order sections
format when filtering, to avoid numerical error with transfer function
(``ba``) format):
>>> sos = signal.cheby1(10, 1, 15, 'hp', fs=1000, output='sos')
>>> filtered = signal.sosfilt(sos, sig)
>>> ax2.plot(t, filtered)
>>> ax2.set_title('After 15 Hz high-pass filter')
>>> ax2.axis([0, 1, -2, 2])
>>> ax2.set_xlabel('Time [seconds]')
>>> plt.tight_layout()
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1', fs=fs)
|
14,451 |
def expand(sconf, cwd=None, parent=None):
"""Return config with shorthand and inline properties expanded.
This is necessary to keep the code in the :class:`WorkspaceBuilder` clean
and also allow for neat, short-hand configurations.
As a simple example, internally, tmuxp expects that config options
like ``shell_command`` are a list (array)::
'shell_command': ['htop']
tmuxp configs allow for it to be simply a string::
'shell_command': 'htop'
Kaptan will load JSON/YAML files into python dicts for you.
Parameters
----------
sconf : dict
the configuration for the session
cwd : str
directory to expand relative paths against. should be the dir of the
config directory.
parent : str
(used on recursive entries) start_directory of parent window or session
object.
Returns
-------
dict
"""
# Note: cli.py will expand configs relative to project's config directory
# for the first cwd argument.
if not cwd:
cwd = os.getcwd()
if 'session_name' in sconf:
sconf['session_name'] = expandshell(sconf['session_name'])
if 'window_name' in sconf:
if not (sconf['window_name'] is None):
sconf['window_name'] = expandshell(sconf['window_name'])
else:
sconf['shell_command'] = 'tmux rename-session \'\''
if 'environment' in sconf:
for key in sconf['environment']:
val = sconf['environment'][key]
val = expandshell(val)
if any(val.startswith(a) for a in ['.', './']):
val = os.path.normpath(os.path.join(cwd, val))
sconf['environment'][key] = val
if 'global_options' in sconf:
for key in sconf['global_options']:
val = sconf['global_options'][key]
if isinstance(val, string_types):
val = expandshell(val)
if any(val.startswith(a) for a in ['.', './']):
val = os.path.normpath(os.path.join(cwd, val))
sconf['global_options'][key] = val
if 'options' in sconf:
for key in sconf['options']:
val = sconf['options'][key]
if isinstance(val, string_types):
val = expandshell(val)
if any(val.startswith(a) for a in ['.', './']):
val = os.path.normpath(os.path.join(cwd, val))
sconf['options'][key] = val
# Any config section, session, window, pane that can contain the
# 'shell_command' value
if 'start_directory' in sconf:
sconf['start_directory'] = expandshell(sconf['start_directory'])
start_path = sconf['start_directory']
if any(start_path.startswith(a) for a in ['.', './']):
# if window has a session, or pane has a window with a
# start_directory of . or ./, make sure the start_directory can be
# relative to the parent.
#
# This is for the case where you may be loading a config from
# outside your shell current directory.
if parent:
cwd = parent['start_directory']
start_path = os.path.normpath(os.path.join(cwd, start_path))
sconf['start_directory'] = start_path
if 'before_script' in sconf:
sconf['before_script'] = expandshell(sconf['before_script'])
if any(sconf['before_script'].startswith(a) for a in ['.', './']):
sconf['before_script'] = os.path.normpath(
os.path.join(cwd, sconf['before_script'])
)
if 'shell_command' in sconf and isinstance(sconf['shell_command'], string_types):
sconf['shell_command'] = [sconf['shell_command']]
if 'shell_command_before' in sconf and isinstance(
sconf['shell_command_before'], string_types
):
sconf['shell_command_before'] = [sconf['shell_command_before']]
if 'shell_command_before' in sconf and isinstance(
sconf['shell_command_before'], list
):
sconf['shell_command_before'] = [
expandshell(scmd) for scmd in sconf['shell_command_before']
]
# recurse into window and pane config items
if 'windows' in sconf:
sconf['windows'] = [expand(window, parent=sconf) for window in sconf['windows']]
elif 'panes' in sconf:
for pconf in sconf['panes']:
p_index = sconf['panes'].index(pconf)
p = copy.deepcopy(pconf)
pconf = sconf['panes'][p_index] = {}
if isinstance(p, string_types):
p = {'shell_command': [p]}
elif not p:
p = {'shell_command': []}
assert isinstance(p, dict)
if 'shell_command' in p:
cmd = p['shell_command']
if isinstance(p['shell_command'], string_types):
cmd = [cmd]
if not cmd or any(a == cmd for a in [None, 'blank', 'pane']):
cmd = []
                if isinstance(cmd, list) and len(cmd) == 1:
if any(a in cmd for a in [None, 'blank', 'pane']):
cmd = []
p['shell_command'] = cmd
else:
p['shell_command'] = []
pconf.update(p)
sconf['panes'] = [expand(pane, parent=sconf) for pane in sconf['panes']]
return sconf
|
def expand(sconf, cwd=None, parent=None):
"""Return config with shorthand and inline properties expanded.
This is necessary to keep the code in the :class:`WorkspaceBuilder` clean
and also allow for neat, short-hand configurations.
As a simple example, internally, tmuxp expects that config options
like ``shell_command`` are a list (array)::
'shell_command': ['htop']
tmuxp configs allow for it to be simply a string::
'shell_command': 'htop'
Kaptan will load JSON/YAML files into python dicts for you.
Parameters
----------
sconf : dict
the configuration for the session
cwd : str
directory to expand relative paths against. should be the dir of the
config directory.
parent : str
(used on recursive entries) start_directory of parent window or session
object.
Returns
-------
dict
"""
# Note: cli.py will expand configs relative to project's config directory
# for the first cwd argument.
if not cwd:
cwd = os.getcwd()
if 'session_name' in sconf:
sconf['session_name'] = expandshell(sconf['session_name'])
if 'window_name' in sconf:
if sconf['window_name'] is not None:
sconf['window_name'] = expandshell(sconf['window_name'])
else:
sconf['shell_command'] = 'tmux rename-session \'\''
if 'environment' in sconf:
for key in sconf['environment']:
val = sconf['environment'][key]
val = expandshell(val)
if any(val.startswith(a) for a in ['.', './']):
val = os.path.normpath(os.path.join(cwd, val))
sconf['environment'][key] = val
if 'global_options' in sconf:
for key in sconf['global_options']:
val = sconf['global_options'][key]
if isinstance(val, string_types):
val = expandshell(val)
if any(val.startswith(a) for a in ['.', './']):
val = os.path.normpath(os.path.join(cwd, val))
sconf['global_options'][key] = val
if 'options' in sconf:
for key in sconf['options']:
val = sconf['options'][key]
if isinstance(val, string_types):
val = expandshell(val)
if any(val.startswith(a) for a in ['.', './']):
val = os.path.normpath(os.path.join(cwd, val))
sconf['options'][key] = val
# Any config section, session, window, pane that can contain the
# 'shell_command' value
if 'start_directory' in sconf:
sconf['start_directory'] = expandshell(sconf['start_directory'])
start_path = sconf['start_directory']
if any(start_path.startswith(a) for a in ['.', './']):
# if window has a session, or pane has a window with a
# start_directory of . or ./, make sure the start_directory can be
# relative to the parent.
#
# This is for the case where you may be loading a config from
# outside your shell current directory.
if parent:
cwd = parent['start_directory']
start_path = os.path.normpath(os.path.join(cwd, start_path))
sconf['start_directory'] = start_path
if 'before_script' in sconf:
sconf['before_script'] = expandshell(sconf['before_script'])
if any(sconf['before_script'].startswith(a) for a in ['.', './']):
sconf['before_script'] = os.path.normpath(
os.path.join(cwd, sconf['before_script'])
)
if 'shell_command' in sconf and isinstance(sconf['shell_command'], string_types):
sconf['shell_command'] = [sconf['shell_command']]
if 'shell_command_before' in sconf and isinstance(
sconf['shell_command_before'], string_types
):
sconf['shell_command_before'] = [sconf['shell_command_before']]
if 'shell_command_before' in sconf and isinstance(
sconf['shell_command_before'], list
):
sconf['shell_command_before'] = [
expandshell(scmd) for scmd in sconf['shell_command_before']
]
# recurse into window and pane config items
if 'windows' in sconf:
sconf['windows'] = [expand(window, parent=sconf) for window in sconf['windows']]
elif 'panes' in sconf:
for pconf in sconf['panes']:
p_index = sconf['panes'].index(pconf)
p = copy.deepcopy(pconf)
pconf = sconf['panes'][p_index] = {}
if isinstance(p, string_types):
p = {'shell_command': [p]}
elif not p:
p = {'shell_command': []}
assert isinstance(p, dict)
if 'shell_command' in p:
cmd = p['shell_command']
if isinstance(p['shell_command'], string_types):
cmd = [cmd]
if not cmd or any(a == cmd for a in [None, 'blank', 'pane']):
cmd = []
                if isinstance(cmd, list) and len(cmd) == 1:
if any(a in cmd for a in [None, 'blank', 'pane']):
cmd = []
p['shell_command'] = cmd
else:
p['shell_command'] = []
pconf.update(p)
sconf['panes'] = [expand(pane, parent=sconf) for pane in sconf['panes']]
return sconf
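# --- Illustrative sketch (hypothetical config; assumes the module helpers
# 'expandshell' and 'string_types' used by expand() are in scope, e.g. inside
# tmuxp's config module): shows the shorthand expansion described in the
# docstring -- scalar shell commands become lists, bare-string panes become
# pane dicts.
sconf = {
    'session_name': 'demo',
    'windows': [
        {
            'window_name': 'editor',
            'shell_command_before': 'set -e',
            'panes': ['vim', {'shell_command': 'htop'}],
        },
    ],
}
expanded = expand(sconf)
print(expanded['windows'][0]['shell_command_before'])  # ['set -e']
print(expanded['windows'][0]['panes'])
# [{'shell_command': ['vim']}, {'shell_command': ['htop']}]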
|
47,194 |
def get_modified_python_files():
"""
Return a list of python files that have been modified between the current head and the master branch.
"""
repo = Repo(".")
print(f"Master is at {repo.refs.master.commit}")
print(f"Current head is at {repo.head.commit}")
branching_commits = repo.merge_base(repo.refs.master, repo.head)
for commit in branching_commits:
print(f"Branching commit: {commit}")
print("\n### DIFF ###\n")
code_diff = []
for commit in branching_commits:
for diff_obj in commit.diff(repo.head.commit):
# We always add new python files
if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"):
code_diff.append(diff_obj.b_path)
            # We check that deleted python files won't break corresponding tests.
elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"):
code_diff.append(diff_obj.a_path)
# Now for modified files
elif diff_obj.change_type == "M" and diff_obj.b_path.endswith(".py"):
# In case of renames, we'll look at the tests using both the old and new name.
if diff_obj.a_path != diff_obj.b_path:
code_diff.extend([diff_obj.a_path, diff_obj.b_path])
else:
# Otherwise, we check modifications are in code and not docstrings.
if diff_is_docstring_only(repo, commit, diff_obj.b_path):
print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings.")
else:
code_diff.append(diff_obj.a_path)
return code_diff
|
def get_modified_python_files():
"""
Return a list of python files that have been modified between the current head and the master branch.
"""
repo = Repo(".")
print(f"Master is at {repo.refs.master.commit}")
print(f"Current head is at {repo.head.commit}")
branching_commits = repo.merge_base(repo.refs.master, repo.head)
for commit in branching_commits:
print(f"Branching commit: {commit}")
print("\n### DIFF ###\n")
code_diff = []
for commit in branching_commits:
for diff_obj in commit.diff(repo.head.commit):
# We always add new python files
if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"):
code_diff.append(diff_obj.b_path)
            # We check that deleted python files won't break corresponding tests.
elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"):
code_diff.append(diff_obj.a_path)
# Now for modified files
elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"):
# In case of renames, we'll look at the tests using both the old and new name.
if diff_obj.a_path != diff_obj.b_path:
code_diff.extend([diff_obj.a_path, diff_obj.b_path])
else:
# Otherwise, we check modifications are in code and not docstrings.
if diff_is_docstring_only(repo, commit, diff_obj.b_path):
print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings.")
else:
code_diff.append(diff_obj.a_path)
return code_diff
|
42,543 |
def count_unary_operator(
node: ast.AST,
operator: Type[ast.unaryop],
amount: int = 0,
) -> int:
"""Returns amount of unary operators matching input."""
parent = (node)
if parent is None or not isinstance(parent, ast.UnaryOp):
return amount
if isinstance(parent.op, operator):
return count_unary_operator(parent, operator, amount + 1)
return count_unary_operator(parent, operator, amount)
|
def count_unary_operator(
node: ast.AST,
operator: Type[ast.unaryop],
amount: int = 0,
) -> int:
"""Returns amount of unary operators matching input."""
parent = get_parent(node)
if parent is None or not isinstance(parent, ast.UnaryOp):
return amount
if isinstance(parent.op, operator):
return count_unary_operator(parent, operator, amount + 1)
return count_unary_operator(parent, operator, amount)
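# --- Illustrative sketch: exercising count_unary_operator on '--x'. It assumes
# 'import ast' and 'from typing import Type' for the definition above, and a
# 'get_parent' that follows child -> parent links; here we attach '.parent'
# attributes ourselves purely for the demo.
import ast
from typing import Type
def get_parent(node: ast.AST):
    return getattr(node, 'parent', None)
tree = ast.parse('--x', mode='eval')  # UnaryOp(USub, UnaryOp(USub, Name('x')))
for outer in ast.walk(tree):
    for child in ast.iter_child_nodes(outer):
        child.parent = outer
name_node = tree.body.operand.operand  # the innermost Name('x') node
print(count_unary_operator(name_node, ast.USub))  # 2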
|
20,049 |
def _read_features(features_file: str) -> Dict[int, OpenMLDataFeature]:
features_pickle_file = features_file + ".pkl"
try:
with open(features_pickle_file, "rb") as fh_binary:
features = pickle.load(fh_binary)
except: # noqa E722
with io.open(features_file, encoding="utf8") as fh:
features_xml_string = fh.read()
xml_dict = xmltodict.parse(
features_xml_string, force_list=("oml:feature", "oml:nominal_value")
)
features_xml = xml_dict["oml:data_features"]
features = {}
for idx, xmlfeature in enumerate(features_xml["oml:feature"]):
nr_missing = xmlfeature.get("oml:number_of_missing_values", 0)
feature = OpenMLDataFeature(
int(xmlfeature["oml:index"]),
xmlfeature["oml:name"],
xmlfeature["oml:data_type"],
xmlfeature.get("oml:nominal_value"),
int(nr_missing),
)
if idx != feature.index:
raise ValueError("Data features not provided " "in right order")
features[feature.index] = feature
with open(features_pickle_file, "wb") as fh_binary:
pickle.dump(features, fh_binary)
return features
|
def _read_features(features_file: str) -> Dict[int, OpenMLDataFeature]:
features_pickle_file = features_file + ".pkl"
try:
with open(features_pickle_file, "rb") as fh_binary:
features = pickle.load(fh_binary)
except: # noqa E722
with io.open(features_file, encoding="utf8") as fh:
features_xml_string = fh.read()
xml_dict = xmltodict.parse(
features_xml_string, force_list=("oml:feature", "oml:nominal_value")
)
features_xml = xml_dict["oml:data_features"]
features = {}
for idx, xmlfeature in enumerate(features_xml["oml:feature"]):
nr_missing = xmlfeature.get("oml:number_of_missing_values", 0)
feature = OpenMLDataFeature(
int(xmlfeature["oml:index"]),
xmlfeature["oml:name"],
xmlfeature["oml:data_type"],
xmlfeature.get("oml:nominal_value"),
int(nr_missing),
)
if idx != feature.index:
raise ValueError("Data features not provided in right order")
features[feature.index] = feature
with open(features_pickle_file, "wb") as fh_binary:
pickle.dump(features, fh_binary)
return features
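# --- Illustrative sketch (generic, no OpenML dependency): the same
# parse-once-then-pickle cache pattern as _read_features, with a JSON file
# standing in for the features XML. File names here are made up.
import json
import os
import pickle
import tempfile
def read_with_cache(source_file):
    cache_file = source_file + ".pkl"
    try:
        with open(cache_file, "rb") as fh:
            return pickle.load(fh)  # fast path: cached result
    except Exception:
        with open(source_file, encoding="utf8") as fh:
            data = json.load(fh)  # stand-in for the xmltodict parse above
        with open(cache_file, "wb") as fh:
            pickle.dump(data, fh)  # refresh the cache for next time
        return data
with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, "features.json")
    with open(src, "w", encoding="utf8") as fh:
        json.dump({"0": "id", "1": "name"}, fh)
    print(read_with_cache(src))  # parsed from JSON, then cached
    print(read_with_cache(src))  # served from features.json.pkl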
|
12,440 |
def analyze_member_var_access(name: str,
itype: Instance,
info: TypeInfo,
mx: MemberContext) -> Type:
"""Analyse attribute access that does not target a method.
This is logically part of analyze_member_access and the arguments are similar.
original_type is the type of E in the expression E.var
"""
# It was not a method. Try looking up a variable.
v = lookup_member_var_or_accessor(info, name, mx.is_lvalue)
vv = v
if isinstance(vv, Decorator):
# The associated Var node of a decorator contains the type.
v = vv.var
if isinstance(vv, TypeInfo):
# If the associated variable is a TypeInfo synthesize a Var node for
# the purposes of type checking. This enables us to type check things
# like accessing class attributes on an inner class.
v = Var(name, type=type_object_type(vv, mx.builtin_type))
v.info = info
if isinstance(vv, TypeAlias) and isinstance(vv.target, Instance):
# Similar to the above TypeInfo case, we allow using
# qualified type aliases in runtime context if it refers to an
# instance type. For example:
# class C:
# A = List[int]
# x = C.A() <- this is OK
typ = instance_alias_type(vv, mx.builtin_type)
v = Var(name, type=typ)
v.info = info
if isinstance(v, Var):
implicit = info[name].implicit
# An assignment to final attribute is always an error,
# independently of types.
if mx.is_lvalue and not mx.chk.get_final_context():
check_final_member(name, info, mx.msg, mx.context)
return analyze_var(name, v, itype, info, mx, implicit=implicit)
elif isinstance(v, FuncDef):
assert False, "Did not expect a function"
elif not v and name not in ['__getattr__', '__setattr__', '__getattribute__']:
if not mx.is_lvalue:
for method_name in ('__getattribute__', '__getattr__'):
method = info.get_method(method_name)
# __getattribute__ is defined on builtins.object and returns Any, so without
# the guard this search will always find object.__getattribute__ and conclude
# that the attribute exists
if method and method.info.fullname() != 'builtins.object':
function = function_type(method, mx.builtin_type('builtins.function'))
bound_method = bind_self(function, mx.original_type)
typ = map_instance_to_supertype(itype, method.info)
getattr_type = expand_type_by_instance(bound_method, typ)
if isinstance(getattr_type, CallableType):
result = getattr_type.ret_type
# call the get attribute hook before returning
fullname = '{}.{}'.format(info.fullname(), name)
hook = mx.chk.plugin.get_attribute_hook(fullname)
if hook:
result = hook(AttributeContext(mx.original_type, result,
mx.context, mx.chk))
return result
else:
setattr_meth = info.get_method('__setattr__')
if setattr_meth and setattr_meth.info.fullname() != 'builtins.object':
setattr_func = function_type(setattr_meth, mx.builtin_type('builtins.function'))
bound_type = bind_self(setattr_func, mx.original_type)
typ = map_instance_to_supertype(itype, setattr_meth.info)
setattr_type = expand_type_by_instance(bound_type, typ)
if isinstance(setattr_type, CallableType) and len(setattr_type.arg_types) > 0:
return setattr_type.arg_types[-1]
if itype.type.fallback_to_any:
return AnyType(TypeOfAny.special_form)
# Could not find the member.
if mx.is_super:
mx.msg.undefined_in_superclass(name, mx.context)
return AnyType(TypeOfAny.from_error)
else:
if mx.chk and mx.chk.should_suppress_optional_error([itype]):
return AnyType(TypeOfAny.from_error)
return mx.msg.has_no_attr(mx.original_type, itype, name, mx.context)
|
def analyze_member_var_access(name: str,
itype: Instance,
info: TypeInfo,
mx: MemberContext) -> Type:
"""Analyse attribute access that does not target a method.
This is logically part of analyze_member_access and the arguments are similar.
original_type is the type of E in the expression E.var
"""
# It was not a method. Try looking up a variable.
v = lookup_member_var_or_accessor(info, name, mx.is_lvalue)
vv = v
if isinstance(vv, Decorator):
# The associated Var node of a decorator contains the type.
v = vv.var
if isinstance(vv, TypeInfo):
# If the associated variable is a TypeInfo synthesize a Var node for
# the purposes of type checking. This enables us to type check things
# like accessing class attributes on an inner class.
v = Var(name, type=type_object_type(vv, mx.builtin_type))
v.info = info
if isinstance(vv, TypeAlias) and isinstance(vv.target, Instance):
# Similar to the above TypeInfo case, we allow using
# qualified type aliases in runtime context if it refers to an
# instance type. For example:
# class C:
# A = List[int]
# x = C.A() <- this is OK
typ = instance_alias_type(vv, mx.builtin_type)
v = Var(name, type=typ)
v.info = info
if isinstance(v, Var):
implicit = info[name].implicit
# An assignment to final attribute is always an error,
# independently of types.
if mx.is_lvalue and not mx.chk.get_final_context():
check_final_member(name, info, mx.msg, mx.context)
return analyze_var(name, v, itype, info, mx, implicit=implicit)
elif isinstance(v, FuncDef):
assert False, "Did not expect a function"
elif not v and name not in ['__getattr__', '__setattr__', '__getattribute__']:
if not mx.is_lvalue:
for method_name in ('__getattribute__', '__getattr__'):
method = info.get_method(method_name)
# __getattribute__ is defined on builtins.object and returns Any, so without
# the guard this search will always find object.__getattribute__ and conclude
# that the attribute exists
if method and method.info.fullname() != 'builtins.object':
function = function_type(method, mx.builtin_type('builtins.function'))
bound_method = bind_self(function, mx.original_type)
typ = map_instance_to_supertype(itype, method.info)
getattr_type = expand_type_by_instance(bound_method, typ)
if isinstance(getattr_type, CallableType):
result = getattr_type.ret_type
# Call the attribute hook before returning.
fullname = '{}.{}'.format(info.fullname(), name)
hook = mx.chk.plugin.get_attribute_hook(fullname)
if hook:
result = hook(AttributeContext(mx.original_type, result,
mx.context, mx.chk))
return result
else:
setattr_meth = info.get_method('__setattr__')
if setattr_meth and setattr_meth.info.fullname() != 'builtins.object':
setattr_func = function_type(setattr_meth, mx.builtin_type('builtins.function'))
bound_type = bind_self(setattr_func, mx.original_type)
typ = map_instance_to_supertype(itype, setattr_meth.info)
setattr_type = expand_type_by_instance(bound_type, typ)
if isinstance(setattr_type, CallableType) and len(setattr_type.arg_types) > 0:
return setattr_type.arg_types[-1]
if itype.type.fallback_to_any:
return AnyType(TypeOfAny.special_form)
# Could not find the member.
if mx.is_super:
mx.msg.undefined_in_superclass(name, mx.context)
return AnyType(TypeOfAny.from_error)
else:
if mx.chk and mx.chk.should_suppress_optional_error([itype]):
return AnyType(TypeOfAny.from_error)
return mx.msg.has_no_attr(mx.original_type, itype, name, mx.context)
|
28,327 |
def create_run(
conn: ConnectionPlus,
exp_id: int,
name: str,
guid: str,
parameters: Optional[Sequence[ParamSpec]] = None,
values: Optional[Sequence[Any]] = None,
metadata: Optional[Mapping[str, Any]] = None,
captured_run_id: Optional[int] = None,
captured_counter: Optional[int] = None,
parent_dataset_links: str = "[]",
create_run_table: bool = True,
snapshot_raw: Optional[str] = None,
description: Optional[RunDescriber] = None,
) -> Tuple[int, int, Optional[str]]:
"""Create a single run for the experiment.
This will register the run in the runs table, the counter in the
experiments table and optionally create a new table with the formatted name.
Note that it is an error to supply both Parameters and RunDescriber
Args:
- conn: the connection to the sqlite database
- exp_id: the experiment id we want to create the run into
- name: a friendly name for this run
- guid: the guid adhering to our internal guid format
    - parameters: optional list of parameters this run has. This is not recommended;
please use description instead.
- values: optional list of values for the parameters
- metadata: optional metadata dictionary
- captured_run_id: The run_id this data was originally captured with.
Should only be supplied when inserting an already completed run
from another database into this database. Otherwise leave as None.
- captured_counter: The counter this data was originally captured with.
Should only be supplied when inserting an already completed run
from another database into this database. Otherwise leave as None.
- create_run_table: Should we create a table to insert the run into.
- snapshot_raw: Raw string of the snapshot to add to the run.
- description: An optional RunDescriber
Returns:
- run_counter: the id of the newly created run (not unique)
- run_id: the row id of the newly created run
- formatted_name: the name of the newly created table
"""
formatted_name: Optional[str]
if parameters is not None and description is not None:
raise RuntimeError(
"Passing both parameters and description to create_run is" "not supported."
)
if parameters is not None:
warnings.warn(
"passing parameters to create_run is deprecated and will "
"be removed in the future"
)
description = RunDescriber(old_to_new(v0.InterDependencies(*parameters)))
elif description is None:
description = RunDescriber(InterDependencies_())
with atomic(conn):
run_counter, formatted_name, run_id = _insert_run(
conn=conn,
exp_id=exp_id,
name=name,
guid=guid,
captured_run_id=captured_run_id,
captured_counter=captured_counter,
parent_dataset_links=parent_dataset_links,
description=description,
)
if metadata:
add_data_to_dynamic_columns(conn, run_id, metadata)
if snapshot_raw:
add_data_to_dynamic_columns(conn, run_id, {"snapshot": snapshot_raw})
_update_experiment_run_counter(conn, exp_id, run_counter)
if create_run_table:
_create_run_table(
conn, formatted_name, description.interdeps.paramspecs, values
)
else:
formatted_name = None
return run_counter, run_id, formatted_name
|
def create_run(
conn: ConnectionPlus,
exp_id: int,
name: str,
guid: str,
parameters: Optional[Sequence[ParamSpec]] = None,
values: Optional[Sequence[Any]] = None,
metadata: Optional[Mapping[str, Any]] = None,
captured_run_id: Optional[int] = None,
captured_counter: Optional[int] = None,
parent_dataset_links: str = "[]",
create_run_table: bool = True,
snapshot_raw: Optional[str] = None,
description: Optional[RunDescriber] = None,
) -> Tuple[int, int, Optional[str]]:
"""Create a single run for the experiment.
This will register the run in the runs table, the counter in the
experiments table and optionally create a new table with the formatted name.
Note that it is an error to supply both Parameters and RunDescriber
Args:
- conn: the connection to the sqlite database
- exp_id: the experiment id we want to create the run into
- name: a friendly name for this run
- guid: the guid adhering to our internal guid format
    - parameters: optional list of parameters this run has. This is not recommended;
please use description instead.
- values: optional list of values for the parameters
- metadata: optional metadata dictionary
- captured_run_id: The run_id this data was originally captured with.
Should only be supplied when inserting an already completed run
from another database into this database. Otherwise leave as None.
- captured_counter: The counter this data was originally captured with.
Should only be supplied when inserting an already completed run
from another database into this database. Otherwise leave as None.
- create_run_table: Should we create a table to insert the run into.
- snapshot_raw: Raw string of the snapshot to add to the run.
- description: An optional RunDescriber
Returns:
- run_counter: the id of the newly created run (not unique)
- run_id: the row id of the newly created run
- formatted_name: the name of the newly created table
"""
formatted_name: Optional[str]
if parameters is not None and description is not None:
raise RuntimeError(
"Passing both parameters and description to create_run is not supported."
)
if parameters is not None:
warnings.warn(
"passing parameters to create_run is deprecated and will "
"be removed in the future"
)
description = RunDescriber(old_to_new(v0.InterDependencies(*parameters)))
elif description is None:
description = RunDescriber(InterDependencies_())
with atomic(conn):
run_counter, formatted_name, run_id = _insert_run(
conn=conn,
exp_id=exp_id,
name=name,
guid=guid,
captured_run_id=captured_run_id,
captured_counter=captured_counter,
parent_dataset_links=parent_dataset_links,
description=description,
)
if metadata:
add_data_to_dynamic_columns(conn, run_id, metadata)
if snapshot_raw:
add_data_to_dynamic_columns(conn, run_id, {"snapshot": snapshot_raw})
_update_experiment_run_counter(conn, exp_id, run_counter)
if create_run_table:
_create_run_table(
conn, formatted_name, description.interdeps.paramspecs, values
)
else:
formatted_name = None
return run_counter, run_id, formatted_name
|
43,600 |
def decompose_hamiltonian(H):
"""Decomposes a hamiltonian into tensor product of pauli matrices
Args:
H (matrix): dimensions 2**n
    Returns:
        list: coefficients for every tensor product of Pauli matrix combinations
        list: tensor product of Pauli matrix combinations
"""
N = int(np.log2(len(H)))
if len(H) - 2 ** N != 0:
raise ValueError("Hamiltonian should be in the form (n^2 x n^2), for any n>=1")
#
paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ]
obs = []
coeffs = []
#
for term in itertools.product(paulis, repeat=N):
matrices = [i._matrix() for i in term]
coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / (2 ** N)
#
if not np.allclose(coeff, 0):
coeffs.append(coeff)
#
if not all(t is qml.Identity for t in term):
obs.append(
functools.reduce(
operator.matmul, [t(i) for i, t in enumerate(term) if t is not qml.Identity]
)
)
else:
obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)]))
# obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)]))
#
return coeffs, obs
|
def decompose_hamiltonian(H):
"""Decomposes a Hermitian matrix into a linear combination of Pauli operators.
Args:
H (matrix): dimensions 2**n
    Returns:
        list: coefficients for every tensor product of Pauli matrix combinations
        list: tensor product of Pauli matrix combinations
"""
N = int(np.log2(len(H)))
if len(H) - 2 ** N != 0:
raise ValueError("Hamiltonian should be in the form (n^2 x n^2), for any n>=1")
#
paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ]
obs = []
coeffs = []
#
for term in itertools.product(paulis, repeat=N):
matrices = [i._matrix() for i in term]
coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / (2 ** N)
#
if not np.allclose(coeff, 0):
coeffs.append(coeff)
#
if not all(t is qml.Identity for t in term):
obs.append(
functools.reduce(
operator.matmul, [t(i) for i, t in enumerate(term) if t is not qml.Identity]
)
)
else:
obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)]))
# obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)]))
#
return coeffs, obs
|
35,365 |
def _download_file(filename, directory=None):
url = _get_file_url(filename, directory)
try:
return _retrieve_file(url, filename)
    except Exception as e:  # Generic exception
raise Exception(
"For the reason mentioned below, retrieving the file from internet failed.\n"
"You can download this file from:\n"
f"{url}\n"
"\n"
"The reported error message is:\n"
f"{str(e)}"
)
|
def _download_file(filename, directory=None):
url = _get_file_url(filename, directory)
try:
return _retrieve_file(url, filename)
    except Exception as e:  # Generic exception
raise RuntimeError(
"For the reason mentioned below, retrieving the file from internet failed.\n"
"You can download this file from:\n"
f"{url}\n"
"\n"
"The reported error message is:\n"
f"{str(e)}"
)
|
12,309 |
def state_number_qobj(dims, state, *, dtype=_data.Dense):
"""
Return a Qobj representation of a quantum state specified by the state
array `state`.
Example:
>>> state_number_qobj([2, 2, 2], [1, 0, 1]) # doctest: +SKIP
Quantum object: dims = [[2, 2, 2], [1, 1, 1]], \
shape = [8, 1], type = ket
Qobj data =
[[ 0.]
[ 0.]
[ 0.]
[ 0.]
[ 0.]
[ 1.]
[ 0.]
[ 0.]]
Parameters
----------
dims : list or array
The quantum state dimensions array, as it would appear in a Qobj.
state : list
State number array.
dtype : type or str
Storage representation. Any data-layer known to `qutip.data.to` is
accepted.
Returns
-------
state : :class:`qutip.Qobj.qobj`
The state as a :class:`qutip.Qobj.qobj` instance.
    .. note::
Deprecated in v5, use :func:`basis` instead.
"""
warnings.warn("basis() is a drop-in replacement for this",
DeprecationWarning)
return basis(dims, state, dtype=dtype)
|
def state_number_qobj(dims, state, *, dtype=_data.Dense):
"""
Return a Qobj representation of a quantum state specified by the state
array `state`.
Example:
>>> state_number_qobj([2, 2, 2], [1, 0, 1]) # doctest: +SKIP
Quantum object: dims = [[2, 2, 2], [1, 1, 1]], \
shape = [8, 1], type = ket
Qobj data =
[[ 0.]
[ 0.]
[ 0.]
[ 0.]
[ 0.]
[ 1.]
[ 0.]
[ 0.]]
Parameters
----------
dims : list or array
The quantum state dimensions array, as it would appear in a Qobj.
state : list
State number array.
dtype : type or str
Storage representation. Any data-layer known to `qutip.data.to` is
accepted.
Returns
-------
state : :class:`qutip.Qobj.qobj`
The state as a :class:`qutip.Qobj.qobj` instance.
    .. note::
Deprecated in QuTiP 5.0, use :func:`basis` instead.
"""
warnings.warn("basis() is a drop-in replacement for this",
DeprecationWarning)
return basis(dims, state, dtype=dtype)
|
31,799 |
def main():
username = demisto.params().get('credentials').get('identifier')
password = demisto.params().get('credentials').get('password')
headers = {'Accept': 'application', 'Content-Type': 'application/json'}
verify_certificate = not demisto.params().get('insecure', False)
base_url = urljoin(demisto.params()['url'])
proxy = demisto.params().get('proxy', False)
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'user-authentication':
result = authenticate_command(client)
demisto.results(result)
elif demisto.command() == 'get-all-workflow':
result = workflow_command(client, demisto.args())
demisto.results(result)
elif demisto.command() == 'create-pp-ticket':
result = create_pp_ticket_command(client, demisto.args())
demisto.results(result)
elif demisto.command() == 'pca':
result = pca_new_command(client, demisto.args())
demisto.results(result)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
username = demisto.params().get('credentials').get('identifier')
password = demisto.params().get('credentials').get('password')
headers = {'Accept': 'application', 'Content-Type': 'application/json'}
verify_certificate = not demisto.params().get('insecure', False)
base_url = urljoin(demisto.params()['url'])
proxy = demisto.params().get('proxy', False)
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'user-authentication':
return_results(authenticate_command(client))
elif demisto.command() == 'get-all-workflow':
result = workflow_command(client, demisto.args())
demisto.results(result)
elif demisto.command() == 'create-pp-ticket':
result = create_pp_ticket_command(client, demisto.args())
demisto.results(result)
elif demisto.command() == 'pca':
result = pca_new_command(client, demisto.args())
demisto.results(result)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
60 |
def add_cover(cover_url, ekey, account=None):
"""
Adds a cover to coverstore and returns the cover id.
:param str cover_url: URL of cover image
:param str ekey: Edition key /book/OL..M
:rtype: int or None
:return: Cover id, or None if upload did not succeed
"""
olid = ekey.split('/')[-1]
coverstore_url = config.get('coverstore_url').rstrip('/')
upload_url = coverstore_url + '/b/upload2'
if upload_url.startswith('//'):
upload_url = '{}:{}'.format(web.ctx.get('protocol', 'http'), upload_url)
user = account or accounts.get_current_user()
if not user:
raise RuntimeError("accounts.get_current_user() failed")
params = {
'author': user.get('key') or user.get('_key'),
'data': None,
'source_url': cover_url,
'olid': olid,
'ip': web.ctx.ip,
}
reply = None
for attempt in range(10):
try:
payload = requests.compat.urlencode(params).encode('utf-8')
response = requests.post(upload_url, data=payload)
except requests.HTTPError:
sleep(2)
continue
body = response.text
if response.getcode() == 500:
raise CoverNotSaved(body)
if body not in ['', 'None']:
reply = response.json()
if response.getcode() == 200 and 'id' in reply:
break
sleep(2)
if not reply or reply.get('message') == 'Invalid URL':
return
cover_id = int(reply['id'])
return cover_id
|
def add_cover(cover_url, ekey, account=None):
"""
Adds a cover to coverstore and returns the cover id.
:param str cover_url: URL of cover image
:param str ekey: Edition key /book/OL..M
:rtype: int or None
:return: Cover id, or None if upload did not succeed
"""
olid = ekey.split('/')[-1]
coverstore_url = config.get('coverstore_url').rstrip('/')
upload_url = coverstore_url + '/b/upload2'
if upload_url.startswith('//'):
upload_url = '{}:{}'.format(web.ctx.get('protocol', 'http'), upload_url)
user = account or accounts.get_current_user()
if not user:
raise RuntimeError("accounts.get_current_user() failed")
params = {
'author': user.get('key') or user.get('_key'),
'data': None,
'source_url': cover_url,
'olid': olid,
'ip': web.ctx.ip,
}
reply = None
for attempt in range(10):
try:
payload = requests.compat.urlencode(params).encode('utf-8')
response = requests.post(upload_url, data=payload)
except requests.HTTPError:
sleep(2)
continue
body = response.text
        if response.status_code == 500:
raise CoverNotSaved(body)
if body not in ['', 'None']:
reply = response.json()
if response.status_code == 200 and 'id' in reply:
break
sleep(2)
if not reply or reply.get('message') == 'Invalid URL':
return
cover_id = int(reply['id'])
return cover_id
|
58,418 |
def matview_search_filter(filters, model, for_downloads=False):
queryset = model.objects.all()
recipient_scope_q = Q(recipient_location_country_code="USA") | Q(recipient_location_country_name="UNITED STATES")
pop_scope_q = Q(pop_country_code="USA") | Q(pop_country_name="UNITED STATES")
faba_flag = False
faba_queryset = FinancialAccountsByAwards.objects.filter(award__isnull=False)
for key, value in filters.items():
if value is None:
raise InvalidParameterException("Invalid filter: " + key + " has null as its value.")
key_list = [
"keywords",
"elasticsearch_keyword",
"time_period",
"award_type_codes",
"prime_and_sub_award_types",
"agencies",
"legal_entities",
"recipient_id",
"recipient_search_text",
"recipient_scope",
"recipient_locations",
"recipient_type_names",
"place_of_performance_scope",
"place_of_performance_locations",
"award_amounts",
"award_ids",
"program_numbers",
"naics_codes",
"psc_codes",
"contract_pricing_type_codes",
"set_aside_type_codes",
"extent_competed_type_codes",
"tas_codes",
# next 3 keys used by federal account page
"federal_account_ids",
"object_class",
"program_activity",
]
if key not in key_list:
raise InvalidParameterException("Invalid filter: " + key + " does not exist.")
if key == "keywords":
def keyword_parse(keyword):
# keyword_ts_vector & award_ts_vector are Postgres TS_vectors.
# keyword_ts_vector = recipient_name + naics_code + naics_description
# + psc_description + awards_description
# award_ts_vector = piid + fain + uri
filter_obj = Q(keyword_ts_vector=keyword) | Q(award_ts_vector=keyword)
if keyword.isnumeric():
filter_obj |= Q(naics_code__contains=keyword)
if len(keyword) == 4 and PSC.objects.all().filter(code__iexact=keyword).exists():
filter_obj |= Q(product_or_service_code__iexact=keyword)
return filter_obj
filter_obj = Q()
for keyword in value:
filter_obj |= keyword_parse(keyword)
potential_duns = list(filter((lambda x: len(x) > 7 and len(x) < 10), value))
if len(potential_duns) > 0:
filter_obj |= Q(recipient_unique_id__in=potential_duns) | Q(
parent_recipient_unique_id__in=potential_duns
)
queryset = queryset.filter(filter_obj)
elif key == "elasticsearch_keyword":
keyword = " ".join(value) if isinstance(value, list) else value
transaction_ids = elasticsearch_helper.get_download_ids(keyword=keyword, field="transaction_id")
# flatten IDs
transaction_ids = list(itertools.chain.from_iterable(transaction_ids))
logger.info("Found {} transactions based on keyword: {}".format(len(transaction_ids), keyword))
transaction_ids = [str(transaction_id) for transaction_id in transaction_ids]
queryset = queryset.extra(
where=['"transaction_normalized"."id" = ANY(\'{{{}}}\'::int[])'.format(",".join(transaction_ids))]
)
elif key == "time_period":
min_date = API_SEARCH_MIN_DATE
if for_downloads:
min_date = API_MIN_DATE
queryset &= combine_date_range_queryset(value, model, min_date, API_MAX_DATE)
elif key == "award_type_codes":
queryset = queryset.filter(type__in=value)
elif key == "prime_and_sub_award_types":
award_types = value.get("prime_awards")
if award_types:
queryset = queryset.filter(type__in=award_types)
elif key == "agencies":
# TODO: Make function to match agencies in award filter throwing dupe error
funding_toptier = Q()
funding_subtier = Q()
awarding_toptier = Q()
awarding_subtier = Q()
for v in value:
type = v["type"]
tier = v["tier"]
name = v["name"]
if type == "funding":
if tier == "toptier":
funding_toptier |= Q(funding_toptier_agency_name=name)
elif tier == "subtier":
if "toptier_name" in v:
funding_subtier |= Q(funding_subtier_agency_name=name) & Q(
funding_toptier_agency_name=v["toptier_name"]
)
else:
funding_subtier |= Q(funding_subtier_agency_name=name)
elif type == "awarding":
if tier == "toptier":
awarding_toptier |= Q(awarding_toptier_agency_name=name)
elif tier == "subtier":
if "toptier_name" in v:
awarding_subtier |= Q(awarding_subtier_agency_name=name) & Q(
awarding_toptier_agency_name=v["toptier_name"]
)
else:
awarding_subtier |= Q(awarding_subtier_agency_name=name)
awarding_queryfilter = Q()
funding_queryfilter = Q()
# Since these are Q filters, no DB hits for boolean checks
if funding_toptier:
funding_queryfilter |= funding_toptier
if funding_subtier:
funding_queryfilter |= funding_subtier
if awarding_toptier:
awarding_queryfilter |= awarding_toptier
if awarding_subtier:
awarding_queryfilter |= awarding_subtier
queryset = queryset.filter(funding_queryfilter & awarding_queryfilter)
elif key == "legal_entities":
# This filter key has effectively become obsolete by recipient_search_text
msg = 'API request included "{}" key. No filtering will occur with provided value "{}"'
logger.info(msg.format(key, value))
# in_query = [v for v in value]
# if len(in_query) != 0:
# queryset &= model.objects.filter(recipient_id__in=in_query)
elif key == "recipient_search_text":
all_filters_obj = Q()
for recip in value:
upper_recipient_string = str(recip).upper()
# recipient_name_ts_vector is a postgres TS_Vector
filter_obj = Q(recipient_name_ts_vector=upper_recipient_string)
if len(upper_recipient_string) == 9 and upper_recipient_string[:5].isnumeric():
filter_obj |= Q(recipient_unique_id=upper_recipient_string)
all_filters_obj |= filter_obj
queryset = queryset.filter(all_filters_obj)
elif key == "recipient_id":
filter_obj = Q()
recipient_hash = value[:-2]
if value.endswith("P"): # For parent types, gather all of the children's transactions
parent_duns_rows = RecipientProfile.objects.filter(
recipient_hash=recipient_hash, recipient_level="P"
).values("recipient_unique_id")
if len(parent_duns_rows) == 1:
parent_duns = parent_duns_rows[0]["recipient_unique_id"]
filter_obj = Q(parent_recipient_unique_id=parent_duns)
elif len(parent_duns_rows) > 2:
# shouldn't occur
raise InvalidParameterException("Non-unique parent record found in RecipientProfile")
elif value.endswith("C"):
filter_obj = Q(recipient_hash=recipient_hash, parent_recipient_unique_id__isnull=False)
else:
# "R" recipient level
filter_obj = Q(recipient_hash=recipient_hash, parent_recipient_unique_id__isnull=True)
queryset = queryset.filter(filter_obj)
elif key == "recipient_scope":
if value == "domestic":
queryset = queryset.filter(recipient_scope_q)
elif value == "foreign":
queryset = queryset.exclude(recipient_scope_q)
else:
raise InvalidParameterException("Invalid filter: recipient_scope type is invalid.")
elif key == "recipient_locations":
queryset = queryset.filter(geocode_filter_locations("recipient_location", value))
elif key == "recipient_type_names":
if len(value) != 0:
queryset = queryset.filter(business_categories__overlap=value)
elif key == "place_of_performance_scope":
if value == "domestic":
queryset = queryset.filter(pop_scope_q)
elif value == "foreign":
queryset = queryset.exclude(pop_scope_q)
else:
raise InvalidParameterException("Invalid filter: place_of_performance_scope is invalid.")
elif key == "place_of_performance_locations":
queryset = queryset.filter(geocode_filter_locations("pop", value))
elif key == "award_amounts":
queryset &= total_obligation_queryset(value, model, filters)
elif key == "award_ids":
queryset = build_award_ids_filter(queryset, value, ("piid", "fain", "uri"))
elif key == "program_numbers":
in_query = [v for v in value]
if len(in_query) != 0:
queryset = queryset.filter(cfda_number__in=in_query)
elif key == "naics_codes":
if isinstance(value, list):
require = value
elif isinstance(value, dict):
require = value.get("require") or []
if value.get("exclude"):
raise NotImplementedException(
"NOT IMPLEMENTED: postgres endpoint does not currently support excluded naics!"
)
else:
raise InvalidParameterException("naics_codes must be an array or object")
if [value for value in require if len(str(value)) not in [2, 4, 6]]:
raise InvalidParameterException(
"naics code filtering only supported for codes with lengths of 2, 4, and 6"
)
regex = f"^({'|'.join([str(elem) for elem in require])}).*"
queryset = queryset.filter(naics_code__regex=regex)
elif key == "psc_codes":
in_query = [v for v in value]
if len(in_query) != 0:
queryset = queryset.filter(product_or_service_code__in=in_query)
elif key == "contract_pricing_type_codes":
in_query = [v for v in value]
if len(in_query) != 0:
queryset = queryset.filter(type_of_contract_pricing__in=in_query)
elif key == "set_aside_type_codes":
or_queryset = Q()
for v in value:
or_queryset |= Q(type_set_aside__exact=v)
queryset = queryset.filter(or_queryset)
elif key == "extent_competed_type_codes":
or_queryset = Q()
for v in value:
or_queryset |= Q(extent_competed__exact=v)
queryset = queryset.filter(or_queryset)
        # Because these two filters OR with each other, we need to know about the presence of both filters to know what to do
# This filter was picked arbitrarily to be the one that checks for the other
elif key == TasCodes.underscore_name:
if TreasuryAccounts.underscore_name in filters.keys():
q = TasCodes.build_tas_codes_filter(queryset, value)
q |= TreasuryAccounts.build_tas_codes_filter(queryset, filters[TreasuryAccounts.underscore_name])
queryset = queryset.filter(q)
else:
queryset = queryset.filter(TasCodes.build_tas_codes_filter(queryset, value))
elif key == TreasuryAccounts.underscore_name and TasCodes.underscore_name not in filters.keys():
queryset = queryset.filter(TreasuryAccounts.build_tas_codes_filter(queryset, value))
# Federal Account Filter
elif key == "federal_account_ids":
faba_flag = True
or_queryset = Q()
for v in value:
or_queryset |= Q(treasury_account__federal_account_id=v)
faba_queryset = faba_queryset.filter(or_queryset)
# Federal Account Filter
elif key == "object_class":
result = Q()
for oc in value:
subresult = Q()
subresult &= filter_on("award__financial_set__object_class", "object_class", oc)
result |= subresult
queryset = queryset.filter(result)
# Federal Account Filter
elif key == "program_activity":
or_queryset = Q()
for v in value:
or_queryset |= Q(award__financial_set__program_activity__id=v)
queryset = queryset.filter(or_queryset)
if faba_flag:
award_ids = faba_queryset.values("award_id")
queryset = queryset.filter(award_id__in=award_ids)
return queryset
|
def matview_search_filter(filters, model, for_downloads=False):
queryset = model.objects.all()
recipient_scope_q = Q(recipient_location_country_code="USA") | Q(recipient_location_country_name="UNITED STATES")
pop_scope_q = Q(pop_country_code="USA") | Q(pop_country_name="UNITED STATES")
faba_flag = False
faba_queryset = FinancialAccountsByAwards.objects.filter(award__isnull=False)
for key, value in filters.items():
if value is None:
raise InvalidParameterException("Invalid filter: " + key + " has null as its value.")
key_list = [
"keywords",
"elasticsearch_keyword",
"time_period",
"award_type_codes",
"prime_and_sub_award_types",
"agencies",
"legal_entities",
"recipient_id",
"recipient_search_text",
"recipient_scope",
"recipient_locations",
"recipient_type_names",
"place_of_performance_scope",
"place_of_performance_locations",
"award_amounts",
"award_ids",
"program_numbers",
"naics_codes",
"psc_codes",
"contract_pricing_type_codes",
"set_aside_type_codes",
"extent_competed_type_codes",
"tas_codes",
# next 3 keys used by federal account page
"federal_account_ids",
"object_class",
"program_activity",
]
if key not in key_list:
raise InvalidParameterException("Invalid filter: " + key + " does not exist.")
if key == "keywords":
def keyword_parse(keyword):
# keyword_ts_vector & award_ts_vector are Postgres TS_vectors.
# keyword_ts_vector = recipient_name + naics_code + naics_description
# + psc_description + awards_description
# award_ts_vector = piid + fain + uri
filter_obj = Q(keyword_ts_vector=keyword) | Q(award_ts_vector=keyword)
if keyword.isnumeric():
filter_obj |= Q(naics_code__contains=keyword)
if len(keyword) == 4 and PSC.objects.all().filter(code__iexact=keyword).exists():
filter_obj |= Q(product_or_service_code__iexact=keyword)
return filter_obj
filter_obj = Q()
for keyword in value:
filter_obj |= keyword_parse(keyword)
potential_duns = list(filter((lambda x: len(x) > 7 and len(x) < 10), value))
if len(potential_duns) > 0:
filter_obj |= Q(recipient_unique_id__in=potential_duns) | Q(
parent_recipient_unique_id__in=potential_duns
)
queryset = queryset.filter(filter_obj)
elif key == "elasticsearch_keyword":
keyword = " ".join(value) if isinstance(value, list) else value
transaction_ids = elasticsearch_helper.get_download_ids(keyword=keyword, field="transaction_id")
# flatten IDs
transaction_ids = list(itertools.chain.from_iterable(transaction_ids))
logger.info("Found {} transactions based on keyword: {}".format(len(transaction_ids), keyword))
transaction_ids = [str(transaction_id) for transaction_id in transaction_ids]
queryset = queryset.extra(
where=['"transaction_normalized"."id" = ANY(\'{{{}}}\'::int[])'.format(",".join(transaction_ids))]
)
elif key == "time_period":
min_date = API_SEARCH_MIN_DATE
if for_downloads:
min_date = API_MIN_DATE
queryset &= combine_date_range_queryset(value, model, min_date, API_MAX_DATE)
elif key == "award_type_codes":
queryset = queryset.filter(type__in=value)
elif key == "prime_and_sub_award_types":
award_types = value.get("prime_awards")
if award_types:
queryset = queryset.filter(type__in=award_types)
elif key == "agencies":
# TODO: Make function to match agencies in award filter throwing dupe error
funding_toptier = Q()
funding_subtier = Q()
awarding_toptier = Q()
awarding_subtier = Q()
for v in value:
type = v["type"]
tier = v["tier"]
name = v["name"]
if type == "funding":
if tier == "toptier":
funding_toptier |= Q(funding_toptier_agency_name=name)
elif tier == "subtier":
if "toptier_name" in v:
funding_subtier |= Q(funding_subtier_agency_name=name) & Q(
funding_toptier_agency_name=v["toptier_name"]
)
else:
funding_subtier |= Q(funding_subtier_agency_name=name)
elif type == "awarding":
if tier == "toptier":
awarding_toptier |= Q(awarding_toptier_agency_name=name)
elif tier == "subtier":
if "toptier_name" in v:
awarding_subtier |= Q(awarding_subtier_agency_name=name) & Q(
awarding_toptier_agency_name=v["toptier_name"]
)
else:
awarding_subtier |= Q(awarding_subtier_agency_name=name)
awarding_queryfilter = Q()
funding_queryfilter = Q()
# Since these are Q filters, no DB hits for boolean checks
if funding_toptier:
funding_queryfilter |= funding_toptier
if funding_subtier:
funding_queryfilter |= funding_subtier
if awarding_toptier:
awarding_queryfilter |= awarding_toptier
if awarding_subtier:
awarding_queryfilter |= awarding_subtier
queryset = queryset.filter(funding_queryfilter & awarding_queryfilter)
elif key == "legal_entities":
# This filter key has effectively become obsolete by recipient_search_text
msg = 'API request included "{}" key. No filtering will occur with provided value "{}"'
logger.info(msg.format(key, value))
# in_query = [v for v in value]
# if len(in_query) != 0:
# queryset &= model.objects.filter(recipient_id__in=in_query)
elif key == "recipient_search_text":
all_filters_obj = Q()
for recip in value:
upper_recipient_string = str(recip).upper()
# recipient_name_ts_vector is a postgres TS_Vector
filter_obj = Q(recipient_name_ts_vector=upper_recipient_string)
if len(upper_recipient_string) == 9 and upper_recipient_string[:5].isnumeric():
filter_obj |= Q(recipient_unique_id=upper_recipient_string)
all_filters_obj |= filter_obj
queryset = queryset.filter(all_filters_obj)
elif key == "recipient_id":
filter_obj = Q()
recipient_hash = value[:-2]
if value.endswith("P"): # For parent types, gather all of the children's transactions
parent_duns_rows = RecipientProfile.objects.filter(
recipient_hash=recipient_hash, recipient_level="P"
).values("recipient_unique_id")
if len(parent_duns_rows) == 1:
parent_duns = parent_duns_rows[0]["recipient_unique_id"]
filter_obj = Q(parent_recipient_unique_id=parent_duns)
elif len(parent_duns_rows) > 2:
# shouldn't occur
raise InvalidParameterException("Non-unique parent record found in RecipientProfile")
elif value.endswith("C"):
filter_obj = Q(recipient_hash=recipient_hash, parent_recipient_unique_id__isnull=False)
else:
# "R" recipient level
filter_obj = Q(recipient_hash=recipient_hash, parent_recipient_unique_id__isnull=True)
queryset = queryset.filter(filter_obj)
elif key == "recipient_scope":
if value == "domestic":
queryset = queryset.filter(recipient_scope_q)
elif value == "foreign":
queryset = queryset.exclude(recipient_scope_q)
else:
raise InvalidParameterException("Invalid filter: recipient_scope type is invalid.")
elif key == "recipient_locations":
queryset = queryset.filter(geocode_filter_locations("recipient_location", value))
elif key == "recipient_type_names":
if len(value) != 0:
queryset = queryset.filter(business_categories__overlap=value)
elif key == "place_of_performance_scope":
if value == "domestic":
queryset = queryset.filter(pop_scope_q)
elif value == "foreign":
queryset = queryset.exclude(pop_scope_q)
else:
raise InvalidParameterException("Invalid filter: place_of_performance_scope is invalid.")
elif key == "place_of_performance_locations":
queryset = queryset.filter(geocode_filter_locations("pop", value))
elif key == "award_amounts":
queryset &= total_obligation_queryset(value, model, filters)
elif key == "award_ids":
queryset = build_award_ids_filter(queryset, value, ("piid", "fain", "uri"))
elif key == "program_numbers":
in_query = [v for v in value]
if len(in_query) != 0:
queryset = queryset.filter(cfda_number__in=in_query)
elif key == "naics_codes":
if isinstance(value, list):
require = value
elif isinstance(value, dict):
require = value.get("require") or []
if value.get("exclude"):
raise NotImplementedException(
"NOT IMPLEMENTED: postgres endpoint does not currently support excluded naics!"
)
else:
raise InvalidParameterException("naics_codes must be an array or object")
if [value for value in require if len(str(value)) not in [2, 4, 6]]:
raise InvalidParameterException(
"naics code filtering only supported for codes with lengths of 2, 4, and 6"
)
regex = f"^({'|'.join([str(elem) for elem in require])}).*"
queryset = queryset.filter(naics_code__regex=regex)
elif key == "psc_codes":
in_query = [v for v in value]
if len(in_query) != 0:
queryset = queryset.filter(product_or_service_code__in=in_query)
elif key == "contract_pricing_type_codes":
in_query = [v for v in value]
if len(in_query) != 0:
queryset = queryset.filter(type_of_contract_pricing__in=in_query)
elif key == "set_aside_type_codes":
or_queryset = Q()
for v in value:
or_queryset |= Q(type_set_aside__exact=v)
queryset = queryset.filter(or_queryset)
elif key == "extent_competed_type_codes":
or_queryset = Q()
for v in value:
or_queryset |= Q(extent_competed__exact=v)
queryset = queryset.filter(or_queryset)
        # Because these two filters OR with each other, we need to know about the presence of both filters to know what to do
# This filter was picked arbitrarily to be the one that checks for the other
elif key == TasCodes.underscore_name:
q = TasCodes.build_tas_codes_filter(queryset, value)
if TreasuryAccounts.underscore_name in filters.keys():
q |= TreasuryAccounts.build_tas_codes_filter(queryset, filters[TreasuryAccounts.underscore_name])
queryset = queryset.filter(q)
elif key == TreasuryAccounts.underscore_name and TasCodes.underscore_name not in filters.keys():
queryset = queryset.filter(TreasuryAccounts.build_tas_codes_filter(queryset, value))
# Federal Account Filter
elif key == "federal_account_ids":
faba_flag = True
or_queryset = Q()
for v in value:
or_queryset |= Q(treasury_account__federal_account_id=v)
faba_queryset = faba_queryset.filter(or_queryset)
# Federal Account Filter
elif key == "object_class":
result = Q()
for oc in value:
subresult = Q()
subresult &= filter_on("award__financial_set__object_class", "object_class", oc)
result |= subresult
queryset = queryset.filter(result)
# Federal Account Filter
elif key == "program_activity":
or_queryset = Q()
for v in value:
or_queryset |= Q(award__financial_set__program_activity__id=v)
queryset = queryset.filter(or_queryset)
if faba_flag:
award_ids = faba_queryset.values("award_id")
queryset = queryset.filter(award_id__in=award_ids)
return queryset
|
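A minimal usage sketch for the filter builder above; the filter values are assumptions chosen to exercise a few branches, and the matview model name is a hypothetical stand-in for whichever search model the caller actually passes in.

filters = {
    "award_type_codes": ["A", "B", "C", "D"],
    "recipient_scope": "domestic",
    "naics_codes": ["54"],
}
# SomeAwardMatview is a placeholder for a registered materialized-view model.
queryset = matview_search_filter(filters, SomeAwardMatview)
print(queryset.query)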
29,177 |
def filtered_skills_with_none_type_keys(degrees_of_mastery):
"""Given a dict of skills id's with there corresponding
mastery. It first split the dict and then returns a list of skill
id.
Args:
degrees_of_mastery: dict. The dict of type (skill_ids, float|None)
Returns:
list. Sorted list of skill ids.
"""
skill_dict_with_float_value = {
skill_id: degree for skill_id, degree in degrees_of_mastery.items()
if degree is not None}
sorted_skill_ids_with_float_value = sorted(
skill_dict_with_float_value, key=skill_dict_with_float_value.get)
skill_ids_with_none_value = [
skill_id for skill_id, degree in degrees_of_mastery.items()
if degree is None]
if feconf.MAX_NUMBER_OF_SKILL_IDS <= len(skill_ids_with_none_value):
return skill_ids_with_none_value[:feconf.MAX_NUMBER_OF_SKILL_IDS]
else:
return (skill_ids_with_none_value + sorted_skill_ids_with_float_value[:(
feconf.MAX_NUMBER_OF_SKILL_IDS - len(skill_ids_with_none_value))])
|
def filtered_skills_with_none_type_keys(degrees_of_mastery):
"""Given a dict of skills id's with there corresponding
mastery. It first split the dict and then returns a list of skill
id.
Args:
degrees_of_mastery: dict(str, float|None). Dict mapping skill ids to mastery level.
Returns:
list. Sorted list of skill ids.
"""
skill_dict_with_float_value = {
skill_id: degree for skill_id, degree in degrees_of_mastery.items()
if degree is not None}
sorted_skill_ids_with_float_value = sorted(
skill_dict_with_float_value, key=skill_dict_with_float_value.get)
skill_ids_with_none_value = [
skill_id for skill_id, degree in degrees_of_mastery.items()
if degree is None]
if feconf.MAX_NUMBER_OF_SKILL_IDS <= len(skill_ids_with_none_value):
return skill_ids_with_none_value[:feconf.MAX_NUMBER_OF_SKILL_IDS]
else:
return (skill_ids_with_none_value + sorted_skill_ids_with_float_value[:(
feconf.MAX_NUMBER_OF_SKILL_IDS - len(skill_ids_with_none_value))])
|
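A small illustrative call, assuming feconf.MAX_NUMBER_OF_SKILL_IDS is at least 3: skill ids with no recorded mastery come first, followed by the remaining ids in ascending order of mastery.

degrees = {'skill_a': 0.7, 'skill_b': None, 'skill_c': 0.2}
print(filtered_skills_with_none_type_keys(degrees))
# -> ['skill_b', 'skill_c', 'skill_a']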
43,927 |
def expansion(la, lb, ra, rb, alpha, beta, t):
r"""Compute Hermite Gaussian expansion coefficients recursively for two Gaussian functions.
An overlap distribution, which defines the product of two Gaussians, can be written as a Hermite
expansion as [`Helgaker (1995) p798 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
\Omega_{ij} = \sum_{t=0}^{i+j} E_t^{ij} \Lambda_t,
where :math:`\Lambda` is a Hermite polynomial of degree t, :math:`E` denotes the expansion
    coefficients, :math:`\Omega_{ij} = G_i G_j` and :math:`G` is a Gaussian function. The overlap
integral between two Gaussian functions can be simply computed by integrating over the overlap
distribution which requires obtaining the expansion coefficients. This can be done recursively
as [`Helgaker (1995) p799 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
E_t^{i+1,j} = \frac{1}{2p} E_{t-1}^{ij} - \frac{qr}{\alpha} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
and
.. math::
E_t^{i,j+1} = \frac{1}{2p} E_{t-1}^{ij} + \frac{qr}{\beta} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
where :math:`p = \alpha + \beta` and :math:`q = \alpha \beta / (\alpha + \beta)` are computed
from the Gaussian exponents :math:`\alpha, \beta` and the position :math:`r` is computed as
:math:`r = r_\alpha - r_\beta`. The starting coefficient is
.. math::
E_0^{00} = e^{-qr^2},
    and :math:`E_t^{ij} = 0` if :math:`t < 0` or :math:`t > (i+j)`.
Args:
la (integer): angular momentum component for the first Gaussian function
lb (integer): angular momentum component for the second Gaussian function
        ra (float): position component of the first Gaussian function
        rb (float): position component of the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
t(integer): number of nodes in the Hermite Gaussian
Returns:
array[float]: expansion coefficients for each Gaussian combination
**Example**
>>> la, lb = 0, 0
>>> ra, rb = 0.0, 0.0
>>> alpha = np.array([3.42525091])
>>> beta = np.array([3.42525091])
>>> t = 0
>>> c = expansion(la, lb, ra, rb, alpha, beta, t)
>>> c
array([1.])
"""
p = alpha + beta
q = alpha * beta / p
r = ra - rb
if la == lb == t == 0:
return anp.exp(-q * r ** 2)
elif t < 0 or t > (la + lb):
return 0.0
elif lb == 0:
return (
(1 / (2 * p)) * expansion(la - 1, lb, ra, rb, alpha, beta, t - 1)
- (q * r / alpha) * expansion(la - 1, lb, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la - 1, lb, ra, rb, alpha, beta, t + 1)
)
else:
return (
(1 / (2 * p)) * expansion(la, lb - 1, ra, rb, alpha, beta, t - 1)
+ (q * r / beta) * expansion(la, lb - 1, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la, lb - 1, ra, rb, alpha, beta, t + 1)
)
|
def expansion(la, lb, ra, rb, alpha, beta, t):
r"""Compute Hermite Gaussian expansion coefficients recursively for two Gaussian functions.
An overlap distribution, which defines the product of two Gaussians, can be written as a Hermite
expansion as [`Helgaker (1995) p798 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
\Omega_{ij} = \sum_{t=0}^{i+j} E_t^{ij} \Lambda_t,
where :math:`\Lambda` is a Hermite polynomial of degree t, :math:`E` denotes the expansion
    coefficients, :math:`\Omega_{ij} = G_i G_j` and :math:`G` is a Gaussian function. The overlap
integral between two Gaussian functions can be simply computed by integrating over the overlap
distribution which requires obtaining the expansion coefficients. This can be done recursively
as [`Helgaker (1995) p799 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
E_t^{i+1,j} = \frac{1}{2p} E_{t-1}^{ij} - \frac{qr}{\alpha} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
and
.. math::
E_t^{i,j+1} = \frac{1}{2p} E_{t-1}^{ij} + \frac{qr}{\beta} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
where :math:`p = \alpha + \beta` and :math:`q = \alpha \beta / (\alpha + \beta)` are computed
from the Gaussian exponents :math:`\alpha, \beta` and the position :math:`r` is computed as
:math:`r = r_\alpha - r_\beta`. The starting coefficient is
.. math::
E_0^{00} = e^{-qr^2},
    and :math:`E_t^{ij} = 0` if :math:`t < 0` or :math:`t > (i+j)`.
Args:
la (integer): angular momentum component for the first Gaussian function
lb (integer): angular momentum component for the second Gaussian function
        ra (float): position component of the first Gaussian function
        rb (float): position component of the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
t (integer): number of nodes in the Hermite Gaussian
Returns:
array[float]: expansion coefficients for each Gaussian combination
**Example**
>>> la, lb = 0, 0
>>> ra, rb = 0.0, 0.0
>>> alpha = np.array([3.42525091])
>>> beta = np.array([3.42525091])
>>> t = 0
>>> c = expansion(la, lb, ra, rb, alpha, beta, t)
>>> c
array([1.])
"""
p = alpha + beta
q = alpha * beta / p
r = ra - rb
if la == lb == t == 0:
return anp.exp(-q * r ** 2)
elif t < 0 or t > (la + lb):
return 0.0
elif lb == 0:
return (
(1 / (2 * p)) * expansion(la - 1, lb, ra, rb, alpha, beta, t - 1)
- (q * r / alpha) * expansion(la - 1, lb, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la - 1, lb, ra, rb, alpha, beta, t + 1)
)
else:
return (
(1 / (2 * p)) * expansion(la, lb - 1, ra, rb, alpha, beta, t - 1)
+ (q * r / beta) * expansion(la, lb - 1, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la, lb - 1, ra, rb, alpha, beta, t + 1)
)
|
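A short sketch of how the coefficient above feeds into an overlap integral; the closed form S = E_0^{00} * sqrt(pi / p) for two s-type Gaussians is the standard Hermite-expansion result and is an editorial addition here, not part of the source.

import numpy as np

alpha = np.array([3.42525091])
beta = np.array([3.42525091])
e0 = expansion(0, 0, 0.0, 0.0, alpha, beta, 0)     # E_0^{00} for two s-type Gaussians
overlap_1d = e0 * np.sqrt(np.pi / (alpha + beta))  # 1D overlap integral
print(overlap_1d)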
17,408 |
def _infer_dtype(array, name=None):
"""Given an object array with no missing values, infer its dtype from its
first element
"""
if array.dtype.kind != "O":
raise TypeError("infer_type must be called on a dtype=object array")
if array.size == 0:
return np.dtype(float)
native_dtypes = set(map(lambda x: type(x), array.flatten()))
if len(native_dtypes) > 1 and native_dtypes != {bytes, str}:
raise ValueError(
"unable to infer dtype on variable {!r}; object array "
"contains mixed native types: {}".format(
name, ",".join(map(lambda x: x.__name__, native_dtypes))
)
)
element = array[(0,) * array.ndim]
if isinstance(element, (bytes, str)):
return strings.create_vlen_dtype(type(element))
dtype = np.array(element).dtype
if dtype.kind != "O":
return dtype
raise ValueError(
"unable to infer dtype on variable {!r}; xarray "
"cannot serialize arbitrary Python objects".format(name)
)
|
def _infer_dtype(array, name=None):
"""Given an object array with no missing values, infer its dtype from its
first element
"""
if array.dtype.kind != "O":
raise TypeError("infer_type must be called on a dtype=object array")
if array.size == 0:
return np.dtype(float)
native_dtypes = set(map(lambda x: type(x), array.flatten()))
if len(native_dtypes) > 1 and native_dtypes != {bytes, str}:
raise ValueError(
"unable to infer dtype on variable {!r}; object array "
"contains mixed native types: {}".format(
name, ", ".join(map(lambda x: x.__name__, native_dtypes))
)
)
element = array[(0,) * array.ndim]
if isinstance(element, (bytes, str)):
return strings.create_vlen_dtype(type(element))
dtype = np.array(element).dtype
if dtype.kind != "O":
return dtype
raise ValueError(
"unable to infer dtype on variable {!r}; xarray "
"cannot serialize arbitrary Python objects".format(name)
)
|
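A hypothetical call illustrating the two interesting paths of the helper above: an object array of strings yields a variable-length string dtype, while mixed native types raise ValueError.

import numpy as np

str_arr = np.array(["a", "bc"], dtype=object)
print(_infer_dtype(str_arr, name="str_var"))      # variable-length string dtype
try:
    _infer_dtype(np.array([1, "x"], dtype=object), name="mixed_var")
except ValueError as err:
    print(err)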
27,319 |
def write_wale_environment(placeholders, prefix, overwrite):
s3_names = ['WALE_S3_PREFIX', 'WALG_S3_PREFIX', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
'WALE_S3_ENDPOINT', 'AWS_ENDPOINT', 'AWS_REGION', 'AWS_INSTANCE_PROFILE',
'WALG_S3_SSE_KMS_ID', 'WALG_S3_SSE', 'WALG_DISABLE_S3_SSE', 'AWS_S3_FORCE_PATH_STYLE']
azure_names = ['WALG_AZ_PREFIX', 'AZURE_STORAGE_ACCOUNT', 'AZURE_STORAGE_ACCESS_KEY']
gs_names = ['WALE_GS_PREFIX', 'WALG_GS_PREFIX', 'GOOGLE_APPLICATION_CREDENTIALS']
swift_names = ['WALE_SWIFT_PREFIX', 'SWIFT_AUTHURL', 'SWIFT_TENANT', 'SWIFT_TENANT_ID', 'SWIFT_USER',
'SWIFT_USER_ID', 'SWIFT_USER_DOMAIN_NAME', 'SWIFT_USER_DOMAIN_ID', 'SWIFT_PASSWORD',
'SWIFT_AUTH_VERSION', 'SWIFT_ENDPOINT_TYPE', 'SWIFT_REGION', 'SWIFT_DOMAIN_NAME', 'SWIFT_DOMAIN_ID',
'SWIFT_PROJECT_NAME', 'SWIFT_PROJECT_ID', 'SWIFT_PROJECT_DOMAIN_NAME', 'SWIFT_PROJECT_DOMAIN_ID']
walg_names = ['WALG_DELTA_MAX_STEPS', 'WALG_DELTA_ORIGIN', 'WALG_DOWNLOAD_CONCURRENCY',
'WALG_UPLOAD_CONCURRENCY', 'WALG_UPLOAD_DISK_CONCURRENCY', 'WALG_DISK_RATE_LIMIT',
'WALG_NETWORK_RATE_LIMIT', 'WALG_COMPRESSION_METHOD', 'USE_WALG_BACKUP',
'USE_WALG_RESTORE', 'WALG_BACKUP_COMPRESSION_METHOD', 'WALG_BACKUP_FROM_REPLICA',
'WALG_SENTINEL_USER_DATA', 'WALG_PREVENT_WAL_OVERWRITE']
wale = defaultdict(lambda: '')
for name in ['PGVERSION', 'WALE_ENV_DIR', 'SCOPE', 'WAL_BUCKET_SCOPE_PREFIX', 'WAL_BUCKET_SCOPE_SUFFIX',
'WAL_S3_BUCKET', 'WAL_GCS_BUCKET', 'WAL_GS_BUCKET', 'WAL_SWIFT_BUCKET', 'BACKUP_NUM_TO_RETAIN',
'ENABLE_WAL_PATH_COMPAT'] + s3_names + swift_names + gs_names + walg_names + azure_names:
wale[name] = placeholders.get(prefix + name, '')
if wale.get('WAL_S3_BUCKET') or wale.get('WALE_S3_PREFIX') or wale.get('WALG_S3_PREFIX'):
wale_endpoint = wale.pop('WALE_S3_ENDPOINT', None)
aws_endpoint = wale.pop('AWS_ENDPOINT', None)
aws_region = wale.pop('AWS_REGION', None)
# for S3-compatible storage we want to specify WALE_S3_ENDPOINT and AWS_ENDPOINT, but not AWS_REGION
if aws_endpoint or wale_endpoint:
if not aws_endpoint:
aws_endpoint = wale_endpoint.replace('+path://', '://')
elif not wale_endpoint:
wale_endpoint = aws_endpoint.replace('://', '+path://')
wale.update(WALE_S3_ENDPOINT=wale_endpoint, AWS_ENDPOINT=aws_endpoint, WALG_DISABLE_S3_SSE='true')
if wale.get('USE_WALG_BACKUP') and wale.get('USE_WALG_BACKUP') == 'true':
wale['AWS_REGION'] = aws_region
elif not aws_region:
# try to determine region from the endpoint or bucket name
name = wale.get('WAL_S3_BUCKET') or wale.get('WALE_S3_PREFIX')
match = re.search(r'.*(\w{2}-\w+-\d)-.*', name)
if match:
aws_region = match.group(1)
else:
aws_region = placeholders['instance_data']['zone'][:-1]
wale['AWS_REGION'] = aws_region
else:
wale['AWS_REGION'] = aws_region
if not (wale.get('AWS_SECRET_ACCESS_KEY') and wale.get('AWS_ACCESS_KEY_ID')):
wale['AWS_INSTANCE_PROFILE'] = 'true'
if wale.get('USE_WALG_BACKUP') and wale.get('WALG_DISABLE_S3_SSE') != 'true' and not wale.get('WALG_S3_SSE'):
wale['WALG_S3_SSE'] = 'AES256'
write_envdir_names = s3_names + walg_names
elif wale.get('WAL_GCS_BUCKET') or wale.get('WAL_GS_BUCKET') or\
wale.get('WALE_GCS_PREFIX') or wale.get('WALE_GS_PREFIX') or wale.get('WALG_GS_PREFIX'):
if wale.get('WALE_GCS_PREFIX'):
wale['WALE_GS_PREFIX'] = wale['WALE_GCS_PREFIX']
elif wale.get('WAL_GCS_BUCKET'):
wale['WAL_GS_BUCKET'] = wale['WAL_GCS_BUCKET']
write_envdir_names = gs_names + walg_names
elif wale.get('WAL_SWIFT_BUCKET') or wale.get('WALE_SWIFT_PREFIX'):
write_envdir_names = swift_names
elif wale.get("WALG_AZ_PREFIX"):
write_envdir_names = azure_names + walg_names
else:
return
prefix_env_name = write_envdir_names[0]
store_type = prefix_env_name[5:].split('_')[0]
if not wale.get(prefix_env_name): # WALE_*_PREFIX is not defined in the environment
bucket_path = '/spilo/{WAL_BUCKET_SCOPE_PREFIX}{SCOPE}{WAL_BUCKET_SCOPE_SUFFIX}/wal/{PGVERSION}'.format(**wale)
prefix_template = '{0}://{{WAL_{1}_BUCKET}}{2}'.format(store_type.lower(), store_type, bucket_path)
wale[prefix_env_name] = prefix_template.format(**wale)
# Set WALG_*_PREFIX for future compatibility
if store_type in ('S3', 'GS') and not wale.get(write_envdir_names[1]):
wale[write_envdir_names[1]] = wale[prefix_env_name]
if not os.path.exists(wale['WALE_ENV_DIR']):
os.makedirs(wale['WALE_ENV_DIR'])
wale['WALE_LOG_DESTINATION'] = 'stderr'
for name in write_envdir_names + ['WALE_LOG_DESTINATION'] + ([] if prefix else ['BACKUP_NUM_TO_RETAIN']):
if wale.get(name):
path = os.path.join(wale['WALE_ENV_DIR'], name)
write_file(wale[name], path, overwrite)
adjust_owner(placeholders, path, gid=-1)
if not os.path.exists(placeholders['WALE_TMPDIR']):
os.makedirs(placeholders['WALE_TMPDIR'])
os.chmod(placeholders['WALE_TMPDIR'], 0o1777)
write_file(placeholders['WALE_TMPDIR'], os.path.join(wale['WALE_ENV_DIR'], 'TMPDIR'), True)
|
def write_wale_environment(placeholders, prefix, overwrite):
s3_names = ['WALE_S3_PREFIX', 'WALG_S3_PREFIX', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
'WALE_S3_ENDPOINT', 'AWS_ENDPOINT', 'AWS_REGION', 'AWS_INSTANCE_PROFILE',
'WALG_S3_SSE_KMS_ID', 'WALG_S3_SSE', 'WALG_DISABLE_S3_SSE', 'AWS_S3_FORCE_PATH_STYLE']
azure_names = ['WALG_AZ_PREFIX', 'AZURE_STORAGE_ACCOUNT', 'AZURE_STORAGE_ACCESS_KEY']
gs_names = ['WALE_GS_PREFIX', 'WALG_GS_PREFIX', 'GOOGLE_APPLICATION_CREDENTIALS']
swift_names = ['WALE_SWIFT_PREFIX', 'SWIFT_AUTHURL', 'SWIFT_TENANT', 'SWIFT_TENANT_ID', 'SWIFT_USER',
'SWIFT_USER_ID', 'SWIFT_USER_DOMAIN_NAME', 'SWIFT_USER_DOMAIN_ID', 'SWIFT_PASSWORD',
'SWIFT_AUTH_VERSION', 'SWIFT_ENDPOINT_TYPE', 'SWIFT_REGION', 'SWIFT_DOMAIN_NAME', 'SWIFT_DOMAIN_ID',
'SWIFT_PROJECT_NAME', 'SWIFT_PROJECT_ID', 'SWIFT_PROJECT_DOMAIN_NAME', 'SWIFT_PROJECT_DOMAIN_ID']
walg_names = ['WALG_DELTA_MAX_STEPS', 'WALG_DELTA_ORIGIN', 'WALG_DOWNLOAD_CONCURRENCY',
'WALG_UPLOAD_CONCURRENCY', 'WALG_UPLOAD_DISK_CONCURRENCY', 'WALG_DISK_RATE_LIMIT',
'WALG_NETWORK_RATE_LIMIT', 'WALG_COMPRESSION_METHOD', 'USE_WALG_BACKUP',
'USE_WALG_RESTORE', 'WALG_BACKUP_COMPRESSION_METHOD', 'WALG_BACKUP_FROM_REPLICA',
'WALG_SENTINEL_USER_DATA', 'WALG_PREVENT_WAL_OVERWRITE']
wale = defaultdict(lambda: '')
for name in ['PGVERSION', 'WALE_ENV_DIR', 'SCOPE', 'WAL_BUCKET_SCOPE_PREFIX', 'WAL_BUCKET_SCOPE_SUFFIX',
'WAL_S3_BUCKET', 'WAL_GCS_BUCKET', 'WAL_GS_BUCKET', 'WAL_SWIFT_BUCKET', 'BACKUP_NUM_TO_RETAIN',
'ENABLE_WAL_PATH_COMPAT'] + s3_names + swift_names + gs_names + walg_names + azure_names:
wale[name] = placeholders.get(prefix + name, '')
if wale.get('WAL_S3_BUCKET') or wale.get('WALE_S3_PREFIX') or wale.get('WALG_S3_PREFIX'):
wale_endpoint = wale.pop('WALE_S3_ENDPOINT', None)
aws_endpoint = wale.pop('AWS_ENDPOINT', None)
aws_region = wale.pop('AWS_REGION', None)
# for S3-compatible storage we want to specify WALE_S3_ENDPOINT and AWS_ENDPOINT, but not AWS_REGION
if aws_endpoint or wale_endpoint:
if not aws_endpoint:
aws_endpoint = wale_endpoint.replace('+path://', '://')
elif not wale_endpoint:
wale_endpoint = aws_endpoint.replace('://', '+path://')
wale.update(WALE_S3_ENDPOINT=wale_endpoint, AWS_ENDPOINT=aws_endpoint, WALG_DISABLE_S3_SSE='true')
if aws_region and wale.get('USE_WALG_BACKUP') == 'true':
wale['AWS_REGION'] = aws_region
elif not aws_region:
# try to determine region from the endpoint or bucket name
name = wale.get('WAL_S3_BUCKET') or wale.get('WALE_S3_PREFIX')
match = re.search(r'.*(\w{2}-\w+-\d)-.*', name)
if match:
aws_region = match.group(1)
else:
aws_region = placeholders['instance_data']['zone'][:-1]
wale['AWS_REGION'] = aws_region
else:
wale['AWS_REGION'] = aws_region
if not (wale.get('AWS_SECRET_ACCESS_KEY') and wale.get('AWS_ACCESS_KEY_ID')):
wale['AWS_INSTANCE_PROFILE'] = 'true'
if wale.get('USE_WALG_BACKUP') and wale.get('WALG_DISABLE_S3_SSE') != 'true' and not wale.get('WALG_S3_SSE'):
wale['WALG_S3_SSE'] = 'AES256'
write_envdir_names = s3_names + walg_names
elif wale.get('WAL_GCS_BUCKET') or wale.get('WAL_GS_BUCKET') or\
wale.get('WALE_GCS_PREFIX') or wale.get('WALE_GS_PREFIX') or wale.get('WALG_GS_PREFIX'):
if wale.get('WALE_GCS_PREFIX'):
wale['WALE_GS_PREFIX'] = wale['WALE_GCS_PREFIX']
elif wale.get('WAL_GCS_BUCKET'):
wale['WAL_GS_BUCKET'] = wale['WAL_GCS_BUCKET']
write_envdir_names = gs_names + walg_names
elif wale.get('WAL_SWIFT_BUCKET') or wale.get('WALE_SWIFT_PREFIX'):
write_envdir_names = swift_names
elif wale.get("WALG_AZ_PREFIX"):
write_envdir_names = azure_names + walg_names
else:
return
prefix_env_name = write_envdir_names[0]
store_type = prefix_env_name[5:].split('_')[0]
if not wale.get(prefix_env_name): # WALE_*_PREFIX is not defined in the environment
bucket_path = '/spilo/{WAL_BUCKET_SCOPE_PREFIX}{SCOPE}{WAL_BUCKET_SCOPE_SUFFIX}/wal/{PGVERSION}'.format(**wale)
prefix_template = '{0}://{{WAL_{1}_BUCKET}}{2}'.format(store_type.lower(), store_type, bucket_path)
wale[prefix_env_name] = prefix_template.format(**wale)
# Set WALG_*_PREFIX for future compatibility
if store_type in ('S3', 'GS') and not wale.get(write_envdir_names[1]):
wale[write_envdir_names[1]] = wale[prefix_env_name]
if not os.path.exists(wale['WALE_ENV_DIR']):
os.makedirs(wale['WALE_ENV_DIR'])
wale['WALE_LOG_DESTINATION'] = 'stderr'
for name in write_envdir_names + ['WALE_LOG_DESTINATION'] + ([] if prefix else ['BACKUP_NUM_TO_RETAIN']):
if wale.get(name):
path = os.path.join(wale['WALE_ENV_DIR'], name)
write_file(wale[name], path, overwrite)
adjust_owner(placeholders, path, gid=-1)
if not os.path.exists(placeholders['WALE_TMPDIR']):
os.makedirs(placeholders['WALE_TMPDIR'])
os.chmod(placeholders['WALE_TMPDIR'], 0o1777)
write_file(placeholders['WALE_TMPDIR'], os.path.join(wale['WALE_ENV_DIR'], 'TMPDIR'), True)
|
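A minimal sketch of how the S3 backup prefix is derived by the code above when only a bucket is configured; all values are hypothetical and the two format calls mirror the template logic in the function.

wale = {
    'WAL_S3_BUCKET': 'my-wal-bucket',
    'WAL_BUCKET_SCOPE_PREFIX': '',
    'SCOPE': 'demo',
    'WAL_BUCKET_SCOPE_SUFFIX': '',
    'PGVERSION': '13',
}
bucket_path = '/spilo/{WAL_BUCKET_SCOPE_PREFIX}{SCOPE}{WAL_BUCKET_SCOPE_SUFFIX}/wal/{PGVERSION}'.format(**wale)
prefix_template = '{0}://{{WAL_{1}_BUCKET}}{2}'.format('s3', 'S3', bucket_path)
print(prefix_template.format(**wale))  # -> s3://my-wal-bucket/spilo/demo/wal/13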
9,885 |
def mandatory(a, msg=None):
from jinja2.runtime import Undefined
''' Make a variable mandatory '''
if isinstance(a, Undefined):
if a._undefined_name is not None:
name = "'%s' " % to_text(a._undefined_name)
else:
name = ''
if msg is not None:
raise AnsibleFilterError(str(msg))
else:
raise AnsibleFilterError("Mandatory variable %s not defined." % name)
return a
|
def mandatory(a, msg=None):
from jinja2.runtime import Undefined
''' Make a variable mandatory '''
if isinstance(a, Undefined):
if a._undefined_name is not None:
name = "'%s' " % to_text(a._undefined_name)
else:
name = ''
if msg is not None:
raise AnsibleFilterError(to_native(msg))
else:
raise AnsibleFilterError("Mandatory variable %s not defined." % name)
return a
|
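A minimal sketch of both paths of the filter above, using a jinja2 Undefined constructed by hand (the variable name is hypothetical): defined values pass straight through, undefined ones raise the filter error with the supplied message.

from jinja2.runtime import Undefined

print(mandatory("configured-value"))        # passes through unchanged
try:
    mandatory(Undefined(name="my_var"), msg="my_var must be set")
except Exception as err:                    # AnsibleFilterError in Ansible itself
    print(err)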
41,043 |
def _entrate_sp(x, sm_window):
"""
Calculate the entropy rate of a stationary Gaussian random process using
spectrum estimation with smoothing window.
Parameters
----------
    x : np.ndarray
        Input data array (1D, 2D, or 3D).
    sm_window : int
        If 1, a Parzen smoothing window is applied to the autocorrelation.
    Returns
    -------
    out : float
        Estimated entropy rate.
"""
n = x.shape
# Normalize x_sb to be unit variance
x_std = np.std(np.reshape(x, (np.prod(n), 1)))
if x_std < 1e-10:
x_std = 1e-10
x = x / x_std
if (sm_window == 1):
M = [int(i) for i in np.ceil(np.array(n) / 10)]
if (x.ndim >= 3):
parzen_w_3 = np.zeros((2 * n[2] - 1, ))
parzen_w_3[(n[2] - M[2] - 1):(n[2] +
M[2])] = _parzen_win(2 * M[2] + 1)
if (x.ndim >= 2):
parzen_w_2 = np.zeros((2 * n[1] - 1, ))
parzen_w_2[(n[1] - M[1] - 1):(n[1] +
M[1])] = _parzen_win(2 * M[1] + 1)
if (x.ndim >= 1):
parzen_w_1 = np.zeros((2 * n[0] - 1, ))
parzen_w_1[(n[0] - M[0] - 1):(n[0] +
M[0])] = _parzen_win(2 * M[0] + 1)
if x.ndim == 2 and min(n) == 1: # 1D
xc = _autocorr(x)
xc = xc * parzen_w_1
xf = fftshift(fft(xc))
elif x.ndim == 2 and min(n) != 1: # 2D
xc = _autocorr(x) # default option: computes raw correlations with NO
# normalization -- Matlab help on xcorr
# Bias correction
v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0,
-1)))[np.newaxis, :]
v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0,
-1)))[np.newaxis, :]
vd = np.dot(v1.T, v2)
xc = xc / vd
parzen_window_2D = np.dot(parzen_w_1, parzen_w_2.T)
xc = xc * parzen_window_2D
xf = fftshift(fft2(xc))
elif x.ndim == 3 and min(n) != 1: # 3D
xc = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2] - 1):
temp = np.zeros((2 * n[0] - 1, 2 * n[1] - 1))
for k in range(n[2] - m3):
temp = temp + correlate2d(x[:, :, k + m3], x[:, :, k])
# default option:
# computes raw correlations with NO normalization
# -- Matlab help on xcorr
xc[:, :, (n[2] - 1) - m3] = temp
xc[:, :, (n[2] - 1) + m3] = temp
# Bias correction
v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0,
-1)))[np.newaxis, :]
v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0,
-1)))[np.newaxis, :]
v3 = np.arange(n[2], 0, -1)
vd = np.dot(v1.T, v2)
vcu = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2]):
vcu[:, :, (n[2] - 1) - m3] = vd * v3[m3]
vcu[:, :, (n[2] - 1) + m3] = vd * v3[m3]
# Possible source of NAN values
xc = xc / vcu
parzen_window_2D = np.dot(parzen_w_1[np.newaxis, :].T,
parzen_w_2[np.newaxis, :])
parzen_window_3D = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2] - 1):
parzen_window_3D[:, :, (n[2] - 1) - m3] = np.dot(
parzen_window_2D, parzen_w_3[n[2] - 1 - m3])
parzen_window_3D[:, :, (n[2] - 1) + m3] = np.dot(
parzen_window_2D, parzen_w_3[n[2] - 1 + m3])
xc = xc * parzen_window_3D
xf = fftshift(fftn(xc))
else:
raise ValueError('Unrecognized matrix dimension.')
xf = abs(xf)
xf[xf < 1e-4] = 1e-4
out = 0.5 * np.log(2 * np.pi * np.exp(1)) + _sumN(np.log(abs(
(xf)))) / 2 / _sumN(abs(xf))
return out
|
def _entrate_sp(x, sm_window):
"""
Calculate the entropy rate of a stationary Gaussian random process using
spectrum estimation with smoothing window.
Parameters
----------
    x : np.ndarray
        Input data array (1D, 2D, or 3D).
    sm_window : int
        If 1, a Parzen smoothing window is applied to the autocorrelation.
    Returns
    -------
    out : float
        Estimated entropy rate.
"""
n = x.shape
# Normalize x_sb to be unit variance
x_std = np.std(np.reshape(x, (np.prod(n), 1)))
if x_std < 1e-10:
x_std = 1e-10
x = x / x_std
if (sm_window == 1):
M = [int(i) for i in np.ceil(np.array(n) / 10)]
if (x.ndim >= 3):
parzen_w_3 = np.zeros((2 * n[2] - 1, ))
parzen_w_3[(n[2] - M[2] - 1):(n[2] +
M[2])] = _parzen_win(2 * M[2] + 1)
if (x.ndim >= 2):
parzen_w_2 = np.zeros((2 * n[1] - 1, ))
parzen_w_2[(n[1] - M[1] - 1):(n[1] +
M[1])] = _parzen_win(2 * M[1] + 1)
if (x.ndim >= 1):
parzen_w_1 = np.zeros((2 * n[0] - 1, ))
parzen_w_1[(n[0] - M[0] - 1):(n[0] +
M[0])] = _parzen_win(2 * M[0] + 1)
if (x.ndim == 2) and (min(n) == 1): # 1D
xc = _autocorr(x)
xc = xc * parzen_w_1
xf = fftshift(fft(xc))
elif x.ndim == 2 and min(n) != 1: # 2D
xc = _autocorr(x) # default option: computes raw correlations with NO
# normalization -- Matlab help on xcorr
# Bias correction
v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0,
-1)))[np.newaxis, :]
v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0,
-1)))[np.newaxis, :]
vd = np.dot(v1.T, v2)
xc = xc / vd
parzen_window_2D = np.dot(parzen_w_1, parzen_w_2.T)
xc = xc * parzen_window_2D
xf = fftshift(fft2(xc))
elif x.ndim == 3 and min(n) != 1: # 3D
xc = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2] - 1):
temp = np.zeros((2 * n[0] - 1, 2 * n[1] - 1))
for k in range(n[2] - m3):
temp = temp + correlate2d(x[:, :, k + m3], x[:, :, k])
# default option:
# computes raw correlations with NO normalization
# -- Matlab help on xcorr
xc[:, :, (n[2] - 1) - m3] = temp
xc[:, :, (n[2] - 1) + m3] = temp
# Bias correction
v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0,
-1)))[np.newaxis, :]
v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0,
-1)))[np.newaxis, :]
v3 = np.arange(n[2], 0, -1)
vd = np.dot(v1.T, v2)
vcu = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2]):
vcu[:, :, (n[2] - 1) - m3] = vd * v3[m3]
vcu[:, :, (n[2] - 1) + m3] = vd * v3[m3]
# Possible source of NAN values
xc = xc / vcu
parzen_window_2D = np.dot(parzen_w_1[np.newaxis, :].T,
parzen_w_2[np.newaxis, :])
parzen_window_3D = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1))
for m3 in range(n[2] - 1):
parzen_window_3D[:, :, (n[2] - 1) - m3] = np.dot(
parzen_window_2D, parzen_w_3[n[2] - 1 - m3])
parzen_window_3D[:, :, (n[2] - 1) + m3] = np.dot(
parzen_window_2D, parzen_w_3[n[2] - 1 + m3])
xc = xc * parzen_window_3D
xf = fftshift(fftn(xc))
else:
raise ValueError('Unrecognized matrix dimension.')
xf = abs(xf)
xf[xf < 1e-4] = 1e-4
out = 0.5 * np.log(2 * np.pi * np.exp(1)) + _sumN(np.log(abs(
(xf)))) / 2 / _sumN(abs(xf))
return out
|
58,797 |
def test_constant_as_input():
"""Test to check that constants specified as inputs aren't
interpreted as an encoded constant."""
def get_graph():
dtype = "uint8"
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype=dtype)
conv1 = make_ethosu_conv2d(
ifm,
32,
16,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
scalar = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)
add1 = make_ethosu_binary_elementwise(
conv1, scalar, ifm_channels=32, ifm2_channels=1, operator_type="ADD", ofm_dtype=dtype
)
func = relay.Function(relay.analysis.free_vars(add1), add1)
func = run_opt_pass(func, relay.transform.InferType())
return func
tir_mod, params = lower_to_tir(get_graph(), copy_constants())
# Check tile address for the scalar constant input hasn't been
# overwritten.
extern_calls = tir_mod["main"].body.body.body.body.body
binary_elmtwise = extern_calls[-1].value
args = binary_elmtwise.args
reason = "Tile address overwritten"
assert args[26] == 0, reason
assert args[27] == 0, reason
assert args[28] == 0, reason
    # More generally, check that it compiles successfully to make sure
    # nothing else was overwritten.
tir_to_cs_translator.translate(tir_mod, params)
|
def test_constant_as_input():
"""Test to check that constants specified as inputs aren't
interpreted as an encoded constant."""
def get_graph():
dtype = "uint8"
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype=dtype)
conv1 = make_ethosu_conv2d(
ifm,
32,
16,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
scalar = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)
add1 = make_ethosu_binary_elementwise(
conv1, scalar, ifm_channels=32, ifm2_channels=1, operator_type="ADD", ofm_dtype=dtype
)
func = relay.Function(relay.analysis.free_vars(add1), add1)
func = run_opt_pass(func, relay.transform.InferType())
return func
tir_mod, params = lower_to_tir(get_graph(), copy_constants())
# Check tile address for the scalar constant input hasn't been
# overwritten.
extern_calls = tir_mod["main"].body.body.body.body.body
binary_elementwise = extern_calls[-1].value
args = binary_elementwise.args
reason = "Tile address overwritten"
assert args[26] == 0, reason
assert args[27] == 0, reason
assert args[28] == 0, reason
    # More generally, check that it compiles successfully to make sure
    # nothing else was overwritten.
tir_to_cs_translator.translate(tir_mod, params)
|
37,253 |
def tweedledum2qiskit(tweedledum_circuit, name=None, qregs=None):
""" Converts a Tweedledum circuit into a Qiskit circuit. A Tweedledum circuit is a
dictionary with the following shape:
{
"num_qubits": 2,
"gates": [{
"gate": "X",
"qubits": [1],
"control_qubits": [0],
"control_state": "1"
}]
Args:
tweedledum_circuit (dict): Tweedledum circuit.
        name (str): Name for the resulting Qiskit circuit.
qregs (list(QuantumRegister)): Optional. List of QuantumRegisters on which the
circuit would operate. If not provided, it will create a flat register.
Returns:
QuantumCircuit: A Qiskit quantum circuit.
Raises:
        OracleCompilerError: If a gate in the Tweedledum circuit has no Qiskit equivalent.
"""
gates = {'z': ZGate, 't': TGate, 's': SGate, 'tdg': TdgGate, 'sdg': SdgGate, 'u1': U1Gate,
'x': XGate, 'h': HGate, 'u3': U3Gate}
if qregs:
circuit = QuantumCircuit(*qregs, name=name)
else:
circuit = QuantumCircuit(tweedledum_circuit['num_qubits'], name=name)
for gate in tweedledum_circuit['gates']:
basegate = gates.get(gate['gate'].lower())
if basegate is None:
raise OracleCompilerError('The Tweedledum gate %s has no Qiskit equivalent'
% gate['gate'])
ctrl_qubits = gate.get('control_qubits', [])
trgt_qubits = gate.get('qubits', [])
if ctrl_qubits:
gate = basegate().control(len(ctrl_qubits), ctrl_state=gate.get('control_state'))
else:
gate = basegate()
circuit.append(gate, ctrl_qubits + trgt_qubits)
return circuit
|
def tweedledum2qiskit(tweedledum_circuit, name=None, qregs=None):
""" Converts a Tweedledum circuit into a Qiskit circuit. A Tweedledum circuit is a
dictionary with the following shape:
{
"num_qubits": 2,
"gates": [{
"gate": "X",
"qubits": [1],
"control_qubits": [0],
"control_state": "1"
}]
Args:
tweedledum_circuit (dict): Tweedledum circuit.
        name (str): Name for the resulting Qiskit circuit.
qregs (list(QuantumRegister)): Optional. List of QuantumRegisters on which the
circuit would operate. If not provided, it will create a flat register.
Returns:
QuantumCircuit: The Tweedledum circuit converted to a Qiskit circuit.
Raises:
        OracleCompilerError: If a gate in the Tweedledum circuit has no Qiskit equivalent.
"""
gates = {'z': ZGate, 't': TGate, 's': SGate, 'tdg': TdgGate, 'sdg': SdgGate, 'u1': U1Gate,
'x': XGate, 'h': HGate, 'u3': U3Gate}
if qregs:
circuit = QuantumCircuit(*qregs, name=name)
else:
circuit = QuantumCircuit(tweedledum_circuit['num_qubits'], name=name)
for gate in tweedledum_circuit['gates']:
basegate = gates.get(gate['gate'].lower())
if basegate is None:
raise OracleCompilerError('The Tweedledum gate %s has no Qiskit equivalent'
% gate['gate'])
ctrl_qubits = gate.get('control_qubits', [])
trgt_qubits = gate.get('qubits', [])
if ctrl_qubits:
gate = basegate().control(len(ctrl_qubits), ctrl_state=gate.get('control_state'))
else:
gate = basegate()
circuit.append(gate, ctrl_qubits + trgt_qubits)
return circuit
|
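A hypothetical usage sketch reusing the dictionary shape from the docstring above: a single controlled-X converted into a two-qubit Qiskit circuit.

tweedledum_cnot = {
    "num_qubits": 2,
    "gates": [{
        "gate": "X",
        "qubits": [1],
        "control_qubits": [0],
        "control_state": "1",
    }],
}
qc = tweedledum2qiskit(tweedledum_cnot, name="cx_example")
print(qc.draw())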
6,032 |
def get_action_enabled(action, model_view):
"""Helper funciton to return if a tool is enabled.
Parameters
----------
action : Action
model_view : ModelView
"""
context = model_view.trait_get()
context.update(model_view.trait_context())
return eval(
compile(action.enabled_when, "<string>", "eval"),
{},
context,
)
|
def get_action_enabled(action, model_view):
"""Helper function to return if a tool is enabled.
Parameters
----------
action : Action
model_view : ModelView
"""
context = model_view.trait_get()
context.update(model_view.trait_context())
return eval(
compile(action.enabled_when, "<string>", "eval"),
{},
context,
)
|
8,517 |
def update_config():
''' This code needs to be run when the config is changed to take those
changes into account. It is called whenever a plugin is loaded as the
plugin might have changed the config values (for instance it might
change ckan.site_url) '''
config_declaration.setup()
config_declaration.make_safe(config)
config_declaration.normalize(config)
webassets_init()
for plugin in p.PluginImplementations(p.IConfigurer):
# must do update in place as this does not work:
# config = plugin.update_config(config)
plugin.update_config(config)
# Set whitelisted env vars on config object
# This is set up before globals are initialized
ckan_db = os.environ.get('CKAN_DB', None)
if ckan_db:
msg = 'Setting CKAN_DB as an env var is deprecated and will be' \
' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'
log.warn(msg)
config['sqlalchemy.url'] = ckan_db
for option in CONFIG_FROM_ENV_VARS:
from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)
if from_env:
config[option] = from_env
if config.get_value("config.mode") == "strict":
_, errors = config_declaration.validate(config)
if errors:
msg = "\n".join(
"{}: {}".format(key, "; ".join(issues))
for key, issues in errors.items()
)
raise CkanConfigurationException(msg)
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
site_url = config.get_value('ckan.site_url')
if not site_url:
raise RuntimeError(
'ckan.site_url is not configured and it must have a value.'
' Please amend your .ini file.')
if not site_url.lower().startswith('http'):
raise RuntimeError(
'ckan.site_url should be a full URL, including the schema '
'(http or https)')
# Remove backslash from site_url if present
config['ckan.site_url'] = site_url.rstrip('/')
display_timezone = config.get_value('ckan.display_timezone')
if (display_timezone and
display_timezone != 'server' and
display_timezone not in pytz.all_timezones):
raise CkanConfigurationException(
"ckan.display_timezone is not 'server' or a valid timezone"
)
# Init SOLR settings and check if the schema is compatible
# from ckan.lib.search import SolrSettings, check_solr_schema_version
# lib.search is imported here as we need the config enabled and parsed
search.SolrSettings.init(config.get_value('solr_url'),
config.get_value('solr_user'),
config.get_value('solr_password'))
search.check_solr_schema_version()
lib_plugins.reset_package_plugins()
lib_plugins.register_package_plugins()
lib_plugins.reset_group_plugins()
lib_plugins.register_group_plugins()
# initialise the globals
app_globals.app_globals._init()
helpers.load_plugin_helpers()
# Templates and CSS loading from configuration
valid_base_templates_folder_names = ['templates', 'templates-bs3']
templates = config.get('ckan.base_templates_folder', 'templates')
config['ckan.base_templates_folder'] = templates
if templates not in valid_base_templates_folder_names:
raise CkanConfigurationException(
'You provided an invalid value for ckan.base_templates_folder. '
'Possible values are: "templates-bs3".'
)
jinja2_templates_path = os.path.join(root, templates)
log.info('Loading templates from %s' % jinja2_templates_path)
template_paths = [jinja2_templates_path]
extra_template_paths = config.get_value('extra_template_paths')
if extra_template_paths:
# must be first for them to override defaults
template_paths = extra_template_paths.split(',') + template_paths
config['computed_template_paths'] = template_paths
# Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)
# to eliminate database errors due to stale pooled connections
config.setdefault('sqlalchemy.pool_pre_ping', True)
# Initialize SQLAlchemy
engine = sqlalchemy.engine_from_config(config)
model.init_model(engine)
for plugin in p.PluginImplementations(p.IConfigurable):
plugin.configure(config)
# clear other caches
logic.clear_actions_cache()
logic.clear_validators_cache()
authz.clear_auth_functions_cache()
# Here we create the site user if they are not already in the database
try:
logic.get_action('get_site_user')({'ignore_auth': True}, None)
except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
# The database is not yet initialised. It happens in `ckan db init`
pass
except sqlalchemy.exc.IntegrityError:
# Race condition, user already exists.
pass
    # Close current session and open database connections to ensure a
# clean environment even if an error occurs later on
model.Session.remove()
model.Session.bind.dispose()
|
def update_config():
''' This code needs to be run when the config is changed to take those
changes into account. It is called whenever a plugin is loaded as the
plugin might have changed the config values (for instance it might
change ckan.site_url) '''
config_declaration.setup()
config_declaration.make_safe(config)
config_declaration.normalize(config)
webassets_init()
for plugin in p.PluginImplementations(p.IConfigurer):
# must do update in place as this does not work:
# config = plugin.update_config(config)
plugin.update_config(config)
# Set whitelisted env vars on config object
# This is set up before globals are initialized
ckan_db = os.environ.get('CKAN_DB', None)
if ckan_db:
msg = 'Setting CKAN_DB as an env var is deprecated and will be' \
' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'
log.warn(msg)
config['sqlalchemy.url'] = ckan_db
for option in CONFIG_FROM_ENV_VARS:
from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)
if from_env:
config[option] = from_env
if config.get_value("config.mode") == "strict":
_, errors = config_declaration.validate(config)
if errors:
msg = "\n".join(
"{}: {}".format(key, "; ".join(issues))
for key, issues in errors.items()
)
raise CkanConfigurationException(msg)
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
site_url = config.get_value('ckan.site_url')
if not site_url:
raise RuntimeError(
'ckan.site_url is not configured and it must have a value.'
' Please amend your .ini file.')
if not site_url.lower().startswith('http'):
raise RuntimeError(
'ckan.site_url should be a full URL, including the schema '
'(http or https)')
    # Remove trailing slash from site_url if present
config['ckan.site_url'] = site_url.rstrip('/')
display_timezone = config.get_value('ckan.display_timezone')
if (display_timezone and
display_timezone != 'server' and
display_timezone not in pytz.all_timezones):
raise CkanConfigurationException(
"ckan.display_timezone is not 'server' or a valid timezone"
)
# Init SOLR settings and check if the schema is compatible
# from ckan.lib.search import SolrSettings, check_solr_schema_version
# lib.search is imported here as we need the config enabled and parsed
search.SolrSettings.init(config.get_value('solr_url'),
config.get_value('solr_user'),
config.get_value('solr_password'))
search.check_solr_schema_version()
lib_plugins.reset_package_plugins()
lib_plugins.register_package_plugins()
lib_plugins.reset_group_plugins()
lib_plugins.register_group_plugins()
# initialise the globals
app_globals.app_globals._init()
helpers.load_plugin_helpers()
# Templates and CSS loading from configuration
valid_base_templates_folder_names = ['templates', 'templates-bs3']
templates = config.get('ckan.base_templates_folder', 'templates')
config['ckan.base_templates_folder'] = templates
if templates not in valid_base_templates_folder_names:
raise CkanConfigurationException(
'You provided an invalid value for ckan.base_templates_folder. '
'Possible values are: "templates" and "templates-bs3".'
)
jinja2_templates_path = os.path.join(root, templates)
log.info('Loading templates from %s' % jinja2_templates_path)
template_paths = [jinja2_templates_path]
extra_template_paths = config.get_value('extra_template_paths')
if extra_template_paths:
# must be first for them to override defaults
template_paths = extra_template_paths.split(',') + template_paths
config['computed_template_paths'] = template_paths
# Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)
# to eliminate database errors due to stale pooled connections
config.setdefault('sqlalchemy.pool_pre_ping', True)
# Initialize SQLAlchemy
engine = sqlalchemy.engine_from_config(config)
model.init_model(engine)
for plugin in p.PluginImplementations(p.IConfigurable):
plugin.configure(config)
# clear other caches
logic.clear_actions_cache()
logic.clear_validators_cache()
authz.clear_auth_functions_cache()
# Here we create the site user if they are not already in the database
try:
logic.get_action('get_site_user')({'ignore_auth': True}, None)
except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
# The database is not yet initialised. It happens in `ckan db init`
pass
except sqlalchemy.exc.IntegrityError:
# Race condition, user already exists.
pass
    # Close current session and open database connections to ensure a clean
    # environment even if an error occurs later on
model.Session.remove()
model.Session.bind.dispose()
|
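The `sqlalchemy.pool_pre_ping` default set in the snippet above enables pessimistic disconnect handling: SQLAlchemy tests each pooled connection on checkout and transparently replaces stale ones. A minimal sketch of the same pattern in isolation, assuming nothing beyond SQLAlchemy itself (the SQLite URL is an illustrative placeholder, not taken from the snippet):
import sqlalchemy
# engine_from_config strips the prefix, so 'sqlalchemy.pool_pre_ping'
# becomes pool_pre_ping=True on the resulting engine.
config = {
    'sqlalchemy.url': 'sqlite:///:memory:',  # placeholder URL
    'sqlalchemy.pool_pre_ping': True,
}
engine = sqlalchemy.engine_from_config(config, prefix='sqlalchemy.')
with engine.connect() as conn:
    # The pre-ping runs on checkout, before this statement executes.
    print(conn.execute(sqlalchemy.text('SELECT 1')).scalar())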
32,283 |
def panorama_query_logs_command(args: dict):
"""
Query logs
"""
log_type = args.get('log-type')
number_of_logs = args.get('number_of_logs')
query = args.get('query')
address_src = args.get('addr-src')
address_dst = args.get('addr-dst')
ip_ = args.get('ip')
zone_src = args.get('zone-src')
zone_dst = args.get('zone-dst')
time_generated = args.get('time-generated')
action = args.get('action')
port_dst = args.get('port-dst')
rule = args.get('rule')
filedigest = args.get('filedigest')
url = args.get('url')
use_polling = args.get('polling', 'false') == 'true'
job_id = args.get('job_id')
cmd = demisto.command()
interval_in_seconds = int(args.get('interval_in_seconds', 60))
timeout = int(args.get('timeout', 600))
script_results = []
if query and (address_src or address_dst or zone_src or zone_dst
or time_generated or action or port_dst or rule or url or filedigest):
raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.')
if use_polling:
ScheduledCommand.raise_error_if_not_supported()
if not job_id:
# create new search
result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
zone_src, zone_dst, time_generated, action,
port_dst, rule, url, filedigest)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
raise Exception('Missing JobID in response.')
job_id = result['response']['result']['job']
polling_args = {
'job_id': job_id,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
readable_output = f"Panorama log query search created successfully (Job ID: {job_id})"
script_results.append(CommandResults(
readable_output=readable_output,
scheduled_command=scheduled_command
))
else:
result = panorama_get_traffic_logs(job_id)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
if result['response']['result']['job']['status'] != "FIN":
polling_args = {
'job_id': job_id,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
script_results.append(CommandResults(
scheduled_command=scheduled_command
))
else:
result = panorama_get_traffic_logs(job_id)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
query_logs_output = {
'JobID': job_id,
'Status': 'Complete'
}
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \
or 'status' not in result['response']['result']['job']:
raise Exception('Missing JobID status in response.')
if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response'][
'result'] \
or 'logs' not in result['response']['result']['log']:
raise Exception('Missing logs in response.')
logs = result['response']['result']['log']['logs']
if logs['@count'] == '0':
human_readable = f'No {log_type} logs matched the query.'
else:
pretty_logs = prettify_logs(logs['entry'])
query_logs_output['Logs'] = pretty_logs
human_readable = tableToMarkdown(f'Query {log_type} Logs:', query_logs_output['Logs'],
['TimeGenerated', 'SourceAddress', 'DestinationAddress', 'Application',
'Action', 'Rule', 'URLOrFilename'], removeNull=True)
script_results.append(CommandResults(
outputs_prefix='Panorama.Monitor',
outputs_key_field='JobID',
outputs=result,
readable_output=human_readable,
ignore_auto_extract=True))
else:
result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
zone_src, zone_dst, time_generated, action,
port_dst, rule, url, filedigest)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
raise Exception(f"Query logs failed. Reason is: {result['response']['msg']['line']}")
else:
raise Exception('Query logs failed.')
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
raise Exception('Missing JobID in response.')
query_logs_output = {
'JobID': result['response']['result']['job'],
'Status': 'Pending',
'LogType': log_type,
'Message': result['response']['result']['msg']['line']
}
script_results.append({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True),
'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output}
})
return_results(script_results)
|
def panorama_query_logs_command(args: dict):
"""
Query logs
"""
log_type = args.get('log-type')
number_of_logs = args.get('number_of_logs')
query = args.get('query')
address_src = args.get('addr-src')
address_dst = args.get('addr-dst')
ip_ = args.get('ip')
zone_src = args.get('zone-src')
zone_dst = args.get('zone-dst')
time_generated = args.get('time-generated')
action = args.get('action')
port_dst = args.get('port-dst')
rule = args.get('rule')
filedigest = args.get('filedigest')
url = args.get('url')
use_polling = args.get('polling', 'false') == 'true'
job_id = args.get('job_id')
cmd = demisto.command()
interval_in_seconds = int(args.get('interval_in_seconds', 60))
timeout = int(args.get('timeout', 600))
script_results = []
if query and (address_src or address_dst or zone_src or zone_dst
or time_generated or action or port_dst or rule or url or filedigest):
raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.')
if use_polling:
ScheduledCommand.raise_error_if_not_supported()
if not job_id:
# create new search
result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
zone_src, zone_dst, time_generated, action,
port_dst, rule, url, filedigest)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
raise Exception('Missing JobID in response.')
job_id = result['response']['result']['job']
polling_args = {
'job_id': job_id,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
readable_output = f"Panorama log query search created successfully (Job ID: {job_id})"
script_results.append(CommandResults(
readable_output=readable_output,
scheduled_command=scheduled_command
))
else:
result = panorama_get_traffic_logs(job_id)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception(f'Query logs failed {message}')
else:
raise Exception('Query logs failed.')
if result['response']['result']['job']['status'] != "FIN":
polling_args = {
'job_id': job_id,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
script_results.append(CommandResults(
scheduled_command=scheduled_command
))
else:
result = panorama_get_traffic_logs(job_id)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
query_logs_output = {
'JobID': job_id,
'Status': 'Complete'
}
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \
or 'status' not in result['response']['result']['job']:
raise Exception('Missing JobID status in response.')
if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response'][
'result'] \
or 'logs' not in result['response']['result']['log']:
raise Exception('Missing logs in response.')
logs = result['response']['result']['log']['logs']
if logs['@count'] == '0':
human_readable = f'No {log_type} logs matched the query.'
else:
pretty_logs = prettify_logs(logs['entry'])
query_logs_output['Logs'] = pretty_logs
human_readable = tableToMarkdown(f'Query {log_type} Logs:', query_logs_output['Logs'],
['TimeGenerated', 'SourceAddress', 'DestinationAddress', 'Application',
'Action', 'Rule', 'URLOrFilename'], removeNull=True)
script_results.append(CommandResults(
outputs_prefix='Panorama.Monitor',
outputs_key_field='JobID',
outputs=result,
readable_output=human_readable,
ignore_auto_extract=True))
else:
result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
zone_src, zone_dst, time_generated, action,
port_dst, rule, url, filedigest)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
raise Exception(f"Query logs failed. Reason is: {result['response']['msg']['line']}")
else:
raise Exception('Query logs failed.')
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
raise Exception('Missing JobID in response.')
query_logs_output = {
'JobID': result['response']['result']['job'],
'Status': 'Pending',
'LogType': log_type,
'Message': result['response']['result']['msg']['line']
}
script_results.append({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True),
'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output}
})
return_results(script_results)
|
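The polling branch above is an instance of the generic XSOAR scheduled-command pattern: the first call submits a job and re-schedules the same command with the job id, and later calls either re-schedule again or return the finished results. A stripped-down sketch of that scaffold, assuming the usual CommonServerPython names (ScheduledCommand, CommandResults, demisto); start_search, get_search_status and get_search_results are hypothetical placeholders, not functions from the integration above:
def generic_polling_command(args: dict) -> CommandResults:
    job_id = args.get('job_id')
    if not job_id:
        job_id = start_search(args)            # hypothetical: submit the job
        status = 'PENDING'
    else:
        status = get_search_status(job_id)     # hypothetical: e.g. 'FIN' when done
    if status != 'FIN':
        # Re-run this same command later, carrying the job id in the args.
        scheduled = ScheduledCommand(
            command=demisto.command(),
            next_run_in_seconds=int(args.get('interval_in_seconds', 60)),
            args={**args, 'job_id': job_id},
            timeout_in_seconds=int(args.get('timeout', 600)),
        )
        return CommandResults(readable_output=f'Job {job_id} is still running.',
                              scheduled_command=scheduled)
    return CommandResults(outputs_prefix='Example.Search', outputs_key_field='JobID',
                          outputs=get_search_results(job_id))  # hypothetical fetch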
16,926 |
def test_setup_without_migraton(hass_recorder):
"""Verify the schema version without a migration."""
hass = hass_recorder()
assert recorder.get_instance(hass).schema_version == SCHEMA_VERSION
|
def test_setup_without_migration(hass_recorder):
"""Verify the schema version without a migration."""
hass = hass_recorder()
assert recorder.get_instance(hass).schema_version == SCHEMA_VERSION
|
38,025 |
def data_kind(data, x=None, y=None, z=None, required_z=False):
"""
Check what kind of data is provided to a module.
Possible types:
* a file name provided as 'data'
* an xarray.DataArray provided as 'data'
* a matrix provided as 'data'
* 1D arrays x and y (and z, optionally)
Arguments should be ``None`` if not used. If doesn't fit any of these
categories (or fits more than one), will raise an exception.
Parameters
----------
data : str or xarray.DataArray or {table-like} or None
Pass in either a file name to an ASCII data table, an
:class:`xarray.DataArray`, a 1D/2D
{table-classes}.
x/y : 1d arrays or None
x and y columns as numpy arrays.
z : 1d array or None
z column as numpy array. To be used optionally when x and y
are given.
Returns
-------
kind : str
One of: ``'file'``, ``'grid'``, ``'matrix'``, ``'vectors'``.
Examples
--------
>>> import numpy as np
>>> import xarray as xr
>>> data_kind(data=None, x=np.array([1, 2, 3]), y=np.array([4, 5, 6]))
'vectors'
>>> data_kind(data=np.arange(10).reshape((5, 2)), x=None, y=None)
'matrix'
>>> data_kind(data="my-data-file.txt", x=None, y=None)
'file'
>>> data_kind(data=xr.DataArray(np.random.rand(4, 3)))
'grid'
"""
if data is None and x is None and y is None:
raise GMTInvalidInput("No input data provided.")
if data is not None and (x is not None or y is not None or z is not None):
raise GMTInvalidInput("Too much data. Use either data or x and y.")
if data is None and (x is None or y is None):
raise GMTInvalidInput("Must provided both x and y.")
if data is None and required_z and (x is None or y is None or z is None):
raise GMTInvalidInput("Must provided both x, y, and z.")
if isinstance(data, str):
kind = "file"
elif isinstance(data, xr.DataArray):
kind = "grid"
elif hasattr(data, "__geo_interface__"):
kind = "geojson"
elif data is not None:
kind = "matrix"
else:
kind = "vectors"
return kind
|
def data_kind(data, x=None, y=None, z=None, required_z=False):
"""
Check what kind of data is provided to a module.
Possible types:
* a file name provided as 'data'
* an xarray.DataArray provided as 'data'
* a matrix provided as 'data'
* 1D arrays x and y (and z, optionally)
    Arguments should be ``None`` if not used. If the data doesn't fit any of these
    categories (or fits more than one), an exception will be raised.
Parameters
----------
data : str or xarray.DataArray or {table-like} or None
Pass in either a file name to an ASCII data table, an
:class:`xarray.DataArray`, a 1D/2D
{table-classes}.
x/y : 1d arrays or None
x and y columns as numpy arrays.
z : 1d array or None
z column as numpy array. To be used optionally when x and y
are given.
Returns
-------
kind : str
One of: ``'file'``, ``'grid'``, ``'matrix'``, ``'vectors'``.
Examples
--------
>>> import numpy as np
>>> import xarray as xr
>>> data_kind(data=None, x=np.array([1, 2, 3]), y=np.array([4, 5, 6]))
'vectors'
>>> data_kind(data=np.arange(10).reshape((5, 2)), x=None, y=None)
'matrix'
>>> data_kind(data="my-data-file.txt", x=None, y=None)
'file'
>>> data_kind(data=xr.DataArray(np.random.rand(4, 3)))
'grid'
"""
if data is None and x is None and y is None:
raise GMTInvalidInput("No input data provided.")
if data is not None and (x is not None or y is not None or z is not None):
raise GMTInvalidInput("Too much data. Use either data or x and y.")
if data is None and (x is None or y is None):
raise GMTInvalidInput("Must provided both x and y.")
if data is None and required_z and (x is None or y is None or z is None):
raise GMTInvalidInput("Must provide x, y, and z.")
if isinstance(data, str):
kind = "file"
elif isinstance(data, xr.DataArray):
kind = "grid"
elif hasattr(data, "__geo_interface__"):
kind = "geojson"
elif data is not None:
kind = "matrix"
else:
kind = "vectors"
return kind
|
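As a quick illustration of the required_z check corrected above, a hedged example that only exercises the validation paths shown, assuming data_kind and GMTInvalidInput are importable from this module:
import numpy as np
x, y = np.array([1, 2, 3]), np.array([4, 5, 6])
print(data_kind(data=None, x=x, y=y))                 # vectors
try:
    data_kind(data=None, x=x, y=y, required_z=True)   # z is missing
except GMTInvalidInput as err:
    print(err)                                        # Must provide x, y, and z.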
4,300 |
def parse_nedf_header(filename):
"""
Read the header information from the first 10kB of an .nedf file
Parameters
----------
filename : str
Path to the .nedf file
Returns
-------
info : dict
A dictionary with information from the header
dt : numpy.dtype
structure of the binary EEG+accelerometer+trigger data in the file
"""
info = {}
# nedf files have some accelerometer channels sampled at 100Hz and
# several other channels sampled at 500Hz.
# The layout is
# (100HzCh1S1, 100HzCh2S1, 100HzCh3S1),
# ((500HzCh1S1, 500HzCh2S1, …, 500HzChnS1),…,
# (500HzCh1S2, 500HzCh2S2, …, 500HzChnS2), …
# (500HzCh1S5, 500HzCh2S5, …, 500HzChnS5)),
# (100HzCh1S2, 100HzCh2S2, 100HzCh3S2) and so on
# dtype for the binary data block
dt = []
# dtype for a single EEG sample
datadt = []
with open(filename, 'rb') as f:
header = f.read(10240)
headerend = header.find(b'\0')
if headerend == -1:
raise RuntimeError('End of header null not found')
headerxml = ElementTree.fromstring(header[:headerend])
nedfversion = headerxml.findtext('NEDFversion', '')
if nedfversion not in ['1.3', '1.4']:
print('Unexpected NEDFversion, hope this works anyway')
if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF':
raise RuntimeError('Unknown additional channel, aborting.')
n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0))
if n_acc:
# expect one sample of u16 accelerometer data per block
dt.append(('acc', '>u2', (n_acc,)))
eegset = headerxml.find('EEGSettings')
if eegset is None:
raise RuntimeError('No EEG channels found')
nchantotal = int(eegset.find('TotalNumberOfChannels').text)
info['nchan'] = nchantotal
info['sfreq'] = int(eegset.find('EEGSamplingRate').text)
info['ch_names'] = [e.text for e in eegset.find('EEGMontage')]
# expect nchantotal uint24s
datadt.append(('eeg', 'B', (nchantotal, 3)))
info['units'] = eegset.find('EEGUnits')
if headerxml.find('STIMSettings'):
# 2* -> two stim samples per eeg sample
datadt.append(('stim', 'B', (2, nchantotal, 3)))
if 'AdditionalChannelStatus' in headerxml:
raise RuntimeError('Unexpected AdditionalChannelStatus')
if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM':
print('Found Starstim, not sure how to handle this')
# Trigger data: 4 bytes in newer versions, 1 byte in older versions
trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B'
datadt.append(('trig', trigger_type))
# 5 data samples per block
dt.append(('data', np.dtype(datadt), (5,)))
date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', '')
info['meas_date'] = datetime.datetime.utcfromtimestamp(int(date) / 1000)
return info, np.dtype(dt)
|
def parse_nedf_header(filename):
"""
Read the header information from the first 10kB of an .nedf file
Parameters
----------
filename : str
Path to the .nedf file
Returns
-------
info : dict
A dictionary with information from the header
dt : numpy.dtype
structure of the binary EEG+accelerometer+trigger data in the file
"""
info = {}
# nedf files have some accelerometer channels sampled at 100Hz and
# several other channels sampled at 500Hz.
# The layout is
# (100HzCh1S1, 100HzCh2S1, 100HzCh3S1),
# ((500HzCh1S1, 500HzCh2S1, …, 500HzChnS1),…,
# (500HzCh1S2, 500HzCh2S2, …, 500HzChnS2), …
# (500HzCh1S5, 500HzCh2S5, …, 500HzChnS5)),
# (100HzCh1S2, 100HzCh2S2, 100HzCh3S2) and so on
# dtype for the binary data block
dt = []
# dtype for a single EEG sample
datadt = []
with open(filename, 'rb') as f:
header = f.read(10240)
headerend = header.find(b'\0')
if headerend == -1:
raise RuntimeError('End of header null not found')
headerxml = ElementTree.fromstring(header[:headerend])
nedfversion = headerxml.findtext('NEDFversion', '')
if nedfversion not in ['1.3', '1.4']:
print('Unexpected NEDFversion, hope this works anyway')
if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF':
raise RuntimeError('Unknown additional channel, aborting.')
n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0))
if n_acc:
# expect one sample of u16 accelerometer data per block
dt.append(('acc', '>u2', (n_acc,)))
eegset = headerxml.find('EEGSettings')
if eegset is None:
raise RuntimeError('No EEG channels found')
nchantotal = int(eegset.find('TotalNumberOfChannels').text)
info['nchan'] = nchantotal
info['sfreq'] = int(eegset.find('EEGSamplingRate').text)
info['ch_names'] = [e.text for e in eegset.find('EEGMontage')]
# expect nchantotal uint24s
datadt.append(('eeg', 'B', (nchantotal, 3)))
info['units'] = eegset.find('EEGUnits')
if headerxml.find('STIMSettings'):
# 2* -> two stim samples per eeg sample
datadt.append(('stim', 'B', (2, nchantotal, 3)))
if 'AdditionalChannelStatus' in headerxml:
raise RuntimeError('Unexpected AdditionalChannelStatus')
if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM':
print('Found Starstim, not sure how to handle this')
# Trigger data: 4 bytes in newer versions, 1 byte in older versions
trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B'
datadt.append(('trig', trigger_type))
# 5 data samples per block
dt.append(('data', np.dtype(datadt), (5,)))
date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', '')
info['meas_date'] = datetime.datetime.utcfromtimestamp(int(date) / 1000)
return info, np.dtype(dt)
|
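The structured dtype returned above describes one interleaved data block (5 EEG samples plus trigger, optionally preceded by accelerometer samples). A hedged sketch of how a caller might consume it; the file name and the np.fromfile-based reader are assumptions for illustration, only the 10240-byte header offset comes from the parser itself:
import numpy as np
info, dt = parse_nedf_header('recording.nedf')   # illustrative path
with open('recording.nedf', 'rb') as f:
    f.seek(10240)                                # skip the fixed-size header
    blocks = np.fromfile(f, dtype=dt)            # one record per data block
# Nested field access: shape (n_blocks, 5, nchan, 3) of raw uint24 bytes.
eeg_bytes = blocks['data']['eeg']
print(info['sfreq'], info['nchan'], eeg_bytes.shape)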
31,227 |
def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""Get Connector Runs command.
Args:
client: Client which connects to api
Returns:
Human Readable
Entry Context
Raw Data
"""
connector_id = demisto.getArg("connector_id")
url_suffix = '/connectors/%s/connector_runs' % connector_id
human_readable = []
context: Dict[str, Any] = {}
connectors: List[Dict[str, Any]] = client.http_request(message='GET', suffix=url_suffix)
if connectors:
keys = [
"id", "start_time",
"end_time", "success",
"total_payload_count",
"processed_palyoad_count",
"failed_payload_count",
"processed_assets_count",
"assets_with_tags_reset_count",
"processed_scanner_vuln_count",
"created_scanner_vuln_count",
"closed_scanner_vuln_count",
"autoclosed_scanner_vuln_count",
"reopened_scanner_vuln_count",
"closed_vuln_count",
"autoclosed_vuln_count",
"reopened_vuln_count"
]
context_list = parse_response(connectors, keys, keys)
for connector in connectors:
curr_dict = {
"id": connector.get("id"),
"start_time": connector.get("start_time"),
"end_time": connector.get("end_time"),
"success": connector.get("success"),
"total_payload_count": connector.get("total_payload_count"),
"processed_payload_count": connector.get("total_payload_count"),
"failed_payload_count": connector.get("failed_payload_count"),
"processed_assets_count": connector.get("processed_assets_count"),
"assets_with_tags_reset_count": connector.get("assets_with_tags_reset_count"),
"processed_scanner_vuln_count": connector.get("processed_scanner_vuln_count"),
"updated_scanner_vuln_count": connector.get("updated_scanner_vuln_count"),
"created_scanner_vuln_count": connector.get("created_scanner_vuln_count"),
"closed_scanner_vuln_count": connector.get("closed_scanner_vuln_count"),
"autoclosed_scanner_vuln_count": connector.get("autoclosed_scanner_vuln_count"),
"reopened_scanner_vuln_count": connector.get("reopened_scanner_vuln_count"),
"closed_vuln_count": connector.get("closed_vuln_count"),
"autoclosed_vuln_count": connector.get("closed_vuln_count"),
"reopened_vuln_count": connector.get("reopened_vuln_count")
}
human_readable.append(curr_dict)
context = {
'Kenna.ConnectorRunsList(val.ID === obj.ID)': context_list
}
human_readable_markdown = tableToMarkdown('Kenna Connector Runs', human_readable, removeNull=True)
else:
human_readable_markdown = "no connectors in get response."
return human_readable_markdown, context, connectors
|
def get_connector_runs(client: Client, args: dict) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""Get Connector Runs command.
Args:
client: Client which connects to api
Returns:
Human Readable
Entry Context
Raw Data
"""
connector_id = demisto.getArg("connector_id")
url_suffix = '/connectors/%s/connector_runs' % connector_id
human_readable = []
context: Dict[str, Any] = {}
connectors: List[Dict[str, Any]] = client.http_request(message='GET', suffix=url_suffix)
if connectors:
keys = [
"id", "start_time",
"end_time", "success",
"total_payload_count",
"processed_palyoad_count",
"failed_payload_count",
"processed_assets_count",
"assets_with_tags_reset_count",
"processed_scanner_vuln_count",
"created_scanner_vuln_count",
"closed_scanner_vuln_count",
"autoclosed_scanner_vuln_count",
"reopened_scanner_vuln_count",
"closed_vuln_count",
"autoclosed_vuln_count",
"reopened_vuln_count"
]
context_list = parse_response(connectors, keys, keys)
for connector in connectors:
curr_dict = {
"id": connector.get("id"),
"start_time": connector.get("start_time"),
"end_time": connector.get("end_time"),
"success": connector.get("success"),
"total_payload_count": connector.get("total_payload_count"),
"processed_payload_count": connector.get("total_payload_count"),
"failed_payload_count": connector.get("failed_payload_count"),
"processed_assets_count": connector.get("processed_assets_count"),
"assets_with_tags_reset_count": connector.get("assets_with_tags_reset_count"),
"processed_scanner_vuln_count": connector.get("processed_scanner_vuln_count"),
"updated_scanner_vuln_count": connector.get("updated_scanner_vuln_count"),
"created_scanner_vuln_count": connector.get("created_scanner_vuln_count"),
"closed_scanner_vuln_count": connector.get("closed_scanner_vuln_count"),
"autoclosed_scanner_vuln_count": connector.get("autoclosed_scanner_vuln_count"),
"reopened_scanner_vuln_count": connector.get("reopened_scanner_vuln_count"),
"closed_vuln_count": connector.get("closed_vuln_count"),
"autoclosed_vuln_count": connector.get("closed_vuln_count"),
"reopened_vuln_count": connector.get("reopened_vuln_count")
}
human_readable.append(curr_dict)
context = {
'Kenna.ConnectorRunsList(val.ID === obj.ID)': context_list
}
human_readable_markdown = tableToMarkdown('Kenna Connector Runs', human_readable, removeNull=True)
else:
human_readable_markdown = "no connectors in get response."
return human_readable_markdown, context, connectors
|
43,687 |
def edge_driver(graph, reward):
r"""Returns the edge-driver cost Hamiltonian component.
Given some graph, :math:`G`, this method will return a Hamiltonian that assigns
lower energies to two-bit bitstrings supplied in ``reward``. Each bitstring corresponds
to the state of some edge in :math:`G`, which is defined by the states of its vertex endpoints.
See usage details for more information.
Args:
graph (nx.Graph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian
Returns:
.Hamiltonian
**Example**
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0 Z1] + (0.25) [Z0] + (0.25) [Z1] + (0.25) [Z1 Z2] + (0.25) [Z2]
..UsageDetails::
The goal of many combinatorial problems that can be solved with QAOA is to
find a `Graph colouring <https://en.wikipedia.org/wiki/Graph_coloring>`__ of some supplied
graph :math:`G`, that minimizes some cost function. It is oftentimes natural to consider the class
of graph colouring problems that only admit two colours, as we can easily encode these two colours
using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given
some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`,
:math:`01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints.
When constructing QAOA cost functions, one must "penalize" certain states of the graph, and "reward"
others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour
pairs (which each describe a possible state of a graph edge), the `edge_driver`
method will output a Hamiltonian that rewards the edges in the set, and penalizes the others. For example,
given the set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle}` and the graph :math:`G`,
the `edge_driver` method will output the following Hamiltonian:
..math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big)
where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states
:math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state
:math:`|11\rangle`.
.. Note::
If either of the states :math:`\01\rangle` or :math:`|10\rangle` is contained in ``reward``, then so too
must :math:`|10\rangle` or :math:`|01\rangle`, respectively. Within a graph, there is no notion of "order"
of edge endpoints, so these two states are effectively the same.
"""
allowed = ["00", "01", "10", "11"]
if not all([e in allowed for e in reward]):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither or both."
)
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
coeffs = []
ops = []
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph.nodes]
ops = [qml.Identity(v) for v in graph.nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph.edges:
coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
if reward == "10":
for e in graph.edges:
coeffs.append(-0.5 * sign)
ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1]))
if reward == "11":
for e in graph.edges:
coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
return qml.Hamiltonian(coeffs, ops)
|
def edge_driver(graph, reward):
r"""Returns the edge-driver cost Hamiltonian component.
Given some graph, :math:`G`, this method will return a Hamiltonian that assigns
lower energies to two-bit bitstrings supplied in ``reward``. Each bitstring corresponds
to the state of some edge in :math:`G`, which is defined by the states of its vertex endpoints.
See usage details for more information.
Args:
graph (nx.Graph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian
Returns:
.Hamiltonian
**Example**
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0 Z1] + (0.25) [Z0] + (0.25) [Z1] + (0.25) [Z1 Z2] + (0.25) [Z2]
    .. UsageDetails::
The goal of many combinatorial problems that can be solved with QAOA is to
find a `Graph colouring <https://en.wikipedia.org/wiki/Graph_coloring>`__ of some supplied
graph :math:`G`, that minimizes some cost function. It is oftentimes natural to consider the class
of graph colouring problems that only admit two colours, as we can easily encode these two colours
using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given
some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`,
:math:`|01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints.
When constructing QAOA cost functions, one must "penalize" certain states of the graph, and "reward"
others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour
pairs (which each describe a possible state of a graph edge), the `edge_driver`
method will output a Hamiltonian that rewards the edges in the set, and penalizes the others. For example,
    given the set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle\}` and the graph :math:`G`,
    the `edge_driver` method will output the following Hamiltonian:
    .. math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big)
where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states
:math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state
:math:`|11\rangle`.
.. Note::
    If either of the states :math:`|01\rangle` or :math:`|10\rangle` is contained in ``reward``, then so too
must :math:`|10\rangle` or :math:`|01\rangle`, respectively. Within a graph, there is no notion of "order"
of edge endpoints, so these two states are effectively the same.
"""
allowed = ["00", "01", "10", "11"]
if not all([e in allowed for e in reward]):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither or both."
)
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
coeffs = []
ops = []
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph.nodes]
ops = [qml.Identity(v) for v in graph.nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph.edges:
coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
if reward == "10":
for e in graph.edges:
coeffs.append(-0.5 * sign)
ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1]))
if reward == "11":
for e in graph.edges:
coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
return qml.Hamiltonian(coeffs, ops)
|
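The docstring's energy claim is easy to sanity-check numerically: for a single edge, H = (Z⊗Z - Z⊗I - I⊗Z)/4 is diagonal in the computational basis, so its diagonal lists the energies of |00>, |01>, |10>, |11>. A short, self-contained numpy check, independent of PennyLane:
import numpy as np
Z = np.diag([1.0, -1.0])
I = np.eye(2)
# Single-edge Hamiltonian that rewards |00>, |01>, |10> and penalises |11>.
H = 0.25 * (np.kron(Z, Z) - np.kron(Z, I) - np.kron(I, Z))
print(np.diag(H))   # [-0.25 -0.25 -0.25  0.75]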
6,197 |
def gitlabSetup(GITLABTOKEN):
"""Import the GITLAB Token and add proper header."""
LOGGER.info('Setting up GitLab')
if not GITLABTOKEN:
try:
from GitTokens import GITLABTOKEN
except ImportError:
raise ImportError(G_ERROR)
if GITLABTOKEN:
SESSION.headers.update({'PRIVATE-TOKEN': GITLABTOKEN})
|
def gitlabSetup(GITLABTOKEN=''):
"""Import the GITLAB Token and add proper header."""
LOGGER.info('Setting up GitLab')
if not GITLABTOKEN:
try:
from GitTokens import GITLABTOKEN
except ImportError:
raise ImportError(G_ERROR)
if GITLABTOKEN:
SESSION.headers.update({'PRIVATE-TOKEN': GITLABTOKEN})
|
54,299 |
def test_combine_merge_channels(tmpdir, script_runner):
temp_1 = tmpdir.join("parsed_output.json")
temp_2 = tmpdir.join("renamed_output.json")
command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp_1.strpath} --hide-progress'
ret = script_runner.run(*shlex.split(command))
command = (
f'pyhf prune {temp_1.strpath} --sample signal --output-file {temp_2.strpath}'
)
ret = script_runner.run(*shlex.split(command))
command = f'pyhf combine --merge-channels --join "left outer" {temp_1.strpath} {temp_2.strpath}'
ret = script_runner.run(*shlex.split(command))
assert ret.success
|
def test_combine_merge_channels(tmpdir, script_runner):
temp_1 = tmpdir.join("parsed_output.json")
temp_2 = tmpdir.join("renamed_output.json")
command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp_1.strpath} --hide-progress'
ret = script_runner.run(*shlex.split(command))
assert ret.success
command = (
f'pyhf prune {temp_1.strpath} --sample signal --output-file {temp_2.strpath}'
)
ret = script_runner.run(*shlex.split(command))
assert ret.success
command = f'pyhf combine --merge-channels --join "left outer" {temp_1.strpath} {temp_2.strpath}'
ret = script_runner.run(*shlex.split(command))
assert ret.success
|
34,711 |
def signal_handler(sig: int, frame: Any) -> NoReturn:
print("Goodbye 👋")
sys.exit(0)
|
def signal_handler(sig: int, frame: types.FrameType) -> NoReturn:
print("Goodbye 👋")
sys.exit(0)
|
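For context, a handler with this signature is what the signal module invokes; a minimal usage sketch assuming the definition above (plus its sys and types imports) is in scope, with SIGINT chosen as an example:
import signal
# Register the handler so Ctrl+C (SIGINT) triggers the graceful exit above;
# the interpreter passes the signal number and the current stack frame.
signal.signal(signal.SIGINT, signal_handler)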
5,374 |
def test_present():
"""
Test to verify that the specified host is known by the specified user.
"""
name = "github.com"
user = "root"
key = "16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48"
fingerprint = [key]
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
with patch.dict(ssh_known_hosts.__opts__, {"test": True}):
with patch.object(os.path, "isabs", MagicMock(return_value=False)):
comt = 'If not specifying a "user", ' 'specify an absolute "config".'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name) == ret
comt = 'Specify either "key" or "fingerprint", not both.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key, fingerprint=[key]) == ret
comt = 'Required argument "enc" if using "key" argument.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key) == ret
mock = MagicMock(side_effect=["exists", "add", "update"])
with patch.dict(ssh_known_hosts.__salt__, {"ssh.check_known_host": mock}):
comt = "Host github.com is already in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be" " added to .ssh/known_hosts"
ret.update({"comment": comt, "result": None})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be " "updated in .ssh/known_hosts"
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
with patch.dict(ssh_known_hosts.__opts__, {"test": False}):
result = {"status": "exists", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "github.com already exists in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
result = {"status": "error", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
ret.update({"comment": "", "result": False})
assert ssh_known_hosts.present(name, user) == ret
result = {
"status": "updated",
"error": "",
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "{}'s key saved to .ssh/known_hosts (key: {})".format(name, key)
ret.update(
{
"comment": comt,
"result": True,
"changes": {
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
},
}
)
assert ssh_known_hosts.present(name, user, key=key) == ret
comt = "{}'s key saved to .ssh/known_hosts (fingerprint: {})".format(
name, fingerprint
)
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
|
def test_present():
"""
Test to verify that the specified host is known by the specified user.
"""
name = "github.com"
user = "root"
key = "16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48"
fingerprint = [key]
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
with patch.dict(ssh_known_hosts.__opts__, {"test": True}):
with patch.object(os.path, "isabs", MagicMock(return_value=False)):
comt = 'If not specifying a "user", specify an absolute "config".'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name) == ret
comt = 'Specify either "key" or "fingerprint", not both.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key, fingerprint=[key]) == ret
comt = 'Required argument "enc" if using "key" argument.'
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user, key=key) == ret
mock = MagicMock(side_effect=["exists", "add", "update"])
with patch.dict(ssh_known_hosts.__salt__, {"ssh.check_known_host": mock}):
comt = "Host github.com is already in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be" " added to .ssh/known_hosts"
ret.update({"comment": comt, "result": None})
assert ssh_known_hosts.present(name, user) == ret
comt = "Key for github.com is set to be " "updated in .ssh/known_hosts"
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
with patch.dict(ssh_known_hosts.__opts__, {"test": False}):
result = {"status": "exists", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "github.com already exists in .ssh/known_hosts"
ret.update({"comment": comt, "result": True})
assert ssh_known_hosts.present(name, user) == ret
result = {"status": "error", "error": ""}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
ret.update({"comment": "", "result": False})
assert ssh_known_hosts.present(name, user) == ret
result = {
"status": "updated",
"error": "",
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
}
mock = MagicMock(return_value=result)
with patch.dict(ssh_known_hosts.__salt__, {"ssh.set_known_host": mock}):
comt = "{}'s key saved to .ssh/known_hosts (key: {})".format(name, key)
ret.update(
{
"comment": comt,
"result": True,
"changes": {
"new": [{"fingerprint": fingerprint, "key": key}],
"old": "",
},
}
)
assert ssh_known_hosts.present(name, user, key=key) == ret
comt = "{}'s key saved to .ssh/known_hosts (fingerprint: {})".format(
name, fingerprint
)
ret.update({"comment": comt})
assert ssh_known_hosts.present(name, user) == ret
|
44,497 |
def generate_code_to_edit_component_yaml(
component_ref: ComponentReference, component_store: ComponentStore = None,
):
'''Generates code to edit the component YAML text.
This function can work with components loaded from URLs, files or text.
'''
component_spec = component_ref.spec
if not component_spec:
if not component_store:
component_store = ComponentStore.default_store
component_ref = component_store._load_component_spec_in_component_ref(component_ref)
component_spec = component_ref.spec
component_data = component_spec._data
local_path = getattr(component_spec, '_local_path', None)
if not local_path:
if component_ref.url:
# generating local path from the URL
# Removing schema
local_path = component_ref.url[component_ref.url.index('://') + 3:]
# Cleaning path parts
path_parts = [
part for part in local_path.split('/') if part not in ['.', '..', '']
]
# Joining path parts together
local_path = os.path.join(
'.', *[re.sub(r'[^-\w\s.]', '_', part) for part in path_parts]
)
else:
local_path = os.path.join(component_spec._digest, 'component.yaml')
if not local_path.endswith('.yaml'):
warnings.warn(
'The component file does not have the ".yaml" extension: "{}".'.format(local_path)
)
yaml = component_data.decode('utf-8')
quotes = "'''"
if quotes in yaml:
quotes = '"""'
if quotes in yaml:
raise NotImplementedError('Editing components that use both triple single quotes and triple double quotes is not supported.')
editing_code = r"""# ! This code writes to a local file. Inspect the path carefully before running.
# This code writes the edited component to a file and loads it back.
# When satisfied with the edited component, upload it online to share.
import kfp
from pathlib import Path
component_path = '{component_path}'
Path(component_path).parent.mkdir(parents=True, exist_ok=True)
Path(component_path).write_text(r{quotes}
{yaml}{quotes}.lstrip('\n'))
my_op = kfp.components.load_component_from_file('{component_path}')
""".format(
yaml=yaml,
component_path=local_path,
quotes=quotes,
)
return editing_code
|
def generate_code_to_edit_component_yaml(
component_ref: ComponentReference, component_store: Optional[ComponentStore] = None,
):
'''Generates code to edit the component YAML text.
This function can work with components loaded from URLs, files or text.
'''
component_spec = component_ref.spec
if not component_spec:
if not component_store:
component_store = ComponentStore.default_store
component_ref = component_store._load_component_spec_in_component_ref(component_ref)
component_spec = component_ref.spec
component_data = component_spec._data
local_path = getattr(component_spec, '_local_path', None)
if not local_path:
if component_ref.url:
# generating local path from the URL
# Removing schema
local_path = component_ref.url[component_ref.url.index('://') + 3:]
# Cleaning path parts
path_parts = [
part for part in local_path.split('/') if part not in ['.', '..', '']
]
# Joining path parts together
local_path = os.path.join(
'.', *[re.sub(r'[^-\w\s.]', '_', part) for part in path_parts]
)
else:
local_path = os.path.join(component_spec._digest, 'component.yaml')
if not local_path.endswith('.yaml'):
warnings.warn(
'The component file does not have the ".yaml" extension: "{}".'.format(local_path)
)
yaml = component_data.decode('utf-8')
quotes = "'''"
if quotes in yaml:
quotes = '"""'
if quotes in yaml:
raise NotImplementedError('Editing components that use both triple single quotes and triple double quotes is not supported.')
editing_code = r"""# ! This code writes to a local file. Inspect the path carefully before running.
# This code writes the edited component to a file and loads it back.
# When satisfied with the edited component, upload it online to share.
import kfp
from pathlib import Path
component_path = '{component_path}'
Path(component_path).parent.mkdir(parents=True, exist_ok=True)
Path(component_path).write_text(r{quotes}
{yaml}{quotes}.lstrip('\n'))
my_op = kfp.components.load_component_from_file('{component_path}')
""".format(
yaml=yaml,
component_path=local_path,
quotes=quotes,
)
return editing_code
|
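The quote-selection logic above exists because the YAML text is embedded verbatim inside a generated triple-quoted raw string, so the delimiter must not occur in the YAML itself. A hedged, standalone sketch of the same idea (the helper name is made up for illustration):
def pick_triple_quotes(text: str) -> str:
    """Choose a triple-quote delimiter that does not occur in ``text``."""
    for quotes in ("'''", '"""'):
        if quotes not in text:
            return quotes
    raise NotImplementedError(
        'Text contains both triple single quotes and triple double quotes.')
print(pick_triple_quotes("description: 'a component'"))    # '''
print(pick_triple_quotes("cmd: [python, -c, '''...''']"))  # """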
34,702 |
def run(args: argparse.Namespace) -> "NoReturn":
"""Entrypoint for `rasa run`.
Args:
args: The CLI arguments.
"""
import rasa
args.endpoints = rasa.cli.utils.get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
args.credentials = rasa.cli.utils.get_validated_path(
args.credentials, "credentials", DEFAULT_CREDENTIALS_PATH, True
)
if args.enable_api:
if not args.remote_storage:
args.model = _validate_model_path(args.model, "model", DEFAULT_MODELS_PATH)
rasa.run(**vars(args))
return
# if the API is not enable you cannot start without a model
# make sure either a model server, a remote storage, or a local model is
# configured
from rasa.model import get_model
from rasa.core.utils import AvailableEndpoints
# start server if remote storage is configured
if args.remote_storage is not None:
rasa.run(**vars(args))
return
# start server if model server is configured
endpoints = AvailableEndpoints.read_endpoints(args.endpoints)
model_server = endpoints.model if endpoints and endpoints.model else None
if model_server is not None:
rasa.run(**vars(args))
return
# start server if local model found
args.model = _validate_model_path(args.model, "model", DEFAULT_MODELS_PATH)
local_model_set = True
try:
get_model(args.model)
except ModelNotFound:
local_model_set = False
if local_model_set:
rasa.run(**vars(args))
return
rasa.shared.utils.cli.print_error(
f"No model found. You have three options to provide a model:\n"
f"1. Configure a model server in the endpoint configuration and provide "
f"the configuration via '--endpoints'.\n"
f"2. Specify a remote storage via '--remote-storage' to load the model "
f"from.\n"
f"3. Train a model before running the server using `rasa train` and "
f"use '--model' to provide the model path.\n"
f"For more information check {DOCS_BASE_URL}/model-storage."
)
|
def run(args: argparse.Namespace) -> NoReturn:
"""Entrypoint for `rasa run`.
Args:
args: The CLI arguments.
"""
import rasa
args.endpoints = rasa.cli.utils.get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
args.credentials = rasa.cli.utils.get_validated_path(
args.credentials, "credentials", DEFAULT_CREDENTIALS_PATH, True
)
if args.enable_api:
if not args.remote_storage:
args.model = _validate_model_path(args.model, "model", DEFAULT_MODELS_PATH)
rasa.run(**vars(args))
return
    # if the API is not enabled you cannot start without a model
# make sure either a model server, a remote storage, or a local model is
# configured
from rasa.model import get_model
from rasa.core.utils import AvailableEndpoints
# start server if remote storage is configured
if args.remote_storage is not None:
rasa.run(**vars(args))
return
# start server if model server is configured
endpoints = AvailableEndpoints.read_endpoints(args.endpoints)
model_server = endpoints.model if endpoints and endpoints.model else None
if model_server is not None:
rasa.run(**vars(args))
return
# start server if local model found
args.model = _validate_model_path(args.model, "model", DEFAULT_MODELS_PATH)
local_model_set = True
try:
get_model(args.model)
except ModelNotFound:
local_model_set = False
if local_model_set:
rasa.run(**vars(args))
return
rasa.shared.utils.cli.print_error(
f"No model found. You have three options to provide a model:\n"
f"1. Configure a model server in the endpoint configuration and provide "
f"the configuration via '--endpoints'.\n"
f"2. Specify a remote storage via '--remote-storage' to load the model "
f"from.\n"
f"3. Train a model before running the server using `rasa train` and "
f"use '--model' to provide the model path.\n"
f"For more information check {DOCS_BASE_URL}/model-storage."
)
|
46,915 |
def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None:
"""Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict.
Silently allows extra pl keys (like teacher.) Puts all ckpt models into CPU RAM at once!
Args:
pl_ckpt_path: (str) path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files.
If a directory is passed, all .ckpt files inside it will be averaged!
hf_src_model_dir: (str) path to a directory containing a correctly shaped checkpoint
save_path: (str) directory to save the new model
"""
hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir)
if os.path.isfile(pl_ckpt_path):
ckpt_files = [pl_ckpt_path]
else:
assert os.path.isdir(pl_ckpt_path)
ckpt_files = list(Path(pl_ckpt_path).glob("*.ckpt"))
assert ckpt_files, f"could not find any ckpt files inside the {pl_ckpt_path} directory"
if len(ckpt_files) > 1:
logger.info(f"averaging {ckpt_files}")
state_dicts = [sanitize(torch.load(x, map_location="cpu")["state_dict"]) for x in ckpt_files]
state_dict = average_state_dicts(state_dicts)
missing, unexpected = hf_model.load_state_dict(state_dict, strict=False)
assert not missing, f"missing keys: {missing}"
hf_model.save_pretrained(save_path)
try:
tok = AutoTokenizer.from_pretrained(hf_src_model_dir)
tok.save_pretrained(save_path)
except Exception:
pass
# dont copy tokenizer if cant
|
def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None:
"""Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict.
Silently allows extra pl keys (like teacher.) Puts all ckpt models into CPU RAM at once!
Args:
pl_ckpt_path (:obj:`str`): Path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files.
If a directory is passed, all .ckpt files inside it will be averaged!
hf_src_model_dir: (str) path to a directory containing a correctly shaped checkpoint
save_path: (str) directory to save the new model
"""
hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir)
if os.path.isfile(pl_ckpt_path):
ckpt_files = [pl_ckpt_path]
else:
assert os.path.isdir(pl_ckpt_path)
ckpt_files = list(Path(pl_ckpt_path).glob("*.ckpt"))
assert ckpt_files, f"could not find any ckpt files inside the {pl_ckpt_path} directory"
if len(ckpt_files) > 1:
logger.info(f"averaging {ckpt_files}")
state_dicts = [sanitize(torch.load(x, map_location="cpu")["state_dict"]) for x in ckpt_files]
state_dict = average_state_dicts(state_dicts)
missing, unexpected = hf_model.load_state_dict(state_dict, strict=False)
assert not missing, f"missing keys: {missing}"
hf_model.save_pretrained(save_path)
try:
tok = AutoTokenizer.from_pretrained(hf_src_model_dir)
tok.save_pretrained(save_path)
except Exception:
pass
    # don't copy the tokenizer if we can't
|
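average_state_dicts is referenced but not shown in this snippet; below is a plausible sketch of such a helper. It is an assumption about its behaviour for illustration, not the project's actual implementation: a key-wise mean over checkpoints, casting to float for the division.
from typing import Dict, List
import torch
def average_state_dicts(state_dicts: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    """Key-wise mean of several checkpoints that share keys and shapes (assumed behaviour)."""
    n = len(state_dicts)
    return {k: sum(sd[k].float() for sd in state_dicts) / n
            for k in state_dicts[0]}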
57,436 |
def generate_index_from_links(links: List[Link], with_headers: bool):
if with_headers:
output = main_index_template(links, True)
else:
output = main_index_template(links, True, MINIMAL_INDEX_TEMPLATE)
return output
|
def generate_index_from_links(links: List[Link], with_headers: bool):
if with_headers:
output = main_index_template(links)
else:
        output = main_index_template(links, template=MINIMAL_INDEX_TEMPLATE)
    return output
|
31,083 |
def get_user_if_by_mail(client, email):
"""
Search user by email, if the user exists return the user id, else return ""
"""
user_id = ''
user_where = f"Email='{email}'"
res = client.search_user_profile(email, user_where)
if res.status_code == 200:
res_json = res.json()
search_records = res_json.get('searchRecords')
if len(search_records) > 0:
for search_record in search_records:
user_id = search_record.get('Id')
return user_id
|
def get_user_id_by_mail(client, email):
"""
Search user by email, if the user exists return the user id, else return ""
"""
user_id = ''
user_where = f"Email='{email}'"
res = client.search_user_profile(email, user_where)
if res.status_code == 200:
res_json = res.json()
search_records = res_json.get('searchRecords')
if len(search_records) > 0:
for search_record in search_records:
user_id = search_record.get('Id')
return user_id
|
32,540 |
def censys_view_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Returns host information for the specified IP address or structured certificate data for the specified SHA-256
"""
index = args.get('index', '')
query = args.get('query', '')
res = client.censys_view_request(index, query)
if index == 'ipv4':
result = res.get('result', {})
content = {
'Name': result.get('autonomous_system', {}).get('name'),
'Bgp Prefix': result.get('autonomous_system', {}).get('bgp_prefix'),
'ASN': result.get('autonomous_system', {}).get('asn'),
'Service': [{
'Port': service.get('port'),
'Service Name': service.get('service_name')
} for service in result.get('services', [])],
'Last Updated': result.get('last_updated_at')
}
city = result.get('location', {}).get('city')
province = result.get('location', {}).get('province')
postal = result.get('location', {}).get('postal_code')
country_code = result.get('location', {}).get('country_code')
country = result.get('location', {}).get('country')
description = ', '.join(filter(None, [city, province, postal, country_code]))
lat = result.get('location', {}).get('coordinates', {}).get('latitude')
lon = result.get('location', {}).get('coordinates', {}).get('longitude')
indicator = Common.IP(
ip=query,
dbot_score=Common.DBotScore(indicator=query,
indicator_type=DBotScoreType.IP,
score=Common.DBotScore.NONE),
asn=result.get('autonomous_system', {}).get('asn'),
geo_latitude=str(lat) if lat else None,
geo_longitude=str(lon) if lon else None,
geo_description=description or None,
geo_country=country,
as_owner=result.get('autonomous_system', {}).get('name'))
human_readable = tableToMarkdown(f'Information for IP {query}', content)
return CommandResults(
readable_output=human_readable,
outputs_prefix='Censys.View',
outputs_key_field='ip',
outputs=result,
indicator=indicator,
raw_response=res
)
else:
metadata = res.get('metadata', {})
content = {
'SHA 256': res.get('fingerprint_sha256'),
'Tags': res.get('tags'),
'Source': metadata.get('source'),
'Added': metadata.get('added_at'),
'Updated': metadata.get('updated_at')
}
human_readable = tableToMarkdown('Information for certificate', content)
return CommandResults(
readable_output=human_readable,
outputs_prefix='Censys.View',
outputs_key_field='fingerprint_sha256',
outputs=res,
raw_response=res
)
|
def censys_view_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Returns host information for the specified IP address or structured certificate data for the specified SHA-256
"""
index = args.get('index', '')
query = args.get('query', '')
res = client.censys_view_request(index, query)
if index == 'ipv4':
result = res.get('result', {})
content = {
'Name': result.get('autonomous_system', {}).get('name'),
'Bgp Prefix': result.get('autonomous_system', {}).get('bgp_prefix'),
'ASN': result.get('autonomous_system', {}).get('asn'),
'Service': [{
'Port': service.get('port'),
'Service Name': service.get('service_name')
} for service in result.get('services', [])],
'Last Updated': result.get('last_updated_at')
}
city = result.get('location', {}).get('city')
province = result.get('location', {}).get('province')
postal = result.get('location', {}).get('postal_code')
        country_code = demisto.get(result, 'location.country_code')
country = result.get('location', {}).get('country')
description = ', '.join(filter(None, [city, province, postal, country_code]))
lat = result.get('location', {}).get('coordinates', {}).get('latitude')
lon = result.get('location', {}).get('coordinates', {}).get('longitude')
indicator = Common.IP(
ip=query,
dbot_score=Common.DBotScore(indicator=query,
indicator_type=DBotScoreType.IP,
score=Common.DBotScore.NONE),
asn=result.get('autonomous_system', {}).get('asn'),
geo_latitude=str(lat) if lat else None,
geo_longitude=str(lon) if lon else None,
geo_description=description or None,
geo_country=country,
as_owner=result.get('autonomous_system', {}).get('name'))
human_readable = tableToMarkdown(f'Information for IP {query}', content)
return CommandResults(
readable_output=human_readable,
outputs_prefix='Censys.View',
outputs_key_field='ip',
outputs=result,
indicator=indicator,
raw_response=res
)
else:
metadata = res.get('metadata', {})
content = {
'SHA 256': res.get('fingerprint_sha256'),
'Tags': res.get('tags'),
'Source': metadata.get('source'),
'Added': metadata.get('added_at'),
'Updated': metadata.get('updated_at')
}
human_readable = tableToMarkdown('Information for certificate', content)
return CommandResults(
readable_output=human_readable,
outputs_prefix='Censys.View',
outputs_key_field='fingerprint_sha256',
outputs=res,
raw_response=res
)
|
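The modified version reads the nested country code with `demisto.get`, which resolves a dotted path instead of chaining `.get()` calls; a stand-alone sketch of that lookup pattern (not the actual XSOAR helper), assuming plain nested dicts:

def nested_get(obj, dotted_path, default=None):
    # Walk a dict along a dotted path, e.g. nested_get(result, 'location.country_code').
    current = obj
    for part in dotted_path.split('.'):
        if not isinstance(current, dict):
            return default
        current = current.get(part, default)
    return current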
12,501 |
def generate_stub_for_c_module(module_name: str,
target: str,
sigs: Optional[Dict[str, str]] = None,
class_sigs: Optional[Dict[str, str]] = None) -> None:
"""Generate stub for C module.
This combines simple runtime introspection (looking for docstrings and attributes
with simple builtin types) and signatures inferred from .rst documentation (if given).
If directory for target doesn't exist it will be created. Existing stub
will be overwritten.
"""
module = importlib.import_module(module_name)
assert is_c_module(module), f'{module_name} is not a C module'
subdir = os.path.dirname(target)
if subdir and not os.path.isdir(subdir):
os.makedirs(subdir)
imports: List[str] = []
functions: List[str] = []
done = set()
items = sorted(module.__dict__.items(), key=lambda x: x[0])
for name, obj in items:
if is_c_function(obj):
generate_c_function_stub(module, name, obj, functions, imports=imports, sigs=sigs)
done.add(name)
types: List[str] = []
for name, obj in items:
if name.startswith('__') and name.endswith('__'):
continue
if is_c_type(obj):
generate_c_type_stub(module, name, obj, types, imports=imports, sigs=sigs,
class_sigs=class_sigs)
done.add(name)
variables = []
for name, obj in items:
if name.startswith('__') and name.endswith('__'):
continue
if name not in done and not inspect.ismodule(obj):
type_str = strip_or_import(get_type_fullname(type(obj)), module, imports)
variables.append(f'{name}: {type_str}')
output = list(sorted(set(imports)))
for line in variables:
output.append(line)
for line in types:
if line.startswith('class') and output and output[-1]:
output.append('')
output.append(line)
if output and functions:
output.append('')
for line in functions:
output.append(line)
output = add_typing_import(output)
with open(target, 'w') as file:
for line in output:
file.write(f'{line}\n')
|
def generate_stub_for_c_module(module_name: str,
target: str,
sigs: Optional[Dict[str, str]] = None,
class_sigs: Optional[Dict[str, str]] = None) -> None:
"""Generate stub for C module.
This combines simple runtime introspection (looking for docstrings and attributes
with simple builtin types) and signatures inferred from .rst documentation (if given).
If directory for target doesn't exist it will be created. Existing stub
will be overwritten.
"""
module = importlib.import_module(module_name)
assert is_c_module(module), f'{module_name} is not a C module'
subdir = os.path.dirname(target)
if subdir and not os.path.isdir(subdir):
os.makedirs(subdir)
imports: List[str] = []
functions: List[str] = []
done = set()
items = sorted(module.__dict__.items(), key=lambda x: x[0])
for name, obj in items:
if is_c_function(obj):
generate_c_function_stub(module, name, obj, functions, imports=imports, sigs=sigs)
done.add(name)
types: List[str] = []
for name, obj in items:
if name.startswith('__') and name.endswith('__'):
continue
if is_c_type(obj):
generate_c_type_stub(module, name, obj, types, imports=imports, sigs=sigs,
class_sigs=class_sigs)
done.add(name)
variables = []
for name, obj in items:
if name.startswith('__') and name.endswith('__'):
continue
if name not in done and not inspect.ismodule(obj):
type_str = strip_or_import(get_type_fullname(type(obj)), module, imports)
variables.append(f'{name}: {type_str}')
output = sorted(set(imports))
for line in variables:
output.append(line)
for line in types:
if line.startswith('class') and output and output[-1]:
output.append('')
output.append(line)
if output and functions:
output.append('')
for line in functions:
output.append(line)
output = add_typing_import(output)
with open(target, 'w') as file:
for line in output:
file.write(f'{line}\n')
|
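A hedged usage sketch for the stub generator above; `_heapq` is used only because it is a C extension bundled with CPython, and the output path is purely illustrative:

# Writes a .pyi stub for the builtin _heapq extension module into ./out/.
generate_stub_for_c_module('_heapq', target='out/_heapq.pyi')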
41,769 |
def _get_parallel_coordinate_plot(study: Study, params: Optional[List[str]] = None) -> go.Figure:
layout = go.Layout(
title='Parallel Coordinate Plot',
)
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
logger.warning('Your study does not have any completed trials.')
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is not None:
for input_p_name in params:
if input_p_name not in all_params:
                raise ValueError('Parameter {} does not exist in your study.'.format(input_p_name))
all_params = set(params)
sorted_params = sorted(list(all_params))
dims = [{
'label': 'Objective Value',
'values': tuple([t.value for t in trials]),
'range': (min([t.value for t in trials]), max([t.value for t in trials]))
}]
for p_name in sorted_params:
values = []
for t in trials:
if p_name in t.params:
values.append(t.params[p_name])
is_categorical = False
try:
tuple(map(float, values))
except (TypeError, ValueError):
vocab = defaultdict(lambda: len(vocab)) # type: DefaultDict[str, int]
values = [vocab[v] for v in values]
is_categorical = True
dim = {
'label': p_name,
'values': tuple(values),
'range': (min(values), max(values))
}
if is_categorical:
dim['tickvals'] = list(range(len(vocab)))
dim['ticktext'] = list(sorted(vocab.items(), key=lambda x: x[1]))
dims.append(dim)
traces = [
go.Parcoords(
dimensions=dims,
line={
'color': dims[0]['values'],
'colorscale': 'blues',
'colorbar': {'title': 'Objective Value'},
'showscale': True,
'reversescale': study.direction == StudyDirection.MINIMIZE,
}
)
]
figure = go.Figure(data=traces, layout=layout)
return figure
|
def _get_parallel_coordinate_plot(study: Study, params: Optional[List[str]] = None) -> 'go.Figure':
layout = go.Layout(
title='Parallel Coordinate Plot',
)
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
logger.warning('Your study does not have any completed trials.')
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is not None:
for input_p_name in params:
if input_p_name not in all_params:
                raise ValueError('Parameter {} does not exist in your study.'.format(input_p_name))
all_params = set(params)
sorted_params = sorted(list(all_params))
dims = [{
'label': 'Objective Value',
'values': tuple([t.value for t in trials]),
'range': (min([t.value for t in trials]), max([t.value for t in trials]))
}]
for p_name in sorted_params:
values = []
for t in trials:
if p_name in t.params:
values.append(t.params[p_name])
is_categorical = False
try:
tuple(map(float, values))
except (TypeError, ValueError):
vocab = defaultdict(lambda: len(vocab)) # type: DefaultDict[str, int]
values = [vocab[v] for v in values]
is_categorical = True
dim = {
'label': p_name,
'values': tuple(values),
'range': (min(values), max(values))
}
if is_categorical:
dim['tickvals'] = list(range(len(vocab)))
dim['ticktext'] = list(sorted(vocab.items(), key=lambda x: x[1]))
dims.append(dim)
traces = [
go.Parcoords(
dimensions=dims,
line={
'color': dims[0]['values'],
'colorscale': 'blues',
'colorbar': {'title': 'Objective Value'},
'showscale': True,
'reversescale': study.direction == StudyDirection.MINIMIZE,
}
)
]
figure = go.Figure(data=traces, layout=layout)
return figure
|
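The categorical branch above encodes string parameter values with a self-growing vocabulary; a tiny isolated sketch of that `defaultdict` trick, assuming plain string categories:

from collections import defaultdict

values = ['adam', 'sgd', 'adam', 'rmsprop']
vocab = defaultdict(lambda: len(vocab))  # each unseen key is assigned the next integer id
encoded = [vocab[v] for v in values]
# encoded == [0, 1, 0, 2] and vocab == {'adam': 0, 'sgd': 1, 'rmsprop': 2}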
8,104 |
def _get_instrument_meta(instrument, telescope, observatory, wavelength, exposure):
"""
Function to correctly name keywords from keyword arguments
"""
coord = {}
if instrument is not None:
coord['instrume'] = str(instrument)
if telescope is not None:
coord['telescop'] = str(telescope)
if observatory is not None:
coord['obsrvtry'] = str(observatory)
if wavelength is not None:
coord['wavelnth'] = wavelength.to_value()
coord['waveunit'] = wavelength.unit.name
if exposure is not None:
coord['exptime'] = exposure.to_value(u.s)
return coord
|
def _get_instrument_meta(instrument, telescope, observatory, wavelength, exposure):
"""
Function to correctly name keywords from keyword arguments
"""
coord = {}
if instrument is not None:
coord['instrume'] = str(instrument)
if telescope is not None:
coord['telescop'] = str(telescope)
if observatory is not None:
coord['obsrvtry'] = str(observatory)
if wavelength is not None:
coord['wavelnth'] = wavelength.to_value()
coord['waveunit'] = wavelength.unit.to_string("fits")
if exposure is not None:
coord['exptime'] = exposure.to_value(u.s)
return coord
|
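A hedged usage sketch of the keyword mapper above with astropy quantities; the instrument and telescope names are placeholders, not values taken from the original code:

import astropy.units as u

meta = _get_instrument_meta(instrument='AIA', telescope='SDO', observatory=None,
                            wavelength=171 * u.angstrom, exposure=2 * u.s)
# meta now contains FITS-style keys such as 'instrume', 'wavelnth', 'waveunit' and 'exptime'.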
25,762 |
def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1,
max_iterations=100, track_iterations=False, **kwargs):
'''
Iterative linear optimization updating the line parameters for passive
AC and DC lines. This is helpful when line expansion is enabled. After each
    successful solving, line impedances and line resistances are recalculated
    based on the optimization result. If warmstart is possible, it uses the
    result from the previous iteration to speed up the optimization.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
msq_threshold: float, default 0.05
Maximal mean square difference between optimized line capacity of
the current and the previous iteration. As soon as this threshold is
undercut, and the number of iterations is bigger than 'min_iterations'
the iterative optimization stops
min_iterations : integer, default 1
        Minimal number of iterations to run, regardless of whether the msq_threshold
        is already undercut
max_iterations : integer, default 100
        Maximal number of iterations to run, regardless of whether the msq_threshold
        is already undercut
track_iterations: bool, default False
If True, the intermediate branch capacity steps and values of the
objective function are recorded for each iteration. The values of
iteration 0 stand for the starting point.
**kwargs
Keyword arguments of the lopf function which runs at each iteration
'''
n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier)
ext_i = get_extendable_i(n, 'Line')
typed_i = n.lines.query('type != ""').index
ext_untyped_i = ext_i.difference(typed_i)
ext_typed_i = ext_i & typed_i
base_s_nom = (np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom))
n.lines.loc[ext_typed_i, 'num_parallel'] = (n.lines.s_nom/base_s_nom)[ext_typed_i]
def update_line_params(n, s_nom_prev):
factor = n.lines.s_nom_opt / s_nom_prev
for attr, carrier in (('x', 'AC'), ('r', 'DC')):
ln_i = (n.lines.query('carrier == @carrier').index & ext_untyped_i)
n.lines.loc[ln_i, attr] /= factor[ln_i]
ln_i = ext_i & typed_i
n.lines.loc[ln_i, 'num_parallel'] = (n.lines.s_nom_opt/base_s_nom)[ln_i]
def msq_diff(n, s_nom_prev):
lines_err = np.sqrt((s_nom_prev - n.lines.s_nom_opt).pow(2).mean()) / \
n.lines['s_nom_opt'].mean()
logger.info(f"Mean square difference after iteration {iteration} is "
f"{lines_err}")
return lines_err
def save_optimal_capacities(n, iteration, status):
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_{iteration}'] = n.df(c)[f'{attr}_opt']
setattr(n, f"status_{iteration}", status)
setattr(n, f"objective_{iteration}", n.objective)
n.iteration = iteration
if track_iterations:
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_0'] = n.df(c)[f'{attr}']
iteration = 1
kwargs['store_basis'] = True
diff = msq_threshold
while diff >= msq_threshold or iteration < min_iterations:
if iteration > max_iterations:
logger.info(f'Iteration {iteration} beyond max_iterations '
f'{max_iterations}. Stopping ...')
break
s_nom_prev = n.lines.s_nom_opt if iteration else n.lines.s_nom
kwargs['warmstart'] = bool(iteration and ('basis_fn' in n.__dir__()))
status, termination_condition = network_lopf(n, snapshots, **kwargs)
        assert status == 'ok', (f'Optimization failed with status {status} '
                                f'and termination {termination_condition}')
if track_iterations:
save_optimal_capacities(n, iteration, status)
update_line_params(n, s_nom_prev)
diff = msq_diff(n, s_nom_prev)
iteration += 1
logger.info('Running last lopf with fixed branches, overwrite p_nom '
'for links and s_nom for lines')
ext_links_i = get_extendable_i(n, 'Link')
n.lines[['s_nom', 's_nom_extendable']] = n.lines['s_nom_opt'], False
n.links[['p_nom', 'p_nom_extendable']] = n.links['p_nom_opt'], False
network_lopf(n, snapshots, **kwargs)
n.lines.loc[ext_i, 's_nom_extendable'] = True
n.links.loc[ext_links_i, 'p_nom_extendable'] = True
|
def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1,
max_iterations=100, track_iterations=False, **kwargs):
'''
Iterative linear optimization updating the line parameters for passive
AC and DC lines. This is helpful when line expansion is enabled. After each
    successful solving, line impedances and line resistances are recalculated
    based on the optimization result. If warmstart is possible, it uses the
    result from the previous iteration to speed up the optimization.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
msq_threshold: float, default 0.05
Maximal mean square difference between optimized line capacity of
the current and the previous iteration. As soon as this threshold is
undercut, and the number of iterations is bigger than 'min_iterations'
the iterative optimization stops
min_iterations : integer, default 1
        Minimal number of iterations to run, regardless of whether the msq_threshold
        is already undercut
max_iterations : integer, default 100
        Maximal number of iterations to run, regardless of whether the msq_threshold
        is already undercut
track_iterations: bool, default False
If True, the intermediate branch capacity steps and values of the
objective function are recorded for each iteration. The values of
iteration 0 represent the initial state.
**kwargs
Keyword arguments of the lopf function which runs at each iteration
'''
n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier)
ext_i = get_extendable_i(n, 'Line')
typed_i = n.lines.query('type != ""').index
ext_untyped_i = ext_i.difference(typed_i)
ext_typed_i = ext_i & typed_i
base_s_nom = (np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom))
n.lines.loc[ext_typed_i, 'num_parallel'] = (n.lines.s_nom/base_s_nom)[ext_typed_i]
def update_line_params(n, s_nom_prev):
factor = n.lines.s_nom_opt / s_nom_prev
for attr, carrier in (('x', 'AC'), ('r', 'DC')):
ln_i = (n.lines.query('carrier == @carrier').index & ext_untyped_i)
n.lines.loc[ln_i, attr] /= factor[ln_i]
ln_i = ext_i & typed_i
n.lines.loc[ln_i, 'num_parallel'] = (n.lines.s_nom_opt/base_s_nom)[ln_i]
def msq_diff(n, s_nom_prev):
lines_err = np.sqrt((s_nom_prev - n.lines.s_nom_opt).pow(2).mean()) / \
n.lines['s_nom_opt'].mean()
logger.info(f"Mean square difference after iteration {iteration} is "
f"{lines_err}")
return lines_err
def save_optimal_capacities(n, iteration, status):
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_{iteration}'] = n.df(c)[f'{attr}_opt']
setattr(n, f"status_{iteration}", status)
setattr(n, f"objective_{iteration}", n.objective)
n.iteration = iteration
if track_iterations:
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_0'] = n.df(c)[f'{attr}']
iteration = 1
kwargs['store_basis'] = True
diff = msq_threshold
while diff >= msq_threshold or iteration < min_iterations:
if iteration > max_iterations:
logger.info(f'Iteration {iteration} beyond max_iterations '
f'{max_iterations}. Stopping ...')
break
s_nom_prev = n.lines.s_nom_opt if iteration else n.lines.s_nom
kwargs['warmstart'] = bool(iteration and ('basis_fn' in n.__dir__()))
status, termination_condition = network_lopf(n, snapshots, **kwargs)
        assert status == 'ok', (f'Optimization failed with status {status} '
                                f'and termination {termination_condition}')
if track_iterations:
save_optimal_capacities(n, iteration, status)
update_line_params(n, s_nom_prev)
diff = msq_diff(n, s_nom_prev)
iteration += 1
logger.info('Running last lopf with fixed branches, overwrite p_nom '
'for links and s_nom for lines')
ext_links_i = get_extendable_i(n, 'Link')
n.lines[['s_nom', 's_nom_extendable']] = n.lines['s_nom_opt'], False
n.links[['p_nom', 'p_nom_extendable']] = n.links['p_nom_opt'], False
network_lopf(n, snapshots, **kwargs)
n.lines.loc[ext_i, 's_nom_extendable'] = True
n.links.loc[ext_links_i, 'p_nom_extendable'] = True
|
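The stopping rule in `msq_diff` is a root-mean-square capacity change normalised by the mean optimal capacity; a compact numpy sketch of that criterion, assuming two aligned arrays of line capacities:

import numpy as np

def capacity_msq_diff(s_nom_prev, s_nom_opt):
    # Normalised RMS change in line capacities between two iterations.
    prev = np.asarray(s_nom_prev, dtype=float)
    opt = np.asarray(s_nom_opt, dtype=float)
    return np.sqrt(np.mean((prev - opt) ** 2)) / opt.mean()

# Iteration stops once this value drops below msq_threshold (default 0.05)
# and at least min_iterations have been completed.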
44,884 |
def test_queued_states_have_default_times():
now = pendulum.now("utc")
assert now - Queued().start_time < datetime.timedelta(seconds=0.1)
|
def test_queued_states_have_start_times():
now = pendulum.now("utc")
assert now - Queued().start_time < datetime.timedelta(seconds=0.1)
|
6,910 |
def update_tags(doc, tags):
"""
Adds tags for documents
	:param doc: Document to be added to global tags
	:param tags: Comma-separated string of tags to apply to the document
"""
new_tags = list(set(tag.strip() for tag in tags.split(",") if tag))
for tag in new_tags:
if not frappe.db.exists("Tag Link", {"parenttype": doc.doctype, "parent": doc.name, "tag": tag}):
frappe.get_doc({
"doctype": "Tag Link",
"document_type": doc.doctype,
"document_name": doc.name,
"parenttype": doc.doctype,
"parent": doc.name,
"title": doc.get_title() or '',
"tag": tag
}).insert(ignore_permissions=True)
existing_tags = [tag.tag for tag in frappe.get_list("Tag Link", filters={
"document_type": doc.doctype,
"document_name": doc.name
}, fields=["tag"])]
deleted_tags = get_deleted_tags(new_tags, existing_tags)
if deleted_tags:
for tag in deleted_tags:
delete_tag_for_document(doc.doctype, doc.name, tag)
|
def update_tags(doc, tags):
"""
Adds tags for documents
	:param doc: Document to be added to global tags
	:param tags: Comma-separated string of tags to apply to the document
"""
new_tags = {tag.strip() for tag in tags.split(",") if tag}
for tag in new_tags:
if not frappe.db.exists("Tag Link", {"parenttype": doc.doctype, "parent": doc.name, "tag": tag}):
frappe.get_doc({
"doctype": "Tag Link",
"document_type": doc.doctype,
"document_name": doc.name,
"parenttype": doc.doctype,
"parent": doc.name,
"title": doc.get_title() or '',
"tag": tag
}).insert(ignore_permissions=True)
existing_tags = [tag.tag for tag in frappe.get_list("Tag Link", filters={
"document_type": doc.doctype,
"document_name": doc.name
}, fields=["tag"])]
deleted_tags = get_deleted_tags(new_tags, existing_tags)
if deleted_tags:
for tag in deleted_tags:
delete_tag_for_document(doc.doctype, doc.name, tag)
|
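`get_deleted_tags` is not shown in this pair; under the plain assumption that it returns tags currently linked to the document but absent from the submitted list, it reduces to a set difference:

def get_deleted_tags_sketch(new_tags, existing_tags):
    # Tags that were on the document before but are no longer requested.
    return list(set(existing_tags) - set(new_tags))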
7,684 |
def _update_header(file_path, config, substring, regex, data, ci):
found = False
with open(file_path) as file_read:
content = orig_content = file_read.read()
if not content.strip():
return False
shebang_line = None
if content.startswith('#!/'):
shebang_line, content = content.split('\n', 1)
for match in regex.finditer(content):
if substring in match.group():
found = True
match_end = content[match.end():].lstrip()
match_end = '\n' + match_end if match_end else match_end
content = content[:match.start()] + gen_header(data | config) + match_end
if shebang_line:
content = shebang_line + '\n' + content
if content != orig_content:
msg = 'Incorrect header in {}' if ci else cformat('%{green!}Updating header of %{blue!}{}')
print(msg.format(os.path.relpath(file_path)))
if not ci:
with open(file_path, 'w') as file_write:
file_write.write(content)
return True
elif not found:
msg = 'Missing header in {}' if ci else cformat('%{red!}Missing header%{reset} in %{blue!}{}')
print(msg.format(os.path.relpath(file_path)))
return True
|
def _update_header(file_path, config, substring, regex, data, ci):
found = False
with open(file_path) as file_read:
content = orig_content = file_read.read()
if not content.strip():
return False
shebang_line = None
if content.startswith('#!/'):
shebang_line, content = content.split('\n', 1)
for match in regex.finditer(content):
if substring in match.group():
found = True
match_end = content[match.end():].lstrip()
match_end = f'\n{match_end}' if match_end else match_end
content = content[:match.start()] + gen_header(data | config) + match_end
if shebang_line:
content = shebang_line + '\n' + content
if content != orig_content:
msg = 'Incorrect header in {}' if ci else cformat('%{green!}Updating header of %{blue!}{}')
print(msg.format(os.path.relpath(file_path)))
if not ci:
with open(file_path, 'w') as file_write:
file_write.write(content)
return True
elif not found:
msg = 'Missing header in {}' if ci else cformat('%{red!}Missing header%{reset} in %{blue!}{}')
print(msg.format(os.path.relpath(file_path)))
return True
|
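The header updater above detaches a leading shebang before rewriting and re-attaches it afterwards; a minimal stand-alone sketch of that pattern, with `render_header` standing in for the real `gen_header(data | config)`:

def replace_header_keep_shebang(content, header_regex, render_header):
    # Preserve an optional '#!/...' first line, rewrite the matched header, then restore the shebang.
    shebang = None
    if content.startswith('#!/'):
        shebang, content = content.split('\n', 1)
    match = header_regex.search(content)
    if match:
        rest = content[match.end():].lstrip()
        content = content[:match.start()] + render_header() + ('\n' + rest if rest else '')
    return f'{shebang}\n{content}' if shebang else content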
6,234 |
def pilotWrapperScript(pilotFilesCompressedEncodedDict=None,
pilotOptions='',
pilotExecDir='',
envVariables=None,
location=''):
""" Returns the content of the pilot wrapper script.
The pilot wrapper script is a bash script that invokes the system python. Linux only.
:param pilotFilesCompressedEncodedDict: this is a possible dict of name:compressed+encoded content files.
the proxy can be part of this, and of course the pilot files
:type pilotFilesCompressedEncodedDict: dict
:param pilotOptions: options with which to start the pilot
:type pilotOptions: string
:param pilotExecDir: pilot execution directory
:type pilotExecDir: string
:param envVariables: dictionary of environment variables
:type envVariables: dict
:param location: location where to get the pilot files
:type location: string
:returns: content of the pilot wrapper
:rtype: string
"""
if pilotFilesCompressedEncodedDict is None:
pilotFilesCompressedEncodedDict = {}
if envVariables is None:
envVariables = {}
compressedString = ""
# are there some pilot files to unpack? Then we create the unpacking string
for pfName, encodedPf in pilotFilesCompressedEncodedDict.items():
compressedString += """
try:
with open('%(pfName)s', 'wb') as fd:
if sys.version_info < (3,):
fd.write(bz2.decompress(base64.b64decode(\"\"\"%(encodedPf)s\"\"\")))
else:
fd.write(bz2.decompress(base64.b64decode(b'%(encodedPf)s')))
os.chmod('%(pfName)s', stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
except Exception as x:
print(x, file=sys.stderr)
logger.error(x)
shutil.rmtree(pilotWorkingDirectory)
sys.exit(3)
""" % {'encodedPf': encodedPf.decode() if hasattr(encodedPf, "decode") else encodedPf,
'pfName': pfName}
envVariablesString = ""
for name, value in envVariables.items(): # are there some environment variables to add?
envVariablesString += """
os.environ[\"%(name)s\"]=\"%(value)s\"
""" % {'name': name,
'value': value}
# add X509_USER_PROXY to establish pilot env in Cluster WNs
if 'proxy' in pilotFilesCompressedEncodedDict:
envVariablesString += """
os.environ['X509_USER_PROXY'] = os.path.join(pilotWorkingDirectory, 'proxy')
"""
# now building the actual pilot wrapper
localPilot = pilotWrapperContent % {'pilotExecDir': pilotExecDir}
if compressedString:
localPilot += """
# unpacking lines
logger.info("But first unpacking pilot files")
%s
""" % compressedString
if envVariablesString:
localPilot += """
# Modifying the environment
%s
""" % envVariablesString
if location:
localPilot += """
# Getting the pilot files
logger.info("Getting the pilot files from %(location)s")
location = '%(location)s'.replace(' ', '').split(',')
import random
random.shuffle(location)
# we try from the available locations
locs = [os.path.join('https://', loc) for loc in location]
locations = locs + [os.path.join(loc, 'pilot') for loc in locs]
  # also adding, as a last resort, the cvmfs location dirac.egi.eu; this one won't contain a valid JSON
locations+=['file:/cvmfs/dirac.egi.eu/pilot/']
for loc in locations:
print('Trying %%s' %% loc)
# Getting the json, tar, and checksum file
try:
# urllib is different between python 2 and 3
if sys.version_info < (3,):
from urllib2 import urlopen as url_library_urlopen
from urllib2 import URLError as url_library_URLError
else:
from urllib.request import urlopen as url_library_urlopen
from urllib.error import URLError as url_library_URLError
for fileName in ['pilot.json', 'pilot.tar', 'checksums.sha512']:
# needs to distinguish whether urlopen method contains the 'context' param
# in theory, it should be available from python 2.7.9
# in practice, some prior versions may be composed of recent urllib version containing the param
if 'context' in url_library_urlopen.__code__.co_varnames:
import ssl
context = ssl._create_unverified_context()
remoteFile = url_library_urlopen(os.path.join(loc, fileName),
timeout=10,
context=context)
else:
remoteFile = url_library_urlopen(os.path.join(loc, fileName),
timeout=10)
localFile = open(fileName, 'wb')
localFile.write(remoteFile.read())
localFile.close()
if fileName != 'pilot.tar':
continue
try:
pt = tarfile.open('pilot.tar', 'r')
pt.extractall()
pt.close()
except Exception as x:
print("tarfile failed with message (this is normal!) %%s" %% repr(x), file=sys.stderr)
logger.error("tarfile failed with message (this is normal!) %%s" %% repr(x))
logger.warn("Trying tar command (tar -xvf pilot.tar)")
res = os.system("tar -xvf pilot.tar")
if res:
logger.error("tar failed with exit code %%d, giving up (this is normal!)" %% int(res))
print("tar failed with exit code %%d, giving up (this is normal!)" %% int(res), file=sys.stderr)
raise
# if we get here we break out of the loop of locations
break
except (url_library_URLError, Exception) as e:
      print('%%s unreachable (this is normal!)' %% loc, file=sys.stderr)
      logger.error('%%s unreachable (this is normal!)' %% loc)
logger.exception(e)
else:
print("None of the locations of the pilot files is reachable", file=sys.stderr)
logger.error("None of the locations of the pilot files is reachable")
sys.exit(-1)
# download was successful, now we check checksums
if os.path.exists('checksums.sha512'):
checksumDict = {}
chkSumFile = open('checksums.sha512', 'rt')
for line in chkSumFile.read().split('\\n'):
if not line.strip(): ## empty lines are ignored
continue
expectedHash, fileName = line.split(' ', 1)
if not os.path.exists(fileName):
continue
logger.info('Checking %%r for checksum', fileName)
fileHash = hashlib.sha512(open(fileName, 'rb').read()).hexdigest()
if fileHash != expectedHash:
print('Checksum mismatch for file %%r' %% fileName, file=sys.stderr)
print('Expected %%r, found %%r' %%(expectedHash, fileHash), file=sys.stderr)
logger.error('Checksum mismatch for file %%r', fileName)
logger.error('Expected %%r, found %%r', expectedHash, fileHash)
sys.exit(-1)
logger.debug('Checksum matched')
""" % {'location': location}
localPilot += """
# now finally launching the pilot script (which should be called dirac-pilot.py)
cmd = "python dirac-pilot.py %s"
logger.info('Executing: %%s' %% cmd)
sys.stdout.flush()
ret = os.system(cmd)
# and cleaning up
shutil.rmtree(pilotWorkingDirectory)
# did it fail?
if ret:
sys.exit(1)
EOF
""" % pilotOptions
return localPilot
|
def pilotWrapperScript(pilotFilesCompressedEncodedDict=None,
pilotOptions='',
pilotExecDir='',
envVariables=None,
location=''):
""" Returns the content of the pilot wrapper script.
The pilot wrapper script is a bash script that invokes the system python. Linux only.
:param pilotFilesCompressedEncodedDict: this is a possible dict of name:compressed+encoded content files.
the proxy can be part of this, and of course the pilot files
:type pilotFilesCompressedEncodedDict: dict
:param pilotOptions: options with which to start the pilot
:type pilotOptions: string
:param pilotExecDir: pilot execution directory
:type pilotExecDir: string
:param envVariables: dictionary of environment variables
:type envVariables: dict
:param location: location where to get the pilot files
:type location: string
:returns: content of the pilot wrapper
:rtype: string
"""
if pilotFilesCompressedEncodedDict is None:
pilotFilesCompressedEncodedDict = {}
if envVariables is None:
envVariables = {}
compressedString = ""
# are there some pilot files to unpack? Then we create the unpacking string
for pfName, encodedPf in pilotFilesCompressedEncodedDict.items():
compressedString += """
try:
with open('%(pfName)s', 'wb') as fd:
if sys.version_info < (3,):
fd.write(bz2.decompress(base64.b64decode(\"\"\"%(encodedPf)s\"\"\")))
else:
fd.write(bz2.decompress(base64.b64decode(b'%(encodedPf)s')))
os.chmod('%(pfName)s', stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
except Exception as x:
print(x, file=sys.stderr)
logger.error(x)
shutil.rmtree(pilotWorkingDirectory)
sys.exit(3)
""" % {'encodedPf': encodedPf.decode() if hasattr(encodedPf, "decode") else encodedPf,
'pfName': pfName}
envVariablesString = ""
for name, value in envVariables.items(): # are there some environment variables to add?
envVariablesString += """
os.environ[\"%(name)s\"]=\"%(value)s\"
""" % {'name': name,
'value': value}
# add X509_USER_PROXY to establish pilot env in Cluster WNs
if 'proxy' in pilotFilesCompressedEncodedDict:
envVariablesString += """
os.environ['X509_USER_PROXY'] = os.path.join(pilotWorkingDirectory, 'proxy')
"""
# now building the actual pilot wrapper
localPilot = pilotWrapperContent % {'pilotExecDir': pilotExecDir}
if compressedString:
localPilot += """
# unpacking lines
logger.info("But first unpacking pilot files")
%s
""" % compressedString
if envVariablesString:
localPilot += """
# Modifying the environment
%s
""" % envVariablesString
if location:
localPilot += """
# Getting the pilot files
logger.info("Getting the pilot files from %(location)s")
location = '%(location)s'.replace(' ', '').split(',')
import random
random.shuffle(location)
# we try from the available locations
locs = [os.path.join('https://', loc) for loc in location]
locations = locs + [os.path.join(loc, 'pilot') for loc in locs]
  # also adding, as a last resort, the cvmfs location dirac.egi.eu; this one won't contain a valid JSON
locations += ['file:/cvmfs/dirac.egi.eu/pilot/']
for loc in locations:
print('Trying %%s' %% loc)
# Getting the json, tar, and checksum file
try:
# urllib is different between python 2 and 3
if sys.version_info < (3,):
from urllib2 import urlopen as url_library_urlopen
from urllib2 import URLError as url_library_URLError
else:
from urllib.request import urlopen as url_library_urlopen
from urllib.error import URLError as url_library_URLError
for fileName in ['pilot.json', 'pilot.tar', 'checksums.sha512']:
# needs to distinguish whether urlopen method contains the 'context' param
# in theory, it should be available from python 2.7.9
# in practice, some prior versions may be composed of recent urllib version containing the param
if 'context' in url_library_urlopen.__code__.co_varnames:
import ssl
context = ssl._create_unverified_context()
remoteFile = url_library_urlopen(os.path.join(loc, fileName),
timeout=10,
context=context)
else:
remoteFile = url_library_urlopen(os.path.join(loc, fileName),
timeout=10)
localFile = open(fileName, 'wb')
localFile.write(remoteFile.read())
localFile.close()
if fileName != 'pilot.tar':
continue
try:
pt = tarfile.open('pilot.tar', 'r')
pt.extractall()
pt.close()
except Exception as x:
print("tarfile failed with message (this is normal!) %%s" %% repr(x), file=sys.stderr)
logger.error("tarfile failed with message (this is normal!) %%s" %% repr(x))
logger.warn("Trying tar command (tar -xvf pilot.tar)")
res = os.system("tar -xvf pilot.tar")
if res:
logger.error("tar failed with exit code %%d, giving up (this is normal!)" %% int(res))
print("tar failed with exit code %%d, giving up (this is normal!)" %% int(res), file=sys.stderr)
raise
# if we get here we break out of the loop of locations
break
except (url_library_URLError, Exception) as e:
      print('%%s unreachable (this is normal!)' %% loc, file=sys.stderr)
      logger.error('%%s unreachable (this is normal!)' %% loc)
logger.exception(e)
else:
print("None of the locations of the pilot files is reachable", file=sys.stderr)
logger.error("None of the locations of the pilot files is reachable")
sys.exit(-1)
# download was successful, now we check checksums
if os.path.exists('checksums.sha512'):
checksumDict = {}
chkSumFile = open('checksums.sha512', 'rt')
for line in chkSumFile.read().split('\\n'):
if not line.strip(): ## empty lines are ignored
continue
expectedHash, fileName = line.split(' ', 1)
if not os.path.exists(fileName):
continue
logger.info('Checking %%r for checksum', fileName)
fileHash = hashlib.sha512(open(fileName, 'rb').read()).hexdigest()
if fileHash != expectedHash:
print('Checksum mismatch for file %%r' %% fileName, file=sys.stderr)
print('Expected %%r, found %%r' %%(expectedHash, fileHash), file=sys.stderr)
logger.error('Checksum mismatch for file %%r', fileName)
logger.error('Expected %%r, found %%r', expectedHash, fileHash)
sys.exit(-1)
logger.debug('Checksum matched')
""" % {'location': location}
localPilot += """
# now finally launching the pilot script (which should be called dirac-pilot.py)
cmd = "python dirac-pilot.py %s"
logger.info('Executing: %%s' %% cmd)
sys.stdout.flush()
ret = os.system(cmd)
# and cleaning up
shutil.rmtree(pilotWorkingDirectory)
# did it fail?
if ret:
sys.exit(1)
EOF
""" % pilotOptions
return localPilot
|
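The embedded wrapper verifies downloaded files against `checksums.sha512`; a stand-alone sketch of that verification step, assuming the same `<sha512 hash> <filename>` line format used above:

import hashlib
import os

def verify_sha512_checksums(checksum_file='checksums.sha512'):
    # Return the files whose sha512 digest does not match the recorded value.
    mismatches = []
    with open(checksum_file, 'rt') as fd:
        for line in fd.read().split('\n'):
            if not line.strip():
                continue
            expected_hash, file_name = line.split(' ', 1)
            file_name = file_name.strip()
            if not os.path.exists(file_name):
                continue
            with open(file_name, 'rb') as data:
                if hashlib.sha512(data.read()).hexdigest() != expected_hash:
                    mismatches.append(file_name)
    return mismatches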
57,774 |
def arduino_set_pin_command(server: Server, args: any) -> CommandResults:
pin_type: str = args.get('pin_type')
prefix: str = "Arduino.DigitalPins" if pin_type == "digital" else "Arduino.AnalogPins"
pin_number: int = int(args.get('pin_number'))
value: int = int(args.get('value'))
result: int = int(server.send_data(f"set:{pin_type}:{pin_number},{value}"))
results = [{
"PinType": "Digital" if pin_type == "digital" else "Analog",
"PinNumber": pin_number,
"PinValue": result
}]
command_results = CommandResults(
outputs_prefix=prefix,
outputs_key_field=['PinNumber', 'PinType'],
outputs=results,
readable_output=tableToMarkdown(f"Set pin {pin_number} on {server.host}({server.port}):", results)
)
return command_results
|
def arduino_set_pin_command(server: Server, args: any) -> CommandResults:
pin_type = args.get('pin_type')
prefix = "Arduino.DigitalPins" if pin_type == "digital" else "Arduino.AnalogPins"
pin_number: int = int(args.get('pin_number'))
value: int = int(args.get('value'))
result: int = int(server.send_data(f"set:{pin_type}:{pin_number},{value}"))
results = [{
"PinType": "Digital" if pin_type == "digital" else "Analog",
"PinNumber": pin_number,
"PinValue": result
}]
command_results = CommandResults(
outputs_prefix=prefix,
outputs_key_field=['PinNumber', 'PinType'],
outputs=results,
readable_output=tableToMarkdown(f"Set pin {pin_number} on {server.host}({server.port}):", results)
)
return command_results
|
25,980 |
def load_arguments(self, _):
acr_arg_type = CLIArgumentType(metavar='ACR_NAME_OR_RESOURCE_ID')
# ACS command argument configuration
with self.argument_context('acs') as c:
c.argument('resource_name', name_type,
completer=get_resource_name_completion_list(
'Microsoft.ContainerService/ContainerServices'),
help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`')
c.argument('name', name_type,
completer=get_resource_name_completion_list(
'Microsoft.ContainerService/ContainerServices'),
help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`')
c.argument('container_service_name', name_type, help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices'))
c.argument('admin_username', options_list=[
'--admin-username', '-u'], default='azureuser')
c.argument('api_version',
help=_get_feature_in_preview_message() + 'Use API version of ACS to perform az acs operations. Available options: 2017-01-31, 2017-07-01. Default: the latest version for the location')
c.argument('dns_name_prefix', options_list=['--dns-prefix', '-d'])
c.argument('orchestrator_type', get_enum_type(
orchestrator_types), options_list=['--orchestrator-type', '-t'])
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('tags', tags_type)
c.argument('disable_browser',
help='Do not open browser after opening a proxy to the cluster web user interface')
with self.argument_context('acs create') as c:
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('master_profile', options_list=['--master-profile', '-m'], type=validate_file_or_dict,
help=_get_feature_in_preview_message() + 'The file or dictionary representation of the master profile. Note it will override any master settings once set')
c.argument('master_vm_size', completer=get_vm_size_completion_list,
help=_get_feature_in_preview_message())
c.argument('agent_count', type=int)
c.argument('generate_ssh_keys', action='store_true', validator=validate_create_parameters,
help='Generate SSH public and private key files if missing')
c.argument('master_osdisk_size', type=int,
help=_get_feature_in_preview_message() + 'The disk size for master pool vms. Unit in GB. Default: corresponding vmsize disk size')
c.argument('master_vnet_subnet_id', type=str,
                   help=_get_feature_in_preview_message() + 'The custom vnet subnet id. Note the agent pools need to use the same vnet if the master subnet is set. Default: ""')
c.argument('master_first_consecutive_static_ip', type=str,
help=_get_feature_in_preview_message() + 'The first consecutive ip used to specify static ip block.')
c.argument('master_storage_profile', get_enum_type(storage_profile_types),
help=_get_feature_in_preview_message() + 'Default: varies based on Orchestrator')
c.argument('agent_profiles', options_list=['--agent-profiles', '-a'], type=validate_file_or_dict,
help=_get_feature_in_preview_message() + 'The file or dictionary representation of the agent profiles. Note it will override any agent settings once set')
c.argument('agent_vm_size', completer=get_vm_size_completion_list,
help='Set the default size for agent pools vms.')
c.argument('agent_osdisk_size', type=int,
help=_get_feature_in_preview_message() + 'Set the default disk size for agent pools vms. Unit in GB. Default: corresponding vmsize disk size')
c.argument('agent_vnet_subnet_id', type=str,
                   help=_get_feature_in_preview_message() + 'Set the default custom vnet subnet id for agent pools. Note the agent pools need to use the same vnet if the master subnet is set. Default: ""')
c.argument('agent_ports', type=validate_list_of_integers,
help=_get_feature_in_preview_message() + 'Set the default ports exposed on the agent pools. Only usable for non-Kubernetes. Default: 8080,4000,80')
c.argument('agent_storage_profile', get_enum_type(storage_profile_types),
help=_get_feature_in_preview_message() + 'Set default storage profile for agent pools. Default: varies based on Orchestrator')
c.argument('windows', action='store_true',
help='If true, set the default osType of agent pools to be Windows.')
c.argument('validate', action='store_true',
help='Generate and validate the ARM template without creating any resources')
c.argument('orchestrator_version', help=_get_feature_in_preview_message(
) + 'Use Orchestrator Version to specify the semantic version for your choice of orchestrator.')
with self.argument_context('acs scale') as c:
c.argument('new_agent_count', type=int)
for scope in ['dcos', 'kubernetes']:
with self.argument_context('acs {} browse'.format(scope)) as c:
c.argument('ssh_key_file', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa'),
completer=FilesCompleter(), help='Path to an SSH key file to use.')
with self.argument_context('acs dcos install-cli') as c:
c.argument('install_location',
default=_get_default_install_location('dcos'))
with self.argument_context('acs kubernetes get-credentials') as c:
c.argument('path', options_list=['--file', '-f'])
c.argument('overwrite_existing', action='store_true',
help='If specified, overwrite any existing credentials.')
with self.argument_context('acs kubernetes install-cli') as c:
c.argument('install_location', type=file_type, completer=FilesCompleter(),
default=_get_default_install_location('kubectl'))
c.argument('ssh_key_file', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa'),
completer=FilesCompleter(), help='Path to an SSH key file to use.')
# AKS command argument configuration
with self.argument_context('aks', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('resource_name', name_type, help='Name of the managed cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters'))
c.argument('name', name_type, help='Name of the managed cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters'))
c.argument('kubernetes_version', options_list=[
'--kubernetes-version', '-k'], validator=validate_k8s_version)
c.argument('node_count', options_list=['--node-count', '-c'], type=int)
c.argument('tags', tags_type)
with self.argument_context('aks create', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('name', validator=validate_linux_host_name)
c.argument('kubernetes_version',
completer=get_k8s_versions_completion_list)
c.argument('admin_username', options_list=[
'--admin-username', '-u'], default='azureuser')
c.argument('dns_name_prefix', options_list=['--dns-name-prefix', '-p'])
c.argument('generate_ssh_keys', action='store_true',
validator=validate_create_parameters)
c.argument('node_vm_size', options_list=[
'--node-vm-size', '-s'], completer=get_vm_size_completion_list)
c.argument('nodepool_name', type=str, default='nodepool1',
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name)
c.argument('os_sku', type=str, options_list=['--os-sku'], completer=get_ossku_completion_list)
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('aad_client_app_id')
c.argument('aad_server_app_id')
c.argument('aad_server_app_secret')
c.argument('aad_tenant_id')
c.argument('dns_service_ip')
c.argument('docker_bridge_address')
c.argument('edge_zone', edge_zone_type)
c.argument('load_balancer_sku', type=str,
validator=validate_load_balancer_sku)
c.argument('load_balancer_managed_outbound_ip_count', type=int)
c.argument('load_balancer_outbound_ips', type=str,
validator=validate_load_balancer_outbound_ips)
c.argument('load_balancer_outbound_ip_prefixes', type=str,
validator=validate_load_balancer_outbound_ip_prefixes)
c.argument('load_balancer_outbound_ports', type=int,
validator=validate_load_balancer_outbound_ports)
c.argument('load_balancer_idle_timeout', type=int,
validator=validate_load_balancer_idle_timeout)
c.argument('outbound_type', arg_type=get_enum_type([CONST_OUTBOUND_TYPE_LOAD_BALANCER,
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING]))
c.argument('enable_cluster_autoscaler', action='store_true')
c.argument('cluster_autoscaler_profile', nargs='+', options_list=["--cluster-autoscaler-profile", "--ca-profile"], validator=validate_cluster_autoscaler_profile,
help="Space-separated list of key=value pairs for configuring cluster autoscaler. Pass an empty string to clear the profile.")
c.argument('min_count', type=int, validator=validate_nodes_count)
c.argument('max_count', type=int, validator=validate_nodes_count)
c.argument('vm_set_type', type=str, validator=validate_vm_set_type)
c.argument('zones', zones_type, options_list=[
'--zones', '-z'], help='Space-separated list of availability zones where agent nodes will be placed.')
c.argument('uptime_sla', action='store_true')
c.argument('enable_addons', options_list=['--enable-addons', '-a'])
c.argument('disable_rbac', action='store_true')
c.argument('enable_rbac', action='store_true', options_list=['--enable-rbac', '-r'],
deprecate_info=c.deprecate(redirect="--disable-rbac", hide="2.0.45"))
c.argument('max_pods', type=int, options_list=['--max-pods', '-m'])
c.argument('network_plugin', arg_type=get_enum_type(
['azure', 'kubenet']))
c.argument('network_policy')
c.argument('no_ssh_key', options_list=['--no-ssh-key', '-x'])
c.argument('pod_cidr')
c.argument('service_cidr')
c.argument('ppg', type=str, validator=validate_ppg)
c.argument('vnet_subnet_id', type=str,
validator=validate_vnet_subnet_id)
c.argument('workspace_resource_id')
c.argument('skip_subnet_role_assignment', action='store_true')
c.argument('api_server_authorized_ip_ranges',
type=str, validator=validate_ip_ranges)
c.argument('attach_acr', acr_arg_type)
c.argument('enable_private_cluster', action='store_true')
c.argument('private_dns_zone')
c.argument('fqdn_subdomain')
c.argument('disable_public_fqdn', action='store_true')
c.argument('nodepool_tags', nargs='*', validator=validate_nodepool_tags,
help='space-separated tags: key[=value] [key[=value] ...]. Use "" to clear existing tags.')
c.argument('enable_managed_identity', action='store_true')
c.argument('assign_identity', type=str,
validator=validate_assign_identity)
c.argument('nodepool_labels', nargs='*', validator=validate_nodepool_labels,
help='space-separated labels: key[=value] [key[=value] ...]. You can not change the node labels through CLI after creation. See https://aka.ms/node-labels for syntax of labels.')
c.argument('enable_node_public_ip', action='store_true')
c.argument('node_public_ip_prefix_id', type=str)
c.argument('windows_admin_username', options_list=[
'--windows-admin-username'])
c.argument('windows_admin_password', options_list=[
'--windows-admin-password'])
c.argument('enable_ahub', options_list=['--enable-ahub'])
c.argument('node_osdisk_diskencryptionset_id', type=str,
options_list=['--node-osdisk-diskencryptionset-id', '-d'])
c.argument('aci_subnet_name')
c.argument('enable_encryption_at_host', options_list=[
'--enable-encryption-at-host'], action='store_true')
c.argument('enable_ultra_ssd', options_list=[
'--enable-ultra-ssd'], action='store_true')
c.argument('appgw_name', options_list=[
'--appgw-name'], arg_group='Application Gateway')
c.argument('appgw_subnet_cidr', options_list=[
'--appgw-subnet-cidr'], arg_group='Application Gateway')
c.argument('appgw_id', options_list=[
'--appgw-id'], arg_group='Application Gateway')
c.argument('appgw_subnet_id', options_list=[
'--appgw-subnet-id'], arg_group='Application Gateway')
c.argument('appgw_watch_namespace', options_list=[
'--appgw-watch-namespace'], arg_group='Application Gateway')
c.argument('assign_kubelet_identity', validator=validate_assign_kubelet_identity)
c.argument('yes', options_list=[
'--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
c.argument('enable_sgxquotehelper', action='store_true')
with self.argument_context('aks update', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('attach_acr', acr_arg_type, validator=validate_acr)
c.argument('detach_acr', acr_arg_type, validator=validate_acr)
with self.argument_context('aks update') as c:
c.argument('enable_cluster_autoscaler', options_list=[
"--enable-cluster-autoscaler", "-e"], action='store_true')
c.argument('disable_cluster_autoscaler', options_list=[
"--disable-cluster-autoscaler", "-d"], action='store_true')
c.argument('update_cluster_autoscaler', options_list=[
"--update-cluster-autoscaler", "-u"], action='store_true')
c.argument('cluster_autoscaler_profile', nargs='+', options_list=["--cluster-autoscaler-profile", "--ca-profile"], validator=validate_cluster_autoscaler_profile,
help="Space-separated list of key=value pairs for configuring cluster autoscaler. Pass an empty string to clear the profile.")
c.argument('min_count', type=int, validator=validate_nodes_count)
c.argument('max_count', type=int, validator=validate_nodes_count)
c.argument('uptime_sla', action='store_true')
c.argument('no_uptime_sla', action='store_true')
c.argument('load_balancer_managed_outbound_ip_count', type=int)
c.argument('load_balancer_outbound_ips', type=str,
validator=validate_load_balancer_outbound_ips)
c.argument('load_balancer_outbound_ip_prefixes', type=str,
validator=validate_load_balancer_outbound_ip_prefixes)
c.argument('load_balancer_outbound_ports', type=int,
validator=validate_load_balancer_outbound_ports)
c.argument('load_balancer_idle_timeout', type=int,
validator=validate_load_balancer_idle_timeout)
c.argument('api_server_authorized_ip_ranges',
type=str, validator=validate_ip_ranges)
c.argument('enable_ahub', options_list=['--enable-ahub'])
c.argument('disable_ahub', options_list=['--disable-ahub'])
c.argument('enable_public_fqdn', action='store_true')
c.argument('disable_public_fqdn', action='store_true')
c.argument('windows_admin_password', options_list=[
'--windows-admin-password'])
c.argument('enable_managed_identity', action='store_true')
c.argument('assign_identity', type=str,
validator=validate_assign_identity)
c.argument('yes', options_list=[
'--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
with self.argument_context('aks disable-addons', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('addons', options_list=['--addons', '-a'])
with self.argument_context('aks enable-addons', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('addons', options_list=['--addons', '-a'])
c.argument('subnet_name', options_list=[
'--subnet-name', '-s'], help='Name of an existing subnet to use with the virtual-node add-on.')
c.argument('appgw_name', options_list=[
'--appgw-name'], arg_group='Application Gateway')
c.argument('appgw_subnet_cidr', options_list=[
'--appgw-subnet-cidr'], arg_group='Application Gateway')
c.argument('appgw_id', options_list=[
'--appgw-id'], arg_group='Application Gateway')
c.argument('appgw_subnet_id', options_list=[
'--appgw-subnet-id'], arg_group='Application Gateway')
c.argument('appgw_watch_namespace', options_list=[
'--appgw-watch-namespace'], arg_group='Application Gateway')
c.argument('enable_sgxquotehelper', action='store_true')
with self.argument_context('aks get-credentials', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('admin', options_list=['--admin', '-a'], default=False)
c.argument('context_name', options_list=['--context'],
help='If specified, overwrite the default context name.')
c.argument('path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
default=os.path.join(os.path.expanduser('~'), '.kube', 'config'))
c.argument('public_fqdn', default=False, action='store_true')
for scope in ['aks', 'acs kubernetes', 'acs dcos']:
with self.argument_context('{} install-cli'.format(scope)) as c:
c.argument('client_version', validator=validate_kubectl_version,
help='Version of kubectl to install.')
c.argument('install_location', default=_get_default_install_location(
'kubectl'), help='Path at which to install kubectl.')
c.argument('base_src_url',
help='Base download source URL for kubectl releases.')
c.argument('kubelogin_version', validator=validate_kubelogin_version,
help='Version of kubelogin to install.')
c.argument('kubelogin_install_location', default=_get_default_install_location(
'kubelogin'), help='Path at which to install kubelogin.')
c.argument('kubelogin_base_src_url', options_list=[
'--kubelogin-base-src-url', '-l'], help='Base download source URL for kubelogin releases.')
with self.argument_context('aks update-credentials', arg_group='Service Principal') as c:
c.argument('reset_service_principal', action='store_true')
c.argument('service_principal')
c.argument('client_secret')
with self.argument_context('aks update-credentials', arg_group='AAD') as c:
c.argument('reset_aad', action='store_true')
c.argument('aad_client_app_id')
c.argument('aad_server_app_id')
c.argument('aad_server_app_secret')
c.argument('aad_tenant_id')
with self.argument_context('aks upgrade', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('kubernetes_version',
completer=get_k8s_upgrades_completion_list)
c.argument('yes', options_list=[
'--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
with self.argument_context('aks scale', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('nodepool_name', type=str,
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name)
with self.argument_context('aks nodepool', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('cluster_name', type=str, help='The cluster name.')
for scope in ['aks nodepool add']:
with self.argument_context(scope) as c:
c.argument('nodepool_name', type=str, options_list=[
'--name', '-n'], validator=validate_nodepool_name, help='The node pool name.')
c.argument('zones', zones_type, options_list=[
'--zones', '-z'], help='Space-separated list of availability zones where agent nodes will be placed.')
c.argument('node_vm_size', options_list=[
'--node-vm-size', '-s'], completer=get_vm_size_completion_list)
c.argument('max_pods', type=int, options_list=['--max-pods', '-m'])
c.argument('os_type', type=str)
c.argument('os_sku', type=str, options_list=['--os-sku'], completer=get_ossku_completion_list)
c.argument('enable_cluster_autoscaler', options_list=[
"--enable-cluster-autoscaler", "-e"], action='store_true')
c.argument('node_taints', type=str, validator=validate_taints)
c.argument('priority', arg_type=get_enum_type(
[CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT]), validator=validate_priority)
c.argument('eviction_policy', arg_type=get_enum_type(
[CONST_SPOT_EVICTION_POLICY_DELETE, CONST_SPOT_EVICTION_POLICY_DEALLOCATE]), validator=validate_eviction_policy)
c.argument('spot_max_price', type=float,
validator=validate_spot_max_price)
c.argument('tags', tags_type)
c.argument('labels', nargs='*', validator=validate_nodepool_labels)
c.argument('mode', get_enum_type(nodepool_mode_type))
c.argument('enable_node_public_ip', action='store_true')
c.argument('node_public_ip_prefix_id', type=str)
c.argument('ppg', type=str, validator=validate_ppg)
c.argument('max_surge', type=str, validator=validate_max_surge)
c.argument('node_os_disk_type', arg_type=get_enum_type(
[CONST_OS_DISK_TYPE_MANAGED, CONST_OS_DISK_TYPE_EPHEMERAL]))
c.argument('enable_encryption_at_host', options_list=[
'--enable-encryption-at-host'], action='store_true')
c.argument('enable_ultra_ssd', options_list=[
'--enable-ultra-ssd'], action='store_true')
for scope in ['aks nodepool show', 'aks nodepool delete', 'aks nodepool scale', 'aks nodepool upgrade', 'aks nodepool update']:
with self.argument_context(scope) as c:
c.argument('nodepool_name', type=str, options_list=[
'--name', '-n'], validator=validate_nodepool_name, help='The node pool name.')
with self.argument_context('aks nodepool update', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='agent_pools') as c:
c.argument('enable_cluster_autoscaler', options_list=[
"--enable-cluster-autoscaler", "-e"], action='store_true')
c.argument('disable_cluster_autoscaler', options_list=[
"--disable-cluster-autoscaler", "-d"], action='store_true')
c.argument('update_cluster_autoscaler', options_list=[
"--update-cluster-autoscaler", "-u"], action='store_true')
c.argument('tags', tags_type)
c.argument('mode', get_enum_type(nodepool_mode_type))
c.argument('max_surge', type=str, validator=validate_max_surge)
with self.argument_context('aks command invoke') as c:
c.argument('command_string', type=str, options_list=[
"--command", "-c"], help='the command to run')
c.argument('command_files', options_list=["--file", "-f"], required=False, action="append",
help='attach any files the command may use, or use \'.\' to upload the current folder.')
with self.argument_context('aks command result') as c:
c.argument('command_id', type=str, options_list=[
"--command-id", "-i"], help='the command ID from "aks command invoke"')
with self.argument_context('aks use-dev-spaces') as c:
c.argument('update', options_list=['--update'], action='store_true')
c.argument('space_name', options_list=['--space', '-s'])
c.argument('endpoint_type', get_enum_type(
['Public', 'Private', 'None'], default='Public'), options_list=['--endpoint', '-e'])
c.argument('prompt', options_list=[
'--yes', '-y'], action='store_true', help='Do not prompt for confirmation. Requires --space.')
with self.argument_context('aks remove-dev-spaces') as c:
c.argument('prompt', options_list=[
'--yes', '-y'], action='store_true', help='Do not prompt for confirmation')
# OpenShift command argument configuration
with self.argument_context('openshift', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c:
c.argument('resource_name', name_type, help='Name of the managed OpenShift cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/OpenShiftManagedClusters'))
c.argument('name', name_type, help='Name of the managed OpenShift cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/OpenShiftManagedClusters'))
c.argument('compute_count', options_list=[
'--compute-count', '-c'], type=int, default=4)
c.argument('tags', tags_type)
with self.argument_context('openshift create', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c:
c.argument('name', validator=validate_linux_host_name)
c.argument('compute_vm_size', options_list=['--compute-vm-size', '-s'])
c.argument('customer_admin_group_id', options_list=[
'--customer-admin-group-id'])
c.argument('workspace_id')
with self.argument_context('openshift monitor enable', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c:
c.argument(
'workspace_id', help='The resource ID of an existing Log Analytics Workspace to use for storing monitoring data.')
|
def load_arguments(self, _):
acr_arg_type = CLIArgumentType(metavar='ACR_NAME_OR_RESOURCE_ID')
# ACS command argument configuration
with self.argument_context('acs') as c:
c.argument('resource_name', name_type,
completer=get_resource_name_completion_list(
'Microsoft.ContainerService/ContainerServices'),
help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`')
c.argument('name', name_type,
completer=get_resource_name_completion_list(
'Microsoft.ContainerService/ContainerServices'),
help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`')
c.argument('container_service_name', name_type, help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices'))
c.argument('admin_username', options_list=[
'--admin-username', '-u'], default='azureuser')
c.argument('api_version',
help=_get_feature_in_preview_message() + 'Use API version of ACS to perform az acs operations. Available options: 2017-01-31, 2017-07-01. Default: the latest version for the location')
c.argument('dns_name_prefix', options_list=['--dns-prefix', '-d'])
c.argument('orchestrator_type', get_enum_type(
orchestrator_types), options_list=['--orchestrator-type', '-t'])
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('tags', tags_type)
c.argument('disable_browser',
help='Do not open browser after opening a proxy to the cluster web user interface')
with self.argument_context('acs create') as c:
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('master_profile', options_list=['--master-profile', '-m'], type=validate_file_or_dict,
help=_get_feature_in_preview_message() + 'The file or dictionary representation of the master profile. Note it will override any master settings once set')
c.argument('master_vm_size', completer=get_vm_size_completion_list,
help=_get_feature_in_preview_message())
c.argument('agent_count', type=int)
c.argument('generate_ssh_keys', action='store_true', validator=validate_create_parameters,
help='Generate SSH public and private key files if missing')
c.argument('master_osdisk_size', type=int,
help=_get_feature_in_preview_message() + 'The disk size for master pool vms. Unit in GB. Default: corresponding vmsize disk size')
c.argument('master_vnet_subnet_id', type=str,
help=_get_feature_in_preview_message() + 'The custom vnet subnet id. Note agent need to used the same vnet if master set. Default: ""')
c.argument('master_first_consecutive_static_ip', type=str,
help=_get_feature_in_preview_message() + 'The first consecutive ip used to specify static ip block.')
c.argument('master_storage_profile', get_enum_type(storage_profile_types),
help=_get_feature_in_preview_message() + 'Default: varies based on Orchestrator')
c.argument('agent_profiles', options_list=['--agent-profiles', '-a'], type=validate_file_or_dict,
help=_get_feature_in_preview_message() + 'The file or dictionary representation of the agent profiles. Note it will override any agent settings once set')
c.argument('agent_vm_size', completer=get_vm_size_completion_list,
help='Set the default size for agent pools vms.')
c.argument('agent_osdisk_size', type=int,
help=_get_feature_in_preview_message() + 'Set the default disk size for agent pools vms. Unit in GB. Default: corresponding vmsize disk size')
c.argument('agent_vnet_subnet_id', type=str,
help=_get_feature_in_preview_message() + 'Set the default custom vnet subnet id for agent pools. Note agent need to used the same vnet if master set. Default: ""')
c.argument('agent_ports', type=validate_list_of_integers,
help=_get_feature_in_preview_message() + 'Set the default ports exposed on the agent pools. Only usable for non-Kubernetes. Default: 8080,4000,80')
c.argument('agent_storage_profile', get_enum_type(storage_profile_types),
help=_get_feature_in_preview_message() + 'Set default storage profile for agent pools. Default: varies based on Orchestrator')
c.argument('windows', action='store_true',
help='If true, set the default osType of agent pools to be Windows.')
c.argument('validate', action='store_true',
help='Generate and validate the ARM template without creating any resources')
c.argument('orchestrator_version', help=_get_feature_in_preview_message(
) + 'Use Orchestrator Version to specify the semantic version for your choice of orchestrator.')
with self.argument_context('acs scale') as c:
c.argument('new_agent_count', type=int)
for scope in ['dcos', 'kubernetes']:
with self.argument_context('acs {} browse'.format(scope)) as c:
c.argument('ssh_key_file', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa'),
completer=FilesCompleter(), help='Path to an SSH key file to use.')
with self.argument_context('acs dcos install-cli') as c:
c.argument('install_location',
default=_get_default_install_location('dcos'))
with self.argument_context('acs kubernetes get-credentials') as c:
c.argument('path', options_list=['--file', '-f'])
c.argument('overwrite_existing', action='store_true',
help='If specified, overwrite any existing credentials.')
with self.argument_context('acs kubernetes install-cli') as c:
c.argument('install_location', type=file_type, completer=FilesCompleter(),
default=_get_default_install_location('kubectl'))
c.argument('ssh_key_file', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa'),
completer=FilesCompleter(), help='Path to an SSH key file to use.')
# AKS command argument configuration
with self.argument_context('aks', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('resource_name', name_type, help='Name of the managed cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters'))
c.argument('name', name_type, help='Name of the managed cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters'))
c.argument('kubernetes_version', options_list=[
'--kubernetes-version', '-k'], validator=validate_k8s_version)
c.argument('node_count', options_list=['--node-count', '-c'], type=int)
c.argument('tags', tags_type)
with self.argument_context('aks create', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('name', validator=validate_linux_host_name)
c.argument('kubernetes_version',
completer=get_k8s_versions_completion_list)
c.argument('admin_username', options_list=[
'--admin-username', '-u'], default='azureuser')
c.argument('dns_name_prefix', options_list=['--dns-name-prefix', '-p'])
c.argument('generate_ssh_keys', action='store_true',
validator=validate_create_parameters)
c.argument('node_vm_size', options_list=[
'--node-vm-size', '-s'], completer=get_vm_size_completion_list)
c.argument('nodepool_name', type=str, default='nodepool1',
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name)
c.argument('os_sku', completer=get_ossku_completion_list)
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('aad_client_app_id')
c.argument('aad_server_app_id')
c.argument('aad_server_app_secret')
c.argument('aad_tenant_id')
c.argument('dns_service_ip')
c.argument('docker_bridge_address')
c.argument('edge_zone', edge_zone_type)
c.argument('load_balancer_sku', type=str,
validator=validate_load_balancer_sku)
c.argument('load_balancer_managed_outbound_ip_count', type=int)
c.argument('load_balancer_outbound_ips', type=str,
validator=validate_load_balancer_outbound_ips)
c.argument('load_balancer_outbound_ip_prefixes', type=str,
validator=validate_load_balancer_outbound_ip_prefixes)
c.argument('load_balancer_outbound_ports', type=int,
validator=validate_load_balancer_outbound_ports)
c.argument('load_balancer_idle_timeout', type=int,
validator=validate_load_balancer_idle_timeout)
c.argument('outbound_type', arg_type=get_enum_type([CONST_OUTBOUND_TYPE_LOAD_BALANCER,
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING]))
c.argument('enable_cluster_autoscaler', action='store_true')
c.argument('cluster_autoscaler_profile', nargs='+', options_list=["--cluster-autoscaler-profile", "--ca-profile"], validator=validate_cluster_autoscaler_profile,
help="Space-separated list of key=value pairs for configuring cluster autoscaler. Pass an empty string to clear the profile.")
c.argument('min_count', type=int, validator=validate_nodes_count)
c.argument('max_count', type=int, validator=validate_nodes_count)
c.argument('vm_set_type', type=str, validator=validate_vm_set_type)
c.argument('zones', zones_type, options_list=[
'--zones', '-z'], help='Space-separated list of availability zones where agent nodes will be placed.')
c.argument('uptime_sla', action='store_true')
c.argument('enable_addons', options_list=['--enable-addons', '-a'])
c.argument('disable_rbac', action='store_true')
c.argument('enable_rbac', action='store_true', options_list=['--enable-rbac', '-r'],
deprecate_info=c.deprecate(redirect="--disable-rbac", hide="2.0.45"))
c.argument('max_pods', type=int, options_list=['--max-pods', '-m'])
c.argument('network_plugin', arg_type=get_enum_type(
['azure', 'kubenet']))
c.argument('network_policy')
c.argument('no_ssh_key', options_list=['--no-ssh-key', '-x'])
c.argument('pod_cidr')
c.argument('service_cidr')
c.argument('ppg', type=str, validator=validate_ppg)
c.argument('vnet_subnet_id', type=str,
validator=validate_vnet_subnet_id)
c.argument('workspace_resource_id')
c.argument('skip_subnet_role_assignment', action='store_true')
c.argument('api_server_authorized_ip_ranges',
type=str, validator=validate_ip_ranges)
c.argument('attach_acr', acr_arg_type)
c.argument('enable_private_cluster', action='store_true')
c.argument('private_dns_zone')
c.argument('fqdn_subdomain')
c.argument('disable_public_fqdn', action='store_true')
c.argument('nodepool_tags', nargs='*', validator=validate_nodepool_tags,
help='space-separated tags: key[=value] [key[=value] ...]. Use "" to clear existing tags.')
c.argument('enable_managed_identity', action='store_true')
c.argument('assign_identity', type=str,
validator=validate_assign_identity)
c.argument('nodepool_labels', nargs='*', validator=validate_nodepool_labels,
help='space-separated labels: key[=value] [key[=value] ...]. You can not change the node labels through CLI after creation. See https://aka.ms/node-labels for syntax of labels.')
c.argument('enable_node_public_ip', action='store_true')
c.argument('node_public_ip_prefix_id', type=str)
c.argument('windows_admin_username', options_list=[
'--windows-admin-username'])
c.argument('windows_admin_password', options_list=[
'--windows-admin-password'])
c.argument('enable_ahub', options_list=['--enable-ahub'])
c.argument('node_osdisk_diskencryptionset_id', type=str,
options_list=['--node-osdisk-diskencryptionset-id', '-d'])
c.argument('aci_subnet_name')
c.argument('enable_encryption_at_host', options_list=[
'--enable-encryption-at-host'], action='store_true')
c.argument('enable_ultra_ssd', options_list=[
'--enable-ultra-ssd'], action='store_true')
c.argument('appgw_name', options_list=[
'--appgw-name'], arg_group='Application Gateway')
c.argument('appgw_subnet_cidr', options_list=[
'--appgw-subnet-cidr'], arg_group='Application Gateway')
c.argument('appgw_id', options_list=[
'--appgw-id'], arg_group='Application Gateway')
c.argument('appgw_subnet_id', options_list=[
'--appgw-subnet-id'], arg_group='Application Gateway')
c.argument('appgw_watch_namespace', options_list=[
'--appgw-watch-namespace'], arg_group='Application Gateway')
c.argument('assign_kubelet_identity', validator=validate_assign_kubelet_identity)
c.argument('yes', options_list=[
'--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
c.argument('enable_sgxquotehelper', action='store_true')
with self.argument_context('aks update', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('attach_acr', acr_arg_type, validator=validate_acr)
c.argument('detach_acr', acr_arg_type, validator=validate_acr)
with self.argument_context('aks update') as c:
c.argument('enable_cluster_autoscaler', options_list=[
"--enable-cluster-autoscaler", "-e"], action='store_true')
c.argument('disable_cluster_autoscaler', options_list=[
"--disable-cluster-autoscaler", "-d"], action='store_true')
c.argument('update_cluster_autoscaler', options_list=[
"--update-cluster-autoscaler", "-u"], action='store_true')
c.argument('cluster_autoscaler_profile', nargs='+', options_list=["--cluster-autoscaler-profile", "--ca-profile"], validator=validate_cluster_autoscaler_profile,
help="Space-separated list of key=value pairs for configuring cluster autoscaler. Pass an empty string to clear the profile.")
c.argument('min_count', type=int, validator=validate_nodes_count)
c.argument('max_count', type=int, validator=validate_nodes_count)
c.argument('uptime_sla', action='store_true')
c.argument('no_uptime_sla', action='store_true')
c.argument('load_balancer_managed_outbound_ip_count', type=int)
c.argument('load_balancer_outbound_ips', type=str,
validator=validate_load_balancer_outbound_ips)
c.argument('load_balancer_outbound_ip_prefixes', type=str,
validator=validate_load_balancer_outbound_ip_prefixes)
c.argument('load_balancer_outbound_ports', type=int,
validator=validate_load_balancer_outbound_ports)
c.argument('load_balancer_idle_timeout', type=int,
validator=validate_load_balancer_idle_timeout)
c.argument('api_server_authorized_ip_ranges',
type=str, validator=validate_ip_ranges)
c.argument('enable_ahub', options_list=['--enable-ahub'])
c.argument('disable_ahub', options_list=['--disable-ahub'])
c.argument('enable_public_fqdn', action='store_true')
c.argument('disable_public_fqdn', action='store_true')
c.argument('windows_admin_password', options_list=[
'--windows-admin-password'])
c.argument('enable_managed_identity', action='store_true')
c.argument('assign_identity', type=str,
validator=validate_assign_identity)
c.argument('yes', options_list=[
'--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
with self.argument_context('aks disable-addons', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('addons', options_list=['--addons', '-a'])
with self.argument_context('aks enable-addons', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('addons', options_list=['--addons', '-a'])
c.argument('subnet_name', options_list=[
'--subnet-name', '-s'], help='Name of an existing subnet to use with the virtual-node add-on.')
c.argument('appgw_name', options_list=[
'--appgw-name'], arg_group='Application Gateway')
c.argument('appgw_subnet_cidr', options_list=[
'--appgw-subnet-cidr'], arg_group='Application Gateway')
c.argument('appgw_id', options_list=[
'--appgw-id'], arg_group='Application Gateway')
c.argument('appgw_subnet_id', options_list=[
'--appgw-subnet-id'], arg_group='Application Gateway')
c.argument('appgw_watch_namespace', options_list=[
'--appgw-watch-namespace'], arg_group='Application Gateway')
c.argument('enable_sgxquotehelper', action='store_true')
with self.argument_context('aks get-credentials', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('admin', options_list=['--admin', '-a'], default=False)
c.argument('context_name', options_list=['--context'],
help='If specified, overwrite the default context name.')
c.argument('path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
default=os.path.join(os.path.expanduser('~'), '.kube', 'config'))
c.argument('public_fqdn', default=False, action='store_true')
for scope in ['aks', 'acs kubernetes', 'acs dcos']:
with self.argument_context('{} install-cli'.format(scope)) as c:
c.argument('client_version', validator=validate_kubectl_version,
help='Version of kubectl to install.')
c.argument('install_location', default=_get_default_install_location(
'kubectl'), help='Path at which to install kubectl.')
c.argument('base_src_url',
help='Base download source URL for kubectl releases.')
c.argument('kubelogin_version', validator=validate_kubelogin_version,
help='Version of kubelogin to install.')
c.argument('kubelogin_install_location', default=_get_default_install_location(
'kubelogin'), help='Path at which to install kubelogin.')
c.argument('kubelogin_base_src_url', options_list=[
'--kubelogin-base-src-url', '-l'], help='Base download source URL for kubelogin releases.')
with self.argument_context('aks update-credentials', arg_group='Service Principal') as c:
c.argument('reset_service_principal', action='store_true')
c.argument('service_principal')
c.argument('client_secret')
with self.argument_context('aks update-credentials', arg_group='AAD') as c:
c.argument('reset_aad', action='store_true')
c.argument('aad_client_app_id')
c.argument('aad_server_app_id')
c.argument('aad_server_app_secret')
c.argument('aad_tenant_id')
with self.argument_context('aks upgrade', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('kubernetes_version',
completer=get_k8s_upgrades_completion_list)
c.argument('yes', options_list=[
'--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
with self.argument_context('aks scale', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('nodepool_name', type=str,
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name)
with self.argument_context('aks nodepool', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('cluster_name', type=str, help='The cluster name.')
for scope in ['aks nodepool add']:
with self.argument_context(scope) as c:
c.argument('nodepool_name', type=str, options_list=[
'--name', '-n'], validator=validate_nodepool_name, help='The node pool name.')
c.argument('zones', zones_type, options_list=[
'--zones', '-z'], help='Space-separated list of availability zones where agent nodes will be placed.')
c.argument('node_vm_size', options_list=[
'--node-vm-size', '-s'], completer=get_vm_size_completion_list)
c.argument('max_pods', type=int, options_list=['--max-pods', '-m'])
c.argument('os_type', type=str)
c.argument('os_sku', type=str, options_list=['--os-sku'], completer=get_ossku_completion_list)
c.argument('enable_cluster_autoscaler', options_list=[
"--enable-cluster-autoscaler", "-e"], action='store_true')
c.argument('node_taints', type=str, validator=validate_taints)
c.argument('priority', arg_type=get_enum_type(
[CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT]), validator=validate_priority)
c.argument('eviction_policy', arg_type=get_enum_type(
[CONST_SPOT_EVICTION_POLICY_DELETE, CONST_SPOT_EVICTION_POLICY_DEALLOCATE]), validator=validate_eviction_policy)
c.argument('spot_max_price', type=float,
validator=validate_spot_max_price)
c.argument('tags', tags_type)
c.argument('labels', nargs='*', validator=validate_nodepool_labels)
c.argument('mode', get_enum_type(nodepool_mode_type))
c.argument('enable_node_public_ip', action='store_true')
c.argument('node_public_ip_prefix_id', type=str)
c.argument('ppg', type=str, validator=validate_ppg)
c.argument('max_surge', type=str, validator=validate_max_surge)
c.argument('node_os_disk_type', arg_type=get_enum_type(
[CONST_OS_DISK_TYPE_MANAGED, CONST_OS_DISK_TYPE_EPHEMERAL]))
c.argument('enable_encryption_at_host', options_list=[
'--enable-encryption-at-host'], action='store_true')
c.argument('enable_ultra_ssd', options_list=[
'--enable-ultra-ssd'], action='store_true')
for scope in ['aks nodepool show', 'aks nodepool delete', 'aks nodepool scale', 'aks nodepool upgrade', 'aks nodepool update']:
with self.argument_context(scope) as c:
c.argument('nodepool_name', type=str, options_list=[
'--name', '-n'], validator=validate_nodepool_name, help='The node pool name.')
with self.argument_context('aks nodepool update', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='agent_pools') as c:
c.argument('enable_cluster_autoscaler', options_list=[
"--enable-cluster-autoscaler", "-e"], action='store_true')
c.argument('disable_cluster_autoscaler', options_list=[
"--disable-cluster-autoscaler", "-d"], action='store_true')
c.argument('update_cluster_autoscaler', options_list=[
"--update-cluster-autoscaler", "-u"], action='store_true')
c.argument('tags', tags_type)
c.argument('mode', get_enum_type(nodepool_mode_type))
c.argument('max_surge', type=str, validator=validate_max_surge)
with self.argument_context('aks command invoke') as c:
c.argument('command_string', type=str, options_list=[
"--command", "-c"], help='the command to run')
c.argument('command_files', options_list=["--file", "-f"], required=False, action="append",
help='attach any files the command may use, or use \'.\' to upload the current folder.')
with self.argument_context('aks command result') as c:
c.argument('command_id', type=str, options_list=[
"--command-id", "-i"], help='the command ID from "aks command invoke"')
with self.argument_context('aks use-dev-spaces') as c:
c.argument('update', options_list=['--update'], action='store_true')
c.argument('space_name', options_list=['--space', '-s'])
c.argument('endpoint_type', get_enum_type(
['Public', 'Private', 'None'], default='Public'), options_list=['--endpoint', '-e'])
c.argument('prompt', options_list=[
'--yes', '-y'], action='store_true', help='Do not prompt for confirmation. Requires --space.')
with self.argument_context('aks remove-dev-spaces') as c:
c.argument('prompt', options_list=[
'--yes', '-y'], action='store_true', help='Do not prompt for confirmation')
# OpenShift command argument configuration
with self.argument_context('openshift', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c:
c.argument('resource_name', name_type, help='Name of the managed OpenShift cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/OpenShiftManagedClusters'))
c.argument('name', name_type, help='Name of the managed OpenShift cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/OpenShiftManagedClusters'))
c.argument('compute_count', options_list=[
'--compute-count', '-c'], type=int, default=4)
c.argument('tags', tags_type)
with self.argument_context('openshift create', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c:
c.argument('name', validator=validate_linux_host_name)
c.argument('compute_vm_size', options_list=['--compute-vm-size', '-s'])
c.argument('customer_admin_group_id', options_list=[
'--customer-admin-group-id'])
c.argument('workspace_id')
with self.argument_context('openshift monitor enable', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c:
c.argument(
'workspace_id', help='The resource ID of an existing Log Analytics Workspace to use for storing monitoring data.')
|
44,300 |
def taper_excitations(generators, paulixops, paulix_sector, singles, doubles):
r"""Transform excitations with a Clifford operator and taper qubits.
The qubit operators for single and double excitations are first generated using the generators of
    :func:`~.SingleExcitation` and :func:`~.DoubleExcitation` operations. Each of these operators that commutes
    with all :math:`\mathbb{Z}_2` symmetries of the molecular Hamiltonian is then transformed using the
    Clifford operators :math:`U` and tapered, while the remaining non-commuting operators are discarded.
    These new tapered excitation operators can be exponentiated using :func:`~.PauliRot` for building a
    tapered UCCSD-like circuit ansatz.
Args:
generators (list[Hamiltonian]): list of generators of symmetries, taus, for the Hamiltonian
paulixops (list[Operation]): list of single-qubit Pauli-X operators
paulix_sector (list[int]): list of eigenvalues of Pauli-X operators
singles (list(list(int))): list with the indices `r`, `p` of the two qubits representing the single excitation :math:`\vert r, p \rangle = \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}\rangle`
doubles (list(list(int))): list with the indices `s`, `r`, `q`, `p` of the four qubits representing the double excitation :math:`\vert s, r, q, p \rangle = \hat{c}_p^\dagger \hat{c}_q^\dagger \hat{c}_r \hat{c}_s \vert \mathrm{HF}\rangle`
Returns:
tuple(list, list): tapered single and double excitation operators
**Example**
>>> symbols = ['He', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.4588684632]])
>>> mol = qml.qchem.Molecule(symbols, geometry, charge=1)
>>> H, n_qubits = qml.qchem.molecular_hamiltonian(symbols, geometry)
>>> n_elec = mol.n_electrons
>>> generators = qml.qchem.symmetry_generators(H)
>>> paulixops = qml.qchem.paulix_ops(generators, 4)
>>> paulix_sector = qml.qchem.optimal_sector(H, generators, n_elec)
>>> singles, doubles = qml.qchem.excitations(n_elec, n_qubits)
>>> singles_tap, doubles_tap = taper_excitations(generators, paulixops,
paulix_sector, singles, doubles)
>>> print(singles_tap[0], doubles_tap[0])
((0.5+0j)) [Y0]
((-0.25+0j)) [X0 Y1] + ((-0.25+0j)) [Y0 X1]
"""
singles_tapered, doubles_tapered = [], []
for excitation in singles:
hamil_gen = qml.SingleExcitation(1, wires=excitation).generator()
if np.all([_is_commuting_obs(generator, hamil_gen) for generator in generators]):
excitation_tapered_op = qml.taper(hamil_gen, generators, paulixops, paulix_sector)
qml.simplify(excitation_tapered_op)
singles_tapered.append(excitation_tapered_op)
for excitation in doubles:
hamil_gen = qml.DoubleExcitation(1, wires=excitation).generator()
if np.all([_is_commuting_obs(generator, hamil_gen) for generator in generators]):
excitation_tapered_op = qml.taper(hamil_gen, generators, paulixops, paulix_sector)
qml.simplify(excitation_tapered_op)
doubles_tapered.append(excitation_tapered_op)
return singles_tapered, doubles_tapered
|
def taper_excitations(generators, paulixops, paulix_sector, singles, doubles):
r"""Transform excitations with a Clifford operator and taper qubits.
The qubit operators for single and double excitations are first generated using the generators of
    :func:`~.SingleExcitation` and :func:`~.DoubleExcitation` operations. Each of these operators that commutes
    with all :math:`\mathbb{Z}_2` symmetries of the molecular Hamiltonian is then transformed using the
    Clifford operators :math:`U` and tapered, while the remaining non-commuting operators are discarded.
    These new tapered excitation operators can be exponentiated using :func:`~.PauliRot` for building a
    tapered UCCSD-like circuit ansatz.
Args:
generators (list[Hamiltonian]): list of generators of symmetries, taus, for the Hamiltonian
paulixops (list[Operation]): list of single-qubit Pauli-X operators
paulix_sector (list[int]): list of eigenvalues of Pauli-X operators
singles (list(list(int))): list with the indices `r`, `p` of the two qubits representing the single excitation :math:`\vert r, p \rangle = \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}\rangle`
doubles (list(list(int))): list with the indices `s`, `r`, `q`, `p` of the four qubits representing the double excitation
Returns:
tuple(list, list): tapered single and double excitation operators
**Example**
>>> symbols = ['He', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.4588684632]])
>>> mol = qml.qchem.Molecule(symbols, geometry, charge=1)
>>> H, n_qubits = qml.qchem.molecular_hamiltonian(symbols, geometry)
>>> n_elec = mol.n_electrons
>>> generators = qml.qchem.symmetry_generators(H)
>>> paulixops = qml.qchem.paulix_ops(generators, 4)
>>> paulix_sector = qml.qchem.optimal_sector(H, generators, n_elec)
>>> singles, doubles = qml.qchem.excitations(n_elec, n_qubits)
>>> singles_tap, doubles_tap = taper_excitations(generators, paulixops,
paulix_sector, singles, doubles)
>>> print(singles_tap[0], doubles_tap[0])
((0.5+0j)) [Y0]
((-0.25+0j)) [X0 Y1] + ((-0.25+0j)) [Y0 X1]
"""
singles_tapered, doubles_tapered = [], []
for excitation in singles:
hamil_gen = qml.SingleExcitation(1, wires=excitation).generator()
if np.all([_is_commuting_obs(generator, hamil_gen) for generator in generators]):
excitation_tapered_op = qml.taper(hamil_gen, generators, paulixops, paulix_sector)
qml.simplify(excitation_tapered_op)
singles_tapered.append(excitation_tapered_op)
for excitation in doubles:
hamil_gen = qml.DoubleExcitation(1, wires=excitation).generator()
if np.all([_is_commuting_obs(generator, hamil_gen) for generator in generators]):
excitation_tapered_op = qml.taper(hamil_gen, generators, paulixops, paulix_sector)
qml.simplify(excitation_tapered_op)
doubles_tapered.append(excitation_tapered_op)
return singles_tapered, doubles_tapered
|
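A minimal circuit sketch (an illustrative addition, not part of the source) showing how the tapered operators printed in the example above could be exponentiated with qml.PauliRot; preparation of the tapered reference state is omitted. Since PauliRot(phi, P, wires) implements exp(-i*phi/2 * P), a term c*P in an excitation generator applied for angle theta becomes PauliRot(2*c*theta, P, wires).
import pennylane as qml

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev)
def tapered_excitation_circuit(theta):
    # single excitation generator (0.5) [Y0]  ->  exp(-i * theta * 0.5 * Y0)
    qml.PauliRot(theta, "Y", wires=[0])
    # double excitation generator (-0.25) [X0 Y1] + (-0.25) [Y0 X1]
    qml.PauliRot(-0.5 * theta, "XY", wires=[0, 1])
    qml.PauliRot(-0.5 * theta, "YX", wires=[0, 1])
    return qml.state()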
44,763 |
def param_logger(fn, args=[], kwargs=None, unlogged=[]):
"""
Log parameters explicitly passed to a function.
:param fn: function whose parameters are to be logged
:param args: arguments explicitly passed into fn
:param kwargs: kwargs explicitly passed into fn
:param unlogged: parameters not to be logged
:return: None
"""
# Names of all parameters for the function
all_param_names = inspect.getargspec(fn)[0]
# Default values of all parameters with default values. Has length of n, and corresponds
# to values of last n elements in all_param_names
all_default_values = inspect.getargspec(fn)[3]
# Checking if default values are present for logging. Known bug that getargspec will return an
# empty argspec for certain functions, despite the functions having an argspec.
if all_default_values:
# Removing the first len(args) elements from all_param_names - these are values already
# passed in explicitly by the user and don't need to be logged with default values.
kwargs_and_default_names = all_param_names[len(args):]
# If there are more parameter names than default values left, we know that the parameters
# not covered by the default values are passed in as kwargs (assuming all non-default
# parameters are passed to the function)
if len(kwargs_and_default_names) > len(all_default_values):
kwargs_and_default_names = kwargs_and_default_names[len(kwargs_and_default_names)
- len(all_default_values):]
# Otherwise, if there are more default values than parameter names, we know that some of the
# parameters with default values have been entered by the user in args
elif len(kwargs_and_default_names) < len(all_default_values):
all_default_values = all_default_values[len(all_default_values)
- len(kwargs_and_default_names):]
default_params = zip(kwargs_and_default_names, all_default_values)
# Filtering out the parameters that have been passed in by the user as a kwarg.
default_params_to_be_logged = []
for param in default_params:
if param[0] not in kwargs:
default_params_to_be_logged += [param]
for param in default_params_to_be_logged:
if param[0] not in unlogged:
try_mlflow_log(mlflow.log_param, param[0], param[1])
# List of tuples of parameter names and args that are passed by the user
params_list = zip(inspect.getargspec(fn)[0][:len(args)], args)
for param in params_list:
if param[0] not in unlogged:
try_mlflow_log(mlflow.log_param, param[0], param[1])
for param_name in kwargs:
if param_name not in unlogged:
try_mlflow_log(mlflow.log_param, param_name, kwargs[param_name])
|
def log_fn_args_as_params(fn, args=[], kwargs=None, unlogged=[]):
"""
Log parameters explicitly passed to a function.
:param fn: function whose parameters are to be logged
:param args: arguments explicitly passed into fn
:param kwargs: kwargs explicitly passed into fn
:param unlogged: parameters not to be logged
:return: None
"""
# Names of all parameters for the function
all_param_names = inspect.getargspec(fn)[0]
# Default values of all parameters with default values. Has length of n, and corresponds
# to values of last n elements in all_param_names
all_default_values = inspect.getargspec(fn)[3]
# Checking if default values are present for logging. Known bug that getargspec will return an
# empty argspec for certain functions, despite the functions having an argspec.
if all_default_values:
# Removing the first len(args) elements from all_param_names - these are values already
# passed in explicitly by the user and don't need to be logged with default values.
kwargs_and_default_names = all_param_names[len(args):]
# If there are more parameter names than default values left, we know that the parameters
# not covered by the default values are passed in as kwargs (assuming all non-default
# parameters are passed to the function)
if len(kwargs_and_default_names) > len(all_default_values):
kwargs_and_default_names = kwargs_and_default_names[len(kwargs_and_default_names)
- len(all_default_values):]
# Otherwise, if there are more default values than parameter names, we know that some of the
# parameters with default values have been entered by the user in args
elif len(kwargs_and_default_names) < len(all_default_values):
all_default_values = all_default_values[len(all_default_values)
- len(kwargs_and_default_names):]
default_params = zip(kwargs_and_default_names, all_default_values)
# Filtering out the parameters that have been passed in by the user as a kwarg.
default_params_to_be_logged = []
for param in default_params:
if param[0] not in kwargs:
default_params_to_be_logged += [param]
for param in default_params_to_be_logged:
if param[0] not in unlogged:
try_mlflow_log(mlflow.log_param, param[0], param[1])
# List of tuples of parameter names and args that are passed by the user
params_list = zip(inspect.getargspec(fn)[0][:len(args)], args)
for param in params_list:
if param[0] not in unlogged:
try_mlflow_log(mlflow.log_param, param[0], param[1])
for param_name in kwargs:
if param_name not in unlogged:
try_mlflow_log(mlflow.log_param, param_name, kwargs[param_name])
|
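A hypothetical usage sketch (the train function and the values below are illustrative, not from the source) showing which parameters end up logged by the function above.
def train(alpha, beta=0.1, gamma=2):
    ...

# Suppose autologging intercepts the call train(0.5, gamma=3) and invokes:
log_fn_args_as_params(train, args=[0.5], kwargs={"gamma": 3}, unlogged=["beta"])
# -> logs alpha=0.5 (positional arg) and gamma=3 (explicit kwarg) via mlflow.log_param;
#    beta is skipped because it appears in `unlogged`, otherwise its default 0.1
#    would also be logged as an unset default.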
26,397 |
def generate_config(context):
# Using some global values from an external config file.
# Hardcoded for this example.
global_prefic = "acc "
# Manipulate context.properties #
for folder in context.properties["folders"]:
folder["displayName"] = global_prefic + folder["displayName"]
# Passing values forward to CFT template
return {
'resources': [{
'type': "cft-folder.py",
'name': context.env['name'],
'properties': context.properties}]
}
|
def generate_config(context):
# Using some global values from an external config file.
# Hardcoded for this example.
global_prefix = "acc "
# Manipulate context.properties #
for folder in context.properties["folders"]:
folder["displayName"] = global_prefic + folder["displayName"]
# Passing values forward to CFT template
return {
'resources': [{
'type': "cft-folder.py",
'name': context.env['name'],
'properties': context.properties}]
}
|
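A hypothetical local check of the template above; FakeContext is an illustrative stand-in for the Deployment Manager context object and is not part of the source.
class FakeContext:
    env = {"name": "folder-tree"}
    properties = {"folders": [{"displayName": "engineering"},
                              {"displayName": "finance"}]}

config = generate_config(FakeContext())
# Each displayName is now prefixed: "acc engineering", "acc finance", and the
# mutated properties are forwarded to the cft-folder.py template in resources[0].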
42,982 |
def apply_gate_BLAS(mat, state, pure, modes, n, trunc):
"""
Gate application based on custom indexing and matrix multiplication.
Assumes the input matrix has shape (out1, in1, ...).
This implementation uses indexing and BLAS. As per stack overflow,
einsum doesn't actually use BLAS but rather a c implementation. In theory
if reshaping is efficient this should be faster.
"""
size = len(modes)
dim = trunc**size
stshape = [trunc for i in range(size)]
# Apply the following matrix transposition:
# |m1><m1| |m2><m2| ... |mn><mn| -> |m1>|m2>...|mn><m1|<m2|...<mn|
transpose_list = [2*i for i in range(size)] + [2*i + 1 for i in range(size)]
matview = np.transpose(mat, transpose_list).reshape((dim, dim))
if pure:
if n == 1:
return np.dot(mat, state)
# Transpose the state into the following form:
# |psi> |mode[0]> |mode[1]> ... |mode[n]>
transpose_list = [i for i in range(n) if not i in modes] + modes
view = np.transpose(state, transpose_list)
# Apply matrix to each substate
ret = np.zeros([trunc for i in range(n)], dtype=def_type)
for i in product(*([range(trunc) for j in range(n - size)])):
ret[i] = np.dot(matview, view[i].ravel()).reshape(stshape)
# "untranspose" the return matrix ret
untranspose_list = [0] * len(transpose_list)
for i in range(len(transpose_list)): # pylint: disable=consider-using-enumerate
untranspose_list[transpose_list[i]] = i
return np.transpose(ret, untranspose_list)
# otherwise, if state is mixed
if n == 1:
return np.dot(mat, np.dot(state, dagger(mat)))
# Transpose the state into the following form:
# |psi><psi||mode[0]>|mode[1]>...|mode[n]><mode[0]|<mode[1]|...<mode[n]|
transpose_list = [i for i in range(n*2) if not i//2 in modes]
transpose_list = transpose_list + [2*i for i in modes] + [2*i + 1 for i in modes]
view = np.transpose(state, transpose_list)
# Apply matrix to each substate
ret = np.zeros([trunc for i in range(n*2)], dtype=def_type)
for i in product(*([range(trunc) for j in range((n - size)*2)])):
ret[i] = np.dot(matview, np.dot(view[i].reshape((dim, dim)), dagger(matview))).reshape(stshape + stshape)
# "untranspose" the return matrix ret
untranspose_list = [0] * len(transpose_list)
for i in range(len(transpose_list)): # pylint: disable=consider-using-enumerate
untranspose_list[transpose_list[i]] = i
return np.transpose(ret, untranspose_list)
|
def apply_gate_BLAS(mat, state, pure, modes, n, trunc):
"""
Gate application based on custom indexing and matrix multiplication.
Assumes the input matrix has shape (out1, in1, ...).
This implementation uses indexing and BLAS. As per stack overflow,
einsum doesn't actually use BLAS but rather a c implementation. In theory
if reshaping is efficient this should be faster.
"""
size = len(modes)
dim = trunc**size
stshape = [trunc for i in range(size)]
# Apply the following matrix transposition:
# |m1><m1| |m2><m2| ... |mn><mn| -> |m1>|m2>...|mn><m1|<m2|...<mn|
transpose_list = [2*i for i in range(size)] + [2*i + 1 for i in range(size)]
matview = np.transpose(mat, transpose_list).reshape((dim, dim))
if pure:
if n == 1:
return np.dot(mat, state)
# Transpose the state into the following form:
# |psi> |mode[0]> |mode[1]> ... |mode[n]>
transpose_list = [i for i in range(n) if not i in modes] + modes
view = np.transpose(state, transpose_list)
# Apply matrix to each substate
ret = np.zeros([trunc for i in range(n)], dtype=def_type)
for i in product(*([range(trunc) for j in range(n - size)])):
ret[i] = np.dot(matview, view[i].ravel()).reshape(stshape)
# "untranspose" the return matrix ret
untranspose_list = [0] * len(transpose_list)
for i in range(len(transpose_list)): # pylint: disable=consider-using-enumerate
untranspose_list[transpose_list[i]] = i
return np.transpose(ret, untranspose_list)
# otherwise, the state is mixed
if n == 1:
return np.dot(mat, np.dot(state, dagger(mat)))
# Transpose the state into the following form:
# |psi><psi||mode[0]>|mode[1]>...|mode[n]><mode[0]|<mode[1]|...<mode[n]|
transpose_list = [i for i in range(n*2) if not i//2 in modes]
transpose_list = transpose_list + [2*i for i in modes] + [2*i + 1 for i in modes]
view = np.transpose(state, transpose_list)
# Apply matrix to each substate
ret = np.zeros([trunc for i in range(n*2)], dtype=def_type)
for i in product(*([range(trunc) for j in range((n - size)*2)])):
ret[i] = np.dot(matview, np.dot(view[i].reshape((dim, dim)), dagger(matview))).reshape(stshape + stshape)
# "untranspose" the return matrix ret
untranspose_list = [0] * len(transpose_list)
for i in range(len(transpose_list)): # pylint: disable=consider-using-enumerate
untranspose_list[transpose_list[i]] = i
return np.transpose(ret, untranspose_list)
|
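A small numeric sketch of calling the function above, assuming the module-level helpers def_type and dagger it relies on are defined alongside it; the gate and state below are illustrative only.
import numpy as np

trunc = 3
theta = 0.3
phase = np.diag(np.exp(1j * theta * np.arange(trunc)))  # single-mode phase gate, shape (out, in)
state = np.zeros((trunc, trunc), dtype=np.complex128)   # two-mode pure state |1, 0>
state[1, 0] = 1.0
out = apply_gate_BLAS(phase, state, pure=True, modes=[0], n=2, trunc=trunc)
# The gate acts only on mode 0, so out = exp(1j * theta) * state (only out[1, 0] is non-zero).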
40,372 |
def bipartite_subgraph(subset: Union[PairTensor, Tuple[List[int], List[int]]],
edge_index: Tensor, edge_attr: Optional[Tensor] = None,
relabel_nodes: bool = False,
num_nodes: Tuple[int, int] = None,
return_edge_mask: bool = False):
r"""Returns the induced subgraph of :obj:`(edge_index, edge_attr)`
containing the nodes in :obj:`subset`, for a bipartite graph.
Args:
subset (PairTensor or tuple([int],[int])): The nodes to keep.
edge_index (LongTensor): The edge indices.
edge_attr (Tensor, optional): Edge weights or multi-dimensional
edge features. (default: :obj:`None`)
relabel_nodes (bool, optional): If set to :obj:`True`, the resulting
:obj:`edge_index` will be relabeled to hold consecutive indices
starting from zero. (default: :obj:`False`)
num_nodes (tuple, optional): The number of nodes.
(default: :obj:`None`)
return_edge_mask (bool, optional): If set to :obj:`True`, will return
the edge mask to filter out additional edge features.
(default: :obj:`False`)
:rtype: (:class:`LongTensor`, :class:`Tensor`)
"""
device = edge_index.device
if isinstance(subset[0], (list, tuple)):
subset = (torch.tensor(subset[0], dtype=torch.long, device=device),
torch.tensor(subset[1], dtype=torch.long, device=device))
if subset[0].dtype == torch.bool or subset[0].dtype == torch.uint8:
num_nodes = subset[0].size(0), subset[1].size(0)
else:
if num_nodes is None:
num_nodes = (maybe_num_nodes(edge_index[0]),
maybe_num_nodes(edge_index[1]))
subset = (index_to_mask(subset[0], size=num_nodes[0]),
index_to_mask(subset[1], size=num_nodes[1]))
node_mask_i, node_mask_j = subset[0], subset[1]
edge_mask = node_mask_i[edge_index[0]] & node_mask_j[edge_index[1]]
edge_index = edge_index[:, edge_mask]
edge_attr = edge_attr[edge_mask] if edge_attr is not None else None
if relabel_nodes:
node_idx_i = torch.zeros(node_mask_i.size(0), dtype=torch.long,
device=device)
node_idx_j = torch.zeros(node_mask_j.size(0), dtype=torch.long,
device=device)
node_idx_i[subset[0]] = torch.arange(subset[0].sum().item(),
device=device)
node_idx_j[subset[1]] = torch.arange(subset[1].sum().item(),
device=device)
edge_index = torch.stack(
[node_idx_i[edge_index[0]], node_idx_j[edge_index[1]]])
if return_edge_mask:
return edge_index, edge_attr, edge_mask
else:
return edge_index, edge_attr
|
def bipartite_subgraph(subset: Union[PairTensor, Tuple[List[int], List[int]]],
edge_index: Tensor, edge_attr: Optional[Tensor] = None,
relabel_nodes: bool = False,
num_nodes: Tuple[int, int] = None,
return_edge_mask: bool = False):
r"""Returns the induced subgraph of :obj:`(edge_index, edge_attr)`
containing the nodes in :obj:`subset`.
Args:
subset (PairTensor or tuple([int],[int])): The nodes to keep.
edge_index (LongTensor): The edge indices.
edge_attr (Tensor, optional): Edge weights or multi-dimensional
edge features. (default: :obj:`None`)
relabel_nodes (bool, optional): If set to :obj:`True`, the resulting
:obj:`edge_index` will be relabeled to hold consecutive indices
starting from zero. (default: :obj:`False`)
num_nodes (tuple, optional): The number of nodes.
(default: :obj:`None`)
return_edge_mask (bool, optional): If set to :obj:`True`, will return
the edge mask to filter out additional edge features.
(default: :obj:`False`)
:rtype: (:class:`LongTensor`, :class:`Tensor`)
"""
device = edge_index.device
if isinstance(subset[0], (list, tuple)):
subset = (torch.tensor(subset[0], dtype=torch.long, device=device),
torch.tensor(subset[1], dtype=torch.long, device=device))
if subset[0].dtype == torch.bool or subset[0].dtype == torch.uint8:
num_nodes = subset[0].size(0), subset[1].size(0)
else:
if num_nodes is None:
num_nodes = (maybe_num_nodes(edge_index[0]),
maybe_num_nodes(edge_index[1]))
subset = (index_to_mask(subset[0], size=num_nodes[0]),
index_to_mask(subset[1], size=num_nodes[1]))
node_mask_i, node_mask_j = subset[0], subset[1]
edge_mask = node_mask_i[edge_index[0]] & node_mask_j[edge_index[1]]
edge_index = edge_index[:, edge_mask]
edge_attr = edge_attr[edge_mask] if edge_attr is not None else None
if relabel_nodes:
node_idx_i = torch.zeros(node_mask_i.size(0), dtype=torch.long,
device=device)
node_idx_j = torch.zeros(node_mask_j.size(0), dtype=torch.long,
device=device)
node_idx_i[subset[0]] = torch.arange(subset[0].sum().item(),
device=device)
node_idx_j[subset[1]] = torch.arange(subset[1].sum().item(),
device=device)
edge_index = torch.stack(
[node_idx_i[edge_index[0]], node_idx_j[edge_index[1]]])
if return_edge_mask:
return edge_index, edge_attr, edge_mask
else:
return edge_index, edge_attr
|
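A usage sketch of the function above on a tiny bipartite graph (two source nodes, three target nodes); the numbers are illustrative only.
import torch

edge_index = torch.tensor([[0, 0, 1, 1],
                           [0, 1, 1, 2]])
subset = ([0], [0, 1])  # keep source node 0 and target nodes 0 and 1
sub_edge_index, sub_edge_attr = bipartite_subgraph(subset, edge_index, relabel_nodes=True)
# Only the edges 0->0 and 0->1 survive; with relabel_nodes=True the result is
# tensor([[0, 0], [0, 1]]), and sub_edge_attr is None since no edge_attr was passed.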
28,254 |
def _parse_output_bool(numeric_value: Union[int, float]) -> bool:
""" Parses and converts the value to boolean type. True is 1.
Args:
numeric_value: The numerical value to convert.
Returns:
The boolean representation of the numeric value.
"""
return bool(numeric_value)
|
def _parse_output_bool(numeric_value: float) -> bool:
""" Parses and converts the value to boolean type. True is 1.
Args:
numeric_value: The numerical value to convert.
Returns:
The boolean representation of the numeric value.
"""
return bool(numeric_value)
|
22,625 |
def seq2col(X, nW, lens=None, out=None, threads_per_block=128, num_blocks=128):
B = X.shape[0]
nF = nW * 2 + 1
I = X.shape[1]
assert X.dtype == "float32", "CUDA seq2col kernel can only handle float32"
lens = check_seq2col_lens(lens, B)
nL = lens.shape[0]
if out is None:
out = cupy.zeros((B, I * nF), dtype="f")
seq2col_kernel((num_blocks,), (threads_per_block,), (out, X, lens, nW, B, I, nL))
return out
|
def seq2col(X, nW, *, lens=None, out=None, threads_per_block=128, num_blocks=128):
B = X.shape[0]
nF = nW * 2 + 1
I = X.shape[1]
assert X.dtype == "float32", "CUDA seq2col kernel can only handle float32"
lens = check_seq2col_lens(lens, B)
nL = lens.shape[0]
if out is None:
out = cupy.zeros((B, I * nF), dtype="f")
seq2col_kernel((num_blocks,), (threads_per_block,), (out, X, lens, nW, B, I, nL))
return out
|
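A usage sketch of the wrapper above; it assumes a CUDA device plus the module-level seq2col_kernel and check_seq2col_lens helpers, and the array below is illustrative.
import cupy

X = cupy.random.rand(10, 4).astype("float32")  # 10 positions with 4 features each
cols = seq2col(X, nW=1)                        # one neighbour on each side, nF = 2*nW + 1 = 3
assert cols.shape == (10, 4 * 3)               # each row concatenates left/self/right windows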
5,761 |
def zpk2sos(z, p, k, pairing=None, analog=False):
"""Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional
The method to use to combine pairs of poles and zeros into sections.
If analog is False and pairing is None, pairing is set to 'nearest';
if analog is True, pairing must be 'minimal', and is set to that if
it is None.
analog : bool, optional
If True, system is analog, otherwise discrete.
.. versionadded:: 1.8.0
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle for discrete-time systems, and
poles closest to the imaginary axis for continuous-time systems.
``pairing='minimal'`` outputs may not be suitable for `sosfilt`,
and ``analog=True`` outputs will never be suitable for `sosfilt`.
*Algorithms*
The steps in the ``pairing='nearest'``, ``pairing='keep_odd'``,
and ``pairing='minimal'`` algorithms are mostly shared. The
``'nearest'`` algorithm attempts to minimize the peak gain, while
``'keep_odd'`` minimizes peak gain under the constraint that
odd-order systems should retain one section as first order.
``'minimal'`` is similar to ``'keep_odd'``, but no additional
    poles or zeros are introduced.
The algorithm steps are as follows:
As a pre-processing step for ``pairing='nearest'``,
``pairing='keep_odd'``, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for
pairing. If ``pairing == 'nearest'`` and there are an odd number
of poles, add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle (or imaginary axis, for ``analog=True``) to
begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing = 'keep_odd'`` or ``'minimal'`` methods.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert
to SOS format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
    shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
With ``pairing='minimal'``, the first-order section doesn't have
the extra pole and zero at the origin:
>>> signal.zpk2sos(z1, p1, 1, pairing='minimal')
array([[ 0. , 1. , 1. , 0. , 1. , -0.75],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
if pairing is None:
pairing = 'minimal' if analog else 'nearest'
valid_pairings = ['nearest', 'keep_odd', 'minimal']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if analog and pairing != 'minimal':
raise ValueError('for analog zpk2sos conversion, '
+ 'pairing must be "minimal"')
if len(z) == len(p) == 0:
if not analog:
return np.array([[k, 0., 0., 1., 0., 0.]])
else:
return np.array([[0., 0., k, 0., 0., 1.]])
if pairing != 'minimal':
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
else:
if len(p) < len(z):
raise ValueError('for analog zpk2sos conversion, '
+ 'must have len(p)>=len(z)')
n_sections = (len(p) + 1) // 2
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
if not np.isreal(k):
raise ValueError('k must be real')
k = k.real
if not analog:
# digital: "worst" is the closest to the unit circle
def idx_worst(p):
return np.argmin(np.abs(1 - np.abs(p)))
else:
# analog: "worst" is the closest to the imaginary axis
def idx_worst(p):
return np.argmin(np.abs(np.real(p)))
sos = np.zeros((n_sections, 6))
# Construct the system, reversing order so the "worst" are last
for si in range(n_sections-1, -1, -1):
# Select the next "worst" pole
p1_idx = idx_worst(p)
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case (1): last remaining real pole
if pairing != 'minimal':
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
sos[si] = _single_zpksos([z1, 0], [p1, 0], 1)
elif len(z) > 0:
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
sos[si] = _single_zpksos([z1], [p1], 1)
else:
sos[si] = _single_zpksos([], [p1], 1)
elif (len(p) + 1 == len(z)
and not np.isreal(p1)
and np.isreal(p).sum() == 1
and np.isreal(z).sum() == 1):
# Special case (2): there's one real pole and one real zero
# left, and an equal number of poles and zeros to pair up.
# We *must* pair with a complex zero
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
sos[si] = _single_zpksos([z1, z1.conj()], [p1, p1.conj()], 1)
else:
if np.isreal(p1):
prealidx = np.flatnonzero(np.isreal(p))
p2_idx = prealidx[idx_worst(p[prealidx])]
p2 = p[p2_idx]
p = np.delete(p, p2_idx)
else:
p2 = p1.conj()
# find closest zero
if len(z) > 0:
z1_idx = _nearest_real_complex_idx(z, p1, 'any')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
if not np.isreal(z1):
sos[si] = _single_zpksos([z1, z1.conj()], [p1, p2], 1)
else:
if len(z) > 0:
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
sos[si] = _single_zpksos([z1, z2], [p1, p2], 1)
else:
sos[si] = _single_zpksos([z1], [p1, p2], 1)
else:
# no more zeros
sos[si] = _single_zpksos([], [p1, p2], 1)
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# put gain in first sos
sos[0][:3] *= k
return sos
|
def zpk2sos(z, p, k, pairing=None, *, analog=False):
"""Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional
The method to use to combine pairs of poles and zeros into sections.
If analog is False and pairing is None, pairing is set to 'nearest';
if analog is True, pairing must be 'minimal', and is set to that if
it is None.
analog : bool, optional
If True, system is analog, otherwise discrete.
.. versionadded:: 1.8.0
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle for discrete-time systems, and
poles closest to the imaginary axis for continuous-time systems.
``pairing='minimal'`` outputs may not be suitable for `sosfilt`,
and ``analog=True`` outputs will never be suitable for `sosfilt`.
*Algorithms*
The steps in the ``pairing='nearest'``, ``pairing='keep_odd'``,
and ``pairing='minimal'`` algorithms are mostly shared. The
``'nearest'`` algorithm attempts to minimize the peak gain, while
``'keep_odd'`` minimizes peak gain under the constraint that
odd-order systems should retain one section as first order.
``'minimal'`` is similar to ``'keep_odd'``, but no additional
poles or zeros are introduced
The algorithm steps are as follows:
As a pre-processing step for ``pairing='nearest'``,
``pairing='keep_odd'``, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for
pairing. If ``pairing == 'nearest'`` and there are an odd number
of poles, add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle (or imaginary axis, for ``analog=True``) to
begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing = 'keep_odd'`` or ``'minimal'`` methods.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert
to SOS format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
With ``pairing='minimal'``, the first-order section doesn't have
the extra pole and zero at the origin:
>>> signal.zpk2sos(z1, p1, 1, pairing='minimal')
array([[ 0. , 1. , 1. , 0. , 1. , -0.75],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
if pairing is None:
pairing = 'minimal' if analog else 'nearest'
valid_pairings = ['nearest', 'keep_odd', 'minimal']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if analog and pairing != 'minimal':
raise ValueError('for analog zpk2sos conversion, '
+ 'pairing must be "minimal"')
if len(z) == len(p) == 0:
if not analog:
return np.array([[k, 0., 0., 1., 0., 0.]])
else:
return np.array([[0., 0., k, 0., 0., 1.]])
if pairing != 'minimal':
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
else:
if len(p) < len(z):
raise ValueError('for analog zpk2sos conversion, '
+ 'must have len(p)>=len(z)')
n_sections = (len(p) + 1) // 2
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
if not np.isreal(k):
raise ValueError('k must be real')
k = k.real
if not analog:
# digital: "worst" is the closest to the unit circle
def idx_worst(p):
return np.argmin(np.abs(1 - np.abs(p)))
else:
# analog: "worst" is the closest to the imaginary axis
def idx_worst(p):
return np.argmin(np.abs(np.real(p)))
sos = np.zeros((n_sections, 6))
# Construct the system, reversing order so the "worst" are last
for si in range(n_sections-1, -1, -1):
# Select the next "worst" pole
p1_idx = idx_worst(p)
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case (1): last remaining real pole
if pairing != 'minimal':
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
sos[si] = _single_zpksos([z1, 0], [p1, 0], 1)
elif len(z) > 0:
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
sos[si] = _single_zpksos([z1], [p1], 1)
else:
sos[si] = _single_zpksos([], [p1], 1)
elif (len(p) + 1 == len(z)
and not np.isreal(p1)
and np.isreal(p).sum() == 1
and np.isreal(z).sum() == 1):
# Special case (2): there's one real pole and one real zero
# left, and an equal number of poles and zeros to pair up.
# We *must* pair with a complex zero
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
sos[si] = _single_zpksos([z1, z1.conj()], [p1, p1.conj()], 1)
else:
if np.isreal(p1):
prealidx = np.flatnonzero(np.isreal(p))
p2_idx = prealidx[idx_worst(p[prealidx])]
p2 = p[p2_idx]
p = np.delete(p, p2_idx)
else:
p2 = p1.conj()
# find closest zero
if len(z) > 0:
z1_idx = _nearest_real_complex_idx(z, p1, 'any')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
if not np.isreal(z1):
sos[si] = _single_zpksos([z1, z1.conj()], [p1, p2], 1)
else:
if len(z) > 0:
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
sos[si] = _single_zpksos([z1, z2], [p1, p2], 1)
else:
sos[si] = _single_zpksos([z1], [p1, p2], 1)
else:
# no more zeros
sos[si] = _single_zpksos([], [p1, p2], 1)
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# put gain in first sos
sos[0][:3] *= k
return sos
|
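A minimal standalone sketch (plain NumPy; the pole values are invented for illustration) of the "worst pole" selection both versions above rely on: for digital filters the pole closest to the unit circle is sectioned first, for analog filters the pole closest to the imaginary axis.
import numpy as np

poles = np.array([0.3, 0.7 + 0.6j, 0.95, 0.2 - 0.1j])

# digital: "worst" pole is the one closest to the unit circle
idx_digital = np.argmin(np.abs(1 - np.abs(poles)))
# analog: "worst" pole is the one closest to the imaginary axis
idx_analog = np.argmin(np.abs(np.real(poles)))

print(poles[idx_digital])  # pole with |p| closest to 1 (0.95 here)
print(poles[idx_analog])   # pole with the smallest |Re(p)| (0.2-0.1j here)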
43,620 |
def hf_state(n_electrons, m_spin_orbitals):
r"""Generates the occupation-number vector representing the Hartree-Fock (HF)
state of :math:`N` electrons in a basis of :math:`M` spin orbitals.
The many-particle wave function in the HF approximation is a `Slater determinant
<https://en.wikipedia.org/wiki/Slater_determinant>`_. In Fock space, a Slater determinant
is represented by the occupation-number vector:
    .. math::
\vert {\bf n} \rangle = \vert n_1, n_2, \dots, n_M \rangle,
n_i = \left\lbrace \begin{array}{ll} 1 & i \leq N \\ 0 & i > N \end{array} \right..
**Example**
>>> init_state = hf_state(2, 6)
>>> print(init_state)
[1 1 0 0 0 0]
Args:
n_electrons (int): number of active electrons
m_spin_orbitals (int): number of active **spin-orbitals**
Returns:
array: NumPy array containing the vector :math:`\vert {\bf n} \rangle`
"""
if not n_electrons > 0:
raise ValueError(
"The number of active electrons has to be > 0; got 'n_electrons' = {}"
.format(n_electrons)
)
if n_electrons > m_spin_orbitals:
raise ValueError(
"The number of active orbitals has to be >= the number of active electrons;"
" got 'm_spin_orbitals'={} < 'n_electrons'={}".format(m_spin_orbitals, n_electrons)
)
hf_state_on = [1 if i < n_electrons else 0 for i in range(m_spin_orbitals)]
return np.array(hf_state_on)
|
def hf_state(n_electrons, m_spin_orbitals):
r"""Generates the occupation-number vector representing the Hartree-Fock (HF)
state of :math:`N` electrons in a basis of :math:`M` spin orbitals.
The many-particle wave function in the HF approximation is a `Slater determinant
<https://en.wikipedia.org/wiki/Slater_determinant>`_. In Fock space, a Slater determinant
is represented by the occupation-number vector:
    .. math::
\vert {\bf n} \rangle = \vert n_1, n_2, \dots, n_M \rangle,
n_i = \left\lbrace \begin{array}{ll} 1 & i \leq N \\ 0 & i > N \end{array} \right..
**Example**
>>> init_state = hf_state(2, 6)
>>> print(init_state)
[1 1 0 0 0 0]
Args:
n_electrons (int): number of active electrons
m_spin_orbitals (int): number of active **spin-orbitals**
Returns:
array: NumPy array containing the vector :math:`\vert {\bf n} \rangle`
"""
if not n_electrons > 0:
raise ValueError(
"The number of active electrons has to be > 0; got 'n_electrons' = {}"
.format(n_electrons)
)
if n_electrons > m_spin_orbitals:
raise ValueError(
"The number of active orbitals has to be >= the number of active electrons;"
" got 'm_spin_orbitals'={} < 'n_electrons'={}".format(m_spin_orbitals, n_electrons)
)
hf_state_on = np.where(np.arange(m_spin_orbitals) < n_electrons, 1, 0)
return np.array(hf_state_on)
|
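A self-contained check (plain NumPy, outside any quantum-chemistry package) that the list-comprehension form in the original row and the np.where form in the modified row build the same occupation-number vector:
import numpy as np

n_electrons, m_spin_orbitals = 2, 6

via_list = np.array([1 if i < n_electrons else 0 for i in range(m_spin_orbitals)])
via_where = np.where(np.arange(m_spin_orbitals) < n_electrons, 1, 0)

assert np.array_equal(via_list, via_where)  # both give [1 1 0 0 0 0]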
8,569 |
def do_xmlrpc_rw(cobbler_api: CobblerAPI, port):
"""
    This tries to bring up the Cobbler xmlrpc_api and restart it if it fails.
:param cobbler_api: The cobbler_api instance which is used for this method.
    :param port: The port on which the xmlrpc api should run.
"""
xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface)
server = remote.CobblerXMLRPCServer(('127.0.0.1', port))
server.logRequests = 0 # don't print stuff
logger.debug("XMLRPC running on %s", port)
server.register_instance(xinterface)
start_time = ""
try:
import psutil
p = psutil.Process(os.getpid())
start_time = " in %s seconds" % str(time.time() - p.create_time())
except ModuleNotFoundError:
# This is not critical, but debug only - just install python3-psutil
pass
while True:
try:
logger.info("Cobbler startup completed" + start_time)
server.serve_forever()
except IOError:
# interrupted? try to serve again
time.sleep(0.5)
|
def do_xmlrpc_rw(cobbler_api: CobblerAPI, port):
"""
    This tries to bring up the Cobbler xmlrpc_api and restart it if it fails.
:param cobbler_api: The cobbler_api instance which is used for this method.
    :param port: The port on which the xmlrpc api should run.
"""
xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface)
server = remote.CobblerXMLRPCServer(('127.0.0.1', port))
server.logRequests = 0 # don't print stuff
logger.debug("XMLRPC running on %s", port)
server.register_instance(xinterface)
start_time = ""
try:
import psutil
p = psutil.Process(os.getpid())
start_time = " in %s seconds" % str(time.time() - p.create_time())
except ModuleNotFoundError:
# This is not critical, but debug only - just install python3-psutil
pass
while True:
try:
logger.info("Cobbler startup completed %s", start_time)
server.serve_forever()
except IOError:
# interrupted? try to serve again
time.sleep(0.5)
|
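The only change in this pair is the logging call. A minimal sketch of the lazy %-formatting idiom with the standard logging module (logger name and message are illustrative, not Cobbler's):
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("example")

start_time = " in 0.42 seconds"

# concatenation builds the full message even when the level is disabled
logger.info("Startup completed" + start_time)

# passing arguments lets the logging framework interpolate only if needed
logger.info("Startup completed%s", start_time)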
57,107 |
def validate_suggestion_change(obj):
"""Validates Exploration or Question change.
Args:
obj: dict. Data that needs to be validated.
Returns:
dict. Returns suggestion change dict after validation.
"""
# No explicit call to validate_dict is required, because
# ExplorationChange or QuestionSuggestionChange calls
# validate method while initialization.
if obj.get('cmd') is None:
raise base.BaseHandler.InvalidInputException(
'Missing cmd key in change dict')
exp_change_commands = [command['name'] for command in
exp_domain.ExplorationChange.ALLOWED_COMMANDS]
question_change_commands = [command['name'] for command in
question_domain.QuestionChange.ALLOWED_COMMANDS]
if obj['cmd'] in exp_change_commands:
exp_domain.ExplorationChange(obj)
elif obj['cmd'] in question_change_commands:
question_domain.QuestionSuggestionChange(obj)
else:
raise base.BaseHandler.InvalidInputException(
'%s cmd is not allowed.' % obj['cmd'])
return obj
|
def validate_suggestion_change(obj):
"""Validates Exploration or Question change.
Args:
obj: dict. Data that needs to be validated.
Returns:
dict. Returns suggestion change dict after validation.
"""
# No explicit call to validate_dict is required, because
# ExplorationChange or QuestionSuggestionChange calls
# validate method while initialization.
if obj.get('cmd') is None:
raise base.BaseHandler.InvalidInputException(
'Missing cmd key in change dict')
exp_change_commands = [
command['name'] for command in
exp_domain.ExplorationChange.ALLOWED_COMMANDS
]
question_change_commands = [
command['name'] for command in
question_domain.QuestionChange.ALLOWED_COMMANDS
]
if obj['cmd'] in exp_change_commands:
exp_domain.ExplorationChange(obj)
elif obj['cmd'] in question_change_commands:
question_domain.QuestionSuggestionChange(obj)
else:
raise base.BaseHandler.InvalidInputException(
'%s cmd is not allowed.' % obj['cmd'])
return obj
|
30,649 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials', {}).get('identifier', '')
password = params.get('credentials', {}).get('password', '')
base_url = params['url'][:-1] if (params['url'] and params['url'].endswith('/')) else params['url']
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
ok_codes=(200, 201, 204),
headers={'accept': "application/json"}
)
if demisto.command() == 'test-module':
result = test_module(client)
return_outputs(result)
elif demisto.command() == 'guardian-search':
result = search_by_query(client, demisto.args())
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-list-all-assets':
result = list_all_assets(client)
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-find-ip-by-mac':
result = find_ip_by_mac(client, demisto.args())
return_outputs(result[0], result[1], result[2])
except Exception as e:
return_error(str(f'Failed to execute {demisto.command()} command. Error: {str(e)}'))
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials', {}).get('identifier', '')
password = params.get('credentials', {}).get('password', '')
base_url = params['url'].strip('/')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
ok_codes=(200, 201, 204),
headers={'accept': "application/json"}
)
if demisto.command() == 'test-module':
result = test_module(client)
return_outputs(result)
elif demisto.command() == 'guardian-search':
result = search_by_query(client, demisto.args())
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-list-all-assets':
result = list_all_assets(client)
return_outputs(result[0], result[1], result[2])
elif demisto.command() == 'guardian-find-ip-by-mac':
result = find_ip_by_mac(client, demisto.args())
return_outputs(result[0], result[1], result[2])
except Exception as e:
return_error(str(f'Failed to execute {demisto.command()} command. Error: {str(e)}'))
|
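The modified row swaps the manual trailing-slash check for str.strip('/'). A short sketch, independent of the Demisto SDK, of how strip and rstrip behave on URL-like strings (the URLs are made up); rstrip('/') is the closer drop-in when only trailing slashes should be removed:
url = "https://example.com/api///"
print(url.rstrip('/'))   # https://example.com/api  (trailing slashes only)
print(url.strip('/'))    # https://example.com/api  (same here, but also drops leading '/')

path = "/guardian/assets/"
print(path.rstrip('/'))  # /guardian/assets
print(path.strip('/'))   # guardian/assets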
27,792 |
def import_path(
p: Union[str, "os.PathLike[str]"],
*,
mode: Union[str, ImportMode] = ImportMode.prepend,
root: Path,
) -> ModuleType:
"""Import and return a module from the given path, which can be a file (a module) or
a directory (a package).
The import mechanism used is controlled by the `mode` parameter:
* `mode == ImportMode.prepend`: the directory containing the module (or package, taking
`__init__.py` files into account) will be put at the *start* of `sys.path` before
being imported with `__import__.
* `mode == ImportMode.append`: same as `prepend`, but the directory will be appended
to the end of `sys.path`, if not already in `sys.path`.
* `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib`
to import the module, which avoids having to use `__import__` and muck with `sys.path`
at all. It effectively allows having same-named test modules in different places.
:param root:
Used as an anchor when mode == ImportMode.importlib to obtain
a unique name for the module being imported so it can safely be stored
into ``sys.modules``.
:raises ImportPathMismatchError:
If after importing the given `path` and the module `__file__`
are different. Only raised in `prepend` and `append` modes.
"""
mode = ImportMode(mode)
path = Path(p)
if not path.exists():
raise ImportError(path)
if mode is ImportMode.importlib:
module_name = module_name_from_path(path, root)
for meta_importer in sys.meta_path:
spec = meta_importer.find_spec(module_name, [str(path.parent)])
if spec is not None:
break
else:
spec = importlib.util.spec_from_file_location(module_name, str(path))
if spec is None:
raise ImportError(
f"Can't find module {module_name} at location {str(path)}"
)
mod = importlib.util.module_from_spec(spec)
sys.modules[module_name] = mod
spec.loader.exec_module(mod) # type: ignore[union-attr]
insert_missing_modules(sys.modules, module_name)
return mod
pkg_path = resolve_package_path(path)
if pkg_path is not None:
pkg_root = pkg_path.parent
names = list(path.with_suffix("").relative_to(pkg_root).parts)
if names[-1] == "__init__":
names.pop()
module_name = ".".join(names)
else:
pkg_root = path.parent
module_name = path.stem
# Change sys.path permanently: restoring it at the end of this function would cause surprising
# problems because of delayed imports: for example, a conftest.py file imported by this function
# might have local imports, which would fail at runtime if we restored sys.path.
if mode is ImportMode.append:
if str(pkg_root) not in sys.path:
sys.path.append(str(pkg_root))
elif mode is ImportMode.prepend:
if str(pkg_root) != sys.path[0]:
sys.path.insert(0, str(pkg_root))
else:
assert_never(mode)
importlib.import_module(module_name)
mod = sys.modules[module_name]
if path.name == "__init__.py":
return mod
ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "")
if ignore != "1":
module_file = mod.__file__
if module_file.endswith((".pyc", ".pyo")):
module_file = module_file[:-1]
if module_file.endswith(os.path.sep + "__init__.py"):
module_file = module_file[: -(len(os.path.sep + "__init__.py"))]
try:
is_same = _is_same(str(path), module_file)
except FileNotFoundError:
is_same = False
if not is_same:
raise ImportPathMismatchError(module_name, module_file, path)
return mod
|
def import_path(
p: Union[str, "os.PathLike[str]"],
*,
mode: Union[str, ImportMode] = ImportMode.prepend,
root: Path,
) -> ModuleType:
"""Import and return a module from the given path, which can be a file (a module) or
a directory (a package).
The import mechanism used is controlled by the `mode` parameter:
* `mode == ImportMode.prepend`: the directory containing the module (or package, taking
`__init__.py` files into account) will be put at the *start* of `sys.path` before
being imported with `__import__.
* `mode == ImportMode.append`: same as `prepend`, but the directory will be appended
to the end of `sys.path`, if not already in `sys.path`.
* `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib`
to import the module, which avoids having to use `__import__` and muck with `sys.path`
at all. It effectively allows having same-named test modules in different places.
:param root:
Used as an anchor when mode == ImportMode.importlib to obtain
a unique name for the module being imported so it can safely be stored
into ``sys.modules``.
:raises ImportPathMismatchError:
If after importing the given `path` and the module `__file__`
are different. Only raised in `prepend` and `append` modes.
"""
mode = ImportMode(mode)
path = Path(p)
if not path.exists():
raise ImportError(path)
if mode is ImportMode.importlib:
module_name = module_name_from_path(path, root)
for meta_importer in sys.meta_path:
spec = meta_importer.find_spec(module_name, [str(path.parent)])
if spec is not None:
break
else:
spec = importlib.util.spec_from_file_location(module_name, str(path))
if spec is None:
raise ImportError(
f"Can't find module {module_name} at location {path}"
)
mod = importlib.util.module_from_spec(spec)
sys.modules[module_name] = mod
spec.loader.exec_module(mod) # type: ignore[union-attr]
insert_missing_modules(sys.modules, module_name)
return mod
pkg_path = resolve_package_path(path)
if pkg_path is not None:
pkg_root = pkg_path.parent
names = list(path.with_suffix("").relative_to(pkg_root).parts)
if names[-1] == "__init__":
names.pop()
module_name = ".".join(names)
else:
pkg_root = path.parent
module_name = path.stem
# Change sys.path permanently: restoring it at the end of this function would cause surprising
# problems because of delayed imports: for example, a conftest.py file imported by this function
# might have local imports, which would fail at runtime if we restored sys.path.
if mode is ImportMode.append:
if str(pkg_root) not in sys.path:
sys.path.append(str(pkg_root))
elif mode is ImportMode.prepend:
if str(pkg_root) != sys.path[0]:
sys.path.insert(0, str(pkg_root))
else:
assert_never(mode)
importlib.import_module(module_name)
mod = sys.modules[module_name]
if path.name == "__init__.py":
return mod
ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "")
if ignore != "1":
module_file = mod.__file__
if module_file.endswith((".pyc", ".pyo")):
module_file = module_file[:-1]
if module_file.endswith(os.path.sep + "__init__.py"):
module_file = module_file[: -(len(os.path.sep + "__init__.py"))]
try:
is_same = _is_same(str(path), module_file)
except FileNotFoundError:
is_same = False
if not is_same:
raise ImportPathMismatchError(module_name, module_file, path)
return mod
|
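A simplified sketch of the dotted module-name construction used in the prepend/append branch of both versions: strip the suffix, take the parts relative to the package root, and drop a trailing __init__. The paths are hypothetical, and pathlib never touches the filesystem here.
from pathlib import Path

pkg_root = Path("/project/src")
path = Path("/project/src/mypkg/sub/__init__.py")

names = list(path.with_suffix("").relative_to(pkg_root).parts)
if names[-1] == "__init__":
    names.pop()

print(".".join(names))  # mypkg.sub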
54,315 |
def convertRadisToJSON():
"""Converts the ~/.radis file into json formatted file ~/.radisjson
Example
-------
original ~/.radis file format
[HITRAN-CO2-TEST]
info = HITRAN 2016 database, CO2, 1 main isotope (CO2-626), bandhead: 2380-2398 cm-1 (4165-4200 nm)
path = PATH_TO\radis\radis\test\files\hitran_co2_626_bandhead_4165_4200nm.par
format = hitran
parfuncfmt = hapi
levelsfmt = radis
-----------
Converted ~/.radisjson file format
{"HITRAN-CO2-TEST": {"info": "HITRAN 2016 database, CO2, 1 main isotope (CO2-626), bandhead: 2380-2398 cm-1 (4165-4200 nm)",
"path": "PATH_TO\\radis\\radis\\test\\files\\hitran_co2_626_bandhead_4165_4200nm.par",
"format": "hitran",
"parfuncfmt": "hapi",
"levelsfmt": "radis"}}
"""
# Loads configuration file ~/.radis
config = getConfig()
# Variable to store data in JSON format
config_json = {}
# Converting configuration into JSON format and storing in config_json variable
for i in config.sections():
temp = {}
for j in config[i]:
temp[j] = config[i][j]
config_json[i] = temp
# Creating json file
config_json_dir = CONFIG_PATH_JSON
with open(config_json_dir, "w") as outfile:
json.dump(config_json, outfile, indent=2)
outfile.close()
return
|
def convertRadisToJSON():
"""Converts the ~/.radis file into json formatted file ~/.radisjson
Example
-------
original ~/.radis file format::
[HITRAN-CO2-TEST]
info = HITRAN 2016 database, CO2, 1 main isotope (CO2-626), bandhead: 2380-2398 cm-1 (4165-4200 nm)
path = PATH_TO\radis\radis\test\files\hitran_co2_626_bandhead_4165_4200nm.par
format = hitran
parfuncfmt = hapi
levelsfmt = radis
-----------
Converted ~/.radisjson file format
{"HITRAN-CO2-TEST": {"info": "HITRAN 2016 database, CO2, 1 main isotope (CO2-626), bandhead: 2380-2398 cm-1 (4165-4200 nm)",
"path": "PATH_TO\\radis\\radis\\test\\files\\hitran_co2_626_bandhead_4165_4200nm.par",
"format": "hitran",
"parfuncfmt": "hapi",
"levelsfmt": "radis"}}
"""
# Loads configuration file ~/.radis
config = getConfig()
# Variable to store data in JSON format
config_json = {}
# Converting configuration into JSON format and storing in config_json variable
for i in config.sections():
temp = {}
for j in config[i]:
temp[j] = config[i][j]
config_json[i] = temp
# Creating json file
config_json_dir = CONFIG_PATH_JSON
with open(config_json_dir, "w") as outfile:
json.dump(config_json, outfile, indent=2)
outfile.close()
return
|
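A self-contained sketch of the configparser-to-JSON conversion performed above, using an in-memory config instead of ~/.radis (section name and output file are illustrative):
import configparser
import json

config = configparser.ConfigParser()
config.read_string("""
[HITRAN-CO2-TEST]
format = hitran
parfuncfmt = hapi
""")

config_json = {section: dict(config[section]) for section in config.sections()}

with open("radis_config.json", "w") as outfile:
    json.dump(config_json, outfile, indent=2)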
1,794 |
def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of the samples.
np.random.seed(1)
n_samples = 1000
n_features = 2
n_half_samples = int(n_samples / 2)
x = np.random.normal(0.0, 0.001, (n_samples, n_features))
y = np.zeros(n_samples)
y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]
# Fitting linear regression with perfect prediction
lr = LinearRegression(fit_intercept=False)
lr.fit(x, y)
# When all samples are weighted with the same weights, the ratio of
# the two features importance should equal to 1 on expectation (when using
# mean absolutes error as the loss function).
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000)
x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w_none, 2) == 1.00
# When passing a vector of ones as the sample_weight, results should be
# the same as in the case that sample_weight=None.
w = np.ones(n_samples)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000, sample_weight=w)
x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_ones == x1_x2_imp_ratio_w_none
# When the ratio between the weights of the first half of the samples and
# the second half of the samples approaches to infinity, the ratio of
# the two features importance should equal to 2 on expectation (when using
# mean absolutes error as the loss function).
w = np.hstack([np.repeat(10.0 ** 10, n_half_samples),
np.repeat(1.0, n_half_samples)])
lr.fit(x, y, w)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000,
sample_weight=w)
x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none, 2) == 2.00
|
def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of the samples.
rng = np.random.RandomState(1)
n_samples = 1000
n_features = 2
n_half_samples = int(n_samples / 2)
x = np.random.normal(0.0, 0.001, (n_samples, n_features))
y = np.zeros(n_samples)
y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]
# Fitting linear regression with perfect prediction
lr = LinearRegression(fit_intercept=False)
lr.fit(x, y)
# When all samples are weighted with the same weights, the ratio of
# the two features importance should equal to 1 on expectation (when using
# mean absolutes error as the loss function).
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000)
x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w_none, 2) == 1.00
# When passing a vector of ones as the sample_weight, results should be
# the same as in the case that sample_weight=None.
w = np.ones(n_samples)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000, sample_weight=w)
x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_ones == x1_x2_imp_ratio_w_none
# When the ratio between the weights of the first half of the samples and
# the second half of the samples approaches to infinity, the ratio of
# the two features importance should equal to 2 on expectation (when using
# mean absolutes error as the loss function).
w = np.hstack([np.repeat(10.0 ** 10, n_half_samples),
np.repeat(1.0, n_half_samples)])
lr.fit(x, y, w)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000,
sample_weight=w)
x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none, 2) == 2.00
|
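The visible change in this pair is the move toward a local random generator instead of seeding the global one. A minimal sketch of that pattern with plain NumPy (array shapes arbitrary):
import numpy as np

# global seeding affects every subsequent np.random call in the process
np.random.seed(1)
a = np.random.normal(0.0, 0.001, (3, 2))

# a dedicated generator keeps the randomness local to the test
rng = np.random.RandomState(1)
b = rng.normal(0.0, 0.001, (3, 2))

assert np.allclose(a, b)  # same seed, same legacy MT19937 stream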
4,877 |
def test_normalize_kwarg_pie():
fig,ax = plt.subplots()
x=[0.3,0.3,0.1]
t1 = ax.pie(x=x, normalize=True)
assert abs(t1[0][-1].theta2 - 360.) < 1e-3
t2 = ax.pie(x=x, normalize=False)
assert abs(t2[0][-1].theta2 - 360.) > 1e-3
|
def test_normalize_kwarg_pie():
fig, ax = plt.subplots()
x=[0.3,0.3,0.1]
t1 = ax.pie(x=x, normalize=True)
assert abs(t1[0][-1].theta2 - 360.) < 1e-3
t2 = ax.pie(x=x, normalize=False)
assert abs(t2[0][-1].theta2 - 360.) > 1e-3
|
41,842 |
def dump_best_config(input_config_file: str, output_config_file: str, study: optuna.Study) -> None:
"""Save jsonnet after updating with parameters from the best trial in the study.
Args:
input_config_file:
Input configuration file (Jsonnet) specified in
:class:`~optuna.integration.AllenNLPExecutor`.
output_config_file:
Output configuration (Json) file.
study:
Instance of :class:`~optuna.study.Study`.
Note that :func:`~optuna.study.Study.optimize` mast have already called.
"""
best_params = study.best_params
for key, value in best_params.items():
best_params[key] = str(value)
best_config = json.loads(_jsonnet.evaluate_file(input_config_file, ext_vars=best_params))
best_config = allennlp.common.params.infer_and_cast(best_config)
with open(output_config_file, "w") as f:
json.dump(best_config, f, indent=4)
|
def dump_best_config(input_config_file: str, output_config_file: str, study: optuna.Study) -> None:
"""Save jsonnet after updating with parameters from the best trial in the study.
Args:
input_config_file:
Input configuration file (Jsonnet) specified in
:class:`~optuna.integration.AllenNLPExecutor`.
output_config_file:
Output configuration (Json) file.
study:
Instance of :class:`~optuna.study.Study`.
Note that :func:`~optuna.study.Study.optimize` must have been called.
"""
best_params = study.best_params
for key, value in best_params.items():
best_params[key] = str(value)
best_config = json.loads(_jsonnet.evaluate_file(input_config_file, ext_vars=best_params))
best_config = allennlp.common.params.infer_and_cast(best_config)
with open(output_config_file, "w") as f:
json.dump(best_config, f, indent=4)
|
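A small illustrative sketch of the ext_vars mechanism dump_best_config relies on: Jsonnet code reads the string-valued variables via std.extVar. evaluate_snippet is used here instead of evaluate_file so the example is self-contained; the "lr" parameter is made up.
import json
import _jsonnet

snippet = '{ trainer: { lr: std.extVar("lr") } }'
best_params = {"lr": "0.001"}  # stringified best params, as in the function above

config = json.loads(_jsonnet.evaluate_snippet("snippet", snippet, ext_vars=best_params))
print(config)  # {'trainer': {'lr': '0.001'}}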
9,358 |
def test_wrap_var_dict():
assert not isinstance(wrap_var(dict(foo='bar')), AnsibleUnsafe)
assert isinstance(wrap_var(dict(foo='bar'))['foo'], AnsibleUnsafe)
|
def test_wrap_var_dict():
assert isinstance(wrap_var(dict(foo='bar')), dict)
assert isinstance(wrap_var(dict(foo='bar'))['foo'], AnsibleUnsafe)
|
27,956 |
def perform_analysis(args, skip_handler, context, actions, metadata):
"""
Perform static analysis via the given (or if not, all) analyzers,
in the given analysis context for the supplied build actions.
Additionally, insert statistical information into the metadata dict.
"""
analyzers = args.analyzers if 'analyzers' in args \
else analyzer_types.supported_analyzers
analyzers, _ = analyzer_types.check_supported_analyzers(
analyzers, context)
ctu_collect = False
ctu_analyze = False
ctu_dir = ''
if 'ctu_phases' in args:
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
args.ctu_dir = ctu_dir
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.error("CTU can only be used with the clang static analyzer.")
return
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if 'stats_enabled' in args and args.stats_enabled:
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.debug("Statistics can only be used with "
"the Clang Static Analyzer.")
return
actions = prepare_actions(actions, analyzers)
config_map = analyzer_types.build_config_handlers(args, context, analyzers)
available_checkers = set()
# Add profile names to the checkers list so we will not warn
# if a profile is enabled but there is no checker with that name.
available_checkers.update(context.available_profiles.keys())
# Collect all the available checkers from the enabled analyzers.
for analyzer in config_map.items():
_, analyzer_cfg = analyzer
for analyzer_checker in analyzer_cfg.checks().items():
checker_name, _ = analyzer_checker
available_checkers.add(checker_name)
if 'ordered_checkers' in args:
missing_checkers = checkers.available(args.ordered_checkers,
available_checkers)
if missing_checkers:
LOG.warning("No checker(s) with these names was found:\n%s",
'\n'.join(missing_checkers))
LOG.warning("Please review the checker names.\n"
"In the next release the analysis will not start"
" with invalid checker names.")
if 'stats_enabled' in args:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_analyze)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_analyze)
# Statistics collector checkers must be explicitly disabled
# as they trash the output.
if "clangsa" in analyzers:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_collect, False)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_collect, False)
# Save some metadata information.
versions = __get_analyzer_version(context, config_map)
metadata['versions'].update(versions)
metadata['checkers'] = {}
for analyzer in analyzers:
metadata['checkers'][analyzer] = {}
for check, data in config_map[analyzer].checks().items():
enabled, _ = data
metadata['checkers'][analyzer].update({check: enabled})
if ctu_collect:
shutil.rmtree(ctu_dir, ignore_errors=True)
elif ctu_analyze and not os.path.exists(ctu_dir):
LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
return
start_time = time.time()
# Use Manager to create data objects which can be
# safely shared between processes.
manager = SyncManager()
manager.start(__mgr_init)
config_map = manager.dict(config_map)
actions_map = create_actions_map(actions, manager)
# Setting to not None value will enable statistical analysis features.
statistics_data = __get_statistics_data(args, manager)
if ctu_collect or statistics_data:
ctu_data = None
if ctu_collect or ctu_analyze:
ctu_capability = config_map[ClangSA.ANALYZER_NAME].ctu_capability
ctu_data = manager.dict({'ctu_dir': ctu_dir,
'ctu_func_map_cmd':
ctu_capability.mapping_tool_path,
'ctu_func_map_file':
ctu_capability.mapping_file_name,
'ctu_temp_fnmap_folder':
'tmpExternalFnMaps'})
pre_analyze = [a for a in actions
if a.analyzer_type == ClangSA.ANALYZER_NAME]
pre_analysis_manager.run_pre_analysis(pre_analyze,
context,
config_map,
args.jobs,
skip_handler,
ctu_data,
statistics_data,
manager)
if 'stats_output' in args and args.stats_output:
return
if 'stats_dir' in args and args.stats_dir:
statistics_data = manager.dict({'stats_out_dir': args.stats_dir})
ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
args.ctu_reanalyze_on_failure
if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
LOG.info("Starting static analysis ...")
analysis_manager.start_workers(actions_map, actions, context,
config_map, args.jobs,
args.output_path,
skip_handler,
metadata,
'quiet' in args,
'capture_analysis_output' in args,
args.timeout if 'timeout' in args
else None,
ctu_reanalyze_on_failure,
statistics_data,
manager)
LOG.info("Analysis finished.")
LOG.info("To view results in the terminal use the "
"\"CodeChecker parse\" command.")
LOG.info("To store results use the \"CodeChecker store\" command.")
LOG.info("See --help and the user guide for further options about"
" parsing and storing the reports.")
LOG.info("----=================----")
end_time = time.time()
LOG.info("Analysis length: %s sec.", end_time - start_time)
metadata['timestamps'] = {'begin': start_time,
'end': end_time}
if ctu_collect and ctu_analyze:
shutil.rmtree(ctu_dir, ignore_errors=True)
manager.shutdown()
|
def perform_analysis(args, skip_handler, context, actions, metadata):
"""
Perform static analysis via the given (or if not, all) analyzers,
in the given analysis context for the supplied build actions.
Additionally, insert statistical information into the metadata dict.
"""
analyzers = args.analyzers if 'analyzers' in args \
else analyzer_types.supported_analyzers
analyzers, _ = analyzer_types.check_supported_analyzers(
analyzers, context)
ctu_collect = False
ctu_analyze = False
ctu_dir = ''
if 'ctu_phases' in args:
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
args.ctu_dir = ctu_dir
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.error("CTU can only be used with the clang static analyzer.")
return
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if 'stats_enabled' in args and args.stats_enabled:
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.debug("Statistics can only be used with "
"the Clang Static Analyzer.")
return
actions = prepare_actions(actions, analyzers)
config_map = analyzer_types.build_config_handlers(args, context, analyzers)
available_checkers = set()
# Add profile names to the checkers list so we will not warn
# if a profile is enabled but there is no checker with that name.
available_checkers.update(context.available_profiles.keys())
# Collect all the available checkers from the enabled analyzers.
for analyzer in config_map.items():
_, analyzer_cfg = analyzer
for analyzer_checker in analyzer_cfg.checks().items():
checker_name, _ = analyzer_checker
available_checkers.add(checker_name)
if 'ordered_checkers' in args:
missing_checkers = checkers.available(args.ordered_checkers,
available_checkers)
if missing_checkers:
LOG.warning("No checker(s) with these names was found:\n%s",
'\n'.join(missing_checkers))
LOG.warning("Please review the checker names.\n"
"In the next release the analysis will not start"
"with invalid checker names.")
if 'stats_enabled' in args:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_analyze)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_analyze)
# Statistics collector checkers must be explicitly disabled
# as they trash the output.
if "clangsa" in analyzers:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_collect, False)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_collect, False)
# Save some metadata information.
versions = __get_analyzer_version(context, config_map)
metadata['versions'].update(versions)
metadata['checkers'] = {}
for analyzer in analyzers:
metadata['checkers'][analyzer] = {}
for check, data in config_map[analyzer].checks().items():
enabled, _ = data
metadata['checkers'][analyzer].update({check: enabled})
if ctu_collect:
shutil.rmtree(ctu_dir, ignore_errors=True)
elif ctu_analyze and not os.path.exists(ctu_dir):
LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
return
start_time = time.time()
# Use Manager to create data objects which can be
# safely shared between processes.
manager = SyncManager()
manager.start(__mgr_init)
config_map = manager.dict(config_map)
actions_map = create_actions_map(actions, manager)
# Setting to not None value will enable statistical analysis features.
statistics_data = __get_statistics_data(args, manager)
if ctu_collect or statistics_data:
ctu_data = None
if ctu_collect or ctu_analyze:
ctu_capability = config_map[ClangSA.ANALYZER_NAME].ctu_capability
ctu_data = manager.dict({'ctu_dir': ctu_dir,
'ctu_func_map_cmd':
ctu_capability.mapping_tool_path,
'ctu_func_map_file':
ctu_capability.mapping_file_name,
'ctu_temp_fnmap_folder':
'tmpExternalFnMaps'})
pre_analyze = [a for a in actions
if a.analyzer_type == ClangSA.ANALYZER_NAME]
pre_analysis_manager.run_pre_analysis(pre_analyze,
context,
config_map,
args.jobs,
skip_handler,
ctu_data,
statistics_data,
manager)
if 'stats_output' in args and args.stats_output:
return
if 'stats_dir' in args and args.stats_dir:
statistics_data = manager.dict({'stats_out_dir': args.stats_dir})
ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
args.ctu_reanalyze_on_failure
if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
LOG.info("Starting static analysis ...")
analysis_manager.start_workers(actions_map, actions, context,
config_map, args.jobs,
args.output_path,
skip_handler,
metadata,
'quiet' in args,
'capture_analysis_output' in args,
args.timeout if 'timeout' in args
else None,
ctu_reanalyze_on_failure,
statistics_data,
manager)
LOG.info("Analysis finished.")
LOG.info("To view results in the terminal use the "
"\"CodeChecker parse\" command.")
LOG.info("To store results use the \"CodeChecker store\" command.")
LOG.info("See --help and the user guide for further options about"
" parsing and storing the reports.")
LOG.info("----=================----")
end_time = time.time()
LOG.info("Analysis length: %s sec.", end_time - start_time)
metadata['timestamps'] = {'begin': start_time,
'end': end_time}
if ctu_collect and ctu_analyze:
shutil.rmtree(ctu_dir, ignore_errors=True)
manager.shutdown()
|
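Both versions share the config map and statistics data across worker processes through a SyncManager. A minimal, standalone sketch of that pattern (dictionary contents and the initializer are illustrative, not CodeChecker's):
from multiprocessing.managers import SyncManager

def _mgr_init():
    # runs inside the manager process; a typical place to ignore SIGINT
    pass

if __name__ == "__main__":
    manager = SyncManager()
    manager.start(_mgr_init)

    shared = manager.dict({"stats_out_dir": "/tmp/stats"})
    shared["jobs"] = 4  # updates are visible to any process holding this proxy

    print(dict(shared))
    manager.shutdown()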
34,289 |
def test_train_status():
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
payload["force"] = True
ctx = multiprocessing.get_context("spawn")
# run a rasa server in one process
p0 = ctx.Process(target=subprocess.run, args=(["rasa", "run", "--enable-api"],))
p0.start()
server_ready = False
# wait until server is up before sending train request and status test loop
while not server_ready:
try:
server_ready = (
requests.get("http://localhost:5005/status").status_code == 200
)
except requests.exceptions.ConnectionError:
pass
time.sleep(1)
# use another process to hit the first server with a training request
results = Manager().dict()
p1 = ctx.Process(target=_send_train_request, args=(results, payload))
p1.start()
training_started = False
training_finished = False
# use our current process to query the status endpoint while the training is running
while not training_finished:
time.sleep(0.5)
# hit status endpoint with short timeout to ensure training doesn't block
status_resp = requests.get("http://localhost:5005/status", timeout=1)
assert status_resp.status_code == 200
if not training_started:
# make sure that we don't fail because we got status before training updated number of jobs
training_started = status_resp.json()["num_active_training_jobs"] == 1
else:
if results.get("train_response_code") is None:
assert status_resp.json()["num_active_training_jobs"] == 1
else:
# once the response code is in, training is done, status should return 0 again
assert results.get("train_response_code") == 200
training_finished = True
status_resp = requests.get("http://localhost:5005/status")
assert status_resp.json()["num_active_training_jobs"] == 0
p0.kill()
p1.join()
|
def test_train_status():
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
payload["force"] = True
ctx = multiprocessing.get_context("spawn")
# run a rasa server in one process
p0 = ctx.Process(target=subprocess.run, args=(["rasa", "run", "--enable-api"],))
p0.start()
server_ready = False
# wait until server is up before sending train request and status test loop
while not server_ready:
try:
server_ready = (
requests.get("http://localhost:5005/status").status_code == 200
)
except requests.exceptions.ConnectionError:
pass
time.sleep(1)
# use another process to hit the first server with a training request
results = Manager().dict()
p1 = ctx.Process(target=_send_train_request, args=(results, payload))
p1.start()
training_started = False
training_finished = False
# use our current process to query the status endpoint while the training is running
while not training_finished:
time.sleep(0.5)
# hit status endpoint with short timeout to ensure training doesn't block
status_resp = requests.get("http://localhost:5005/status", timeout=1)
assert status_resp.status_code == 200
if not training_started:
# make sure that we don't fail because we got status before training updated number of jobs
training_started = status_resp.json()["num_active_training_jobs"] == 1
else:
if results.get("train_response_code") is None:
assert status_resp.json()["num_active_training_jobs"] == 1
else:
# once the response code is in, training is done, `num_active_training_jobs ` should be 0 again
assert results.get("train_response_code") == 200
training_finished = True
status_resp = requests.get("http://localhost:5005/status")
assert status_resp.json()["num_active_training_jobs"] == 0
p0.kill()
p1.join()
|
2,471 |
def plot_clustering(X_red, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for t in np.sort(np.unique(y)):
plt.scatter(
*X_red[y == t].T,
marker=f"${t}$",
s=50,
c=plt.cm.nipy_spectral(labels[y == t] / 10),
alpha=0.5,
)
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis("off")
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
|
def plot_clustering(X_red, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for digit in digits.target_names:
plt.scatter(
            *X_red[y == digit].T,
            marker=f"${digit}$",
            s=50,
            c=plt.cm.nipy_spectral(labels[y == digit] / 10),
alpha=0.5,
)
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis("off")
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
|
22,806 |
def relevant_values(all_values: Mapping[str, Any]) -> Dict[str, Any]:
"""Return a new dict containing only items relevant for renewal.
:param dict all_values: The original values.
:returns: A new dictionary containing items that can be used in renewal.
:rtype dict:
"""
plugins = plugins_disco.PluginsRegistry.find_all()
namespaces = [plugins_common.dest_namespace(plugin) for plugin in plugins]
rv = {
option: value
for option, value in all_values.items()
if _relevant(namespaces, option) and cli.option_was_set(option, value)
}
# We always save the server value to help with forward compatibility
# and behavioral consistency when versions of Certbot with different
# server defaults are used.
rv["server"] = all_values["server"]
# Save key type to help with forward compatibility on Certbot's transition
# to from RSA to ECDSA certificates by default.
rv["key_type"] = all_values["key_type"]
return rv
|
def relevant_values(all_values: Mapping[str, Any]) -> Dict[str, Any]:
"""Return a new dict containing only items relevant for renewal.
:param dict all_values: The original values.
:returns: A new dictionary containing items that can be used in renewal.
:rtype dict:
"""
plugins = plugins_disco.PluginsRegistry.find_all()
namespaces = [plugins_common.dest_namespace(plugin) for plugin in plugins]
rv = {
option: value
for option, value in all_values.items()
if _relevant(namespaces, option) and cli.option_was_set(option, value)
}
# We always save the server value to help with forward compatibility
# and behavioral consistency when versions of Certbot with different
# server defaults are used.
rv["server"] = all_values["server"]
# Save key type to help with forward compatibility on Certbot's transition
# from RSA to ECDSA certificates by default.
rv["key_type"] = all_values["key_type"]
return rv
|