id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
56,414 |
def get_include_skip_queries(
skip: List[str],
include: List[str]
):
""" Get queries for skip and include values of a component.
To get the skip and include lists use the 'get_component_values' function.
"""
include_q = select([File.id]) \
.where(or_(*[
File.filepath.like(conv(fp)) for fp in include])) \
.distinct()
skip_q = select([File.id]) \
.where(or_(*[
File.filepath.like(conv(fp)) for fp in skip])) \
.distinct()
return include_q, skip_q
|
def get_include_skip_queries(
include: List[str],
skip: List[str]
):
""" Get queries for skip and include values of a component.
To get the skip and include lists use the 'get_component_values' function.
"""
include_q = select([File.id]) \
.where(or_(*[
File.filepath.like(conv(fp)) for fp in include])) \
.distinct()
skip_q = select([File.id]) \
.where(or_(*[
File.filepath.like(conv(fp)) for fp in skip])) \
.distinct()
return include_q, skip_q
|
30,941 |
def check_in_domain(domain_name: str, domain_to_check: list) -> CommandResults:
"""
Args:
domain_name: main domain
domain_to_check: list of domains or sub domains that should be checked
Returns:
for each domain for the list an entry with True / False if it is in the domain or not
"""
context_entry = []
for element in domain_to_check:
is_in_domain = False
# split by domain name
domain_to_check_prefix = element.split(domain_name)[0]
if domain_to_check_prefix + domain_name == element:
is_in_domain = True
context_entry.append({
'Domain.Name': element,
'Domain.IsInternal': True if is_in_domain else False
})
return CommandResults(outputs=context_entry)
|
def check_in_domain(domain_name: str, domain_to_check: list) -> CommandResults:
"""
Args:
domain_name: main domain
domain_to_check: list of domains or sub domains that should be checked
Returns:
for each domain for the list an entry with True / False if it is in the domain or not
"""
context_entry = []
for element in domain_to_check:
is_in_domain = False
# split by domain name
domain_to_check_prefix = element.split(domain_name)[0]
if domain_to_check_prefix + domain_name == element:
is_in_domain = True
context_entry.append({
'Domain.Name': element,
'Domain.IsInternal': is_in_domain
})
return CommandResults(outputs=context_entry)
|
32,299 |
def run_polling_command(client: MsClient, args: dict, cmd: str, action_func: Callable,
results_function: Callable, post_polling_process: Callable):
"""
This function is generically handling the polling flow. In the polling flow, there is always an initial call that
starts the uploading to the API (referred here as the 'upload' function) and another call that retrieves the status
of that upload (referred here as the 'results' function).
The run_polling_command function runs the 'upload' function and returns a ScheduledCommand object that schedules
the next 'results' function, until the polling is complete.
Args:
args: the arguments required to the command being called, under cmd
cmd: the command to schedule by after the current command
upload_function: the function that initiates the uploading to the API
results_function: the function that retrieves the status of the previously initiated upload process
uploaded_item: the type of item being uploaded
Returns:
"""
ScheduledCommand.raise_error_if_not_supported()
interval_in_secs = int(args.get('interval_in_seconds', 10))
timeout_in_seconds = int(args.get('timeout_in_seconds', 600))
# distinguish between the initial run, which is the upload run, and the results run
is_first_run = 'machine_action_id' not in args
if is_first_run:
command_results = action_func(client, args)
outputs = command_results.outputs
# schedule next poll
polling_args = {
'machine_action_id': outputs.get('action_id') or command_results.raw_response.get("id"),
'interval_in_seconds': interval_in_secs,
'polling': True,
**args,
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=timeout_in_seconds)
command_results.scheduled_command = scheduled_command
return command_results
# not a first run
command_result = results_function(client, args)
action_status = command_result.outputs.get("status")
demisto.debug(f"action status is: {action_status}")
if len(command_result.outputs.get("commands", [])) > 0:
command_status = command_result.outputs.get("commands", [{}])[0].get("commandStatus")
else:
command_status = 'Completed' if action_status == "Succeeded" else None
if action_status in ['Failed', 'Cancelled'] or command_status == 'Failed':
error_msg = f"Command {action_status}."
if len(command_result.outputs.get("commands", [])) > 0:
error_msg += f'{command_result.outputs.get("commands", [{}])[0].get("errors")}'
raise Exception(error_msg)
elif command_status != 'Completed' or action_status == 'InProgress':
demisto.debug("action status is not completed")
# schedule next poll
polling_args = {
'interval_in_seconds': interval_in_secs,
'polling': True,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=timeout_in_seconds
)
command_result = CommandResults(scheduled_command=scheduled_command)
return command_result
# action was completed
else:
return post_polling_process(client, command_result.outputs)
|
def run_polling_command(client: MsClient, args: dict, cmd: str, action_func: Callable,
results_function: Callable, post_polling_process: Callable):
"""
This function is generically handling the polling flow. In the polling flow, there is always an initial call that
starts the uploading to the API (referred here as the 'upload' function) and another call that retrieves the status
of that upload (referred here as the 'results' function).
The run_polling_command function runs the 'upload' function and returns a ScheduledCommand object that schedules
the next 'results' function, until the polling is complete.
Args:
args: the arguments required to the command being called, under cmd
cmd: the command to schedule by after the current command
upload_function: the function that initiates the uploading to the API
results_function: the function that retrieves the status of the previously initiated upload process
uploaded_item: the type of item being uploaded
Returns:
"""
ScheduledCommand.raise_error_if_not_supported()
interval_in_secs = int(args.get('interval_in_seconds', 10))
timeout_in_seconds = int(args.get('timeout_in_seconds', 600))
# distinguish between the initial run, which is the upload run, and the results run
is_first_run = 'machine_action_id' not in args
if is_first_run:
command_results = action_func(client, args)
outputs = command_results.outputs
# schedule next poll
polling_args = {
'machine_action_id': outputs.get('action_id') or command_results.raw_response.get("id"),
'interval_in_seconds': interval_in_secs,
'polling': True,
**args,
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=timeout_in_seconds)
command_results.scheduled_command = scheduled_command
return command_results
# not a first run
command_result = results_function(client, args)
action_status = command_result.outputs.get("status")
demisto.debug(f"action status is: {action_status}")
if command_result.outputs.get("commands", []):
command_status = command_result.outputs.get("commands", [{}])[0].get("commandStatus")
else:
command_status = 'Completed' if action_status == "Succeeded" else None
if action_status in ['Failed', 'Cancelled'] or command_status == 'Failed':
error_msg = f"Command {action_status}."
if len(command_result.outputs.get("commands", [])) > 0:
error_msg += f'{command_result.outputs.get("commands", [{}])[0].get("errors")}'
raise Exception(error_msg)
elif command_status != 'Completed' or action_status == 'InProgress':
demisto.debug("action status is not completed")
# schedule next poll
polling_args = {
'interval_in_seconds': interval_in_secs,
'polling': True,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=timeout_in_seconds
)
command_result = CommandResults(scheduled_command=scheduled_command)
return command_result
# action was completed
else:
return post_polling_process(client, command_result.outputs)
|
23,289 |
def copy_and_keep_build(site):
"""
Copies each site into the target location and keep last "n" builts as backups
"""
global error_count
for wiki in ALL_WIKIS:
if site=='common':
continue
if not site==None and not site==wiki:
continue
debug('copy: %s' % wiki)
targetdir = os.path.join(args.destdir, wiki)
debug("Creating temporary folders")
previousdir = os.path.join(args.backupdestdir)
debug('Recreating %s' % previousdir )
if not os.path.exists(previousdir):
os.mkdir(previousdir)
olddir = os.path.join(previousdir, str(building_time) )
debug('Recreating %s' % olddir )
if not os.path.exists(olddir):
os.mkdir(olddir)
if os.path.exists(targetdir):
debug('Moving %s into %s' % (targetdir,olddir) )
shutil.move(targetdir, olddir)
sourcedir='./%s/build/html/' % wiki
html_moved_dir = os.path.join(args.destdir, 'html')
try:
subprocess.check_call(['mv', sourcedir, html_moved_dir])
# Rename move! (single move to html/* failed)
subprocess.check_call(['mv', html_moved_dir ,targetdir])
debug("Moved to %s" % targetdir)
except:
error("FAIL moving output to %s" % targetdir)
finally:
debug("Creating a backup in %s" % olddir)
subprocess.check_call(['cp', '-r', targetdir ,olddir])
delete_old_wiki_backups(previousdir, N_BACKUPS_RETAIM)
|
def copy_and_keep_build(site):
"""
Copies each site into the target location and keep last "n" builts as backups
"""
global error_count
for wiki in ALL_WIKIS:
if site=='common':
continue
if not site==None and not site==wiki:
continue
debug('copy: %s' % wiki)
targetdir = os.path.join(args.destdir, wiki)
debug("Creating temporary folders")
previousdir = os.path.join(args.backupdestdir)
debug('Recreating %s' % previousdir )
if not os.path.exists(previousdir):
os.mkdir(previousdir)
olddir = os.path.join(previousdir, str(building_time) )
debug('Recreating %s' % olddir )
if not os.path.exists(olddir):
os.mkdir(olddir)
if os.path.exists(targetdir):
debug('Moving %s into %s' % (targetdir,olddir) )
shutil.move(targetdir, olddir)
sourcedir='./%s/build/html/' % wiki
html_moved_dir = os.path.join(args.destdir, 'html')
try:
subprocess.check_call(['mv', sourcedir, html_moved_dir])
# Rename move! (single move to html/* failed)
subprocess.check_call(['mv', html_moved_dir ,targetdir])
debug("Moved to %s" % targetdir)
except:
error("FAIL moving output to %s" % targetdir)
finally:
debug("Creating a backup in %s" % olddir)
subprocess.check_call(['cp', '-r', targetdir ,olddir])
delete_old_wiki_backups(previousdir, N_BACKUPS_RETAIM)
|
23,282 |
def not_(validator):
"""
A validator that wraps and logically 'inverts' the validator passed to it,
raising a `ValueError` if the provided validator doesn't, and suppresses
the `ValueError` if the provided validator *does*.
Intended to be used with existing validators to compose logic without
needing to create inverted variants, for example, ``not_(in_(...))``.
:param validator: A validator to be logically inverted.
:raises ValueError: If the wrapped validator does not raise a
`ValueError`, this validator will.
.. versionadded:: just a PR for now.
"""
return _NotValidator(validator)
|
def not_(validator):
"""
A validator that wraps and logically 'inverts' the validator passed to it,
raising a `ValueError` if the provided validator doesn't, and suppresses
the `ValueError` if the provided validator *does*.
Intended to be used with existing validators to compose logic without
needing to create inverted variants, for example, ``not_(in_(...))``.
:param validator: A validator to be logically inverted.
:raises ValueError: If the wrapped validator does not raise a
`ValueError`, this validator will.
.. versionadded:: 22.2.0
"""
return _NotValidator(validator)
|
11,669 |
def build_multi_output_mock_op(slot_data: SLOT_DATA, graph: Graph, n_lanes: int = None):
"""Returns an operator that has outputs as specifier in slot_data
This is especially useful when testing toplevelOperators, as these usually
need a lot of diverse inputs, which can be tedious using e.g.
`OpArrayPiper`. This function can be used to generate an operator that mocks
all needed output slots, an operator may take as inputs.
Note: no consistency checking is done with the data provided from SlotDescription
Currently, data access is not yet supported.
Args:
slot_data: Slot metadata will be the same for level 1 slots. If
slot_data.data is given, it has to be the a sequence of same the same
length as n_lanes.
n_lanes: number of lanes - level 1 slots are resized to that number.
"""
class _OP(Operator):
def __init__(self, slot_data, *args, **kwargs):
self._data = slot_data
self._n_lanes = n_lanes
super().__init__(*args, **kwargs)
for name, val in self._data.items():
meta_dict = MetaDict(dtype=val.dtype, shape=val.shape, axistags=val.axistags)
if self.outputs[name].level == 0:
self.outputs[name].meta.assignFrom(meta_dict)
elif self.outputs[name].level == 1 and self._n_lanes:
if self._data[name].data is not None and not (len(self._data[name].data) == self._n_lanes):
raise ValueError(f"Data for slot {name} did not match number of lanes {self._n_lanes}")
self.outputs[name].resize(self._n_lanes)
for ss in self.outputs[name]:
ss.meta.assignFrom(meta_dict)
def setupOutputs(self):
pass
def execute(self, slot, subindex, roi, result):
if self._data[slot.name].data is None:
raise RuntimeError(f"Slot {slot.name} should not be accessed")
key = roi.toSlice()
if slot.level == 0:
result[...] = self._data[slot.name].data[key]
elif slot.level == 1:
assert len(subindex) == 1
result[...] = self._data[slot.name].data[subindex[0]][key]
assert all(slot_descr.level in [0, 1] for slot_descr in slot_data.values())
MultiOutputMockOp = type(
"MultiOutputMockOp",
(_OP,),
{slot_name: OutputSlot(level=slot_descr.level) for slot_name, slot_descr in slot_data.items()},
)
op = MultiOutputMockOp(slot_data, graph=graph)
return op
|
def build_multi_output_mock_op(slot_data: SLOT_DATA, graph: Graph, n_lanes: int = 0) -> Type[MultiOutputMockOpBase]:
"""Returns an operator that has outputs as specifier in slot_data
This is especially useful when testing toplevelOperators, as these usually
need a lot of diverse inputs, which can be tedious using e.g.
`OpArrayPiper`. This function can be used to generate an operator that mocks
all needed output slots, an operator may take as inputs.
Note: no consistency checking is done with the data provided from SlotDescription
Currently, data access is not yet supported.
Args:
slot_data: Slot metadata will be the same for level 1 slots. If
slot_data.data is given, it has to be the a sequence of same the same
length as n_lanes.
n_lanes: number of lanes - level 1 slots are resized to that number.
"""
class _OP(Operator):
def __init__(self, slot_data, *args, **kwargs):
self._data = slot_data
self._n_lanes = n_lanes
super().__init__(*args, **kwargs)
for name, val in self._data.items():
meta_dict = MetaDict(dtype=val.dtype, shape=val.shape, axistags=val.axistags)
if self.outputs[name].level == 0:
self.outputs[name].meta.assignFrom(meta_dict)
elif self.outputs[name].level == 1 and self._n_lanes:
if self._data[name].data is not None and not (len(self._data[name].data) == self._n_lanes):
raise ValueError(f"Data for slot {name} did not match number of lanes {self._n_lanes}")
self.outputs[name].resize(self._n_lanes)
for ss in self.outputs[name]:
ss.meta.assignFrom(meta_dict)
def setupOutputs(self):
pass
def execute(self, slot, subindex, roi, result):
if self._data[slot.name].data is None:
raise RuntimeError(f"Slot {slot.name} should not be accessed")
key = roi.toSlice()
if slot.level == 0:
result[...] = self._data[slot.name].data[key]
elif slot.level == 1:
assert len(subindex) == 1
result[...] = self._data[slot.name].data[subindex[0]][key]
assert all(slot_descr.level in [0, 1] for slot_descr in slot_data.values())
MultiOutputMockOp = type(
"MultiOutputMockOp",
(_OP,),
{slot_name: OutputSlot(level=slot_descr.level) for slot_name, slot_descr in slot_data.items()},
)
op = MultiOutputMockOp(slot_data, graph=graph)
return op
|
13,991 |
def get_all_spiketrains(container):
"""
Get all `neo.Spiketrain` objects from a container.
The objects can be any list, dict, or other iterable or mapping containing
spiketrains, as well as any Neo object that can hold spiketrains:
`neo.Block`, `neo.ChannelIndex`, `neo.Unit`, and `neo.Segment`.
Containers are searched recursively, so the objects can be nested
(such as a list of blocks).
Parameters
----------
container : list, tuple, iterable, dict, neo.Block, neo.Segment, neo.Unit,
neo.ChannelIndex
The container for the spiketrains.
Returns
-------
list
A list of the unique `neo.SpikeTrain` objects in `container`.
"""
return SpikeTrainList(_get_all_objs(container, 'SpikeTrain'))
|
def get_all_spiketrains(container):
"""
Get all `neo.Spiketrain` objects from a container.
The objects can be any list, dict, or other iterable or mapping containing
spiketrains, as well as any Neo object that can hold spiketrains:
`neo.Block`, `neo.ChannelIndex`, `neo.Unit`, and `neo.Segment`.
Containers are searched recursively, so the objects can be nested
(such as a list of blocks).
Parameters
----------
container : list, tuple, iterable, dict, neo.Block, neo.Segment, neo.Unit,
neo.ChannelIndex
The container for the spiketrains.
Returns
-------
list
A `neo.SpikeTrainList` object of the unique `neo.SpikeTrain` objects in `container`.
"""
return SpikeTrainList(_get_all_objs(container, 'SpikeTrain'))
|
53,215 |
def get_queue_manager_connection(config, logger):
# type: (IBMMQConfig, CheckLoggingAdapter) -> pymqi.QueueManager
"""
Get the queue manager connection
"""
if config.ssl:
# There is a memory leak when SSL connections fail.
# By testing with a normal connection first, we avoid making unnecessary SSL connections.
# This does not fix the memory leak but mitigate its likelihood.
# Details: https://github.com/dsuch/pymqi/issues/208
if config.try_normal_ssl:
try:
get_normal_connection(config, logger)
except pymqi.MQMIError as e:
logger.debug("Tried normal connection before SSL connection. It failed with: %s", e)
if e.reason == pymqi.CMQC.MQRC_UNKNOWN_CHANNEL_NAME:
raise
return get_ssl_connection(config, logger)
else:
return get_ssl_connection(config, logger)
else:
return get_normal_connection(config, logger)
|
def get_queue_manager_connection(config, logger):
# type: (IBMMQConfig, CheckLoggingAdapter) -> pymqi.QueueManager
"""
Get the queue manager connection
"""
if config.ssl:
# There is a memory leak when SSL connections fail.
# By testing with a normal connection first, we avoid making unnecessary SSL connections.
# This does not fix the memory leak but mitigate its likelihood.
# Details: https://github.com/dsuch/pymqi/issues/208
if config.try_normal_ssl:
try:
get_normal_connection(config, logger)
except pymqi.MQMIError as e:
logger.debug("Tried normal connection before SSL connection. It failed with: %s", e)
if e.reason == pymqi.CMQC.MQRC_UNKNOWN_CHANNEL_NAME:
raise
return get_ssl_connection(config, logger)
else:
return get_normal_connection(config, logger)
|
25,759 |
def network_lopf(n, snapshots=None, solver_name="cbc",
solver_logfile=None, extra_functionality=None, skip_objective=False,
extra_postprocessing=None, formulation="kirchhoff",
keep_references=False, keep_files=False,
keep_shadowprices=['Bus', 'Line', 'Transformer', 'Link', 'GlobalConstraint'],
solver_options=None, warmstart=False, store_basis=False,
solver_dir=None):
"""
Linear optimal power flow for a group of snapshots.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
solver_name : string
Must be a solver name that pyomo recognises and that is
installed, e.g. "glpk", "gurobi"
pyomo : bool, default True
Whether to use pyomo for building and solving the model, setting
this to False saves a lot of memory and time.
solver_logfile : None|string
If not None, sets the logfile option of the solver.
solver_options : dictionary
A dictionary with additional options that get passed to the solver.
(e.g. {'threads':2} tells gurobi to use only 2 cpus)
solver_dir : str, default None
Path to directory where necessary files are written, default None leads
to the default temporary directory used by tempfile.mkstemp().
keep_files : bool, default False
Keep the files that pyomo constructs from OPF problem
construction, e.g. .lp file - useful for debugging
formulation : string
Formulation of the linear power flow equations to use; must be
one of ["angles","cycles","kirchhoff","ptdf"]
extra_functionality : callable function
This function must take two arguments
`extra_functionality(network,snapshots)` and is called after
the model building is complete, but before it is sent to the
solver. It allows the user to
add/change constraints and add/change the objective function.
skip_objective : bool, default False
Skip writing the default objective function. If False, an custom
objective has to be defined via extra_functionality.
extra_postprocessing : callable function
This function must take three arguments
`extra_postprocessing(network,snapshots,duals)` and is called after
the model has solved and the results are extracted. It allows the user
to extract further information about the solution, such as additional
shadow prices.
warmstart : bool or string, default False
Use this to warmstart the optimization. Pass a string which gives
the path to the basis file. If set to True, a path to
a basis file must be given in network.basis_fn.
store_basis : bool, default False
Whether to store the basis of the optimization results. If True,
the path to the basis file is saved in network.basis_fn. Note that
a basis can only be stored if simplex, dual-simplex, or barrier
*with* crossover is used for solving.
keep_references : bool, default False
Keep the references of variable and constraint names withing the
network. These can be looked up in `n.vars` and `n.cons` after solving.
keep_shadowprices : bool or list of component names
Keep shadow prices for all constraints, if set to True. If a list
is passed the shadow prices will only be parsed for those constraint
names. Defaults to ['Bus', 'Line', 'GlobalConstraint'].
After solving, the shadow prices can be retrieved using
:func:`pypsa.linopt.get_dual` with corresponding name
"""
supported_solvers = ["cbc", "gurobi", 'glpk', 'scs']
if solver_name not in supported_solvers:
raise NotImplementedError(f"Solver {solver_name} not in "
f"supported solvers: {supported_solvers}")
if formulation != "kirchhoff":
raise NotImplementedError("Only the kirchhoff formulation is supported")
if n.generators.committable.any():
logger.warn("Unit commitment is not yet completely implemented for "
"optimising without pyomo. Thus minimum up time, minimum down time, "
"start up costs, shut down costs will be ignored.")
#disable logging because multiple slack bus calculations, keep output clean
snapshots = _as_snapshots(n, snapshots)
n.calculate_dependent_values()
n.determine_network_topology()
logger.info("Prepare linear problem")
fdp, problem_fn = prepare_lopf(n, snapshots, keep_files, skip_objective,
extra_functionality, solver_dir)
fds, solution_fn = mkstemp(prefix='pypsa-solve', suffix='.sol', dir=solver_dir)
if warmstart == True:
warmstart = n.basis_fn
logger.info("Solve linear problem using warmstart")
else:
logger.info(f"Solve linear problem using {solver_name.title()} solver")
solve = eval(f'run_and_read_{solver_name}')
res = solve(n, problem_fn, solution_fn, solver_logfile,
solver_options, keep_files, warmstart, store_basis)
status, termination_condition, variables_sol, constraints_dual, obj = res
if not keep_files:
os.close(fdp); os.remove(problem_fn)
os.close(fds); os.remove(solution_fn)
if status == "ok" and termination_condition == "optimal":
logger.info('Optimization successful. Objective value: {:.2e}'.format(obj))
elif status == "warning" and termination_condition == "suboptimal":
logger.warning('Optimization solution is sub-optimal. Objective value: {:.2e}'.format(obj))
else:
logger.warning(f'Optimization failed with status {status} and termination condition {termination_condition}')
return status, termination_condition
n.objective = obj
assign_solution(n, snapshots, variables_sol, constraints_dual,
keep_references=keep_references,
keep_shadowprices=keep_shadowprices)
gc.collect()
return status,termination_condition
|
def network_lopf(n, snapshots=None, solver_name="cbc",
solver_logfile=None, extra_functionality=None, skip_objective=False,
extra_postprocessing=None, formulation="kirchhoff",
keep_references=False, keep_files=False,
keep_shadowprices=['Bus', 'Line', 'Transformer', 'Link', 'GlobalConstraint'],
solver_options=None, warmstart=False, store_basis=False,
solver_dir=None):
"""
Linear optimal power flow for a group of snapshots.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
solver_name : string
Must be a solver name that pyomo recognises and that is
installed, e.g. "glpk", "gurobi"
pyomo : bool, default True
Whether to use pyomo for building and solving the model, setting
this to False saves a lot of memory and time.
solver_logfile : None|string
If not None, sets the logfile option of the solver.
solver_options : dictionary
A dictionary with additional options that get passed to the solver.
(e.g. {'threads':2} tells gurobi to use only 2 cpus)
solver_dir : str, default None
Path to directory where necessary files are written, default None leads
to the default temporary directory used by tempfile.mkstemp().
keep_files : bool, default False
Keep the files that pyomo constructs from OPF problem
construction, e.g. .lp file - useful for debugging
formulation : string
Formulation of the linear power flow equations to use; must be
one of ["angles","cycles","kirchhoff","ptdf"]
extra_functionality : callable function
This function must take two arguments
`extra_functionality(network,snapshots)` and is called after
the model building is complete, but before it is sent to the
solver. It allows the user to
add/change constraints and add/change the objective function.
skip_objective : bool, default False
Skip writing the default objective function. If False, a custom
objective has to be defined via extra_functionality.
extra_postprocessing : callable function
This function must take three arguments
`extra_postprocessing(network,snapshots,duals)` and is called after
the model has solved and the results are extracted. It allows the user
to extract further information about the solution, such as additional
shadow prices.
warmstart : bool or string, default False
Use this to warmstart the optimization. Pass a string which gives
the path to the basis file. If set to True, a path to
a basis file must be given in network.basis_fn.
store_basis : bool, default False
Whether to store the basis of the optimization results. If True,
the path to the basis file is saved in network.basis_fn. Note that
a basis can only be stored if simplex, dual-simplex, or barrier
*with* crossover is used for solving.
keep_references : bool, default False
Keep the references of variable and constraint names withing the
network. These can be looked up in `n.vars` and `n.cons` after solving.
keep_shadowprices : bool or list of component names
Keep shadow prices for all constraints, if set to True. If a list
is passed the shadow prices will only be parsed for those constraint
names. Defaults to ['Bus', 'Line', 'GlobalConstraint'].
After solving, the shadow prices can be retrieved using
:func:`pypsa.linopt.get_dual` with corresponding name
"""
supported_solvers = ["cbc", "gurobi", 'glpk', 'scs']
if solver_name not in supported_solvers:
raise NotImplementedError(f"Solver {solver_name} not in "
f"supported solvers: {supported_solvers}")
if formulation != "kirchhoff":
raise NotImplementedError("Only the kirchhoff formulation is supported")
if n.generators.committable.any():
logger.warn("Unit commitment is not yet completely implemented for "
"optimising without pyomo. Thus minimum up time, minimum down time, "
"start up costs, shut down costs will be ignored.")
#disable logging because multiple slack bus calculations, keep output clean
snapshots = _as_snapshots(n, snapshots)
n.calculate_dependent_values()
n.determine_network_topology()
logger.info("Prepare linear problem")
fdp, problem_fn = prepare_lopf(n, snapshots, keep_files, skip_objective,
extra_functionality, solver_dir)
fds, solution_fn = mkstemp(prefix='pypsa-solve', suffix='.sol', dir=solver_dir)
if warmstart == True:
warmstart = n.basis_fn
logger.info("Solve linear problem using warmstart")
else:
logger.info(f"Solve linear problem using {solver_name.title()} solver")
solve = eval(f'run_and_read_{solver_name}')
res = solve(n, problem_fn, solution_fn, solver_logfile,
solver_options, keep_files, warmstart, store_basis)
status, termination_condition, variables_sol, constraints_dual, obj = res
if not keep_files:
os.close(fdp); os.remove(problem_fn)
os.close(fds); os.remove(solution_fn)
if status == "ok" and termination_condition == "optimal":
logger.info('Optimization successful. Objective value: {:.2e}'.format(obj))
elif status == "warning" and termination_condition == "suboptimal":
logger.warning('Optimization solution is sub-optimal. Objective value: {:.2e}'.format(obj))
else:
logger.warning(f'Optimization failed with status {status} and termination condition {termination_condition}')
return status, termination_condition
n.objective = obj
assign_solution(n, snapshots, variables_sol, constraints_dual,
keep_references=keep_references,
keep_shadowprices=keep_shadowprices)
gc.collect()
return status,termination_condition
|
27,870 |
def mean_absolute_error(x0, x1):
"""Mean absolute error function.
This function computes the mean absolute error between two variables. The
mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the
dimensions. This function first calculates the absolute value differences
of corresponding elements in x0 and x1, and then returns the mean of those
differences.
Args:
x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable:
A variable holding an array representing the mean absolute
error of two inputs.
.. admonition:: Example
1D array examples:
>>> x = np.array([1, 2, 3]).astype(np.float32)
>>> y = np.array([0, 0, 0]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(2.)
>>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
>>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(6.)
2D array example:
- In this example, there are 4 elements, and thus 4 errors
>>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)
>>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(5.5)
3D array example:
- In this example, there are 8 elements, and thus 8 errors
>>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))
>>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))
>>> x = x.astype(np.float32)
>>> y = y.astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(3.5)
"""
return MeanAbsoluteError().apply((x0, x1))[0]
|
def mean_absolute_error(x0, x1):
"""Mean absolute error function.
This function computes the mean absolute error between two variables. The
mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the
dimensions. This function first calculates the absolute value differences
between the corresponding elements in x0 and x1, and then returns the mean of those
differences.
Args:
x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable:
A variable holding an array representing the mean absolute
error of two inputs.
.. admonition:: Example
1D array examples:
>>> x = np.array([1, 2, 3]).astype(np.float32)
>>> y = np.array([0, 0, 0]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(2.)
>>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
>>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(6.)
2D array example:
- In this example, there are 4 elements, and thus 4 errors
>>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)
>>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(5.5)
3D array example:
- In this example, there are 8 elements, and thus 8 errors
>>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))
>>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))
>>> x = x.astype(np.float32)
>>> y = y.astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(3.5)
"""
return MeanAbsoluteError().apply((x0, x1))[0]
|
13,619 |
def gram_schmidt_biorth(V, W, product=None,
reiterate=True, reiteration_threshold=1e-1, check=True, check_tol=1e-3,
copy=True):
"""Biorthonormalize a pair of |VectorArrays| using the biorthonormal Gram-Schmidt process.
See Algorithm 1 in :cite:`BKS11`.
Note that this algorithm can be significantly less accurate compared to orthogonalization,
in particular, when `V` and `W` are orthogonal.
Parameters
----------
V, W
The |VectorArrays| which are to be biorthonormalized.
product
The inner product |Operator| w.r.t. which to biorthonormalize.
If `None`, the Euclidean product is used.
reiterate
If `True`, orthonormalize again if the norm of the orthogonalized vector is
much smaller than the norm of the original vector.
reiteration_threshold
If `reiterate` is `True`, re-orthonormalize if the ratio between the norms of
the orthogonalized vector and the original vector is smaller than this value.
check
If `True`, check if the resulting |VectorArray| is really orthonormal.
check_tol
Tolerance for the check.
copy
If `True`, create a copy of `V` and `W` instead of modifying `V` and `W` in-place.
Returns
-------
The biorthonormalized |VectorArrays|.
"""
assert V.space == W.space
assert len(V) == len(W)
logger = getLogger('pymor.algorithms.gram_schmidt.gram_schmidt_biorth')
if copy:
V = V.copy()
W = W.copy()
# main loop
for i in range(len(V)):
# calculate norm of V[i]
initial_norm = V[i].norm(product)[0]
# project V[i]
if i == 0:
V[0].scal(1 / initial_norm)
else:
norm = initial_norm
# If reiterate is True, reiterate as long as the norm of the vector changes
# strongly during projection.
while True:
for j in range(i):
# project by (I - V[j] * W[j]^T * E)
p = W[j].pairwise_inner(V[i], product)[0]
V[i].axpy(-p, V[j])
# calculate new norm
old_norm, norm = norm, V[i].norm(product)[0]
# check if reorthogonalization should be done
if reiterate and norm < reiteration_threshold * old_norm:
logger.info(f"Projecting vector V[{i}] again")
else:
V[i].scal(1 / norm)
break
# calculate norm of W[i]
initial_norm = W[i].norm(product)[0]
# project W[i]
if i == 0:
W[0].scal(1 / initial_norm)
else:
norm = initial_norm
# If reiterate is True, reiterate as long as the norm of the vector changes
# strongly during projection.
while True:
for j in range(i):
# project by (I - W[j] * V[j]^T * E)
p = V[j].pairwise_inner(W[i], product)[0]
W[i].axpy(-p, W[j])
# calculate new norm
old_norm, norm = norm, W[i].norm(product)[0]
# check if reorthogonalization should be done
if reiterate and norm < reiteration_threshold * old_norm:
logger.info(f"Projecting vector W[{i}] again")
else:
W[i].scal(1 / norm)
break
# rescale V[i]
p = W[i].pairwise_inner(V[i], product)[0]
V[i].scal(1 / p)
if check:
error_matrix = W.inner(V, product)
error_matrix -= np.eye(len(V))
if error_matrix.size > 0:
err = np.max(np.abs(error_matrix))
if err >= check_tol:
raise AccuracyError(f"result not biorthogonal (max err={err})")
return V, W
|
def gram_schmidt_biorth(V, W, product=None,
reiterate=True, reiteration_threshold=1e-1, check=True, check_tol=1e-3,
copy=True):
"""Biorthonormalize a pair of |VectorArrays| using the biorthonormal Gram-Schmidt process.
See Algorithm 1 in :cite:`BKS11`.
Note that this algorithm can be significantly less accurate compared to orthogonalization,
in particular, when `V` and `W` are almost orthogonal.
Parameters
----------
V, W
The |VectorArrays| which are to be biorthonormalized.
product
The inner product |Operator| w.r.t. which to biorthonormalize.
If `None`, the Euclidean product is used.
reiterate
If `True`, orthonormalize again if the norm of the orthogonalized vector is
much smaller than the norm of the original vector.
reiteration_threshold
If `reiterate` is `True`, re-orthonormalize if the ratio between the norms of
the orthogonalized vector and the original vector is smaller than this value.
check
If `True`, check if the resulting |VectorArray| is really orthonormal.
check_tol
Tolerance for the check.
copy
If `True`, create a copy of `V` and `W` instead of modifying `V` and `W` in-place.
Returns
-------
The biorthonormalized |VectorArrays|.
"""
assert V.space == W.space
assert len(V) == len(W)
logger = getLogger('pymor.algorithms.gram_schmidt.gram_schmidt_biorth')
if copy:
V = V.copy()
W = W.copy()
# main loop
for i in range(len(V)):
# calculate norm of V[i]
initial_norm = V[i].norm(product)[0]
# project V[i]
if i == 0:
V[0].scal(1 / initial_norm)
else:
norm = initial_norm
# If reiterate is True, reiterate as long as the norm of the vector changes
# strongly during projection.
while True:
for j in range(i):
# project by (I - V[j] * W[j]^T * E)
p = W[j].pairwise_inner(V[i], product)[0]
V[i].axpy(-p, V[j])
# calculate new norm
old_norm, norm = norm, V[i].norm(product)[0]
# check if reorthogonalization should be done
if reiterate and norm < reiteration_threshold * old_norm:
logger.info(f"Projecting vector V[{i}] again")
else:
V[i].scal(1 / norm)
break
# calculate norm of W[i]
initial_norm = W[i].norm(product)[0]
# project W[i]
if i == 0:
W[0].scal(1 / initial_norm)
else:
norm = initial_norm
# If reiterate is True, reiterate as long as the norm of the vector changes
# strongly during projection.
while True:
for j in range(i):
# project by (I - W[j] * V[j]^T * E)
p = V[j].pairwise_inner(W[i], product)[0]
W[i].axpy(-p, W[j])
# calculate new norm
old_norm, norm = norm, W[i].norm(product)[0]
# check if reorthogonalization should be done
if reiterate and norm < reiteration_threshold * old_norm:
logger.info(f"Projecting vector W[{i}] again")
else:
W[i].scal(1 / norm)
break
# rescale V[i]
p = W[i].pairwise_inner(V[i], product)[0]
V[i].scal(1 / p)
if check:
error_matrix = W.inner(V, product)
error_matrix -= np.eye(len(V))
if error_matrix.size > 0:
err = np.max(np.abs(error_matrix))
if err >= check_tol:
raise AccuracyError(f"result not biorthogonal (max err={err})")
return V, W
|
33,266 |
def encode(data, encoding='utf-8'):
assert encoding == 'utf-8', "Only UTF-8 encoding is currently supported."
# Check if data is already encoded (for the purposes of unicode only
# If not, convert to a string if necessary, and then encode as utf-8 bytes
if isinstance(data, bytes):
return data # UTF-8 data is already encoded
elif not isinstance(data, str):
data = str(data)
# Encode UTF-8 data
if encoding is not None:
try:
data = data.encode(encoding)
except Exception as e:
if os.environ.get('DEBUG'):
tb = sys.exc_info()[2]
raise e.with_traceback(tb)
logger.warning("An encoding error has occurred... continuing anyway. To capture these errors, rerun the current command prefixed with `DEBUG=1 `.")
data = data.encode(encoding, errors='ignore')
return data
|
def encode(data, encoding='utf-8'):
assert encoding == 'utf-8', "Only UTF-8 encoding is currently supported."
# Check if data is already encoded (for the purposes of unicode only
# If not, convert to a string if necessary, and then encode as utf-8 bytes
if isinstance(data, bytes):
return data # UTF-8 data is already encoded
elif not isinstance(data, str):
data = str(data)
# Encode UTF-8 data
if encoding is not None:
try:
data = data.encode(encoding)
except Exception as e:
if os.environ.get('DEBUG'):
tb = sys.exc_info()[2]
raise e.with_traceback(tb=sys.exc_info()[2])
logger.warning("An encoding error has occurred... continuing anyway. To capture these errors, rerun the current command prefixed with `DEBUG=1 `.")
data = data.encode(encoding, errors='ignore')
return data
|
28,372 |
def infer_ngrams_corpus(corpus):
bow_feat_idx = [
i for i in range(len(corpus.domain.attributes))
if 'bow-feature' in corpus.domain.attributes[i].attributes
]
if len(bow_feat_idx) == 0:
corpus = BowVectorizer().transform(corpus)
bow_feat_idx = [
i for i in range(len(corpus.domain.attributes))
if 'bow-feature' in corpus.domain.attributes[i].attributes
]
feature_presence = corpus.X.sum(axis=0)
keep = [i for i in bow_feat_idx if feature_presence[0, i] > 0]
return Sparse2CorpusSliceable(corpus.X[:, keep].T)
|
def infer_ngrams_corpus(corpus):
bow_feat_idx = [
i for i in range(len(corpus.domain.attributes))
if 'bow-feature' in corpus.domain.attributes[i].attributes
]
if len(bow_feat_idx) == 0:
corpus = BowVectorizer().transform(corpus)
bow_feat_idx = [
i for i in range(len(corpus.domain.attributes))
if 'bow-feature' in corpus.domain.attributes[i].attributes
]
feature_presence = corpus.X.sum(axis=0)
keep = [i for i in bow_feat_idx if feature_presence[0, i] > 0]
return Sparse2Corpus(corpus.X[:, keep].T)
|
59,274 |
def main(regrtest_args):
args = [sys.executable,
'-u', # Unbuffered stdout and stderr
'-W', 'default', # Warnings set to 'default'
'-bb', # Warnings about bytes/bytearray
]
cross_compile = '_PYTHON_HOST_PLATFORM' in os.environ
hostrunner = os.environ.get("_PYTHON_HOSTRUNNER")
if hostrunner is None:
hostrunner = sysconfig.get_config_var("HOSTRUNNER")
if cross_compile:
# emulate -E, but keep PYTHONPATH + cross compile env vars, so
# test executable can load correct sysconfigdata file.
keep = {
'_PYTHON_PROJECT_BASE',
'_PYTHON_HOST_PLATFORM',
'_PYTHON_SYSCONFIGDATA_NAME',
'PYTHONPATH'
}
environ = {
name: value for name, value in os.environ.items()
if not name.startswith(('PYTHON', '_PYTHON')) or name in keep
}
else:
environ = os.environ.copy()
args.append("-E")
# Allow user-specified interpreter options to override our defaults.
args.extend(test.support.args_from_interpreter_flags())
args.extend(['-m', 'test', # Run the test suite
'-r', # Randomize test order
'-w', # Re-run failed tests in verbose mode
])
if sys.platform == 'win32':
args.append('-n') # Silence alerts under Windows
if not any(is_multiprocess_flag(arg) for arg in regrtest_args):
if cross_compile and hostrunner:
# For now use only one core for cross compiled builds.
# hostrunner can be expensive.
args.extend(['-j', '1'])
else:
args.extend(['-j', '0']) # Use all CPU cores
if not any(is_resource_use_flag(arg) for arg in regrtest_args):
args.extend(['-u', 'all,-largefile,-audio,-gui'])
if cross_compile and hostrunner:
# If HOSTRUNNER is set and -p/--python option is not given, then
# use hostrunner to execute python binary for tests.
if not any(is_python_flag(arg) for arg in regrtest_args):
buildpython = sysconfig.get_config_var("BUILDPYTHON")
args.extend(["--python", f"{hostrunner} {buildpython}"])
args.extend(regrtest_args)
print(shlex.join(args))
if sys.platform == 'win32':
from subprocess import call
sys.exit(call(args))
else:
os.execve(sys.executable, args, environ)
|
def main(regrtest_args):
args = [sys.executable,
'-u', # Unbuffered stdout and stderr
'-W', 'default', # Warnings set to 'default'
'-bb', # Warnings about bytes/bytearray
]
cross_compile = '_PYTHON_HOST_PLATFORM' in os.environ
hostrunner = os.environ.get("_PYTHON_HOSTRUNNER")
if hostrunner is None:
hostrunner = sysconfig.get_config_var("HOSTRUNNER")
if cross_compile:
# emulate -E, but keep PYTHONPATH + cross compile env vars, so
# test executable can load correct sysconfigdata file.
keep = {
'_PYTHON_PROJECT_BASE',
'_PYTHON_HOST_PLATFORM',
'_PYTHON_SYSCONFIGDATA_NAME',
'PYTHONPATH'
}
environ = {
name: value for name, value in os.environ.items()
if not name.startswith(('PYTHON', '_PYTHON')) or name in keep
}
else:
environ = os.environ.copy()
args.append("-E")
# Allow user-specified interpreter options to override our defaults.
args.extend(test.support.args_from_interpreter_flags())
args.extend(['-m', 'test', # Run the test suite
'-r', # Randomize test order
'-w', # Re-run failed tests in verbose mode
])
if sys.platform == 'win32':
args.append('-n') # Silence alerts under Windows
if not any(is_multiprocess_flag(arg) for arg in regrtest_args):
if cross_compile and hostrunner:
# For now use only one core for cross-compiled builds;
# hostrunner can be expensive.
args.extend(['-j', '1'])
else:
args.extend(['-j', '0']) # Use all CPU cores
if not any(is_resource_use_flag(arg) for arg in regrtest_args):
args.extend(['-u', 'all,-largefile,-audio,-gui'])
if cross_compile and hostrunner:
# If HOSTRUNNER is set and -p/--python option is not given, then
# use hostrunner to execute python binary for tests.
if not any(is_python_flag(arg) for arg in regrtest_args):
buildpython = sysconfig.get_config_var("BUILDPYTHON")
args.extend(["--python", f"{hostrunner} {buildpython}"])
args.extend(regrtest_args)
print(shlex.join(args))
if sys.platform == 'win32':
from subprocess import call
sys.exit(call(args))
else:
os.execve(sys.executable, args, environ)
|
43,822 |
def quantum_monte_carlo(fn, wires, target_wire, estimation_wires):
r"""Provides the circuit to perform the
`quantum Monte Carlo estimation <https://arxiv.org/abs/1805.00109>`__ algorithm.
The input ``fn`` should be the quantum circuit corresponding to the :math:`\mathcal{F}` unitary
in the paper above that encodes the probability distribution and random variable onto ``wires``
so that measurement of the ``target_wire`` provides the expectation value to be estimated.
The quantum Monte Carlo algorithm then estimates the expectation value using quantum phase
estimation (check out :class:`~.QuantumPhaseEstimation` for more details), using the
``estimation_wires``.
.. note::
A complementary approach for quantum Monte Carlo is available with the
:class:`~.QuantumMonteCarlo` template.
The ``quantum_monte_carlo`` transform is intended for
use when you already have the circuit for performing :math:`\mathcal{F}` set up, and is
compatible with resource estimation and potential hardware implementation. The
:class:`~.QuantumMonteCarlo` template is unitary-based and is only compatible with
simulators, but may perform faster and is suited to quick prototyping.
Args:
fn (Callable): a quantum function that applies quantum operations according to the
:math:`\mathcal{F}` unitary used as part of quantum Monte Carlo estimation
wires (Union[Wires or Sequence[int]]): the wires acted upon by the ``fn`` circuit
target_wire (Union[Wires, int]): The wire in which the expectation value is encoded. Must be
contained within ``wires``.
estimation_wires (Union[Wires, Sequence[int], or int]): the wires used for phase estimation
Returns:
function: The circuit for quantum Monte Carlo estimation
Raises:
ValueError: if ``wires`` and ``estimation_wires`` share a common wire
.. UsageDetails::
Consider an input quantum circuit ``fn`` that performs the unitary
.. math::
\mathcal{F} = \mathcal{R} \mathcal{A}.
.. figure:: ../../_static/ops/f.svg
:align: center
:width: 15%
:target: javascript:void(0);
Here, the unitary :math:`\mathcal{A}` prepares a probability distribution :math:`p(i)` of
dimension :math:`M = 2^{m}` over :math:`m \geq 1` qubits:
.. math::
\mathcal{A}|0\rangle^{\otimes m} = \sum_{i \in X} p(i) |i\rangle
where :math:`X = \{0, 1, \ldots, M - 1\}` and :math:`|i\rangle` is the basis state
corresponding to :math:`i`. The :math:`\mathcal{R}` unitary imprints the
result of a function :math:`f: X \rightarrow [0, 1]` onto an ancilla qubit:
.. math::
\mathcal{R}|i\rangle |0\rangle = |i\rangle \left(\sqrt{1 - f(i)} |0\rangle + \sqrt{f(i)}|1\rangle\right).
Following `this <https://arxiv.org/abs/1805.00109>`__ paper,
it can be seen that the probability of measuring the state :math:`|1\rangle` in the final
qubit is
.. math::
\mu = \sum_{i \in X} p(i) f(i).
However, it is possible to measure :math:`\mu` more efficiently using quantum Monte Carlo
estimation. This function transforms an input quantum circuit ``fn`` that performs the
unitary :math:`\mathcal{F}` to a larger circuit for measuring :math:`\mu` using the quantum
Monte Carlo algorithm.
.. figure:: ../../_static/ops/qmc.svg
:align: center
:width: 60%
:target: javascript:void(0);
The algorithm proceeds as follows:
#. The probability distribution :math:`p(i)` is encoded using a unitary :math:`\mathcal{A}`
applied to the first :math:`m` qubits specified by ``wires``.
#. The function :math:`f(i)` is encoded onto the ``target_wire`` using a unitary
:math:`\mathcal{R}`.
#. The unitary :math:`\mathcal{Q}` is defined with eigenvalues
:math:`e^{\pm 2 \pi i \theta}` such that the phase :math:`\theta` encodes the expectation
value through the equation :math:`\mu = (1 + \cos (\pi \theta)) / 2`. The circuit in
steps 1 and 2 prepares an equal superposition over the two states corresponding to the
eigenvalues :math:`e^{\pm 2 \pi i \theta}`.
#. The circuit returned by this function is applied so that :math:`\pm\theta` can be
estimated by finding the probabilities of the :math:`n` estimation wires. This in turn
allows for the estimation of :math:`\mu`.
Visit `Rebentrost et al. (2018)
<https://arxiv.org/abs/1805.00109>`__ for further details.
In this algorithm, the number of applications :math:`N` of the :math:`\mathcal{Q}` unitary
scales as :math:`2^{n}`. However, due to the use of quantum phase estimation, the error
:math:`\epsilon` scales as :math:`\mathcal{O}(2^{-n})`. Hence,
.. math::
N = \mathcal{O}\left(\frac{1}{\epsilon}\right).
This scaling can be compared to standard Monte Carlo estimation, where :math:`N` samples are
generated from the probability distribution and the average over :math:`f` is taken. In that
case,
.. math::
N = \mathcal{O}\left(\frac{1}{\epsilon^{2}}\right).
Hence, the quantum Monte Carlo algorithm has a quadratically improved time complexity with
:math:`N`.
**Example**
Consider a standard normal distribution :math:`p(x)` and a function
:math:`f(x) = \sin ^{2} (x)`. The expectation value of :math:`f(x)` is
:math:`\int_{-\infty}^{\infty}f(x)p(x) \approx 0.432332`. This number can be approximated by
discretizing the problem and using the quantum Monte Carlo algorithm.
First, the problem is discretized:
.. code-block:: python
from scipy.stats import norm
m = 5
M = 2 ** m
xmax = np.pi # bound to region [-pi, pi]
xs = np.linspace(-xmax, xmax, M)
probs = np.array([norm().pdf(x) for x in xs])
probs /= np.sum(probs)
func = lambda i: np.sin(xs[i]) ** 2
r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])
The ``quantum_monte_carlo`` transform can then be used:
.. code-block::
from pennylane.templates.state_preparations.mottonen import (
_uniform_rotation_dagger as r_unitary,
)
n = 6
N = 2 ** n
a_wires = range(m)
wires = range(m + 1)
target_wire = m
estimation_wires = range(m + 1, n + m + 1)
dev = qml.device("default.qubit", wires=(n + m + 1))
def fn():
qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=a_wires)
r_unitary(qml.RY, r_rotations, control_wires=a_wires[::-1], target_wire=target_wire)
@qml.qnode(dev)
def qmc():
qml.quantum_monte_carlo(fn, wires, target_wire, estimation_wires)()
return qml.probs(estimation_wires)
phase_estimated = np.argmax(qmc()[:int(N / 2)]) / N
The estimated value can be retrieved using the formula :math:`\mu = (1-\cos(\pi \theta))/2`
>>> (1 - np.cos(np.pi * phase_estimated)) / 2
0.42663476277231915
It is also possible to explore the resources required to perform the quantum Monte Carlo
algorithm
>>> qtape = qmc.qtape.expand(depth=1)
>>> qtape.get_resources()
{'RY': 14674,
'CNOT': 15686,
'PhaseShift': 1020,
'RX': 510,
'CZ': 126,
'PauliX': 1260,
'Toffoli': 2016,
'SWAP': 3,
'Hadamard': 6,
'ControlledPhaseShift': 15}
"""
wires = Wires(wires)
target_wire = Wires(target_wire)
estimation_wires = Wires(estimation_wires)
if Wires.shared_wires([wires, estimation_wires]):
raise ValueError("No wires can be shared between the wires and estimation_wires registers")
@wraps(fn)
def wrapper(*args, **kwargs):
fn(*args, **kwargs)
for i, control_wire in enumerate(estimation_wires):
Hadamard(control_wire)
# Find wires eligible to be used as helper wires
work_wires = estimation_wires.toset() - {control_wire}
n_reps = 2 ** (len(estimation_wires) - (i + 1))
q = apply_controlled_Q(
fn,
wires=wires,
target_wire=target_wire,
control_wire=control_wire,
work_wires=work_wires,
)
for _ in range(n_reps):
q(*args, **kwargs)
QFT(wires=estimation_wires).inv()
return wrapper
|
def quantum_monte_carlo(fn, wires, target_wire, estimation_wires):
r"""Provides the circuit to perform the
`quantum Monte Carlo estimation <https://arxiv.org/abs/1805.00109>`__ algorithm.
The input ``fn`` should be the quantum circuit corresponding to the :math:`\mathcal{F}` unitary
in the paper above that encodes the probability distribution and random variable onto ``wires``
so that measurement of the ``target_wire`` provides the expectation value to be estimated.
The quantum Monte Carlo algorithm then estimates the expectation value using quantum phase
estimation (check out :class:`~.QuantumPhaseEstimation` for more details), using the
``estimation_wires``.
.. note::
A complementary approach for quantum Monte Carlo is available with the
:class:`~.QuantumMonteCarlo` template.
The ``quantum_monte_carlo`` transform is intended for
use when you already have the circuit for performing :math:`\mathcal{F}` set up, and is
compatible with resource estimation and potential hardware implementation. The
:class:`~.QuantumMonteCarlo` template is only compatible with
simulators, but may perform faster and is suited to quick prototyping.
Args:
fn (Callable): a quantum function that applies quantum operations according to the
:math:`\mathcal{F}` unitary used as part of quantum Monte Carlo estimation
wires (Union[Wires or Sequence[int]]): the wires acted upon by the ``fn`` circuit
target_wire (Union[Wires, int]): The wire in which the expectation value is encoded. Must be
contained within ``wires``.
estimation_wires (Union[Wires, Sequence[int], or int]): the wires used for phase estimation
Returns:
function: The circuit for quantum Monte Carlo estimation
Raises:
ValueError: if ``wires`` and ``estimation_wires`` share a common wire
.. UsageDetails::
Consider an input quantum circuit ``fn`` that performs the unitary
.. math::
\mathcal{F} = \mathcal{R} \mathcal{A}.
.. figure:: ../../_static/ops/f.svg
:align: center
:width: 15%
:target: javascript:void(0);
Here, the unitary :math:`\mathcal{A}` prepares a probability distribution :math:`p(i)` of
dimension :math:`M = 2^{m}` over :math:`m \geq 1` qubits:
.. math::
\mathcal{A}|0\rangle^{\otimes m} = \sum_{i \in X} p(i) |i\rangle
where :math:`X = \{0, 1, \ldots, M - 1\}` and :math:`|i\rangle` is the basis state
corresponding to :math:`i`. The :math:`\mathcal{R}` unitary imprints the
result of a function :math:`f: X \rightarrow [0, 1]` onto an ancilla qubit:
.. math::
\mathcal{R}|i\rangle |0\rangle = |i\rangle \left(\sqrt{1 - f(i)} |0\rangle + \sqrt{f(i)}|1\rangle\right).
Following `this <https://arxiv.org/abs/1805.00109>`__ paper,
it can be seen that the probability of measuring the state :math:`|1\rangle` in the final
qubit is
.. math::
\mu = \sum_{i \in X} p(i) f(i).
However, it is possible to measure :math:`\mu` more efficiently using quantum Monte Carlo
estimation. This function transforms an input quantum circuit ``fn`` that performs the
unitary :math:`\mathcal{F}` to a larger circuit for measuring :math:`\mu` using the quantum
Monte Carlo algorithm.
.. figure:: ../../_static/ops/qmc.svg
:align: center
:width: 60%
:target: javascript:void(0);
The algorithm proceeds as follows:
#. The probability distribution :math:`p(i)` is encoded using a unitary :math:`\mathcal{A}`
applied to the first :math:`m` qubits specified by ``wires``.
#. The function :math:`f(i)` is encoded onto the ``target_wire`` using a unitary
:math:`\mathcal{R}`.
#. The unitary :math:`\mathcal{Q}` is defined with eigenvalues
:math:`e^{\pm 2 \pi i \theta}` such that the phase :math:`\theta` encodes the expectation
value through the equation :math:`\mu = (1 + \cos (\pi \theta)) / 2`. The circuit in
steps 1 and 2 prepares an equal superposition over the two states corresponding to the
eigenvalues :math:`e^{\pm 2 \pi i \theta}`.
#. The circuit returned by this function is applied so that :math:`\pm\theta` can be
estimated by finding the probabilities of the :math:`n` estimation wires. This in turn
allows for the estimation of :math:`\mu`.
Visit `Rebentrost et al. (2018)
<https://arxiv.org/abs/1805.00109>`__ for further details.
In this algorithm, the number of applications :math:`N` of the :math:`\mathcal{Q}` unitary
scales as :math:`2^{n}`. However, due to the use of quantum phase estimation, the error
:math:`\epsilon` scales as :math:`\mathcal{O}(2^{-n})`. Hence,
.. math::
N = \mathcal{O}\left(\frac{1}{\epsilon}\right).
This scaling can be compared to standard Monte Carlo estimation, where :math:`N` samples are
generated from the probability distribution and the average over :math:`f` is taken. In that
case,
.. math::
N = \mathcal{O}\left(\frac{1}{\epsilon^{2}}\right).
Hence, the quantum Monte Carlo algorithm has a quadratically improved time complexity with
:math:`N`.
**Example**
Consider a standard normal distribution :math:`p(x)` and a function
:math:`f(x) = \sin ^{2} (x)`. The expectation value of :math:`f(x)` is
:math:`\int_{-\infty}^{\infty}f(x)p(x) \approx 0.432332`. This number can be approximated by
discretizing the problem and using the quantum Monte Carlo algorithm.
First, the problem is discretized:
.. code-block:: python
from scipy.stats import norm
m = 5
M = 2 ** m
xmax = np.pi # bound to region [-pi, pi]
xs = np.linspace(-xmax, xmax, M)
probs = np.array([norm().pdf(x) for x in xs])
probs /= np.sum(probs)
func = lambda i: np.sin(xs[i]) ** 2
r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])
The ``quantum_monte_carlo`` transform can then be used:
.. code-block::
from pennylane.templates.state_preparations.mottonen import (
_uniform_rotation_dagger as r_unitary,
)
n = 6
N = 2 ** n
a_wires = range(m)
wires = range(m + 1)
target_wire = m
estimation_wires = range(m + 1, n + m + 1)
dev = qml.device("default.qubit", wires=(n + m + 1))
def fn():
qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=a_wires)
r_unitary(qml.RY, r_rotations, control_wires=a_wires[::-1], target_wire=target_wire)
@qml.qnode(dev)
def qmc():
qml.quantum_monte_carlo(fn, wires, target_wire, estimation_wires)()
return qml.probs(estimation_wires)
phase_estimated = np.argmax(qmc()[:int(N / 2)]) / N
The estimated value can be retrieved using the formula :math:`\mu = (1-\cos(\pi \theta))/2`
>>> (1 - np.cos(np.pi * phase_estimated)) / 2
0.42663476277231915
It is also possible to explore the resources required to perform the quantum Monte Carlo
algorithm
>>> qtape = qmc.qtape.expand(depth=1)
>>> qtape.get_resources()
{'RY': 14674,
'CNOT': 15686,
'PhaseShift': 1020,
'RX': 510,
'CZ': 126,
'PauliX': 1260,
'Toffoli': 2016,
'SWAP': 3,
'Hadamard': 6,
'ControlledPhaseShift': 15}
"""
wires = Wires(wires)
target_wire = Wires(target_wire)
estimation_wires = Wires(estimation_wires)
if Wires.shared_wires([wires, estimation_wires]):
raise ValueError("No wires can be shared between the wires and estimation_wires registers")
@wraps(fn)
def wrapper(*args, **kwargs):
fn(*args, **kwargs)
for i, control_wire in enumerate(estimation_wires):
Hadamard(control_wire)
# Find wires eligible to be used as helper wires
work_wires = estimation_wires.toset() - {control_wire}
n_reps = 2 ** (len(estimation_wires) - (i + 1))
q = apply_controlled_Q(
fn,
wires=wires,
target_wire=target_wire,
control_wire=control_wire,
work_wires=work_wires,
)
for _ in range(n_reps):
q(*args, **kwargs)
QFT(wires=estimation_wires).inv()
return wrapper
|
54,392 |
def check_checksum(archive: Path, source_metadata: Dict[str, Any]):
"""
Checks that an archive matches the checksum in the package metadata.
archive -- the path to the archive we wish to checksum
source_metadata -- The source section from meta.yaml.
"""
checksum_keys = {"md5", "sha256"}.intersection(source_metadata)
if not checksum_keys:
return
elif len(checksum_keys) != 1:
raise ValueError(
"Only one checksum should be included in a package "
"setup; found {}.".format(checksum_keys)
)
checksum_algorithm = checksum_keys.pop()
checksum = source_metadata[checksum_algorithm]
CHUNK_SIZE = 1 << 16
h = getattr(hashlib, checksum_algorithm)()
with open(archive, "rb") as fd:
while True:
chunk = fd.read(CHUNK_SIZE)
h.update(chunk)
if len(chunk) < CHUNK_SIZE:
break
if h.hexdigest() != checksum:
raise ValueError("Invalid {} checksum".format(checksum_algorithm))
|
def check_checksum(archive: Path, source_metadata: Dict[str, Any]):
"""
Checks that an archive matches the checksum in the package metadata.
Parameters
----------
archive
the path to the archive we wish to checksum
source_metadata
The source section from meta.yaml.
"""
checksum_keys = {"md5", "sha256"}.intersection(source_metadata)
if not checksum_keys:
return
elif len(checksum_keys) != 1:
raise ValueError(
"Only one checksum should be included in a package "
"setup; found {}.".format(checksum_keys)
)
checksum_algorithm = checksum_keys.pop()
checksum = source_metadata[checksum_algorithm]
CHUNK_SIZE = 1 << 16
h = getattr(hashlib, checksum_algorithm)()
with open(archive, "rb") as fd:
while True:
chunk = fd.read(CHUNK_SIZE)
h.update(chunk)
if len(chunk) < CHUNK_SIZE:
break
if h.hexdigest() != checksum:
raise ValueError("Invalid {} checksum".format(checksum_algorithm))
|
7,039 |
def get_runtime_client(comms_method, workflow, timeout=None):
"""Return client for the provided commumication method.
Args:
comm_method: communication method
workflow: workflow name
"""
if comms_method == CommsMeth.SSH:
from cylc.flow.network.ssh_client import SuiteRuntimeClient
else:
from cylc.flow.network.client import SuiteRuntimeClient
return SuiteRuntimeClient(workflow, timeout=timeout)
|
def get_runtime_client(comms_method, workflow, timeout=None):
"""Return client for the provided communication method.
Args:
comm_method: communication method
workflow: workflow name
"""
if comms_method == CommsMeth.SSH:
from cylc.flow.network.ssh_client import SuiteRuntimeClient
else:
from cylc.flow.network.client import SuiteRuntimeClient
return SuiteRuntimeClient(workflow, timeout=timeout)
|
5,895 |
def create_really_basic_wheel(name, version):
# type: (str, str) -> bytes
def digest(contents):
return "sha256={}".format(
urlsafe_b64encode_nopad(sha256(contents).digest())
)
def add_file(path, text):
contents = text.encode("utf-8")
z.writestr(path, contents)
records.append((path, digest(contents), str(len(contents))))
dist_info = "{}-{}.dist-info".format(name, version)
record_path = "{}/RECORD".format(dist_info)
records = [(record_path, "", "")]
buf = BytesIO()
with ZipFile(buf, "w") as z:
add_file("{}/WHEEL".format(dist_info), "Wheel-Version: 1.0")
add_file(
"{}/METADATA".format(dist_info),
dedent(
"""
Metadata-Version: 2.1
Name: {}
Version: {}
""".format(name, version)
),
)
z.writestr(record_path, "\n".join(",".join(r) for r in records))
buf.seek(0)
return buf.read()
|
def create_really_basic_wheel(name, version):
# type: (str, str) -> bytes
def digest(contents):
return "sha256={}".format(
urlsafe_b64encode_nopad(sha256(contents).digest())
)
def add_file(path, text):
contents = text.encode("utf-8")
z.writestr(path, contents)
records.append((path, digest(contents), str(len(contents))))
dist_info = "{}-{}.dist-info".format(name, version)
record_path = "{}/RECORD".format(dist_info)
records = [(record_path, "", "")]
buf = BytesIO()
with ZipFile(buf, "w") as z:
add_file("{}/WHEEL".format(dist_info), "Wheel-Version: 1.0")
add_file(
"{}/METADATA".format(dist_info),
dedent(
"""\
Metadata-Version: 2.1
Name: {}
Version: {}
""".format(name, version)
),
)
z.writestr(record_path, "\n".join(",".join(r) for r in records))
buf.seek(0)
return buf.read()
|
2,669 |
def test_feature_union():
# basic sanity check for feature union
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert X_transformed.shape == (X.shape[0], 3)
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# Test clone
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
fs2 = clone(fs)
assert fs.transformer_list[0][1] is not fs2.transformer_list[0][1]
# test setting parameters
fs.set_params(select__k=2)
assert fs.fit_transform(X, y).shape == (X.shape[0], 4)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert X_transformed.shape == (X.shape[0], 8)
# test error if some elements do not support transform
msg = "All estimators should implement fit and transform.*\\bNoTrans\\b"
fs = FeatureUnion([("transform", Transf()), ("no_transform", NoTrans())])
with pytest.raises(TypeError, match=msg):
fs.fit(X)
# test that init accepts tuples
fs = FeatureUnion((("svd", svd), ("select", select)))
fs.fit(X, y)
|
def test_feature_union():
# basic sanity check for feature union
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert X_transformed.shape == (X.shape[0], 3)
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# Test clone
fs2 = clone(fs)
assert fs.transformer_list[0][1] is not fs2.transformer_list[0][1]
# test setting parameters
fs.set_params(select__k=2)
assert fs.fit_transform(X, y).shape == (X.shape[0], 4)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert X_transformed.shape == (X.shape[0], 8)
# test error if some elements do not support transform
msg = "All estimators should implement fit and transform.*\\bNoTrans\\b"
fs = FeatureUnion([("transform", Transf()), ("no_transform", NoTrans())])
with pytest.raises(TypeError, match=msg):
fs.fit(X)
# test that init accepts tuples
fs = FeatureUnion((("svd", svd), ("select", select)))
fs.fit(X, y)
|
17,391 |
def move_exp_nanmean(array, *, axis, alpha):
if is_duck_dask_array(array):
raise TypeError("rolling_exp is not currently support for dask arrays")
import numbagg
if axis == ():
return array.astype(np.float64)
else:
return numbagg.move_exp_nanmean(array, axis=axis, alpha=alpha)
|
def move_exp_nanmean(array, *, axis, alpha):
if is_duck_dask_array(array):
raise TypeError("rolling_exp is not currently support for dask-like arrays")
import numbagg
if axis == ():
return array.astype(np.float64)
else:
return numbagg.move_exp_nanmean(array, axis=axis, alpha=alpha)
|
47,897 |
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
# load vocabulary file for model
log.info("Loading vocab file:\t{}".format(args.vocab))
with open(args.vocab, "r", encoding="utf-8") as r:
vocab = dict((t.rstrip("\n"), i) for i, t in enumerate(r.readlines()))
log.info("{} tokens loaded".format(len(vocab)))
# get context as a string (as we might need it's length for the sequence reshape)
context = get_context(args)
# encode context into token ids list
c_tokens_id, c_tokens_se = text_to_tokens(context, vocab)
log.info("Initializing Inference Engine")
ie = IECore()
log.info("Device is {}".format(args.device))
version = ie.get_versions(args.device)[args.device]
version_str = "{}.{}.{}".format(version.major, version.minor, version.build_number)
log.info("Plugin version is {}".format(version_str))
# read IR
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
ie_encoder = ie.read_network(model=model_xml, weights=model_bin)
if args.reshape:
# reshape the sequence length to the context + maximum question length (in tokens)
first_input_layer = next(iter(ie_encoder.inputs))
c = ie_encoder.inputs[first_input_layer].shape[1]
# find the closest multiple of 64
seq = min(c, round((len(c_tokens_id) + args.max_question_token_num) / 64) * 64)
if seq < c:
input_info = list(ie_encoder.inputs)
new_shapes = dict([])
for i in input_info:
n, c = ie_encoder.inputs[i].shape
new_shapes[i] = [n, seq]
log.info("Reshaped input {} from {} to the {}".format(i, ie_encoder.inputs[i].shape, new_shapes[i]))
log.info("Attempting to reshape the network to the modified inputs...")
try:
ie_encoder.reshape(new_shapes)
log.info("Successful!")
except:
log.info("Failed...reloading the network")
ie_encoder = ie.read_network(model=model_xml, weights=model_bin)
log.info("Done")
else:
log.info("Skipping network reshaping,"
" as (context length + max question length) exceeds the current (input) network sequence length")
# check input and output names
input_names_model = list(ie_encoder.inputs.keys())
output_names_model = list(ie_encoder.outputs.keys())
input_names = eval(args.input_names)
output_names = eval(args.output_names)
if set(input_names_model) != set(input_names) or set(output_names_model) != set(output_names):
log.error("Input or Output names do not match")
log.error(" Network input->output names: {}->{}".format(input_names_model, output_names_model))
log.error(" Expected (from the demo cmd-line) input->output names: {}->{}".format(input_names, output_names))
raise Exception("Unexpected network input or output names")
# load model to the device
log.info("Loading model to the {}".format(args.device))
ie_encoder_exec = ie.load_network(network=ie_encoder, device_name=args.device)
# loop on user's questions
while True:
question = input('Type question (enter to exit):')
if not question:
break
q_tokens_id, _ = text_to_tokens(question, vocab)
# maximum number of tokens that can be processed by network at once
max_length = ie_encoder.inputs[input_names[0]].shape[1]
# calculate number of tokens for context in each inference request.
# reserve 3 positions for special tokens
# [CLS] q_tokens [SEP] c_tokens [SEP]
c_wnd_len = max_length - (len(q_tokens_id) + 3)
# token num between two neighbour context windows
# 1/2 means that context windows are overlapped by half
c_stride = c_wnd_len // 2
t0 = time.time()
t_count = 0
# array of answers from each window
answers = []
# init a window to iterate over context
c_s, c_e = 0, min(c_wnd_len, len(c_tokens_id))
# iterate while context window is not empty
while c_e > c_s:
# form the request
tok_cls = vocab['[CLS]']
tok_sep = vocab['[SEP]']
input_ids = [tok_cls] + q_tokens_id + [tok_sep] + c_tokens_id[c_s:c_e] + [tok_sep]
token_type_ids = [0] + [0] * len(q_tokens_id) + [0] + [1] * (c_e - c_s) + [0]
attention_mask = [1] * len(input_ids)
# pad the rest of the request
pad_len = max_length - len(input_ids)
input_ids += [0] * pad_len
token_type_ids += [0] * pad_len
attention_mask += [0] * pad_len
# create numpy inputs for IE
inputs = {
input_names[0]: np.array([input_ids], dtype=np.int32),
input_names[1]: np.array([attention_mask], dtype=np.int32),
input_names[2]: np.array([token_type_ids], dtype=np.int32),
}
t_start = time.time()
# infer by IE
res = ie_encoder_exec.infer(inputs=inputs)
t_end = time.time()
t_count += 1
log.info("Sequence of length {} is processed with {:0.2f} sentence/sec ({:0.2} sec per request)".format(
max_length,
1 / (t_end - t_start),
t_end - t_start
))
# get start-end scores for context
def get_score(name):
out = np.exp(res[name].reshape((max_length,)))
return out / out.sum(axis=-1)
score_s = get_score(output_names[0])
score_e = get_score(output_names[1])
# get 'no-answer' score (not valid if model has been fine-tuned on squad1.x)
if args.model_squad_ver.split('.')[0] == '1':
score_na = 0
else:
score_na = score_s[0] * score_e[0]
# find product of all start-end combinations to find the best one
c_s_idx = len(q_tokens_id) + 2 # index of first context token in tensor
c_e_idx = max_length - (1 + pad_len) # index of last+1 context token in tensor
score_mat = np.matmul(
score_s[c_s_idx:c_e_idx].reshape((c_e - c_s, 1)),
score_e[c_s_idx:c_e_idx].reshape((1, c_e - c_s))
)
# reset candidates with end before start
score_mat = np.triu(score_mat)
# reset long candidates (>max_answer_token_num)
score_mat = np.tril(score_mat, args.max_answer_token_num - 1)
# find the best start-end pair
max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1])
max_score = score_mat[max_s, max_e] * (1 - score_na)
# convert to context text start-end index
max_s = c_tokens_se[c_s + max_s][0]
max_e = c_tokens_se[c_s + max_e][1]
# check that answers list does not have duplicates (because of context windows overlapping)
same = [i for i, a in enumerate(answers) if a[1] == max_s and a[2] == max_e]
if same:
assert len(same) == 1
# update exist answer record
a = answers[same[0]]
answers[same[0]] = (max(max_score, a[0]), max_s, max_e)
else:
# add new record
answers.append((max_score, max_s, max_e))
# check that context window reach the end
if c_e == len(c_tokens_id):
break
# move to next window position
c_s = min(c_s + c_stride, len(c_tokens_id))
c_e = min(c_s + c_wnd_len, len(c_tokens_id))
t1 = time.time()
log.info("{} requests by {} length are processed by {:0.2f}sec ({:0.2}sec per request)".format(
t_count,
max_length,
t1 - t0,
(t1 - t0) / t_count
))
# print top 3 results
answers = list(sorted(answers, key=lambda x: -x[0]))
for score, s, e in answers[:3]:
log.info("---answer: {:0.2f} {}".format(score, context[s:e]))
c_s, c_e = find_sentence_range(context, s, e)
log.info(" " + context[c_s:s] + "\033[91m" + context[s:e] + '\033[0m' + context[e:c_e])
|
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
# load vocabulary file for model
log.info("Loading vocab file:\t{}".format(args.vocab))
with open(args.vocab, "r", encoding="utf-8") as r:
vocab = dict((t.rstrip("\n"), i) for i, t in enumerate(r.readlines()))
log.info("{} tokens loaded".format(len(vocab)))
# get context as a string (as we might need it's length for the sequence reshape)
context = get_context(args)
# encode context into token ids list
c_tokens_id, c_tokens_se = text_to_tokens(context, vocab)
log.info("Initializing Inference Engine")
ie = IECore()
log.info("Device is {}".format(args.device))
version = ie.get_versions(args.device)[args.device]
version_str = "{}.{}.{}".format(version.major, version.minor, version.build_number)
log.info("Plugin version is {}".format(version_str))
# read IR
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
ie_encoder = ie.read_network(model=model_xml, weights=model_bin)
if args.reshape:
# reshape the sequence length to the context + maximum question length (in tokens)
first_input_layer = next(iter(ie_encoder.inputs))
c = ie_encoder.inputs[first_input_layer].shape[1]
# find the closest multiple of 64
seq = min(c, round((len(c_tokens_id) + args.max_question_token_num) / 64) * 64)
if seq < c:
input_info = list(ie_encoder.inputs)
new_shapes = dict([])
for i in input_info:
n, c = ie_encoder.inputs[i].shape
new_shapes[i] = [n, seq]
log.info("Reshaped input {} from {} to the {}".format(i, ie_encoder.inputs[i].shape, new_shapes[i]))
log.info("Attempting to reshape the network to the modified inputs...")
try:
ie_encoder.reshape(new_shapes)
log.info("Successful!")
except:
log.info("Failed...reloading the network")
ie_encoder = ie.read_network(model=model_xml, weights=model_bin)
log.info("Done")
else:
log.info("Skipping network reshaping,"
" as (context length + max question length) exceeds the current (input) network sequence length")
# check input and output names
input_names_model = list(ie_encoder.inputs.keys())
output_names_model = list(ie_encoder.outputs.keys())
input_names = eval(args.input_names)
output_names = eval(args.output_names)
if set(input_names_model) != set(input_names) or set(output_names_model) != set(output_names):
log.error("Input or Output names do not match")
log.error(" Network input->output names: {}->{}".format(input_names_model, output_names_model))
log.error(" Expected (from the demo cmd-line) input->output names: {}->{}".format(input_names, output_names))
raise Exception("Unexpected network input or output names")
# load model to the device
log.info("Loading model to the {}".format(args.device))
ie_encoder_exec = ie.load_network(network=ie_encoder, device_name=args.device)
# loop on user's questions
while True:
question = input('Type question (enter to exit):')
if not question:
break
q_tokens_id, _ = text_to_tokens(question, vocab)
# maximum number of tokens that can be processed by network at once
max_length = ie_encoder.inputs[input_names[0]].shape[1]
# calculate number of tokens for context in each inference request.
# reserve 3 positions for special tokens
# [CLS] q_tokens [SEP] c_tokens [SEP]
c_wnd_len = max_length - (len(q_tokens_id) + 3)
# token num between two neighbour context windows
# 1/2 means that context windows are overlapped by half
c_stride = c_wnd_len // 2
t0 = time.time()
t_count = 0
# array of answers from each window
answers = []
# init a window to iterate over context
c_s, c_e = 0, min(c_wnd_len, len(c_tokens_id))
# iterate while context window is not empty
while c_e > c_s:
# form the request
tok_cls = vocab['[CLS]']
tok_sep = vocab['[SEP]']
input_ids = [tok_cls] + q_tokens_id + [tok_sep] + c_tokens_id[c_s:c_e] + [tok_sep]
token_type_ids = [0] + [0] * len(q_tokens_id) + [0] + [1] * (c_e - c_s) + [0]
attention_mask = [1] * len(input_ids)
# pad the rest of the request
pad_len = max_length - len(input_ids)
input_ids += [0] * pad_len
token_type_ids += [0] * pad_len
attention_mask += [0] * pad_len
# create numpy inputs for IE
inputs = {
input_names[0]: np.array([input_ids], dtype=np.int32),
input_names[1]: np.array([attention_mask], dtype=np.int32),
input_names[2]: np.array([token_type_ids], dtype=np.int32),
}
t_start = time.time()
# infer by IE
res = ie_encoder_exec.infer(inputs=inputs)
t_end = time.time()
t_count += 1
log.info("Sequence of length {} is processed with {:0.2f} sentence/sec ({:0.2} sec per request)".format(
max_length,
1 / (t_end - t_start),
t_end - t_start
))
# get start-end scores for context
def get_score(name):
out = np.exp(res[name].reshape((max_length,)))
return out / out.sum(axis=-1)
score_s = get_score(output_names[0])
score_e = get_score(output_names[1])
# get 'no-answer' score (not valid if model has been fine-tuned on squad1.x)
if args.model_squad_ver.split('.')[0] == '1':
score_na = 0
else:
score_na = score_s[0] * score_e[0]
# find product of all start-end combinations to find the best one
c_s_idx = len(q_tokens_id) + 2 # index of first context token in tensor
c_e_idx = max_length - (1 + pad_len) # index of last+1 context token in tensor
score_mat = np.matmul(
score_s[c_s_idx:c_e_idx].reshape((c_e - c_s, 1)),
score_e[c_s_idx:c_e_idx].reshape((1, c_e - c_s))
)
# reset candidates with end before start
score_mat = np.triu(score_mat)
# reset long candidates (>max_answer_token_num)
score_mat = np.tril(score_mat, args.max_answer_token_num - 1)
# find the best start-end pair
max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1])
max_score = score_mat[max_s, max_e] * (1 - score_na)
# convert to context text start-end index
max_s = c_tokens_se[c_s + max_s][0]
max_e = c_tokens_se[c_s + max_e][1]
# check that answers list does not have duplicates (because of context windows overlapping)
same = [i for i, a in enumerate(answers) if a[1] == max_s and a[2] == max_e]
if same:
assert len(same) == 1
# update exist answer record
a = answers[same[0]]
answers[same[0]] = (max(max_score, a[0]), max_s, max_e)
else:
# add new record
answers.append((max_score, max_s, max_e))
# check that context window reached the end
if c_e == len(c_tokens_id):
break
# move to next window position
c_s = min(c_s + c_stride, len(c_tokens_id))
c_e = min(c_s + c_wnd_len, len(c_tokens_id))
t1 = time.time()
log.info("{} requests by {} length are processed by {:0.2f}sec ({:0.2}sec per request)".format(
t_count,
max_length,
t1 - t0,
(t1 - t0) / t_count
))
# print top 3 results
answers = list(sorted(answers, key=lambda x: -x[0]))
for score, s, e in answers[:3]:
log.info("---answer: {:0.2f} {}".format(score, context[s:e]))
c_s, c_e = find_sentence_range(context, s, e)
log.info(" " + context[c_s:s] + "\033[91m" + context[s:e] + '\033[0m' + context[e:c_e])
|
32,847 |
def test_200_query_string(client, tracer, test_spans):
with override_http_config("starlette", dict(trace_query_string=True)):
r = client.get("?foo=bar")
assert r.status_code == 200
assert r.text == "Success"
request_span = list(test_spans.filter_spans(name="starlette.request"))[0]
assert request_span.service == "starlette"
assert request_span.name == "starlette.request"
assert request_span.resource == "GET /"
assert request_span.error == 0
assert request_span.get_tag("http.method") == "GET"
assert request_span.get_tag("http.url") == "http://testserver/"
assert request_span.get_tag("http.status_code") == "200"
assert request_span.get_tag("http.query.string") == "foo=bar"
|
def test_200_query_string(client, tracer, test_spans):
with override_http_config("starlette", dict(trace_query_string=True)):
r = client.get("?foo=bar")
assert r.status_code == 200
assert r.text == "Success"
request_span = next(test_spans.filter_spans(name="starlette.request"))
assert request_span.service == "starlette"
assert request_span.name == "starlette.request"
assert request_span.resource == "GET /"
assert request_span.error == 0
assert request_span.get_tag("http.method") == "GET"
assert request_span.get_tag("http.url") == "http://testserver/"
assert request_span.get_tag("http.status_code") == "200"
assert request_span.get_tag("http.query.string") == "foo=bar"
|
6,593 |
def make_custom_fields():
qr_code_field = dict(
fieldname='qr_code',
label='QR Code',
fieldtype='Attach Image',
read_only=1, no_copy=1, hidden=1)
create_custom_field('Sales Invoice', qr_code_field)
|
def make_qrcode_field():
qr_code_field = dict(
fieldname='qr_code',
label='QR Code',
fieldtype='Attach Image',
read_only=1, no_copy=1, hidden=1)
create_custom_field('Sales Invoice', qr_code_field)
|
2,200 |
def test_nowarn_if_metric_bool_data_bool():
# make sure no warning is raised if metric and data are both boolean
# non-regression test for
# https://github.com/scikit-learn/scikit-learn/issues/18996
pairwise_metric = 'rogerstanimoto'
X = np.random.randint(2, size=(5, 2), dtype=np.bool_)
with pytest.warns(None) as warn_record:
OPTICS(metric=pairwise_metric).fit(X)
assert len(warn_record) == 0
|
def test_nowarn_if_metric_bool_data_bool():
# make sure no warning is raised if metric and data are both boolean
# non-regression test for
# https://github.com/scikit-learn/scikit-learn/issues/18996
pairwise_metric = 'rogerstanimoto'
X = np.random.randint(2, size=(5, 2), dtype=bool)
with pytest.warns(None) as warn_record:
OPTICS(metric=pairwise_metric).fit(X)
assert len(warn_record) == 0
|
31,036 |
def parse_data_pattern_rule(report_json, verdict_field, results_field):
"""Parse data pattern matches for a given rule"""
if report_json.get(verdict_field) != "MATCHED":
return []
data_patterns = []
for dp in report_json.get("scanContentRawReport", {}).get(results_field, []):
if (dp.get("state") == "EVALUATED") and (dp.get("unique_detection_frequency", 0) >= 1):
data_patterns.append({
'DataPatternName': dp["name"],
'LowConfidenceFrequency': dp["low_confidence_frequency"],
'HighConfidenceFrequency': dp["high_confidence_frequency"],
'MediumConfidenceFrequency': dp["medium_confidence_frequency"],
'Detections': dp.get("detections")
})
return data_patterns
|
def parse_data_pattern_rule(report_json, verdict_field, results_field):
"""Parse data pattern matches for a given rule"""
if report_json.get(verdict_field) != "MATCHED":
return []
data_patterns = []
for dp in report_json.get("scanContentRawReport", {}).get(results_field, []):
if (dp.get("state") == "EVALUATED") and (dp.get("unique_detection_frequency", 0) >= 1):
data_patterns.append({
'DataPatternName': dp["name"],
'LowConfidenceFrequency': dp.get('low_confidence_frequency'),
'HighConfidenceFrequency': dp["high_confidence_frequency"],
'MediumConfidenceFrequency': dp["medium_confidence_frequency"],
'Detections': dp.get("detections")
})
return data_patterns
|
23,223 |
def _import_by_name(name: str, last_errors = None) -> Tuple[Any, Any, str]:
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
# try first interpret `name` as MODNAME.OBJ
modname = '.'.join(name_parts[:-1])
if modname:
try:
mod = import_module(modname)
return getattr(mod, name_parts[-1]), mod, modname
except (ImportError, IndexError, AttributeError) as e:
if last_errors is not None:
last_errors.append(str(str(e.args[0])))
pass
# ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
last_j = 0
modname = None
for j in reversed(range(1, len(name_parts) + 1)):
last_j = j
modname = '.'.join(name_parts[:j])
try:
import_module(modname)
except ImportError as e:
if last_errors is not None:
last_errors.append(str(e.args[0]))
continue
if modname in sys.modules:
break
if last_j < len(name_parts):
parent = None
obj = sys.modules[modname]
for obj_name in name_parts[last_j:]:
parent = obj
obj = getattr(obj, obj_name)
return obj, parent, modname
else:
return sys.modules[modname], None, modname
except (ValueError, ImportError, AttributeError, KeyError) as e:
raise ImportError(*e.args) from e
|
def _import_by_name(name: str, last_errors = None) -> Tuple[Any, Any, str]:
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
# try first interpret `name` as MODNAME.OBJ
modname = '.'.join(name_parts[:-1])
if modname:
try:
mod = import_module(modname)
return getattr(mod, name_parts[-1]), mod, modname
except (ImportError, IndexError, AttributeError) as e:
if last_errors is not None:
last_errors.append(str(str(e.args[0])))
pass
# ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
last_j = 0
modname = None
for j in reversed(range(1, len(name_parts) + 1)):
last_j = j
modname = '.'.join(name_parts[:j])
try:
import_module(modname)
except ImportError as e:
if last_errors is not None:
last_errors.append(str(e))
continue
if modname in sys.modules:
break
if last_j < len(name_parts):
parent = None
obj = sys.modules[modname]
for obj_name in name_parts[last_j:]:
parent = obj
obj = getattr(obj, obj_name)
return obj, parent, modname
else:
return sys.modules[modname], None, modname
except (ValueError, ImportError, AttributeError, KeyError) as e:
raise ImportError(*e.args) from e
|
32,742 |
def parse_query(query):
""" Return a command parsed from the given mongo db query. """
db, coll = None, None
ns = getattr(query, "ns", None)
if ns:
# version < 3.1 stores the full namespace
db, coll = _split_namespace(ns)
else:
# version >= 3.1 stores the db and coll seperately
coll = getattr(query, "coll", None)
db = getattr(query, "db", None)
# pymongo < 3.1 _Query doesn't not have a name field, so default to 'query'
cmd = Command(getattr(query, 'name', 'query'), db, coll)
cmd.query = query.spec
return cmd
|
def parse_query(query):
""" Return a command parsed from the given mongo db query. """
db, coll = None, None
ns = getattr(query, "ns", None)
if ns:
# version < 3.1 stores the full namespace
db, coll = _split_namespace(ns)
else:
# version >= 3.1 stores the db and coll seperately
coll = getattr(query, "coll", None)
db = getattr(query, "db", None)
# pymongo < 3.1 _Query does not have a name field, so default to 'query'
cmd = Command(getattr(query, 'name', 'query'), db, coll)
cmd.query = query.spec
return cmd
|
51,955 |
def test_changed_files_from_git_rev_base(tmpdir, capfd):
"""Test arbitrary git ref as base."""
git = which("git", required=True)
with tmpdir.as_cwd():
git("init", "-b", "main")
git("config", "user.name", "test user")
git("config", "user.email", "test@user.com")
git("commit", "--allow-empty", "-m", "initial commit")
tmpdir.ensure('bin/spack')
assert changed_files(base="HEAD") == ['bin/spack']
assert changed_files(base="main") == ['bin/spack']
git("add", 'bin/spack')
git("commit", "-m", "v1")
assert changed_files(base="HEAD") == []
assert changed_files(base="HEAD~") == ["bin/spack"]
|
def test_changed_files_from_git_rev_base(tmpdir, capfd):
"""Test arbitrary git ref as base."""
git = which("git", required=True)
with tmpdir.as_cwd():
git("init")
git("checkout", "-b", "main")
git("config", "user.name", "test user")
git("config", "user.email", "test@user.com")
git("commit", "--allow-empty", "-m", "initial commit")
tmpdir.ensure('bin/spack')
assert changed_files(base="HEAD") == ['bin/spack']
assert changed_files(base="main") == ['bin/spack']
git("add", 'bin/spack')
git("commit", "-m", "v1")
assert changed_files(base="HEAD") == []
assert changed_files(base="HEAD~") == ["bin/spack"]
|
5,647 |
def test_ncx2_gh12731():
# test that gh-12731 is resolved; previously these were all 0.5
nc = 10**np.arange(5,10)
assert_allclose(stats.ncx2.cdf(1e4, df=1, nc=nc), 0)
|
def test_ncx2_gh12731():
# test that gh-12731 is resolved; previously these were all 0.5
nc = 10**np.arange(5, 10)
assert_allclose(stats.ncx2.cdf(1e4, df=1, nc=nc), 0)
|
31,301 |
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: RubrikPolaris client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
client.get_api_token()
except DemistoException as e:
errorMessage = str(e)
if 'Verify that the server URL parameter' in errorMessage:
return """We were unable to connect to the provided\
Polaris Account. Verify it has been entered correctly."""
elif 'Unauthorized' in errorMessage:
return "Incorrect email address or password."
else:
raise e
return "ok"
|
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: RubrikPolaris client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
client.get_api_token()
except DemistoException as e:
errorMessage = str(e)
if 'Verify that the server URL parameter' in errorMessage:
return "We were unable to connect to the provided Polaris Account. Verify it has been entered correctly."
elif 'Unauthorized' in errorMessage:
return "Incorrect email address or password."
else:
raise e
return "ok"
|
35,368 |
def launch_mapdl(
exec_file=None,
run_location=None,
jobname="file",
nproc=2,
ram=None,
mode=None,
override=False,
loglevel="ERROR",
additional_switches="",
start_timeout=120,
port=None,
cleanup_on_exit=True,
start_instance=None,
ip=None,
clear_on_connect=True,
log_apdl=None,
remove_temp_files=False,
verbose_mapdl=False,
license_server_check=True,
license_type=None,
print_com=False,
add_env_vars=None,
replace_env_vars=None,
**kwargs,
) -> _MapdlCore:
"""Start MAPDL locally.
Parameters
----------
exec_file : str, optional
The location of the MAPDL executable. Will use the cached
location when left at the default ``None``.
run_location : str, optional
MAPDL working directory. Defaults to a temporary working
directory. If directory doesn't exist, will create one.
jobname : str, optional
MAPDL jobname. Defaults to ``'file'``.
nproc : int, optional
Number of processors. Defaults to 2.
ram : float, optional
Fixed amount of memory to request for MAPDL. If ``None``,
then MAPDL will use as much as available on the host machine.
mode : str, optional
Mode to launch MAPDL. Must be one of the following:
- ``'grpc'``
- ``'corba'``
- ``'console'``
The ``'grpc'`` mode is available on ANSYS 2021R1 or newer and
provides the best performance and stability. The ``'corba'``
mode is available from v17.0 and newer and is given legacy
support. This mode requires the additional
``ansys_corba`` module. Finally, the ``'console'`` mode
is for legacy use only Linux only prior to v17.0. This console
mode is pending depreciation.
override : bool, optional
Attempts to delete the lock file at the run_location.
Useful when a prior MAPDL session has exited prematurely and
the lock file has not been deleted.
loglevel : str, optional
Sets which messages are printed to the console. ``'INFO'``
prints out all ANSYS messages, ``'WARNING``` prints only
messages containing ANSYS warnings, and ``'ERROR'`` logs only
error messages.
additional_switches : str, optional
Additional switches for MAPDL, for example ``'aa_r'``, the
academic research license, would be added with:
- ``additional_switches="-aa_r"``
Avoid adding switches like -i -o or -b as these are already
included to start up the MAPDL server. See the notes
section for additional details.
start_timeout : float, optional
Maximum allowable time to connect to the MAPDL server.
port : int
Port to launch MAPDL gRPC on. Final port will be the first
port available after (or including) this port. Defaults to
50052. You can also override the default behavior of this
keyword argument with the environment variable
``PYMAPDL_PORT=<VALID PORT>``
custom_bin : str, optional
Path to the MAPDL custom executable. On release 2020R2 on
Linux, if ``None``, will check to see if you have
``ansys.mapdl_bin`` installed and use that executable.
cleanup_on_exit : bool, optional
Exit MAPDL when python exits or the mapdl Python instance is
garbage collected.
start_instance : bool, optional
When False, connect to an existing MAPDL instance at ``ip``
and ``port``, which default to ``'127.0.0.1'`` at 50052.
Otherwise, launch a local instance of MAPDL. You can also
override the default behavior of this keyword argument with
the environment variable ``PYMAPDL_START_INSTANCE=FALSE``.
ip : bool, optional
Used only when ``start_instance`` is ``False``. If provided,
it will force ``start_instance`` to be ``False``.
You can also provide a hostname as an alternative to an IP address.
Defaults to ``'127.0.0.1'``. You can also override the
default behavior of this keyword argument with the
environment variable "PYMAPDL_IP=FALSE".
clear_on_connect : bool, optional
Defaults to ``True``, giving you a fresh environment when
connecting to MAPDL. When if ``start_instance`` is specified
it defaults to ``False``.
log_apdl : str, optional
Enables logging every APDL command to the local disk. This
can be used to "record" all the commands that are sent to
MAPDL via PyMAPDL so a script can be run within MAPDL without
PyMAPDL. This string is the path of the output file (e.g.
``log_apdl='pymapdl_log.txt'``). By default this is disabled.
remove_temp_files : bool, optional
When ``run_location`` is ``None``, this launcher creates a new MAPDL
working directory within the user temporary directory, obtainable with
``tempfile.gettempdir()``. When this parameter is
``True``, this directory will be deleted when MAPDL is exited. Default
``False``.
verbose_mapdl : bool, optional
Enable printing of all output when launching and running
MAPDL. This should be used for debugging only as output can
be tracked within pymapdl. Default ``False``.
license_server_check : bool, optional
Check if the license server is available if MAPDL fails to
start. Only available on ``mode='grpc'``. Defaults ``True``.
license_type : str, optional
Enable license type selection. You can input a string for its
license name (for example ``'meba'`` or ``'ansys'``) or its description
("enterprise solver" or "enterprise" respectively).
You can also use legacy licenses (for example ``'aa_t_a'``) but it will
also raise a warning. If it is not used (``None``), no specific license
will be requested, being up to the license server to provide a specific
license type. Default is ``None``.
print_com : bool, optional
Print the command ``/COM`` arguments to the standard output.
Default ``False``.
add_env_vars : dict, optional
The provided dictionary will be used to extend the system or process
environment variables. If you want to control all of the environment
variables, use ``replace_env_vars``. Defaults to ``None``.
replace_env_vars : dict, optional
The provided dictionary will be used to replace all the system or process
environment variables. To just add some environment variables to the MAPDL
process, use ``add_env_vars``. Defaults to ``None``.
Returns
-------
ansys.mapdl.core.mapdl._MapdlCore
An instance of Mapdl. Type depends on the selected ``mode``.
Notes
-----
These are the MAPDL switch options as of 2020R2 applicable for
running MAPDL as a service via gRPC. Excluded switches such as
``"-j"`` either not applicable or are set via keyword arguments.
\-acc <device>
Enables the use of GPU hardware. See GPU
Accelerator Capability in the Parallel Processing Guide for more
information.
\-amfg
Enables the additive manufacturing capability. Requires
an additive manufacturing license. For general information about
this feature, see AM Process Simulation in ANSYS Workbench.
\-ansexe <executable>
Activates a custom mechanical APDL executable.
In the ANSYS Workbench environment, activates a custom
Mechanical APDL executable.
\-custom <executable>
Calls a custom Mechanical APDL executable
See Running Your Custom Executable in the Programmer's Reference
for more information.
\-db value
Initial memory allocation
Defines the portion of workspace (memory) to be used as the
initial allocation for the database. The default is 1024
MB. Specify a negative number to force a fixed size throughout
the run; useful on small memory systems.
\-dis
Enables Distributed ANSYS
See the Parallel Processing Guide for more information.
\-dvt
Enables ANSYS DesignXplorer advanced task (add-on).
Requires DesignXplorer.
\-l <language>
Specifies a language file to use other than English
This option is valid only if you have a translated message file
in an appropriately named subdirectory in
``/ansys_inc/v201/ansys/docu`` or
``Program Files\\ANSYS\\Inc\\V201\\ANSYS\\docu``
\-m <workspace>
Specifies the total size of the workspace
Workspace (memory) in megabytes used for the initial
allocation. If you omit the ``-m`` option, the default is 2 GB
(2048 MB). Specify a negative number to force a fixed size
throughout the run.
\-machines <IP>
Specifies the distributed machines
Machines on which to run a Distributed ANSYS analysis. See
Starting Distributed ANSYS in the Parallel Processing Guide for
more information.
\-mpi <value>
Specifies the type of MPI to use.
See the Parallel Processing Guide for more information.
\-mpifile <appfile>
Specifies an existing MPI file
Specifies an existing MPI file (appfile) to be used in a
Distributed ANSYS run. See Using MPI Files in the Parallel
Processing Guide for more information.
\-na <value>
Specifies the number of GPU accelerator devices
Number of GPU devices per machine or compute node when running
with the GPU accelerator feature. See GPU Accelerator Capability
in the Parallel Processing Guide for more information.
\-name <value>
Defines Mechanical APDL parameters
Set mechanical APDL parameters at program start-up. The parameter
name must be at least two characters long. For details about
parameters, see the ANSYS Parametric Design Language Guide.
\-p <productname>
ANSYS session product
Defines the ANSYS session product that will run during the
session. For more detailed information about the ``-p`` option,
see Selecting an ANSYS Product via the Command Line.
\-ppf <license feature name>
HPC license
Specifies which HPC license to use during a parallel processing
run. See HPC Licensing in the Parallel Processing Guide for more
information.
\-smp
Enables shared-memory parallelism.
See the Parallel Processing Guide for more information.
If the environment is configured to use `PyPIM <https://pypim.docs.pyansys.com>`_
and ``start_instance`` is ``True``, then starting the instance will be delegated to PyPIM.
In this event, most of the options will be ignored and the server side configuration will
be used.
Examples
--------
Launch MAPDL using the best protocol.
>>> from ansys.mapdl.core import launch_mapdl
>>> mapdl = launch_mapdl()
Run MAPDL with shared memory parallel and specify the location of
the Ansys binary.
>>> exec_file = 'C:/Program Files/ANSYS Inc/v201/ansys/bin/win64/ANSYS201.exe'
>>> mapdl = launch_mapdl(exec_file, additional_switches='-smp')
Connect to an existing instance of MAPDL at IP 192.168.1.30 and
port 50001. This is only available using the latest ``'grpc'``
mode.
>>> mapdl = launch_mapdl(start_instance=False, ip='192.168.1.30',
... port=50001)
Force the usage of the CORBA protocol.
>>> mapdl = launch_mapdl(mode='corba')
Run MAPDL using the console mode (available only on Linux).
>>> mapdl = launch_mapdl('/ansys_inc/v194/ansys/bin/ansys194',
... mode='console')
"""
# These parameters are partially used for unit testing
set_no_abort = kwargs.get("set_no_abort", True)
if ip is None:
ip = os.environ.get("PYMAPDL_IP", LOCALHOST)
else: # pragma: no cover
start_instance = False
ip = socket.gethostbyname(ip) # Converting ip or hostname to ip
check_valid_ip(ip) # double check
if port is None:
port = int(os.environ.get("PYMAPDL_PORT", MAPDL_DEFAULT_PORT))
check_valid_port(port)
# Start MAPDL with PyPIM if the environment is configured for it
# and the user did not pass a directive on how to launch it.
if _HAS_PIM and exec_file is None and pypim.is_configured():
LOG.info("Starting MAPDL remotely. The startup configuration will be ignored.")
return launch_remote_mapdl(cleanup_on_exit=cleanup_on_exit)
# connect to an existing instance if enabled
if start_instance is None:
start_instance = check_valid_start_instance(
os.environ.get("PYMAPDL_START_INSTANCE", True)
)
# special handling when building the gallery outside of CI. This
# creates an instance of mapdl the first time if PYMAPDL start instance
# is False.
if pymapdl.BUILDING_GALLERY: # pragma: no cover
# launch an instance of pymapdl if it does not already exist and
# we're allowed to start instances
if start_instance and GALLERY_INSTANCE[0] is None:
mapdl = launch_mapdl(
start_instance=True,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
GALLERY_INSTANCE[0] = {"ip": mapdl._ip, "port": mapdl._port}
return mapdl
# otherwise, connect to the existing gallery instance if available
elif GALLERY_INSTANCE[0] is not None:
mapdl = MapdlGrpc(
ip=GALLERY_INSTANCE[0]["ip"],
port=GALLERY_INSTANCE[0]["port"],
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
# finally, if running on CI/CD, connect to the default instance
else:
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
if not start_instance:
if clear_on_connect is None: # pragma: no cover
clear_on_connect = False
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
# verify executable
if exec_file is None:
# Load cached path
exec_file = get_ansys_path()
if exec_file is None:
raise FileNotFoundError(
"Invalid exec_file path or cannot load cached "
"mapdl path. Enter one manually by specifying "
"exec_file="
)
else: # verify ansys exists at this location
if not os.path.isfile(exec_file):
raise FileNotFoundError(
f'Invalid MAPDL executable at "{exec_file}"\n'
"Enter one manually using exec_file="
)
# verify run location
if run_location is None:
temp_dir = tempfile.gettempdir()
run_location = os.path.join(temp_dir, "ansys_%s" % random_string(10))
if not os.path.isdir(run_location):
try:
os.mkdir(run_location)
except:
raise RuntimeError(
"Unable to create the temporary working "
f'directory "{run_location}"\n'
"Please specify run_location="
)
else:
if not os.path.isdir(run_location):
raise FileNotFoundError(f'"{run_location}" is not a valid directory')
if remove_temp_files:
LOG.info("`run_location` set. Disabling the removal of temporary files.")
remove_temp_files = False
# verify no lock file and the mode is valid
check_lock_file(run_location, jobname, override)
mode = check_mode(mode, _version_from_path(exec_file))
# cache start parameters
additional_switches = _validate_add_sw(
additional_switches, exec_file, kwargs.pop("force_intel", False)
)
if isinstance(license_type, str):
# In newer license server versions an invalid license name just get discarded and produces no effect or warning.
# For example:
# ```bash
# mapdl.exe -p meba # works fine because 'meba' is a valid license in ALLOWABLE_LICENSES.
# mapdl.exe -p yoyoyo # The -p flag is ignored and it run the default license.
# ```
#
# In older versions probably it might raise an error. But not sure.
license_type = license_type.lower().strip()
if "enterprise" in license_type and "solver" not in license_type:
license_type = "ansys"
elif "enterprise" in license_type and "solver" in license_type:
license_type = "meba"
elif "premium" in license_type:
license_type = "mech_2"
elif "pro" in license_type:
license_type = "mech_1"
elif license_type not in ALLOWABLE_LICENSES:
allow_lics = [f"'{each}'" for each in ALLOWABLE_LICENSES]
warn_text = (
f"The keyword argument 'license_type' value ('{license_type}') is not a recognized license name or has been deprecated.\n"
+ "Still PyMAPDL will try to use it but in older versions you might experience problems connecting to the server.\n"
+ f"Recognized license names: {' '.join(allow_lics)}"
)
warnings.warn(warn_text, UserWarning)
additional_switches += " -p " + license_type
LOG.debug(
f"Using specified license name '{license_type}' in the 'license_type' keyword argument."
)
elif "-p " in additional_switches:
# There is already a license request in additional switches.
license_type = re.findall(r"-p \b(\w*)", additional_switches)[
0
] # getting only the first product license.
if license_type not in ALLOWABLE_LICENSES:
allow_lics = [f"'{each}'" for each in ALLOWABLE_LICENSES]
warn_text = (
f"The additional switch product value ('-p {license_type}') is not a recognized license name or has been deprecated.\n"
+ "Still PyMAPDL will try to use it but in older versions you might experience problems connecting to the server.\n"
+ f"Recognized license names: {' '.join(allow_lics)}"
)
warnings.warn(warn_text, UserWarning)
LOG.warning(warn_text)
LOG.debug(
f"Using specified license name '{license_type}' in the additional switches parameter."
)
elif license_type is not None:
raise TypeError("The argument 'license_type' does only accept str or None.")
start_parm = {
"exec_file": exec_file,
"run_location": run_location,
"additional_switches": additional_switches,
"jobname": jobname,
"nproc": nproc,
"print_com": print_com,
}
if mode in ["console", "corba"]:
start_parm["start_timeout"] = start_timeout
else:
start_parm["ram"] = ram
start_parm["override"] = override
start_parm["timeout"] = start_timeout
# Check the license server
if license_server_check:
# configure timeout to be 90% of the wait time of the startup
# time for Ansys.
lic_check = LicenseChecker(timeout=start_timeout * 0.9, verbose=verbose_mapdl)
lic_check.start()
try:
if mode == "console":
from ansys.mapdl.core.mapdl_console import MapdlConsole
mapdl = MapdlConsole(loglevel=loglevel, log_apdl=log_apdl, **start_parm)
elif mode == "corba":
try:
# pending deprecation to ansys-mapdl-corba
from ansys.mapdl.core.mapdl_corba import MapdlCorba
except ModuleNotFoundError:
raise ModuleNotFoundError(
"To use this feature, install the MAPDL CORBA package"
" with:\n\npip install ansys_corba"
) from None
broadcast = kwargs.get("log_broadcast", False)
mapdl = MapdlCorba(
loglevel=loglevel,
log_apdl=log_apdl,
log_broadcast=broadcast,
verbose=verbose_mapdl,
**start_parm,
)
elif mode == "grpc":
port, actual_run_location = launch_grpc(
port=port,
verbose=verbose_mapdl,
ip=ip,
add_env_vars=add_env_vars,
replace_env_vars=replace_env_vars,
**start_parm,
)
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=cleanup_on_exit,
loglevel=loglevel,
set_no_abort=set_no_abort,
remove_temp_files=remove_temp_files,
log_apdl=log_apdl,
**start_parm,
)
if run_location is None:
mapdl._path = actual_run_location
except Exception as exception:
# Failed to launch for some reason. Check if failure was due
# to the license check
if license_server_check:
lic_check.check()
# pass
raise exception
return mapdl
|
def launch_mapdl(
exec_file=None,
run_location=None,
jobname="file",
nproc=2,
ram=None,
mode=None,
override=False,
loglevel="ERROR",
additional_switches="",
start_timeout=120,
port=None,
cleanup_on_exit=True,
start_instance=None,
ip=None,
clear_on_connect=True,
log_apdl=None,
remove_temp_files=False,
verbose_mapdl=False,
license_server_check=True,
license_type=None,
print_com=False,
add_env_vars=None,
replace_env_vars=None,
**kwargs,
) -> _MapdlCore:
"""Start MAPDL locally.
Parameters
----------
exec_file : str, optional
The location of the MAPDL executable. Will use the cached
location when left at the default ``None``.
run_location : str, optional
MAPDL working directory. Defaults to a temporary working
directory. If directory doesn't exist, will create one.
jobname : str, optional
MAPDL jobname. Defaults to ``'file'``.
nproc : int, optional
Number of processors. Defaults to 2.
ram : float, optional
Fixed amount of memory to request for MAPDL. If ``None``,
then MAPDL will use as much as available on the host machine.
mode : str, optional
Mode to launch MAPDL. Must be one of the following:
- ``'grpc'``
- ``'corba'``
- ``'console'``
The ``'grpc'`` mode is available on ANSYS 2021R1 or newer and
provides the best performance and stability. The ``'corba'``
mode is available from v17.0 and newer and is given legacy
support. This mode requires the additional
``ansys_corba`` module. Finally, the ``'console'`` mode
is for legacy use only Linux only prior to v17.0. This console
mode is pending depreciation.
override : bool, optional
Attempts to delete the lock file at the run_location.
Useful when a prior MAPDL session has exited prematurely and
the lock file has not been deleted.
loglevel : str, optional
Sets which messages are printed to the console. ``'INFO'``
prints out all ANSYS messages, ``'WARNING``` prints only
messages containing ANSYS warnings, and ``'ERROR'`` logs only
error messages.
additional_switches : str, optional
Additional switches for MAPDL, for example ``'aa_r'``, the
academic research license, would be added with:
- ``additional_switches="-aa_r"``
Avoid adding switches like -i -o or -b as these are already
included to start up the MAPDL server. See the notes
section for additional details.
start_timeout : float, optional
Maximum allowable time to connect to the MAPDL server.
port : int
Port to launch MAPDL gRPC on. Final port will be the first
port available after (or including) this port. Defaults to
50052. You can also override the default behavior of this
keyword argument with the environment variable
``PYMAPDL_PORT=<VALID PORT>``
custom_bin : str, optional
Path to the MAPDL custom executable. On release 2020R2 on
Linux, if ``None``, will check to see if you have
``ansys.mapdl_bin`` installed and use that executable.
cleanup_on_exit : bool, optional
Exit MAPDL when python exits or the mapdl Python instance is
garbage collected.
start_instance : bool, optional
When False, connect to an existing MAPDL instance at ``ip``
and ``port``, which default to ``'127.0.0.1'`` at 50052.
Otherwise, launch a local instance of MAPDL. You can also
override the default behavior of this keyword argument with
the environment variable ``PYMAPDL_START_INSTANCE=FALSE``.
ip : bool, optional
Used only when ``start_instance`` is ``False``. If provided,
it will force ``start_instance`` to be ``False``.
You can also provide a hostname as an alternative to an IP address.
Defaults to ``'127.0.0.1'``. You can also override the
default behavior of this keyword argument with the
environment variable "PYMAPDL_IP=FALSE".
clear_on_connect : bool, optional
Defaults to ``True``, giving you a fresh environment when
connecting to MAPDL. When if ``start_instance`` is specified
it defaults to ``False``.
log_apdl : str, optional
Enables logging every APDL command to the local disk. This
can be used to "record" all the commands that are sent to
MAPDL via PyMAPDL so a script can be run within MAPDL without
PyMAPDL. This string is the path of the output file (e.g.
``log_apdl='pymapdl_log.txt'``). By default this is disabled.
remove_temp_files : bool, optional
When ``run_location`` is ``None``, this launcher creates a new MAPDL
working directory within the user temporary directory, obtainable with
``tempfile.gettempdir()``. When this parameter is
``True``, this directory will be deleted when MAPDL is exited. Default
``False``.
verbose_mapdl : bool, optional
Enable printing of all output when launching and running
MAPDL. This should be used for debugging only as output can
be tracked within pymapdl. Default ``False``.
license_server_check : bool, optional
Check if the license server is available if MAPDL fails to
start. Only available on ``mode='grpc'``. Defaults ``True``.
license_type : str, optional
Enable license type selection. You can input a string for its
license name (for example ``'meba'`` or ``'ansys'``) or its description
("enterprise solver" or "enterprise" respectively).
You can also use legacy licenses (for example ``'aa_t_a'``) but it will
also raise a warning. If it is not used (``None``), no specific license
will be requested, being up to the license server to provide a specific
license type. Default is ``None``.
print_com : bool, optional
Print the command ``/COM`` arguments to the standard output.
Default ``False``.
add_env_vars : dict, optional
The provided dictionary will be used to extend the system or process
environment variables. If you want to control all of the environment
variables, use ``replace_env_vars``. Defaults to ``None``.
replace_env_vars : dict, optional
The provided dictionary will be used to replace all the system or process
environment variables. To just add some environment variables to the MAPDL
process, use ``add_env_vars``. Defaults to ``None``.
Returns
-------
ansys.mapdl.core.mapdl._MapdlCore
An instance of Mapdl. Type depends on the selected ``mode``.
Notes
-----
These are the MAPDL switch options as of 2020R2 applicable for
running MAPDL as a service via gRPC. Excluded switches such as
``"-j"`` either not applicable or are set via keyword arguments.
\-acc <device>
Enables the use of GPU hardware. See GPU
Accelerator Capability in the Parallel Processing Guide for more
information.
\-amfg
Enables the additive manufacturing capability. Requires
an additive manufacturing license. For general information about
this feature, see AM Process Simulation in ANSYS Workbench.
\-ansexe <executable>
Activates a custom mechanical APDL executable.
In the ANSYS Workbench environment, activates a custom
Mechanical APDL executable.
\-custom <executable>
Calls a custom Mechanical APDL executable
See Running Your Custom Executable in the Programmer's Reference
for more information.
\-db value
Initial memory allocation
Defines the portion of workspace (memory) to be used as the
initial allocation for the database. The default is 1024
MB. Specify a negative number to force a fixed size throughout
the run; useful on small memory systems.
\-dis
Enables Distributed ANSYS
See the Parallel Processing Guide for more information.
\-dvt
Enables ANSYS DesignXplorer advanced task (add-on).
Requires DesignXplorer.
\-l <language>
Specifies a language file to use other than English
This option is valid only if you have a translated message file
in an appropriately named subdirectory in
``/ansys_inc/v201/ansys/docu`` or
``Program Files\\ANSYS\\Inc\\V201\\ANSYS\\docu``
\-m <workspace>
Specifies the total size of the workspace
Workspace (memory) in megabytes used for the initial
allocation. If you omit the ``-m`` option, the default is 2 GB
(2048 MB). Specify a negative number to force a fixed size
throughout the run.
\-machines <IP>
Specifies the distributed machines
Machines on which to run a Distributed ANSYS analysis. See
Starting Distributed ANSYS in the Parallel Processing Guide for
more information.
\-mpi <value>
Specifies the type of MPI to use.
See the Parallel Processing Guide for more information.
\-mpifile <appfile>
Specifies an existing MPI file
Specifies an existing MPI file (appfile) to be used in a
Distributed ANSYS run. See Using MPI Files in the Parallel
Processing Guide for more information.
\-na <value>
Specifies the number of GPU accelerator devices
Number of GPU devices per machine or compute node when running
with the GPU accelerator feature. See GPU Accelerator Capability
in the Parallel Processing Guide for more information.
\-name <value>
Defines Mechanical APDL parameters
Set mechanical APDL parameters at program start-up. The parameter
name must be at least two characters long. For details about
parameters, see the ANSYS Parametric Design Language Guide.
\-p <productname>
ANSYS session product
Defines the ANSYS session product that will run during the
session. For more detailed information about the ``-p`` option,
see Selecting an ANSYS Product via the Command Line.
\-ppf <license feature name>
HPC license
Specifies which HPC license to use during a parallel processing
run. See HPC Licensing in the Parallel Processing Guide for more
information.
\-smp
Enables shared-memory parallelism.
See the Parallel Processing Guide for more information.
If the environment is configured to use `PyPIM <https://pypim.docs.pyansys.com>`_
and ``start_instance`` is ``True``, then starting the instance will be delegated to PyPIM.
In this event, most of the options will be ignored and the server side configuration will
be used.
Examples
--------
Launch MAPDL using the best protocol.
>>> from ansys.mapdl.core import launch_mapdl
>>> mapdl = launch_mapdl()
Run MAPDL with shared memory parallel and specify the location of
the Ansys binary.
>>> exec_file = 'C:/Program Files/ANSYS Inc/v201/ansys/bin/win64/ANSYS201.exe'
>>> mapdl = launch_mapdl(exec_file, additional_switches='-smp')
Connect to an existing instance of MAPDL at IP 192.168.1.30 and
port 50001. This is only available using the latest ``'grpc'``
mode.
>>> mapdl = launch_mapdl(start_instance=False, ip='192.168.1.30',
... port=50001)
Force the usage of the CORBA protocol.
>>> mapdl = launch_mapdl(mode='corba')
Run MAPDL using the console mode (available only on Linux).
>>> mapdl = launch_mapdl('/ansys_inc/v194/ansys/bin/ansys194',
... mode='console')
"""
# These parameters are partially used for unit testing
set_no_abort = kwargs.get("set_no_abort", True)
if ip is None:
ip = os.environ.get("PYMAPDL_IP", LOCALHOST)
else: # pragma: no cover
start_instance = False
ip = socket.gethostbyname(ip) # Converting ip or hostname to ip
check_valid_ip(ip) # double check
if port is None:
port = int(os.environ.get("PYMAPDL_PORT", MAPDL_DEFAULT_PORT))
check_valid_port(port)
# Start MAPDL with PyPIM if the environment is configured for it
# and the user did not pass a directive on how to launch it.
if _HAS_PIM and exec_file is None and pypim.is_configured():
LOG.info("Starting MAPDL remotely. The startup configuration will be ignored.")
return launch_remote_mapdl(cleanup_on_exit=cleanup_on_exit)
# connect to an existing instance if enabled
if start_instance is None:
start_instance = check_valid_start_instance(
os.environ.get("PYMAPDL_START_INSTANCE", True)
)
# special handling when building the gallery outside of CI. This
# creates an instance of mapdl the first time if PYMAPDL start instance
# is False.
if pymapdl.BUILDING_GALLERY: # pragma: no cover
# launch an instance of pymapdl if it does not already exist and
# we're allowed to start instances
if start_instance and GALLERY_INSTANCE[0] is None:
mapdl = launch_mapdl(
start_instance=True,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
GALLERY_INSTANCE[0] = {"ip": mapdl._ip, "port": mapdl._port}
return mapdl
# otherwise, connect to the existing gallery instance if available
elif GALLERY_INSTANCE[0] is not None:
mapdl = MapdlGrpc(
ip=GALLERY_INSTANCE[0]["ip"],
port=GALLERY_INSTANCE[0]["port"],
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
# finally, if running on CI/CD, connect to the default instance
else:
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
if not start_instance:
if clear_on_connect is None: # pragma: no cover
clear_on_connect = False
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
# verify executable
if exec_file is None:
# Load cached path
exec_file = get_ansys_path()
if exec_file is None:
raise FileNotFoundError(
"Invalid exec_file path or cannot load cached "
"mapdl path. Enter one manually by specifying "
"exec_file="
)
else: # verify ansys exists at this location
if not os.path.isfile(exec_file):
raise FileNotFoundError(
f'Invalid MAPDL executable at "{exec_file}"\n'
"Enter one manually using exec_file="
)
# verify run location
if run_location is None:
temp_dir = tempfile.gettempdir()
run_location = os.path.join(temp_dir, "ansys_%s" % random_string(10))
if not os.path.isdir(run_location):
try:
os.mkdir(run_location)
except:
raise RuntimeError(
"Unable to create the temporary working "
f'directory "{run_location}"\n'
"Please specify run_location="
)
else:
if not os.path.isdir(run_location):
raise FileNotFoundError(f'"{run_location}" is not a valid directory')
if remove_temp_files:
LOG.info("`run_location` set. Disabling the removal of temporary files.")
remove_temp_files = False
# verify no lock file and the mode is valid
check_lock_file(run_location, jobname, override)
mode = check_mode(mode, _version_from_path(exec_file))
# cache start parameters
additional_switches = _validate_add_sw(
additional_switches, exec_file, kwargs.pop("force_intel", False)
)
if isinstance(license_type, str):
# In newer license server versions an invalid license name just get discarded and produces no effect or warning.
# For example:
# ```bash
# mapdl.exe -p meba # works fine because 'meba' is a valid license in ALLOWABLE_LICENSES.
# mapdl.exe -p yoyoyo # The -p flag is ignored and it run the default license.
# ```
#
# In older versions probably it might raise an error. But not sure.
license_type = license_type.lower().strip()
if "enterprise" in license_type and "solver" not in license_type:
license_type = "ansys"
elif "enterprise" in license_type and "solver" in license_type:
license_type = "meba"
elif "premium" in license_type:
license_type = "mech_2"
elif "pro" in license_type:
license_type = "mech_1"
elif license_type not in ALLOWABLE_LICENSES:
allow_lics = [f"'{each}'" for each in ALLOWABLE_LICENSES]
warn_text = (
f"The keyword argument 'license_type' value ('{license_type}') is not a recognized license name or has been deprecated.\n"
+ "Still PyMAPDL will try to use it but in older versions you might experience problems connecting to the server.\n"
+ f"Recognized license names: {' '.join(allow_lics)}"
)
warnings.warn(warn_text, UserWarning)
additional_switches += " -p " + license_type
LOG.debug(
f"Using specified license name '{license_type}' in the 'license_type' keyword argument."
)
elif "-p " in additional_switches:
# There is already a license request in additional switches.
license_type = re.findall(r"-p \b(\w*)", additional_switches)[
0
] # getting only the first product license.
if license_type not in ALLOWABLE_LICENSES:
allow_lics = [f"'{each}'" for each in ALLOWABLE_LICENSES]
warn_text = (
f"The additional switch product value ('-p {license_type}') is not a recognized license name or has been deprecated.\n"
+ "Still PyMAPDL will try to use it but in older versions you might experience problems connecting to the server.\n"
+ f"Recognized license names: {' '.join(allow_lics)}"
)
warnings.warn(warn_text, UserWarning)
LOG.warning(warn_text)
LOG.debug(
f"Using specified license name '{license_type}' in the additional switches parameter."
)
elif license_type is not None:
raise TypeError("The argument 'license_type' does only accept str or None.")
start_parm = {
"exec_file": exec_file,
"run_location": run_location,
"additional_switches": additional_switches,
"jobname": jobname,
"nproc": nproc,
"print_com": print_com,
}
if mode in ["console", "corba"]:
start_parm["start_timeout"] = start_timeout
else:
start_parm["ram"] = ram
start_parm["override"] = override
start_parm["timeout"] = start_timeout
# Check the license server
if license_server_check:
# configure timeout to be 90% of the wait time of the startup
# time for Ansys.
lic_check = LicenseChecker(timeout=start_timeout * 0.9, verbose=verbose_mapdl)
lic_check.start()
try:
if mode == "console":
from ansys.mapdl.core.mapdl_console import MapdlConsole
mapdl = MapdlConsole(loglevel=loglevel, log_apdl=log_apdl, **start_parm)
elif mode == "corba":
try:
# pending deprecation to ansys-mapdl-corba
from ansys.mapdl.core.mapdl_corba import MapdlCorba
except ModuleNotFoundError: # pragma: no cover
raise ModuleNotFoundError(
"To use this feature, install the MAPDL CORBA package"
" with:\n\npip install ansys_corba"
) from None
broadcast = kwargs.get("log_broadcast", False)
mapdl = MapdlCorba(
loglevel=loglevel,
log_apdl=log_apdl,
log_broadcast=broadcast,
verbose=verbose_mapdl,
**start_parm,
)
elif mode == "grpc":
port, actual_run_location = launch_grpc(
port=port,
verbose=verbose_mapdl,
ip=ip,
add_env_vars=add_env_vars,
replace_env_vars=replace_env_vars,
**start_parm,
)
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=cleanup_on_exit,
loglevel=loglevel,
set_no_abort=set_no_abort,
remove_temp_files=remove_temp_files,
log_apdl=log_apdl,
**start_parm,
)
if run_location is None:
mapdl._path = actual_run_location
except Exception as exception:
# Failed to launch for some reason. Check if failure was due
# to the license check
if license_server_check:
lic_check.check()
# pass
raise exception
return mapdl
|
31,568 |
def get_repos_command(client):
result = client.get_repos()
if not result.get('success'):
raise DemistoException(result['message'])
table_header = []
display_title = "LogPoint Repos"
allowed_repos = result.get('allowed_repos')
if allowed_repos and len(allowed_repos) > 0:
table_header = list(allowed_repos[0].keys())
markdown = tableToMarkdown(display_title, allowed_repos, headers=table_header)
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Repos',
outputs_key_field='repo',
outputs=allowed_repos
)
|
def get_repos_command(client):
result = client.get_repos()
if not result.get('success'):
raise DemistoException(result.get('message'))
table_header = []
display_title = "LogPoint Repos"
allowed_repos = result.get('allowed_repos')
if allowed_repos and len(allowed_repos) > 0:
table_header = list(allowed_repos[0].keys())
markdown = tableToMarkdown(display_title, allowed_repos, headers=table_header)
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Repos',
outputs_key_field='repo',
outputs=allowed_repos
)
|
1,743 |
def test_stacking_classifier_sample_weight_fit_param():
# check sample_weight is passed to all invokations of fit
stacker = StackingClassifier(
estimators=[
('lr', CheckingClassifier(expected_fit_params=['sample_weight']))
],
final_estimator=CheckingClassifier(
expected_fit_params=['sample_weight']
)
)
stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0]))
|
def test_stacking_classifier_sample_weight_fit_param():
# check sample_weight is passed to all invocations of fit
stacker = StackingClassifier(
estimators=[
('lr', CheckingClassifier(expected_fit_params=['sample_weight']))
],
final_estimator=CheckingClassifier(
expected_fit_params=['sample_weight']
)
)
stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0]))
|
9,059 |
def test_action_command_from_callable_regex_pattern(mockbot):
# prepare callable
@plugin.action_commands('do .*')
def handler(wrapped, trigger):
wrapped.reply('Hi!')
loader.clean_callable(handler, mockbot.settings)
# create rule from a cleaned callable
rule = rules.ActionCommand.from_callable(mockbot.settings, handler)
# does not match on ".do anything"
line = ':Foo!foo@example.com PRIVMSG #sopel :\x01ACTION do anything\x01'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert not results, 'Regex command are not allowed since Sopel 8.0'
# match on ".do .*"
line = ':Foo!foo@example.com PRIVMSG #sopel :\x01ACTION do .*\x01'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, 'Exactly 1 command must match'
result = results[0]
assert result.group(0) == 'do .*'
assert result.group(1) == 'do .*'
assert result.group(2) is None
assert result.group(3) is None
assert result.group(4) is None
assert result.group(5) is None
assert result.group(6) is None
|
def test_action_command_from_callable_regex_pattern(mockbot):
# prepare callable
@plugin.action_commands('do .*')
def handler(wrapped, trigger):
wrapped.reply('Hi!')
loader.clean_callable(handler, mockbot.settings)
# create rule from a cleaned callable
rule = rules.ActionCommand.from_callable(mockbot.settings, handler)
# does not match on ".do anything"
line = ':Foo!foo@example.com PRIVMSG #sopel :\x01ACTION do anything\x01'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert not results, 'Regex commands are not allowed since Sopel 8.0'
# match on ".do .*"
line = ':Foo!foo@example.com PRIVMSG #sopel :\x01ACTION do .*\x01'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, 'Exactly 1 command must match'
result = results[0]
assert result.group(0) == 'do .*'
assert result.group(1) == 'do .*'
assert result.group(2) is None
assert result.group(3) is None
assert result.group(4) is None
assert result.group(5) is None
assert result.group(6) is None
|
44,042 |
def _binary_matrix(terms, num_qubits):
"""Get a binary matrix representation of the hamiltonian where each row coressponds to a
Pauli term, which is represented by concatenation of Z and X vectors.
Args:
terms (Iterable[Observable]): operators defining the Hamiltonian.
num_qubits (int): number of wires required to define the Hamiltonian.
Returns:
E (ndarray): binary matrix representation of the Hamiltonian of shape
:math:`len(terms) \times 2*num_qubits`.
.. code-block::
>>> terms = [PauliZ(wires=[0]) @ PauliX(wires=[1]), PauliZ(wires=[0]) @ PauliY(wires=[2]),
PauliXwires=[0]) @ PauliY(wires=[3])]
>>> get_binary_matrix(terms, 4)
array([[1, 0, 0, 0, 0, 1, 0, 0],
[1, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 1]]))
"""
E = np.zeros((len(terms), 2 * num_qubits), dtype=int)
for idx, term in enumerate(terms):
ops, wires = term.name, term.wires
if len(term.wires) == 1:
ops = [ops]
for op, wire in zip(ops, wires):
if op in ["PauliX", "PauliY"]:
E[idx][wire + num_qubits] = 1
if op in ["PauliZ", "PauliY"]:
E[idx][wire] = 1
return E
|
def _binary_matrix(terms, num_qubits):
"""Get a binary matrix representation of the hamiltonian where each row coressponds to a
Pauli term, which is represented by a concatenation of Z and X vectors.
Args:
terms (Iterable[Observable]): operators defining the Hamiltonian.
num_qubits (int): number of wires required to define the Hamiltonian.
Returns:
E (ndarray): binary matrix representation of the Hamiltonian of shape
:math:`len(terms) \times 2*num_qubits`.
.. code-block::
>>> terms = [PauliZ(wires=[0]) @ PauliX(wires=[1]), PauliZ(wires=[0]) @ PauliY(wires=[2]),
PauliXwires=[0]) @ PauliY(wires=[3])]
>>> get_binary_matrix(terms, 4)
array([[1, 0, 0, 0, 0, 1, 0, 0],
[1, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 1]]))
"""
E = np.zeros((len(terms), 2 * num_qubits), dtype=int)
for idx, term in enumerate(terms):
ops, wires = term.name, term.wires
if len(term.wires) == 1:
ops = [ops]
for op, wire in zip(ops, wires):
if op in ["PauliX", "PauliY"]:
E[idx][wire + num_qubits] = 1
if op in ["PauliZ", "PauliY"]:
E[idx][wire] = 1
return E
|
47,895 |
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
# load vocabulary file for model
log.info("Loading vocab file:\t{}".format(args.vocab))
with open(args.vocab, "r", encoding="utf-8") as r:
vocab = dict((t.rstrip("\n"), i) for i, t in enumerate(r.readlines()))
log.info("{} tokens loaded".format(len(vocab)))
# get context as a string (as we might need it's length for the sequence reshape)
context = get_context(args)
# encode context into token ids list
c_tokens_id, c_tokens_se = text_to_tokens(context, vocab)
log.info("Initializing Inference Engine")
ie = IECore()
log.info("Device is {}".format(args.device))
version = ie.get_versions(args.device)[args.device]
version_str = "{}.{}.{}".format(version.major, version.minor, version.build_number)
log.info("Plugin version is {}".format(version_str))
# read IR
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
ie_encoder = ie.read_network(model=model_xml, weights=model_bin)
if args.reshape:
# reshape the sequence length to the context + maximum question length (in tokens)
first_input_layer = next(iter(ie_encoder.inputs))
c = ie_encoder.inputs[first_input_layer].shape[1]
# find the closest multiple of 64
seq = min(c, round((len(c_tokens_id) + args.max_question_token_num) / 64) * 64)
if seq < c:
input_info = list(ie_encoder.inputs)
new_shapes = dict([])
for i in input_info:
n, c = ie_encoder.inputs[i].shape
new_shapes[i] = [n, seq]
log.info("Reshaped input {} from {} to the {}".format(i, ie_encoder.inputs[i].shape, new_shapes[i]))
log.info("Attempting to reshape the network to the modified inputs...")
try:
ie_encoder.reshape(new_shapes)
log.info("Successful!")
except:
log.info("Failed...reloading the network")
ie_encoder = ie.read_network(model=model_xml, weights=model_bin)
log.info("Done")
else:
log.info("Skipping network reshaping,"
" as (context length + max question length) exceeds the current (input) network sequence length")
# check input and output names
input_names_model = list(ie_encoder.inputs.keys())
output_names_model = list(ie_encoder.outputs.keys())
input_names = eval(args.input_names)
output_names = eval(args.output_names)
if set(input_names_model) != set(input_names) or set(output_names_model) != set(output_names):
log.error("Input or Output names do not match")
log.error(" Network input->output names: {}->{}".format(input_names_model, output_names_model))
log.error(" Expected (from the demo cmd-line) input->output names: {}->{}".format(input_names, output_names))
raise Exception("Unexpected network input or output names")
# load model to the device
log.info("Loading model to the {}".format(args.device))
ie_encoder_exec = ie.load_network(network=ie_encoder, device_name=args.device)
# loop on user's questions
while True:
question = input('Type question (enter to exit):')
if not question:
break
q_tokens_id, _ = text_to_tokens(question, vocab)
# maximum number of tokens that can be processed by network at once
max_length = ie_encoder.inputs[input_names[0]].shape[1]
# calculate number of tokens for context in each inference request.
# reserve 3 positions for special tokens
# [CLS] q_tokens [SEP] c_tokens [SEP]
c_wnd_len = max_length - (len(q_tokens_id) + 3)
# token num between two neighbour context windows
# 1/2 means that context windows are overlapped by half
c_stride = c_wnd_len // 2
t0 = time.time()
t_count = 0
# array of answers from each window
answers = []
# init a window to iterate over context
c_s, c_e = 0, min(c_wnd_len, len(c_tokens_id))
# iterate while context window is not empty
while c_e > c_s:
# form the request
tok_cls = vocab['[CLS]']
tok_sep = vocab['[SEP]']
input_ids = [tok_cls] + q_tokens_id + [tok_sep] + c_tokens_id[c_s:c_e] + [tok_sep]
token_type_ids = [0] + [0] * len(q_tokens_id) + [0] + [1] * (c_e - c_s) + [0]
attention_mask = [1] * len(input_ids)
# pad the rest of the request
pad_len = max_length - len(input_ids)
input_ids += [0] * pad_len
token_type_ids += [0] * pad_len
attention_mask += [0] * pad_len
# create numpy inputs for IE
inputs = {
input_names[0]: np.array([input_ids], dtype=np.int32),
input_names[1]: np.array([attention_mask], dtype=np.int32),
input_names[2]: np.array([token_type_ids], dtype=np.int32),
}
t_start = time.time()
# infer by IE
res = ie_encoder_exec.infer(inputs=inputs)
t_end = time.time()
t_count += 1
log.info("Sequence of length {} is processed with {:0.2f} sentence/sec ({:0.2} sec per request)".format(
max_length,
1 / (t_end - t_start),
t_end - t_start
))
# get start-end scores for context
def get_score(name):
out = np.exp(res[name].reshape((max_length,)))
return out / out.sum(axis=-1)
score_s = get_score(output_names[0])
score_e = get_score(output_names[1])
# get 'no-answer' score (not valid if model has been fine-tuned on squad1.x)
if args.model_squad_ver.split('.')[0] == '1':
score_na = 0
else:
score_na = score_s[0] * score_e[0]
# find product of all start-end combinations to find the best one
c_s_idx = len(q_tokens_id) + 2 # index of first context token in tensor
c_e_idx = max_length - (1 + pad_len) # index of last+1 context token in tensor
score_mat = np.matmul(
score_s[c_s_idx:c_e_idx].reshape((c_e - c_s, 1)),
score_e[c_s_idx:c_e_idx].reshape((1, c_e - c_s))
)
# reset candidates with end before start
score_mat = np.triu(score_mat)
# reset long candidates (>max_answer_token_num)
score_mat = np.tril(score_mat, args.max_answer_token_num - 1)
# find the best start-end pair
max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1])
max_score = score_mat[max_s, max_e] * (1 - score_na)
# convert to context text start-end index
max_s = c_tokens_se[c_s + max_s][0]
max_e = c_tokens_se[c_s + max_e][1]
# check that answers list does not have duplicates (because of context windows overlapping)
same = [i for i, a in enumerate(answers) if a[1] == max_s and a[2] == max_e]
if same:
assert len(same) == 1
# update exist answer record
a = answers[same[0]]
answers[same[0]] = (max(max_score, a[0]), max_s, max_e)
else:
# add new record
answers.append((max_score, max_s, max_e))
# check that context window reach the end
if c_e == len(c_tokens_id):
break
# move to next window position
c_s = min(c_s + c_stride, len(c_tokens_id))
c_e = min(c_s + c_wnd_len, len(c_tokens_id))
t1 = time.time()
log.info("{} requests by {} length are processed by {:0.2f}sec ({:0.2}sec per request)".format(
t_count,
max_length,
t1 - t0,
(t1 - t0) / t_count
))
# print top 3 results
answers = list(sorted(answers, key=lambda x: -x[0]))
for score, s, e in answers[:3]:
log.info("---answer: {:0.2f} {}".format(score, context[s:e]))
c_s, c_e = find_sentence_range(context, s, e)
log.info(" " + context[c_s:s] + "\033[91m" + context[s:e] + '\033[0m' + context[e:c_e])
|
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
# load vocabulary file for model
log.info("Loading vocab file:\t{}".format(args.vocab))
with open(args.vocab, "r", encoding="utf-8") as r:
vocab = dict((t.rstrip("\n"), i) for i, t in enumerate(r.readlines()))
log.info("{} tokens loaded".format(len(vocab)))
# get context as a string (as we might need it's length for the sequence reshape)
context = get_context(args)
# encode context into token ids list
c_tokens_id, c_tokens_se = text_to_tokens(context, vocab)
log.info("Initializing Inference Engine")
ie = IECore()
log.info("Device is {}".format(args.device))
version = ie.get_versions(args.device)[args.device]
version_str = "{}.{}.{}".format(version.major, version.minor, version.build_number)
log.info("Plugin version is {}".format(version_str))
# read IR
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
ie_encoder = ie.read_network(model=model_xml, weights=model_bin)
if args.reshape:
# reshape the sequence length to the context + maximum question length (in tokens)
first_input_layer = next(iter(ie_encoder.inputs))
c = ie_encoder.inputs[first_input_layer].shape[1]
# find the closest multiple of 64
seq = min(c, round((len(c_tokens_id) + args.max_question_token_num) / 64) * 64)
if seq < c:
input_info = list(ie_encoder.inputs)
new_shapes = dict([])
for i in input_info:
n, c = ie_encoder.inputs[i].shape
new_shapes[i] = [n, seq]
log.info("Reshaped input {} from {} to the {}".format(i, ie_encoder.inputs[i].shape, new_shapes[i]))
log.info("Attempting to reshape the network to the modified inputs...")
try:
ie_encoder.reshape(new_shapes)
log.info("Successful!")
except:
log.info("Failed...reloading the network")
ie_encoder = ie.read_network(model=model_xml, weights=model_bin)
log.info("Done")
else:
log.info("Skipping network reshaping,"
" as (context length + max question length) exceeds the current (input) network sequence length")
# check input and output names
input_names_model = list(ie_encoder.inputs.keys())
output_names_model = list(ie_encoder.outputs.keys())
input_names = eval(args.input_names)
output_names = eval(args.output_names)
if set(input_names_model) != set(input_names) or set(output_names_model) != set(output_names):
log.error("Input or Output names do not match")
log.error(" Network input->output names: {}->{}".format(input_names_model, output_names_model))
log.error(" Expected (from the demo cmd-line) input->output names: {}->{}".format(input_names, output_names))
raise Exception("Unexpected network input or output names")
# load model to the device
log.info("Loading model to the {}".format(args.device))
ie_encoder_exec = ie.load_network(network=ie_encoder, device_name=args.device)
# loop on user's questions
while True:
question = input('Type question (enter to exit):')
if not question:
break
q_tokens_id, _ = text_to_tokens(question, vocab)
# maximum number of tokens that can be processed by network at once
max_length = ie_encoder.inputs[input_names[0]].shape[1]
# calculate number of tokens for context in each inference request.
# reserve 3 positions for special tokens
# [CLS] q_tokens [SEP] c_tokens [SEP]
c_wnd_len = max_length - (len(q_tokens_id) + 3)
# token num between two neighbour context windows
# 1/2 means that context windows are overlapped by half
c_stride = c_wnd_len // 2
t0 = time.time()
t_count = 0
# array of answers from each window
answers = []
# init a window to iterate over context
c_s, c_e = 0, min(c_wnd_len, len(c_tokens_id))
# iterate while context window is not empty
while c_e > c_s:
# form the request
tok_cls = vocab['[CLS]']
tok_sep = vocab['[SEP]']
input_ids = [tok_cls] + q_tokens_id + [tok_sep] + c_tokens_id[c_s:c_e] + [tok_sep]
token_type_ids = [0] + [0] * len(q_tokens_id) + [0] + [1] * (c_e - c_s) + [0]
attention_mask = [1] * len(input_ids)
# pad the rest of the request
pad_len = max_length - len(input_ids)
input_ids += [0] * pad_len
token_type_ids += [0] * pad_len
attention_mask += [0] * pad_len
# create numpy inputs for IE
inputs = {
input_names[0]: np.array([input_ids], dtype=np.int32),
input_names[1]: np.array([attention_mask], dtype=np.int32),
input_names[2]: np.array([token_type_ids], dtype=np.int32),
}
t_start = time.time()
# infer by IE
res = ie_encoder_exec.infer(inputs=inputs)
t_end = time.time()
t_count += 1
log.info("Sequence of length {} is processed with {:0.2f} request/sec ({:0.2} sec per request)".format(
max_length,
1 / (t_end - t_start),
t_end - t_start
))
# get start-end scores for context
def get_score(name):
out = np.exp(res[name].reshape((max_length,)))
return out / out.sum(axis=-1)
score_s = get_score(output_names[0])
score_e = get_score(output_names[1])
# get 'no-answer' score (not valid if model has been fine-tuned on squad1.x)
if args.model_squad_ver.split('.')[0] == '1':
score_na = 0
else:
score_na = score_s[0] * score_e[0]
# find product of all start-end combinations to find the best one
c_s_idx = len(q_tokens_id) + 2 # index of first context token in tensor
c_e_idx = max_length - (1 + pad_len) # index of last+1 context token in tensor
score_mat = np.matmul(
score_s[c_s_idx:c_e_idx].reshape((c_e - c_s, 1)),
score_e[c_s_idx:c_e_idx].reshape((1, c_e - c_s))
)
# reset candidates with end before start
score_mat = np.triu(score_mat)
# reset long candidates (>max_answer_token_num)
score_mat = np.tril(score_mat, args.max_answer_token_num - 1)
# find the best start-end pair
max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1])
max_score = score_mat[max_s, max_e] * (1 - score_na)
# convert to context text start-end index
max_s = c_tokens_se[c_s + max_s][0]
max_e = c_tokens_se[c_s + max_e][1]
# check that answers list does not have duplicates (because of context windows overlapping)
same = [i for i, a in enumerate(answers) if a[1] == max_s and a[2] == max_e]
if same:
assert len(same) == 1
# update exist answer record
a = answers[same[0]]
answers[same[0]] = (max(max_score, a[0]), max_s, max_e)
else:
# add new record
answers.append((max_score, max_s, max_e))
# check that context window reach the end
if c_e == len(c_tokens_id):
break
# move to next window position
c_s = min(c_s + c_stride, len(c_tokens_id))
c_e = min(c_s + c_wnd_len, len(c_tokens_id))
t1 = time.time()
log.info("{} requests by {} length are processed by {:0.2f}sec ({:0.2}sec per request)".format(
t_count,
max_length,
t1 - t0,
(t1 - t0) / t_count
))
# print top 3 results
answers = list(sorted(answers, key=lambda x: -x[0]))
for score, s, e in answers[:3]:
log.info("---answer: {:0.2f} {}".format(score, context[s:e]))
c_s, c_e = find_sentence_range(context, s, e)
log.info(" " + context[c_s:s] + "\033[91m" + context[s:e] + '\033[0m' + context[e:c_e])
|
15,402 |
def get_entities(onewirehub: OneWireHub, config):
"""Get a list of entities."""
entities = []
device_names = {}
if CONF_NAMES in config:
if isinstance(config[CONF_NAMES], dict):
device_names = config[CONF_NAMES]
conf_type = config[CONF_TYPE]
# We have an owserver on a remote(or local) host/port
if conf_type == CONF_TYPE_OWSERVER:
for device in onewirehub.devices:
family = device["family"]
device_type = device["type"]
device_id = os.path.split(os.path.split(device["path"])[0])[1]
dev_type = "std"
device_path = device["path"]
if "EF" in family:
dev_type = "HobbyBoard"
family = device_type
if "7E" in family:
dev_type = "EDS00xx"
family = onewirehub.owproxy.read(f"{device_path}device_type").decode()
if family not in hb_info_from_type(dev_type):
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
device_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, device_id)},
"manufacturer": "Maxim Integrated",
"model": device_type,
"name": device_id,
}
for entity_specs in hb_info_from_type(dev_type)[family]:
if entity_specs["type"] == SENSOR_TYPE_MOISTURE:
s_id = entity_specs["path"].split(".")[1]
is_leaf = int(
onewirehub.owproxy.read(
f"{device['path']}moisture/is_leaf.{s_id}"
).decode()
)
if is_leaf:
entity_specs["type"] = SENSOR_TYPE_WETNESS
entity_specs["name"] = f"Wetness {s_id}"
entity_path = os.path.join(
os.path.split(device["path"])[0], entity_specs["path"]
)
entities.append(
OneWireProxySensor(
device_id=device_id,
device_name=device_names.get(device_id, device_id),
device_info=device_info,
entity_path=entity_path,
entity_specs=entity_specs,
owproxy=onewirehub.owproxy,
)
)
# We have a raw GPIO ow sensor on a Pi
elif conf_type == CONF_TYPE_SYSBUS:
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using SysBus %s", base_dir)
for p1sensor in onewirehub.devices:
family = p1sensor.mac_address[:2]
sensor_id = f"{family}-{p1sensor.mac_address[2:]}"
if family not in DEVICE_SUPPORT_SYSBUS:
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
sensor_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, sensor_id)},
"manufacturer": "Maxim Integrated",
"model": family,
"name": sensor_id,
}
device_file = f"/sys/bus/w1/devices/{sensor_id}/w1_slave"
entities.append(
OneWireDirectSensor(
device_names.get(sensor_id, sensor_id),
device_file,
device_info,
p1sensor,
)
)
if not entities:
_LOGGER.error(
"No onewire sensor found. Check if dtoverlay=w1-gpio "
"is in your /boot/config.txt. "
"Check the mount_dir parameter if it's defined"
)
# We have an owfs mounted
else: # pragma: no cover
# This part of the implementation does not conform to policy regarding 3rd-party libraries, and will not longer be updated.
# https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using OWFS %s", base_dir)
_LOGGER.warning(
"The OWFS implementation of 1-Wire sensors is deprecated, "
"and should be migrated to OWServer (on localhost:4304). "
"If migration to OWServer is not feasible on your installation, "
"please raise an issue at https://github.com/home-assistant/core/issues/new"
"?title=Unable%20to%20migrate%20onewire%20from%20OWFS%20to%20OWServer",
)
for family_file_path in glob(os.path.join(base_dir, "*", "family")):
with open(family_file_path) as family_file:
family = family_file.read()
if "EF" in family:
continue
if family in DEVICE_SENSORS:
for sensor_key, sensor_value in DEVICE_SENSORS[family].items():
sensor_id = os.path.split(os.path.split(family_file_path)[0])[1]
device_file = os.path.join(
os.path.split(family_file_path)[0], sensor_value
)
entities.append(
OneWireOWFSSensor(
device_names.get(sensor_id, sensor_id),
device_file,
sensor_key,
)
)
return entities
|
def get_entities(onewirehub: OneWireHub, config):
"""Get a list of entities."""
entities = []
device_names = {}
if CONF_NAMES in config:
if isinstance(config[CONF_NAMES], dict):
device_names = config[CONF_NAMES]
conf_type = config[CONF_TYPE]
# We have an owserver on a remote(or local) host/port
if conf_type == CONF_TYPE_OWSERVER:
for device in onewirehub.devices:
family = device["family"]
device_type = device["type"]
device_id = os.path.split(os.path.split(device["path"])[0])[1]
device_sub_type = "std"
device_path = device["path"]
if "EF" in family:
dev_type = "HobbyBoard"
family = device_type
if "7E" in family:
dev_type = "EDS00xx"
family = onewirehub.owproxy.read(f"{device_path}device_type").decode()
if family not in hb_info_from_type(dev_type):
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
device_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, device_id)},
"manufacturer": "Maxim Integrated",
"model": device_type,
"name": device_id,
}
for entity_specs in hb_info_from_type(dev_type)[family]:
if entity_specs["type"] == SENSOR_TYPE_MOISTURE:
s_id = entity_specs["path"].split(".")[1]
is_leaf = int(
onewirehub.owproxy.read(
f"{device['path']}moisture/is_leaf.{s_id}"
).decode()
)
if is_leaf:
entity_specs["type"] = SENSOR_TYPE_WETNESS
entity_specs["name"] = f"Wetness {s_id}"
entity_path = os.path.join(
os.path.split(device["path"])[0], entity_specs["path"]
)
entities.append(
OneWireProxySensor(
device_id=device_id,
device_name=device_names.get(device_id, device_id),
device_info=device_info,
entity_path=entity_path,
entity_specs=entity_specs,
owproxy=onewirehub.owproxy,
)
)
# We have a raw GPIO ow sensor on a Pi
elif conf_type == CONF_TYPE_SYSBUS:
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using SysBus %s", base_dir)
for p1sensor in onewirehub.devices:
family = p1sensor.mac_address[:2]
sensor_id = f"{family}-{p1sensor.mac_address[2:]}"
if family not in DEVICE_SUPPORT_SYSBUS:
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
sensor_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, sensor_id)},
"manufacturer": "Maxim Integrated",
"model": family,
"name": sensor_id,
}
device_file = f"/sys/bus/w1/devices/{sensor_id}/w1_slave"
entities.append(
OneWireDirectSensor(
device_names.get(sensor_id, sensor_id),
device_file,
device_info,
p1sensor,
)
)
if not entities:
_LOGGER.error(
"No onewire sensor found. Check if dtoverlay=w1-gpio "
"is in your /boot/config.txt. "
"Check the mount_dir parameter if it's defined"
)
# We have an owfs mounted
else: # pragma: no cover
# This part of the implementation does not conform to policy regarding 3rd-party libraries, and will not longer be updated.
# https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using OWFS %s", base_dir)
_LOGGER.warning(
"The OWFS implementation of 1-Wire sensors is deprecated, "
"and should be migrated to OWServer (on localhost:4304). "
"If migration to OWServer is not feasible on your installation, "
"please raise an issue at https://github.com/home-assistant/core/issues/new"
"?title=Unable%20to%20migrate%20onewire%20from%20OWFS%20to%20OWServer",
)
for family_file_path in glob(os.path.join(base_dir, "*", "family")):
with open(family_file_path) as family_file:
family = family_file.read()
if "EF" in family:
continue
if family in DEVICE_SENSORS:
for sensor_key, sensor_value in DEVICE_SENSORS[family].items():
sensor_id = os.path.split(os.path.split(family_file_path)[0])[1]
device_file = os.path.join(
os.path.split(family_file_path)[0], sensor_value
)
entities.append(
OneWireOWFSSensor(
device_names.get(sensor_id, sensor_id),
device_file,
sensor_key,
)
)
return entities
|
13,671 |
def _generate_not_activated_message(user):
"""
Generates the message displayed on the sign-in screen when a learner attempts to access the
system with an inactive account.
"""
support_url = configuration_helpers.get_value(
'SUPPORT_SITE_LINK',
settings.SUPPORT_SITE_LINK
)
platform_name = configuration_helpers.get_value(
'PLATFORM_NAME',
settings.PLATFORM_NAME
)
not_activated_msg_template = _(u'In order to sign in, you need to activate your account.<br /><br />'
'We just sent an activation link to <strong>{email}</strong>. If ' # pylint: disable=unicode-format-string
'you do not receive an email, check your spam folders or '
'<a href="{support_url}">contact {platform} Support</a>.') # pylint: disable=unicode-format-string
not_activated_message = not_activated_msg_template.format(
email=user.email,
support_url=support_url,
platform=platform_name
)
return not_activated_message
|
def _generate_not_activated_message(user):
"""
Generates the message displayed on the sign-in screen when a learner attempts to access the
system with an inactive account.
"""
support_url = configuration_helpers.get_value(
'SUPPORT_SITE_LINK',
settings.SUPPORT_SITE_LINK
)
platform_name = configuration_helpers.get_value(
'PLATFORM_NAME',
settings.PLATFORM_NAME
)
not_activated_msg_template = _(u'In order to sign in, you need to activate your account.<br /><br />'
'We just sent an activation link to <strong>{email}</strong>. If ' # pylint: disable=unicode-format-string
u'you do not receive an email, check your spam folders or '
'<a href="{support_url}">contact {platform} Support</a>.') # pylint: disable=unicode-format-string
not_activated_message = not_activated_msg_template.format(
email=user.email,
support_url=support_url,
platform=platform_name
)
return not_activated_message
|
6,059 |
def installDiracOS(releaseConfig):
"""
Install the DIRAC os.
:param str releaseConfig: the version of the DIRAC OS
"""
diracos, diracOSVersion = releaseConfig.getDiracOSVersion(cliParams.diracOSVersion)
if not diracOSVersion:
logERROR("No diracos defined")
return False
tarsURL = None
if cliParams.installSource:
tarsURL = cliParams.installSource
else:
# if : is not exists in diracos version, we use diracos from DIRAC
if diracos == 'diracos':
tarsURL = releaseConfig.getDiracOsLocation(diracOSFromDIRAC=True)['Value']
else:
tarsURL = releaseConfig.getDiracOsLocation()['Value']
if not tarsURL:
tarsURL = releaseConfig.getTarsLocation('DIRAC')['Value']
logWARN("DIRACOS location is not specified using %s" % tarsURL)
if not downloadAndExtractTarball(tarsURL, diracos, diracOSVersion, cache=True):
return False
logNOTICE("Fixing externals paths...")
fixBuildPaths()
logNOTICE("Running externals post install...")
checkPlatformAliasLink()
return True
|
def installDiracOS(releaseConfig):
"""
Install the DIRAC os.
:param str releaseConfig: the version of the DIRAC OS
"""
diracos, diracOSVersion = releaseConfig.getDiracOSVersion(cliParams.diracOSVersion)
if not diracOSVersion:
logERROR("No diracos defined")
return False
tarsURL = None
if cliParams.installSource:
tarsURL = cliParams.installSource
else:
# if : is not exists in diracos version, we use diracos from DIRAC
if diracos.lower() == 'diracos':
tarsURL = releaseConfig.getDiracOsLocation(diracOSFromDIRAC=True)['Value']
else:
tarsURL = releaseConfig.getDiracOsLocation()['Value']
if not tarsURL:
tarsURL = releaseConfig.getTarsLocation('DIRAC')['Value']
logWARN("DIRACOS location is not specified using %s" % tarsURL)
if not downloadAndExtractTarball(tarsURL, diracos, diracOSVersion, cache=True):
return False
logNOTICE("Fixing externals paths...")
fixBuildPaths()
logNOTICE("Running externals post install...")
checkPlatformAliasLink()
return True
|
53,321 |
def uniform_nullpoint_find(
x_range=[0, 1],
y_range=[0, 1],
z_range=[0, 1],
func=(lambda x, y, z: [x, y, z]),
precision=[0.05, 0.05, 0.05],
MAX_ITERATIONS=500,
err=1e-10,
):
r"""
Returns an array of nullpoint object, representing
the nullpoints of the given vector space.
Parameters
----------
x_range: array_like
A 1 by 2 array containing the range of x-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
y_range: array_like
A 1 by 2 array containing the range of y-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
z_range: array_like
A 1 by 2 array containing the range of z-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
func: <class 'function'>
A function that takes in 3 arguments, respectively representing a x, y, and z
coordinate of a point and returns the vector value for that point in the form
of a 1 by 3 array.
precision: array_like
A 1 by 3 array containing the approximate precision values for each dimension,
in the case where uniform arrays are being used.
The default value is [0.05, 0.05, 0.05].
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
Notes
----------
This method is described by :cite:t:`haynes:2007`.
"""
vspace = _vector_space(
None,
None,
None,
x_range,
y_range,
z_range,
None,
None,
None,
func,
precision,
)
return _vspace_iterator(vspace, MAX_ITERATIONS, err)
|
def uniform_nullpoint_find(
x_range=[0, 1],
y_range=[0, 1],
z_range=[0, 1],
func=(lambda x, y, z: [x, y, z]),
precision=[0.05, 0.05, 0.05],
MAX_ITERATIONS=500,
err=1e-10,
):
r"""
Return an array of `~plasmapy.analysis.nullpoint.NullPoint` objects, representing
the null points of the given vector space.
Parameters
----------
x_range: array_like
A 1 by 2 array containing the range of x-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
y_range: array_like
A 1 by 2 array containing the range of y-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
z_range: array_like
A 1 by 2 array containing the range of z-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
func: <class 'function'>
A function that takes in 3 arguments, respectively representing a x, y, and z
coordinate of a point and returns the vector value for that point in the form
of a 1 by 3 array.
precision: array_like
A 1 by 3 array containing the approximate precision values for each dimension,
in the case where uniform arrays are being used.
The default value is [0.05, 0.05, 0.05].
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
Notes
----------
This method is described by :cite:t:`haynes:2007`.
"""
vspace = _vector_space(
None,
None,
None,
x_range,
y_range,
z_range,
None,
None,
None,
func,
precision,
)
return _vspace_iterator(vspace, MAX_ITERATIONS, err)
|
3,799 |
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
precision=6, equal_nan=True, equal_inf=True):
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_
x = np.asanyarray(x)
y = np.asanyarray(y)
# original array for output formatting
ox, oy = x, y
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPefdgFDG'
def istime(x):
return x.dtype.char in "Mm"
def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
"""Handling nan/inf.
Combine results of running func on x and y, checking that they are True
at the same locations.
"""
__tracebackhide__ = True # Hide traceback for py.test
x_id = func(x)
y_id = func(y)
# We include work-arounds here to handle three types of slightly
# pathological ndarray subclasses:
# (1) all() on `masked` array scalars can return masked arrays, so we
# use != True
# (2) __eq__ on some ndarray subclasses returns Python booleans
# instead of element-wise comparisons, so we cast to bool_() and
# use isinstance(..., bool) checks
# (3) subclasses with bare-bones __array_function__ implementations may
# not implement np.all(), so favor using the .all() method
# We are not committed to supporting such subclasses, but it's nice to
# support them if possible.
if bool_(x_id == y_id).all() != True:
msg = build_err_msg([x, y],
err_msg + '\nx and y %s location mismatch:'
% (hasval), verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
# If there is a scalar, then here we know the array has the same
# flag as it everywhere, so we should return the scalar flag.
if isinstance(x_id, bool) or x_id.ndim == 0:
return bool_(x_id)
elif isinstance(y_id, bool) or y_id.ndim == 0:
return bool_(y_id)
else:
return y_id
try:
cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ f'\n(shapes {x.shape}, {y.shape} mismatch)',
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
flagged = bool_(False)
if isnumber(x) and isnumber(y):
if equal_nan:
flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
if equal_inf:
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == +inf,
hasval='+inf')
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == -inf,
hasval='-inf')
elif istime(x) and istime(y):
# If one is datetime64 and the other timedelta64 there is no point
if equal_nan and x.dtype.type == y.dtype.type:
flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
if flagged.ndim > 0:
x, y = x[~flagged], y[~flagged]
# Only do the comparison if actual values are left
if x.size == 0:
return
elif flagged:
# no sense doing comparison if everything is flagged.
return
val = comparison(x, y)
if isinstance(val, bool):
cond = val
reduced = array([val])
else:
reduced = val.ravel()
cond = reduced.all()
# The below comparison is a hack to ensure that fully masked
# results, for which val.ravel().all() returns np.ma.masked,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
n_mismatch = reduced.size - reduced.sum(dtype=intp)
n_elements = flagged.size if flagged.ndim != 0 else reduced.size
percent_mismatch = 100 * n_mismatch / n_elements
remarks = [
'Mismatched elements: {} / {} ({:.3g}%)'.format(
n_mismatch, n_elements, percent_mismatch)]
with errstate(invalid='ignore', divide='ignore',
over='ignore', under='ignore'):
# ignore errors for non-numeric types
with contextlib.suppress(TypeError):
error = abs(x - y)
max_abs_error = max(error)
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max absolute difference: '
+ str(max_abs_error))
else:
remarks.append('Max absolute difference: '
+ array2string(max_abs_error))
# note: this definition of relative error matches that one
# used by assert_allclose (found in np.isclose)
# Filter values where the divisor would be zero
nonzero = bool_(y != 0)
if all(~nonzero):
max_rel_error = array(inf)
else:
max_rel_error = max(error[nonzero] / abs(y[nonzero]))
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max relative difference: '
+ str(max_rel_error))
else:
remarks.append('Max relative difference: '
+ array2string(max_rel_error))
err_msg += '\n' + '\n'.join(remarks)
msg = build_err_msg([ox, oy], err_msg,
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
except ValueError:
import traceback
efmt = traceback.format_exc()
header = f'error during assertion:\n\n{efmt}\n\n{header}'
msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise ValueError(msg)
|
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
precision=6, equal_nan=True, equal_inf=True):
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_
x = np.asanyarray(x)
y = np.asanyarray(y)
# original array for output formatting
ox, oy = x, y
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPefdgFDG'
def istime(x):
return x.dtype.char in "Mm"
def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
"""Handling nan/inf.
Combine results of running func on x and y, checking that they are True
at the same locations.
"""
__tracebackhide__ = True # Hide traceback for py.test
x_id = func(x)
y_id = func(y)
# We include work-arounds here to handle three types of slightly
# pathological ndarray subclasses:
# (1) all() on `masked` array scalars can return masked arrays, so we
# use != True
# (2) __eq__ on some ndarray subclasses returns Python booleans
# instead of element-wise comparisons, so we cast to bool_() and
# use isinstance(..., bool) checks
# (3) subclasses with bare-bones __array_function__ implementations may
# not implement np.all(), so favor using the .all() method
# We are not committed to supporting such subclasses, but it's nice to
# support them if possible.
if bool_(x_id == y_id).all() != True:
msg = build_err_msg([x, y],
err_msg + '\nx and y %s location mismatch:'
% (hasval), verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
# If there is a scalar, then here we know the array has the same
# flag as it everywhere, so we should return the scalar flag.
if isinstance(x_id, bool) or x_id.ndim == 0:
return bool_(x_id)
elif isinstance(y_id, bool) or y_id.ndim == 0:
return bool_(y_id)
else:
return y_id
try:
cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ f'\n(shapes {x.shape}, {y.shape} mismatch)',
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
flagged = bool_(False)
if isnumber(x) and isnumber(y):
if equal_nan:
flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
if equal_inf:
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == +inf,
hasval='+inf')
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == -inf,
hasval='-inf')
elif istime(x) and istime(y):
# If one is datetime64 and the other timedelta64 there is no point
if equal_nan and x.dtype.type == y.dtype.type:
flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
if flagged.ndim > 0:
x, y = x[~flagged], y[~flagged]
# Only do the comparison if actual values are left
if x.size == 0:
return
elif flagged:
# no sense doing comparison if everything is flagged.
return
val = comparison(x, y)
if isinstance(val, bool):
cond = val
reduced = array([val])
else:
reduced = val.ravel()
cond = reduced.all()
# The below comparison is a hack to ensure that fully masked
# results, for which val.ravel().all() returns np.ma.masked,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
n_mismatch = reduced.size - reduced.sum(dtype=intp)
n_elements = flagged.size if flagged.ndim != 0 else reduced.size
percent_mismatch = 100 * n_mismatch / n_elements
remarks = [
'Mismatched elements: {} / {} ({:.3g}%)'.format(
n_mismatch, n_elements, percent_mismatch)]
with errstate(all='ignore'):
# ignore errors for non-numeric types
with contextlib.suppress(TypeError):
error = abs(x - y)
max_abs_error = max(error)
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max absolute difference: '
+ str(max_abs_error))
else:
remarks.append('Max absolute difference: '
+ array2string(max_abs_error))
# note: this definition of relative error matches that one
# used by assert_allclose (found in np.isclose)
# Filter values where the divisor would be zero
nonzero = bool_(y != 0)
if all(~nonzero):
max_rel_error = array(inf)
else:
max_rel_error = max(error[nonzero] / abs(y[nonzero]))
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max relative difference: '
+ str(max_rel_error))
else:
remarks.append('Max relative difference: '
+ array2string(max_rel_error))
err_msg += '\n' + '\n'.join(remarks)
msg = build_err_msg([ox, oy], err_msg,
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
except ValueError:
import traceback
efmt = traceback.format_exc()
header = f'error during assertion:\n\n{efmt}\n\n{header}'
msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise ValueError(msg)
|
32,334 |
def from_bytes_to_text(mode: str, binary: bytes) -> str:
"""
Make a text from a binary.
:param mode: How to convert the binary to text.
:return: A text converted from the binary.
"""
if mode == 'text-based-protocol':
# Keep all the charactors used in text based protocols
# * The unicodedata category names of control code start with C
return ''.join(' '
if c == u'\ufffd'
or (c not in ('\n', '\r', '\t') and unicodedata.category(c)[0] == 'C')
else c
for c in binary.decode('utf-8', errors='replace'))
elif mode == 'human-readable':
return binary.decode('utf-8', errors='replace_with_space')
else:
raise ValueError(f'Unknown text conversion mode: {mode}')
|
def from_bytes_to_text(mode: str, binary: bytes) -> str:
"""
Make a text from a binary.
:param mode: How to convert the binary to text.
:return: A text converted from the binary.
"""
if mode == 'text-based-protocol':
# Keep all the characters used in text based protocols
# * The unicodedata category names of control code start with C
return ''.join(' '
if c == u'\ufffd'
or (c not in ('\n', '\r', '\t') and unicodedata.category(c)[0] == 'C')
else c
for c in binary.decode('utf-8', errors='replace'))
elif mode == 'human-readable':
return binary.decode('utf-8', errors='replace_with_space')
else:
raise ValueError(f'Unknown text conversion mode: {mode}')
|
29,157 |
def url_unquote_plus(content):
"""Unquotes a string and replace plus signs by spaces, as required for
unquoting HTML form values using urllib.unquote_plus if run under Python 2
and urllib.parse.unquote_plus if run under Python 3.
Args:
content: str. The string to be unquoted.
Returns:
str. The unquoted string.
"""
try:
import urllib.parse as urlparse_unquote_plus
except ImportError:
import urllib as urlparse_unquote_plus
return urlparse_unquote_plus.unquote_plus(content)
|
def url_unquote_plus(content):
"""Unquotes a string and replace plus signs by spaces, as required for
unquoting HTML form values using urllib.unquote_plus if run under Python 2
and urllib.parse.unquote_plus if run under Python 3.
Args:
content: str. The string to be unquoted.
Returns:
str. The unquoted string.
"""
try:
import urllib.parse as url_parse
except ImportError:
import urllib as urlparse_unquote_plus
return urlparse_unquote_plus.unquote_plus(content)
|
37,806 |
def build(options: BuildOptions) -> None:
try:
subprocess.run(['docker', '--version'], check=True)
except Exception:
print('cibuildwheel: Docker not found. Docker is required to run Linux builds. '
'If you\'re building on Travis CI, add `services: [docker]` to your .travis.yml.'
'If you\'re building on Circle CI in Linux, add a `setup_remote_docker` step to your .circleci/config.yml',
file=sys.stderr)
sys.exit(2)
assert options.manylinux_images is not None
python_configurations = get_python_configurations(options.build_selector, options.architectures)
platforms = [
('cp', 'manylinux_x86_64', options.manylinux_images['x86_64']),
('cp', 'manylinux_i686', options.manylinux_images['i686']),
('cp', 'manylinux_aarch64', options.manylinux_images['aarch64']),
('cp', 'manylinux_ppc64le', options.manylinux_images['ppc64le']),
('cp', 'manylinux_s390x', options.manylinux_images['s390x']),
('pp', 'manylinux_x86_64', options.manylinux_images['pypy_x86_64']),
]
cwd = Path.cwd()
abs_package_dir = options.package_dir.resolve()
if cwd != abs_package_dir and cwd not in abs_package_dir.parents:
raise Exception('package_dir must be inside the working directory')
container_project_path = PurePath('/project')
container_package_dir = container_project_path / abs_package_dir.relative_to(cwd)
container_output_dir = PurePath('/output')
for implementation, platform_tag, docker_image in platforms:
platform_configs = [c for c in python_configurations if c.identifier.startswith(implementation) and c.identifier.endswith(platform_tag)]
if not platform_configs:
continue
try:
log.step(f'Starting Docker image {docker_image}...')
with DockerContainer(docker_image, simulate_32_bit=platform_tag.endswith('i686'), cwd=container_project_path) as docker:
log.step('Copying project into Docker...')
docker.copy_into(Path.cwd(), container_project_path)
if options.before_all:
log.step('Running before_all...')
env = docker.get_environment()
env['PATH'] = f'/opt/python/cp38-cp38/bin:{env["PATH"]}'
env['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
env = options.environment.as_dictionary(env, executor=docker.environment_executor)
before_all_prepared = prepare_command(options.before_all, project=container_project_path, package=container_package_dir)
docker.call(['sh', '-c', before_all_prepared], env=env)
for config in platform_configs:
log.build_start(config.identifier)
dependency_constraint_flags: List[PathOrStr] = []
if config.identifier.startswith("pp"):
# Patch PyPy to make sure headers get installed into a venv
patch_version = '_27' if config.version == '2.7' else ''
patch_path = resources_dir / f'pypy_venv{patch_version}.patch'
patch_docker_path = PurePath('/pypy_venv.patch')
docker.copy_into(patch_path, patch_docker_path)
try:
docker.call(['patch', '--force', '-p1', '-d', config.path, '-i', patch_docker_path])
except subprocess.CalledProcessError:
print("PyPy patch not applied", file=sys.stderr)
if options.dependency_constraints:
constraints_file = options.dependency_constraints.get_for_python_version(config.version)
container_constraints_file = PurePath('/constraints.txt')
docker.copy_into(constraints_file, container_constraints_file)
dependency_constraint_flags = ['-c', container_constraints_file]
log.step('Setting up build environment...')
env = docker.get_environment()
# put this config's python top of the list
python_bin = config.path / 'bin'
env['PATH'] = f'{python_bin}:{env["PATH"]}'
env = options.environment.as_dictionary(env, executor=docker.environment_executor)
# check config python and pip are still on PATH
which_python = docker.call(['which', 'python'], env=env, capture_output=True).strip()
if PurePath(which_python) != python_bin / 'python':
print("cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.", file=sys.stderr)
sys.exit(1)
which_pip = docker.call(['which', 'pip'], env=env, capture_output=True).strip()
if PurePath(which_pip) != python_bin / 'pip':
print("cibuildwheel: pip available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it.", file=sys.stderr)
sys.exit(1)
if options.before_build:
log.step('Running before_build...')
before_build_prepared = prepare_command(options.before_build, project=container_project_path, package=container_package_dir)
docker.call(['sh', '-c', before_build_prepared], env=env)
log.step('Building wheel...')
temp_dir = PurePath('/tmp/cibuildwheel')
built_wheel_dir = temp_dir / 'built_wheel'
docker.call(['rm', '-rf', built_wheel_dir])
docker.call(['mkdir', '-p', built_wheel_dir])
docker.call([
'pip', 'wheel',
container_package_dir,
'-w', built_wheel_dir,
'--no-deps',
*get_build_verbosity_extra_flags(options.build_verbosity)
], env=env)
built_wheel = docker.glob(built_wheel_dir, '*.whl')[0]
repaired_wheel_dir = temp_dir / 'repaired_wheel'
docker.call(['rm', '-rf', repaired_wheel_dir])
docker.call(['mkdir', '-p', repaired_wheel_dir])
if built_wheel.name.endswith('none-any.whl'):
raise NonPlatformWheelError()
if options.repair_command:
log.step('Repairing wheel...')
repair_command_prepared = prepare_command(options.repair_command, wheel=built_wheel, dest_dir=repaired_wheel_dir)
docker.call(['sh', '-c', repair_command_prepared], env=env)
else:
docker.call(['mv', built_wheel, repaired_wheel_dir])
repaired_wheels = docker.glob(repaired_wheel_dir, '*.whl')
if options.test_command and options.test_selector(config.identifier):
log.step('Testing wheel...')
# set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
docker.call(['pip', 'install', 'virtualenv', *dependency_constraint_flags], env=env)
venv_dir = PurePath(docker.call(['mktemp', '-d'], capture_output=True).strip()) / 'venv'
docker.call(['python', '-m', 'virtualenv', '--no-download', venv_dir], env=env)
virtualenv_env = env.copy()
virtualenv_env['PATH'] = f"{venv_dir / 'bin'}:{virtualenv_env['PATH']}"
if options.before_test:
before_test_prepared = prepare_command(options.before_test, project=container_project_path, package=container_package_dir)
docker.call(['sh', '-c', before_test_prepared], env=virtualenv_env)
# Install the wheel we just built
# Note: If auditwheel produced two wheels, it's because the earlier produced wheel
# conforms to multiple manylinux standards. These multiple versions of the wheel are
# functionally the same, differing only in name, wheel metadata, and possibly include
# different external shared libraries. so it doesn't matter which one we run the tests on.
# Let's just pick the first one.
wheel_to_test = repaired_wheels[0]
docker.call(['pip', 'install', str(wheel_to_test) + options.test_extras], env=virtualenv_env)
# Install any requirements to run the tests
if options.test_requires:
docker.call(['pip', 'install', *options.test_requires], env=virtualenv_env)
# Run the tests from a different directory
test_command_prepared = prepare_command(options.test_command, project=container_project_path, package=container_package_dir)
docker.call(['sh', '-c', test_command_prepared], cwd='/root', env=virtualenv_env)
# clean up test environment
docker.call(['rm', '-rf', venv_dir])
# move repaired wheels to output
docker.call(['mkdir', '-p', container_output_dir])
docker.call(['mv', *repaired_wheels, container_output_dir])
log.build_end()
log.step('Copying wheels back to host...')
# copy the output back into the host
docker.copy_out(container_output_dir, options.output_dir)
log.step_end()
except subprocess.CalledProcessError as error:
log.step_end_with_error(f'Command {error.cmd} failed with code {error.returncode}. {error.stdout}')
troubleshoot(options.package_dir, error)
sys.exit(1)
|
def build(options: BuildOptions) -> None:
try:
subprocess.run(['docker', '--version'], check=True, stdout=subprocess.DEVNULL)
except Exception:
print('cibuildwheel: Docker not found. Docker is required to run Linux builds. '
'If you\'re building on Travis CI, add `services: [docker]` to your .travis.yml.'
'If you\'re building on Circle CI in Linux, add a `setup_remote_docker` step to your .circleci/config.yml',
file=sys.stderr)
sys.exit(2)
assert options.manylinux_images is not None
python_configurations = get_python_configurations(options.build_selector, options.architectures)
platforms = [
('cp', 'manylinux_x86_64', options.manylinux_images['x86_64']),
('cp', 'manylinux_i686', options.manylinux_images['i686']),
('cp', 'manylinux_aarch64', options.manylinux_images['aarch64']),
('cp', 'manylinux_ppc64le', options.manylinux_images['ppc64le']),
('cp', 'manylinux_s390x', options.manylinux_images['s390x']),
('pp', 'manylinux_x86_64', options.manylinux_images['pypy_x86_64']),
]
cwd = Path.cwd()
abs_package_dir = options.package_dir.resolve()
if cwd != abs_package_dir and cwd not in abs_package_dir.parents:
raise Exception('package_dir must be inside the working directory')
container_project_path = PurePath('/project')
container_package_dir = container_project_path / abs_package_dir.relative_to(cwd)
container_output_dir = PurePath('/output')
for implementation, platform_tag, docker_image in platforms:
platform_configs = [c for c in python_configurations if c.identifier.startswith(implementation) and c.identifier.endswith(platform_tag)]
if not platform_configs:
continue
try:
log.step(f'Starting Docker image {docker_image}...')
with DockerContainer(docker_image, simulate_32_bit=platform_tag.endswith('i686'), cwd=container_project_path) as docker:
log.step('Copying project into Docker...')
docker.copy_into(Path.cwd(), container_project_path)
if options.before_all:
log.step('Running before_all...')
env = docker.get_environment()
env['PATH'] = f'/opt/python/cp38-cp38/bin:{env["PATH"]}'
env['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
env = options.environment.as_dictionary(env, executor=docker.environment_executor)
before_all_prepared = prepare_command(options.before_all, project=container_project_path, package=container_package_dir)
docker.call(['sh', '-c', before_all_prepared], env=env)
for config in platform_configs:
log.build_start(config.identifier)
dependency_constraint_flags: List[PathOrStr] = []
if config.identifier.startswith("pp"):
# Patch PyPy to make sure headers get installed into a venv
patch_version = '_27' if config.version == '2.7' else ''
patch_path = resources_dir / f'pypy_venv{patch_version}.patch'
patch_docker_path = PurePath('/pypy_venv.patch')
docker.copy_into(patch_path, patch_docker_path)
try:
docker.call(['patch', '--force', '-p1', '-d', config.path, '-i', patch_docker_path])
except subprocess.CalledProcessError:
print("PyPy patch not applied", file=sys.stderr)
if options.dependency_constraints:
constraints_file = options.dependency_constraints.get_for_python_version(config.version)
container_constraints_file = PurePath('/constraints.txt')
docker.copy_into(constraints_file, container_constraints_file)
dependency_constraint_flags = ['-c', container_constraints_file]
log.step('Setting up build environment...')
env = docker.get_environment()
# put this config's python top of the list
python_bin = config.path / 'bin'
env['PATH'] = f'{python_bin}:{env["PATH"]}'
env = options.environment.as_dictionary(env, executor=docker.environment_executor)
# check config python and pip are still on PATH
which_python = docker.call(['which', 'python'], env=env, capture_output=True).strip()
if PurePath(which_python) != python_bin / 'python':
print("cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.", file=sys.stderr)
sys.exit(1)
which_pip = docker.call(['which', 'pip'], env=env, capture_output=True).strip()
if PurePath(which_pip) != python_bin / 'pip':
print("cibuildwheel: pip available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it.", file=sys.stderr)
sys.exit(1)
if options.before_build:
log.step('Running before_build...')
before_build_prepared = prepare_command(options.before_build, project=container_project_path, package=container_package_dir)
docker.call(['sh', '-c', before_build_prepared], env=env)
log.step('Building wheel...')
temp_dir = PurePath('/tmp/cibuildwheel')
built_wheel_dir = temp_dir / 'built_wheel'
docker.call(['rm', '-rf', built_wheel_dir])
docker.call(['mkdir', '-p', built_wheel_dir])
docker.call([
'pip', 'wheel',
container_package_dir,
'-w', built_wheel_dir,
'--no-deps',
*get_build_verbosity_extra_flags(options.build_verbosity)
], env=env)
built_wheel = docker.glob(built_wheel_dir, '*.whl')[0]
repaired_wheel_dir = temp_dir / 'repaired_wheel'
docker.call(['rm', '-rf', repaired_wheel_dir])
docker.call(['mkdir', '-p', repaired_wheel_dir])
if built_wheel.name.endswith('none-any.whl'):
raise NonPlatformWheelError()
if options.repair_command:
log.step('Repairing wheel...')
repair_command_prepared = prepare_command(options.repair_command, wheel=built_wheel, dest_dir=repaired_wheel_dir)
docker.call(['sh', '-c', repair_command_prepared], env=env)
else:
docker.call(['mv', built_wheel, repaired_wheel_dir])
repaired_wheels = docker.glob(repaired_wheel_dir, '*.whl')
if options.test_command and options.test_selector(config.identifier):
log.step('Testing wheel...')
# set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
docker.call(['pip', 'install', 'virtualenv', *dependency_constraint_flags], env=env)
venv_dir = PurePath(docker.call(['mktemp', '-d'], capture_output=True).strip()) / 'venv'
docker.call(['python', '-m', 'virtualenv', '--no-download', venv_dir], env=env)
virtualenv_env = env.copy()
virtualenv_env['PATH'] = f"{venv_dir / 'bin'}:{virtualenv_env['PATH']}"
if options.before_test:
before_test_prepared = prepare_command(options.before_test, project=container_project_path, package=container_package_dir)
docker.call(['sh', '-c', before_test_prepared], env=virtualenv_env)
# Install the wheel we just built
# Note: If auditwheel produced two wheels, it's because the earlier produced wheel
# conforms to multiple manylinux standards. These multiple versions of the wheel are
# functionally the same, differing only in name, wheel metadata, and possibly include
# different external shared libraries. so it doesn't matter which one we run the tests on.
# Let's just pick the first one.
wheel_to_test = repaired_wheels[0]
docker.call(['pip', 'install', str(wheel_to_test) + options.test_extras], env=virtualenv_env)
# Install any requirements to run the tests
if options.test_requires:
docker.call(['pip', 'install', *options.test_requires], env=virtualenv_env)
# Run the tests from a different directory
test_command_prepared = prepare_command(options.test_command, project=container_project_path, package=container_package_dir)
docker.call(['sh', '-c', test_command_prepared], cwd='/root', env=virtualenv_env)
# clean up test environment
docker.call(['rm', '-rf', venv_dir])
# move repaired wheels to output
docker.call(['mkdir', '-p', container_output_dir])
docker.call(['mv', *repaired_wheels, container_output_dir])
log.build_end()
log.step('Copying wheels back to host...')
# copy the output back into the host
docker.copy_out(container_output_dir, options.output_dir)
log.step_end()
except subprocess.CalledProcessError as error:
log.step_end_with_error(f'Command {error.cmd} failed with code {error.returncode}. {error.stdout}')
troubleshoot(options.package_dir, error)
sys.exit(1)
|
39,168 |
def _make_version_file(version, sha):
sha = 'Unknown' if sha is None else sha
version_path = ROOT_DIR / 'torchaudio' / 'version.py'
with open(version_path, 'w') as f:
f.write(f"__version__ = '{version}'\n")
f.write(f"git_version = '{sha}'\n")
|
def _make_version_file(version, sha):
sha = sha or 'Unknown'
version_path = ROOT_DIR / 'torchaudio' / 'version.py'
with open(version_path, 'w') as f:
f.write(f"__version__ = '{version}'\n")
f.write(f"git_version = '{sha}'\n")
|
44,076 |
def contract_tensors(
tensors: Sequence,
communication_graph: MultiDiGraph,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
use_opt_einsum: bool = False,
):
"""Contract tensors according to the edges specified in the communication graph.
This operation is differentiable. The ``prepare_nodes`` and ``measure_nodes`` arguments are both
sequences of size ``len(communication_graph.nodes)`` that describe the order of indices in the
``tensors`` with respect to to the :class:`~.PrepareNode` and :class`~.MeasureNode` edges in the
communication graph.
Args:
tensors (Sequence): the tensors to be contracted
communication_graph (MultiDiGraph): the communication graph determining connectivity between
the tensors
prepare_nodes (Sequence[PrepareNode]): a sequence of size ``len(communication_graph.nodes)``
that determines the order of preparation indices in each tensor
measure_nodes (Sequence[MeasureNode]): a sequence of size ``len(communication_graph.nodes)``
that determines the order of measurement indices in each tensor
use_opt_einsum (bool): Determines whether to use the
[opt_einsum](https://dgasmith.github.io/opt_einsum/) package. This package is useful for
tensor contractions of large networks but must be installed separately using, e.g.,
``pip install opt_einsum``.
Returns:
float or array-like: the result of contracting the tensor network
**Example**
.. note::
This function is designed for use as part of the circuit cutting workflow. Check out the
:doc:`transforms </code/qml_transforms>` page for more details.
We first set up the tensors and their corresponding :class:`~.PrepareNode` and
:class`~.MeasureNode` orderings:
.. code-block:: python
t = [np.arange(4), np.arange(4, 8)]
p = [[], [qcut.PrepareNode(wires=0)]]
m = [[qcut.MeasureNode(wires=0)], []]
The communication graph describing edges in the tensor network must also be constructed:
.. code-block:: python
g = MultiDiGraph([(0, 1, {"pair": (m[0][0], p[1][0])})])
The network can then be contracted using:
>>> qcut.contract_tensors(t, g, p, m)
38
"""
# pylint: disable=import-outside-toplevel
if use_opt_einsum:
try:
from opt_einsum import contract, get_symbol
except ImportError as e:
raise ImportError(
"The opt_einsum package is required when use_opt_einsum is set to "
"True in the contract_tensors function. This package can be "
"installed using:\npip install opt_einsum"
) from e
else:
from string import ascii_letters as symbols
from pennylane.math import einsum as contract
def get_symbol(i):
if i >= len(symbols):
raise ValueError(
"Set the use_opt_einsum argument to True when applying more than "
f"{len(symbols)} wire cuts to a circuit"
)
return symbols[i]
ctr = 0
tensor_indxs = [""] * len(communication_graph.nodes)
meas_map = {}
for i, (node, prep) in enumerate(zip(communication_graph.nodes, prepare_nodes)):
predecessors = communication_graph.pred[node]
for p in prep:
for _, pred_edges in predecessors.items():
for pred_edge in pred_edges.values():
meas_op, prep_op = pred_edge["pair"]
if p is prep_op:
symb = get_symbol(ctr)
ctr += 1
tensor_indxs[i] += symb
meas_map[meas_op] = symb
for i, (node, meas) in enumerate(zip(communication_graph.nodes, measure_nodes)):
successors = communication_graph.succ[node]
for m in meas:
for _, succ_edges in successors.items():
for succ_edge in succ_edges.values():
meas_op, _ = succ_edge["pair"]
if m is meas_op:
symb = meas_map[meas_op]
tensor_indxs[i] += symb
eqn = ",".join(tensor_indxs)
kwargs = {} if use_opt_einsum else {"like": tensors[0]}
return contract(eqn, *tensors, **kwargs)
|
def contract_tensors(
tensors: Sequence,
communication_graph: MultiDiGraph,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
use_opt_einsum: bool = False,
):
"""Contract tensors according to the edges specified in the communication graph.
This operation is differentiable. The ``prepare_nodes`` and ``measure_nodes`` arguments are both
sequences of size ``len(communication_graph.nodes)`` that describe the order of indices in the
``tensors`` with respect to to the :class:`~.PrepareNode` and :class`~.MeasureNode` edges in the
communication graph.
Args:
tensors (Sequence): the tensors to be contracted
communication_graph (MultiDiGraph): the communication graph determining connectivity between
the tensors
prepare_nodes (Sequence[PrepareNode]): a sequence of size ``len(communication_graph.nodes)``
that determines the order of preparation indices in each tensor
measure_nodes (Sequence[MeasureNode]): a sequence of size ``len(communication_graph.nodes)``
that determines the order of measurement indices in each tensor
use_opt_einsum (bool): Determines whether to use the
[opt_einsum](https://dgasmith.github.io/opt_einsum/) package. This package is useful for
tensor contractions of large networks but must be installed separately using, e.g.,
``pip install opt_einsum``.
Returns:
float or array-like: the result of contracting the tensor network
**Example**
.. note::
This function is designed for use as part of the circuit cutting workflow. Check out the
:doc:`transforms </code/qml_transforms>` page for more details.
We first set up the tensors and their corresponding :class:`~.PrepareNode` and
:class`~.MeasureNode` orderings:
.. code-block:: python
t = [np.arange(4), np.arange(4, 8)]
p = [[], [qcut.PrepareNode(wires=0)]]
m = [[qcut.MeasureNode(wires=0)], []]
The communication graph describing edges in the tensor network must also be constructed:
.. code-block:: python
g = nx.MultiDiGraph([(0, 1, {"pair": (m[0][0], p[1][0])})])
The network can then be contracted using:
>>> qcut.contract_tensors(t, g, p, m)
38
"""
# pylint: disable=import-outside-toplevel
if use_opt_einsum:
try:
from opt_einsum import contract, get_symbol
except ImportError as e:
raise ImportError(
"The opt_einsum package is required when use_opt_einsum is set to "
"True in the contract_tensors function. This package can be "
"installed using:\npip install opt_einsum"
) from e
else:
from string import ascii_letters as symbols
from pennylane.math import einsum as contract
def get_symbol(i):
if i >= len(symbols):
raise ValueError(
"Set the use_opt_einsum argument to True when applying more than "
f"{len(symbols)} wire cuts to a circuit"
)
return symbols[i]
ctr = 0
tensor_indxs = [""] * len(communication_graph.nodes)
meas_map = {}
for i, (node, prep) in enumerate(zip(communication_graph.nodes, prepare_nodes)):
predecessors = communication_graph.pred[node]
for p in prep:
for _, pred_edges in predecessors.items():
for pred_edge in pred_edges.values():
meas_op, prep_op = pred_edge["pair"]
if p is prep_op:
symb = get_symbol(ctr)
ctr += 1
tensor_indxs[i] += symb
meas_map[meas_op] = symb
for i, (node, meas) in enumerate(zip(communication_graph.nodes, measure_nodes)):
successors = communication_graph.succ[node]
for m in meas:
for _, succ_edges in successors.items():
for succ_edge in succ_edges.values():
meas_op, _ = succ_edge["pair"]
if m is meas_op:
symb = meas_map[meas_op]
tensor_indxs[i] += symb
eqn = ",".join(tensor_indxs)
kwargs = {} if use_opt_einsum else {"like": tensors[0]}
return contract(eqn, *tensors, **kwargs)
|
24,080 |
def _showAddonRequiresNVDAUpdateDialog(parent, bundle):
incompatibleReasonMessage = _(
# Translators: The message displayed when installing an add-on package is prohibited,
# because it requires a newer version of NVDA than is currently in use.
"Installation of this add-on has been blocked. Your version of NVDA ({NVDAVersion}) "
"is too old to support {summary} version {version}.\n"
"Please update NVDA to at least version {minimumNVDAVersion}, to use this version of {summary}."
).format(
summary=bundle.manifest['summary'],
version=bundle.manifest['version'],
minimumNVDAVersion=addonAPIVersion.formatForGUI(bundle.minimumNVDAVersion),
NVDAVersion=addonAPIVersion.formatForGUI(addonAPIVersion.CURRENT)
)
ErrorAddonInstallDialog(
parent=parent,
# Translators: The title of a dialog presented when an error occurs.
title=_("Add-on not compatible"),
message=incompatibleReasonMessage,
showAddonInfoFunction=lambda: _showAddonInfo(bundle)
).ShowModal()
|
def _showAddonRequiresNVDAUpdateDialog(parent, bundle):
incompatibleReasonMessage = _(
# Translators: The message displayed when installing an add-on package is prohibited,
# because it requires a newer version of NVDA than is currently in use.
"Installation of this add-on has failed. Your version of NVDA ({NVDAVersion}) "
"is too old to support {summary} version {version}.\n"
"Please update NVDA to at least version {minimumNVDAVersion}, to use this version of {summary}."
).format(
summary=bundle.manifest['summary'],
version=bundle.manifest['version'],
minimumNVDAVersion=addonAPIVersion.formatForGUI(bundle.minimumNVDAVersion),
NVDAVersion=addonAPIVersion.formatForGUI(addonAPIVersion.CURRENT)
)
ErrorAddonInstallDialog(
parent=parent,
# Translators: The title of a dialog presented when an error occurs.
title=_("Add-on not compatible"),
message=incompatibleReasonMessage,
showAddonInfoFunction=lambda: _showAddonInfo(bundle)
).ShowModal()
|
49,808 |
def block_safe(block):
"""
Check if the block is safe to work with.
A BlockUserData must have been set on the block while it was known safe.
If an editor is cleared by editor.clear() or editor.set_text() for example,
all the old blocks will continue to report block.isValid() == True
but will raise a Segmentation Fault on almost all methods.
One way to check is that the userData is reset to None or
QTextBlockUserData. So if a block is known to have setUserData to
BlockUserData, this fact can be used to check the block.
"""
return block.isValid() and isinstance(block.userData(), BlockUserData)
|
def block_safe(block):
"""
Check if the block is safe to work with.
A BlockUserData must have been set on the block while it was known safe.
If an editor is cleared by editor.clear() or editor.set_text() for example,
all old blocks will continue to report block.isValid() == True, but will
raise a segmentation fault on almost all methods.
One way to check is that the userData is reset to None or
QTextBlockUserData. So if a block is known to have setUserData to
BlockUserData, this fact can be used to check the block.
"""
return block.isValid() and isinstance(block.userData(), BlockUserData)
|
27,849 |
def instance_normalization(x, gamma, beta, eps=2e-5):
"""Instance normalization function.
This function implements a "instance normalization"
which normalizes each sample by its mean and standard deviation.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Batch tensors.
First dimension of this value must be the size of minibatch and
second dimension must be the number of channels.
Moreover, this value must have one or more following dimensions,
such as height and width.
gamma (~chainer.Variable): Scaling parameter.
beta (~chainer.Variable): Shifting parameter.
eps (float): Epsilon value for numerical stability of normalization.
Returns:
~chainer.Variable: The output variable which has the same shape
as :math:`x`.
See: `Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`_
"""
if x.ndim <= 2:
raise ValueError('Input dimension must be grater than 2, '
'including batch size dimension '
'(first dimension).')
batch_size, channels = x.shape[:2]
original_shape = x.shape
x = reshape.reshape(x, (1, batch_size * channels) + x.shape[2:])
gamma = tile.tile(gamma, batch_size)
beta = tile.tile(beta, batch_size)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = batch_normalization.batch_normalization(x, gamma, beta, eps=eps)
x = reshape.reshape(x, original_shape)
return x
|
def instance_normalization(x, gamma, beta, eps=2e-5):
"""Instance normalization function.
This function implements instance normalization
which normalizes each sample by its mean and standard deviation.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Batch tensors.
First dimension of this value must be the size of minibatch and
second dimension must be the number of channels.
Moreover, this value must have one or more following dimensions,
such as height and width.
gamma (~chainer.Variable): Scaling parameter.
beta (~chainer.Variable): Shifting parameter.
eps (float): Epsilon value for numerical stability of normalization.
Returns:
~chainer.Variable: The output variable which has the same shape
as :math:`x`.
See: `Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`_
"""
if x.ndim <= 2:
raise ValueError('Input dimension must be grater than 2, '
'including batch size dimension '
'(first dimension).')
batch_size, channels = x.shape[:2]
original_shape = x.shape
x = reshape.reshape(x, (1, batch_size * channels) + x.shape[2:])
gamma = tile.tile(gamma, batch_size)
beta = tile.tile(beta, batch_size)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = batch_normalization.batch_normalization(x, gamma, beta, eps=eps)
x = reshape.reshape(x, original_shape)
return x
|
58,085 |
def get_tree(args):
sensor_ids = str(args["sensor_ids"])
process_ids = str(args["process_ids"])
if isinstance(sensor_ids, str):
sensor_ids = sensor_ids.replace('[', '').replace(']', '').replace("'", "").replace('"', '')
sensor_ids = sensor_ids.split(",")
if isinstance(process_ids, str):
process_ids = process_ids.replace('[', '').replace(']', '').replace("'", "").replace('"', '')
process_ids = process_ids.split(",")
all_results = {}
all_ips = []
all_sources = []
all_destinations = []
for sensor_id in sensor_ids:
for process_id in process_ids:
main_process = query_process(sensor_id, process_id)
if main_process.status_code != 200:
continue
results = {}
json_response = main_process.json()
for vertex in json_response["resources"]:
if vertex["vertex_type"] == "process":
#results["main_process"] = {"sensor_id": sensor_id, "process_id": process_id, "properties": vertex["properties"], "indicators": parse_indicators(json_response["resources"][0])}
results["main_process"] = {"sensor_id": sensor_id,
"process_id": process_id, "properties": vertex["properties"]}
ips, sources, destinations = parse_indicators(json_response["resources"][0])
all_ips += ips
all_sources += sources
all_destinations += destinations
for edge in vertex["edges"]:
if edge == "parent_process":
parent_data = query_process(vertex["edges"][edge][0]["device_id"],
vertex["edges"][edge][0]["object_id"])
if parent_data.status_code == 200:
parent_json = parent_data.json()
#results["parent_process"] = {"device_id": vertex["edges"][edge][0]["device_id"], "process_id": vertex["edges"][edge][0]["object_id"], "properties": parent_json["resources"][0]["properties"], "indicators": parse_indicators(parent_json["resources"][0])}
results["parent_process"] = {"device_id": vertex["edges"][edge][0]["device_id"], "process_id": vertex["edges"]
[edge][0]["object_id"], "properties": parent_json["resources"][0]["properties"]}
ips, sources, destinations = parse_indicators(parent_json["resources"][0])
all_ips += ips
all_sources += sources
all_destinations += destinations
else:
results["parent_process"] = {"device_id": vertex["edges"][edge][0]["device_id"],
"process_id": vertex["edges"][edge][0]["object_id"], "properties": "No response for parent"}
if edge == "child_process":
results["child_processes"] = []
for child in vertex["edges"][edge]:
child_data = query_process(child["device_id"], child["object_id"])
child_json = child_data.json()
if child_data.status_code == 200:
#results["child_processes"].append({"device_id": child["device_id"], "process_id": child["object_id"], "properties": child_json["resources"][0]["properties"], "indicators": parse_indicators(child_json["resources"][0])})
results["child_processes"].append(
{"device_id": child["device_id"], "process_id": child["object_id"], "properties": child_json["resources"][0]["properties"]})
ips, sources, destinations = parse_indicators(child_json["resources"][0])
all_ips += ips
all_sources += sources
all_destinations += destinations
else:
results["child_processes"].append(
{"device_id": child["device_id"], "process_id": child["object_id"], "properties": "No response for child"})
if results:
all_results[process_id] = results
all_results["ip_addresses"] = all_ips
ip_string = ''
for ip in all_sources:
ip_string += ip + ':s,'
for ip in all_destinations:
ip_string += ip + ':d,'
ip_string = ip_string.rstrip(',')
all_results["source_ip_addresses"] = all_sources
all_results["destination_ip_addresses"] = all_destinations
all_results["ip_type_string"] = ip_string
result_incident = createContext(all_results, id=None, keyTransform=underscoreToCamelCase, removeNull=True)
ec = {
'ThreatGraph_data(val.Id && val.Id === obj.Id)': result_incident
}
title = 'Crowdstrike Threatgraph Results'
entry = {
'Type': entryTypes['note'],
'Contents': result_incident,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': result_incident,
'EntryContext': ec
}
return entry
|
def get_tree(args):
sensor_ids = argToList(args.get("sensor_ids"))
process_ids = argToList(args.get("process_ids"))
all_results = {}
all_ips = []
all_sources = []
all_destinations = []
for sensor_id in sensor_ids:
for process_id in process_ids:
main_process = query_process(sensor_id, process_id)
if main_process.status_code != 200:
continue
results = {}
json_response = main_process.json()
for vertex in json_response["resources"]:
if vertex["vertex_type"] == "process":
#results["main_process"] = {"sensor_id": sensor_id, "process_id": process_id, "properties": vertex["properties"], "indicators": parse_indicators(json_response["resources"][0])}
results["main_process"] = {"sensor_id": sensor_id,
"process_id": process_id, "properties": vertex["properties"]}
ips, sources, destinations = parse_indicators(json_response["resources"][0])
all_ips += ips
all_sources += sources
all_destinations += destinations
for edge in vertex["edges"]:
if edge == "parent_process":
parent_data = query_process(vertex["edges"][edge][0]["device_id"],
vertex["edges"][edge][0]["object_id"])
if parent_data.status_code == 200:
parent_json = parent_data.json()
#results["parent_process"] = {"device_id": vertex["edges"][edge][0]["device_id"], "process_id": vertex["edges"][edge][0]["object_id"], "properties": parent_json["resources"][0]["properties"], "indicators": parse_indicators(parent_json["resources"][0])}
results["parent_process"] = {"device_id": vertex["edges"][edge][0]["device_id"], "process_id": vertex["edges"]
[edge][0]["object_id"], "properties": parent_json["resources"][0]["properties"]}
ips, sources, destinations = parse_indicators(parent_json["resources"][0])
all_ips += ips
all_sources += sources
all_destinations += destinations
else:
results["parent_process"] = {"device_id": vertex["edges"][edge][0]["device_id"],
"process_id": vertex["edges"][edge][0]["object_id"], "properties": "No response for parent"}
if edge == "child_process":
results["child_processes"] = []
for child in vertex["edges"][edge]:
child_data = query_process(child["device_id"], child["object_id"])
child_json = child_data.json()
if child_data.status_code == 200:
#results["child_processes"].append({"device_id": child["device_id"], "process_id": child["object_id"], "properties": child_json["resources"][0]["properties"], "indicators": parse_indicators(child_json["resources"][0])})
results["child_processes"].append(
{"device_id": child["device_id"], "process_id": child["object_id"], "properties": child_json["resources"][0]["properties"]})
ips, sources, destinations = parse_indicators(child_json["resources"][0])
all_ips += ips
all_sources += sources
all_destinations += destinations
else:
results["child_processes"].append(
{"device_id": child["device_id"], "process_id": child["object_id"], "properties": "No response for child"})
if results:
all_results[process_id] = results
all_results["ip_addresses"] = all_ips
ip_string = ''
for ip in all_sources:
ip_string += ip + ':s,'
for ip in all_destinations:
ip_string += ip + ':d,'
ip_string = ip_string.rstrip(',')
all_results["source_ip_addresses"] = all_sources
all_results["destination_ip_addresses"] = all_destinations
all_results["ip_type_string"] = ip_string
result_incident = createContext(all_results, id=None, keyTransform=underscoreToCamelCase, removeNull=True)
ec = {
'ThreatGraph_data(val.Id && val.Id === obj.Id)': result_incident
}
title = 'Crowdstrike Threatgraph Results'
entry = {
'Type': entryTypes['note'],
'Contents': result_incident,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': result_incident,
'EntryContext': ec
}
return entry
|
41,973 |
def save_study(study: optuna.study.Study, path: str) -> None:
study_in_memory = optuna.create_study()
for trial in study.trials:
study_in_memory.add_trial(trial)
with open(path, "wb") as f:
pickle.dump(study_in_memory, f)
|
def save_study(study: optuna.study.Study, path: str) -> None:
study_in_memory = optuna.create_study(directions=study.directions)
for trial in study.trials:
study_in_memory.add_trial(trial)
with open(path, "wb") as f:
pickle.dump(study_in_memory, f)
|
23,758 |
def create_package(conanfile, package_id, source_folder, build_folder, package_folder,
install_folder, hook_manager, conanfile_path, ref, local=False,
copy_info=False):
""" copies built artifacts, libs, headers, data, etc. from build_folder to
package folder
"""
mkdir(package_folder)
output = conanfile.output
# Make the copy of all the patterns
output.info("Generating the package")
output.info("Package folder %s" % package_folder)
try:
conanfile.package_folder = package_folder
conanfile.source_folder = source_folder
conanfile.install_folder = install_folder
conanfile.build_folder = build_folder
hook_manager.execute("pre_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
package_output = ScopedOutput("%s package()" % output.scope, output)
output.highlight("Calling package()")
folders = [source_folder, build_folder] if source_folder != build_folder else [build_folder]
conanfile.copy = FileCopier(folders, package_folder)
with conanfile_exception_formatter(str(conanfile), "package"):
with chdir(build_folder):
conanfile.package()
except Exception as e:
if not local:
os.chdir(build_folder)
try:
rmdir(package_folder)
except Exception as e_rm:
output.error("Unable to remove package folder %s\n%s" % (package_folder, str(e_rm)))
output.warn("**** Please delete it manually ****")
if isinstance(e, ConanExceptionInUserConanfileMethod):
raise
raise ConanException(e)
manifest = _create_aux_files(install_folder, package_folder, conanfile, copy_info)
_report_files_from_manifest(package_output, package_folder)
package_id = package_id or os.path.basename(package_folder)
output.success("Package '%s' created" % package_id)
hook_manager.execute("post_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
return manifest.summary_hash
|
def create_package(conanfile, package_id, source_folder, build_folder, package_folder,
install_folder, hook_manager, conanfile_path, ref, local=False,
copy_info=False):
""" copies built artifacts, libs, headers, data, etc. from build_folder to
package folder
"""
mkdir(package_folder)
output = conanfile.output
# Make the copy of all the patterns
output.info("Generating the package")
output.info("Package folder %s" % package_folder)
try:
conanfile.package_folder = package_folder
conanfile.source_folder = source_folder
conanfile.install_folder = install_folder
conanfile.build_folder = build_folder
hook_manager.execute("pre_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
package_output = ScopedOutput("%s package()" % output.scope, output)
output.highlight("Calling package()")
folders = [source_folder, build_folder] if source_folder != build_folder else [build_folder]
conanfile.copy = FileCopier(folders, package_folder)
with conanfile_exception_formatter(str(conanfile), "package"):
with chdir(build_folder):
conanfile.package()
except Exception as e:
if not local:
os.chdir(build_folder)
try:
rmdir(package_folder)
except Exception as e_rm:
output.error("Unable to remove package folder %s\n%s" % (package_folder, str(e_rm)))
output.warn("**** Please delete it manually ****")
if isinstance(e, ConanExceptionInUserConanfileMethod):
raise
raise ConanException(e)
digest = _create_aux_files(install_folder, package_folder, conanfile, copy_info)
_report_files_from_manifest(package_output, package_folder)
package_id = package_id or os.path.basename(package_folder)
output.success("Package '%s' created" % package_id)
hook_manager.execute("post_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
return manifest.summary_hash
|
11,070 |
def does_test_match_tags(test, tags, exclude_tags):
test_tags = set(getattr(test, 'tags', []))
test_fn_name = getattr(test, '_testMethodName', str(test))
if hasattr(test, test_fn_name):
test_fn = getattr(test, test_fn_name)
test_fn_tags = list(getattr(test_fn, 'tags', []))
test_tags = test_tags.union(test_fn_tags)
matched_tags = test_tags.intersection(tags)
return (
(matched_tags or not tags) and not test_tags.intersection(exclude_tags)
)
|
def does_test_match_tags(test, tags, exclude_tags):
test_tags = set(getattr(test, 'tags', []))
test_fn_name = getattr(test, '_testMethodName', str(test))
if hasattr(test, test_fn_name):
test_fn = getattr(test, test_fn_name)
test_fn_tags = list(getattr(test_fn, 'tags', []))
test_tags = test_tags.union(test_fn_tags)
matched_tags = test_tags.intersection(tags)
return (matched_tags or not tags) and not test_tags.intersection(exclude_tags)
|
31,577 |
def create_standard_domain_context(domain_data):
command_results = CommandResults(
outputs_prefix=f"Domain",
outputs_key_field="Name",
outputs=domain_data,
readable_output=tableToMarkdown(f"Domain(s):", domain_data)
)
return_results(command_results)
|
def create_standard_domain_context(domain_data):
command_results = CommandResults(
outputs_prefix="Domain",
outputs_key_field="Name",
outputs=domain_data,
readable_output=tableToMarkdown(f"Domain(s):", domain_data)
)
return_results(command_results)
|
9,911 |
def handle_request(url, timeout, data=None, method='GET'):
data = json.dumps(data)
response, info = fetch_url(module, url, data=data, method=method, timeout=timeout,
headers={'Content-Type': 'application/json'})
if info['status'] not in (200, 201, 204):
module.fail_json(msg=info['msg'])
body = response.read()
if body:
return json.loads(body)
else:
return {}
|
def handle_request(url, timeout, data=None, method='GET'):
data = json.dumps(data)
response, info = fetch_url(module, url, data=data, method=method, timeout=timeout,
headers={'Content-Type': 'application/json'})
if info['status'] not in (200, 201, 204):
module.fail_json(msg=info['msg'])
body = response.read()
if body:
ret = json.loads(body)
else:
return {}
|
23,811 |
def write_toolchain(conanfile, path, output):
if hasattr(conanfile, "toolchain"):
msg = ("\n*****************************************************************\n"
"******************************************************************\n"
"The 'toolchain' attribute or method has been deprecated.\n"
"It will be removed in next Conan release.\n"
"Use 'generators = ClassName' or 'generate()' method instead.\n"
"********************************************************************\n"
"********************************************************************\n")
output.warn(msg)
warnings.warn(msg)
output.highlight("Generating toolchain files")
if callable(conanfile.toolchain):
# This is the toolchain
with chdir(path):
with conanfile_exception_formatter(str(conanfile), "toolchain"):
conanfile.toolchain()
else:
try:
toolchain = {"cmake": CMakeToolchain}[conanfile.toolchain]
except KeyError:
raise ConanException("Unknown toolchain '%s'" % conanfile.toolchain)
tc = toolchain(conanfile)
with chdir(path):
tc.generate()
# TODO: Lets discuss what to do with the environment
if hasattr(conanfile, "generate"):
assert callable(conanfile.generate), "generate should be a method, not an attribute"
output.highlight("Calling generate()")
with chdir(path):
with conanfile_exception_formatter(str(conanfile), "generate"):
conanfile.generate()
|
def write_toolchain(conanfile, path, output):
if hasattr(conanfile, "toolchain"):
msg = ("\n*****************************************************************\n"
"******************************************************************\n"
"The 'toolchain' attribute or method has been deprecated.\n"
"It will be removed in next Conan release.\n"
"Use 'generators = \"ClassName\"' or 'generate()' method instead.\n"
"********************************************************************\n"
"********************************************************************\n")
output.warn(msg)
warnings.warn(msg)
output.highlight("Generating toolchain files")
if callable(conanfile.toolchain):
# This is the toolchain
with chdir(path):
with conanfile_exception_formatter(str(conanfile), "toolchain"):
conanfile.toolchain()
else:
try:
toolchain = {"cmake": CMakeToolchain}[conanfile.toolchain]
except KeyError:
raise ConanException("Unknown toolchain '%s'" % conanfile.toolchain)
tc = toolchain(conanfile)
with chdir(path):
tc.generate()
# TODO: Lets discuss what to do with the environment
if hasattr(conanfile, "generate"):
assert callable(conanfile.generate), "generate should be a method, not an attribute"
output.highlight("Calling generate()")
with chdir(path):
with conanfile_exception_formatter(str(conanfile), "generate"):
conanfile.generate()
|
31,925 |
def add_custom_fields(params):
global SNOW_ARGS
custom_fields = params.get('custom_fields')
custom_fields = custom_fields.split(",")
SNOW_ARGS += custom_fields
|
def add_custom_fields(params):
global SNOW_ARGS
custom_fields = argToList(params.get('custom_fields'))
SNOW_ARGS += custom_fields
|
42,859 |
def graph_embed(mat, max_mean_photon=1.0, make_traceless=True, tol=6):
r""" Given an symmetric adjacency matrix (that can be complex in general),
it returns the squeezing parameter and interferometer necessary for
implementing it in GBS.
Args:
mat (array): square symmetric complex (or real or integer) array
max_mean_photon (float): threshold value. It guarantees that the mode with
the largest squeezing has ``max_mean_photon`` as the mean photon number
i.e. :math:`sinh(r_{max})^2 == max_mean_photon`
make_traceless (boolean): removes the trace of the input matrix.
tol (int): the number of decimal places to check the input matrix is symmetric
Returns:
tuple(array, array): Tuple containing the squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = mat.shape
if m != n:
raise ValueError("The Matrix is not square")
if np.round(np.linalg.norm(mat-np.transpose(mat)), tol) != 0:
raise ValueError("The input matrix is not symmetric")
if make_traceless:
A = mat - np.trace(mat)*np.identity(n)/n
s, U = takagi(A)
sc = np.sqrt(1.0+1.0/max_mean_photon)
vals = -np.arctanh(s/(s[0]*sc))
return vals, U
|
def graph_embed(mat, max_mean_photon=1.0, make_traceless=True, tol=6):
r""" Given an symmetric adjacency matrix (that can be complex in general),
it returns the squeezing parameter and interferometer necessary for
implementing it in GBS.
Args:
mat (array): square symmetric complex (or real or integer) array
max_mean_photon (float): threshold value. It guarantees that the mode with
the largest squeezing has ``max_mean_photon`` as the mean photon number
i.e. :math:`sinh(r_{max})^2 == max_mean_photon`
make_traceless (boolean): removes the trace of the input matrix.
tol (int): the number of decimal places to check the input matrix is symmetric
Returns:
tuple(array, array): tuple containing the squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = mat.shape
if m != n:
raise ValueError("The Matrix is not square")
if np.round(np.linalg.norm(mat-np.transpose(mat)), tol) != 0:
raise ValueError("The input matrix is not symmetric")
if make_traceless:
A = mat - np.trace(mat)*np.identity(n)/n
s, U = takagi(A)
sc = np.sqrt(1.0+1.0/max_mean_photon)
vals = -np.arctanh(s/(s[0]*sc))
return vals, U
|
54,922 |
def parse_args():
parser = argparse.ArgumentParser(
description='Question Answering example. '
'We fine-tune the pretrained model on SQuAD dataset.')
parser.add_argument('--model_name', type=str, default='google_albert_base_v2',
help='Name of the pretrained model.')
parser.add_argument('--do_train', action='store_true',
help='Whether to train the model')
parser.add_argument('--do_eval', action='store_true',
help='Whether to evaluate the model')
parser.add_argument('--data_dir', type=str, default='squad')
parser.add_argument('--version', default='2.0', choices=['1.1', '2.0'],
help='Version of the SQuAD dataset.')
parser.add_argument('--output_dir', type=str, default='squad_out',
help='The output directory where the model params will be written.'
' default is squad_out')
# Communication
parser.add_argument('--comm_backend', type=str, default='device',
choices=['horovod', 'dist_sync_device', 'device'],
help='Communication backend.')
parser.add_argument('--gpus', type=str, default='0',
help='list of gpus to run, e.g. 0 or 0,2,5. -1 means using cpu.')
# Training hyperparameters
parser.add_argument('--seed', type=int, default=100, help='Random seed')
parser.add_argument('--log_interval', type=int, default=50,
help='The logging interval for training')
parser.add_argument('--eval_log_interval', type=int, default=10,
help='The logging interval for evaluation')
parser.add_argument('--save_interval', type=int, default=None,
help='the number of steps to save model parameters.'
'default is every epoch')
parser.add_argument('--epochs', type=float, default=3.0,
help='Number of epochs, default is 3')
parser.add_argument('--num_train_steps', type=int, default=None,
help='The number of training steps. Note that epochs will be ignored '
'if training steps are set')
parser.add_argument('--batch_size', type=int, default=8,
help='Batch size. Number of examples per gpu in a minibatch. default is 32')
parser.add_argument('--eval_batch_size', type=int, default=16,
help='Evaluate batch size. Number of examples per gpu in a minibatch for '
'evaluation.')
parser.add_argument('--max_grad_norm', type=float, default=1.0,
help='Max gradient norm.')
parser.add_argument('--optimizer', type=str, default='adamw',
help='optimization algorithm. default is adamw')
parser.add_argument('--adam_epsilon', type=float, default=1e-6,
help='epsilon of AdamW optimizer')
parser.add_argument('--adam_betas', default='(0.9, 0.999)', metavar='B',
help='betas for Adam optimizer')
parser.add_argument('--num_accumulated', type=int, default=1,
help='The number of batches for gradients accumulation to '
'simulate large batch size.')
parser.add_argument('--lr', type=float, default=2e-5,
help='Initial learning rate. default is 2e-5')
parser.add_argument('--warmup_ratio', type=float, default=0.1,
help='Ratio of warmup steps in the learning rate scheduler.')
parser.add_argument('--warmup_steps', type=int, default=None,
help='warmup steps. Note that either warmup_steps or warmup_ratio is set.')
parser.add_argument('--wd', type=float, default=0.01, help='weight decay')
parser.add_argument('--layerwise_decay', type=float, default=-1, help='Layer-wise lr decay')
parser.add_argument('--untunable_depth', type=float, default=-1,
help='Depth of untunable parameters')
parser.add_argument('--classifier_dropout', type=float, default=0.1,
help='dropout of classifier')
# Data pre/post processing
parser.add_argument('--max_seq_length', type=int, default=512,
help='The maximum total input sequence length after tokenization.'
'Sequences longer than this will be truncated, and sequences shorter '
'than this will be padded. default is 512')
parser.add_argument('--doc_stride', type=int, default=128,
help='When splitting up a long document into chunks, how much stride to '
'take between chunks. default is 128')
parser.add_argument('--max_query_length', type=int, default=64,
help='The maximum number of tokens for the query. Questions longer than '
'this will be truncated to this length. default is 64')
parser.add_argument('--pre_shuffle_seed', type=int, default=100,
help='Random seed for pre split shuffle')
parser.add_argument('--round_to', type=int, default=None,
help='The length of padded sequences will be rounded up to be multiple'
' of this argument. When round to is set to 8, training throughput '
'may increase for mixed precision training on GPUs with TensorCores.')
parser.add_argument('--overwrite_cache', action='store_true',
help='Whether to overwrite the feature cache.')
# Evaluation hyperparameters
parser.add_argument('--start_top_n', type=int, default=5,
help='Number of start-position candidates')
parser.add_argument('--end_top_n', type=int, default=5,
help='Number of end-position candidates corresponding '
'to a start position')
parser.add_argument('--n_best_size', type=int, default=20, help='Top N results written to file')
parser.add_argument('--max_answer_length', type=int, default=30,
help='The maximum length of an answer that can be generated. This is '
'needed because the start and end predictions are not conditioned '
'on one another. default is 30')
parser.add_argument('--param_checkpoint', type=str, default=None,
help='The parameter checkpoint for evaluating the model')
parser.add_argument('--backbone_path', type=str, default=None,
help='The parameter checkpoint of backbone model')
parser.add_argument('--all_evaluate', action='store_true',
help='Whether to evaluate all intermediate checkpoints '
'instead of only last one')
parser.add_argument('--max_saved_ckpt', type=int, default=5,
help='The maximum number of saved checkpoints')
parser.add_argument('--dtype', type=str, default='float32',
help='Data type used for evaluation. Either float32 or float16. When you '
'use --dtype float16, amp will be turned on in the training phase and '
'fp16 will be used in evaluation.')
args = parser.parse_args()
assert args.doc_stride <= args.max_seq_length - args.max_query_length - SEPARATORS, 'Possible loss of data while chunking input features'
return args
|
def parse_args():
parser = argparse.ArgumentParser(
description='Question Answering example. '
'We fine-tune the pretrained model on SQuAD dataset.')
parser.add_argument('--model_name', type=str, default='google_albert_base_v2',
help='Name of the pretrained model.')
parser.add_argument('--do_train', action='store_true',
help='Whether to train the model')
parser.add_argument('--do_eval', action='store_true',
help='Whether to evaluate the model')
parser.add_argument('--data_dir', type=str, default='squad')
parser.add_argument('--version', default='2.0', choices=['1.1', '2.0'],
help='Version of the SQuAD dataset.')
parser.add_argument('--output_dir', type=str, default='squad_out',
help='The output directory where the model params will be written.'
' default is squad_out')
# Communication
parser.add_argument('--comm_backend', type=str, default='device',
choices=['horovod', 'dist_sync_device', 'device'],
help='Communication backend.')
parser.add_argument('--gpus', type=str, default='0',
help='list of gpus to run, e.g. 0 or 0,2,5. -1 means using cpu.')
# Training hyperparameters
parser.add_argument('--seed', type=int, default=100, help='Random seed')
parser.add_argument('--log_interval', type=int, default=50,
help='The logging interval for training')
parser.add_argument('--eval_log_interval', type=int, default=10,
help='The logging interval for evaluation')
parser.add_argument('--save_interval', type=int, default=None,
help='the number of steps to save model parameters.'
'default is every epoch')
parser.add_argument('--epochs', type=float, default=3.0,
help='Number of epochs, default is 3')
parser.add_argument('--num_train_steps', type=int, default=None,
help='The number of training steps. Note that epochs will be ignored '
'if training steps are set')
parser.add_argument('--batch_size', type=int, default=8,
help='Batch size. Number of examples per gpu in a minibatch. default is 32')
parser.add_argument('--eval_batch_size', type=int, default=16,
help='Evaluate batch size. Number of examples per gpu in a minibatch for '
'evaluation.')
parser.add_argument('--max_grad_norm', type=float, default=1.0,
help='Max gradient norm.')
parser.add_argument('--optimizer', type=str, default='adamw',
help='optimization algorithm. default is adamw')
parser.add_argument('--adam_epsilon', type=float, default=1e-6,
help='epsilon of AdamW optimizer')
parser.add_argument('--adam_betas', default='(0.9, 0.999)', metavar='B',
help='betas for Adam optimizer')
parser.add_argument('--num_accumulated', type=int, default=1,
help='The number of batches for gradients accumulation to '
'simulate large batch size.')
parser.add_argument('--lr', type=float, default=2e-5,
help='Initial learning rate. default is 2e-5')
parser.add_argument('--warmup_ratio', type=float, default=0.1,
help='Ratio of warmup steps in the learning rate scheduler.')
parser.add_argument('--warmup_steps', type=int, default=None,
help='warmup steps. Note that either warmup_steps or warmup_ratio is set.')
parser.add_argument('--wd', type=float, default=0.01, help='weight decay')
parser.add_argument('--layerwise_decay', type=float, default=-1, help='Layer-wise lr decay')
parser.add_argument('--untunable_depth', type=float, default=-1,
help='Depth of untunable parameters')
parser.add_argument('--classifier_dropout', type=float, default=0.1,
help='dropout of classifier')
# Data pre/post processing
parser.add_argument('--max_seq_length', type=int, default=512,
help='The maximum total input sequence length after tokenization.'
'Sequences longer than this will be truncated, and sequences shorter '
'than this will be padded. default is 512')
parser.add_argument('--doc_stride', type=int, default=128,
help='When splitting up a long document into chunks, how much stride to '
'take between chunks. default is 128')
parser.add_argument('--max_query_length', type=int, default=64,
help='The maximum number of tokens for the query. Questions longer than '
'this will be truncated to this length. default is 64')
parser.add_argument('--pre_shuffle_seed', type=int, default=100,
help='Random seed for pre split shuffle')
parser.add_argument('--round_to', type=int, default=None,
help='The length of padded sequences will be rounded up to be multiple'
' of this argument. When round to is set to 8, training throughput '
'may increase for mixed precision training on GPUs with TensorCores.')
parser.add_argument('--overwrite_cache', action='store_true',
help='Whether to overwrite the feature cache.')
# Evaluation hyperparameters
parser.add_argument('--start_top_n', type=int, default=5,
help='Number of start-position candidates')
parser.add_argument('--end_top_n', type=int, default=5,
help='Number of end-position candidates corresponding '
'to a start position')
parser.add_argument('--n_best_size', type=int, default=20, help='Top N results written to file')
parser.add_argument('--max_answer_length', type=int, default=30,
help='The maximum length of an answer that can be generated. This is '
'needed because the start and end predictions are not conditioned '
'on one another. default is 30')
parser.add_argument('--param_checkpoint', type=str, default=None,
help='The parameter checkpoint for evaluating the model')
parser.add_argument('--backbone_path', type=str, default=None,
help='The parameter checkpoint of backbone model')
parser.add_argument('--all_evaluate', action='store_true',
help='Whether to evaluate all intermediate checkpoints '
'instead of only last one')
parser.add_argument('--max_saved_ckpt', type=int, default=5,
help='The maximum number of saved checkpoints')
parser.add_argument('--dtype', type=str, default='float32',
help='Data type used for evaluation. Either float32 or float16. When you '
'use --dtype float16, amp will be turned on in the training phase and '
'fp16 will be used in evaluation.')
args = parser.parse_args()
assert args.doc_stride <= args.max_seq_length - args.max_query_length - SEPARATORS, \
'Possible loss of data while chunking input features'
return args
|
27,419 |
def json_packer(obj):
try:
return json.dumps(
obj,
default=json_default,
ensure_ascii=False,
allow_nan=False,
).encode("utf8")
except ValueError as e:
# Fallback to trying to clean the json before serializing
warnings.warn(
f"Message serialization failed with:\n{e}\n"
"Supporting this message is deprecated, please make "
"sure your message is JSON-compliant",
stacklevel=2,
)
return json.dumps(
json_clean(obj),
default=json_default,
ensure_ascii=False,
allow_nan=False,
).encode("utf8")
|
def json_packer(obj):
try:
return json.dumps(
obj,
default=json_default,
ensure_ascii=False,
allow_nan=False,
).encode("utf8")
except ValueError as e:
# Fallback to trying to clean the json before serializing
warnings.warn(
f"Message serialization failed with:\n{e}\n"
"Supporting this message is deprecated in jupyter-client 7, please make "
"sure your message is JSON-compliant",
stacklevel=2,
)
return json.dumps(
json_clean(obj),
default=json_default,
ensure_ascii=False,
allow_nan=False,
).encode("utf8")
|
35,925 |
def make_frontier_receipt(base_header: BlockHeaderAPI,
transaction: SignedTransactionAPI,
computation: ComputationAPI,
gas_used: int) -> ReceiptAPI:
# Reusable for other forks
# This skips setting the state root (set to 0 instead). The logic for making a state root
# lives in the FrontierVM, so that state merkelization at each receipt is skipped at Byzantium+.
logs = [
Log(address, topics, data)
for address, topics, data
in computation.get_log_entries()
]
receipt = Receipt(
state_root=ZERO_HASH32,
gas_used=gas_used,
logs=logs,
)
return receipt
|
def make_frontier_receipt(base_header: BlockHeaderAPI,
transaction: SignedTransactionAPI,
computation: ComputationAPI,
new_cumulative_gas_used: int) -> ReceiptAPI:
# Reusable for other forks
# This skips setting the state root (set to 0 instead). The logic for making a state root
# lives in the FrontierVM, so that state merkelization at each receipt is skipped at Byzantium+.
logs = [
Log(address, topics, data)
for address, topics, data
in computation.get_log_entries()
]
receipt = Receipt(
state_root=ZERO_HASH32,
gas_used=gas_used,
logs=logs,
)
return receipt
|
25,751 |
def linexpr(*tuples, as_pandas=False, return_axes=False):
"""
Elementwise concatenation of tuples in the form (coefficient, variables).
Coefficient and variables can be arrays, series or frames. Returns
a np.ndarray of strings. If return_axes is set to True and a pd.Series or
pd.DataFrame was past, the corresponding index (and column if existent) is
returned additionaly.
Parameters
----------
tulples: tuple of tuples
Each tuple must of the form (coeff, var), where
* coeff is a numerical value, or a numeical array, series, frame
* var is a str or a array, series, frame of variable strings
as_pandas : bool, default False
Whether to return to resulting array as a series, if 1-dimensional, or
a frame, if 2-dimensional. Supersedes return_axes argument.
return_axes: Boolean, default False
Whether to return index and column (if existent)
Example
-------
Initialize coefficients and variables
>>> coeff1 = 1
>>> var1 = pd.Series(['a1', 'a2', 'a3'])
>>> coeff2 = pd.Series([-0.5, -0.3, -1])
>>> var2 = pd.Series(['b1', 'b2', 'b3'])
Create the linear expression strings
>>> linexpr((coeff1, var1), (coeff2, var2))
array(['+1.0 a1 -0.5 b1', '+1.0 a2 -0.3 b2', '+1.0 a3 -1.0 b3'], dtype=object)
For turning the result into a series or frame again:
>>> linexpr((coeff1, var1), (coeff2, var2), as_pandas=True)
0 +1.0 a1 -0.5 b1
1 +1.0 a2 -0.3 b2
2 +1.0 a3 -1.0 b3
dtype: object
For a further step the resulting frame can be used as the lhs of
:func:`pypsa.linopt.write_contraint`
"""
axes, shape = broadcasted_axes(*tuples)
expr = np.repeat('', np.prod(shape)).reshape(shape).astype(object)
if np.prod(shape):
for coeff, var in tuples:
expr += _str_array(coeff) + _str_array(var) + '\n'
if as_pandas:
twodims = len(shape) > 1
return pd.DataFrame(expr, *axes) if twodims else pd.Series(expr, *axes)
if return_axes:
return (expr, *axes)
return expr
|
def linexpr(*tuples, as_pandas=False, return_axes=False):
"""
Elementwise concatenation of tuples in the form (coefficient, variables).
Coefficient and variables can be arrays, series or frames. Returns
a np.ndarray of strings. If return_axes is set to True and a pd.Series or
pd.DataFrame was past, the corresponding index (and column if existent) is
returned additionaly.
Parameters
----------
tulples: tuple of tuples
Each tuple must of the form (coeff, var), where
* coeff is a numerical value, or a numerical array, series, frame
* var is a str or a array, series, frame of variable strings
as_pandas : bool, default False
Whether to return to resulting array as a series, if 1-dimensional, or
a frame, if 2-dimensional. Supersedes return_axes argument.
return_axes: Boolean, default False
Whether to return index and column (if existent)
Example
-------
Initialize coefficients and variables
>>> coeff1 = 1
>>> var1 = pd.Series(['a1', 'a2', 'a3'])
>>> coeff2 = pd.Series([-0.5, -0.3, -1])
>>> var2 = pd.Series(['b1', 'b2', 'b3'])
Create the linear expression strings
>>> linexpr((coeff1, var1), (coeff2, var2))
array(['+1.0 a1 -0.5 b1', '+1.0 a2 -0.3 b2', '+1.0 a3 -1.0 b3'], dtype=object)
For turning the result into a series or frame again:
>>> linexpr((coeff1, var1), (coeff2, var2), as_pandas=True)
0 +1.0 a1 -0.5 b1
1 +1.0 a2 -0.3 b2
2 +1.0 a3 -1.0 b3
dtype: object
For a further step the resulting frame can be used as the lhs of
:func:`pypsa.linopt.write_contraint`
"""
axes, shape = broadcasted_axes(*tuples)
expr = np.repeat('', np.prod(shape)).reshape(shape).astype(object)
if np.prod(shape):
for coeff, var in tuples:
expr += _str_array(coeff) + _str_array(var) + '\n'
if as_pandas:
twodims = len(shape) > 1
return pd.DataFrame(expr, *axes) if twodims else pd.Series(expr, *axes)
if return_axes:
return (expr, *axes)
return expr
|
14,339 |
def buildCOLR(
colorGlyphs: _ColorGlyphsDict,
version: int = 0,
glyphMap: Optional[Mapping[str, int]] = None,
varStore: Optional[ot.VarStore] = None,
) -> C_O_L_R_.table_C_O_L_R_:
"""Build COLR table from color layers mapping.
Args:
colorGlyphs: If version == 0, a map of base glyph name to, either list of
(layer glyph name, color palette index) tuples.
If version >0, a list of BaseGlyphV1Record or tuples/dicts from which
BaseGlyphV1Record can be assembled.
version: the version of COLR table.
glyphMap: a map from glyph names to glyph indices, as returned from
TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
varStore: Optional ItemVarationStore for deltas associated with v1 layer.
Return:
A new COLR table.
"""
self = C_O_L_R_.table_C_O_L_R_()
if varStore is not None and version == 0:
raise ValueError("Can't add VarStore to COLRv0")
colr = ot.COLR()
if version == 0:
populateCOLRv0(colr, colorGlyphs, glyphMap)
else:
colr.BaseGlyphRecordCount = colr.LayerRecordCount = 0
colr.BaseGlyphRecordArray = colr.LayerRecordArray = None
colr.LayerV1List, colr.BaseGlyphV1List = buildColrV1(colorGlyphs, glyphMap)
self.version = colr.Version = version
if version == 0:
self._fromOTTable(colr)
else:
colr.VarStore = varStore
self.table = colr
return self
|
def buildCOLR(
colorGlyphs: _ColorGlyphsDict,
version: int = 0,
glyphMap: Optional[Mapping[str, int]] = None,
varStore: Optional[ot.VarStore] = None,
) -> C_O_L_R_.table_C_O_L_R_:
"""Build COLR table from color layers mapping.
Args:
colorGlyphs: If version == 0, a map of base glyph name to, either list of
(layer glyph name, color palette index) tuples.
If version >0, a dict of BaseGlyphV1Record or tuples/dicts from which
BaseGlyphV1Record can be assembled.
version: the version of COLR table.
glyphMap: a map from glyph names to glyph indices, as returned from
TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
varStore: Optional ItemVarationStore for deltas associated with v1 layer.
Return:
A new COLR table.
"""
self = C_O_L_R_.table_C_O_L_R_()
if varStore is not None and version == 0:
raise ValueError("Can't add VarStore to COLRv0")
colr = ot.COLR()
if version == 0:
populateCOLRv0(colr, colorGlyphs, glyphMap)
else:
colr.BaseGlyphRecordCount = colr.LayerRecordCount = 0
colr.BaseGlyphRecordArray = colr.LayerRecordArray = None
colr.LayerV1List, colr.BaseGlyphV1List = buildColrV1(colorGlyphs, glyphMap)
self.version = colr.Version = version
if version == 0:
self._fromOTTable(colr)
else:
colr.VarStore = varStore
self.table = colr
return self
|
46,759 |
def test_rule_objects(schema_obj):
"""Ensure that all objects referenced in the schema rules are defined in
its object portion.
This test currently fails because rules files reference object keys for some object types,
including entities, columns, and metadata fields,
but reference "name" or "value" elements of the object definitions for other object types,
including suffixes and extensions.
In the case of datatypes, the key and "value" field are always the same.
Some other object types, such as associated_data, common_principles, formats, modalities,
and top_level_files, are not checked in the rules at all.
Additionally, this test only checks rules that fit the keys.
"""
OBJECT_TYPE_MAPPER = {
"metadata": "fields", # metadata in objects is referred to as fields in rules
}
not_found = [] # A list of undefined, but referenced, objects
object_types = list(schema_obj["objects"].keys())
for object_type in object_types:
# Find all uses of a given object type in the schema rules
type_instances_in_rules = _dict_key_lookup(
schema_obj["rules"],
OBJECT_TYPE_MAPPER.get(object_type, object_type),
)
if not type_instances_in_rules:
continue
for type_instance in type_instances_in_rules:
path, instance = type_instance
is_list = True
if isinstance(instance, dict):
instance = list(instance.keys())
is_list = False
for i_use, use in enumerate(instance):
if use == "derivatives":
# Skip derivatives folders, because the folder is treated as a "use" instead.
continue
elif "[]" in use:
# Rules may reference metadata fields with lists.
# This test can't handle this yet, so skip.
continue
elif "{}" in use:
# Rules may reference sub-dictionaries in metadata fields.
# This test can't handle this yet, so skip.
continue
if object_type in ["extensions", "suffixes"]:
# Some object types are referenced via their "value" fields in the rules
object_values = [
schema_obj["objects"][object_type][k]["value"]
for k in schema_obj["objects"][object_type].keys()
]
else:
# But other object types are referenced via their keys
object_values = list(schema_obj["objects"][object_type].keys())
# Build a list of items mentioned in rules, but not found in objects.
if use not in object_values:
temp_path = path[:]
if is_list:
temp_path[-1] += f"[{i_use}]"
temp_path.append(use)
not_found.append(temp_path)
if not_found:
not_found_string = "\n".join(
[".".join(sublist[:-1]) + " == " + sublist[-1] for sublist in not_found]
)
raise Exception(not_found_string)
|
def test_rule_objects(schema_obj):
"""Ensure that all objects referenced in the schema rules are defined in
its object portion.
This test currently fails because rules files reference object keys for some object types,
including entities, columns, and metadata fields,
but reference "name" or "value" elements of the object definitions for other object types,
including suffixes and extensions.
In the case of datatypes, the key and "value" field are always the same.
Some other object types, such as associated_data, common_principles, formats, modalities,
and top_level_files, are not checked in the rules at all.
Additionally, this test only checks rules that fit the keys.
"""
OBJECT_TYPE_MAPPER = {
"metadata": "fields", # metadata in objects is referred to as fields in rules
}
not_found = [] # A list of undefined, but referenced, objects
object_types = list(schema_obj["objects"].keys())
for object_type in object_types:
# Find all uses of a given object type in the schema rules
type_instances_in_rules = _dict_key_lookup(
schema_obj["rules"],
OBJECT_TYPE_MAPPER.get(object_type, object_type),
)
if not type_instances_in_rules:
continue
for type_instance in type_instances_in_rules:
path, instance = type_instance
is_list = True
if isinstance(instance, dict):
instance = list(instance.keys())
is_list = False
for i_use, use in enumerate(instance):
if use == "derivatives":
# Skip derivatives folders, because the folder is treated as a "use" instead.
continue
elif "[]" in use:
# Rules may reference metadata fields with lists.
# This test can't handle this yet, so skip.
continue
elif "{}" in use:
# Rules may reference sub-dictionaries in metadata fields.
# This test can't handle this yet, so skip.
continue
if object_type in ["extensions", "suffixes"]:
# Some object types are referenced via their "value" fields in the rules
object_values = [
schema_obj["objects"][object_type][k]["value"]
for k in schema_obj["objects"][object_type].keys()
]
else:
# But other object types are referenced via their keys
object_values = list(schema_obj["objects"][object_type].keys())
# Build a list of items mentioned in rules, but not found in objects.
if use not in object_values:
temp_path = path[:]
if is_list:
temp_path[-1] += f"[{i_use}]"
not_found.append((temp_path, use))
if not_found:
not_found_string = "\n".join(
[".".join(sublist[:-1]) + " == " + sublist[-1] for sublist in not_found]
)
raise Exception(not_found_string)
|
5,381 |
def sync_netapi(
saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None
):
"""
.. versionadded:: 3003
Sync netapi from ``salt://_netapi`` to the minion
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for netapi to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new netapis are synced. Set
to ``False`` to prevent this refresh.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Examples:
.. code-block:: bash
salt '*' saltutil.sync_netapi
salt '*' saltutil.sync_netapi saltenv=dev
"""
ret = _sync("netapi", saltenv, extmod_whitelist, extmod_blacklist)
if refresh:
refresh_modules()
return ret
|
def sync_netapi(
saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None
):
"""
.. versionadded:: 3004
Sync netapi from ``salt://_netapi`` to the minion
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for netapi to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new netapis are synced. Set
to ``False`` to prevent this refresh.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Examples:
.. code-block:: bash
salt '*' saltutil.sync_netapi
salt '*' saltutil.sync_netapi saltenv=dev
"""
ret = _sync("netapi", saltenv, extmod_whitelist, extmod_blacklist)
if refresh:
refresh_modules()
return ret
|
58,361 |
def parse_transforms(transforms_in, validate=True,level="run"):
""" Adapted from bids.modeling.statsmodels.BIDSStatsModelsGraph. Also
handles files/jsons that only define the transformations section of the
model.json """
# input is JSON as a file or dict
if isinstance(transforms_in, str):
if not Path(transforms_in).exists():
raise ValueError(f"Cannot find path: {transforms_in}")
with open(transforms_in, 'r', encoding='utf-8') as fobj:
transforms_raw = json.load(fobj)
else:
transforms_raw = transforms_in
# Convert JSON from CamelCase to snake_case keys
transforms_raw = convert_JSON(transforms_raw)
if validate:
# TODO
# validate_transforms(transforms_raw)
pass
# Process transformations
# TODO: some basic error checking to confirm the correct level of
# transformations has been obtained. This will most likely be the case since
# transformations at higher levels will no longer be required when the new
# "flow" approach is used.
if "transformations" in transforms_raw:
transforms = transforms_raw["transformations"]
elif any(k in transforms_raw for k in ["nodes","steps"]):
nodes_key = "nodes" if "nodes" in transforms_raw else "steps"
transforms = transforms_raw[nodes_key][0]["transformations"]
else:
raise ValueError("Cannot find a key for nodes in the json input representing the model")
return transforms
|
def parse_transforms(transforms_in, validate=True,level="run"):
""" Adapted from bids.modeling.statsmodels.BIDSStatsModelsGraph. Also
handles files/jsons that only define the transformations section of the
model.json """
# input is JSON as a file or dict
transforms_raw = transforms_in
if isinstance(transforms_raw, str):
if Path(transforms_raw).exists():
transforms_raw = Path(transforms_raw).load_text()
transforms_raw = json.loads(transforms_raw)
# Convert JSON from CamelCase to snake_case keys
transforms_raw = convert_JSON(transforms_raw)
if validate:
# TODO
# validate_transforms(transforms_raw)
pass
# Process transformations
# TODO: some basic error checking to confirm the correct level of
# transformations has been obtained. This will most likely be the case since
# transformations at higher levels will no longer be required when the new
# "flow" approach is used.
if "transformations" in transforms_raw:
transforms = transforms_raw["transformations"]
elif any(k in transforms_raw for k in ["nodes","steps"]):
nodes_key = "nodes" if "nodes" in transforms_raw else "steps"
transforms = transforms_raw[nodes_key][0]["transformations"]
else:
raise ValueError("Cannot find a key for nodes in the json input representing the model")
return transforms
|
5,442 |
def test_write_with_user():
"""
Test writing a default setting with a specific user
"""
mock = MagicMock()
with patch.dict(macdefaults.__salt__, {"cmd.run_all": mock}):
macdefaults.write(
"com.apple.CrashReporter", "DialogType", "Server", user="frank"
)
mock.assert_called_once_with(
'defaults write "com.apple.CrashReporter" "DialogType" -string' ' "Server"',
runas="frank",
)
|
def test_write_with_user():
"""
Test writing a default setting with a specific user
"""
mock = MagicMock()
with patch.dict(macdefaults.__salt__, {"cmd.run_all": mock}):
macdefaults.write(
"com.apple.CrashReporter", "DialogType", "Server", user="frank"
)
mock.assert_called_once_with(
'defaults write "com.apple.CrashReporter" "DialogType" -string "Server"',
runas="frank",
)
|
57,548 |
def _parse_span(span):
"""Parses the span protobuf into a dict that BigQuery can read.
Args:
span (google.cloud.trace_v1.TraceSpan): The trace span.
Returns:
A dict containing span data to be written to BigQuery.
"""
span_dict = {
'name': span.name,
'parent_span_id': span.parent_span_id,
'start_time_micros': span.start_time.ToMicroseconds(),
'end_time_micros': span.end_time.ToMicroseconds(),
'span_id': span.span_id,
'milliseconds':
(span.end_time.ToMicroseconds() -
span.start_time.ToMicroseconds()) / 1000.0,
'labels': []
}
# Span labels need to be converted from 'label_key': 'label_value' to
# 'key': 'label_key', 'value': 'label_value'.
for key, value in span.labels.items():
span_dict['labels'].append({
'key': key,
'value': value
})
return span_dict
|
def _parse_span(span):
"""Parses the span protobuf into a dict that BigQuery can read.
Args:
span (google.cloud.trace_v1.TraceSpan): The trace span.
Returns:
A dict containing span data to be written to BigQuery.
"""
span_dict = {
'name': span.name,
'parent_span_id': span.parent_span_id,
'start_time_micros': span.start_time.ToMicroseconds(),
'end_time_micros': span.end_time.ToMicroseconds(),
'span_id': span.span_id,
'milliseconds':
(span.end_time.ToMicroseconds() -
span.start_time.ToMicroseconds()) / 1000.0,
'labels': [{'key': key, 'value': value}
for key, value in span.labels.items()]
}
# Span labels need to be converted from 'label_key': 'label_value' to
# 'key': 'label_key', 'value': 'label_value'.
for key, value in span.labels.items():
span_dict['labels'].append({
'key': key,
'value': value
})
return span_dict
|
13,364 |
def parse_fstat(fstat, dirs):
# fstat output is separated by newlines
fstat = fstat.split('\n')
# first line is the column headers
fstat.pop(0)
# comprehension does a few things
# 1. remove any empty lines
# 2. remove any lines that dont have
# at least 5 columns. The 5th
# column is the path associated
# with the process.
# 3. remove the g_eli,g_mirror information
# from the output as its not needed
# and ends up shifting those lines
# quite a bit since the thread names
# have spaces in their names
# 4. remove any lines that don't have a traditional
# "path" since the path component for processes
# can be a stream/socket/pipe etc
fstat = [
i for i in fstat if i and len(i.split()) >= 5 and not i.split()[1].startswith(('g_eli', 'g_mirror')) and '/' in i.split()[4]
]
pids = {}
pid = command = None
for line in fstat:
line = line.split()
pid = command = None
try:
pid = int(line[2])
except ValueError:
pass
command = line[1]
if os.path.isabs(line[4]) and any(os.path.commonpath([line[4], dir]) == dir for dir in dirs):
if pid is not None and command is not None:
pids[pid] = command
return list(pids.items())
|
def parse_fstat(fstat, dirs):
# fstat output is separated by newlines
fstat = fstat.split('\n')
# first line is the column headers
fstat.pop(0)
# comprehension does a few things
# 1. remove any empty lines
# 2. remove any lines that dont have
# at least 5 columns. The 5th
# column is the path associated
# with the process.
# 3. remove the g_eli,g_mirror information
# from the output as its not needed
# and ends up shifting those lines
# quite a bit since the thread names
# have spaces in their names
# 4. remove any lines that don't have a traditional
# "path" since the path component for processes
# can be a stream/socket/pipe etc
fstat = [
i for i in fstat if i and len(i.split()) >= 5 and not i.split()[1].startswith(('g_eli', 'g_mirror')) and '/' in i.split()[4]
]
pids = {}
pid = command = None
for line in fstat:
pid = command = None
try:
pid = int(line[2])
except ValueError:
pass
command = line[1]
if os.path.isabs(line[4]) and any(os.path.commonpath([line[4], dir]) == dir for dir in dirs):
if pid is not None and command is not None:
pids[pid] = command
return list(pids.items())
|
6,386 |
def create_appointments(number):
for i in range(1, number):
frappe.get_doc({
'doctype': 'Appointment',
'scheduled_time': datetime.datetime.min,
'customer_name': 'Test Customer'+str(i),
'customer_phone_number': '8088',
'customer_skype': 'test'+str(i),
})
|
def create_appointments(number):
for i in range(1, number):
frappe.get_doc({
'doctype': 'Appointment',
'scheduled_time': datetime.datetime.min,
'customer_name': 'Test Customer' + str(count),
'customer_phone_number': '8088',
'customer_skype': 'test'+str(i),
})
|
59,599 |
def download(url: str, dest: Path) -> None:
print(f'+ Download {url} to {dest}')
dest_dir = dest.parent
if not dest_dir.exists():
dest_dir.mkdir(parents=True)
cafile = os.environ.get('SSL_CERT_FILE', certifi.where())
context = ssl.create_default_context(cafile=cafile)
repeat_num = 3
for i in range(repeat_num):
try:
response = urllib.request.urlopen(url, context=context)
except Exception:
if i == repeat_num - 1:
raise
sleep(3)
continue
break
try:
dest.write_bytes(response.read())
finally:
response.close()
|
def download(url: str, dest: Path) -> None:
print(f'+ Download {url} to {dest}')
dest_dir = dest.parent
if not dest_dir.exists():
dest_dir.mkdir(parents=True)
# we've had issues when relying on the host OS' CA certificates on Windows,
# so we use certifi (this sounds odd but requests also does this by default)
cafile = os.environ.get('SSL_CERT_FILE', certifi.where())
context = ssl.create_default_context(cafile=cafile)
repeat_num = 3
for i in range(repeat_num):
try:
response = urllib.request.urlopen(url, context=context)
except Exception:
if i == repeat_num - 1:
raise
sleep(3)
continue
break
try:
dest.write_bytes(response.read())
finally:
response.close()
|
7,392 |
def _moments_raw_to_central_fast(moments_raw):
"""Analytical equations for 2D and 3D central moments with order < 4.
`moments_raw_to_central` will automatically call this function when
possible.
Parameters
----------
moments_raw : ndarray
The raw moments.
Returns
-------
moments_central : ndarray
The central moments.
"""
ndim = moments_raw.ndim
order = moments_raw.shape[0] - 1
float_dtype = moments_raw.dtype
# convert to float64 during the computation for better accuracy
moments_raw = moments_raw.astype(np.float64, copy=False)
moments_central = np.zeros_like(moments_raw)
if order >= 4 or ndim not in [2, 3]:
raise ValueError(
"This function only supports 2D or 3D moments with order < 4"
)
m = moments_raw
if ndim == 2:
cx = m[1, 0] / m[0, 0]
cy = m[0, 1] / m[0, 0]
moments_central[0, 0] = m[0, 0]
# Note: 1st order moments are both 0
if order > 1:
# 2nd order moments
moments_central[1, 1] = m[1, 1] - cx*m[0, 1]
moments_central[2, 0] = m[2, 0] - cx*m[1, 0]
moments_central[0, 2] = m[0, 2] - cy*m[0, 1]
if order > 2:
# 3rd order moments
moments_central[2, 1] = (m[2, 1] - 2*cx*m[1, 1] - cy*m[2, 0]
+ cx**2*m[0, 1] + cy*cx*m[1, 0])
moments_central[1, 2] = (m[1, 2] - 2*cy*m[1, 1] - cx*m[0, 2]
+ 2*cy*cx*m[0, 1])
moments_central[3, 0] = m[3, 0] - 3*cx*m[2, 0] + 2*cx**2*m[1, 0]
moments_central[0, 3] = m[0, 3] - 3*cy*m[0, 2] + 2*cy**2*m[0, 1]
else:
# 3D case
cx = m[1, 0, 0] / m[0, 0, 0]
cy = m[0, 1, 0] / m[0, 0, 0]
cz = m[0, 0, 1] / m[0, 0, 0]
moments_central[0, 0, 0] = m[0, 0, 0]
# Note: all first order moments are 0
if order > 1:
# 2nd order moments
moments_central[0, 0, 2] = -cz*m[0, 0, 1] + m[0, 0, 2]
moments_central[0, 1, 1] = -cy*m[0, 0, 1] + m[0, 1, 1]
moments_central[0, 2, 0] = -cy*m[0, 1, 0] + m[0, 2, 0]
moments_central[1, 0, 1] = -cx*m[0, 0, 1] + m[1, 0, 1]
moments_central[1, 1, 0] = -cx*m[0, 1, 0] + m[1, 1, 0]
moments_central[2, 0, 0] = -cx*m[1, 0, 0] + m[2, 0, 0]
if order > 2:
# 3rd order moments
moments_central[0, 0, 3] = (2*cz**2*m[0, 0, 1]
- 3*cz*m[0, 0, 2]
+ m[0, 0, 3])
moments_central[0, 1, 2] = (-cy*m[0, 0, 2]
+ 2*cz*(cy*m[0, 0, 1] - m[0, 1, 1])
+ m[0, 1, 2])
moments_central[0, 2, 1] = (cy**2*m[0, 0, 1] - 2*cy*m[0, 1, 1]
+ cz*(cy*m[0, 1, 0] - m[0, 2, 0])
+ m[0, 2, 1])
moments_central[0, 3, 0] = (2*cy**2*m[0, 1, 0]
- 3*cy*m[0, 2, 0]
+ m[0, 3, 0])
moments_central[1, 0, 2] = (-cx*m[0, 0, 2]
+ 2*cz*(cx*m[0, 0, 1] - m[1, 0, 1])
+ m[1, 0, 2])
moments_central[1, 1, 1] = (-cx*m[0, 1, 1]
+ cy*(cx*m[0, 0, 1] - m[1, 0, 1])
+ cz*(cx*m[0, 1, 0] - m[1, 1, 0])
+ m[1, 1, 1])
moments_central[1, 2, 0] = (-cx*m[0, 2, 0]
- 2*cy*(-cx*m[0, 1, 0] + m[1, 1, 0])
+ m[1, 2, 0])
moments_central[2, 0, 1] = (cx**2*m[0, 0, 1]
- 2*cx*m[1, 0, 1]
+ cz*(cx*m[1, 0, 0] - m[2, 0, 0])
+ m[2, 0, 1])
moments_central[2, 1, 0] = (cx**2*m[0, 1, 0]
- 2*cx*m[1, 1, 0]
+ cy*(cx*m[1, 0, 0] - m[2, 0, 0])
+ m[2, 1, 0])
moments_central[3, 0, 0] = (2*cx**2*m[1, 0, 0]
- 3*cx*m[2, 0, 0]
+ m[3, 0, 0])
return moments_central.astype(float_dtype, copy=False)
|
def _moments_raw_to_central_fast(moments_raw):
"""Analytical equations for 2D and 3D central moments of order < 4.
`moments_raw_to_central` will automatically call this function when
possible.
Parameters
----------
moments_raw : ndarray
The raw moments.
Returns
-------
moments_central : ndarray
The central moments.
"""
ndim = moments_raw.ndim
order = moments_raw.shape[0] - 1
float_dtype = moments_raw.dtype
# convert to float64 during the computation for better accuracy
moments_raw = moments_raw.astype(np.float64, copy=False)
moments_central = np.zeros_like(moments_raw)
if order >= 4 or ndim not in [2, 3]:
raise ValueError(
"This function only supports 2D or 3D moments with order < 4"
)
m = moments_raw
if ndim == 2:
cx = m[1, 0] / m[0, 0]
cy = m[0, 1] / m[0, 0]
moments_central[0, 0] = m[0, 0]
# Note: 1st order moments are both 0
if order > 1:
# 2nd order moments
moments_central[1, 1] = m[1, 1] - cx*m[0, 1]
moments_central[2, 0] = m[2, 0] - cx*m[1, 0]
moments_central[0, 2] = m[0, 2] - cy*m[0, 1]
if order > 2:
# 3rd order moments
moments_central[2, 1] = (m[2, 1] - 2*cx*m[1, 1] - cy*m[2, 0]
+ cx**2*m[0, 1] + cy*cx*m[1, 0])
moments_central[1, 2] = (m[1, 2] - 2*cy*m[1, 1] - cx*m[0, 2]
+ 2*cy*cx*m[0, 1])
moments_central[3, 0] = m[3, 0] - 3*cx*m[2, 0] + 2*cx**2*m[1, 0]
moments_central[0, 3] = m[0, 3] - 3*cy*m[0, 2] + 2*cy**2*m[0, 1]
else:
# 3D case
cx = m[1, 0, 0] / m[0, 0, 0]
cy = m[0, 1, 0] / m[0, 0, 0]
cz = m[0, 0, 1] / m[0, 0, 0]
moments_central[0, 0, 0] = m[0, 0, 0]
# Note: all first order moments are 0
if order > 1:
# 2nd order moments
moments_central[0, 0, 2] = -cz*m[0, 0, 1] + m[0, 0, 2]
moments_central[0, 1, 1] = -cy*m[0, 0, 1] + m[0, 1, 1]
moments_central[0, 2, 0] = -cy*m[0, 1, 0] + m[0, 2, 0]
moments_central[1, 0, 1] = -cx*m[0, 0, 1] + m[1, 0, 1]
moments_central[1, 1, 0] = -cx*m[0, 1, 0] + m[1, 1, 0]
moments_central[2, 0, 0] = -cx*m[1, 0, 0] + m[2, 0, 0]
if order > 2:
# 3rd order moments
moments_central[0, 0, 3] = (2*cz**2*m[0, 0, 1]
- 3*cz*m[0, 0, 2]
+ m[0, 0, 3])
moments_central[0, 1, 2] = (-cy*m[0, 0, 2]
+ 2*cz*(cy*m[0, 0, 1] - m[0, 1, 1])
+ m[0, 1, 2])
moments_central[0, 2, 1] = (cy**2*m[0, 0, 1] - 2*cy*m[0, 1, 1]
+ cz*(cy*m[0, 1, 0] - m[0, 2, 0])
+ m[0, 2, 1])
moments_central[0, 3, 0] = (2*cy**2*m[0, 1, 0]
- 3*cy*m[0, 2, 0]
+ m[0, 3, 0])
moments_central[1, 0, 2] = (-cx*m[0, 0, 2]
+ 2*cz*(cx*m[0, 0, 1] - m[1, 0, 1])
+ m[1, 0, 2])
moments_central[1, 1, 1] = (-cx*m[0, 1, 1]
+ cy*(cx*m[0, 0, 1] - m[1, 0, 1])
+ cz*(cx*m[0, 1, 0] - m[1, 1, 0])
+ m[1, 1, 1])
moments_central[1, 2, 0] = (-cx*m[0, 2, 0]
- 2*cy*(-cx*m[0, 1, 0] + m[1, 1, 0])
+ m[1, 2, 0])
moments_central[2, 0, 1] = (cx**2*m[0, 0, 1]
- 2*cx*m[1, 0, 1]
+ cz*(cx*m[1, 0, 0] - m[2, 0, 0])
+ m[2, 0, 1])
moments_central[2, 1, 0] = (cx**2*m[0, 1, 0]
- 2*cx*m[1, 1, 0]
+ cy*(cx*m[1, 0, 0] - m[2, 0, 0])
+ m[2, 1, 0])
moments_central[3, 0, 0] = (2*cx**2*m[1, 0, 0]
- 3*cx*m[2, 0, 0]
+ m[3, 0, 0])
return moments_central.astype(float_dtype, copy=False)
|
14,392 |
def porkchop(body_dpt, body_arr, dpt_start, dpt_end, arr_start, arr_end, N=50):
"""Plots porkchop between two bodies.
Parameters
----------
body_dpt: poliastro.bodies.Body
Body for launch
body_arr: poliastro.bodies.Body
Body for arrival
dpt_start: str
Porkchop launch date starts in this value
dpt_end: str
Porkchop launch date ends in this value
arr_start: str
Porkchop arrival date starts in this value
arr_end: str
Porkchop arrival date ends in this value
Returns
-------
dpt: np.array
Departure time span
arr: np.array
Arrival time span
deltav_dpt: np.ndarray
Departure velocity needed for each time of flight
deltav_arr: np.ndarray
Arrival velocity needed for each time of flight
c3_dpt: np.ndarray
Characteristic launch energy
c3_arr: np.ndarray
Characteristic arrival energy
Example
-------
# Time requirements YYYY-MM-DD
# Data is from porkchop pag. 180
>>> from poliastro.plotting.porkchop import porkchop
>>> from poliastro.bodies import Earth, Mars
>>> import matplotlib.pyplot as plt
>>> departure_start = "2005-04-30"
>>> departure_end = "2005-10-07"
>>> arrival_start = "2005-11-16"
>>> arrival_end = "2006-12-21"
>>> dpt, arr, dv_dpt, dv_arr, c3dpt, c3arr = porkchop(Earth, Mars, departure_start, departure_end, arrival_start, arrival_end)
>>> plt.show()
"""
# Computing time spans fot departure and arrival
dpt = [
Time(d, format="jd")
for d in np.linspace(Time(dpt_start).jd, Time(dpt_end).jd, N + 1)
]
arr = [
Time(d, format="jd")
for d in np.linspace(Time(arr_start).jd, Time(arr_end).jd, N + 1)
]
# Prellocate in memory the arrays
deltav_dpt = np.zeros((len(dpt), len(arr)))
deltav_arr = np.zeros((len(dpt), len(arr)))
c3_dpt = np.zeros((len(dpt), len(arr)))
c3_arr = np.zeros((len(dpt), len(arr)))
iso_tof = np.zeros((len(dpt), len(arr)))
idx = 0
for d in dpt:
dv_dpt, dv_arr, c3_d, c3_a, t_flight = lambert_porkchop_vectorized(
body_dpt, body_arr, d, arr
)
deltav_dpt[idx] = dv_dpt
deltav_arr[idx] = dv_arr
c3_dpt[idx] = c3_d
c3_arr[idx] = c3_a
iso_tof[idx] = t_flight
idx += 1
"""
Algorithm works: 'for each launch get all arrivals'.
Contourf works: 'for each Y -> all X'.
We need to transpose the arrays.
"""
fig, ax = plt.subplots(figsize=(15, 15))
c3_levels = np.linspace(0, 45, 30)
t_levels = np.linspace(100, 500, 5)
c = plt.contourf(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
np.transpose(c3_dpt),
c3_levels,
)
l = plt.contour(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
np.transpose(c3_dpt),
c3_levels,
colors="black",
linestyles="solid",
)
t = plt.contour(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
np.transpose(iso_tof),
t_levels,
colors="red",
linestyles="dashed",
linewidths=3.5,
)
cbar = plt.colorbar(c)
cbar.set_label("$km^2/s^2$")
plt.clabel(l, inline=1, fmt="%1.1f", colors="k", fontsize=10)
plt.clabel(t, inline=1, fmt="%1.1f", colors="r", fontsize=14)
plt.grid()
fig.autofmt_xdate()
plt.title(
"{} - {} for year {}, C3 Launch, TFL".format(
body_dpt.name, body_arr.name, dpt[0].datetime.year
),
fontsize=14,
fontweight="bold",
)
plt.xlabel("Launch date", fontsize=10, fontweight="bold")
plt.ylabel("Arrival date", fontsize=10, fontweight="bold")
plt.show()
return dpt, arr, deltav_dpt, deltav_arr, c3_dpt, c3_arr
|
def porkchop(body_dpt, body_arr, dpt_start, dpt_end, arr_start, arr_end, N=50):
"""Plots porkchop between two bodies.
Parameters
----------
body_dpt: poliastro.bodies.Body
Body for launch
body_arr: poliastro.bodies.Body
Body for arrival
dpt_start: str
Porkchop launch date starts in this value
dpt_end: str
Porkchop launch date ends in this value
arr_start: str
Porkchop arrival date starts in this value
arr_end: str
Porkchop arrival date ends in this value
Returns
-------
dpt: np.array
Departure time span
arr: np.array
Arrival time span
deltav_dpt: np.ndarray
Departure velocity needed for each time of flight
deltav_arr: np.ndarray
Arrival velocity needed for each time of flight
c3_dpt: np.ndarray
Characteristic launch energy
c3_arr: np.ndarray
Characteristic arrival energy
Example
-------
# Time requirements YYYY-MM-DD
# Data is from porkchop pag. 180
>>> from poliastro.plotting.porkchop import porkchop
>>> from poliastro.bodies import Earth, Mars
>>> import matplotlib.pyplot as plt
>>> departure_start = "2005-04-30"
>>> departure_end = "2005-10-07"
>>> arrival_start = "2005-11-16"
>>> arrival_end = "2006-12-21"
>>> dpt, arr, dv_dpt, dv_arr, c3dpt, c3arr = porkchop(Earth, Mars, departure_start, departure_end, arrival_start, arrival_end)
>>> plt.show()
"""
# Computing time spans fot departure and arrival
dpt = [
Time(d, format="jd")
for d in np.linspace(Time(dpt_start).jd, Time(dpt_end).jd, N + 1)
]
arr = [
Time(d, format="jd")
for d in np.linspace(Time(arr_start).jd, Time(arr_end).jd, N + 1)
]
# Prellocate in memory the arrays
deltav_dpt = np.zeros((len(dpt), len(arr)))
deltav_arr = np.zeros((len(dpt), len(arr)))
c3_dpt = np.zeros((len(dpt), len(arr)))
c3_arr = np.zeros((len(dpt), len(arr)))
iso_tof = np.zeros((len(dpt), len(arr)))
idx = 0
for d in dpt:
dv_dpt, dv_arr, c3_d, c3_a, t_flight = lambert_porkchop_vectorized(
body_dpt, body_arr, d, arr
)
deltav_dpt[idx] = dv_dpt
deltav_arr[idx] = dv_arr
c3_dpt[idx] = c3_d
c3_arr[idx] = c3_a
iso_tof[idx] = t_flight
idx += 1
"""
Algorithm works: 'for each launch get all arrivals'.
Contourf works: 'for each Y -> all X'.
We need to transpose the arrays.
"""
fig, ax = plt.subplots(figsize=(15, 15))
c3_levels = np.linspace(0, 45, 30)
t_levels = np.linspace(100, 500, 5)
c = plt.contourf(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
c3_dpt.T,
c3_levels,
)
l = plt.contour(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
np.transpose(c3_dpt),
c3_levels,
colors="black",
linestyles="solid",
)
t = plt.contour(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
np.transpose(iso_tof),
t_levels,
colors="red",
linestyles="dashed",
linewidths=3.5,
)
cbar = plt.colorbar(c)
cbar.set_label("$km^2/s^2$")
plt.clabel(l, inline=1, fmt="%1.1f", colors="k", fontsize=10)
plt.clabel(t, inline=1, fmt="%1.1f", colors="r", fontsize=14)
plt.grid()
fig.autofmt_xdate()
plt.title(
"{} - {} for year {}, C3 Launch, TFL".format(
body_dpt.name, body_arr.name, dpt[0].datetime.year
),
fontsize=14,
fontweight="bold",
)
plt.xlabel("Launch date", fontsize=10, fontweight="bold")
plt.ylabel("Arrival date", fontsize=10, fontweight="bold")
plt.show()
return dpt, arr, deltav_dpt, deltav_arr, c3_dpt, c3_arr
|
7,176 |
def _get_fourier_filter(size, filter_name):
"""Construct the Fourier filter
This computation lessens artifacts and removes a small bias as
explained in [1], Chap 3. Equation 61
Parameters
----------
size: int
filter size.
filter_name: str, optional
Filter used in frequency domain filtering. Ramp filter used by
default. Filters available: ramp, shepp-logan, cosine,
hamming, hann. Assign None to use no filter.
Returns
-------
fourier_filter: ndarray
The computed Fourier filter.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=np.int),
np.arange(size / 2 - 1, 0, -2, dtype=np.int)))
f = np.zeros(size)
f[0] = 0.25
f[1::2] = -1 / (np.pi * n) ** 2
# Computing the ramp filter from the fourier transform of its
# frequency domain representation lessens artifacts and removes a
# small bias as explained in [1], Chap 3. Equation 61
fourier_filter = 2 * np.real(fft(f)) # ramp filter
if filter_name == "ramp":
pass
elif filter_name == "shepp-logan":
# Start from first element to avoid divide by zero
omega = np.pi * fftmodule.fftfreq(size)[1:]
fourier_filter[1:] *= np.sin(omega) / omega
elif filter_name == "cosine":
freq = np.pi * np.linspace(0, 1, size, endpoint=False)
cosine_filter = fftmodule.fftshift(np.sin(freq))
fourier_filter *= cosine_filter
elif filter_name == "hamming":
fourier_filter *= fftmodule.fftshift(np.hamming(size))
elif filter_name == "hann":
fourier_filter *= fftmodule.fftshift(np.hanning(size))
elif filter_name is None:
fourier_filter[:] = 1
return fourier_filter[:, np.newaxis]
|
def _get_fourier_filter(size, filter_name):
"""Construct the Fourier filter
This computation lessens artifacts and removes a small bias as
explained in [1], Chap 3. Equation 61
Parameters
----------
size: int
filter size.
filter_name: str
Filter used in frequency domain filtering. Ramp filter used by
default. Filters available: ramp, shepp-logan, cosine,
hamming, hann. Assign None to use no filter.
Returns
-------
fourier_filter: ndarray
The computed Fourier filter.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=np.int),
np.arange(size / 2 - 1, 0, -2, dtype=np.int)))
f = np.zeros(size)
f[0] = 0.25
f[1::2] = -1 / (np.pi * n) ** 2
# Computing the ramp filter from the fourier transform of its
# frequency domain representation lessens artifacts and removes a
# small bias as explained in [1], Chap 3. Equation 61
fourier_filter = 2 * np.real(fft(f)) # ramp filter
if filter_name == "ramp":
pass
elif filter_name == "shepp-logan":
# Start from first element to avoid divide by zero
omega = np.pi * fftmodule.fftfreq(size)[1:]
fourier_filter[1:] *= np.sin(omega) / omega
elif filter_name == "cosine":
freq = np.pi * np.linspace(0, 1, size, endpoint=False)
cosine_filter = fftmodule.fftshift(np.sin(freq))
fourier_filter *= cosine_filter
elif filter_name == "hamming":
fourier_filter *= fftmodule.fftshift(np.hamming(size))
elif filter_name == "hann":
fourier_filter *= fftmodule.fftshift(np.hanning(size))
elif filter_name is None:
fourier_filter[:] = 1
return fourier_filter[:, np.newaxis]
|
37,039 |
def job_monitor(job, interval=None, monitor_async=False, quiet=False, to_file=None):
"""Monitor the status of a IBMQJob instance.
Args:
job (BaseJob): Job to monitor.
interval (int): Time interval between status queries.
monitor_async (bool): Monitor asyncronously (in Jupyter only).
quiet (bool): If True, do not print status messages.
to_file (file): If file print status messages to it, else to stdout.
Raises:
QiskitError: When trying to run async outside of Jupyter
ImportError: ipywidgets not available for notebook.
"""
if interval is None:
_interval_set = False
interval = 2
else:
_interval_set = True
if _NOTEBOOK_ENV:
if monitor_async:
try:
import ipywidgets as widgets # pylint: disable=import-error
except ImportError:
raise ImportError('These functions need ipywidgets. '
'Run "pip install ipywidgets" before.')
from qiskit.tools.jupyter.jupyter_magics import _html_checker # pylint: disable=C0412
style = "font-size:16px;"
header = "<p style='{style}'>Job Status: %s </p>".format(
style=style)
status = widgets.HTML(value=header % job.status().value)
display(status)
thread = threading.Thread(target=_html_checker, args=(job, interval,
status, header))
thread.start()
else:
_text_checker(job, interval, _interval_set,
quiet=quiet, to_file=to_file)
else:
if monitor_async:
raise QiskitError(
'monitor_async only available in Jupyter notebooks.')
_text_checker(job, interval, _interval_set, quiet=quiet, to_file=to_file)
|
def job_monitor(job, interval=None, monitor_async=False, quiet=False, to_file=None):
"""Monitor the status of a IBMQJob instance.
Args:
job (BaseJob): Job to monitor.
interval (int): Time interval between status queries.
monitor_async (bool): Monitor asyncronously (in Jupyter only).
quiet (bool): If True, do not print status messages.
to_file (file): If file print status messages to it, else to stdout.
Raises:
QiskitError: When trying to run async outside of Jupyter
ImportError: ipywidgets not available for notebook.
"""
if interval is None:
_interval_set = False
interval = 2
else:
_interval_set = True
if _NOTEBOOK_ENV:
if monitor_async:
try:
import ipywidgets as widgets # pylint: disable=import-error
except ImportError:
raise ImportError('These functions need ipywidgets. '
'Run "pip install ipywidgets" before.')
from qiskit.tools.jupyter.jupyter_magics import _html_checker # pylint: disable=C0412
style = "font-size:16px;"
header = "<p style='{style}'>Job Status: %s </p>".format(
style=style)
status = widgets.HTML(value=header % job.status().value)
display(status)
thread = threading.Thread(target=_html_checker, args=(job, interval,
status, header))
thread.start()
else:
_text_checker(job, interval, _interval_set,
quiet=quiet, to_file=to_file)
else:
if monitor_async:
raise QiskitError(
'monitor_async only available in Jupyter notebooks.')
_text_checker(job, interval, _interval_set, quiet=quiet, output=output)
|
10,937 |
def _route_to_regex(route, is_endpoint=False):
"""
Convert a path pattern into a regular expression. Return the regular
expression and a dictionary mapping the capture names to the converters.
For example, 'foo/<int:pk>' returns '^foo\\/(?P<pk>[0-9]+)'
and {'pk': <django.urls.converters.IntConverter>}.
"""
if any((s in route) for s in string.whitespace):
raise ImproperlyConfigured("URL route '%s' contain invalid character space." % route)
original_route = route
parts = ['^']
converters = {}
while True:
match = _PATH_PARAMETER_COMPONENT_RE.search(route)
if not match:
parts.append(re.escape(route))
break
parts.append(re.escape(route[:match.start()]))
route = route[match.end():]
parameter = match.group('parameter')
if not parameter.isidentifier():
raise ImproperlyConfigured(
"URL route '%s' uses parameter name %r which isn't a valid "
"Python identifier." % (original_route, parameter)
)
raw_converter = match.group('converter')
if raw_converter is None:
# If a converter isn't specified, the default is `str`.
raw_converter = 'str'
try:
converter = get_converter(raw_converter)
except KeyError as e:
raise ImproperlyConfigured(
"URL route '%s' uses invalid converter %s." % (original_route, e)
)
converters[parameter] = converter
parts.append('(?P<' + parameter + '>' + converter.regex + ')')
if is_endpoint:
parts.append('$')
return ''.join(parts), converters
|
def _route_to_regex(route, is_endpoint=False):
"""
Convert a path pattern into a regular expression. Return the regular
expression and a dictionary mapping the capture names to the converters.
For example, 'foo/<int:pk>' returns '^foo\\/(?P<pk>[0-9]+)'
and {'pk': <django.urls.converters.IntConverter>}.
"""
if not set(route).isdisjoint(string.whitespace):
raise ImproperlyConfigured("URL route '%s' contain invalid character space." % route)
original_route = route
parts = ['^']
converters = {}
while True:
match = _PATH_PARAMETER_COMPONENT_RE.search(route)
if not match:
parts.append(re.escape(route))
break
parts.append(re.escape(route[:match.start()]))
route = route[match.end():]
parameter = match.group('parameter')
if not parameter.isidentifier():
raise ImproperlyConfigured(
"URL route '%s' uses parameter name %r which isn't a valid "
"Python identifier." % (original_route, parameter)
)
raw_converter = match.group('converter')
if raw_converter is None:
# If a converter isn't specified, the default is `str`.
raw_converter = 'str'
try:
converter = get_converter(raw_converter)
except KeyError as e:
raise ImproperlyConfigured(
"URL route '%s' uses invalid converter %s." % (original_route, e)
)
converters[parameter] = converter
parts.append('(?P<' + parameter + '>' + converter.regex + ')')
if is_endpoint:
parts.append('$')
return ''.join(parts), converters
|
13,899 |
def timestamp(value: str) -> datetime.datetime:
from .timestamps import parse_timestamp # lazy import
try:
return parse_timestamp(value)
except ValueError as ex:
(msg,) = ex.args
raise ArgumentTypeError(f"{msg}: {value:!r}") from None
|
def timestamp(value: str) -> datetime.datetime:
from .timestamps import parse_timestamp # lazy import
try:
return parse_timestamp(value)
except ValueError as ex: # pragma: no cover
(msg,) = ex.args
raise ArgumentTypeError(f"{msg}: {value:!r}") from None
|
50,057 |
def install(args, parser, command='install'):
"""
conda install, conda update, and conda create
"""
context.validate_configuration()
check_non_admin()
# this is sort of a hack. current_repodata.json may not have any .tar.bz2 files,
# because it deduplicates records that exist as both formats. Forcing this to
# repodata.json ensures that .tar.bz2 files are available
if context.use_only_tar_bz2:
args.repodata_fns = ('repodata.json', )
newenv = bool(command == 'create')
isupdate = bool(command == 'update')
isinstall = bool(command == 'install')
isremove = bool(command == 'remove')
prefix = context.target_prefix
if newenv:
check_prefix(prefix, json=context.json)
if context.force_32bit and prefix == context.root_prefix:
raise CondaValueError("cannot use CONDA_FORCE_32BIT=1 in base env")
if isupdate and not (args.file or args.packages
or context.update_modifier == UpdateModifier.UPDATE_ALL):
raise CondaValueError("""no package names supplied
# Example: conda update -n myenv scipy
""")
if not newenv:
if isdir(prefix):
delete_trash(prefix)
if not isfile(join(prefix, 'conda-meta', 'history')):
if paths_equal(prefix, context.conda_prefix):
raise NoBaseEnvironmentError()
else:
if not path_is_clean(prefix):
raise DirectoryNotACondaEnvironmentError(prefix)
else:
# fall-through expected under normal operation
pass
else:
if hasattr(args, "mkdir") and args.mkdir:
try:
mkdir_p(prefix)
except EnvironmentError as e:
raise CondaOSError("Could not create directory: %s" % prefix, caused_by=e)
else:
raise EnvironmentLocationNotFound(prefix)
args_packages = [s.strip('"\'') for s in args.packages]
if newenv and not args.no_default_packages:
# Override defaults if they are specified at the command line
# TODO: rework in 4.4 branch using MatchSpec
args_packages_names = [pkg.replace(' ', '=').split('=', 1)[0] for pkg in args_packages]
for default_pkg in context.create_default_packages:
default_pkg_name = default_pkg.replace(' ', '=').split('=', 1)[0]
if default_pkg_name not in args_packages_names:
args_packages.append(default_pkg)
index_args = {
'use_cache': args.use_index_cache,
'channel_urls': context.channels,
'unknown': args.unknown,
'prepend': not args.override_channels,
'use_local': args.use_local
}
num_cp = sum(is_package_file(s) for s in args_packages)
if num_cp:
if num_cp == len(args_packages):
explicit(args_packages, prefix, verbose=not context.quiet)
return
else:
raise CondaValueError("cannot mix specifications with conda package"
" filenames")
specs = []
if args.file:
for fpath in args.file:
try:
specs.extend(common.specs_from_url(fpath, json=context.json))
except UnicodeError:
raise CondaError("Error reading file, file should be a text file containing"
" packages \nconda create --help for details")
if '@EXPLICIT' in specs:
explicit(specs, prefix, verbose=not context.quiet, index_args=index_args)
return
specs.extend(common.specs_from_args(args_packages, json=context.json))
if isinstall and args.revision:
get_revision(args.revision, json=context.json)
elif isinstall and not (args.file or args_packages):
raise CondaValueError("too few arguments, "
"must supply command line package specs or --file")
# for 'conda update', make sure the requested specs actually exist in the prefix
# and that they are name-only specs
if isupdate and context.update_modifier != UpdateModifier.UPDATE_ALL:
prefix_data = PrefixData(prefix)
for spec in specs:
spec = MatchSpec(spec)
if not spec.is_name_only_spec:
raise CondaError("Invalid spec for 'conda update': %s\n"
"Use 'conda install' instead." % spec)
if not prefix_data.get(spec.name, None):
raise PackageNotInstalledError(prefix, spec.name)
if newenv and args.clone:
if args.packages:
raise TooManyArgumentsError(0, len(args.packages), list(args.packages),
'did not expect any arguments for --clone')
clone(args.clone, prefix, json=context.json, quiet=context.quiet, index_args=index_args)
touch_nonadmin(prefix)
print_activate(args.name if args.name else prefix)
return
repodata_fns = args.repodata_fns
if not repodata_fns:
repodata_fns = context.repodata_fns
if REPODATA_FN not in repodata_fns:
repodata_fns.append(REPODATA_FN)
args_set_update_modifier = hasattr(args, "update_modifier") and args.update_modifier != NULL
# This helps us differentiate between an update, the --freeze-installed option, and the retry
# behavior in our initial fast frozen solve
_should_retry_unfrozen = (not args_set_update_modifier or args.update_modifier not in (
UpdateModifier.FREEZE_INSTALLED,
UpdateModifier.UPDATE_SPECS)) and not newenv
for repodata_fn in repodata_fns:
is_last_repodata = repodata_fn == repodata_fns[-1]
try:
if isinstall and args.revision:
index = get_index(channel_urls=index_args['channel_urls'],
prepend=index_args['prepend'], platform=None,
use_local=index_args['use_local'],
use_cache=index_args['use_cache'],
unknown=index_args['unknown'], prefix=prefix,
repodata_fn=repodata_fn)
unlink_link_transaction = revert_actions(prefix, get_revision(args.revision),
index)
else:
SolverType = _get_solver_class()
solver = SolverType(prefix, context.channels, context.subdirs, specs_to_add=specs,
repodata_fn=repodata_fn, command=args.cmd)
update_modifier = context.update_modifier
if (isinstall or isremove) and args.update_modifier == NULL:
update_modifier = UpdateModifier.FREEZE_INSTALLED
deps_modifier = context.deps_modifier
if isupdate:
deps_modifier = context.deps_modifier or DepsModifier.UPDATE_SPECS
unlink_link_transaction = solver.solve_for_transaction(
deps_modifier=deps_modifier,
update_modifier=update_modifier,
force_reinstall=context.force_reinstall or context.force,
should_retry_solve=(_should_retry_unfrozen or repodata_fn != repodata_fns[-1]),
)
if (context.prerelease_behavior is PrereleaseBehavior.LIMIT and
not is_last_repodata and
unlink_link_transaction.adds_prereleases):
continue
# we only need one of these to work. If we haven't raised an exception,
# we're good.
break
except (ResolvePackageNotFound, PackagesNotFoundError) as e:
# end of the line. Raise the exception
if is_last_repodata:
# PackagesNotFoundError is the only exception type we want to raise.
# Over time, we should try to get rid of ResolvePackageNotFound
if isinstance(e, PackagesNotFoundError):
raise e
else:
channels_urls = tuple(calculate_channel_urls(
channel_urls=index_args['channel_urls'],
prepend=index_args['prepend'],
platform=None,
use_local=index_args['use_local'],
))
# convert the ResolvePackageNotFound into PackagesNotFoundError
raise PackagesNotFoundError(e._formatted_chains, channels_urls)
except (UnsatisfiableError, SystemExit, SpecsConfigurationConflictError) as e:
if not getattr(e, "allow_retry", True):
# TODO: This is a temporary workaround to allow downstream libraries
# to inject this attribute set to False and skip the retry logic
# Other solvers might implement their own internal retry logic without
# depending --freeze-install implicitly like conda classic does. Example
# retry loop in conda-libmamba-solver:
# https://github.com/conda-incubator/conda-libmamba-solver/blob/da5b1ba/conda_libmamba_solver/solver.py#L254-L299
# If we end up raising UnsatisfiableError, we annotate it with `allow_retry`
# so we don't have go through all the repodatas and freeze-installed logic
# unnecessarily (see https://github.com/conda/conda/issues/11294). see also:
# https://github.com/conda-incubator/conda-libmamba-solver/blob/7c698209/conda_libmamba_solver/solver.py#L617
raise e
# Quick solve with frozen env or trimmed repodata failed. Try again without that.
if not hasattr(args, 'update_modifier'):
if is_last_repodata:
raise e
elif _should_retry_unfrozen:
try:
unlink_link_transaction = solver.solve_for_transaction(
deps_modifier=deps_modifier,
update_modifier=UpdateModifier.UPDATE_SPECS,
force_reinstall=context.force_reinstall or context.force,
should_retry_solve=(repodata_fn != repodata_fns[-1]),
)
except (UnsatisfiableError, SystemExit, SpecsConfigurationConflictError) as e:
# Unsatisfiable package specifications/no such revision/import error
if e.args and 'could not import' in e.args[0]:
raise CondaImportError(str(e))
# we want to fall through without raising if we're not at the end of the list
# of fns. That way, we fall to the next fn.
if is_last_repodata:
raise e
elif not is_last_repodata:
continue # if we hit this, we should retry with next repodata source
else:
# end of the line. Raise the exception
# Unsatisfiable package specifications/no such revision/import error
if e.args and 'could not import' in e.args[0]:
raise CondaImportError(str(e))
raise e
handle_txn(unlink_link_transaction, prefix, args, newenv)
|
def install(args, parser, command='install'):
"""
conda install, conda update, and conda create
"""
context.validate_configuration()
check_non_admin()
# this is sort of a hack. current_repodata.json may not have any .tar.bz2 files,
# because it deduplicates records that exist as both formats. Forcing this to
# repodata.json ensures that .tar.bz2 files are available
if context.use_only_tar_bz2:
args.repodata_fns = ('repodata.json', )
newenv = bool(command == 'create')
isupdate = bool(command == 'update')
isinstall = bool(command == 'install')
isremove = bool(command == 'remove')
prefix = context.target_prefix
if newenv:
check_prefix(prefix, json=context.json)
if context.force_32bit and prefix == context.root_prefix:
raise CondaValueError("cannot use CONDA_FORCE_32BIT=1 in base env")
if isupdate and not (args.file or args.packages
or context.update_modifier == UpdateModifier.UPDATE_ALL):
raise CondaValueError("""no package names supplied
# Example: conda update -n myenv scipy
""")
if not newenv:
if isdir(prefix):
delete_trash(prefix)
if not isfile(join(prefix, 'conda-meta', 'history')):
if paths_equal(prefix, context.conda_prefix):
raise NoBaseEnvironmentError()
else:
if not path_is_clean(prefix):
raise DirectoryNotACondaEnvironmentError(prefix)
else:
# fall-through expected under normal operation
pass
else:
if hasattr(args, "mkdir") and args.mkdir:
try:
mkdir_p(prefix)
except EnvironmentError as e:
raise CondaOSError("Could not create directory: %s" % prefix, caused_by=e)
else:
raise EnvironmentLocationNotFound(prefix)
args_packages = [s.strip('"\'') for s in args.packages]
if newenv and not args.no_default_packages:
# Override defaults if they are specified at the command line
# TODO: rework in 4.4 branch using MatchSpec
args_packages_names = [pkg.replace(' ', '=').split('=', 1)[0] for pkg in args_packages]
for default_pkg in context.create_default_packages:
default_pkg_name = default_pkg.replace(' ', '=').split('=', 1)[0]
if default_pkg_name not in args_packages_names:
args_packages.append(default_pkg)
index_args = {
'use_cache': args.use_index_cache,
'channel_urls': context.channels,
'unknown': args.unknown,
'prepend': not args.override_channels,
'use_local': args.use_local
}
num_cp = sum(is_package_file(s) for s in args_packages)
if num_cp:
if num_cp == len(args_packages):
explicit(args_packages, prefix, verbose=not context.quiet)
return
else:
raise CondaValueError("cannot mix specifications with conda package"
" filenames")
specs = []
if args.file:
for fpath in args.file:
try:
specs.extend(common.specs_from_url(fpath, json=context.json))
except UnicodeError:
raise CondaError("Error reading file, file should be a text file containing"
" packages \nconda create --help for details")
if '@EXPLICIT' in specs:
explicit(specs, prefix, verbose=not context.quiet, index_args=index_args)
return
specs.extend(common.specs_from_args(args_packages, json=context.json))
if isinstall and args.revision:
get_revision(args.revision, json=context.json)
elif isinstall and not (args.file or args_packages):
raise CondaValueError("too few arguments, "
"must supply command line package specs or --file")
# for 'conda update', make sure the requested specs actually exist in the prefix
# and that they are name-only specs
if isupdate and context.update_modifier != UpdateModifier.UPDATE_ALL:
prefix_data = PrefixData(prefix)
for spec in specs:
spec = MatchSpec(spec)
if not spec.is_name_only_spec:
raise CondaError("Invalid spec for 'conda update': %s\n"
"Use 'conda install' instead." % spec)
if not prefix_data.get(spec.name, None):
raise PackageNotInstalledError(prefix, spec.name)
if newenv and args.clone:
if args.packages:
raise TooManyArgumentsError(0, len(args.packages), list(args.packages),
'did not expect any arguments for --clone')
clone(args.clone, prefix, json=context.json, quiet=context.quiet, index_args=index_args)
touch_nonadmin(prefix)
print_activate(args.name if args.name else prefix)
return
repodata_fns = args.repodata_fns
if not repodata_fns:
repodata_fns = context.repodata_fns
if REPODATA_FN not in repodata_fns:
repodata_fns.append(REPODATA_FN)
args_set_update_modifier = hasattr(args, "update_modifier") and args.update_modifier != NULL
# This helps us differentiate between an update, the --freeze-installed option, and the retry
# behavior in our initial fast frozen solve
_should_retry_unfrozen = (not args_set_update_modifier or args.update_modifier not in (
UpdateModifier.FREEZE_INSTALLED,
UpdateModifier.UPDATE_SPECS)) and not newenv
for repodata_fn in repodata_fns:
is_last_repodata = repodata_fn == repodata_fns[-1]
try:
if isinstall and args.revision:
index = get_index(channel_urls=index_args['channel_urls'],
prepend=index_args['prepend'], platform=None,
use_local=index_args['use_local'],
use_cache=index_args['use_cache'],
unknown=index_args['unknown'], prefix=prefix,
repodata_fn=repodata_fn)
unlink_link_transaction = revert_actions(prefix, get_revision(args.revision),
index)
else:
SolverType = _get_solver_class()
solver = SolverType(prefix, context.channels, context.subdirs, specs_to_add=specs,
repodata_fn=repodata_fn, command=args.cmd)
update_modifier = context.update_modifier
if (isinstall or isremove) and args.update_modifier == NULL:
update_modifier = UpdateModifier.FREEZE_INSTALLED
deps_modifier = context.deps_modifier
if isupdate:
deps_modifier = context.deps_modifier or DepsModifier.UPDATE_SPECS
unlink_link_transaction = solver.solve_for_transaction(
deps_modifier=deps_modifier,
update_modifier=update_modifier,
force_reinstall=context.force_reinstall or context.force,
should_retry_solve=(_should_retry_unfrozen or repodata_fn != repodata_fns[-1]),
)
if (
context.prerelease_behavior is PrereleaseBehavior.LIMIT
and not is_last_repodata
and unlink_link_transaction.adds_prereleases
):
continue
# we only need one of these to work. If we haven't raised an exception,
# we're good.
break
except (ResolvePackageNotFound, PackagesNotFoundError) as e:
# end of the line. Raise the exception
if is_last_repodata:
# PackagesNotFoundError is the only exception type we want to raise.
# Over time, we should try to get rid of ResolvePackageNotFound
if isinstance(e, PackagesNotFoundError):
raise e
else:
channels_urls = tuple(calculate_channel_urls(
channel_urls=index_args['channel_urls'],
prepend=index_args['prepend'],
platform=None,
use_local=index_args['use_local'],
))
# convert the ResolvePackageNotFound into PackagesNotFoundError
raise PackagesNotFoundError(e._formatted_chains, channels_urls)
except (UnsatisfiableError, SystemExit, SpecsConfigurationConflictError) as e:
if not getattr(e, "allow_retry", True):
# TODO: This is a temporary workaround to allow downstream libraries
# to inject this attribute set to False and skip the retry logic
# Other solvers might implement their own internal retry logic without
# depending --freeze-install implicitly like conda classic does. Example
# retry loop in conda-libmamba-solver:
# https://github.com/conda-incubator/conda-libmamba-solver/blob/da5b1ba/conda_libmamba_solver/solver.py#L254-L299
# If we end up raising UnsatisfiableError, we annotate it with `allow_retry`
# so we don't have go through all the repodatas and freeze-installed logic
# unnecessarily (see https://github.com/conda/conda/issues/11294). see also:
# https://github.com/conda-incubator/conda-libmamba-solver/blob/7c698209/conda_libmamba_solver/solver.py#L617
raise e
# Quick solve with frozen env or trimmed repodata failed. Try again without that.
if not hasattr(args, 'update_modifier'):
if is_last_repodata:
raise e
elif _should_retry_unfrozen:
try:
unlink_link_transaction = solver.solve_for_transaction(
deps_modifier=deps_modifier,
update_modifier=UpdateModifier.UPDATE_SPECS,
force_reinstall=context.force_reinstall or context.force,
should_retry_solve=(repodata_fn != repodata_fns[-1]),
)
except (UnsatisfiableError, SystemExit, SpecsConfigurationConflictError) as e:
# Unsatisfiable package specifications/no such revision/import error
if e.args and 'could not import' in e.args[0]:
raise CondaImportError(str(e))
# we want to fall through without raising if we're not at the end of the list
# of fns. That way, we fall to the next fn.
if is_last_repodata:
raise e
elif not is_last_repodata:
continue # if we hit this, we should retry with next repodata source
else:
# end of the line. Raise the exception
# Unsatisfiable package specifications/no such revision/import error
if e.args and 'could not import' in e.args[0]:
raise CondaImportError(str(e))
raise e
handle_txn(unlink_link_transaction, prefix, args, newenv)
|
54,494 |
def _get_pareto_front_trials_2d(
orig_trials: List[FrozenTrial], directions: List[StudyDirection]
) -> List[FrozenTrial]:
trials = [trial for trial in orig_trials if trial.state == TrialState.COMPLETE]
n_trials = len(trials)
if n_trials == 0:
return []
trials.sort(
key=lambda trial: (
_normalize_value(trial.values[0], directions[0]),
_normalize_value(trial.values[1], directions[1]),
),
)
last_nondominated_trial = trials[0]
pareto_front = [last_nondominated_trial]
for i in range(1, n_trials):
trial = trials[i]
if _dominates(last_nondominated_trial, trial, directions):
continue
pareto_front.append(trial)
last_nondominated_trial = trial
pareto_front.sort(key=lambda trial: trial.number)
return pareto_front
|
def _get_pareto_front_trials_2d(
trials: List[FrozenTrial], directions: List[StudyDirection]
) -> List[FrozenTrial]:
_trials = [trial for trial in orig_trials if trial.state == TrialState.COMPLETE] # `complete_trials` may be more developer friendly.
n_trials = len(trials)
if n_trials == 0:
return []
trials.sort(
key=lambda trial: (
_normalize_value(trial.values[0], directions[0]),
_normalize_value(trial.values[1], directions[1]),
),
)
last_nondominated_trial = trials[0]
pareto_front = [last_nondominated_trial]
for i in range(1, n_trials):
trial = trials[i]
if _dominates(last_nondominated_trial, trial, directions):
continue
pareto_front.append(trial)
last_nondominated_trial = trial
pareto_front.sort(key=lambda trial: trial.number)
return pareto_front
|
43,805 |
def net_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `net flow constraint <https://doi.org/10.1080/0020739X.2010.526248>`__
Hamiltonian.
The net-zero flow constraint is, for all :math:`i`:
.. math:: \sum_{j, (i, j) \in E} x_{ij} = \sum_{j, (j, i) \in E} x_{ji},
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`V` are the graph vertices, :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has a net zero flow.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the net-flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "in_edges") or not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_net_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
def net_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `net flow constraint <https://doi.org/10.1080/0020739X.2010.526248>`__
Hamiltonian.
The net-zero flow constraint is, for all :math:`i`:
.. math:: \sum_{j, (i, j) \in E} x_{ij} = \sum_{j, (j, i) \in E} x_{ji},
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
A set of edges has zero net flow whenever the following Hamiltonian is minimized:
.. math::
\frac{1}{4}\sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`V` are the graph vertices, :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has a net zero flow.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the net-flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "in_edges") or not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_net_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
6,146 |
def isURL(url):
"""
Just a test to check if URL is already given or not
"""
return url.startswith('http') or url.startswith('dip')
|
def isURL(url):
"""
Just a test to check if URL is already given or not
"""
return url.startswith(('http', 'dip'))
|
30,305 |
def test_creaet_indicator_with_none_date():
from PhishLabsIOC import create_indicator_content
files_json = """
{
"attributes": [
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "xyz",
"name": "md5",
"value": "c8092abd8d581750c0530fa1fc8d8318"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "abc",
"name": "filetype",
"value": "application/zip"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "qwe",
"name": "name",
"value": "Baycc.zip"
}
],
"createdAt": "2019-05-14T13:03:45Z",
"updatedAt": "0001-01-01T00:00:00Z",
"falsePositive": false,
"id": "def",
"type": "Attachment",
"value": "c8092abd8d581750c0530fa1fc8d8318"
} """
result = {
'ID': 'def',
'Indicator': 'c8092abd8d581750c0530fa1fc8d8318',
'Type': 'Attachment',
'CreatedAt': '2019-05-14T13:03:45Z',
'UpdatedAt': '',
'FalsePositive': False,
}
indicator = json.loads(files_json)
actual = create_indicator_content(indicator)
assert actual == result
|
def test_create_indicator_with_none_date():
from PhishLabsIOC import create_indicator_content
files_json = """
{
"attributes": [
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "xyz",
"name": "md5",
"value": "c8092abd8d581750c0530fa1fc8d8318"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "abc",
"name": "filetype",
"value": "application/zip"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "qwe",
"name": "name",
"value": "Baycc.zip"
}
],
"createdAt": "2019-05-14T13:03:45Z",
"updatedAt": "0001-01-01T00:00:00Z",
"falsePositive": false,
"id": "def",
"type": "Attachment",
"value": "c8092abd8d581750c0530fa1fc8d8318"
} """
result = {
'ID': 'def',
'Indicator': 'c8092abd8d581750c0530fa1fc8d8318',
'Type': 'Attachment',
'CreatedAt': '2019-05-14T13:03:45Z',
'UpdatedAt': '',
'FalsePositive': False,
}
indicator = json.loads(files_json)
actual = create_indicator_content(indicator)
assert actual == result
|
43,895 |
def atom_basis_data(name, atom):
r"""Generate default basis set parameters for an atom.
This function extracts the default angular momentum and the exponents and contraction
coefficients of Gaussian functions forming a Gaussian Type Orbital (GTO) for a given atom. These
values are taken from the basis set data provided in ``basis_data.py``.
Args:
name (str): name of the basis set
atom (str): atomic symbol of the chemical element
Returns:
list(tuple): tuple containing the angular momentum, the exponents and contraction
coefficients of a basis function
**Example**
>>> params = atom_basis_data('sto-3g', 'H')
>>> print(params)
[((0, 0, 0), [3.425250914, 0.6239137298, 0.168855404], [0.1543289673, 0.5353281423, 0.4446345422])]
"""
basis_sets = {"sto-3g": STO3G}
s = [(0, 0, 0)]
p = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
basis = basis_sets[name][atom]
params = []
for i, j in enumerate(basis["orbitals"]):
if j == "S":
params.append((s[0], basis["exponents"][i], basis["coefficients"][i]))
elif j == "SP":
for term in j:
if term == "S":
params.append((s[0], basis["exponents"][i], basis["coefficients"][i]))
if term == "P":
for l in p:
params.append((l, basis["exponents"][i], basis["coefficients"][i + 1]))
return params
|
def atom_basis_data(name, atom):
r"""Generate default basis set parameters for an atom.
This function extracts the default angular momentum and the exponents and contraction
coefficients of Gaussian functions forming a Gaussian Type Orbital (GTO) for a given atom. These
values are taken from the basis set data provided in ``basis_data.py``.
Args:
name (str): name of the basis set
atom (str): atomic symbol of the chemical element
Returns:
list(tuple): tuple containing the angular momentum, the exponents and contraction
coefficients of a basis function
**Example**
>>> params = atom_basis_data('sto-3g', 'H')
>>> print(params)
[((0, 0, 0), [3.425250914, 0.6239137298, 0.168855404], [0.1543289673, 0.5353281423, 0.4446345422])]
"""
basis_sets = {"sto-3g": STO3G}
s = [(0, 0, 0)]
p = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
basis = basis_sets[name][atom]
params = []
for i, j in enumerate(basis["orbitals"]):
if j == "S":
params.append((s[0], basis["exponents"][i], basis["coefficients"][i]))
elif j == "SP":
for term in j:
if term == "S":
params.append((s[0], basis["exponents"][i], basis["coefficients"][i]))
if term == "P":
for l in p:
params.append((l, basis["exponents"][i], basis["coefficients"][i + 1]))
return params
|
20,538 |
def register_wrapper(fname_src, fname_dest, param, paramregmulti, fname_src_seg='', fname_dest_seg='', fname_src_label='',
fname_dest_label='', fname_mask='', fname_initwarp='', fname_initwarpinv='', identity=False,
interp='linear', fname_output='', fname_output_warp='', path_out='', same_space=False):
"""
Wrapper for image registration.
:param fname_src:
:param fname_dest:
:param param: Class Param(): See definition in sct_register_multimodal
:param paramregmulti: Class ParamregMultiStep(): See definition in this file
:param fname_src_seg:
:param fname_dest_seg:
:param fname_src_label:
:param fname_dest_label:
:param fname_mask:
:param fname_initwarp: str: File name of initial transformation
:param fname_initwarpinv: str: File name of initial inverse transformation
:param identity:
:param interp:
:param fname_output:
:param fname_output_warp:
:param path_out:
:param same_space: Bool: Source and destination images are in the same physical space (i.e. same coordinates).
:return: fname_src2dest, fname_dest2src, fname_output_warp, fname_output_warpinv
"""
# TODO: move interp inside param.
# TODO: merge param inside paramregmulti by having a "global" sets of parameters that apply to all steps
# Extract path, file and extension
path_src, file_src, ext_src = extract_fname(fname_src)
path_dest, file_dest, ext_dest = extract_fname(fname_dest)
# check if source and destination images have the same name (related to issue #373)
# If so, change names to avoid conflict of result files and warns the user
suffix_src, suffix_dest = '_reg', '_reg'
if file_src == file_dest:
suffix_src, suffix_dest = '_src_reg', '_dest_reg'
# define output folder and file name
if fname_output == '':
path_out = '' if not path_out else path_out # output in user's current directory
file_out = file_src + suffix_src
file_out_inv = file_dest + suffix_dest
ext_out = ext_src
else:
path, file_out, ext_out = extract_fname(fname_output)
path_out = path if not path_out else path_out
file_out_inv = file_out + '_inv'
# create temporary folder
path_tmp = tmp_create(basename="register")
printv('\nCopying input data to tmp folder and convert to nii...', param.verbose)
Image(fname_src).save(os.path.join(path_tmp, "src.nii"))
Image(fname_dest).save(os.path.join(path_tmp, "dest.nii"))
if fname_src_seg:
Image(fname_src_seg).save(os.path.join(path_tmp, "src_seg.nii"))
if fname_dest_seg:
Image(fname_dest_seg).save(os.path.join(path_tmp, "dest_seg.nii"))
if fname_src_label:
Image(fname_src_label).save(os.path.join(path_tmp, "src_label.nii"))
Image(fname_dest_label).save(os.path.join(path_tmp, "dest_label.nii"))
if fname_mask != '':
Image(fname_mask).save(os.path.join(path_tmp, "mask.nii.gz"))
# go to tmp folder
curdir = os.getcwd()
os.chdir(path_tmp)
# reorient destination to RPI
Image('dest.nii').change_orientation("RPI").save('dest_RPI.nii')
if fname_dest_seg:
Image('dest_seg.nii').change_orientation("RPI").save('dest_seg_RPI.nii')
if fname_dest_label:
Image('dest_label.nii').change_orientation("RPI").save('dest_label_RPI.nii')
if fname_mask:
# TODO: change output name
Image('mask.nii.gz').change_orientation("RPI").save('mask.nii.gz')
if identity:
# overwrite paramregmulti and only do one identity transformation
step0 = Paramreg(step='0', type='im', algo='syn', metric='MI', iter='0', shrink='1', smooth='0', gradStep='0.5')
paramregmulti = ParamregMultiStep([step0])
# initialize list of warping fields
warp_forward = []
warp_forward_winv = []
warp_inverse = []
warp_inverse_winv = []
generate_warpinv = 1
# initial warping is specified, update list of warping fields and skip step=0
if fname_initwarp:
printv('\nSkip step=0 and replace with initial transformations: ', param.verbose)
printv(' ' + fname_initwarp, param.verbose)
# copy(fname_initwarp, 'warp_forward_0.nii.gz')
warp_forward.append(fname_initwarp)
start_step = 1
if fname_initwarpinv:
warp_inverse.append(fname_initwarpinv)
else:
printv('\nWARNING: No initial inverse warping field was specified, therefore the registration will be '
'src->dest only, and the inverse warping field will NOT be generated.', param.verbose, 'warning')
generate_warpinv = 0
else:
if same_space:
start_step = 1
else:
start_step = 0
# loop across registration steps
for i_step in range(start_step, len(paramregmulti.steps)):
step = paramregmulti.steps[str(i_step)]
printv('\n--\nESTIMATE TRANSFORMATION FOR STEP #' + str(i_step), param.verbose)
# identify which is the src and dest
if step.type == 'im':
src = ['src.nii']
dest = ['dest_RPI.nii']
interp_step = ['spline']
elif step.type == 'seg':
src = ['src_seg.nii']
dest = ['dest_seg_RPI.nii']
interp_step = ['nn']
elif step.type == 'imseg':
src = ['src.nii', 'src_seg.nii']
dest = ['dest_RPI.nii', 'dest_seg_RPI.nii']
interp_step = ['spline', 'nn']
elif step.type == 'label':
src = ['src_label.nii']
dest = ['dest_label_RPI.nii']
interp_step = ['nn']
else:
printv('ERROR: Wrong image type: {}'.format(step.type), 1, 'error')
# if step>0, apply warp_forward_concat to the src image to be used
if (not same_space and i_step > 0) or (same_space and i_step > 1):
printv('\nApply transformation from previous step', param.verbose)
for ifile in range(len(src)):
sct_apply_transfo.main(argv=[
'-i', src[ifile],
'-d', dest[ifile],
'-o', add_suffix(src[ifile], '_reg'),
'-x', interp_step[ifile],
'-w'] + warp_forward
)
src[ifile] = add_suffix(src[ifile], '_reg')
# register src --> dest
warp_forward_out, warp_inverse_out = register(src=src, dest=dest, step=step, param=param)
# deal with transformations with "-" as prefix. They should be inverted with calling isct_ComposeMultiTransform.
if warp_forward_out[0] == "-":
warp_forward_out = warp_forward_out[1:]
warp_forward_winv.append(warp_forward_out)
if warp_inverse_out[0] == "-":
warp_inverse_out = warp_inverse_out[1:]
warp_inverse_winv.append(warp_inverse_out)
# update list of forward/inverse transformations
warp_forward.append(warp_forward_out)
warp_inverse.insert(0, warp_inverse_out)
# Concatenate transformations
printv('\nConcatenate transformations...', param.verbose)
# if a warping field needs to be inverted, remove it from warp_forward
warp_forward = [f for f in warp_forward if f not in warp_forward_winv]
dimensionality = len(Image("dest.nii").hdr.get_data_shape())
cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_src2dest.nii.gz', '-R', 'dest.nii']
if warp_forward_winv:
cmd.append('-i')
cmd += reversed(warp_forward_winv)
if warp_forward:
cmd += reversed(warp_forward)
status, output = run_proc(cmd, is_sct_binary=True)
if status != 0:
raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
# if an inverse warping field needs to be inverted, remove it from warp_inverse_winv
warp_inverse = [f for f in warp_inverse if f not in warp_inverse_winv]
cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_dest2src.nii.gz', '-R', 'src.nii']
dimensionality = len(Image("dest.nii").hdr.get_data_shape())
if warp_inverse_winv:
cmd.append('-i')
cmd += reversed(warp_inverse_winv)
if warp_inverse:
cmd += reversed(warp_inverse)
status, output = run_proc(cmd, is_sct_binary=True)
if status != 0:
raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
# TODO: make the following code optional (or move it to sct_register_multimodal)
# Apply warping field to src data
printv('\nApply transfo source --> dest...', param.verbose)
sct_apply_transfo.main(argv=[
'-i', 'src.nii',
'-d', 'dest.nii',
'-w', 'warp_src2dest.nii.gz',
'-o', 'src_reg.nii',
'-x', interp])
if generate_warpinv:
printv('\nApply transfo dest --> source...', param.verbose)
sct_apply_transfo.main(argv=[
'-i', 'dest.nii',
'-d', 'src.nii',
'-w', 'warp_dest2src.nii.gz',
'-o', 'dest_reg.nii',
'-x', interp])
# come back
os.chdir(curdir)
# Generate output files
# ------------------------------------------------------------------------------------------------------------------
printv('\nGenerate output files...', param.verbose)
# generate src -> dest output files
fname_src2dest = os.path.join(path_out, file_out + ext_out)
generate_output_file(os.path.join(path_tmp, "src_reg.nii"), fname_src2dest, param.verbose)
if fname_output_warp == '':
fname_output_warp = os.path.join(path_out, 'warp_' + file_src + '2' + file_dest + '.nii.gz')
generate_output_file(os.path.join(path_tmp, "warp_src2dest.nii.gz"), fname_output_warp, param.verbose)
# generate dest -> sec output files
if generate_warpinv:
fname_dest2src = os.path.join(path_out, file_out_inv + ext_dest)
generate_output_file(os.path.join(path_tmp, "dest_reg.nii"), fname_dest2src, param.verbose)
fname_output_warpinv = os.path.join(path_out, 'warp_' + file_dest + '2' + file_src + '.nii.gz')
generate_output_file(os.path.join(path_tmp, "warp_dest2src.nii.gz"), fname_output_warpinv, param.verbose)
else:
# we skip generating files if there is no inverse warping field (i.e. we're doing a one-way registration)
fname_dest2src = None
fname_output_warpinv = None
# Delete temporary files
if param.remove_temp_files:
printv('\nRemove temporary files...', param.verbose)
rmtree(path_tmp, verbose=param.verbose)
return fname_src2dest, fname_dest2src, fname_output_warp, fname_output_warpinv
|
def register_wrapper(fname_src, fname_dest, param, paramregmulti, fname_src_seg='', fname_dest_seg='', fname_src_label='',
fname_dest_label='', fname_mask='', fname_initwarp='', fname_initwarpinv='', identity=False,
interp='linear', fname_output='', fname_output_warp='', path_out='', same_space=False):
"""
Wrapper for image registration.
:param fname_src:
:param fname_dest:
:param param: Class Param(): See definition in sct_register_multimodal
:param paramregmulti: Class ParamregMultiStep(): See definition in this file
:param fname_src_seg:
:param fname_dest_seg:
:param fname_src_label:
:param fname_dest_label:
:param fname_mask:
:param fname_initwarp: str: File name of initial transformation
:param fname_initwarpinv: str: File name of initial inverse transformation
:param identity:
:param interp:
:param fname_output:
:param fname_output_warp:
:param path_out:
:param same_space: Bool: Source and destination images are in the same physical space (i.e. same coordinates).
:return: fname_src2dest, fname_dest2src, fname_output_warp, fname_output_warpinv
"""
# TODO: move interp inside param.
# TODO: merge param inside paramregmulti by having a "global" sets of parameters that apply to all steps
# Extract path, file and extension
path_src, file_src, ext_src = extract_fname(fname_src)
path_dest, file_dest, ext_dest = extract_fname(fname_dest)
# check if source and destination images have the same name (related to issue #373)
# If so, change names to avoid conflict of result files and warns the user
suffix_src, suffix_dest = '_reg', '_reg'
if file_src == file_dest:
suffix_src, suffix_dest = '_src_reg', '_dest_reg'
# define output folder and file name
if fname_output == '':
path_out = '' if not path_out else path_out # output in user's current directory
file_out = file_src + suffix_src
file_out_inv = file_dest + suffix_dest
ext_out = ext_src
else:
path, file_out, ext_out = extract_fname(fname_output)
path_out = path if not path_out else path_out
file_out_inv = file_out + '_inv'
# create temporary folder
path_tmp = tmp_create(basename="register")
printv('\nCopying input data to tmp folder and convert to nii...', param.verbose)
Image(fname_src).save(os.path.join(path_tmp, "src.nii"))
Image(fname_dest).save(os.path.join(path_tmp, "dest.nii"))
if fname_src_seg:
Image(fname_src_seg).save(os.path.join(path_tmp, "src_seg.nii"))
if fname_dest_seg:
Image(fname_dest_seg).save(os.path.join(path_tmp, "dest_seg.nii"))
if fname_src_label:
Image(fname_src_label).save(os.path.join(path_tmp, "src_label.nii"))
Image(fname_dest_label).save(os.path.join(path_tmp, "dest_label.nii"))
if fname_mask != '':
Image(fname_mask).save(os.path.join(path_tmp, "mask.nii.gz"))
# go to tmp folder
curdir = os.getcwd()
os.chdir(path_tmp)
# reorient destination to RPI
Image('dest.nii').change_orientation("RPI").save('dest_RPI.nii')
if fname_dest_seg:
Image('dest_seg.nii').change_orientation("RPI").save('dest_seg_RPI.nii')
if fname_dest_label:
Image('dest_label.nii').change_orientation("RPI").save('dest_label_RPI.nii')
if fname_mask:
# TODO: change output name
Image('mask.nii.gz').change_orientation("RPI").save('mask.nii.gz')
if identity:
# overwrite paramregmulti and only do one identity transformation
step0 = Paramreg(step='0', type='im', algo='syn', metric='MI', iter='0', shrink='1', smooth='0', gradStep='0.5')
paramregmulti = ParamregMultiStep([step0])
# initialize list of warping fields
warp_forward = []
warp_forward_winv = []
warp_inverse = []
warp_inverse_winv = []
generate_warpinv = 1
# initial warping is specified, update list of warping fields and skip step=0
if fname_initwarp:
printv('\nSkip step=0 and replace with initial transformations: ', param.verbose)
printv(' ' + fname_initwarp, param.verbose)
# copy(fname_initwarp, 'warp_forward_0.nii.gz')
warp_forward.append(fname_initwarp)
start_step = 1
if fname_initwarpinv:
warp_inverse.append(fname_initwarpinv)
else:
printv('\nWARNING: No initial inverse warping field was specified, therefore the registration will be '
'src->dest only, and the inverse warping field will NOT be generated.', param.verbose, 'warning')
generate_warpinv = 0
else:
if same_space:
start_step = 1
else:
start_step = 0
# loop across registration steps
for i_step in range(start_step, len(paramregmulti.steps)):
step = paramregmulti.steps[str(i_step)]
printv('\n--\nESTIMATE TRANSFORMATION FOR STEP #' + str(i_step), param.verbose)
# identify which is the src and dest
if step.type == 'im':
src = ['src.nii']
dest = ['dest_RPI.nii']
interp_step = ['spline']
elif step.type == 'seg':
src = ['src_seg.nii']
dest = ['dest_seg_RPI.nii']
interp_step = ['nn']
elif step.type == 'imseg':
src = ['src.nii', 'src_seg.nii']
dest = ['dest_RPI.nii', 'dest_seg_RPI.nii']
interp_step = ['spline', 'nn']
elif step.type == 'label':
src = ['src_label.nii']
dest = ['dest_label_RPI.nii']
interp_step = ['nn']
else:
printv('ERROR: Wrong image type: {}'.format(step.type), 1, 'error')
# if step>0, apply warp_forward_concat to the src image to be used
if (not same_space and i_step > 0) or (same_space and i_step > 1):
printv('\nApply transformation from previous step', param.verbose)
for ifile in range(len(src)):
sct_apply_transfo.main(argv=[
'-i', src[ifile],
'-d', dest[ifile],
'-o', add_suffix(src[ifile], '_reg'),
'-x', interp_step[ifile],
'-w'] + warp_forward
)
src[ifile] = add_suffix(src[ifile], '_reg')
# register src --> dest
warp_forward_out, warp_inverse_out = register(src=src, dest=dest, step=step, param=param)
# deal with transformations with "-" as prefix. They should be inverted with calling isct_ComposeMultiTransform.
if warp_forward_out[0] == "-":
warp_forward_out = warp_forward_out[1:]
warp_forward_winv.append(warp_forward_out)
if warp_inverse_out[0] == "-":
warp_inverse_out = warp_inverse_out[1:]
warp_inverse_winv.append(warp_inverse_out)
# update list of forward/inverse transformations
warp_forward.append(warp_forward_out)
warp_inverse.insert(0, warp_inverse_out)
# Concatenate transformations
printv('\nConcatenate transformations...', param.verbose)
# if a warping field needs to be inverted, remove it from warp_forward
warp_forward = [f for f in warp_forward if f not in warp_forward_winv]
dimensionality = len(Image("dest.nii").hdr.get_data_shape())
cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_src2dest.nii.gz', '-R', 'dest.nii']
if warp_forward_winv:
cmd.append('-i')
cmd += reversed(warp_forward_winv)
if warp_forward:
cmd += reversed(warp_forward)
status, output = run_proc(cmd, is_sct_binary=True)
if status != 0:
raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
# if an inverse warping field needs to be inverted, remove it from warp_inverse_winv
warp_inverse = [f for f in warp_inverse if f not in warp_inverse_winv]
cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_dest2src.nii.gz', '-R', 'src.nii']
dimensionality = len(Image("dest.nii").hdr.get_data_shape())
if warp_inverse_winv:
cmd.append('-i')
cmd += reversed(warp_inverse_winv)
if warp_inverse:
cmd += reversed(warp_inverse)
status, output = run_proc(cmd, is_sct_binary=True)
if status != 0:
raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
# TODO: make the following code optional (or move it to sct_register_multimodal)
# Apply warping field to src data
printv('\nApply transfo source --> dest...', param.verbose)
sct_apply_transfo.main(argv=[
'-i', 'src.nii',
'-d', 'dest.nii',
'-w', 'warp_src2dest.nii.gz',
'-o', 'src_reg.nii',
'-x', interp])
if generate_warpinv:
printv('\nApply transfo dest --> source...', param.verbose)
sct_apply_transfo.main(argv=[
'-i', 'dest.nii',
'-d', 'src.nii',
'-w', 'warp_dest2src.nii.gz',
'-o', 'dest_reg.nii',
'-x', interp])
# come back
os.chdir(curdir)
# Generate output files
# ------------------------------------------------------------------------------------------------------------------
printv('\nGenerate output files...', param.verbose)
# generate src -> dest output files
fname_src2dest = os.path.join(path_out, file_out + ext_out)
generate_output_file(os.path.join(path_tmp, "src_reg.nii"), fname_src2dest, param.verbose)
if fname_output_warp == '':
fname_output_warp = os.path.join(path_out, 'warp_' + file_src + '2' + file_dest + '.nii.gz')
generate_output_file(os.path.join(path_tmp, "warp_src2dest.nii.gz"), fname_output_warp, param.verbose)
# generate dest -> src output files
if generate_warpinv:
fname_dest2src = os.path.join(path_out, file_out_inv + ext_dest)
generate_output_file(os.path.join(path_tmp, "dest_reg.nii"), fname_dest2src, param.verbose)
fname_output_warpinv = os.path.join(path_out, 'warp_' + file_dest + '2' + file_src + '.nii.gz')
generate_output_file(os.path.join(path_tmp, "warp_dest2src.nii.gz"), fname_output_warpinv, param.verbose)
else:
# we skip generating files if there is no inverse warping field (i.e. we're doing a one-way registration)
fname_dest2src = None
fname_output_warpinv = None
# Delete temporary files
if param.remove_temp_files:
printv('\nRemove temporary files...', param.verbose)
rmtree(path_tmp, verbose=param.verbose)
return fname_src2dest, fname_dest2src, fname_output_warp, fname_output_warpinv
|
23,082 |
def merge_chunk(lhs, *args, **kwargs):
empty_index_dtype = kwargs.pop("empty_index_dtype", None)
categorical_columns = kwargs.pop("categorical_columns", None)
rhs, *args = args
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
if categorical_columns is not None and PANDAS_GT_100:
for col in categorical_columns:
left = None
right = None
if col in lhs:
left = lhs[col]
elif col == kwargs.get("right_on", None) and left_index:
if is_categorical_dtype(lhs.index):
left = lhs.index
if col in rhs:
right = rhs[col]
elif col == kwargs.get("left_on", None) and right_index:
if is_categorical_dtype(rhs.index):
right = rhs.index
dtype = "category"
if left is not None and right is not None:
dtype = union_categoricals(
[left.astype("category").values, right.astype("category").values]
).dtype
if left is not None:
if isinstance(left, pd.Index):
lhs.index = left.astype(dtype)
else:
lhs.assign(**{col: left.astype(dtype)})
if right is not None:
if isinstance(right, pd.Index):
rhs.index = right.astype(dtype)
else:
rhs.assign(**{col: right.astype(dtype)})
out = lhs.merge(rhs, *args, **kwargs)
# Workaround pandas bug where if the output result of a merge operation is
# an empty dataframe, the output index is `int64` in all cases, regardless
# of input dtypes.
if len(out) == 0 and empty_index_dtype is not None:
out.index = out.index.astype(empty_index_dtype)
return out
|
def merge_chunk(lhs, *args, **kwargs):
empty_index_dtype = kwargs.pop("empty_index_dtype", None)
categorical_columns = kwargs.pop("categorical_columns", None)
rhs, *args = args
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
if categorical_columns is not None and PANDAS_GT_100:
for col in categorical_columns:
left = None
right = None
if col in lhs:
left = lhs[col]
elif col == kwargs.get("right_on", None) and left_index:
if is_categorical_dtype(lhs.index):
left = lhs.index
if col in rhs:
right = rhs[col]
elif col == kwargs.get("left_on", None) and right_index:
if is_categorical_dtype(rhs.index):
right = rhs.index
dtype = "category"
if left is not None and right is not None:
dtype = union_categoricals(
[left.astype("category").values, right.astype("category").values]
).dtype
if left is not None:
if isinstance(left, pd.Index):
lhs.index = left.astype(dtype)
else:
lhs.assign(**{col: left.astype(dtype)})
if right is not None:
if isinstance(right, pd.Index):
rhs.index = right.astype(dtype)
else:
rhs = rhs.assign(**{col: right.astype(dtype)})
out = lhs.merge(rhs, *args, **kwargs)
# Workaround pandas bug where if the output result of a merge operation is
# an empty dataframe, the output index is `int64` in all cases, regardless
# of input dtypes.
if len(out) == 0 and empty_index_dtype is not None:
out.index = out.index.astype(empty_index_dtype)
return out
|
31,375 |
def pipeline_query_command(client: Client, collection: str, pipeline: str, limit: str = '50', offset: str = '0',
**kwargs) -> Tuple[str, dict, list]:
limit = int(limit)
offset = int(offset)
try:
json_pipeline = validate_json_objects(json.loads(pipeline))
raw_response = client.pipeline_query(
collection=collection,
pipeline=json_pipeline,
)
except JSONDecodeError:
raise DemistoException('The `pipeline` argument is not a valid json.')
if raw_response:
raw_response = raw_response if len(raw_response) <= limit else raw_response[offset:(offset + limit)]
readable_outputs = tableToMarkdown(
f'Total of {len(raw_response)} entries were found in MongoDB collection `{collection}` '
f'with pipeline: {pipeline}:',
t=[entry.get('_id') for entry in raw_response],
headers=['_id'],
)
outputs_objects = list()
for item in raw_response:
item.update({'collection': collection})
outputs_objects.append(item)
outputs = {CONTEXT_KEY: outputs_objects}
return readable_outputs, outputs, raw_response
else:
return 'MongoDB: No results found', {}, raw_response
|
def pipeline_query_command(client: Client, collection: str, pipeline: str, limit: str = '50', offset: str = '0',
**kwargs) -> Tuple[str, dict, list]:
limit = int(limit)
offset = int(offset)
try:
json_pipeline = validate_json_objects(json.loads(pipeline))
raw_response = client.pipeline_query(
collection=collection,
pipeline=json_pipeline,
)
except JSONDecodeError:
raise DemistoException('The `pipeline` argument is not a valid json.')
if raw_response:
raw_response = raw_response if len(raw_response) <= limit else raw_response[offset:(offset + limit)]
readable_outputs = tableToMarkdown(
f'Total of {len(raw_response)} entries were found in MongoDB collection `{collection}` '
f'with pipeline: {pipeline}:',
t=[entry.get('_id') for entry in raw_response],
headers=['_id'],
)
outputs = {CONTEXT_KEY: [item.update({'collection': collection}) for item in raw_response]}
return readable_outputs, outputs, raw_response
else:
return 'MongoDB: No results found', {}, raw_response
|
42,902 |
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by ``alpha`` and ``beta`` are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
24,702 |
def _declare_qos_parameteres(
entity_type: Union[Type[Publisher], Type[Subscription]],
node: 'Node',
topic_name: Text,
qos: QoSProfile,
options: QoSOverridingOptions
) -> QoSProfile:
"""
Declare qos parameters for a Publisher or a Subscription.
:param entity_type: Either `rclpy.node.Publisher` or `rclpy.node.Subscription`.
:param node: Node used to declare the parameters.
:param topic_name: Topic name of the entity being created.
:param qos: Default qos settings of the entity being created, that will be overriden
with the user provided qos parameter overrides.
:param options: Options that indicates which parameters are going to be declared.
"""
if not issubclass(entity_type, (Publisher, Subscription)):
raise TypeError('Argument `entity_type` should be a subclass of Publisher or Subscription')
entity_type_str = 'publisher' if issubclass(entity_type, Publisher) else Subscription
id_suffix = '' if options.entity_id is None else f'_{options.entity_id}'
name = f'qos_overrides.{topic_name}.{entity_type_str}{id_suffix}.' '{}'
description = '{}' f' for {entity_type_str} `{topic_name}` with id `{options.entity_id}`'
allowed_policies = _get_allowed_policies(entity_type)
for policy in options.policy_kinds:
if policy not in allowed_policies:
continue
policy_name = policy.name.lower()
descriptor = ParameterDescriptor()
descriptor.description = description.format(policy_name)
descriptor.read_only = True
param = node.declare_parameter(
name.format(policy_name),
_get_qos_policy_parameter(qos, policy),
descriptor)
_override_qos_policy_with_param(qos, policy, param)
if options.callback is not None and not options.callback(qos):
raise InvalidQosOverridesError(
description.format('Provided qos overrides') + ', are not valid')
|
def _declare_qos_parameteres(
entity_type: Union[Type[Publisher], Type[Subscription]],
node: 'Node',
topic_name: Text,
qos: QoSProfile,
options: QoSOverridingOptions
) -> QoSProfile:
"""
Declare qos parameters for a Publisher or a Subscription.
:param entity_type: Either `rclpy.node.Publisher` or `rclpy.node.Subscription`.
:param node: Node used to declare the parameters.
:param topic_name: Topic name of the entity being created.
:param qos: Default qos settings of the entity being created, that will be overriden
with the user provided qos parameter overrides.
:param options: Options that indicates which parameters are going to be declared.
"""
if not issubclass(entity_type, (Publisher, Subscription)):
raise TypeError('Argument `entity_type` should be a subclass of Publisher or Subscription')
entity_type_str = 'publisher' if issubclass(entity_type, Publisher) else Subscription
id_suffix = '' if options.entity_id is None else f'_{options.entity_id}'
name = f'qos_overrides.{topic_name}.{entity_type_str}{id_suffix}.' '{}'
description = '{}' f' for {entity_type_str} `{topic_name}` with id `{options.entity_id}`'
allowed_policies = _get_allowed_policies(entity_type)
for policy in options.policy_kinds:
if policy not in allowed_policies:
continue
policy_name = policy.name.lower()
descriptor = ParameterDescriptor()
descriptor.description = description.format(policy_name)
descriptor.read_only = True
param = node.declare_parameter(
name.format(policy_name),
_get_qos_policy_parameter(qos, policy),
descriptor)
_override_qos_policy_with_param(qos, policy, param)
if options.callback is not None and not options.callback(qos):
raise InvalidQosOverridesError(
description.format('Provided QoS overrides') + ', are not valid')
|
23,668 |
def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None):
"""
Smooth out short-term model transience using the Prilliman model [1]_.
The Prilliman et al. model applies an exponential moving average to
the output of a steady-state cell temperature model to account for a
module's thermal inertia and smooth out the cell temperature's response
to changing weather conditions.
.. warning::
This implementation requires the time series inputs to be regularly
sampled in time. Data with irregular time steps should be resampled
prior to using this function.
Parameters
----------
temp_cell : pandas Series
Cell temperature modeled with steady-state assumptions [C]
wind_speed : pandas Series
Wind speed, adjusted to correspond to array height [m/s]
unit_mass : float, default 11.1
Total mass of module divided by its one-sided surface area [kg/m^2]
coefficients : 4-element list-like, optional
Values for coefficients a_0–a_3 from [1]_
Returns
-------
temp_cell : pandas Series
Smoothed version of the input cell temperature [C]
Notes
-----
This smoothing model was developed and validated using the SAPM
model for the steady-state input.
References
----------
.. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani,
"Transient Weighted Moving-Average Model of Photovoltaic Module
Back-Surface Temperature," IEEE Journal of Photovoltaics, 2020.
:doi:`10.1109/JPHOTOV.2020.2992351`
"""
# TODO: check inputs to ensure regular spacing?
time_step = (temp_cell.index[1] - temp_cell.index[0]).total_seconds()
if time_step >= 1200:
# too coarsely sampled for smoothing to be relevant
return temp_cell
window = min(int(1200 / time_step), # time series > 20 minutes
len(temp_cell)) # time series < 20 minutes
# prefix with NaNs so that the rolling window is "full",
# even for the first actual value:
prefix = np.full(window, np.nan)
temp_cell_prefixed = np.append(prefix, temp_cell.values)
# get one row per 20-minute window
H = scipy.linalg.hankel(np.arange(window),
np.arange(window - 1, len(temp_cell_prefixed)))
subsets = temp_cell_prefixed[H].T
# calculate weights for the values in each window
if coefficients is not None:
a = coefficients
else:
# values from [1], Table II
a = [0.0046, 0.00046, -0.00023, -1.6e-5]
wind_speed = wind_speed.values
P = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass
timedeltas = np.arange(window, 0, -1) * time_step
weights = np.exp(-P[:, np.newaxis] * timedeltas)
# set weights corresponding to the prefix values to zero; otherwise the
# denominator of the weighted average below would be wrong
mask_idx = np.triu_indices(window)
np.fliplr(weights)[mask_idx] = 0
# change the first row of weights from zero to nan -- this is a
# trick to prevent div by zero warning when dividing by summed weights
weights[0, :] = np.nan
# finally, take the weighted average of each window
numerator = np.nansum(subsets[:-1] * weights, axis=1)
denominator = np.sum(weights, axis=1)
smoothed = numerator / denominator
smoothed[0] = temp_cell.values[0]
smoothed = pd.Series(smoothed, index=temp_cell.index)
return smoothed
|
def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None):
"""
Smooth out short-term model transience using the Prilliman model [1]_.
The Prilliman et al. model applies an exponential moving average to
the output of a steady-state cell temperature model to account for a
module's thermal inertia and smooth out the cell temperature's response
to changing weather conditions.
.. warning::
This implementation requires the time series inputs to be regularly
sampled in time. Data with irregular time steps should be resampled
prior to using this function.
Parameters
----------
temp_cell : pandas Series with DatetimeIndex
Cell temperature modeled with steady-state assumptions [C]
wind_speed : pandas Series
Wind speed, adjusted to correspond to array height [m/s]
unit_mass : float, default 11.1
Total mass of module divided by its one-sided surface area [kg/m^2]
coefficients : 4-element list-like, optional
Values for coefficients a_0–a_3 from [1]_
Returns
-------
temp_cell : pandas Series
Smoothed version of the input cell temperature [C]
Notes
-----
This smoothing model was developed and validated using the SAPM
model for the steady-state input.
References
----------
.. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani,
"Transient Weighted Moving-Average Model of Photovoltaic Module
Back-Surface Temperature," IEEE Journal of Photovoltaics, 2020.
:doi:`10.1109/JPHOTOV.2020.2992351`
"""
# TODO: check inputs to ensure regular spacing?
time_step = (temp_cell.index[1] - temp_cell.index[0]).total_seconds()
if time_step >= 1200:
# too coarsely sampled for smoothing to be relevant
return temp_cell
window = min(int(1200 / time_step), # time series > 20 minutes
len(temp_cell)) # time series < 20 minutes
# prefix with NaNs so that the rolling window is "full",
# even for the first actual value:
prefix = np.full(window, np.nan)
temp_cell_prefixed = np.append(prefix, temp_cell.values)
# get one row per 20-minute window
H = scipy.linalg.hankel(np.arange(window),
np.arange(window - 1, len(temp_cell_prefixed)))
subsets = temp_cell_prefixed[H].T
# calculate weights for the values in each window
if coefficients is not None:
a = coefficients
else:
# values from [1], Table II
a = [0.0046, 0.00046, -0.00023, -1.6e-5]
wind_speed = wind_speed.values
P = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass
timedeltas = np.arange(window, 0, -1) * time_step
weights = np.exp(-P[:, np.newaxis] * timedeltas)
# set weights corresponding to the prefix values to zero; otherwise the
# denominator of the weighted average below would be wrong
mask_idx = np.triu_indices(window)
np.fliplr(weights)[mask_idx] = 0
# change the first row of weights from zero to nan -- this is a
# trick to prevent div by zero warning when dividing by summed weights
weights[0, :] = np.nan
# finally, take the weighted average of each window
numerator = np.nansum(subsets[:-1] * weights, axis=1)
denominator = np.sum(weights, axis=1)
smoothed = numerator / denominator
smoothed[0] = temp_cell.values[0]
smoothed = pd.Series(smoothed, index=temp_cell.index)
return smoothed
|
52,228 |
def service_status(names=[]):
"""
Show status information about one or more services (all by default)
Keyword argument:
names -- Services name to show
"""
services = _get_services()
# If function was called with a specific list of service
if names != []:
# If user wanna check the status of a single service
if isinstance(names, str):
names = [names]
# Validate service names requested
for name in names:
if name not in services.keys():
raise YunohostError('service_unknown', service=name)
# Filter only requested servivces
services = {k: v for k, v in services.items() if k in names}
result = {}
for name, infos in services.items():
# this "service" isn't a service actually so we skip it
#
# the historical reason is because regenconf has been hacked into the
# service part of YunoHost will in some situation we need to regenconf
# for things that aren't services
# the hack was to add fake services...
# we need to extract regenconf from service at some point, also because
# some app would really like to use it
if infos.get("status", "") is None:
continue
systemd_service = infos.get("actual_systemd_service", name)
status = _get_service_information_from_systemd(systemd_service)
if status is None:
logger.error("Failed to get status information via dbus for service %s, systemctl didn't recognize this service ('NoSuchUnit')." % systemd_service)
result[name] = {
'status': "unknown",
'start_on_boot': "unknown",
'last_state_change': "unknown",
'description': "Error: failed to get information for this service, it doesn't exists for systemd",
'configuration': "unknown",
}
else:
translation_key = "service_description_%s" % name
if "description" in infos is not None:
description = infos.get("description")
else:
description = m18n.n(translation_key)
# that mean that we don't have a translation for this string
# that's the only way to test for that for now
# if we don't have it, uses the one provided by systemd
if description == translation_key:
description = str(status.get("Description", ""))
result[name] = {
'status': str(status.get("SubState", "unknown")),
'start_on_boot': str(status.get("UnitFileState", "unknown")),
'last_state_change': "unknown",
'description': description,
'configuration': "unknown",
}
# Fun stuff™ : to obtain the enabled/disabled status for sysv services,
# gotta do this ... cf code of /lib/systemd/systemd-sysv-install
if result[name]["start_on_boot"] == "generated":
result[name]["start_on_boot"] = "enabled" if glob("/etc/rc[S5].d/S??" + name) else "disabled"
elif os.path.exists("/etc/systemd/system/multi-user.target.wants/%s.service" % name):
result[name]["start_on_boot"] = "enabled"
if "StateChangeTimestamp" in status:
result[name]['last_state_change'] = datetime.utcfromtimestamp(status["StateChangeTimestamp"] / 1000000)
# 'test_status' is an optional field to test the status of the service using a custom command
if "test_status" in infos:
p = subprocess.Popen(infos["test_status"],
shell=True,
executable='/bin/bash',
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
result[name]["status"] = "running" if p.returncode == 0 else "failed"
# 'test_status' is an optional field to test the status of the service using a custom command
if "test_conf" in infos:
p = subprocess.Popen(infos["test_conf"],
shell=True,
executable='/bin/bash',
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = p.communicate()
if p.returncode == 0:
result[name]["configuration"] = "valid"
else:
result[name]["configuration"] = "broken"
result[name]["configuration-details"] = out.strip().split("\n")
if len(names) == 1:
return result[names[0]]
return result
|
def service_status(names=[]):
"""
Show status information about one or more services (all by default)
Keyword argument:
names -- Services name to show
"""
services = _get_services()
# If function was called with a specific list of service
if names != []:
# If user wanna check the status of a single service
if isinstance(names, str):
names = [names]
# Validate service names requested
for name in names:
if name not in services.keys():
raise YunohostError('service_unknown', service=name)
# Filter only requested servivces
services = {k: v for k, v in services.items() if k in names}
result = {}
for name, infos in services.items():
# this "service" isn't a service actually so we skip it
#
# the historical reason is because regenconf has been hacked into the
# service part of YunoHost will in some situation we need to regenconf
# for things that aren't services
# the hack was to add fake services...
# we need to extract regenconf from service at some point, also because
# some app would really like to use it
if infos.get("status", "") is None:
continue
systemd_service = infos.get("actual_systemd_service", name)
status = _get_service_information_from_systemd(systemd_service)
if status is None:
logger.error("Failed to get status information via dbus for service %s, systemctl didn't recognize this service ('NoSuchUnit')." % systemd_service)
result[name] = {
'status': "unknown",
'start_on_boot': "unknown",
'last_state_change': "unknown",
'description': "Error: failed to get information for this service, it doesn't exists for systemd",
'configuration': "unknown",
}
else:
translation_key = "service_description_%s" % name
description = infos.get("description")
if not description:
description = m18n.n(translation_key)
# that mean that we don't have a translation for this string
# that's the only way to test for that for now
# if we don't have it, uses the one provided by systemd
if description == translation_key:
description = str(status.get("Description", ""))
result[name] = {
'status': str(status.get("SubState", "unknown")),
'start_on_boot': str(status.get("UnitFileState", "unknown")),
'last_state_change': "unknown",
'description': description,
'configuration': "unknown",
}
# Fun stuff™ : to obtain the enabled/disabled status for sysv services,
# gotta do this ... cf code of /lib/systemd/systemd-sysv-install
if result[name]["start_on_boot"] == "generated":
result[name]["start_on_boot"] = "enabled" if glob("/etc/rc[S5].d/S??" + name) else "disabled"
elif os.path.exists("/etc/systemd/system/multi-user.target.wants/%s.service" % name):
result[name]["start_on_boot"] = "enabled"
if "StateChangeTimestamp" in status:
result[name]['last_state_change'] = datetime.utcfromtimestamp(status["StateChangeTimestamp"] / 1000000)
# 'test_status' is an optional field to test the status of the service using a custom command
if "test_status" in infos:
p = subprocess.Popen(infos["test_status"],
shell=True,
executable='/bin/bash',
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
result[name]["status"] = "running" if p.returncode == 0 else "failed"
# 'test_status' is an optional field to test the status of the service using a custom command
if "test_conf" in infos:
p = subprocess.Popen(infos["test_conf"],
shell=True,
executable='/bin/bash',
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = p.communicate()
if p.returncode == 0:
result[name]["configuration"] = "valid"
else:
result[name]["configuration"] = "broken"
result[name]["configuration-details"] = out.strip().split("\n")
if len(names) == 1:
return result[names[0]]
return result
|
42,794 |
def _flag_missing_timestamps(
df: pd.DataFrame,
frequency: str,
column_name: str,
first_time_stamp: pd.Timestamp,
last_time_stamp: pd.Timestamp,
) -> namedtuple:
"""
Utility function to test if input data frame is missing any timestamps relative to expected timestamps
generated based on the first_time_stamp, last_time_stamp and frequency.
:param pd.DataFrame df: data frame which needs to be tested for missing timestamps
:param str frequency: frequency i.e. sampling frequency of the data, expressed in seconds. A list of acceptable
frequency strings are available here
(https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases)
:param str column_name: name of the column which has time series if not the index.
:param pd.Timestamp first_time_stamp: timestamp at which the time_series is expected to start from.
:param pd.Timestamp last_time_stamp: timestamp at which the time_series is expected to end with.
:return: namedtuple with 3 attributes namely flag, raw_data and new_index
1. flag - boolean set to True if there are missing timestamps, else set to False
2. raw_data - input data frame as is without any modifications
3. new_index - pd.DateTimeIndex that can be used to set the new index, defaults to None, assigned a value only
when flag is set to True
:rtype: namedtuple
"""
# Declare a named tuple to hold results
MissingTimeStampFlag = namedtuple('MissingTimeStampFlag', ['flag', 'raw_data', 'new_index'])
result = {
'flag': None,
'raw_data': df.copy(deep=True),
'new_index': None
}
# Generate expected timestamps
expected_timestamps = pd.date_range(start=first_time_stamp, end=last_time_stamp, frequency=frequency)
# Get actual timestamps
if column_name:
df.set_index(column_name, inplace=True)
df.sort_index(inplace=True)
actual_timestamps = df.index.values
# Check if they are the same
comparison_index = expected_timestamps.difference(actual_timestamps)
if comparison_index.empty:
result['flag'] = True
result['new_index'] = expected_timestamps
else:
result['flag'] = False
# Return the result as a Named Tuple
return MissingTimeStampFlag._make(result)
|
def _flag_missing_timestamps(
df: pd.DataFrame,
frequency: str,
column_name: str,
first_time_stamp: pd.Timestamp,
last_time_stamp: pd.Timestamp,
) -> namedtuple:
"""
Utility function to test if input data frame is missing any timestamps relative to expected timestamps
generated based on the first_time_stamp, last_time_stamp and frequency.
:param pd.DataFrame df: data frame which needs to be tested for missing timestamps
:param str frequency: frequency i.e. sampling frequency of the data, expressed in seconds. A list of acceptable
frequency strings are available here
(https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases)
:param str column_name: name of the column which has time series if not the index.
:param pd.Timestamp first_time_stamp: timestamp at which the time_series is expected to start from.
:param pd.Timestamp last_time_stamp: timestamp at which the time_series is expected to end with.
:return: namedtuple with 3 attributes namely flag, raw_data and new_index
1. flag - boolean set to True if there are missing timestamps, else set to False
2. raw_data - input data frame as is without any modifications
3. new_index - pd.DateTimeIndex that can be used to set the new index, defaults to None, assigned a value only
when flag is set to True
:rtype: namedtuple
"""
# Declare a named tuple to hold results
MissingTimeStampFlag = namedtuple('MissingTimeStampFlag', ['flag', 'raw_data', 'new_index'])
result = {
'flag': None,
'raw_data': df.copy(deep=True),
'new_index': None
}
# Generate expected timestamps
expected_timestamps = pd.date_range(start=first_time_stamp, end=last_time_stamp, frequency=frequency)
# Get actual timestamps
if column_name:
df.set_index(column_name, inplace=True)
df = df.sort_index()
actual_timestamps = df.index.values
# Check if they are the same
comparison_index = expected_timestamps.difference(actual_timestamps)
if comparison_index.empty:
result['flag'] = True
result['new_index'] = expected_timestamps
else:
result['flag'] = False
# Return the result as a Named Tuple
return MissingTimeStampFlag._make(result)
|
55,081 |
def _extract_su2su2_prefactors(U, V):
"""U, V are SU(4) matrices for which there exists A, B, C, D such that
(A \otimes B) V (C \otimes D) = U. The problem is to find A, B, C, D in SU(2)
in an analytic and fully differentiable manner.
This decomposition is possible when U and V are in the same double coset of
SU(4), meaning there exists G, H in SO(4) s.t. G (Edag V E) H = (Edag U
E). This is guaranteed here by how V was constructed using the
_select_rotation_angles method. Then, we can use the fact that E SO(4) Edag
gives us something in SU(2) x SU(2) to give A, B, C, D.
"""
# A lot of the work here happens in the magic basis. Essentially, we
# don't look explicitly at some U = G V H, but rather at
# E^\dagger U E = G E^\dagger V E H
# so that we can recover
# U = (E G E^\dagger) V (E H E^\dagger) = (A \otimes B) V (C \otimes D).
# There is some math in the paper explaining how when we define U in this way,
# we can simultaneously diagonalize functions of U and V to ensure they are
# in the same coset and recover the decomposition.
u = qml.math.linalg.multi_dot([Edag, U, E])
v = qml.math.linalg.multi_dot([Edag, V, E])
uuT = qml.math.dot(u, qml.math.T(u))
vvT = qml.math.dot(v, qml.math.T(v))
# First, we find a matrix p (hopefully) in SO(4) s.t. p^T u u^T p is diagonal.
# Since uuT is complex and symmetric, both its real / imag parts share a set
# of real-valued eigenvectors.
ev_p, p = qml.math.linalg.eig(uuT.real)
# We also do this for v, i.e., find q (hopefully) in SO(4) s.t. q^T v v^T q is diagonal.
ev_q, q = qml.math.linalg.eig(vvT.real)
# If determinant of p is not 1, it is in O(4) but not SO(4), and has
# determinant -1. We can transform it to SO(4) by simply negating one
# of the columns.
if not qml.math.isclose(qml.math.linalg.det(p), 1.0):
p[:, -1] = -p[:, -1]
# Next, we are going to reorder the columns of q so that the order of the
# eigenvalues matches those of p.
p_product = qml.math.linalg.multi_dot([qml.math.T(p), uuT, p])
q_product = qml.math.linalg.multi_dot([qml.math.T(q), vvT, q])
p_diag = qml.math.diag(p_product)
q_diag = qml.math.diag(q_product)
new_q_order = []
for idx, eigval in enumerate(p_diag):
are_close = [qml.math.isclose(x, eigval) for x in q_diag]
if any(are_close):
new_q_order.append(qml.math.argmax(are_close))
# Get the permutation matrix needed to reshuffle the columns
q_perm = _perm_matrix_from_sequence(new_q_order)
q = qml.math.linalg.multi_dot([q, qml.math.T(q_perm)])
# Depending on the sign of the permutation, it may be that q is in O(4) but
# not SO(4). Again we can fix this by simply negating a column.
q_in_so4 = qml.math.isclose(qml.math.linalg.det(q), 1.0)
if not q_in_so4:
q[:, -1] = -q[:, -1]
# Now, we should have p, q in SO(4) such that p^T u u^T p = q^T v v^T q.
# Then (v^\dag q p^T u)(v^\dag q p^T u)^T = I.
# So we can set G = p q^T, H = v^\dag q p^T u to obtain G v H = u.
G = qml.math.dot(p, qml.math.T(q))
H = qml.math.linalg.multi_dot([qml.math.conj(qml.math.T(v)), q, qml.math.T(p), u])
# These are still in SO(4) though - we want to convert things into SU(2) x SU(2)
# so use the entangler. Since u = E^\dagger U E and v = E^\dagger V E where U, V
# are the target matrices, we can reshuffle as in the docstring above,
# U = (E G E^\dagger) V (E H E^\dagger) = (A \otimes B) V (C \otimes D)
# where A, B, C, D are in SU(2) x SU(2).
AB = qml.math.linalg.multi_dot([E, G, Edag])
CD = qml.math.linalg.multi_dot([E, H, Edag])
# Now, we just need to extract the constituent tensor products.
A, B = _su2su2_to_tensor_products(AB)
C, D = _su2su2_to_tensor_products(CD)
return A, B, C, D
|
def _extract_su2su2_prefactors(U, V):
"""U, V are SU(4) matrices for which there exists A, B, C, D such that
(A \otimes B) V (C \otimes D) = U. The problem is to find A, B, C, D in SU(2)
in an analytic and fully differentiable manner.
This decomposition is possible when U and V are in the same double coset of
SU(4), meaning there exists G, H in SO(4) s.t. G (Edag V E) H = (Edag U
E). This is guaranteed here by how V was constructed using the
_select_rotation_angles method. Then, we can use the fact that E SO(4) Edag
gives us something in SU(2) x SU(2) to give A, B, C, D.
"""
# A lot of the work here happens in the magic basis. Essentially, we
# don't look explicitly at some U = G V H, but rather at
# E^\dagger U E = G E^\dagger V E H
# so that we can recover
# U = (E G E^\dagger) V (E H E^\dagger) = (A \otimes B) V (C \otimes D).
# There is some math in the paper explaining how when we define U in this way,
# we can simultaneously diagonalize functions of U and V to ensure they are
# in the same coset and recover the decomposition.
u = qml.math.linalg.multi_dot([Edag, U, E])
v = qml.math.linalg.multi_dot([Edag, V, E])
uuT = qml.math.dot(u, qml.math.T(u))
vvT = qml.math.dot(v, qml.math.T(v))
# First, we find a matrix p (hopefully) in SO(4) s.t. p^T u u^T p is diagonal.
# Since uuT is complex and symmetric, both its real / imag parts share a set
# of real-valued eigenvectors.
_, p = qml.math.linalg.eig(qml.math.real(uuT))
# We also do this for v, i.e., find q (hopefully) in SO(4) s.t. q^T v v^T q is diagonal.
_, q = qml.math.linalg.eig(qml.math.real(vvT))
# If determinant of p is not 1, it is in O(4) but not SO(4), and has
# determinant -1. We can transform it to SO(4) by simply negating one
# of the columns.
if not qml.math.isclose(qml.math.linalg.det(p), 1.0):
p[:, -1] = -p[:, -1]
# Next, we are going to reorder the columns of q so that the order of the
# eigenvalues matches those of p.
p_product = qml.math.linalg.multi_dot([qml.math.T(p), uuT, p])
q_product = qml.math.linalg.multi_dot([qml.math.T(q), vvT, q])
p_diag = qml.math.diag(p_product)
q_diag = qml.math.diag(q_product)
new_q_order = []
for idx, eigval in enumerate(p_diag):
are_close = [qml.math.isclose(x, eigval) for x in q_diag]
if any(are_close):
new_q_order.append(qml.math.argmax(are_close))
# Get the permutation matrix needed to reshuffle the columns
q_perm = _perm_matrix_from_sequence(new_q_order)
q = qml.math.linalg.multi_dot([q, qml.math.T(q_perm)])
# Depending on the sign of the permutation, it may be that q is in O(4) but
# not SO(4). Again we can fix this by simply negating a column.
q_in_so4 = qml.math.isclose(qml.math.linalg.det(q), 1.0)
if not q_in_so4:
q[:, -1] = -q[:, -1]
# Now, we should have p, q in SO(4) such that p^T u u^T p = q^T v v^T q.
# Then (v^\dag q p^T u)(v^\dag q p^T u)^T = I.
# So we can set G = p q^T, H = v^\dag q p^T u to obtain G v H = u.
G = qml.math.dot(p, qml.math.T(q))
H = qml.math.linalg.multi_dot([qml.math.conj(qml.math.T(v)), q, qml.math.T(p), u])
# These are still in SO(4) though - we want to convert things into SU(2) x SU(2)
# so use the entangler. Since u = E^\dagger U E and v = E^\dagger V E where U, V
# are the target matrices, we can reshuffle as in the docstring above,
# U = (E G E^\dagger) V (E H E^\dagger) = (A \otimes B) V (C \otimes D)
# where A, B, C, D are in SU(2) x SU(2).
AB = qml.math.linalg.multi_dot([E, G, Edag])
CD = qml.math.linalg.multi_dot([E, H, Edag])
# Now, we just need to extract the constituent tensor products.
A, B = _su2su2_to_tensor_products(AB)
C, D = _su2su2_to_tensor_products(CD)
return A, B, C, D
|
54,879 |
def mixed_dtype_fused_rms_norm_affine(input, weight, normalized_shape, eps=1e-6):
# args = _cast_if_autocast_enabled(input, weight, bias, normalized_shape, eps)
args = _cast_if_autocast_enabled(input, weight, normalized_shape, eps)
with torch.cuda.amp.autocast(enabled=False):
return FusedRMSNormAffineMixedDtypesFunction.apply(*args)
|
def mixed_dtype_fused_rms_norm_affine(input, weight, normalized_shape, eps=1e-6):
args = _cast_if_autocast_enabled(input, weight, normalized_shape, eps)
with torch.cuda.amp.autocast(enabled=False):
return FusedRMSNormAffineMixedDtypesFunction.apply(*args)
|
51,531 |
def mesh_join_np(verts, edges, pols, out_np):
lens = [0]
for v in verts:
lens.append(lens[-1] + v.shape[0])
v_out = np.concatenate(verts)
e_out, p_out = np.array([]), np.array([])
if len(edges[0]) > 0:
e_out = np.concatenate([edg + l for edg, l in zip(edges, lens)])
if len(pols[0]) > 0 and out_np[2]:
if pols[0].dtype == object:
p_out = [np.array(p) + l for pol, l in zip(pols, lens) for p in pol]
else:
p_out = np.concatenate([pol + l for pol, l in zip(pols, lens)])
else:
p_out = [[v + l for v in p] for pol, l in zip(pols, lens) for p in pol]
return v_out, e_out, p_out
|
def mesh_join_np(verts, edges, pols, out_np):
accum_vert_lens = np.add.accumulate([len(v) for v in chain([[]], verts)])
is_edges_first_object = len(edges[0]) > 0
if is_edges_first_object:
e_out = np.concatenate([edg + l for edg, l in zip(edges, accum_vert_lens)])
# is it really edge??
else:
e_out = np.array([], np.int32)
is_pols_first_object = len(pols[0]) > 0
is_something_else = out_np[2]
if is_pols_first_object and is_something_else:
is_array_of_lists = pols[0].dtype == object
if is_array_of_lists:
p_out = [np.array(p) + l for pol, l in zip(pols, accum_vert_lens) for p in pol]
else:
p_out = np.concatenate([pol + l for pol, l in zip(pols, accum_vert_lens)])
else:
p_out = [[v + l for v in p] for pol, l in zip(pols, accum_vert_lens) for p in pol]
v_out = np.concatenate(verts)
return v_out, e_out, p_out
|
19,917 |
def test_maintains_rev_quoting_style(tmpdir, out_of_date, store):
fmt = (
'repos:\n'
'- repo: {0}\n'
' rev: "{1}"\n'
' hooks:\n'
' - id: foo\n'
'- repo: {0}\n'
' rev: \'{1}\'\n'
' hooks:\n'
' - id: foo\n'
)
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(fmt.format(out_of_date.path, out_of_date.original_rev))
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
expected = fmt.format(out_of_date.path, out_of_date.head_rev)
assert cfg.read() == expected
|
def test_maintains_rev_quoting_style(tmpdir, out_of_date, store):
fmt = (
'repos:\n'
'- repo: {0}\n'
' rev: "{1}"\n'
' hooks:\n'
' - id: foo\n'
'- repo: {0}\n'
" rev: '{1}'\n"
' hooks:\n'
' - id: foo\n'
)
cfg = tmpdir.join(C.CONFIG_FILE)
cfg.write(fmt.format(out_of_date.path, out_of_date.original_rev))
assert autoupdate(str(cfg), store, freeze=False, tags_only=False) == 0
expected = fmt.format(out_of_date.path, out_of_date.head_rev)
assert cfg.read() == expected
|
32,412 |
def generate_dbotscore(response: Dict) -> List:
"""Creates CommandResult object based on the contents of 'response' argument
and provides DBotScore objects.
Parameters
----------
response : dict
Object returned by ANYRUN API call in 'get_report' function.
Returns
-------
List
A list of CommandResults objects.
"""
data = response.get('data', {})
analysis = data.get('analysis', {})
main_object = analysis.get('content', {}).get('mainObject', {})
submission_type = main_object.get('type')
submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type
threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold()
reputation_map = {
"shared": Common.DBotScore.NONE,
"unknown": Common.DBotScore.NONE,
"whitelisted": Common.DBotScore.GOOD,
"malicious": Common.DBotScore.BAD,
"suspicious": Common.DBotScore.SUSPICIOUS
}
returned_data = []
main_entity = None
main_entity_type = None
# Add the hash or URL first
if submission_type == 'hash':
hashes = main_object.get('hashes', {})
info = main_object.get('info', {})
file_type = info.get('file')
exif = info.get('exif', {})
main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5')
main_entity_type = FeedIndicatorType.File
dbot_score = Common.DBotScore(
indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'),
indicator_type=DBotScoreType.FILE,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
returned_data.append(CommandResults(
indicator=Common.File(
dbot_score=dbot_score,
md5=hashes.get('md5'),
sha1=hashes.get('sha1'),
sha256=hashes.get('sha256'),
file_type=file_type,
associated_file_names=exif.get('OriginalFileName')
)
))
else:
main_entity = main_object.get('url')
main_entity_type = FeedIndicatorType.URL
url_outputs = {
'Data': main_object.get('url')
}
dbot_score = Common.DBotScore(
indicator=main_object.get('url'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
if dbot_score.score >= 2:
url_outputs['Malicious'] = {
'Vendor': 'ANYRUN',
'Description': threat_text
}
returned_data.append(CommandResults(
outputs_prefix='URL',
outputs_key_field=['Data'],
outputs=url_outputs,
indicator=Common.URL(
url=main_object.get('url'),
dbot_score=dbot_score,
)
))
# Check if network information is available in the report
if 'network' in data:
network_data = data.get('network')
# Then add all the network-related indicators - 'connections'
if 'connections' in network_data:
connections = network_data.get('connections')
for current_connection in connections:
reputation = current_connection.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_connection.get('IP'),
indicator_type=DBotScoreType.IP,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_connection.get('IP'),
entity_b_type=FeedIndicatorType.IP,
brand="ANYRUN"
)]
ip_indicator = Common.IP(
ip=current_connection.get('IP'),
asn=current_connection.get('ASN'),
port=current_connection.get('Port'),
geo_country=current_connection.get('Country'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_connection.get('IP') not in [
x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_connection.get('IP')}",
[{
"Description": f"This IP was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=ip_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'dnsRequests'
if 'dnsRequests' in network_data:
for current_dnsRequests in network_data.get('dnsRequests'):
reputation = current_dnsRequests.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_dnsRequests.get('Domain'),
indicator_type=DBotScoreType.DOMAIN,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_dnsRequests.get('Domain'),
entity_b_type=FeedIndicatorType.Domain,
brand="ANYRUN"
)]
if "IP" in current_dnsRequests:
for ip in current_dnsRequests.get('IP', []):
relationships.append(
EntityRelationship(
name=EntityRelationship.Relationships.RESOLVES_TO,
entity_a=current_dnsRequests.get('Domain'),
entity_a_type=FeedIndicatorType.Domain,
entity_b=ip,
entity_b_type=FeedIndicatorType.IP
)
)
domain_ip_dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name="ANYRUN",
score=Common.DBotScore.NONE
)
domain_ip_indicator = Common.IP(
ip=ip,
dbot_score=domain_ip_dbot_score
)
returned_data.append(CommandResults(
indicator=domain_ip_indicator,
readable_output=tableToMarkdown(
f"{ip}",
[{
"Description": f"This IP was resovled from {current_dnsRequests.get('Domain')}"
}]
)
))
domain_indicator = Common.Domain(
domain=current_dnsRequests.get('Domain'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_dnsRequests.get('Domain') not in [
x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_dnsRequests.get('Domain')}",
[{
"Description": f"This domain was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=domain_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'httpRequests'
if 'httpRequests' in network_data:
for current_httpRequests in network_data.get('httpRequests'):
reputation = current_httpRequests['Reputation']
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_httpRequests.get('URL'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_httpRequests.get('URL'),
entity_b_type=FeedIndicatorType.URL,
brand="ANYRUN"
)]
url_indicator = Common.URL(
url=current_httpRequests.get('URL'),
geo_country=current_httpRequests.get('Country'),
port=current_httpRequests.get('Port'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_httpRequests.get('URL') not in [
x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_httpRequests.get('URL')}",
[{
"Description": f"This URL was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=url_indicator,
relationships=relationships
))
if 'mitre' in data:
mitre_data = data.get('mitre')
for item in mitre_data:
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.RELATED_TO,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=item.get('name'),
entity_b_type='Attack Pattern'
)]
attack_indicator = Common.AttackPattern(
stix_id=None,
value=item.get('name'),
mitre_id=item.get('id')
)
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{item.get('name')}",
[{
"Description": f"This Attack Pattern was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=attack_indicator,
relationships=relationships
))
return returned_data
|
def generate_dbotscore(response: Dict) -> List:
"""Creates CommandResult object based on the contents of 'response' argument
and provides DBotScore objects.
Parameters
----------
response : dict
Object returned by ANYRUN API call in 'get_report' function.
Returns
-------
List
A list of CommandResults objects.
"""
data = response.get('data', {})
analysis = data.get('analysis', {})
main_object = analysis.get('content', {}).get('mainObject', {})
submission_type = main_object.get('type')
submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type
threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold()
reputation_map = {
"shared": Common.DBotScore.NONE,
"unknown": Common.DBotScore.NONE,
"whitelisted": Common.DBotScore.GOOD,
"malicious": Common.DBotScore.BAD,
"suspicious": Common.DBotScore.SUSPICIOUS
}
returned_data = []
main_entity = None
main_entity_type = None
# Add the hash or URL first
if submission_type == 'hash':
hashes = main_object.get('hashes', {})
info = main_object.get('info', {})
file_type = info.get('file')
exif = info.get('exif', {})
main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5')
main_entity_type = FeedIndicatorType.File
dbot_score = Common.DBotScore(
indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'),
indicator_type=DBotScoreType.FILE,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
returned_data.append(CommandResults(
indicator=Common.File(
dbot_score=dbot_score,
md5=hashes.get('md5'),
sha1=hashes.get('sha1'),
sha256=hashes.get('sha256'),
file_type=file_type,
associated_file_names=exif.get('OriginalFileName')
)
))
else:
main_entity = main_object.get('url')
main_entity_type = FeedIndicatorType.URL
url_outputs = {
'Data': main_object.get('url')
}
dbot_score = Common.DBotScore(
indicator=main_object.get('url'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
if dbot_score.score >= 2:
url_outputs['Malicious'] = {
'Vendor': 'ANYRUN',
'Description': threat_text
}
returned_data.append(CommandResults(
outputs_prefix='URL',
outputs_key_field=['Data'],
outputs=url_outputs,
indicator=Common.URL(
url=main_object.get('url'),
dbot_score=dbot_score,
)
))
# Check if network information is available in the report
if 'network' in data:
network_data = data.get('network')
# Then add all the network-related indicators - 'connections'
if 'connections' in network_data:
connections = network_data.get('connections')
for current_connection in connections:
reputation = current_connection.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_connection.get('IP'),
indicator_type=DBotScoreType.IP,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_connection.get('IP'),
entity_b_type=FeedIndicatorType.IP,
brand="ANYRUN"
)]
ip_indicator = Common.IP(
ip=current_connection.get('IP'),
asn=current_connection.get('ASN'),
port=current_connection.get('Port'),
geo_country=current_connection.get('Country'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_connection.get('IP') not in [
x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_connection.get('IP')}",
[{
"Description": f"This IP was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=ip_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'dnsRequests'
if 'dnsRequests' in network_data:
for current_dnsRequests in network_data.get('dnsRequests'):
reputation = current_dnsRequests.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_dnsRequests.get('Domain'),
indicator_type=DBotScoreType.DOMAIN,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.HOSTS,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_dnsRequests.get('Domain'),
entity_b_type=FeedIndicatorType.Domain,
brand="ANYRUN"
)]
if "IP" in current_dnsRequests:
for ip in current_dnsRequests.get('IP', []):
relationships.append(
EntityRelationship(
name=EntityRelationship.Relationships.RESOLVES_TO,
entity_a=current_dnsRequests.get('Domain'),
entity_a_type=FeedIndicatorType.Domain,
entity_b=ip,
entity_b_type=FeedIndicatorType.IP
)
)
domain_ip_dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name="ANYRUN",
score=Common.DBotScore.NONE
)
domain_ip_indicator = Common.IP(
ip=ip,
dbot_score=domain_ip_dbot_score
)
returned_data.append(CommandResults(
indicator=domain_ip_indicator,
readable_output=tableToMarkdown(
f"{ip}",
[{
"Description": f"This IP was resovled from {current_dnsRequests.get('Domain')}"
}]
)
))
domain_indicator = Common.Domain(
domain=current_dnsRequests.get('Domain'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_dnsRequests.get('Domain') not in [
x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_dnsRequests.get('Domain')}",
[{
"Description": f"This domain was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=domain_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'httpRequests'
if 'httpRequests' in network_data:
for current_httpRequests in network_data.get('httpRequests'):
reputation = current_httpRequests['Reputation']
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_httpRequests.get('URL'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_httpRequests.get('URL'),
entity_b_type=FeedIndicatorType.URL,
brand="ANYRUN"
)]
url_indicator = Common.URL(
url=current_httpRequests.get('URL'),
geo_country=current_httpRequests.get('Country'),
port=current_httpRequests.get('Port'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_httpRequests.get('URL') not in [
x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_httpRequests.get('URL')}",
[{
"Description": f"This URL was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=url_indicator,
relationships=relationships
))
if 'mitre' in data:
mitre_data = data.get('mitre')
for item in mitre_data:
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.RELATED_TO,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=item.get('name'),
entity_b_type='Attack Pattern'
)]
attack_indicator = Common.AttackPattern(
stix_id=None,
value=item.get('name'),
mitre_id=item.get('id')
)
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{item.get('name')}",
[{
"Description": f"This Attack Pattern was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=attack_indicator,
relationships=relationships
))
return returned_data
|
1,959 |
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w : ndarray of shape (n)
Array to be orthogonalized
W : ndarray of shape (p, n)
Null space definition
j : int < p
The no of (from the first) rows of Null space W wrt which w is
orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
|
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w : ndarray of shape (n,)
Array to be orthogonalized
W : ndarray of shape (p, n)
Null space definition
j : int < p
The no of (from the first) rows of Null space W wrt which w is
orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
|
5,338 |
def validate(config):
_config = {}
list(map(_config.update, config))
if isinstance(_config["salt_fun"], str):
# a simple str is taking as the single function with no args / kwargs
fun = _config["salt_fun"]
if fun not in __salt__:
return False, "{} not in __salt__".format(fun)
else:
for entry in _config["salt_fun"]:
if isinstance(entry, dict):
# check dict is of correct form
fun, args_kwargs_dict = list(entry.items())[0]
for key in args_kwargs_dict.keys():
if key == "args":
if not isinstance(args_kwargs_dict[key], list):
return (
False,
"args key for fun {} must be list".format(fun),
)
elif key == "kwargs":
if not isinstance(args_kwargs_dict[key], list):
return (
False,
"kwargs key for fun {} must be list of key value pairs".format(
fun
),
)
for key_value in args_kwargs_dict[key]:
if not isinstance(key_value, dict):
return (
False,
"{} is not a key / value pair".format(key_value),
)
else:
return (
False,
"key {} not allowed under fun {}".format(key, fun),
)
else:
# entry must be function itself
fun = entry
if fun not in __salt__:
return False, "{} not in __salt__".format(fun)
return True, "valid config"
|
def validate(config):
_config = {}
list(map(_config.update, config))
if isinstance(_config["salt_fun"], str):
# a simple str is taking as the single function with no args / kwargs
fun = _config["salt_fun"]
if fun not in __salt__:
return False, "{} not in __salt__".format(fun)
else:
for entry in _config["salt_fun"]:
if isinstance(entry, dict):
# check dict is of correct form
fun, args_kwargs_dict = list(entry.items())[0]
for key in args_kwargs_dict:
if key == "args":
if not isinstance(args_kwargs_dict[key], list):
return (
False,
"args key for fun {} must be list".format(fun),
)
elif key == "kwargs":
if not isinstance(args_kwargs_dict[key], list):
return (
False,
"kwargs key for fun {} must be list of key value pairs".format(
fun
),
)
for key_value in args_kwargs_dict[key]:
if not isinstance(key_value, dict):
return (
False,
"{} is not a key / value pair".format(key_value),
)
else:
return (
False,
"key {} not allowed under fun {}".format(key, fun),
)
else:
# entry must be function itself
fun = entry
if fun not in __salt__:
return False, "{} not in __salt__".format(fun)
return True, "valid config"
|
3,908 |
def check_planarity(G, counterexample=False):
"""Check if a graph is planar and return a Planar Embedding if true,
a counterexample otherwise.
A graph is planar iff it can be drawn in a plane without
any edge intersections.
Parameters
----------
G : NetworkX graph
counterexample : bool
A Kuratowski subgraph (to proof non planarity) is only returned if set
to true.
Returns
-------
(is_planar, certificate) : (bool, NetworkX graph) tuple
is_planar is true if the graph is planar.
If the graph is planar `certificate` is a PlanarEmbedding
otherwise it is a Kuratowski subgraph.
Examples
--------
This function can be considered as the primary interface for the planar embedding tools since
-if the graph is planar- it returns a PlanarEmbedding which can be then used to call other methods of this class.
Consider the following example:
>>> G = nx.Graph([(0,1),(0,2)])
>>> isPlanar, P = nx.check_planarity(G)
>>> print(isPlanar)
True
Once the PlanarEmbedding object is created using `check_planarity()` as above, the other features of the class can be used safely.
Let's see this on action by extending our initial example:
>>> G = nx.Graph([(0,1),(0,2)])
>>> isPlanar, P = nx.check_planarity(G)
>>> P.get_data()
{0: [1, 2], 1: [0], 2: [0]}
Notes
-----
A (combinatorial) embedding consists of cyclic orderings of the incident
edges at each vertex. Given such an embedding there are multiple approaches
discussed in literature to drawing the graph (subject to various
constraints, e.g. integer coordinates), see e.g. [2].
The planarity check algorithm and extraction of the combinatorial embedding
is based on the Left-Right Planarity Test [1].
A counterexample is only generated if the corresponding parameter is set,
because the complexity of the counterexample generation is higher.
References
----------
.. [1] Ulrik Brandes:
The Left-Right Planarity Test
2009
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208
.. [2] Takao Nishizeki, Md Saidur Rahman:
Planar graph drawing
Lecture Notes Series on Computing: Volume 12
2004
"""
planarity_state = LRPlanarity(G)
embedding = planarity_state.lr_planarity()
if embedding is None:
# graph is not planar
if counterexample:
return False, get_counterexample(G)
else:
return False, None
else:
# graph is planar
return True, embedding
|
def check_planarity(G, counterexample=False):
"""Check if a graph is planar and return a Planar Embedding if true,
a counterexample otherwise.
A graph is planar iff it can be drawn in a plane without
any edge intersections.
Parameters
----------
G : NetworkX graph
counterexample : bool
A Kuratowski subgraph (to proof non planarity) is only returned if set
to true.
Returns
-------
(is_planar, certificate) : (bool, NetworkX graph) tuple
is_planar is true if the graph is planar.
If the graph is planar `certificate` is a PlanarEmbedding
otherwise it is a Kuratowski subgraph.
Examples
--------
This function can be considered as the primary interface for the planar embedding tools since
-if the graph is planar- it returns a PlanarEmbedding which can be then used to call other methods of this class.
Consider the following example:
>>> G = nx.Graph([(0,1),(0,2)])
>>> isPlanar, P = nx.check_planarity(G)
>>> print(isPlanar)
True
When `G` is planar, a `PlanarEmbedding` instance is returned:
>>> G = nx.Graph([(0,1),(0,2)])
>>> isPlanar, P = nx.check_planarity(G)
>>> P.get_data()
{0: [1, 2], 1: [0], 2: [0]}
Notes
-----
A (combinatorial) embedding consists of cyclic orderings of the incident
edges at each vertex. Given such an embedding there are multiple approaches
discussed in literature to drawing the graph (subject to various
constraints, e.g. integer coordinates), see e.g. [2].
The planarity check algorithm and extraction of the combinatorial embedding
is based on the Left-Right Planarity Test [1].
A counterexample is only generated if the corresponding parameter is set,
because the complexity of the counterexample generation is higher.
References
----------
.. [1] Ulrik Brandes:
The Left-Right Planarity Test
2009
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208
.. [2] Takao Nishizeki, Md Saidur Rahman:
Planar graph drawing
Lecture Notes Series on Computing: Volume 12
2004
"""
planarity_state = LRPlanarity(G)
embedding = planarity_state.lr_planarity()
if embedding is None:
# graph is not planar
if counterexample:
return False, get_counterexample(G)
else:
return False, None
else:
# graph is planar
return True, embedding
|
48,453 |
def test_cache_flaky_pagination(cache_dir, monkeypatch):
responses = get_collection_versions()
cache_file = os.path.join(cache_dir, 'api.json')
api = get_test_galaxy_api('https://galaxy.server.com/api/', 'v2', no_cache=False)
# First attempt, fail midway through
mock_open = MagicMock(
side_effect=[
StringIO(to_text(json.dumps(responses[0]))),
StringIO(to_text(json.dumps(responses[1]))),
urllib_error.HTTPError(responses[1]['next'], 500, 'Error', {}, StringIO()),
StringIO(to_text(json.dumps(responses[3]))),
]
)
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
with pytest.raises(GalaxyError):
actual_versions = api.get_collection_versions('namespace', 'collection')
with open(cache_file) as fd:
final_cache = json.loads(fd.read())
assert final_cache == {
'version': 1,
'galaxy.server.com:': {
'modified': {
'namespace.collection': responses[0]['modified']
}
}
}
# Reset API
api = get_test_galaxy_api('https://galaxy.server.com/api/', 'v2', no_cache=False)
# Second attempt is successful so cache should be populated
mock_open = MagicMock(
side_effect=[
StringIO(to_text(json.dumps(r)))
for r in responses
]
)
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual_versions = api.get_collection_versions('namespace', 'collection')
with open(cache_file) as fd:
final_cache = json.loads(fd.read())
cached_server = final_cache['galaxy.server.com:']
cached_collection = cached_server['/api/v2/collections/namespace/collection/versions/']
cached_versions = [r['version'] for r in cached_collection['results']]
assert cached_versions == actual_versions == [u'1.0.0', u'1.0.1', u'1.0.2', u'1.0.3', u'1.0.4', u'1.0.5']
|
def test_cache_flaky_pagination(cache_dir, monkeypatch):
responses = get_collection_versions()
cache_file = os.path.join(cache_dir, 'api.json')
api = get_test_galaxy_api('https://galaxy.server.com/api/', 'v2', no_cache=False)
# First attempt, fail midway through
mock_open = MagicMock(
side_effect=[
StringIO(to_text(json.dumps(responses[0]))),
StringIO(to_text(json.dumps(responses[1]))),
urllib_error.HTTPError(responses[1]['next'], 500, 'Error', {}, StringIO()),
StringIO(to_text(json.dumps(responses[3]))),
]
)
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
with pytest.raises(GalaxyError):
actual_versions = api.get_collection_versions('namespace', 'collection')
with open(cache_file) as fd:
final_cache = json.loads(fd.read())
assert final_cache == {
'version': 1,
'galaxy.server.com:': {
'modified': {
'namespace.collection': responses[0]['modified']
}
}
}
# Reset API
api = get_test_galaxy_api('https://galaxy.server.com/api/', 'v2', no_cache=False)
# Second attempt is successful so cache should be populated
mock_open = MagicMock(
side_effect=[
StringIO(to_text(json.dumps(r)))
for r in responses
]
)
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual_versions = api.get_collection_versions('namespace', 'collection')
with open(cache_file) as fd:
final_cache = json.loads(fd.read())
cached_server = final_cache['galaxy.server.com:']
cached_collection = cached_server['/api/v2/collections/namespace/collection/versions/']
cached_versions = [r['version'] for r in cached_collection['results']]
assert cached_versions == actual_versions == [u'1.0.0', u'1.0.1', u'1.0.2', u'1.0.3', u'1.0.4', u'1.0.5']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.