id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
5,753 |
def directed_hausdorff(u, v, seed=0):
"""
Compute the directed Hausdorff distance between two 2-D arrays.
Distances between pairs are calculated using a Euclidean metric.
Parameters
----------
u : (M,N) array_like
Input array.
v : (O,N) array_like
Input array.
seed : int or None
Local `numpy.random.RandomState` seed. Default is 0, a random
shuffling of u and v that guarantees reproducibility.
Returns
-------
d : double
The directed Hausdorff distance between arrays `u` and `v`,
index_1 : int
index of point contributing to Hausdorff pair in `u`
index_2 : int
index of point contributing to Hausdorff pair in `v`
Raises
------
ValueError
An exception is thrown if `u` and `v` do not have
the same number of columns.
Notes
-----
Uses the early break technique and the random sampling approach
described by [1]_. Although worst-case performance is ``O(m * o)``
(as with the brute force algorithm), this is unlikely in practice
as the input data would have to require the algorithm to explore
every single point interaction, and after the algorithm shuffles
the input points at that. The best case performance is O(m), which
is satisfied by selecting an inner loop distance that is less than
cmax and leads to an early break as often as possible. The authors
have formally shown that the average runtime is closer to O(m).
.. versionadded:: 0.19.0
References
----------
.. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for
calculating the exact Hausdorff distance." IEEE Transactions On
Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,
2015.
See Also
--------
scipy.spatial.procrustes : Another similarity test for two data sets
Examples
--------
Find the directed Hausdorff distance between two 2-D arrays of
coordinates:
>>> from scipy.spatial.distance import directed_hausdorff
>>> u = np.array([(1.0, 0.0),
... (0.0, 1.0),
... (-1.0, 0.0),
... (0.0, -1.0)])
>>> v = np.array([(2.0, 0.0),
... (0.0, 2.0),
... (-2.0, 0.0),
... (0.0, -4.0)])
>>> directed_hausdorff(u, v)[0]
2.23606797749979
>>> directed_hausdorff(v, u)[0]
3.0
Find the general (symmetric) Hausdorff distance between two 2-D
arrays of coordinates:
>>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
3.0
Find the indices of the points that generate the Hausdorff distance
(the Hausdorff pair):
>>> directed_hausdorff(v, u)[1:]
(3, 3)
"""
u = np.asarray(u, dtype=np.float64, order='c')
v = np.asarray(v, dtype=np.float64, order='c')
if u.shape[1] != v.shape[1]:
raise ValueError('u and v need to have the same '
'number of columns')
result = _hausdorff.directed_hausdorff(u, v, seed)
return result
|
def directed_hausdorff(u, v, seed=0):
"""
Compute the directed Hausdorff distance between two ``(M, N)`` arrays.
Distances between pairs are calculated using a Euclidean metric.
Parameters
----------
u : (M,N) array_like
Input array.
v : (O,N) array_like
Input array.
seed : int or None
Local `numpy.random.RandomState` seed. Default is 0, a random
shuffling of u and v that guarantees reproducibility.
Returns
-------
d : double
The directed Hausdorff distance between arrays `u` and `v`,
index_1 : int
index of point contributing to Hausdorff pair in `u`
index_2 : int
index of point contributing to Hausdorff pair in `v`
Raises
------
ValueError
An exception is thrown if `u` and `v` do not have
the same number of columns.
Notes
-----
Uses the early break technique and the random sampling approach
described by [1]_. Although worst-case performance is ``O(m * o)``
(as with the brute force algorithm), this is unlikely in practice
as the input data would have to require the algorithm to explore
every single point interaction, and after the algorithm shuffles
the input points at that. The best case performance is O(m), which
is satisfied by selecting an inner loop distance that is less than
cmax and leads to an early break as often as possible. The authors
have formally shown that the average runtime is closer to O(m).
.. versionadded:: 0.19.0
References
----------
.. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for
calculating the exact Hausdorff distance." IEEE Transactions On
Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,
2015.
See Also
--------
scipy.spatial.procrustes : Another similarity test for two data sets
Examples
--------
Find the directed Hausdorff distance between two 2-D arrays of
coordinates:
>>> from scipy.spatial.distance import directed_hausdorff
>>> u = np.array([(1.0, 0.0),
... (0.0, 1.0),
... (-1.0, 0.0),
... (0.0, -1.0)])
>>> v = np.array([(2.0, 0.0),
... (0.0, 2.0),
... (-2.0, 0.0),
... (0.0, -4.0)])
>>> directed_hausdorff(u, v)[0]
2.23606797749979
>>> directed_hausdorff(v, u)[0]
3.0
Find the general (symmetric) Hausdorff distance between two 2-D
arrays of coordinates:
>>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
3.0
Find the indices of the points that generate the Hausdorff distance
(the Hausdorff pair):
>>> directed_hausdorff(v, u)[1:]
(3, 3)
"""
u = np.asarray(u, dtype=np.float64, order='c')
v = np.asarray(v, dtype=np.float64, order='c')
if u.shape[1] != v.shape[1]:
raise ValueError('u and v need to have the same '
'number of columns')
result = _hausdorff.directed_hausdorff(u, v, seed)
return result
|
34,241 |
def collect_entity_errors(
entity_results: List[EntityEvaluationResult],
merged_targets: List[Text],
merged_predictions: List[Text],
error_filename: Text,
):
errors = []
offset = 0
for entity_result in entity_results:
error = False
for i in range(offset, offset + len(entity_result.tokens)):
if merged_targets[i] != merged_predictions[i]:
error = True
break
if error:
errors.append(
{
"text": entity_result.message,
"entities": entity_result.entity_targets,
"predicted_entities": entity_result.entity_predictions,
}
)
offset += len(entity_result.tokens)
if errors:
utils.write_json_to_file(error_filename, errors)
logger.info("Incorrect enntity predictions saved to {}.".format(error_filename))
logger.debug(
"\n\nThese intent examples could not be classified "
"correctly: \n{}".format(errors)
)
else:
logger.info("No incorrect entity prediction found.")
|
def collect_entity_errors(
entity_results: List[EntityEvaluationResult],
merged_targets: List[Text],
merged_predictions: List[Text],
error_filename: Text,
):
errors = []
offset = 0
for entity_result in entity_results:
error = False
for i in range(offset, offset + len(entity_result.tokens)):
if merged_targets[i] != merged_predictions[i]:
error = True
break
if error:
errors.append(
{
"text": entity_result.message,
"entities": entity_result.entity_targets,
"predicted_entities": entity_result.entity_predictions,
}
)
offset += len(entity_result.tokens)
if errors:
utils.write_json_to_file(error_filename, errors)
logger.info("Incorrect entity predictions saved to {}.".format(error_filename))
logger.debug(
"\n\nThese intent examples could not be classified "
"correctly: \n{}".format(errors)
)
else:
logger.info("No incorrect entity prediction found.")
|
50,636 |
def load_config(config_file):
config_path = config_file if config_file else ".ansible-lint"
config_path = os.path.abspath(config_path)
if not os.path.exists(config_path):
return
try:
with open(config_path, "r") as stream:
config = yaml.safe_load(stream)
except yaml.YAMLError as e:
print(e, file=sys.stderr)
sys.exit(2)
# TODO(ssbarnea): implement schema validation for config file
if isinstance(config, list):
print("Invalid configuration '%s', expected dictionary." % config_path, file=sys.stderr)
sys.exit(2)
config_dir = os.path.dirname(config_path)
expand_to_normalized_paths(config, config_dir)
return config
|
def load_config(config_file):
config_path = config_file if config_file else ".ansible-lint"
config_path = os.path.abspath(config_path)
if not os.path.exists(config_path):
return
try:
with open(config_path, "r") as stream:
config = yaml.safe_load(stream)
except yaml.YAMLError as e:
print(e, file=sys.stderr)
sys.exit(2)
# TODO(ssbarnea): implement schema validation for config file
if isinstance(config, list):
print(
"Invalid configuration '{cfg!s}', expected YAML mapping in the config file.".
format(cfg=config_path),
file=sys.stderr,
)
sys.exit(2)
config_dir = os.path.dirname(config_path)
expand_to_normalized_paths(config, config_dir)
return config
|
15,540 |
def purge_old_data(instance, purge_days: int, repack: bool) -> bool:
"""Purge events and states older than purge_days ago.
Cleans up an timeframe of an hour, based on the oldest record.
"""
purge_before = dt_util.utcnow() - timedelta(days=purge_days)
_LOGGER.debug("Purging states and events before target %s", purge_before)
try:
with session_scope(session=instance.get_session()) as session:
# Purge a max of 1 hour, based on the oldest states or events record
batch_purge_before = purge_before
query = session.query(States).order_by(States.last_updated.asc()).limit(1)
states = execute(query, to_native=True, validate_entity_ids=False)
if states:
batch_purge_before = min(
batch_purge_before,
states[0].last_updated + timedelta(hours=1),
)
query = session.query(Events).order_by(Events.time_fired.asc()).limit(1)
events = execute(query, to_native=True)
if events:
batch_purge_before = min(
batch_purge_before,
events[0].time_fired + timedelta(hours=1),
)
_LOGGER.debug("Purging states and events before %s", batch_purge_before)
# Update old_state_id to NULL before deleting to ensure
# the delete does not fail due to a foreign key constraint
# since some databases (MSSQL) cannot do the ON DELETE CASCADE
# for us.
disconnected_rows = (
session.query(States)
.filter(
States.old_state_id.in_(
session.query(States.state_id)
.filter(States.last_updated < batch_purge_before)
.subquery()
)
)
.update({"old_state_id": None}, synchronize_session=False)
)
_LOGGER.debug("Updated %s states to remove old_state_id", disconnected_rows)
deleted_rows = (
session.query(States)
.filter(States.last_updated < batch_purge_before)
.delete(synchronize_session=False)
)
_LOGGER.debug("Deleted %s states", deleted_rows)
deleted_rows = (
session.query(Events)
.filter(Events.time_fired < batch_purge_before)
.delete(synchronize_session=False)
)
_LOGGER.debug("Deleted %s events", deleted_rows)
# If states or events purging isn't processing the purge_before yet,
# return false, as we are not done yet.
if batch_purge_before != purge_before:
_LOGGER.debug("Purging hasn't fully completed yet")
return False
# Recorder runs is small, no need to batch run it
deleted_rows = (
session.query(RecorderRuns)
.filter(RecorderRuns.start < purge_before)
.filter(RecorderRuns.run_id != instance.run_info.run_id)
.delete(synchronize_session=False)
)
_LOGGER.debug("Deleted %s recorder_runs", deleted_rows)
if repack:
# Execute sqlite or postgresql vacuum command to free up space on disk
if instance.engine.driver in ("pysqlite", "postgresql"):
_LOGGER.debug("Vacuuming SQL DB to free space")
instance.engine.execute("VACUUM")
# Optimize mysql / mariadb tables to free up space on disk
elif instance.engine.driver in ("mysqldb", "pymysql"):
_LOGGER.debug("Optimizing SQL DB to free space")
instance.engine.execute("OPTIMIZE TABLE states, events, recorder_runs")
except OperationalError as err:
# Retry when one of the following MySQL errors occurred:
# 1205: Lock wait timeout exceeded; try restarting transaction
# 1206: The total number of locks exceeds the lock table size
# 1213: Deadlock found when trying to get lock; try restarting transaction
if instance.engine.driver in ("mysqldb", "pymysql") and err.orig.args[0] in (
1205,
1206,
1213,
):
_LOGGER.info("%s; purge not completed, retrying", err.orig.args[1])
time.sleep(instance.db_retry_wait)
return False
_LOGGER.warning("Error purging history: %s", err)
except SQLAlchemyError as err:
_LOGGER.warning("Error purging history: %s", err)
return True
|
def purge_old_data(instance, purge_days: int, repack: bool) -> bool:
"""Purge events and states older than purge_days ago.
Cleans up an timeframe of an hour, based on the oldest record.
"""
purge_before = dt_util.utcnow() - timedelta(days=purge_days)
_LOGGER.debug("Purging states and events before target %s", purge_before)
try:
with session_scope(session=instance.get_session()) as session:
# Purge a max of 1 hour, based on the oldest states or events record
batch_purge_before = purge_before
query = session.query(States).order_by(States.last_updated.asc()).limit(1)
states = execute(query, to_native=True, validate_entity_ids=False)
if states:
batch_purge_before = min(
batch_purge_before,
states[0].last_updated + timedelta(hours=1),
)
query = session.query(Events).order_by(Events.time_fired.asc()).limit(1)
events = execute(query, to_native=True)
if events:
batch_purge_before = min(
batch_purge_before,
events[0].time_fired + timedelta(hours=1),
)
_LOGGER.debug("Purging states and events before %s", batch_purge_before)
# Update old_state_id to NULL before deleting to ensure
# the delete does not fail due to a foreign key constraint
# since some databases (MSSQL) cannot do the ON DELETE SET NULL
# for us.
disconnected_rows = (
session.query(States)
.filter(
States.old_state_id.in_(
session.query(States.state_id)
.filter(States.last_updated < batch_purge_before)
.subquery()
)
)
.update({"old_state_id": None}, synchronize_session=False)
)
_LOGGER.debug("Updated %s states to remove old_state_id", disconnected_rows)
deleted_rows = (
session.query(States)
.filter(States.last_updated < batch_purge_before)
.delete(synchronize_session=False)
)
_LOGGER.debug("Deleted %s states", deleted_rows)
deleted_rows = (
session.query(Events)
.filter(Events.time_fired < batch_purge_before)
.delete(synchronize_session=False)
)
_LOGGER.debug("Deleted %s events", deleted_rows)
# If states or events purging isn't processing the purge_before yet,
# return false, as we are not done yet.
if batch_purge_before != purge_before:
_LOGGER.debug("Purging hasn't fully completed yet")
return False
# Recorder runs is small, no need to batch run it
deleted_rows = (
session.query(RecorderRuns)
.filter(RecorderRuns.start < purge_before)
.filter(RecorderRuns.run_id != instance.run_info.run_id)
.delete(synchronize_session=False)
)
_LOGGER.debug("Deleted %s recorder_runs", deleted_rows)
if repack:
# Execute sqlite or postgresql vacuum command to free up space on disk
if instance.engine.driver in ("pysqlite", "postgresql"):
_LOGGER.debug("Vacuuming SQL DB to free space")
instance.engine.execute("VACUUM")
# Optimize mysql / mariadb tables to free up space on disk
elif instance.engine.driver in ("mysqldb", "pymysql"):
_LOGGER.debug("Optimizing SQL DB to free space")
instance.engine.execute("OPTIMIZE TABLE states, events, recorder_runs")
except OperationalError as err:
# Retry when one of the following MySQL errors occurred:
# 1205: Lock wait timeout exceeded; try restarting transaction
# 1206: The total number of locks exceeds the lock table size
# 1213: Deadlock found when trying to get lock; try restarting transaction
if instance.engine.driver in ("mysqldb", "pymysql") and err.orig.args[0] in (
1205,
1206,
1213,
):
_LOGGER.info("%s; purge not completed, retrying", err.orig.args[1])
time.sleep(instance.db_retry_wait)
return False
_LOGGER.warning("Error purging history: %s", err)
except SQLAlchemyError as err:
_LOGGER.warning("Error purging history: %s", err)
return True
|
23,843 |
def deduce_subsystem(conanfile, scope):
""" used by:
- EnvVars: to decide is using : ; as path separator, translate paths to subsystem
and decide to generate a .bat or .sh
- Autotools: to define the full abs path to the "configure" script
- GnuDeps: to map all the paths from dependencies
- Aggregation of envfiles: to map each aggregated path to the subsystem
- unix_path: util for recipes
"""
if scope.startswith("build"):
if hasattr(conanfile, "settings_build"):
the_os = conanfile.settings_build.get_safe("os")
subsystem = conanfile.settings_build.get_safe("os.subsystem")
else:
the_os = platform.system() # FIXME: Temporary fallback until 2.0
subsystem = None
else:
the_os = conanfile.settings.get_safe("os")
subsystem = conanfile.settings.get_safe("os.subsystem")
if not str(the_os).startswith("Windows"):
return None
if subsystem is None and not scope.startswith("build"): # "run" scope do not follow win_bash
return WINDOWS
if subsystem is None: # Not defined by settings, so native windows
if not conanfile.win_bash:
return WINDOWS
subsystem = conanfile.conf.get("tools.microsoft.bash:subsystem")
if not subsystem:
raise ConanException("The config 'tools.microsoft.bash:subsystem' is "
"needed to run commands in a Windows subsystem")
return subsystem
|
def deduce_subsystem(conanfile, scope):
""" used by:
- EnvVars: to decide if using : ; as path separator, translate paths to subsystem
and decide to generate a .bat or .sh
- Autotools: to define the full abs path to the "configure" script
- GnuDeps: to map all the paths from dependencies
- Aggregation of envfiles: to map each aggregated path to the subsystem
- unix_path: util for recipes
"""
if scope.startswith("build"):
if hasattr(conanfile, "settings_build"):
the_os = conanfile.settings_build.get_safe("os")
subsystem = conanfile.settings_build.get_safe("os.subsystem")
else:
the_os = platform.system() # FIXME: Temporary fallback until 2.0
subsystem = None
else:
the_os = conanfile.settings.get_safe("os")
subsystem = conanfile.settings.get_safe("os.subsystem")
if not str(the_os).startswith("Windows"):
return None
if subsystem is None and not scope.startswith("build"): # "run" scope do not follow win_bash
return WINDOWS
if subsystem is None: # Not defined by settings, so native windows
if not conanfile.win_bash:
return WINDOWS
subsystem = conanfile.conf.get("tools.microsoft.bash:subsystem")
if not subsystem:
raise ConanException("The config 'tools.microsoft.bash:subsystem' is "
"needed to run commands in a Windows subsystem")
return subsystem
|
22,700 |
def main():
args = sys.argv[1:]
# Default config is pebble
directory_url = os.environ.get('SERVER', 'https://localhost:14000/dir')
http_01_port = int(os.environ.get('HTTP_01_PORT', '5002'))
tls_alpn_01_port = int(os.environ.get('TLS_ALPN_01_PORT', '5001'))
# Execution of certbot in a self-contained workspace
workspace = os.environ.get('WORKSPACE', os.path.join(os.getcwd(), '.certbot_test_workspace'))
if not os.path.exists(workspace):
print('--> Create a workspace for certbot_test: {0}'.format(workspace))
os.mkdir(workspace)
else:
print('--> Use an existing workspace for certbot_test: {0}'.format(workspace))
config_dir = os.path.join(workspace, 'conf')
certbot_test(args, directory_url, http_01_port, tls_alpn_01_port,
config_dir, workspace, True, False)
|
def main():
args = sys.argv[1:]
# Default config is pebble
directory_url = os.environ.get('SERVER', 'https://localhost:14000/dir')
http_01_port = int(os.environ.get('HTTP_01_PORT', '5002'))
tls_alpn_01_port = int(os.environ.get('TLS_ALPN_01_PORT', '5001'))
# Execution of certbot in a self-contained workspace
workspace = os.environ.get('WORKSPACE', os.path.join(os.getcwd(), '.certbot_test_workspace'))
if not os.path.exists(workspace):
print('--> Creating a workspace for certbot_test: {0}'.format(workspace))
os.mkdir(workspace)
else:
print('--> Use an existing workspace for certbot_test: {0}'.format(workspace))
config_dir = os.path.join(workspace, 'conf')
certbot_test(args, directory_url, http_01_port, tls_alpn_01_port,
config_dir, workspace, True, False)
|
29,162 |
def can_assign_voiceartist(handler):
"""Decorator to check whether the user can assign voiceartist in
given exploration.
Args:
handler: function. The function to be decorated.
Returns:
function. The newly decorated function that now also checks if a user
has permission to assign voiceartist to a given exploration.
"""
def test_can_assign_voiceartist(self, entity_type, entity_id, **kwargs):
"""Checks if the user can assign voiceartist to the exploration.
Args:
entity_type: str. The type of activity.
entity_id: str. Entity id of the activity.
**kwargs: dict(str: *). Keyword arguments.
Returns:
*. The return value of the decorated function.
Raises:
NotLoggedInException. The user is not logged in.
PageNotFoundException. The page is not found.
UnauthorizedUserException. The user does not have credentials to
voiceover an exploration.
"""
if not self.user_id:
raise base.UserFacingExceptions.NotLoggedInException
exploration_rights = rights_manager.get_exploration_rights(
entity_id, strict=False)
if exploration_rights is None:
raise base.UserFacingExceptions.PageNotFoundException
if rights_manager.check_can_modify_voiceartist_in_activity(
self.user, exploration_rights):
return handler(self, entity_type, entity_id, **kwargs)
else:
raise base.UserFacingExceptions.UnauthorizedUserException(
'You do not have credentials to assign voiceartist to'
' this exploration.')
test_can_assign_voiceartist.__wrapped__ = True
return test_can_assign_voiceartist
|
def can_assign_voiceartist(handler):
"""Decorator to check whether the user can assign voiceartist in
given exploration.
Args:
handler: function. The function to be decorated.
Returns:
function. The newly decorated function that now also checks if a user
has permission to assign a voice artist to the given entity.
"""
def test_can_assign_voiceartist(self, entity_type, entity_id, **kwargs):
"""Checks if the user can assign voiceartist to the exploration.
Args:
entity_type: str. The type of activity.
entity_id: str. Entity id of the activity.
**kwargs: dict(str: *). Keyword arguments.
Returns:
*. The return value of the decorated function.
Raises:
NotLoggedInException. The user is not logged in.
PageNotFoundException. The page is not found.
UnauthorizedUserException. The user does not have credentials to
voiceover an exploration.
"""
if not self.user_id:
raise base.UserFacingExceptions.NotLoggedInException
exploration_rights = rights_manager.get_exploration_rights(
entity_id, strict=False)
if exploration_rights is None:
raise base.UserFacingExceptions.PageNotFoundException
if rights_manager.check_can_modify_voiceartist_in_activity(
self.user, exploration_rights):
return handler(self, entity_type, entity_id, **kwargs)
else:
raise base.UserFacingExceptions.UnauthorizedUserException(
'You do not have credentials to assign voiceartist to'
' this exploration.')
test_can_assign_voiceartist.__wrapped__ = True
return test_can_assign_voiceartist
|
12,495 |
def analyze_type_callable_member_access(name: str,
typ: FunctionLike,
mx: MemberContext) -> Type:
# Class attribute.
# TODO super?
ret_type = typ.items[0].ret_type
assert isinstance(ret_type, ProperType)
if isinstance(ret_type, TupleType):
ret_type = tuple_fallback(ret_type)
if isinstance(ret_type, Instance):
if not mx.is_operator:
# When Python sees an operator (eg `3 == 4`), it automatically translates that
# into something like `int.__eq__(3, 4)` instead of `(3).__eq__(4)` as an
# optimization.
#
# While it normally it doesn't matter which of the two versions are used, it
# does cause inconsistencies when working with classes. For example, translating
# `int == int` to `int.__eq__(int)` would not work since `int.__eq__` is meant to
# compare two int _instances_. What we really want is `type(int).__eq__`, which
# is meant to compare two types or classes.
#
# This check makes sure that when we encounter an operator, we skip looking up
# the corresponding method in the current instance to avoid this edge case.
# See https://github.com/python/mypy/pull/1787 for more info.
# TODO: do not rely on same type variables being present in all constructor overloads.
result = analyze_class_attribute_access(ret_type, name, mx,
original_vars=typ.items[0].variables)
if result:
return result
# Look up from the 'type' type.
return _analyze_member_access(name, typ.fallback, mx)
else:
assert False, f'Unexpected type {repr(ret_type)}'
|
def analyze_type_callable_member_access(name: str,
typ: FunctionLike,
mx: MemberContext) -> Type:
# Class attribute.
# TODO super?
ret_type = typ.items[0].ret_type
assert isinstance(ret_type, ProperType)
if isinstance(ret_type, TupleType):
ret_type = tuple_fallback(ret_type)
if isinstance(ret_type, Instance):
if not mx.is_operator:
# When Python sees an operator (eg `3 == 4`), it automatically translates that
# into something like `int.__eq__(3, 4)` instead of `(3).__eq__(4)` as an
# optimization.
#
# While it normally it doesn't matter which of the two versions are used, it
# does cause inconsistencies when working with classes. For example, translating
# `int == int` to `int.__eq__(int)` would not work since `int.__eq__` is meant to
# compare two int _instances_. What we really want is `type(int).__eq__`, which
# is meant to compare two types or classes.
#
# This check makes sure that when we encounter an operator, we skip looking up
# the corresponding method in the current instance to avoid this edge case.
# See https://github.com/python/mypy/pull/1787 for more info.
# TODO: do not rely on same type variables being present in all constructor overloads.
result = analyze_class_attribute_access(ret_type, name, mx,
original_vars=typ.items[0].variables)
if result:
return result
# Look up from the 'type' type.
return _analyze_member_access(name, typ.fallback, mx)
else:
assert False, f'Unexpected type {ret_type!r}'
|
22,081 |
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
49,123 |
def roots_quintic(f):
"""
Calculate exact roots of a solvable irreducible quintic with rational coefficients.
Return an empty list if the quintic is reducible or not solvable.
"""
result = []
coeff_5, coeff_4, p_, q_, r_, s_ = f.all_coeffs()
if coeff_5 != 1:
f = Poly(f/coeff_5)
_, coeff_4, p_, q_, r_, s_ = f.all_coeffs()
# Cancel coeff_4 to form x^5 + px^3 + qx^2 + rx + s
if coeff_4:
p = p_ - 2*coeff_4*coeff_4/5
q = q_ - 3*coeff_4*p_/5 + 4*coeff_4**3/25
r = r_ - 2*coeff_4*q_/5 + 3*coeff_4**2*p_/25 - 3*coeff_4**4/125
s = s_ - coeff_4*r_/5 + coeff_4**2*q_/25 - coeff_4**3*p_/125 + 4*coeff_4**5/3125
x = f.gen
f = Poly(x**5 + p*x**3 + q*x**2 + r*x + s)
else:
p, q, r, s = p_, q_, r_, s_
if not all(coeff.is_Rational for coeff in (p, q, r, s)):
return result
quintic = PolyQuintic(f)
# Eqn standardized. Algo for solving starts here
if not f.is_irreducible:
return result
f20 = quintic.f20
# Check if f20 has linear factors over domain Z
if f20.is_irreducible:
return result
# Now, we know that f is solvable
for _factor in f20.factor_list()[1]:
if _factor[0].is_linear:
theta = _factor[0].root(0)
break
d = discriminant(f)
delta = sqrt(d)
# zeta = a fifth root of unity
zeta1, zeta2, zeta3, zeta4 = quintic.zeta
T = quintic.T(theta, d)
tol = S(1e-10)
alpha = T[1] + T[2]*delta
alpha_bar = T[1] - T[2]*delta
beta = T[3] + T[4]*delta
beta_bar = T[3] - T[4]*delta
disc = alpha**2 - 4*beta
disc_bar = alpha_bar**2 - 4*beta_bar
l0 = quintic.l0(theta)
Stwo = S(2)
l1 = _quintic_simplify((-alpha + sqrt(disc)) / Stwo)
l4 = _quintic_simplify((-alpha - sqrt(disc)) / Stwo)
l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / Stwo)
l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / Stwo)
order = quintic.order(theta, d)
test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )
# Comparing floats
if not comp(test, 0, tol):
l2, l3 = l3, l2
# Now we have correct order of l's
R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4
R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4
R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4
R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4
Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]
Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]
# Simplifying improves performance a lot for exact expressions
R1 = _quintic_simplify(R1)
R2 = _quintic_simplify(R2)
R3 = _quintic_simplify(R3)
R4 = _quintic_simplify(R4)
# hard-coded results for [factor(i) for i in _vsolve(x**5 - a - I*b, x)]
x0 = z**(S(1)/5)
x1 = sqrt(2)
x2 = sqrt(5)
x3 = sqrt(5 - x2)
x4 = I*x2
x5 = x4 + I
x6 = I*x0/4
x7 = x1*sqrt(x2 + 5)
sol = [x0, -x6*(x1*x3 - x5), x6*(x1*x3 + x5), -x6*(x4 + x7 - I), x6*(-x4 + x7 + I)]
R1 = R1.as_real_imag()
R2 = R2.as_real_imag()
R3 = R3.as_real_imag()
R4 = R4.as_real_imag()
for i, s in enumerate(sol):
Res[1][i] = _quintic_simplify(s.xreplace({z: R1[0] + I*R1[1]}))
Res[2][i] = _quintic_simplify(s.xreplace({z: R2[0] + I*R2[1]}))
Res[3][i] = _quintic_simplify(s.xreplace({z: R3[0] + I*R3[1]}))
Res[4][i] = _quintic_simplify(s.xreplace({z: R4[0] + I*R4[1]}))
for i in range(1, 5):
for j in range(5):
Res_n[i][j] = Res[i][j].n()
Res[i][j] = _quintic_simplify(Res[i][j])
r1 = Res[1][0]
r1_n = Res_n[1][0]
for i in range(5):
if comp(im(r1_n*Res_n[4][i]), 0, tol):
r4 = Res[4][i]
break
# Now we have various Res values. Each will be a list of five
# values. We have to pick one r value from those five for each Res
u, v = quintic.uv(theta, d)
testplus = (u + v*delta*sqrt(5)).n()
testminus = (u - v*delta*sqrt(5)).n()
# Evaluated numbers suffixed with _n
# We will use evaluated numbers for calculation. Much faster.
r4_n = r4.n()
r2 = r3 = None
for i in range(5):
r2temp_n = Res_n[2][i]
for j in range(5):
# Again storing away the exact number and using
# evaluated numbers in computations
r3temp_n = Res_n[3][j]
if (comp((r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus).n(), 0, tol) and
comp((r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus).n(), 0, tol)):
r2 = Res[2][i]
r3 = Res[3][j]
break
if r2 is not None:
break
else:
return [] # fall back to normal solve
# Now, we have r's so we can get roots
x1 = (r1 + r2 + r3 + r4)/5
x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5
x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5
x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5
x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5
result = [x1, x2, x3, x4, x5]
# Now check if solutions are distinct
saw = set()
for r in result:
r = r.n(2)
if r in saw:
# Roots were identical. Abort, return []
# and fall back to usual solve
return []
saw.add(r)
# Restore to original equation where coeff_4 is nonzero
if coeff_4:
result = [x - coeff_4 / 5 for x in result]
return result
|
def roots_quintic(f):
"""
Calculate exact roots of a solvable irreducible quintic with rational coefficients.
Return an empty list if the quintic is reducible or not solvable.
"""
result = []
coeff_5, coeff_4, p_, q_, r_, s_ = f.all_coeffs()
if coeff_5 != 1:
f = Poly(f/coeff_5)
_, coeff_4, p_, q_, r_, s_ = f.all_coeffs()
# Cancel coeff_4 to form x^5 + px^3 + qx^2 + rx + s
if coeff_4:
p = p_ - 2*coeff_4*coeff_4/5
q = q_ - 3*coeff_4*p_/5 + 4*coeff_4**3/25
r = r_ - 2*coeff_4*q_/5 + 3*coeff_4**2*p_/25 - 3*coeff_4**4/125
s = s_ - coeff_4*r_/5 + coeff_4**2*q_/25 - coeff_4**3*p_/125 + 4*coeff_4**5/3125
x = f.gen
f = Poly(x**5 + p*x**3 + q*x**2 + r*x + s)
else:
p, q, r, s = p_, q_, r_, s_
if not all(coeff.is_Rational for coeff in (p, q, r, s)):
return result
quintic = PolyQuintic(f)
# Eqn standardized. Algo for solving starts here
if not f.is_irreducible:
return result
f20 = quintic.f20
# Check if f20 has linear factors over domain Z
if f20.is_irreducible:
return result
# Now, we know that f is solvable
for _factor in f20.factor_list()[1]:
if _factor[0].is_linear:
theta = _factor[0].root(0)
break
d = discriminant(f)
delta = sqrt(d)
# zeta = a fifth root of unity
zeta1, zeta2, zeta3, zeta4 = quintic.zeta
T = quintic.T(theta, d)
tol = S(1e-10)
alpha = T[1] + T[2]*delta
alpha_bar = T[1] - T[2]*delta
beta = T[3] + T[4]*delta
beta_bar = T[3] - T[4]*delta
disc = alpha**2 - 4*beta
disc_bar = alpha_bar**2 - 4*beta_bar
l0 = quintic.l0(theta)
Stwo = S(2)
l1 = _quintic_simplify((-alpha + sqrt(disc)) / Stwo)
l4 = _quintic_simplify((-alpha - sqrt(disc)) / Stwo)
l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / Stwo)
l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / Stwo)
order = quintic.order(theta, d)
test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )
# Comparing floats
if not comp(test, 0, tol):
l2, l3 = l3, l2
# Now we have correct order of l's
R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4
R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4
R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4
R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4
Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]
Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]
# Simplifying improves performance a lot for exact expressions
R1 = _quintic_simplify(R1)
R2 = _quintic_simplify(R2)
R3 = _quintic_simplify(R3)
R4 = _quintic_simplify(R4)
# hard-coded results for [factor(i) for i in _vsolve(x**5 - a - I*b, x)]
x0 = z**(S(1)/5)
x1 = sqrt(2)
x2 = sqrt(5)
x3 = sqrt(5 - x2)
x4 = I*x2
x5 = x4 + I
x6 = I*x0/4
x7 = x1*sqrt(x2 + 5)
sol = [x0, -x6*(x1*x3 - x5), x6*(x1*x3 + x5), -x6*(x4 + x7 - I), x6*(-x4 + x7 + I)]
R1 = R1.as_real_imag()
R2 = R2.as_real_imag()
R3 = R3.as_real_imag()
R4 = R4.as_real_imag()
for i, s in enumerate(sol):
Res[1][i] = _quintic_simplify(s.xreplace({z: R1[0] + I*R1[1]}))
Res[2][i] = _quintic_simplify(s.xreplace({z: R2[0] + I*R2[1]}))
Res[3][i] = _quintic_simplify(s.xreplace({z: R3[0] + I*R3[1]}))
Res[4][i] = _quintic_simplify(s.xreplace({z: R4[0] + I*R4[1]}))
for i in range(1, 5):
for j in range(5):
Res_n[i][j] = Res[i][j].n()
Res[i][j] = _quintic_simplify(Res[i][j])
r1 = Res[1][0]
r1_n = Res_n[1][0]
for i in range(5):
if comp(im(r1_n*Res_n[4][i]), 0, tol):
r4 = Res[4][i]
break
# Now we have various Res values. Each will be a list of five
# values. We have to pick one r value from those five for each Res
u, v = quintic.uv(theta, d)
testplus = (u + v*delta*sqrt(5)).n()
testminus = (u - v*delta*sqrt(5)).n()
# Evaluated numbers suffixed with _n
# We will use evaluated numbers for calculation. Much faster.
r4_n = r4.n()
r2 = r3 = None
for i in range(5):
r2temp_n = Res_n[2][i]
for j in range(5):
# Again storing away the exact number and using
# evaluated numbers in computations
r3temp_n = Res_n[3][j]
if (comp((r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus).n(), 0, tol) and
comp((r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus).n(), 0, tol)):
r2 = Res[2][i]
r3 = Res[3][j]
break
if r2 is not None:
break
else:
return [] # fall back to normal solve
# Now, we have r's so we can get roots
x1 = (r1 + r2 + r3 + r4)/5
x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5
x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5
x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5
x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5
result = [x1, x2, x3, x4, x5]
# Now check if solutions are distinct
saw = set()
for r in result:
r = r.n(2)
if r in saw:
# Roots were identical. Abort, return []
# and fall back to usual solve
return []
saw.add(r)
# Restore to original equation where coeff_4 is nonzero
if coeff_4:
result = [x - coeff_4 / 5 for x in result]
return result
|
59,262 |
def getproxies_environment():
"""Return a dictionary of scheme -> proxy server URL mappings.
Scan the environment for variables named <scheme>_proxy;
this seems to be the standard convention. If you need a
different way, you can pass a proxies dictionary to the
[Fancy]URLopener constructor.
"""
# in order to prefer lowercase variables, process environment in
# two passes: first matches any, second pass matches lowercase only
# select only environment variables which end in (after making lowercase) _proxy
candidate_names = [name for name in os.environ.keys() if len(name)>5 and name[-6]=='_'] # fast selection of candidates
environment = [(name, os.environ[name], name.lower()) for name in candidate_names if name[-6:].lower()=='_proxy']
proxies = {}
for name, value, name_lower in environment:
if value and name_lower[-6:] == '_proxy':
proxies[name_lower[:-6]] = value
# CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY
# (non-all-lowercase) as it may be set from the web server by a "Proxy:"
# header from the client
# If "proxy" is lowercase, it will still be used thanks to the next block
if 'REQUEST_METHOD' in os.environ:
proxies.pop('http', None)
for name, value, name_lower in environment:
if name[-6:] == '_proxy':
if value:
proxies[name_lower[:-6]] = value
else:
proxies.pop(name_lower[:-6], None)
return proxies
|
def getproxies_environment():
"""Return a dictionary of scheme -> proxy server URL mappings.
Scan the environment for variables named <scheme>_proxy;
this seems to be the standard convention. If you need a
different way, you can pass a proxies dictionary to the
[Fancy]URLopener constructor.
"""
# in order to prefer lowercase variables, process environment in
# two passes: first matches any, second pass matches lowercase only
# select only environment variables which end in (after making lowercase) _proxy
proxies = {}
environment = []
for name in os.environ.keys():
# fast screen underscore position before more expensive case-folding
if len(name) > 5 and name[-6] == "_" and name[-5:].lower() == "proxy":
value = os.environ[name]
proxy_name = name[:-6].lower()
environment.append((name, value, proxy_name))
if value:
proxies[proxy_name] = value
# CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY
# (non-all-lowercase) as it may be set from the web server by a "Proxy:"
# header from the client
# If "proxy" is lowercase, it will still be used thanks to the next block
if 'REQUEST_METHOD' in os.environ:
proxies.pop('http', None)
for name, value, name_lower in environment:
if name[-6:] == '_proxy':
if value:
proxies[name_lower[:-6]] = value
else:
proxies.pop(name_lower[:-6], None)
return proxies
|
3,097 |
def _cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike:
"""
Helper function for `arr.astype(common_dtype)` but handling all special
cases.
"""
if (
is_categorical_dtype(arr.dtype)
and isinstance(dtype, np.dtype)
and np.issubdtype(dtype, np.integer)
):
# problem case: categorical of int -> gives int as result dtype,
# but categorical can contain NAs -> fall back to object dtype
try:
return arr.astype(dtype, copy=False)
except ValueError:
return arr.astype(object, copy=False)
if is_sparse(arr) and not is_sparse(dtype):
# problem case: SparseArray.astype(dtype) doesn't follow the specified
# dtype exactly, but converts this to Sparse[dtype] -> first manually
# convert to dense array
return arr.to_dense().astype(dtype, copy=False)
if (
isinstance(arr, np.ndarray)
and arr.dtype.kind in ["m", "M"]
and dtype is np.dtype("object")
):
# wrap datetime-likes in EA to ensure astype(object) gives Timestamp/Timedelta
# this can happen when concat_compat is called directly on arrays (when arrays
# are not coming from Index/Series._values), eg in BlockManager.quantile
arr = array(arr)
if is_extension_array_dtype(dtype):
if isinstance(arr, np.ndarray):
# numpy's astype cannot handle ExtensionDtypes
return array(arr, dtype=dtype, copy=False)
return arr.astype(dtype, copy=False)
|
def _cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike:
"""
Helper function for `arr.astype(common_dtype)` but handling all special
cases.
"""
if (
is_categorical_dtype(arr.dtype)
and isinstance(dtype, np.dtype)
and np.issubdtype(dtype, np.integer)
):
# problem case: categorical of int -> gives int as result dtype,
# but categorical can contain NAs -> fall back to object dtype
try:
return arr.astype(dtype, copy=False)
except ValueError:
return arr.astype(object, copy=False)
if is_sparse(arr.dtype) and not is_sparse(dtype):
# problem case: SparseArray.astype(dtype) doesn't follow the specified
# dtype exactly, but converts this to Sparse[dtype] -> first manually
# convert to dense array
return arr.to_dense().astype(dtype, copy=False)
if (
isinstance(arr, np.ndarray)
and arr.dtype.kind in ["m", "M"]
and dtype is np.dtype("object")
):
# wrap datetime-likes in EA to ensure astype(object) gives Timestamp/Timedelta
# this can happen when concat_compat is called directly on arrays (when arrays
# are not coming from Index/Series._values), eg in BlockManager.quantile
arr = array(arr)
if is_extension_array_dtype(dtype):
if isinstance(arr, np.ndarray):
# numpy's astype cannot handle ExtensionDtypes
return array(arr, dtype=dtype, copy=False)
return arr.astype(dtype, copy=False)
|
10,095 |
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
name=dict(type='str'),
uuid=dict(type='str'),
moid=dict(type='str'),
folder=dict(type='str'),
datacenter=dict(type='str', required=True),
disk_controller=dict(type='list', default=[]),
use_instance_uuid=dict(type='bool', default=False),
gather_disk_controller_facts=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['name', 'uuid', 'moid']
]
)
if module.params['folder']:
# FindByInventoryPath() does not require an absolute path
# so we should leave the input folder path unmodified
module.params['folder'] = module.params['folder'].rstrip('/')
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
if not vm:
# We unable to find the virtual machine user specified
# Bail out
vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
module.fail_json(msg="Unable to manage disks for non-existing"
" virtual machine '%s'." % vm_id)
# VM exists
result = pyv.configure_disk_controllers()
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
|
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
name=dict(type='str'),
uuid=dict(type='str'),
moid=dict(type='str'),
folder=dict(type='str'),
datacenter=dict(type='str', required=True),
disk_controller=dict(type='list', default=[]),
use_instance_uuid=dict(type='bool', default=False),
gather_disk_controller_facts=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['name', 'uuid', 'moid']
]
)
if module.params['folder']:
# FindByInventoryPath() does not require an absolute path
# so we should leave the input folder path unmodified
module.params['folder'] = module.params['folder'].rstrip('/')
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
if not vm:
# We unable to find the virtual machine user specified
# Bail out
vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
module.fail_json(msg="Unable to manage disk controller for non-existing"
" virtual machine '%s'." % vm_id)
# VM exists
result = pyv.configure_disk_controllers()
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
|
42,041 |
def test_multiprocess_with_progbar(
capsys: _pytest.capture.CaptureFixture, storage_url: str
) -> None:
with capsys.disabled():
n_workers = 8
study_name = _STUDY_NAME
optuna.create_study(storage=storage_url, study_name=study_name)
with Pool(n_workers) as pool:
ret = pool.map(run_optimize, [(study_name, storage_url, 1, True)] * n_workers)
comp_cnt = 0
for i in range(n_workers):
if "20/20" in ret[i] and "100%" in ret[i]:
comp_cnt += 1
assert comp_cnt == n_workers
study = optuna.load_study(study_name=study_name, storage=storage_url)
trials = study.trials
assert len(trials) == n_workers * 20
_check_trials(trials)
|
def test_multiprocess_with_progbar(
capsys: _pytest.capture.CaptureFixture, storage_url: str
) -> None:
with capsys.disabled():
n_workers = 8
study_name = _STUDY_NAME
optuna.create_study(storage=storage_url, study_name=study_name)
with Pool(n_workers) as pool:
ret = pool.map(run_optimize, [(study_name, storage_url, n_jobs, True)] * n_workers)
comp_cnt = 0
for i in range(n_workers):
if "20/20" in ret[i] and "100%" in ret[i]:
comp_cnt += 1
assert comp_cnt == n_workers
study = optuna.load_study(study_name=study_name, storage=storage_url)
trials = study.trials
assert len(trials) == n_workers * 20
_check_trials(trials)
|
27,115 |
def upgrade():
"""Change default ``pool_slots`` to ``1`` and make pool_slots not nullable"""
connection = op.get_bind()
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
session.query(task_instance) \
.filter(task_instance.c.pool_slots.is_(None)) \
.update({task_instance.c.pool_slots: 1}, synchronize_session=False)
session.commit()
with op.batch_alter_table("task_instance", schema=None) as batch_op:
batch_op.alter_column("pool_slots", existing_type=sa.Integer, nullable=False, server_default='1')
|
def upgrade():
"""Change default ``pool_slots`` to ``1`` and make pool_slots not nullable"""
op.get_bind("UPDATE task_instance SET pool_slots = 1 WHERE pool_slots IS NULL")
with op.batch_alter_table("task_instance", schema=None) as batch_op:
batch_op.alter_column("pool_slots", existing_type=sa.Integer, nullable=False, server_default='1')
|
58,540 |
def ddpg_actor_critic_loss(policy, model, _, train_batch):
twin_q = policy.config["twin_q"]
gamma = policy.config["gamma"]
n_step = policy.config["n_step"]
use_huber = policy.config["use_huber"]
huber_threshold = policy.config["huber_threshold"]
l2_reg = policy.config["l2_reg"]
input_dict = {
"obs": train_batch[SampleBatch.CUR_OBS],
"is_training": True,
}
input_dict_next = {
"obs": train_batch[SampleBatch.NEXT_OBS],
"is_training": True,
}
model_out_t, _ = model(input_dict, [], None)
model_out_tp1, _ = model(input_dict_next, [], None)
target_model_out_tp1, _ = policy.target_model(input_dict_next, [], None)
policy.target_q_func_vars = policy.target_model.variables()
# Policy network evaluation.
policy_t = model.get_policy_output(model_out_t)
policy_tp1 = \
policy.target_model.get_policy_output(target_model_out_tp1)
# Action outputs.
if policy.config["smooth_target_policy"]:
target_noise_clip = policy.config["target_noise_clip"]
clipped_normal_sample = tf.clip_by_value(
tf.random.normal(
tf.shape(policy_tp1), stddev=policy.config["target_noise"]),
-target_noise_clip, target_noise_clip)
policy_tp1_smoothed = tf.clip_by_value(
policy_tp1 + clipped_normal_sample,
policy.action_space.low * tf.ones_like(policy_tp1),
policy.action_space.high * tf.ones_like(policy_tp1))
else:
# No smoothing, just use deterministic actions.
policy_tp1_smoothed = policy_tp1
# Q-net(s) evaluation.
# prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
# Q-values for given actions & observations in given current
q_t = model.get_q_values(model_out_t, train_batch[SampleBatch.ACTIONS])
# Q-values for current policy (no noise) in given current state
q_t_det_policy = model.get_q_values(model_out_t, policy_t)
if twin_q:
twin_q_t = model.get_twin_q_values(model_out_t,
train_batch[SampleBatch.ACTIONS])
# Target q-net(s) evaluation.
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1,
policy_tp1_smoothed)
if twin_q:
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1, policy_tp1_smoothed)
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if twin_q:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
q_tp1 = tf.minimum(q_tp1, twin_q_tp1)
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = \
(1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)) * \
q_tp1_best
# Compute RHS of bellman equation.
q_t_selected_target = tf.stop_gradient(tf.cast(train_batch[SampleBatch.REWARDS], tf.float32) +
gamma**n_step * q_tp1_best_masked)
# Compute the error (potentially clipped).
if twin_q:
td_error = q_t_selected - q_t_selected_target
twin_td_error = twin_q_t_selected - q_t_selected_target
if use_huber:
errors = huber_loss(td_error, huber_threshold) + \
huber_loss(twin_td_error, huber_threshold)
else:
errors = 0.5 * tf.math.square(td_error) + \
0.5 * tf.math.square(twin_td_error)
else:
td_error = q_t_selected - q_t_selected_target
if use_huber:
errors = huber_loss(td_error, huber_threshold)
else:
errors = 0.5 * tf.math.square(td_error)
critic_loss = tf.reduce_mean(
tf.cast(train_batch[PRIO_WEIGHTS], tf.float32) * errors)
actor_loss = -tf.reduce_mean(q_t_det_policy)
# Add l2-regularization if required.
if l2_reg is not None:
for var in policy.model.policy_variables():
if "bias" not in var.name:
actor_loss += (l2_reg * tf.nn.l2_loss(var))
for var in policy.model.q_variables():
if "bias" not in var.name:
critic_loss += (l2_reg * tf.nn.l2_loss(var))
# Model self-supervised losses.
if policy.config["use_state_preprocessor"]:
# Expand input_dict in case custom_loss' need them.
input_dict[SampleBatch.ACTIONS] = train_batch[SampleBatch.ACTIONS]
input_dict[SampleBatch.REWARDS] = train_batch[SampleBatch.REWARDS]
input_dict[SampleBatch.DONES] = train_batch[SampleBatch.DONES]
input_dict[SampleBatch.NEXT_OBS] = train_batch[SampleBatch.NEXT_OBS]
if log_once("ddpg_custom_loss"):
logger.warning(
"You are using a state-preprocessor with DDPG and "
"therefore, `custom_loss` will be called on your Model! "
"Please be aware that DDPG now uses the ModelV2 API, which "
"merges all previously separate sub-models (policy_model, "
"q_model, and twin_q_model) into one ModelV2, on which "
"`custom_loss` is called, passing it "
"[actor_loss, critic_loss] as 1st argument. "
"You may have to change your custom loss function to handle "
"this.")
[actor_loss, critic_loss] = model.custom_loss(
[actor_loss, critic_loss], input_dict)
# Store values for stats function.
policy.actor_loss = actor_loss
policy.critic_loss = critic_loss
policy.td_error = td_error
policy.q_t = q_t
# Return one loss value (even though we treat them separately in our
# 2 optimizers: actor and critic).
return policy.critic_loss + policy.actor_loss
|
def ddpg_actor_critic_loss(policy, model, _, train_batch):
twin_q = policy.config["twin_q"]
gamma = policy.config["gamma"]
n_step = policy.config["n_step"]
use_huber = policy.config["use_huber"]
huber_threshold = policy.config["huber_threshold"]
l2_reg = policy.config["l2_reg"]
input_dict = {
"obs": train_batch[SampleBatch.CUR_OBS],
"is_training": True,
}
input_dict_next = {
"obs": train_batch[SampleBatch.NEXT_OBS],
"is_training": True,
}
model_out_t, _ = model(input_dict, [], None)
model_out_tp1, _ = model(input_dict_next, [], None)
target_model_out_tp1, _ = policy.target_model(input_dict_next, [], None)
policy.target_q_func_vars = policy.target_model.variables()
# Policy network evaluation.
policy_t = model.get_policy_output(model_out_t)
policy_tp1 = \
policy.target_model.get_policy_output(target_model_out_tp1)
# Action outputs.
if policy.config["smooth_target_policy"]:
target_noise_clip = policy.config["target_noise_clip"]
clipped_normal_sample = tf.clip_by_value(
tf.random.normal(
tf.shape(policy_tp1), stddev=policy.config["target_noise"]),
-target_noise_clip, target_noise_clip)
policy_tp1_smoothed = tf.clip_by_value(
policy_tp1 + clipped_normal_sample,
policy.action_space.low * tf.ones_like(policy_tp1),
policy.action_space.high * tf.ones_like(policy_tp1))
else:
# No smoothing, just use deterministic actions.
policy_tp1_smoothed = policy_tp1
# Q-net(s) evaluation.
# prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
# Q-values for given actions & observations in given current
q_t = model.get_q_values(model_out_t, train_batch[SampleBatch.ACTIONS])
# Q-values for current policy (no noise) in given current state
q_t_det_policy = model.get_q_values(model_out_t, policy_t)
if twin_q:
twin_q_t = model.get_twin_q_values(model_out_t,
train_batch[SampleBatch.ACTIONS])
# Target q-net(s) evaluation.
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1,
policy_tp1_smoothed)
if twin_q:
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1, policy_tp1_smoothed)
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if twin_q:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
q_tp1 = tf.minimum(q_tp1, twin_q_tp1)
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = \
(1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)) * \
q_tp1_best
# Compute RHS of bellman equation.
q_t_selected_target = tf.stop_gradient(
tf.cast(train_batch[SampleBatch.REWARDS], tf.float32) +
gamma**n_step * q_tp1_best_masked)
# Compute the error (potentially clipped).
if twin_q:
td_error = q_t_selected - q_t_selected_target
twin_td_error = twin_q_t_selected - q_t_selected_target
if use_huber:
errors = huber_loss(td_error, huber_threshold) + \
huber_loss(twin_td_error, huber_threshold)
else:
errors = 0.5 * tf.math.square(td_error) + \
0.5 * tf.math.square(twin_td_error)
else:
td_error = q_t_selected - q_t_selected_target
if use_huber:
errors = huber_loss(td_error, huber_threshold)
else:
errors = 0.5 * tf.math.square(td_error)
critic_loss = tf.reduce_mean(
tf.cast(train_batch[PRIO_WEIGHTS], tf.float32) * errors)
actor_loss = -tf.reduce_mean(q_t_det_policy)
# Add l2-regularization if required.
if l2_reg is not None:
for var in policy.model.policy_variables():
if "bias" not in var.name:
actor_loss += (l2_reg * tf.nn.l2_loss(var))
for var in policy.model.q_variables():
if "bias" not in var.name:
critic_loss += (l2_reg * tf.nn.l2_loss(var))
# Model self-supervised losses.
if policy.config["use_state_preprocessor"]:
# Expand input_dict in case custom_loss' need them.
input_dict[SampleBatch.ACTIONS] = train_batch[SampleBatch.ACTIONS]
input_dict[SampleBatch.REWARDS] = train_batch[SampleBatch.REWARDS]
input_dict[SampleBatch.DONES] = train_batch[SampleBatch.DONES]
input_dict[SampleBatch.NEXT_OBS] = train_batch[SampleBatch.NEXT_OBS]
if log_once("ddpg_custom_loss"):
logger.warning(
"You are using a state-preprocessor with DDPG and "
"therefore, `custom_loss` will be called on your Model! "
"Please be aware that DDPG now uses the ModelV2 API, which "
"merges all previously separate sub-models (policy_model, "
"q_model, and twin_q_model) into one ModelV2, on which "
"`custom_loss` is called, passing it "
"[actor_loss, critic_loss] as 1st argument. "
"You may have to change your custom loss function to handle "
"this.")
[actor_loss, critic_loss] = model.custom_loss(
[actor_loss, critic_loss], input_dict)
# Store values for stats function.
policy.actor_loss = actor_loss
policy.critic_loss = critic_loss
policy.td_error = td_error
policy.q_t = q_t
# Return one loss value (even though we treat them separately in our
# 2 optimizers: actor and critic).
return policy.critic_loss + policy.actor_loss
|
21,123 |
def setup_package():
root = os.path.abspath(os.path.dirname(__file__))
if len(sys.argv) > 1 and sys.argv[1] == 'clean':
return clean(root)
with chdir(root):
with io.open(os.path.join(root, 'spacy', 'about.py'), encoding='utf8') as f:
about = {}
exec(f.read(), about)
with io.open(os.path.join(root, 'README.rst'), encoding='utf8') as f:
readme = f.read()
include_dirs = [
get_python_inc(plat_specific=True),
os.path.join(root, 'include')]
if (ccompiler.new_compiler().compiler_type == 'msvc'
and msvccompiler.get_build_version() == 9):
include_dirs.append(os.path.join(root, 'include', 'msvc9'))
ext_modules = []
for mod_name in MOD_NAMES:
mod_path = mod_name.replace('.', '/') + '.cpp'
extra_link_args = []
extra_compile_args = []
# ???
# Imported from patch from @mikepb
# See Issue #267. Running blind here...
if sys.platform == 'darwin':
dylib_path = ['..' for _ in range(mod_name.count('.'))]
dylib_path = '/'.join(dylib_path)
dylib_path = '@loader_path/%s/spacy/platform/darwin/lib' % dylib_path
extra_link_args.append('-Wl,-rpath,%s' % dylib_path)
# Try to fix OSX 10.7 problem. Running blind here too.
extra_compile_args.append('-std=c++11')
extra_link_args.append('-std=c++11')
ext_modules.append(
Extension(mod_name, [mod_path],
language='c++', include_dirs=include_dirs,
extra_link_args=extra_link_args,
extra_compile_args=extra_compile_args))
if not is_source_release(root):
generate_cython(root, 'spacy')
setup(
name=about['__title__'],
zip_safe=False,
packages=PACKAGES,
package_data=PACKAGE_DATA,
description=about['__summary__'],
long_description=readme,
author=about['__author__'],
author_email=about['__email__'],
version=about['__version__'],
url=about['__uri__'],
license=about['__license__'],
ext_modules=ext_modules,
scripts=['bin/spacy'],
setup_requires=['wheel>=0.32.0,<0.33.0'],
install_requires=[
'numpy>=1.15.0',
'murmurhash>=0.28.0,<1.1.0',
'cymem>=2.0.2,<2.1.0',
'preshed>=2.0.1,<2.1.0',
'thinc>=6.12.1,<6.13.0',
'plac<1.0.0,>=0.9.6',
'ujson>=1.35',
'dill>=0.2,<0.3',
'regex==2018.01.10',
'requests>=2.13.0,<3.0.0',
'pathlib==1.0.1; python_version < "3.4"'],
extras_require={
'cuda': ['cupy>=4.0'],
'cuda80': ['cupy-cuda80>=4.0', 'thinc_gpu_ops>=0.0.3,<0.1.0'],
'cuda90': ['cupy-cuda90>=4.0', 'thinc_gpu_ops>=0.0.3,<0.1.0'],
'cuda91': ['cupy-cuda91>=4.0', 'thinc_gpu_ops>=0.0.3,<0.1.0'],
'cuda92': ['cupy-cuda92>=4.0', 'thinc_gpu_ops>=0.0.3,<0.1.0'],
'cuda100': ['cupy-cuda100>=4.0', 'thinc_gpu_ops>=0.0.3,<0.1.0'],
'mecab': ['mecab-python3==0.7']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Cython',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering'],
cmdclass = {
'build_ext': build_ext_subclass},
)
|
def setup_package():
root = os.path.abspath(os.path.dirname(__file__))
if len(sys.argv) > 1 and sys.argv[1] == 'clean':
return clean(root)
with chdir(root):
with io.open(os.path.join(root, 'spacy', 'about.py'), encoding='utf8') as f:
about = {}
exec(f.read(), about)
with io.open(os.path.join(root, 'README.rst'), encoding='utf8') as f:
readme = f.read()
include_dirs = [
get_python_inc(plat_specific=True),
os.path.join(root, 'include')]
if (ccompiler.new_compiler().compiler_type == 'msvc'
and msvccompiler.get_build_version() == 9):
include_dirs.append(os.path.join(root, 'include', 'msvc9'))
ext_modules = []
for mod_name in MOD_NAMES:
mod_path = mod_name.replace('.', '/') + '.cpp'
extra_link_args = []
extra_compile_args = []
# ???
# Imported from patch from @mikepb
# See Issue #267. Running blind here...
if sys.platform == 'darwin':
dylib_path = ['..' for _ in range(mod_name.count('.'))]
dylib_path = '/'.join(dylib_path)
dylib_path = '@loader_path/%s/spacy/platform/darwin/lib' % dylib_path
extra_link_args.append('-Wl,-rpath,%s' % dylib_path)
# Try to fix OSX 10.7 problem. Running blind here too.
extra_compile_args.append('-std=c++11')
extra_link_args.append('-std=c++11')
ext_modules.append(
Extension(mod_name, [mod_path],
language='c++', include_dirs=include_dirs,
extra_link_args=extra_link_args,
extra_compile_args=extra_compile_args))
if not is_source_release(root):
generate_cython(root, 'spacy')
setup(
name=about['__title__'],
zip_safe=False,
packages=PACKAGES,
package_data=PACKAGE_DATA,
description=about['__summary__'],
long_description=readme,
author=about['__author__'],
author_email=about['__email__'],
version=about['__version__'],
url=about['__uri__'],
license=about['__license__'],
ext_modules=ext_modules,
scripts=['bin/spacy'],
setup_requires=['wheel>=0.32.0,<0.33.0'],
install_requires=[
'numpy>=1.15.0',
'murmurhash>=0.28.0,<1.1.0',
'cymem>=2.0.2,<2.1.0',
'preshed>=2.0.1,<2.1.0',
'thinc>=6.12.1,<6.13.0',
'plac<1.0.0,>=0.9.6',
'ujson>=1.35',
'dill>=0.2,<0.3',
'regex==2018.01.10',
'requests>=2.13.0,<3.0.0',
'pathlib==1.0.1; python_version < "3.4"'],
extras_require={
'cuda': ['cupy>=4.0'],
'cuda80': ['cupy-cuda80>=4.0', 'thinc_gpu_ops>=0.0.3,<0.1.0'],
'cuda90': ['cupy-cuda90>=4.0', 'thinc_gpu_ops>=0.0.3,<0.1.0'],
'cuda91': ['cupy-cuda91>=4.0', 'thinc_gpu_ops>=0.0.3,<0.1.0'],
'cuda92': ['cupy-cuda92>=4.0', 'thinc_gpu_ops>=0.0.3,<0.1.0'],
'cuda100': ['cupy-cuda100>=4.0', 'thinc_gpu_ops>=0.0.3,<0.1.0'],
'ja': ['mecab-python3==0.7']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Cython',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering'],
cmdclass = {
'build_ext': build_ext_subclass},
)
|
34,572 |
def test_train_nlu_with_responses_or_domain_warns(
tmp_path: Text, monkeypatch: MonkeyPatch,
):
(tmp_path / "training").mkdir()
(tmp_path / "models").mkdir()
data_path = "data/test_nlu_no_responses/nlu_no_responses.yml"
domain_path = "data/test_nlu_no_responses/domain_with_only_responses.yml"
with pytest.warns(None) as records:
train_nlu(
"data/test_config/config_defaults.yml",
data_path,
output=str(tmp_path / "models"),
)
assert any(
"You either need to add a response phrase or correct the intent"
in record.message.args[0]
for record in records
)
with pytest.warns(None) as records:
train_nlu(
"data/test_config/config_defaults.yml",
data_path,
output=str(tmp_path / "models"),
domain=domain_path,
)
assert not any(
"You either need to add a response phrase or correct the intent"
in record.message.args[0]
for record in records
)
|
def test_train_nlu_with_responses_or_domain_warns(
tmp_path: Path, monkeypatch: MonkeyPatch,
):
(tmp_path / "training").mkdir()
(tmp_path / "models").mkdir()
data_path = "data/test_nlu_no_responses/nlu_no_responses.yml"
domain_path = "data/test_nlu_no_responses/domain_with_only_responses.yml"
with pytest.warns(None) as records:
train_nlu(
"data/test_config/config_defaults.yml",
data_path,
output=str(tmp_path / "models"),
)
assert any(
"You either need to add a response phrase or correct the intent"
in record.message.args[0]
for record in records
)
with pytest.warns(None) as records:
train_nlu(
"data/test_config/config_defaults.yml",
data_path,
output=str(tmp_path / "models"),
domain=domain_path,
)
assert not any(
"You either need to add a response phrase or correct the intent"
in record.message.args[0]
for record in records
)
|
32,052 |
def get_custom_field_names():
"""
This function returns all custom fields.
:return: dict of custom fields: id as key and description as value.
"""
custom_id_name_mapping = {}
HEADERS['Accept'] = "application/json"
try:
res = requests.request(
method='GET',
url=BASE_URL + 'rest/api/latest/field',
headers=HEADERS,
verify=USE_SSL,
auth=get_auth(),
)
except Exception as e:
demisto.error(f'Could not get custom fields because got the next exception: {e}')
else:
if res.status_code == 200:
custom_fields_list = res.json()
custom_id_name_mapping = {field.get('id'): field.get('name') for field in custom_fields_list}
else:
demisto.error(f'Could not get custom fields. status code: {res.status_code}. reason: {res.reason}')
finally:
return custom_id_name_mapping
|
def get_custom_field_names():
"""
This function returns all custom fields.
:return: dict of custom fields: id as key and description as value.
"""
custom_id_name_mapping = {}
HEADERS['Accept'] = "application/json"
try:
res = requests.request(
method='GET',
url=BASE_URL + 'rest/api/latest/field',
headers=HEADERS,
verify=USE_SSL,
auth=get_auth(),
)
except Exception as e:
demisto.error(f'Could not get custom fields because got the next exception: {e}')
else:
if res.ok:
custom_fields_list = res.json()
custom_id_name_mapping = {field.get('id'): field.get('name') for field in custom_fields_list}
else:
demisto.error(f'Could not get custom fields. status code: {res.status_code}. reason: {res.reason}')
finally:
return custom_id_name_mapping
|
8,505 |
def user_dictize(
user, context, include_password_hash=False,
include_plugin_extras=False):
if context.get('with_capacity'):
user, capacity = user
result_dict = d.table_dictize(user, context, capacity=capacity)
else:
result_dict = d.table_dictize(user, context)
password_hash = result_dict.pop('password')
del result_dict['reset_key']
result_dict['display_name'] = user.display_name
result_dict['email_hash'] = user.email_hash
result_dict['number_created_packages'] = user.number_created_packages(
include_private_and_draft=context.get(
'count_private_and_draft_datasets', False))
requester = context.get('user')
reset_key = result_dict.pop('reset_key', None)
apikey = result_dict.pop('apikey', None)
email = result_dict.pop('email', None)
plugin_extras = result_dict.pop('plugin_extras', None)
if context.get('keep_email', False):
result_dict['email'] = email
if context.get('keep_apikey', False):
result_dict['apikey'] = apikey
if requester == user.name:
result_dict['apikey'] = apikey
result_dict['email'] = email
if authz.is_sysadmin(requester):
result_dict['apikey'] = apikey
result_dict['email'] = email
if include_password_hash:
result_dict['password_hash'] = password_hash
if include_plugin_extras:
result_dict['plugin_extras'] = copy.deepcopy(
plugin_extras) if plugin_extras else plugin_extras
model = context['model']
session = model.Session
image_url = result_dict.get('image_url')
result_dict['image_display_url'] = image_url
if image_url and not image_url.startswith('http'):
#munge here should not have an effect only doing it incase
#of potential vulnerability of dodgy api input
image_url = munge.munge_filename_legacy(image_url)
result_dict['image_display_url'] = h.url_for_static(
'uploads/user/%s' % result_dict.get('image_url'),
qualified=True
)
return result_dict
|
def user_dictize(
user, context, include_password_hash=False,
include_plugin_extras=False):
if context.get('with_capacity'):
user, capacity = user
result_dict = d.table_dictize(user, context, capacity=capacity)
else:
result_dict = d.table_dictize(user, context)
password_hash = result_dict.pop('password')
del result_dict['reset_key']
result_dict['display_name'] = user.display_name
result_dict['email_hash'] = user.email_hash
result_dict['number_created_packages'] = user.number_created_packages(
include_private_and_draft=context.get(
'count_private_and_draft_datasets', False))
requester = context.get('user')
reset_key = result_dict.pop('reset_key', None)
apikey = result_dict.pop('apikey', None)
email = result_dict.pop('email', None)
plugin_extras = result_dict.pop('plugin_extras', None)
if context.get('keep_email', False):
result_dict['email'] = email
if context.get('keep_apikey', False):
result_dict['apikey'] = apikey
if requester == user.name:
result_dict['apikey'] = apikey
result_dict['email'] = email
if authz.is_sysadmin(requester):
result_dict['apikey'] = apikey
result_dict['email'] = email
if include_password_hash:
result_dict['password_hash'] = password_hash
if include_plugin_extras:
result_dict['plugin_extras'] = copy.deepcopy(
plugin_extras) if plugin_extras else plugin_extras
model = context['model']
session = model.Session
image_url = result_dict.get('image_url')
result_dict['image_display_url'] = image_url
if image_url and not image_url.startswith('http'):
# munge here should not have any effect, only doing it in case
#of potential vulnerability of dodgy api input
image_url = munge.munge_filename_legacy(image_url)
result_dict['image_display_url'] = h.url_for_static(
'uploads/user/%s' % result_dict.get('image_url'),
qualified=True
)
return result_dict
|
30,833 |
def get_security_profiles_command():
"""
Get information about profiles.
"""
security_profile = demisto.args().get('security_profile')
if security_profile:
xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
else:
xpath = f'{XPATH_RULEBASE}profiles'
result = get_security_profile(xpath)
if security_profile:
security_profiles = result.get('response', {}).get('result', {})
else:
security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
if '@dirtyId' in security_profiles:
LOG(f'Found uncommitted item:\n{security_profiles}')
raise Exception('Please commit the instance prior to getting the security profiles.')
human_readable = ''
content: List[Dict[str, Any]] = []
context = {}
if 'spyware' in security_profiles:
profiles = security_profiles.get('spyware').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': spyware_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': spyware_rules
}
human_readable = tableToMarkdown('Anti Spyware Profiles', content)
context.update({"Panorama.Spyware(val.Name == obj.Name)": content})
if 'virus' in security_profiles:
profiles = security_profiles.get('virus').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Decoder': antivirus_rules
})
else:
rules = profiles.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': antivirus_rules
}
human_readable += tableToMarkdown('Antivirus Profiles', content)
context.update({"Panorama.Antivirus(val.Name == obj.Name)": content})
if 'file-blocking' in security_profiles:
profiles = security_profiles.get('file-blocking').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': file_blocking_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': file_blocking_rules
}
human_readable += tableToMarkdown('File Blocking Profiles', content)
context.update({"Panorama.FileBlocking(val.Name == obj.Name)": content})
if 'vulnerability' in security_profiles:
profiles = security_profiles.get('vulnerability').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': vulnerability_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': vulnerability_rules
}
human_readable += tableToMarkdown('vulnerability Protection Profiles', content)
context.update({"Panorama.Vulnerability(val.Name == obj.Name)": content})
if 'data-filtering' in security_profiles:
profiles = security_profiles.get('data-filtering').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': data_filtering_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': data_filtering_rules
}
human_readable += tableToMarkdown('Data Filtering Profiles', content)
context.update({"Panorama.DataFiltering(val.Name == obj.Name)": content})
if 'url-filtering' in security_profiles:
profiles = security_profiles.get('url-filtering').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
url_filtering_rules = prettify_get_url_filter(profile)
content.append({
'Name': profile['@name'],
'Rules': url_filtering_rules
})
else:
url_filtering_rules = prettify_get_url_filter(profiles)
content = {
'Name': profiles['@name'],
'Rules': url_filtering_rules
}
human_readable += tableToMarkdown('URL Filtering Profiles', content)
context.update({"Panorama.URLFilter(val.Name == obj.Name)": content})
if 'wildfire-analysis' in security_profiles:
profiles = security_profiles.get('wildfire-analysis').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': wildfire_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': wildfire_rules
}
human_readable += tableToMarkdown('WildFire Profiles', content)
context.update({"Panorama.WildFire(val.Name == obj.Name)": content})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': context
})
|
def get_security_profiles_command():
"""
Get information about profiles.
"""
security_profile = demisto.args().get('security_profile')
if security_profile:
xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
else:
xpath = f'{XPATH_RULEBASE}profiles'
result = get_security_profile(xpath)
if security_profile:
security_profiles = result.get('response', {}).get('result', {})
else:
security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
if '@dirtyId' in security_profiles:
LOG(f'Found uncommitted item:\n{security_profiles}')
raise Exception('Please commit the instance prior to getting the security profiles.')
human_readable = ''
content: List[Dict[str, Any]] = []
context = {}
if 'spyware' in security_profiles:
profiles = security_profiles.get('spyware').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': spyware_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': spyware_rules
}
human_readable = tableToMarkdown('Anti Spyware Profiles', content)
context.update({"Panorama.Spyware(val.Name == obj.Name)": content})
if 'virus' in security_profiles:
profiles = security_profiles.get('virus').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Decoder': antivirus_rules
})
else:
rules = profiles.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': antivirus_rules
}
human_readable += tableToMarkdown('Antivirus Profiles', content)
context.update({"Panorama.Antivirus(val.Name == obj.Name)": content})
if 'file-blocking' in security_profiles:
profiles = security_profiles.get('file-blocking').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': file_blocking_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': file_blocking_rules
}
human_readable += tableToMarkdown('File Blocking Profiles', content)
context.update({"Panorama.FileBlocking(val.Name == obj.Name)": content})
if 'vulnerability' in security_profiles:
profiles = security_profiles.get('vulnerability').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': vulnerability_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': vulnerability_rules
}
human_readable += tableToMarkdown('vulnerability Protection Profiles', content)
context.update({"Panorama.Vulnerability(val.Name == obj.Name)": content})
if 'data-filtering' in security_profiles:
profiles = security_profiles.get('data-filtering').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': data_filtering_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': data_filtering_rules
}
human_readable += tableToMarkdown('Data Filtering Profiles', content)
context.update({"Panorama.DataFiltering(val.Name == obj.Name)": content})
if 'url-filtering' in security_profiles:
profiles = security_profiles.get('url-filtering').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
url_filtering_rules = prettify_get_url_filter(profile)
content.append({
'Name': profile['@name'],
'Rules': url_filtering_rules
})
else:
url_filtering_rules = prettify_get_url_filter(profiles)
content = {
'Name': profiles['@name'],
'Rules': url_filtering_rules
}
human_readable += tableToMarkdown('URL Filtering Profiles', content)
context.update({"Panorama.URLFilter(val.Name == obj.Name)": content})
if 'wildfire-analysis' in security_profiles:
profiles = security_profiles.get('wildfire-analysis', {}).get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': wildfire_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': wildfire_rules
}
human_readable += tableToMarkdown('WildFire Profiles', content)
context.update({"Panorama.WildFire(val.Name == obj.Name)": content})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': context
})
|
27,384 |
def generate_streamlines_3d(
topology_file_path: str, trajectory_file_path: str,
grid_spacing: float, MDA_selection: str,
start_frame: int, end_frame: int,
xmin: float, xmax: float,
ymin: float, ymax: float,
zmin: float, zmax: float,
maximum_delta_magnitude: float = 2.0,
num_cores: int = 'maximum'
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
r"""Produce the x, y and z components of a 3D streamplot data set.
Parameters
----------
topology_file_path : str
Absolute path to the topology file
trajectory_file_path : str
Absolute path to the trajectory file. It will normally be desirable
to filter the trajectory with a tool such as GROMACS
:program:`g_filter` (see :cite:p:`b-Chavent2014`)
grid_spacing : float
The spacing between grid lines (angstroms)
MDA_selection : str
MDAnalysis selection string
start_frame : int
First frame number to parse
end_frame : int
Last frame number to parse
xmin : float
Minimum coordinate boundary for x-axis (angstroms)
xmax : float
Maximum coordinate boundary for x-axis (angstroms)
ymin : float
Minimum coordinate boundary for y-axis (angstroms)
ymax : float
Maximum coordinate boundary for y-axis (angstroms)
maximum_delta_magnitude : float
Absolute value of the largest displacement tolerated for the
centroid of a group of particles ( angstroms). Values above this
displacement will not count in the streamplot (treated as
excessively large displacements crossing the periodic boundary)
num_cores : int or 'maximum' (optional)
The number of cores to use. (Default 'maximum' uses all available
cores)
Returns
-------
dx_array : array of floats
An array object containing the displacements in the x direction
dy_array : array of floats
An array object containing the displacements in the y direction
dz_array : array of floats
An array object containing the displacements in the z direction
Examples
--------
Generate 3D streamlines and visualize in `mayavi`_::
import numpy as np
import MDAnalysis
import MDAnalysis.visualization.streamlines_3D
import mayavi, mayavi.mlab
# assign coordinate system limits and grid spacing:
x_lower,x_upper = -8.73, 1225.96
y_lower,y_upper = -12.58, 1224.34
z_lower,z_upper = -300, 300
grid_spacing_value = 20
x1, y1, z1 = MDAnalysis.visualization.streamlines_3D.generate_streamlines_3d(
'testing.gro', 'testing_filtered.xtc',
xmin=x_lower, xmax=x_upper,
ymin=y_lower, ymax=y_upper,
zmin=z_lower, zmax=z_upper,
grid_spacing=grid_spacing_value, MDA_selection = 'name PO4',
start_frame=2, end_frame=3, num_cores='maximum')
x, y, z = np.mgrid[x_lower:x_upper:x1.shape[0]*1j,
y_lower:y_upper:y1.shape[1]*1j,
z_lower:z_upper:z1.shape[2]*1j]
# plot with mayavi:
fig = mayavi.mlab.figure(bgcolor=(1.0, 1.0, 1.0), size=(800, 800), fgcolor=(0, 0, 0))
for z_value in np.arange(z_lower, z_upper, grid_spacing_value):
st = mayavi.mlab.flow(x, y, z, x1, y1, z1, line_width=1,
seedtype='plane', integration_direction='both')
st.streamline_type = 'tube'
st.tube_filter.radius = 2
st.seed.widget.origin = np.array([ x_lower, y_upper, z_value])
st.seed.widget.point1 = np.array([ x_upper, y_upper, z_value])
st.seed.widget.point2 = np.array([ x_lower, y_lower, z_value])
st.seed.widget.resolution = int(x1.shape[0])
st.seed.widget.enabled = False
mayavi.mlab.axes(extent = [0, 1200, 0, 1200, -300, 300])
fig.scene.z_plus_view()
mayavi.mlab.savefig('test_streamplot_3D.png')
# more compelling examples can be produced for vesicles and other spherical systems
.. image:: test_streamplot_3D.png
See Also
--------
MDAnalysis.visualization.streamlines.generate_streamlines
.. _mayavi: http://docs.enthought.com/mayavi/mayavi/
"""
# work out the number of cores to use:
if num_cores == 'maximum':
num_cores = multiprocessing.cpu_count() # use all available cores
else:
num_cores = num_cores # use the value specified by the user
# assert isinstance(num_cores,(int,long)), "The number of specified cores must (of course) be an integer."
np.seterr(all='warn', over='raise')
parent_cube_dictionary = {} # collect all data from child processes here
def log_result_to_parent(process_dict):
parent_cube_dictionary.update(process_dict)
#step 1: produce tuple of cartesian coordinate limits for the first frame
#tuple_of_limits = determine_container_limits(topology_file_path = topology_file_path,trajectory_file_path =
# trajectory_file_path,buffer_value=buffer_value)
tuple_of_limits = (xmin, xmax, ymin, ymax, zmin, zmax)
#step 2: produce a suitable grid (will assume that grid size / container size does not vary during simulation--or
# at least not beyond the buffer limit, such that this grid can be used for all subsequent frames)
grid = produce_grid(tuple_of_limits=tuple_of_limits, grid_spacing=grid_spacing)
#step 3: split the grid into a dictionary of cube information that can be sent to each core for processing:
list_dictionaries_for_cores, total_cubes, num_sheets, delta_array_shape = split_grid(grid=grid, num_cores=num_cores)
#step 3b: produce required coordinate arrays on a single core to avoid making a universe object on each core:
start_frame_coord_array, end_frame_coord_array = produce_coordinate_arrays_single_process(topology_file_path,
trajectory_file_path,
MDA_selection,
start_frame, end_frame)
#step 4: per process work using the above grid data split
pool = multiprocessing.Pool(num_cores)
for sub_dictionary_of_cube_data in list_dictionaries_for_cores:
pool.apply_async(per_core_work, args=(
start_frame_coord_array, end_frame_coord_array, sub_dictionary_of_cube_data, MDA_selection, start_frame,
end_frame), callback=log_result_to_parent)
pool.close()
pool.join()
#so, at this stage the parent process now has a single dictionary with all the cube objects updated from all
# available cores
#the 3D streamplot (i.e, mayavi flow() function) will require separate 3D np arrays for dx,dy,dz
#the shape of each 3D array will unfortunately have to match the mgrid data structure (bit of a pain): (
# num_sheets - 1, num_sheets - 1, cubes_per_column)
cubes_per_sheet = int(float(total_cubes) / float(num_sheets - 1))
#produce dummy zero arrays for dx,dy,dz of the appropriate shape:
dx_array = np.zeros(delta_array_shape)
dy_array = np.zeros(delta_array_shape)
dz_array = np.zeros(delta_array_shape)
#now use the parent cube dictionary to correctly substitute in dx,dy,dz values
current_sheet = 0 # which is also the current row
y_index_current_sheet = 0 # sub row
z_index_current_column = 0 # column
total_cubes_current_sheet = 0
for cube_number in range(0, total_cubes):
dx_array[current_sheet, y_index_current_sheet, z_index_current_column] = parent_cube_dictionary[cube_number][
'dx']
dy_array[current_sheet, y_index_current_sheet, z_index_current_column] = parent_cube_dictionary[cube_number][
'dy']
dz_array[current_sheet, y_index_current_sheet, z_index_current_column] = parent_cube_dictionary[cube_number][
'dz']
z_index_current_column += 1
total_cubes_current_sheet += 1
if z_index_current_column == delta_array_shape[2]:
# done building current y-column so iterate y value and reset z
z_index_current_column = 0
y_index_current_sheet += 1
if y_index_current_sheet == delta_array_shape[1]: # current sheet is complete
current_sheet += 1
y_index_current_sheet = 0 # restart for new sheet
z_index_current_column = 0
total_cubes_current_sheet = 0
# now set velocity component values greater than a certain cutoff to 0,
# because they tend to reflect spurious values (i.e., PBC jumping)
dx_array[abs(dx_array) >= maximum_delta_magnitude] = 1.0
dy_array[abs(dy_array) >= maximum_delta_magnitude] = 1.0
dz_array[abs(dz_array) >= maximum_delta_magnitude] = 1.0
return (dx_array, dy_array, dz_array)
|
def generate_streamlines_3d(
topology_file_path: str, trajectory_file_path: str,
grid_spacing: float, MDA_selection: str,
start_frame: int, end_frame: int,
xmin: float, xmax: float,
ymin: float, ymax: float,
zmin: float, zmax: float,
maximum_delta_magnitude: float = 2.0,
num_cores: Union[int, str] = 'maximum'
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
r"""Produce the x, y and z components of a 3D streamplot data set.
Parameters
----------
topology_file_path : str
Absolute path to the topology file
trajectory_file_path : str
Absolute path to the trajectory file. It will normally be desirable
to filter the trajectory with a tool such as GROMACS
:program:`g_filter` (see :cite:p:`b-Chavent2014`)
grid_spacing : float
The spacing between grid lines (angstroms)
MDA_selection : str
MDAnalysis selection string
start_frame : int
First frame number to parse
end_frame : int
Last frame number to parse
xmin : float
Minimum coordinate boundary for x-axis (angstroms)
xmax : float
Maximum coordinate boundary for x-axis (angstroms)
ymin : float
Minimum coordinate boundary for y-axis (angstroms)
ymax : float
Maximum coordinate boundary for y-axis (angstroms)
maximum_delta_magnitude : float
Absolute value of the largest displacement tolerated for the
centroid of a group of particles ( angstroms). Values above this
displacement will not count in the streamplot (treated as
excessively large displacements crossing the periodic boundary)
num_cores : int or 'maximum' (optional)
The number of cores to use. (Default 'maximum' uses all available
cores)
Returns
-------
dx_array : array of floats
An array object containing the displacements in the x direction
dy_array : array of floats
An array object containing the displacements in the y direction
dz_array : array of floats
An array object containing the displacements in the z direction
Examples
--------
Generate 3D streamlines and visualize in `mayavi`_::
import numpy as np
import MDAnalysis
import MDAnalysis.visualization.streamlines_3D
import mayavi, mayavi.mlab
# assign coordinate system limits and grid spacing:
x_lower,x_upper = -8.73, 1225.96
y_lower,y_upper = -12.58, 1224.34
z_lower,z_upper = -300, 300
grid_spacing_value = 20
x1, y1, z1 = MDAnalysis.visualization.streamlines_3D.generate_streamlines_3d(
'testing.gro', 'testing_filtered.xtc',
xmin=x_lower, xmax=x_upper,
ymin=y_lower, ymax=y_upper,
zmin=z_lower, zmax=z_upper,
grid_spacing=grid_spacing_value, MDA_selection = 'name PO4',
start_frame=2, end_frame=3, num_cores='maximum')
x, y, z = np.mgrid[x_lower:x_upper:x1.shape[0]*1j,
y_lower:y_upper:y1.shape[1]*1j,
z_lower:z_upper:z1.shape[2]*1j]
# plot with mayavi:
fig = mayavi.mlab.figure(bgcolor=(1.0, 1.0, 1.0), size=(800, 800), fgcolor=(0, 0, 0))
for z_value in np.arange(z_lower, z_upper, grid_spacing_value):
st = mayavi.mlab.flow(x, y, z, x1, y1, z1, line_width=1,
seedtype='plane', integration_direction='both')
st.streamline_type = 'tube'
st.tube_filter.radius = 2
st.seed.widget.origin = np.array([ x_lower, y_upper, z_value])
st.seed.widget.point1 = np.array([ x_upper, y_upper, z_value])
st.seed.widget.point2 = np.array([ x_lower, y_lower, z_value])
st.seed.widget.resolution = int(x1.shape[0])
st.seed.widget.enabled = False
mayavi.mlab.axes(extent = [0, 1200, 0, 1200, -300, 300])
fig.scene.z_plus_view()
mayavi.mlab.savefig('test_streamplot_3D.png')
# more compelling examples can be produced for vesicles and other spherical systems
.. image:: test_streamplot_3D.png
See Also
--------
MDAnalysis.visualization.streamlines.generate_streamlines
.. _mayavi: http://docs.enthought.com/mayavi/mayavi/
"""
# work out the number of cores to use:
if num_cores == 'maximum':
num_cores = multiprocessing.cpu_count() # use all available cores
else:
num_cores = num_cores # use the value specified by the user
# assert isinstance(num_cores,(int,long)), "The number of specified cores must (of course) be an integer."
np.seterr(all='warn', over='raise')
parent_cube_dictionary = {} # collect all data from child processes here
def log_result_to_parent(process_dict):
parent_cube_dictionary.update(process_dict)
#step 1: produce tuple of cartesian coordinate limits for the first frame
#tuple_of_limits = determine_container_limits(topology_file_path = topology_file_path,trajectory_file_path =
# trajectory_file_path,buffer_value=buffer_value)
tuple_of_limits = (xmin, xmax, ymin, ymax, zmin, zmax)
#step 2: produce a suitable grid (will assume that grid size / container size does not vary during simulation--or
# at least not beyond the buffer limit, such that this grid can be used for all subsequent frames)
grid = produce_grid(tuple_of_limits=tuple_of_limits, grid_spacing=grid_spacing)
#step 3: split the grid into a dictionary of cube information that can be sent to each core for processing:
list_dictionaries_for_cores, total_cubes, num_sheets, delta_array_shape = split_grid(grid=grid, num_cores=num_cores)
#step 3b: produce required coordinate arrays on a single core to avoid making a universe object on each core:
start_frame_coord_array, end_frame_coord_array = produce_coordinate_arrays_single_process(topology_file_path,
trajectory_file_path,
MDA_selection,
start_frame, end_frame)
#step 4: per process work using the above grid data split
pool = multiprocessing.Pool(num_cores)
for sub_dictionary_of_cube_data in list_dictionaries_for_cores:
pool.apply_async(per_core_work, args=(
start_frame_coord_array, end_frame_coord_array, sub_dictionary_of_cube_data, MDA_selection, start_frame,
end_frame), callback=log_result_to_parent)
pool.close()
pool.join()
#so, at this stage the parent process now has a single dictionary with all the cube objects updated from all
# available cores
#the 3D streamplot (i.e, mayavi flow() function) will require separate 3D np arrays for dx,dy,dz
#the shape of each 3D array will unfortunately have to match the mgrid data structure (bit of a pain): (
# num_sheets - 1, num_sheets - 1, cubes_per_column)
cubes_per_sheet = int(float(total_cubes) / float(num_sheets - 1))
#produce dummy zero arrays for dx,dy,dz of the appropriate shape:
dx_array = np.zeros(delta_array_shape)
dy_array = np.zeros(delta_array_shape)
dz_array = np.zeros(delta_array_shape)
#now use the parent cube dictionary to correctly substitute in dx,dy,dz values
current_sheet = 0 # which is also the current row
y_index_current_sheet = 0 # sub row
z_index_current_column = 0 # column
total_cubes_current_sheet = 0
for cube_number in range(0, total_cubes):
dx_array[current_sheet, y_index_current_sheet, z_index_current_column] = parent_cube_dictionary[cube_number][
'dx']
dy_array[current_sheet, y_index_current_sheet, z_index_current_column] = parent_cube_dictionary[cube_number][
'dy']
dz_array[current_sheet, y_index_current_sheet, z_index_current_column] = parent_cube_dictionary[cube_number][
'dz']
z_index_current_column += 1
total_cubes_current_sheet += 1
if z_index_current_column == delta_array_shape[2]:
# done building current y-column so iterate y value and reset z
z_index_current_column = 0
y_index_current_sheet += 1
if y_index_current_sheet == delta_array_shape[1]: # current sheet is complete
current_sheet += 1
y_index_current_sheet = 0 # restart for new sheet
z_index_current_column = 0
total_cubes_current_sheet = 0
# now set velocity component values greater than a certain cutoff to 0,
# because they tend to reflect spurious values (i.e., PBC jumping)
dx_array[abs(dx_array) >= maximum_delta_magnitude] = 1.0
dy_array[abs(dy_array) >= maximum_delta_magnitude] = 1.0
dz_array[abs(dz_array) >= maximum_delta_magnitude] = 1.0
return (dx_array, dy_array, dz_array)
|
34,519 |
def test_binary_featurizer_correctly_encodes_non_existing_value():
f = BinarySingleStateFeaturizer()
f._default_feature_states[INTENT] = {"a": 0, "b": 1}
f._default_feature_states[ACTION_NAME] = {"c": 0, "d": 1}
encoded = f.encode_state(
{"user": {"intent": "e"}, "prev_action": {"action_name": "action_listen"}},
interpreter=None,
)
assert list(encoded.keys()) == [INTENT, ACTION_NAME]
assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[0, 0]])).nnz == 0
|
def test_single_state_featurizer_correctly_encodes_non_existing_value():
f = BinarySingleStateFeaturizer()
f._default_feature_states[INTENT] = {"a": 0, "b": 1}
f._default_feature_states[ACTION_NAME] = {"c": 0, "d": 1}
encoded = f.encode_state(
{"user": {"intent": "e"}, "prev_action": {"action_name": "action_listen"}},
interpreter=None,
)
assert list(encoded.keys()) == [INTENT, ACTION_NAME]
assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[0, 0]])).nnz == 0
|
52,808 |
def test_yaml_advanced_validation():
schema = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "lib", "galaxy", "webapps", "galaxy", "job_config_schema.yml")
integration_tests_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "test", "integration")
valid_files = [
ADVANCED_JOB_CONF_YAML,
os.path.join(integration_tests_dir, "delay_job_conf.yml"),
os.path.join(integration_tests_dir, "embedded_pulsar_metadata_job_conf.yml"),
os.path.join(integration_tests_dir, "io_injection_job_conf.yml"),
os.path.join(integration_tests_dir, "resubmission_job_conf.yml"),
os.path.join(integration_tests_dir, "resubmission_default_job_conf.yml"),
]
for valid_file in valid_files:
c = Core(
source_file=valid_file,
schema_files=[schema],
)
c.validate()
|
def test_yaml_advanced_validation():
schema = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, "lib", "galaxy", "webapps", "galaxy", "job_config_schema.yml")
integration_tests_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, "test", "integration")
valid_files = [
ADVANCED_JOB_CONF_YAML,
os.path.join(integration_tests_dir, "delay_job_conf.yml"),
os.path.join(integration_tests_dir, "embedded_pulsar_metadata_job_conf.yml"),
os.path.join(integration_tests_dir, "io_injection_job_conf.yml"),
os.path.join(integration_tests_dir, "resubmission_job_conf.yml"),
os.path.join(integration_tests_dir, "resubmission_default_job_conf.yml"),
]
for valid_file in valid_files:
c = Core(
source_file=valid_file,
schema_files=[schema],
)
c.validate()
|
58,526 |
def reducescatter_multigpu(output_tensor_list,
input_tensor_lists,
group_name: str = "default",
op=types.ReduceOp.SUM):
"""Reducescatter a list of tensors across all GPUs.
Args:
output_tensor_list: the resulted list of tensors, with
shape: num_gpus * shape(tensor).
input_tensor_lists: the original tensors, with shape:
num_gpus * world_size * shape(tensor).
group_name (str): the name of the collective group.
op: The reduce operation.
Returns:
None.
"""
if not types.cupy_available():
raise RuntimeError("Multigpu calls requires NCCL and Cupy.")
_check_tensor_lists_input(input_tensor_lists)
_check_tensor_list_input(output_tensor_list)
g = _check_and_get_group(group_name)
opts = types.ReduceScatterOptions()
opts.reduceOp = op
g.reducescatter(output_tensor_list, input_tensor_lists, opts)
|
def reducescatter_multigpu(output_tensor_list,
input_tensor_lists,
group_name: str = "default",
op=types.ReduceOp.SUM):
"""Reducescatter a list of tensors across all GPUs.
Args:
output_tensor_list: the resulted list of tensors, with
shape: num_gpus * shape(tensor).
input_tensor_lists: the original tensors, with shape:
num_gpus * world_size * shape(tensor).
group_name (str): the name of the collective group.
op: The reduce operation.
Returns:
None.
"""
if not types.cupy_available():
raise RuntimeError("Multigpu calls requires NCCL and Cupy.")
_check_tensor_lists_input(input_tensor_lists)
_check_tensor_list_input(output_tensor_list)
g = _check_and_get_group(group_name)
opts = types.ReduceScatterOptions()
opts.reduceOp = op
g.reducescatter(output_tensor_list, input_tensor_lists, opts)
|
24,709 |
def create_node(
node_name: str,
*,
context: Context = None,
cli_args: List[str] = None,
namespace: str = None,
use_global_arguments: bool = True,
enable_rosout: bool = True,
start_parameter_services: bool = True,
parameter_overrides: Union[List[Parameter], None] = None,
allow_undeclared_parameters: bool = False,
automatically_declare_parameters_from_overrides: bool = False
) -> 'Node':
"""
Create an instance of :class:`.Node`.
:param node_name: A name to give to the node.
:param context: The context to associated with the node, or ``None`` for the default global
context.
:param cli_args: Command line arguments to be used by the node. Being specific to a ROS node,
an implicit `--ros-args` scope flag always precedes these arguments.
:param namespace: The namespace prefix to apply to entities associated with the node
(node name, topics, etc).
:param use_global_arguments: ``False`` if the node should ignore process-wide command line
arguments.
:param enable_rosout: ``False`` if the node should ignore rosout logging.
:param start_parameter_services: ``False`` if the node should not create parameter services.
:param parameter_overrides: A list of :class:`.Parameter` which are used to override the
initial values of parameters declared on this node.
:param allow_undeclared_parameters: if True undeclared parameters are allowed, default False.
This option doesn't affect `parameter_overrides`.
:param automatically_declare_parameters_from_overrides: If True, the "parameter overrides" will
be used to implicitly declare parameters on the node during creation, default False.
:return: An instance of the newly created node.
"""
# imported locally to avoid loading extensions on module import
from rclpy.node import Node # noqa: F811
return Node(
node_name, context=context, cli_args=cli_args, namespace=namespace,
use_global_arguments=use_global_arguments,
enable_rosout=enable_rosout,
start_parameter_services=start_parameter_services,
parameter_overrides=parameter_overrides,
allow_undeclared_parameters=allow_undeclared_parameters,
automatically_declare_parameters_from_overrides=(
automatically_declare_parameters_from_overrides
))
|
def create_node(
node_name: str,
*,
context: Context = None,
cli_args: List[str] = None,
namespace: str = None,
use_global_arguments: bool = True,
enable_rosout: bool = True,
start_parameter_services: bool = True,
parameter_overrides: Optional[List[Parameter]] = None,
allow_undeclared_parameters: bool = False,
automatically_declare_parameters_from_overrides: bool = False
) -> 'Node':
"""
Create an instance of :class:`.Node`.
:param node_name: A name to give to the node.
:param context: The context to associated with the node, or ``None`` for the default global
context.
:param cli_args: Command line arguments to be used by the node. Being specific to a ROS node,
an implicit `--ros-args` scope flag always precedes these arguments.
:param namespace: The namespace prefix to apply to entities associated with the node
(node name, topics, etc).
:param use_global_arguments: ``False`` if the node should ignore process-wide command line
arguments.
:param enable_rosout: ``False`` if the node should ignore rosout logging.
:param start_parameter_services: ``False`` if the node should not create parameter services.
:param parameter_overrides: A list of :class:`.Parameter` which are used to override the
initial values of parameters declared on this node.
:param allow_undeclared_parameters: if True undeclared parameters are allowed, default False.
This option doesn't affect `parameter_overrides`.
:param automatically_declare_parameters_from_overrides: If True, the "parameter overrides" will
be used to implicitly declare parameters on the node during creation, default False.
:return: An instance of the newly created node.
"""
# imported locally to avoid loading extensions on module import
from rclpy.node import Node # noqa: F811
return Node(
node_name, context=context, cli_args=cli_args, namespace=namespace,
use_global_arguments=use_global_arguments,
enable_rosout=enable_rosout,
start_parameter_services=start_parameter_services,
parameter_overrides=parameter_overrides,
allow_undeclared_parameters=allow_undeclared_parameters,
automatically_declare_parameters_from_overrides=(
automatically_declare_parameters_from_overrides
))
|
12,289 |
def brmesolve(H, psi0, tlist, a_ops=[], e_ops=[], c_ops=[],
args={}, sec_cutoff=0.1, options=None):
"""
Solves for the dynamics of a system using the Bloch-Redfield master
equation, given an input Hamiltonian, Hermitian bath-coupling terms and
their associated spectrum functions, as well as possible Lindblad collapse
operators.
Parameters
----------
H : Qobj / list
System Hamiltonian given as a Qobj or
nested list in string-based format.
psi0: Qobj
Initial density matrix or state vector (ket).
tlist : array_like
List of times for evaluating evolution
a_ops : list of (a_op, spectra)
Nested list of system operators that couple to the environment,
and the corresponding bath spectra.
a_op : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`
The operator coupling to the environment. Must be hermitian.
spectra : :class:`Coefficient`, str, func
The corresponding bath spectral responce.
Can be a `Coefficient` using an 'w' args, a function of the
frequence or a string. Coefficient build from a numpy array are
understood as a function of ``w`` instead of ``t``. Function are
expected to be of the signature ``f(w)`` or ``f(t, w, **args)``.
The spectra function can depend on ``t`` if the corresponding
``a_op`` is a :cls:`QobjEvo`.
Example:
a_ops = [
(a+a.dag(), ('w>0', args={"w": 0})),
(QobjEvo(a+a.dag()), 'w > exp(-t)'),
(QobjEvo([b+b.dag(), f(t)]), g(w)),
(c+c.dag(), SpectraCoefficient(coefficient(array, tlist=ws))),
]
.. note:
``Cubic_Spline`` have been replaced by numpy array :cls:`Coefficient`.
Whether the ``a_ops`` is time dependent is deceided by the type of the
operator: :cls:`Qobj` vs :cls:`QobjEvo` instead of the type of the
spectra.
e_ops : list of :class:`Qobj` / callback function
Single operator or list of operators for which to evaluate
expectation values or callable or list of callable.
Callable signature must be, `f(t: float, state: Qobj)`.
See :func:`expect` for more detail of operator expectation
c_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format)
List of collapse operators.
args : dict
Dictionary of parameters for time-dependent Hamiltonians and
collapse operators. The key ``w`` is reserved for the spectra function.
sec_cutoff : float {0.1}
Cutoff for secular approximation. Use ``-1`` if secular approximation
is not used when evaluating bath-coupling terms.
options : :class:`qutip.solver.SolverOptions`
Options for the solver.
Returns
-------
result: :class:`qutip.solver.Result`
An instance of the class :class:`qutip.solver.Result`, which contains
either an array of expectation values, for operators given in e_ops,
or a list of states for the times specified by `tlist`.
.. note:
operator_data_type
"""
H = QobjEvo(H, args=args, tlist=tlist)
c_ops = c_ops if c_ops is not None else []
if not isinstance(c_ops, (list, tuple)):
c_ops = [c_ops]
c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops]
new_a_ops = []
for (a_op, spectra) in a_ops:
if isinstance(spectra, str):
new_a_ops.append(
(a_op, coefficient(spectra, args={**args, 'w':0})))
elif isinstance(spectra, InterCoefficient):
new_a_ops.append((a_op, SpectraCoefficient(spectra)))
elif isinstance(spectra, Coefficient):
new_a_ops.append((a_op, spectra))
elif callable(spectra):
sig = inspect.signature(spectra)
if tuple(sig.parameters.keys()) == ("w",):
spec = SpectraCoefficient(coefficient(spectra))
else:
spec = coefficient(spectra, args={**args, 'w':0})
new_a_ops.append((a_op, spec))
else:
raise TypeError("a_ops's spectra not known")
solver = BRSolver(
H, new_a_ops, c_ops, options=options,
use_secular=(sec_cutoff>=0.), sec_cutoff=sec_cutoff,
)
return solver.run(psi0, tlist, e_ops=e_ops)
|
def brmesolve(H, psi0, tlist, a_ops=[], e_ops=[], c_ops=[],
args={}, sec_cutoff=0.1, options=None):
"""
Solves for the dynamics of a system using the Bloch-Redfield master
equation, given an input Hamiltonian, Hermitian bath-coupling terms and
their associated spectral functions, as well as possible Lindblad collapse
operators.
Parameters
----------
H : Qobj / list
System Hamiltonian given as a Qobj or
nested list in string-based format.
psi0: Qobj
Initial density matrix or state vector (ket).
tlist : array_like
List of times for evaluating evolution
a_ops : list of (a_op, spectra)
Nested list of system operators that couple to the environment,
and the corresponding bath spectra.
a_op : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`
The operator coupling to the environment. Must be hermitian.
spectra : :class:`Coefficient`, str, func
The corresponding bath spectral responce.
Can be a `Coefficient` using an 'w' args, a function of the
frequence or a string. Coefficient build from a numpy array are
understood as a function of ``w`` instead of ``t``. Function are
expected to be of the signature ``f(w)`` or ``f(t, w, **args)``.
The spectra function can depend on ``t`` if the corresponding
``a_op`` is a :cls:`QobjEvo`.
Example:
a_ops = [
(a+a.dag(), ('w>0', args={"w": 0})),
(QobjEvo(a+a.dag()), 'w > exp(-t)'),
(QobjEvo([b+b.dag(), f(t)]), g(w)),
(c+c.dag(), SpectraCoefficient(coefficient(array, tlist=ws))),
]
.. note:
``Cubic_Spline`` have been replaced by numpy array :cls:`Coefficient`.
Whether the ``a_ops`` is time dependent is deceided by the type of the
operator: :cls:`Qobj` vs :cls:`QobjEvo` instead of the type of the
spectra.
e_ops : list of :class:`Qobj` / callback function
Single operator or list of operators for which to evaluate
expectation values or callable or list of callable.
Callable signature must be, `f(t: float, state: Qobj)`.
See :func:`expect` for more detail of operator expectation
c_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format)
List of collapse operators.
args : dict
Dictionary of parameters for time-dependent Hamiltonians and
collapse operators. The key ``w`` is reserved for the spectra function.
sec_cutoff : float {0.1}
Cutoff for secular approximation. Use ``-1`` if secular approximation
is not used when evaluating bath-coupling terms.
options : :class:`qutip.solver.SolverOptions`
Options for the solver.
Returns
-------
result: :class:`qutip.solver.Result`
An instance of the class :class:`qutip.solver.Result`, which contains
either an array of expectation values, for operators given in e_ops,
or a list of states for the times specified by `tlist`.
.. note:
operator_data_type
"""
H = QobjEvo(H, args=args, tlist=tlist)
c_ops = c_ops if c_ops is not None else []
if not isinstance(c_ops, (list, tuple)):
c_ops = [c_ops]
c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops]
new_a_ops = []
for (a_op, spectra) in a_ops:
if isinstance(spectra, str):
new_a_ops.append(
(a_op, coefficient(spectra, args={**args, 'w':0})))
elif isinstance(spectra, InterCoefficient):
new_a_ops.append((a_op, SpectraCoefficient(spectra)))
elif isinstance(spectra, Coefficient):
new_a_ops.append((a_op, spectra))
elif callable(spectra):
sig = inspect.signature(spectra)
if tuple(sig.parameters.keys()) == ("w",):
spec = SpectraCoefficient(coefficient(spectra))
else:
spec = coefficient(spectra, args={**args, 'w':0})
new_a_ops.append((a_op, spec))
else:
raise TypeError("a_ops's spectra not known")
solver = BRSolver(
H, new_a_ops, c_ops, options=options,
use_secular=(sec_cutoff>=0.), sec_cutoff=sec_cutoff,
)
return solver.run(psi0, tlist, e_ops=e_ops)
|
43,203 |
def _get_pipeline_subset_def(
pipeline_def: PipelineDefinition,
solids_to_execute: AbstractSet[str],
) -> "PipelineSubsetDefinition":
"""
Build a pipeline which is a subset of another pipeline.
Only includes the solids which are in solids_to_execute.
"""
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
check.set_param(solids_to_execute, "solids_to_execute", of_type=str)
graph = pipeline_def.graph
for solid_name in solids_to_execute:
if not graph.has_solid_named(solid_name):
raise DagsterInvalidSubsetError(
"Pipeline {pipeline_name} has no {node_type} named {name}.".format(
pipeline_name=pipeline_def.name,
name=solid_name,
node_type="ops" if pipeline_def.is_job else "solids",
),
)
# go in topo order to ensure deps dict is ordered
solids = list(
filter(lambda solid: solid.name in solids_to_execute, graph.solids_in_topological_order)
)
deps: Dict[
Union[str, SolidInvocation],
Dict[str, IDependencyDefinition],
] = {_dep_key_of(solid): {} for solid in solids}
for solid in solids:
for input_handle in solid.input_handles():
if graph.dependency_structure.has_direct_dep(input_handle):
output_handle = pipeline_def.dependency_structure.get_direct_dep(input_handle)
if output_handle.solid.name in solids_to_execute:
deps[_dep_key_of(solid)][input_handle.input_def.name] = DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
elif graph.dependency_structure.has_dynamic_fan_in_dep(input_handle):
output_handle = graph.dependency_structure.get_dynamic_fan_in_dep(input_handle)
if output_handle.solid.name in solids_to_execute:
deps[_dep_key_of(solid)][
input_handle.input_def.name
] = DynamicCollectDependencyDefinition(
solid_name=output_handle.solid.name,
output_name=output_handle.output_def.name,
)
elif graph.dependency_structure.has_fan_in_deps(input_handle):
output_handles = graph.dependency_structure.get_fan_in_deps(input_handle)
deps[_dep_key_of(solid)][input_handle.input_def.name] = MultiDependencyDefinition(
[
DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
for output_handle in output_handles
if output_handle.solid.name in solids_to_execute
]
)
# else input is unconnected
try:
sub_pipeline_def = PipelineSubsetDefinition(
name=pipeline_def.name, # should we change the name for subsetted pipeline?
solid_defs=list({solid.definition for solid in solids}),
mode_defs=pipeline_def.mode_definitions,
dependencies=deps,
_parent_pipeline_def=pipeline_def,
tags=pipeline_def.tags,
hook_defs=pipeline_def.hook_defs,
)
return sub_pipeline_def
except DagsterInvalidDefinitionError as exc:
# This handles the case when you construct a subset such that an unsatisfied
# input cannot be loaded from config. Instead of throwing a DagsterInvalidDefinitionError,
# we re-raise a DagsterInvalidSubsetError.
raise DagsterInvalidSubsetError(
f"The attempted subset {str_format_set(solids_to_execute)} for {pipeline_def.target_type} "
f"{pipeline_def.name} results in an invalid {pipeline_def.target_type}"
) from exc
|
def _get_pipeline_subset_def(
pipeline_def: PipelineDefinition,
solids_to_execute: AbstractSet[str],
) -> "PipelineSubsetDefinition":
"""
Build a pipeline which is a subset of another pipeline.
Only includes the solids which are in solids_to_execute.
"""
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
check.set_param(solids_to_execute, "solids_to_execute", of_type=str)
graph = pipeline_def.graph
for solid_name in solids_to_execute:
if not graph.has_solid_named(solid_name):
raise DagsterInvalidSubsetError(
"{graph_type} {pipeline_name} has no {node_type} named {name}.".format(
pipeline_name=pipeline_def.name,
name=solid_name,
node_type="ops" if pipeline_def.is_job else "solids",
),
)
# go in topo order to ensure deps dict is ordered
solids = list(
filter(lambda solid: solid.name in solids_to_execute, graph.solids_in_topological_order)
)
deps: Dict[
Union[str, SolidInvocation],
Dict[str, IDependencyDefinition],
] = {_dep_key_of(solid): {} for solid in solids}
for solid in solids:
for input_handle in solid.input_handles():
if graph.dependency_structure.has_direct_dep(input_handle):
output_handle = pipeline_def.dependency_structure.get_direct_dep(input_handle)
if output_handle.solid.name in solids_to_execute:
deps[_dep_key_of(solid)][input_handle.input_def.name] = DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
elif graph.dependency_structure.has_dynamic_fan_in_dep(input_handle):
output_handle = graph.dependency_structure.get_dynamic_fan_in_dep(input_handle)
if output_handle.solid.name in solids_to_execute:
deps[_dep_key_of(solid)][
input_handle.input_def.name
] = DynamicCollectDependencyDefinition(
solid_name=output_handle.solid.name,
output_name=output_handle.output_def.name,
)
elif graph.dependency_structure.has_fan_in_deps(input_handle):
output_handles = graph.dependency_structure.get_fan_in_deps(input_handle)
deps[_dep_key_of(solid)][input_handle.input_def.name] = MultiDependencyDefinition(
[
DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
for output_handle in output_handles
if output_handle.solid.name in solids_to_execute
]
)
# else input is unconnected
try:
sub_pipeline_def = PipelineSubsetDefinition(
name=pipeline_def.name, # should we change the name for subsetted pipeline?
solid_defs=list({solid.definition for solid in solids}),
mode_defs=pipeline_def.mode_definitions,
dependencies=deps,
_parent_pipeline_def=pipeline_def,
tags=pipeline_def.tags,
hook_defs=pipeline_def.hook_defs,
)
return sub_pipeline_def
except DagsterInvalidDefinitionError as exc:
# This handles the case when you construct a subset such that an unsatisfied
# input cannot be loaded from config. Instead of throwing a DagsterInvalidDefinitionError,
# we re-raise a DagsterInvalidSubsetError.
raise DagsterInvalidSubsetError(
f"The attempted subset {str_format_set(solids_to_execute)} for {pipeline_def.target_type} "
f"{pipeline_def.name} results in an invalid {pipeline_def.target_type}"
) from exc
|
24,757 |
def _toml_has_config(path):
with open(path) as toml_handle:
try:
content = toml.load(toml_handle)
except TomlDecodeError as error:
print(f"Failed to load '{path}': {str(error)}")
return False
try:
content["tool"]["pylint"]
except KeyError:
return False
return True
|
def _toml_has_config(path):
with open(path) as toml_handle:
try:
content = toml.load(toml_handle)
except TomlDecodeError as error:
print(f"Failed to load '{path}': {error}")
return False
try:
content["tool"]["pylint"]
except KeyError:
return False
return True
|
12,406 |
def is_encrypted(blockdev) -> bool:
"""
Check if a device is an encrypted device. blockdev should have
a /dev/dm-* path.
"""
if not subp.which("cryptsetup"):
LOG.debug("cryptsetup not found. Assuming no encrypted partitions")
return False
is_encrypted = False
with suppress(subp.ProcessExecutionError):
subp.subp(["cryptsetup", "status", blockdev])
is_encrypted = True
LOG.debug(
"Determined that %s is %sencrypted",
blockdev,
"" if is_encrypted else "not",
)
return is_encrypted
|
def is_encrypted(blockdev) -> bool:
"""
Check if a device is an encrypted device. blockdev should have
a /dev/dm-* path.
"""
if not subp.which("cryptsetup"):
LOG.debug("cryptsetup not found. Assuming no encrypted partitions")
return False
is_encrypted = False
with suppress(subp.ProcessExecutionError):
subp.subp(["cryptsetup", "status", blockdev])
is_encrypted = True
LOG.debug(
"Determined that %s is %sencrypted",
blockdev,
"" if is_encrypted else "not ",
)
return is_encrypted
|
4,685 |
def calculate_rms(expected_image, actual_image):
"Calculate the per-pixel errors, then compute the root mean square error."
if expected_image.shape != actual_image.shape:
raise _imageComparisonFailure(
"_image sizes do not match expected size: {} "
"actual size {}".format(expected_image.shape, actual_image.shape))
# Convert to float to avoid overflowing finite integer types.
return np.sqrt(((expected_image - actual_image).astype(float) ** 2).mean())
|
def calculate_rms(expected_image, actual_image):
"Calculate the per-pixel errors, then compute the root mean square error."
if expected_image.shape != actual_image.shape:
raise ImageComparisonFailure(
"_image sizes do not match expected size: {} "
"actual size {}".format(expected_image.shape, actual_image.shape))
# Convert to float to avoid overflowing finite integer types.
return np.sqrt(((expected_image - actual_image).astype(float) ** 2).mean())
|
34,312 |
def add_no_plot_param(
parser: argparse.ArgumentParser, default: bool = False, required: bool = False,
):
parser.add_argument(
"--no-plot",
action="store_true",
default=default,
help=f"Don't render plots of confusion matrix and histogram",
required=required,
)
|
def add_no_plot_param(
parser: argparse.ArgumentParser, default: bool = False, required: bool = False,
):
parser.add_argument(
"--disable-plotting",
action="store_true",
default=default,
help=f"Don't render plots of confusion matrix and histogram",
required=required,
)
|
52,750 |
def _prepare_rec(spec, ignorenets, neverignore):
# First of all, let's see if we are supposed to ignore this spec,
# and if so, do so.
if 'addr' in spec and \
spec.get('source') not in neverignore.get(spec['recontype'], []):
for start, stop in ignorenets.get(spec['recontype'], ()):
if start <= utils.force_ip2int(spec['addr']) <= stop:
return None
# Then, let's clean up the records.
# Change Symantec's random user agents (matching SYMANTEC_UA) to
# the constant string 'SymantecRandomUserAgent'.
if spec['recontype'] == 'HTTP_CLIENT_HEADER' and \
spec.get('source') == 'USER-AGENT':
if SYMANTEC_UA.match(spec['value']):
spec['value'] = 'SymantecRandomUserAgent'
elif KASPERSKY_UA.match(spec['value']):
spec['value'] = 'KasperskyWeirdUserAgent'
else:
match = SYMANTEC_SEP_UA.match(spec['value'])
if match is not None:
spec['value'] = '%s%s' % match.groups()
# Change any Digest authorization header to remove non-constant
# information. On one hand we loose the necessary information to
# try to recover the passwords, but on the other hand we store
# specs with different challenges but the same username, realm,
# host and sensor in the same records.
elif (
spec['recontype'] in {'HTTP_CLIENT_HEADER',
'HTTP_CLIENT_HEADER_SERVER'} and
spec.get('source') in {'AUTHORIZATION', 'PROXY-AUTHORIZATION'}
):
value = spec['value']
if value:
authtype = value.split(None, 1)[0]
if authtype.lower() == 'digest':
try:
# we only keep relevant info
spec['value'] = '%s %s' % (authtype, ','.join(
val for val in
_split_digest_auth(value[6:].strip())
if DIGEST_AUTH_INFOS.match(val)
))
except Exception:
utils.LOGGER.warning("Cannot parse digest error for %r",
spec, exc_info=True)
elif ntlm._is_ntlm_message(value):
# NTLM_NEGOTIATE and NTLM_AUTHENTICATE
auth = utils.decode_b64(value.split(' ', 1)[1].encode())
spec['value'] = "NTLM %s" % \
ntlm._ntlm_dict2string(ntlm.ntlm_extract_info(auth))
elif authtype.lower() in {'negotiate', 'kerberos', 'oauth'}:
spec['value'] = authtype
elif (
spec['recontype'] == 'HTTP_SERVER_HEADER' and
spec.get('source') in {'WWW-AUTHENTICATE', 'PROXY-AUTHENTICATE'}
):
value = spec['value']
if value:
authtype = value.split(None, 1)[0]
if authtype.lower() == 'digest':
try:
# we only keep relevant info
spec['value'] = '%s %s' % (authtype, ','.join(
val for val in
_split_digest_auth(value[6:].strip())
if DIGEST_AUTH_INFOS.match(val)
))
except Exception:
utils.LOGGER.warning("Cannot parse digest error for %r",
spec, exc_info=True)
elif ntlm._is_ntlm_message(value):
# NTLM_CHALLENGE
auth = utils.decode_b64(value.split(' ', 1)[1].encode())
spec['value'] = "NTLM %s" % \
ntlm._ntlm_dict2string(ntlm.ntlm_extract_info(auth))
elif authtype.lower() in {'negotiate', 'kerberos', 'oauth'}:
spec['value'] = authtype
# TCP server banners: try to normalize data
elif spec['recontype'] == 'TCP_SERVER_BANNER':
newvalue = value = utils.nmap_decode_data(spec['value'])
for pattern, replace in TCP_SERVER_PATTERNS:
if pattern.search(newvalue):
newvalue = pattern.sub(replace, newvalue)
if newvalue != value:
spec['value'] = utils.nmap_encode_data(newvalue)
# SSL_{CLIENT,SERVER} JA3
elif ((spec['recontype'] == 'SSL_CLIENT' and spec['source'] == 'ja3') or
(spec['recontype'] == 'SSL_SERVER' and
spec['source'].startswith('ja3-'))):
value = spec['value']
spec.setdefault('infos', {})['raw'] = value
spec['value'] = hashlib.new("md5", value.encode()).hexdigest()
if spec['recontype'] == 'SSL_SERVER':
clientvalue = spec['source'][4:]
spec['infos'].setdefault('client', {})['raw'] = clientvalue
spec['source'] = 'ja3-%s' % hashlib.new(
"md5",
clientvalue.encode(),
).hexdigest()
# SSH_{CLIENT,SERVER}_HASSH
elif spec['recontype'] in ['SSH_CLIENT_HASSH', 'SSH_SERVER_HASSH']:
value = spec['value']
spec.setdefault('infos', {})['raw'] = value
spec['value'] = hashlib.new("md5", value.encode()).hexdigest()
# Check DNS Blacklist answer
elif spec['recontype'] == 'DNS_ANSWER':
if any((spec.get('value') or "").endswith(dnsbl)
for dnsbl in config.DNS_BLACKLIST_DOMAINS):
dnsbl_val = spec['value']
match = DNSBL_START.search(dnsbl_val)
if match is not None:
spec['recontype'] = 'DNS_BLACKLIST'
spec['value'] = spec.get('addr')
spec.update({'source': "%s-%s" %
(dnsbl_val[match.end():], spec['source'])})
addr = match.group()
# IPv4
if addr.count('.') == 4:
spec['addr'] = '.'.join(addr.split('.')[3::-1])
# IPv6
else:
spec['addr'] = utils.int2ip6(int(addr
.replace('.', '')[::-1],
16))
return spec
|
def _prepare_rec(spec, ignorenets, neverignore):
# First of all, let's see if we are supposed to ignore this spec,
# and if so, do so.
if 'addr' in spec and \
spec.get('source') not in neverignore.get(spec['recontype'], []):
for start, stop in ignorenets.get(spec['recontype'], ()):
if start <= utils.force_ip2int(spec['addr']) <= stop:
return None
# Then, let's clean up the records.
# Change Symantec's random user agents (matching SYMANTEC_UA) to
# the constant string 'SymantecRandomUserAgent'.
if spec['recontype'] == 'HTTP_CLIENT_HEADER' and \
spec.get('source') == 'USER-AGENT':
if SYMANTEC_UA.match(spec['value']):
spec['value'] = 'SymantecRandomUserAgent'
elif KASPERSKY_UA.match(spec['value']):
spec['value'] = 'KasperskyWeirdUserAgent'
else:
match = SYMANTEC_SEP_UA.match(spec['value'])
if match is not None:
spec['value'] = '%s%s' % match.groups()
# Change any Digest authorization header to remove non-constant
# information. On one hand we loose the necessary information to
# try to recover the passwords, but on the other hand we store
# specs with different challenges but the same username, realm,
# host and sensor in the same records.
elif (
spec['recontype'] in {'HTTP_CLIENT_HEADER',
'HTTP_CLIENT_HEADER_SERVER'} and
spec.get('source') in {'AUTHORIZATION', 'PROXY-AUTHORIZATION'}
):
value = spec['value']
if value:
authtype = value.split(None, 1)[0]
if authtype.lower() == 'digest':
try:
# we only keep relevant info
spec['value'] = '%s %s' % (authtype, ','.join(
val for val in
_split_digest_auth(value[6:].strip())
if DIGEST_AUTH_INFOS.match(val)
))
except Exception:
utils.LOGGER.warning("Cannot parse digest error for %r",
spec, exc_info=True)
elif ntlm._is_ntlm_message(value):
# NTLM_NEGOTIATE and NTLM_AUTHENTICATE
try:
auth = utils.decode_b64(value.split(None, 1)[1].encode())
except (UnicodeDecodeError, TypeError, ValueError):
pass
else:
spec['value'] = "NTLM %s" % \
ntlm._ntlm_dict2string(ntlm.ntlm_extract_info(auth))
elif authtype.lower() in {'negotiate', 'kerberos', 'oauth'}:
spec['value'] = authtype
elif (
spec['recontype'] == 'HTTP_SERVER_HEADER' and
spec.get('source') in {'WWW-AUTHENTICATE', 'PROXY-AUTHENTICATE'}
):
value = spec['value']
if value:
authtype = value.split(None, 1)[0]
if authtype.lower() == 'digest':
try:
# we only keep relevant info
spec['value'] = '%s %s' % (authtype, ','.join(
val for val in
_split_digest_auth(value[6:].strip())
if DIGEST_AUTH_INFOS.match(val)
))
except Exception:
utils.LOGGER.warning("Cannot parse digest error for %r",
spec, exc_info=True)
elif ntlm._is_ntlm_message(value):
# NTLM_CHALLENGE
auth = utils.decode_b64(value.split(' ', 1)[1].encode())
spec['value'] = "NTLM %s" % \
ntlm._ntlm_dict2string(ntlm.ntlm_extract_info(auth))
elif authtype.lower() in {'negotiate', 'kerberos', 'oauth'}:
spec['value'] = authtype
# TCP server banners: try to normalize data
elif spec['recontype'] == 'TCP_SERVER_BANNER':
newvalue = value = utils.nmap_decode_data(spec['value'])
for pattern, replace in TCP_SERVER_PATTERNS:
if pattern.search(newvalue):
newvalue = pattern.sub(replace, newvalue)
if newvalue != value:
spec['value'] = utils.nmap_encode_data(newvalue)
# SSL_{CLIENT,SERVER} JA3
elif ((spec['recontype'] == 'SSL_CLIENT' and spec['source'] == 'ja3') or
(spec['recontype'] == 'SSL_SERVER' and
spec['source'].startswith('ja3-'))):
value = spec['value']
spec.setdefault('infos', {})['raw'] = value
spec['value'] = hashlib.new("md5", value.encode()).hexdigest()
if spec['recontype'] == 'SSL_SERVER':
clientvalue = spec['source'][4:]
spec['infos'].setdefault('client', {})['raw'] = clientvalue
spec['source'] = 'ja3-%s' % hashlib.new(
"md5",
clientvalue.encode(),
).hexdigest()
# SSH_{CLIENT,SERVER}_HASSH
elif spec['recontype'] in ['SSH_CLIENT_HASSH', 'SSH_SERVER_HASSH']:
value = spec['value']
spec.setdefault('infos', {})['raw'] = value
spec['value'] = hashlib.new("md5", value.encode()).hexdigest()
# Check DNS Blacklist answer
elif spec['recontype'] == 'DNS_ANSWER':
if any((spec.get('value') or "").endswith(dnsbl)
for dnsbl in config.DNS_BLACKLIST_DOMAINS):
dnsbl_val = spec['value']
match = DNSBL_START.search(dnsbl_val)
if match is not None:
spec['recontype'] = 'DNS_BLACKLIST'
spec['value'] = spec.get('addr')
spec.update({'source': "%s-%s" %
(dnsbl_val[match.end():], spec['source'])})
addr = match.group()
# IPv4
if addr.count('.') == 4:
spec['addr'] = '.'.join(addr.split('.')[3::-1])
# IPv6
else:
spec['addr'] = utils.int2ip6(int(addr
.replace('.', '')[::-1],
16))
return spec
|
43,126 |
def hgmo_patch(branch: str, revision: str) -> str:
"""Load a patch for a given revision"""
url = f"https://hg.mozilla.org/{branch}/raw-rev/{revision}"
r = requests.get(url)
r.raise_for_status()
return r.text
|
def get_hgmo_patch(branch: str, revision: str) -> str:
"""Load a patch for a given revision"""
url = f"https://hg.mozilla.org/{branch}/raw-rev/{revision}"
r = requests.get(url)
r.raise_for_status()
return r.text
|
30,447 |
def get_dmarc(auth):
"""
Get DMARC validation information
:param auth: authentication header value (if exist), contains the validation result and sender ip.
:return: DMARC validation information
"""
dmarc_context = {
'Validation-Result': 'Unspecified',
'Tags': {'Unspecified': 'Unspecified'},
'Signing-Domain': 'Unspecified'
}
if auth is not None:
result = re.search(r'dmarc=(\w+)', auth)
if result is not None:
dmarc_context['Validation-Result'] = result.group(1).lower()
reason = re.findall(r'dmarc=\w+ \((.+?)\)', auth)
if reason:
tags = reason[0]
tags_data = {}
for tag in tags.split(' '):
values = tag.split('=')
tags_data[values[0]] = values[1]
dmarc_context['Tags'] = tags_data
domain = re.findall(r'dmarc=[\w\W]+header.from=([\w-]+\.[^; ]+)', auth)
if domain:
dmarc_context['Signing-Domain'] = domain[0]
return dmarc_context
|
def get_dmarc(auth):
"""
Get DMARC validation information
:param auth: authentication header value (if exist), contains the validation result and sender ip.
:return: DMARC validation information
"""
dmarc_context = {
'Validation-Result': 'Unspecified',
'Tags': {'Unspecified': 'Unspecified'},
'Signing-Domain': 'Unspecified'
}
if auth is not None:
result = re.search(r'dmarc=(\w+)', auth)
if result is not None:
dmarc_context['Validation-Result'] = result.group(1).lower()
reason = re.findall(r'dmarc=\w+ \((.+?)\)', auth)
if reason:
tags = reason[0]
tags_data = {}
for tag in tags.split(' '):
values = tag.split('=')
tags_data[values[0]] = values[1]
dmarc_context['Tags'] = tags_data
domain = re.findall(r'dmarc=.+header.from=([\w-]+\.[^; ]+)', auth)
if domain:
dmarc_context['Signing-Domain'] = domain[0]
return dmarc_context
|
42,004 |
def _run_iteration(
zmap: Dict[complex, Union[int, float]], coordinates: List[complex], overshoot: float = 0.0
) -> Tuple[Dict[complex, Union[int, float]], float]:
max_fractional_delta = 0.0
for coord in coordinates:
current_val = zmap.get(coord, None)
max_neighbor = -np.inf
min_neighbor = np.inf
sum_neighbors = 0
n_neighbors = 0
for offset in NEIGHBOR_OFFSETS:
neighbor = zmap.get(coord + offset, None)
if neighbor is None:
# off the edge or not filled in
continue
sum_neighbors += neighbor # type: ignore
n_neighbors += 1
if current_val is not None:
max_neighbor = max(max_neighbor, neighbor)
min_neighbor = min(min_neighbor, neighbor)
# fill value is just mean of its neighbors
new_val = sum_neighbors / n_neighbors
if current_val is None:
zmap[coord] = new_val
max_fractional_delta = 1.0
else:
zmap[coord] = (1 + overshoot) * new_val - overshoot * current_val
if max_neighbor > min_neighbor:
fractional_delta = abs(new_val - current_val) / (max_neighbor - min_neighbor)
max_fractional_delta = max(overshoot, fractional_delta)
return zmap, max_fractional_delta
|
def _run_iteration(
zmap: Dict[complex, Union[int, float]], coordinates: List[complex], overshoot: float = 0.0
) -> float:
max_fractional_delta = 0.0
for coord in coordinates:
current_val = zmap.get(coord, None)
max_neighbor = -np.inf
min_neighbor = np.inf
sum_neighbors = 0
n_neighbors = 0
for offset in NEIGHBOR_OFFSETS:
neighbor = zmap.get(coord + offset, None)
if neighbor is None:
# off the edge or not filled in
continue
sum_neighbors += neighbor # type: ignore
n_neighbors += 1
if current_val is not None:
max_neighbor = max(max_neighbor, neighbor)
min_neighbor = min(min_neighbor, neighbor)
# fill value is just mean of its neighbors
new_val = sum_neighbors / n_neighbors
if current_val is None:
zmap[coord] = new_val
max_fractional_delta = 1.0
else:
zmap[coord] = (1 + overshoot) * new_val - overshoot * current_val
if max_neighbor > min_neighbor:
fractional_delta = abs(new_val - current_val) / (max_neighbor - min_neighbor)
max_fractional_delta = max(overshoot, fractional_delta)
return zmap, max_fractional_delta
|
43,683 |
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.pauli_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.bit_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
20,554 |
def main():
# Initialization
size_data = 61
size_label = 1 # put zero for labels that are single points.
dice_acceptable = 0.39 # computed DICE should be 0.931034
test_passed = 0
remove_temp_files = 1
verbose = 1
# Check input parameters
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvr:')
except getopt.GetoptError:
usage()
raise SystemExit(2)
for opt, arg in opts:
if opt == '-h':
usage()
return
elif opt in ('-v'):
verbose = int(arg)
elif opt in ('-r'):
remove_temp_files = int(arg)
path_tmp = tmp_create(basename="test_ants")
# go to tmp folder
curdir = os.getcwd()
os.chdir(path_tmp)
# Initialise numpy volumes
data_src = np.zeros((size_data, size_data, size_data), dtype=np.int16)
data_dest = np.zeros((size_data, size_data, size_data), dtype=np.int16)
# add labels for src image (curved).
# Labels can be big (more than single point), because when applying NN interpolation, single points might disappear
data_src[20 - size_label:20 + size_label + 1, 20 - size_label:20 + size_label + 1, 10 - size_label:10 + size_label + 1] = 1
data_src[30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1] = 2
data_src[20 - size_label:20 + size_label + 1, 20 - size_label:20 + size_label + 1, 50 - size_label:50 + size_label + 1] = 3
# add labels for dest image (straight).
# Here, no need for big labels (bigger than single point) because these labels will not be re-interpolated.
data_dest[30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1, 10 - size_label:10 + size_label + 1] = 1
data_dest[30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1] = 2
data_dest[30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1, 50 - size_label:50 + size_label + 1] = 3
# save as nifti
img_src = nib.Nifti1Image(data_src, np.eye(4))
nib.save(img_src, 'data_src.nii.gz')
img_dest = nib.Nifti1Image(data_dest, np.eye(4))
nib.save(img_dest, 'data_dest.nii.gz')
# Estimate rigid transformation
printv('\nEstimate rigid transformation between paired landmarks...', verbose)
# TODO fixup isct_ants* parsers
run_proc(['isct_antsRegistration',
'-d', '3',
'-t', 'syn[1,3,1]',
'-m', 'MeanSquares[data_dest.nii.gz,data_src.nii.gz,1,3]',
'-f', '2',
'-s', '0',
'-o', '[src2reg,data_src_reg.nii.gz]',
'-c', '5',
'-v', '1',
'-n', 'NearestNeighbor'], verbose, is_sct_binary=True)
# # Apply rigid transformation
# printv('\nApply rigid transformation to curved landmarks...', verbose)
# sct_apply_transfo.main(["-i", "data_src.nii.gz", "-o", "data_src_rigid.nii.gz", "-d", "data_dest.nii.gz", "-w", "curve2straight_rigid.txt", "-p", "nn"])
#
# # Estimate b-spline transformation curve --> straight
# printv('\nEstimate b-spline transformation: curve --> straight...', verbose)
# run_proc('isct_ANTSLandmarksBSplineTransform data_dest.nii.gz data_src_rigid.nii.gz warp_curve2straight_intermediate.nii.gz 5x5x5 3 2 0', verbose)
#
# # Concatenate rigid and non-linear transformations...
# printv('\nConcatenate rigid and non-linear transformations...', verbose)
# cmd = 'isct_ComposeMultiTransform 3 warp_curve2straight.nii.gz -R data_dest.nii.gz warp_curve2straight_intermediate.nii.gz curve2straight_rigid.txt'
# printv('>> '+cmd, verbose)
# run_proc(cmd)
#
# # Apply deformation to input image
# printv('\nApply transformation to input image...', verbose)
# sct_apply_transfo(["-i", "data_src.nii.gz", "-o", "data_src_warp.nii.gz", "-d", "data_dest.nii.gz", "-w", "warp_curve2straight.nii.gz", "-p", "nn"])
#
# Compute DICE coefficient between src and dest
printv('\nCompute DICE coefficient...', verbose)
sct_dice_coefficient.main([
"-i", "data_dest.nii.gz",
"-d", "data_src_reg.nii.gz",
"-o", "dice.txt",
])
with open("dice.txt", "r") as file_dice:
dice = float(file_dice.read().replace('3D Dice coefficient = ', ''))
printv('Dice coeff = ' + str(dice) + ' (should be above ' + str(dice_acceptable) + ')', verbose)
# Check if DICE coefficient is above acceptable value
if dice > dice_acceptable:
test_passed = 1
# come back
os.chdir(curdir)
# Delete temporary files
if remove_temp_files == 1:
printv('\nDelete temporary files...', verbose)
rmtree(path_tmp)
# output result for parent function
if test_passed:
printv('\nTest passed!\n', verbose)
else:
printv('\nTest failed!\n', verbose)
raise SystemExit(1)
|
def main():
# Initialization
size_data = 61
size_label = 1 # put zero for labels that are single points.
dice_acceptable = 0.39 # computed DICE should be 0.931034
test_passed = 0
remove_temp_files = 1
verbose = 1
# Check input parameters
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvr:')
except getopt.GetoptError:
usage()
raise SystemExit(2)
for opt, arg in opts:
if opt == '-h':
usage()
return
elif opt in ('-v'):
verbose = int(arg)
elif opt in ('-r'):
remove_temp_files = int(arg)
path_tmp = tmp_create(basename="test_ants")
# go to tmp folder
curdir = os.getcwd()
os.chdir(path_tmp)
# Initialise numpy volumes
data_src = np.zeros((size_data, size_data, size_data), dtype=np.int16)
data_dest = np.zeros((size_data, size_data, size_data), dtype=np.int16)
# add labels for src image (curved).
# Labels can be big (more than single point), because when applying NN interpolation, single points might disappear
data_src[20 - size_label:20 + size_label + 1, 20 - size_label:20 + size_label + 1, 10 - size_label:10 + size_label + 1] = 1
data_src[30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1] = 2
data_src[20 - size_label:20 + size_label + 1, 20 - size_label:20 + size_label + 1, 50 - size_label:50 + size_label + 1] = 3
# add labels for dest image (straight).
# Here, no need for big labels (bigger than single point) because these labels will not be re-interpolated.
data_dest[30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1, 10 - size_label:10 + size_label + 1] = 1
data_dest[30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1] = 2
data_dest[30 - size_label:30 + size_label + 1, 30 - size_label:30 + size_label + 1, 50 - size_label:50 + size_label + 1] = 3
# save as nifti
img_src = nib.Nifti1Image(data_src, np.eye(4))
nib.save(img_src, 'data_src.nii.gz')
img_dest = nib.Nifti1Image(data_dest, np.eye(4))
nib.save(img_dest, 'data_dest.nii.gz')
# Estimate rigid transformation
printv('\nEstimate rigid transformation between paired landmarks...', verbose)
# TODO fixup isct_ants* parsers
run_proc(['isct_antsRegistration',
'-d', '3',
'-t', 'syn[1,3,1]',
'-m', 'MeanSquares[data_dest.nii.gz,data_src.nii.gz,1,3]',
'-f', '2',
'-s', '0',
'-o', '[src2reg,data_src_reg.nii.gz]',
'-c', '5',
'-v', '1',
'-n', 'NearestNeighbor'], verbose, is_sct_binary=True)
# # Apply rigid transformation
# printv('\nApply rigid transformation to curved landmarks...', verbose)
# sct_apply_transfo.main(["-i", "data_src.nii.gz", "-o", "data_src_rigid.nii.gz", "-d", "data_dest.nii.gz", "-w", "curve2straight_rigid.txt", "-p", "nn"])
#
# # Estimate b-spline transformation curve --> straight
# printv('\nEstimate b-spline transformation: curve --> straight...', verbose)
# run_proc('isct_ANTSLandmarksBSplineTransform data_dest.nii.gz data_src_rigid.nii.gz warp_curve2straight_intermediate.nii.gz 5x5x5 3 2 0', verbose)
#
# # Concatenate rigid and non-linear transformations...
# printv('\nConcatenate rigid and non-linear transformations...', verbose)
# cmd = 'isct_ComposeMultiTransform 3 warp_curve2straight.nii.gz -R data_dest.nii.gz warp_curve2straight_intermediate.nii.gz curve2straight_rigid.txt'
# printv('>> '+cmd, verbose)
# run_proc(cmd)
#
# # Apply deformation to input image
# printv('\nApply transformation to input image...', verbose)
# sct_apply_transfo.main(["-i", "data_src.nii.gz", "-o", "data_src_warp.nii.gz", "-d", "data_dest.nii.gz", "-w", "warp_curve2straight.nii.gz", "-p", "nn"])
#
# Compute DICE coefficient between src and dest
printv('\nCompute DICE coefficient...', verbose)
sct_dice_coefficient.main([
"-i", "data_dest.nii.gz",
"-d", "data_src_reg.nii.gz",
"-o", "dice.txt",
])
with open("dice.txt", "r") as file_dice:
dice = float(file_dice.read().replace('3D Dice coefficient = ', ''))
printv('Dice coeff = ' + str(dice) + ' (should be above ' + str(dice_acceptable) + ')', verbose)
# Check if DICE coefficient is above acceptable value
if dice > dice_acceptable:
test_passed = 1
# come back
os.chdir(curdir)
# Delete temporary files
if remove_temp_files == 1:
printv('\nDelete temporary files...', verbose)
rmtree(path_tmp)
# output result for parent function
if test_passed:
printv('\nTest passed!\n', verbose)
else:
printv('\nTest failed!\n', verbose)
raise SystemExit(1)
|
8,864 |
def subreddit_sorting(bot, trigger, s, sorting):
if sorting == 'new':
submissions = list(s.new())
elif sorting == 'top':
submissions = list(s.top())
elif sorting == 'hot':
submissions = list(s.hot())
elif sorting == 'controversial':
submissions = list(s.controversial())
elif sorting == 'gilded':
submissions = list(s.gilded())
elif sorting == 'rising':
submissions = list(s.rising())
elif sorting == 'sticky':
try:
submissions = [s.sticky()]
except prawcore.exceptions.NotFound:
bot.say("r/" + s.display_name + " appears to not have a stickied post!")
return
elif sorting == 'random':
submissions = [s.random()] or []
else:
return
if not len(submissions):
bot.say("r/" + s.display_name + ' ' + sorting + " appears to have no items!")
return NOLIMIT
if sorting != 'sticky':
submissions_filter = []
for submission in submissions:
if not submission.stickied:
submissions_filter.append(submission)
submissions = submissions_filter
submission = submissions[0]
link = "https://reddit.com/r/" + s.display_name + "/comments/" + str(submission)
say_post_info(
bot, trigger, re.match(post_url, link).group(1), False, True)
|
def subreddit_sorting(bot, trigger, s, sorting):
if sorting == 'new':
submissions = list(s.new())
elif sorting == 'top':
submissions = list(s.top(limit=10))
elif sorting == 'hot':
submissions = list(s.hot())
elif sorting == 'controversial':
submissions = list(s.controversial())
elif sorting == 'gilded':
submissions = list(s.gilded())
elif sorting == 'rising':
submissions = list(s.rising())
elif sorting == 'sticky':
try:
submissions = [s.sticky()]
except prawcore.exceptions.NotFound:
bot.say("r/" + s.display_name + " appears to not have a stickied post!")
return
elif sorting == 'random':
submissions = [s.random()] or []
else:
return
if not len(submissions):
bot.say("r/" + s.display_name + ' ' + sorting + " appears to have no items!")
return NOLIMIT
if sorting != 'sticky':
submissions_filter = []
for submission in submissions:
if not submission.stickied:
submissions_filter.append(submission)
submissions = submissions_filter
submission = submissions[0]
link = "https://reddit.com/r/" + s.display_name + "/comments/" + str(submission)
say_post_info(
bot, trigger, re.match(post_url, link).group(1), False, True)
|
23,110 |
def test_Axes():
pdf = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
df = dd.from_pandas(pdf, npartitions=2)
assert len(df) == 2
assert all(assert_eq(d, p) for d, p in zip(df.axes, pdf.axes))
|
def test_axes():
pdf = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
df = dd.from_pandas(pdf, npartitions=2)
assert len(df) == 2
assert all(assert_eq(d, p) for d, p in zip(df.axes, pdf.axes))
|
35,626 |
def check_integrity(fpath: str, checksum: Optional[str] = None) -> bool:
"""Check the integrity of a download.
Args:
fpath (str): file to checksum
checksum (str): checksum to verify
Returns:
(bool): True if file exists and ``checksum=None``, or if file exists and
checksum matches, otherwise False
"""
if not os.path.isfile(fpath):
return False
if checksum is None:
return True
return verify_checksum(fpath, checksum)
|
def check_integrity(fpath: str, checksum: Optional[str] = None) -> bool:
"""Check the integrity of a download.
Args:
fpath (str): file to checksum
checksum (str): checksum to verify. The hashing algorithm that is used is implicitly determined by the length of the hash.
Returns:
(bool): True if file exists and ``checksum=None``, or if file exists and
checksum matches, otherwise False
"""
if not os.path.isfile(fpath):
return False
if checksum is None:
return True
return verify_checksum(fpath, checksum)
|
20,251 |
def environment_specific_index(base_name):
env = settings.DEPLOY_ENVIRONMENT
if not env or env.lower() in ('local', 'production'):
return base_name
else:
return f'{env}-{base_name}'.lower()
|
def environment_specific_index(base_name):
env = settings.DEPLOY_ENVIRONMENT.lower()
if not env or env.lower() in ('local', 'production'):
return base_name
else:
return f'{env}-{base_name}'.lower()
|
8,386 |
def test_create_from_quantities():
spec = Spectrum1D(spectral_axis=np.arange(1, 50) * u.nm,
flux=np.random.randn(49) * u.Jy)
assert isinstance(spec.spectral_axis, u.Quantity)
assert spec.spectral_axis.unit == u.nm
assert spec.spectral_axis.size == 49
# Mis-matched lengths should raise and exception
with pytest.raises(ValueError) as e_info:
spec = Spectrum1D(spectral_axis=np.arange(1, 50) * u.nm,
flux=np.random.randn(48) * u.Jy)
|
def test_create_from_quantities():
spec = Spectrum1D(spectral_axis=np.arange(1, 50) * u.nm,
flux=np.random.randn(49) * u.Jy)
assert isinstance(spec.spectral_axis, u.Quantity)
assert spec.spectral_axis.unit == u.nm
assert spec.spectral_axis.size == 49
# Mis-matched lengths should raise an exception
with pytest.raises(ValueError) as e_info:
spec = Spectrum1D(spectral_axis=np.arange(1, 50) * u.nm,
flux=np.random.randn(48) * u.Jy)
|
54,088 |
def get_service_data(service: Optional[Union[str, io.IOBase]]) -> Dict[str, Any]:
service = service or os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
if not service:
cloudsdk_config = os.environ.get('CLOUDSDK_CONFIG')
sdkpath = (cloudsdk_config
or os.path.join(os.path.expanduser('~'), '.config',
'gcloud'))
service = os.path.join(sdkpath, 'application_default_credentials.json')
set_explicitly = bool(cloudsdk_config)
else:
set_explicitly = True
try:
with open(service, 'r') as f:
data: Dict[str, Any] = json.loads(f.read())
return data
except FileNotFoundError:
if set_explicitly:
# only warn users if they have explicitly set the service_file path
raise
return {}
except TypeError:
data: Dict[str, Any] = json.loads(service.read())
return data
except Exception: # pylint: disable=broad-except
return {}
|
def get_service_data(
service: Optional[Union[str, io.IOBase]]) -> Dict[str, Any]:
service = service or os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
if not service:
cloudsdk_config = os.environ.get('CLOUDSDK_CONFIG')
sdkpath = (cloudsdk_config
or os.path.join(os.path.expanduser('~'), '.config',
'gcloud'))
service = os.path.join(sdkpath, 'application_default_credentials.json')
set_explicitly = bool(cloudsdk_config)
else:
set_explicitly = True
try:
with open(service, 'r') as f:
data: Dict[str, Any] = json.loads(f.read())
return data
except FileNotFoundError:
if set_explicitly:
# only warn users if they have explicitly set the service_file path
raise
return {}
except TypeError:
data: Dict[str, Any] = json.loads(service.read())
return data
except Exception: # pylint: disable=broad-except
return {}
|
57,607 |
def fetch_exchange(zone_key1='CR', zone_key2='PA', session=None,
target_datetime=None, logger=logging.getLogger(__name__)) -> dict:
"""
Requests the last known power exchange (in MW) between two countries.
"""
if target_datetime:
raise NotImplementedError(
'This parser is not yet able to parse past dates')
sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
r = session or requests.session()
url = EXCHANGE_URL
response = r.get(url)
assert response.status_code == 200
df = pd.read_html(response.text)[0]
# A positive value on website indicates a flow from country specified to PA.
net_flow_cr = round(float(df[4][1]) + float(df[4][3]) + float(df[4][5]) + float(df[1][8]) + float(df[1][10]), 2)
net_flow_ni = round(float(df[4][8]) + float(df[4][10]) + float(df[4][13]) + float(df[4][15]), 2)
net_flow_hn = round(float(df[1][13]) + float(df[1][15]) + float(df[1][18]) + float(df[1][20]) + float(df[1][23]), 2)
net_flow_sv = round(float(df[4][18]) + float(df[4][20]) + float(df[1][26]) + float(df[1][28]), 2)
net_flow_gt = round(float(df[4][23]) + float(df[4][26]) + float(df[4][28]) + float(df[1][31]), 2)
net_flows = {
"CR->PA": net_flow_cr, # Costa Rica to Panama
"GT->PA": net_flow_gt, # Guatemala to Panama
"HN->PA": net_flow_hn, # Honduras to Panama
"NI->PA": net_flow_ni, # Nicaragua to Panama
"PA->SV": net_flow_sv, # Panama to El Salvador
}
# Invert sign of flows to account for correct flow direction
net_flows["PA->SV"] = -1 * net_flows["PA->SV"]
if sorted_zone_keys not in net_flows:
raise NotImplementedError(
'This exchange pair is not implemented: {}'.format(sorted_zone_keys))
data = {
'datetime': arrow.now(TIMEZONE).datetime,
'netFlow': net_flows[sorted_zone_keys],
'sortedZoneKeys': sorted_zone_keys,
'source': url
}
return data
|
def fetch_exchange(zone_key1='CR', zone_key2='PA', session=None,
target_datetime=None, logger=logging.getLogger(__name__)) -> dict:
"""
Requests the last known power exchange (in MW) between two countries.
"""
if target_datetime:
raise NotImplementedError(
'This parser is not yet able to parse past dates')
sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
r = session or requests.session()
url = EXCHANGE_URL
response = r.get(url)
assert response.status_code == 200
df = pd.read_html(response.text)[0]
# A positive value on website indicates a flow from country specified to PA.
net_flow_cr = round(float(df[4][1]) + float(df[4][3]) + float(df[4][5]) + float(df[1][8]) + float(df[1][10]), 2)
net_flow_ni = round(float(df[4][8]) + float(df[4][10]) + float(df[4][13]) + float(df[4][15]), 2)
net_flow_hn = round(float(df[1][13]) + float(df[1][15]) + float(df[1][18]) + float(df[1][20]) + float(df[1][23]), 2)
net_flow_sv = round(float(df[4][18]) + float(df[4][20]) + float(df[1][26]) + float(df[1][28]), 2)
net_flow_gt = round(float(df[4][23]) + float(df[4][26]) + float(df[4][28]) + float(df[1][31]), 2)
net_flows = {
"CR->PA": net_flow_cr, # Costa Rica to Panama
"GT->PA": net_flow_gt, # Guatemala to Panama
"HN->PA": net_flow_hn, # Honduras to Panama
"NI->PA": net_flow_ni, # Nicaragua to Panama
"PA->SV": -1 * net_flow_sv, # Panama to El Salvador - invert sign to account for direction in alphabetical order
}
if sorted_zone_keys not in net_flows:
raise NotImplementedError(
'This exchange pair is not implemented: {}'.format(sorted_zone_keys))
data = {
'datetime': arrow.now(TIMEZONE).datetime,
'netFlow': net_flows[sorted_zone_keys],
'sortedZoneKeys': sorted_zone_keys,
'source': url
}
return data
|
13,672 |
def _generate_not_activated_message(user):
"""
Generates the message displayed on the sign-in screen when a learner attempts to access the
system with an inactive account.
"""
support_url = configuration_helpers.get_value(
'SUPPORT_SITE_LINK',
settings.SUPPORT_SITE_LINK
)
platform_name = configuration_helpers.get_value(
'PLATFORM_NAME',
settings.PLATFORM_NAME
)
not_activated_msg_template = _(u'In order to sign in, you need to activate your account.<br /><br />'
'We just sent an activation link to <strong>{email}</strong>. If ' # pylint: disable=unicode-format-string
'you do not receive an email, check your spam folders or '
'<a href="{support_url}">contact {platform} Support</a>.') # pylint: disable=unicode-format-string
not_activated_message = not_activated_msg_template.format(
email=user.email,
support_url=support_url,
platform=platform_name
)
return not_activated_message
|
def _generate_not_activated_message(user):
"""
Generates the message displayed on the sign-in screen when a learner attempts to access the
system with an inactive account.
"""
support_url = configuration_helpers.get_value(
'SUPPORT_SITE_LINK',
settings.SUPPORT_SITE_LINK
)
platform_name = configuration_helpers.get_value(
'PLATFORM_NAME',
settings.PLATFORM_NAME
)
not_activated_msg_template = _(u'In order to sign in, you need to activate your account.<br /><br />'
'We just sent an activation link to <strong>{email}</strong>. If ' # pylint: disable=unicode-format-string
'you do not receive an email, check your spam folders or '
u'<a href="{support_url}">contact {platform} Support</a>.')
not_activated_message = not_activated_msg_template.format(
email=user.email,
support_url=support_url,
platform=platform_name
)
return not_activated_message
|
21,144 |
def train_while_improving(
nlp: Language,
optimizer: Optimizer,
train_data,
evaluate,
*,
dropout: float,
eval_frequency: int,
accumulate_gradient: int,
patience: int,
max_steps: int,
raw_text: List[Dict[str, str]],
exclude: List[str],
):
"""Train until an evaluation stops improving. Works as a generator,
with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`,
where info is a dict, and is_best_checkpoint is in [True, False, None] --
None indicating that the iteration was not evaluated as a checkpoint.
The evaluation is conducted by calling the evaluate callback, which should
Positional arguments:
nlp: The spaCy pipeline to evaluate.
optimizer: The optimizer callable.
train_data (Iterable[Batch]): A generator of batches, with the training
data. Each batch should be a Sized[Tuple[Input, Annot]]. The training
data iterable needs to take care of iterating over the epochs and
shuffling.
evaluate (Callable[[], Tuple[float, Any]]): A callback to perform evaluation.
The callback should take no arguments and return a tuple
`(main_score, other_scores)`. The main_score should be a float where
higher is better. other_scores can be any object.
Every iteration, the function yields out a tuple with:
* batch: A list of Example objects.
* info: A dict with various information about the last update (see below).
* is_best_checkpoint: A value in None, False, True, indicating whether this
was the best evaluation so far. You should use this to save the model
checkpoints during training. If None, evaluation was not conducted on
that iteration. False means evaluation was conducted, but a previous
evaluation was better.
The info dict provides the following information:
epoch (int): How many passes over the data have been completed.
step (int): How many steps have been completed.
score (float): The main score form the last evaluation.
other_scores: : The other scores from the last evaluation.
loss: The accumulated losses throughout training.
checkpoints: A list of previous results, where each result is a
(score, step, epoch) tuple.
"""
if isinstance(dropout, float):
dropouts = thinc.schedules.constant(dropout)
else:
dropouts = dropout
results = []
losses = {}
if raw_text:
random.shuffle(raw_text)
raw_examples = [
Example.from_dict(nlp.make_doc(rt["text"]), {}) for rt in raw_text
]
raw_batches = util.minibatch(raw_examples, size=8)
for step, (epoch, batch) in enumerate(train_data):
dropout = next(dropouts)
for subbatch in subdivide_batch(batch, accumulate_gradient):
nlp.update(
subbatch, drop=dropout, losses=losses, sgd=False, exclude=exclude
)
if raw_text:
# If raw text is available, perform 'rehearsal' updates,
# which use unlabelled data to reduce overfitting.
raw_batch = list(next(raw_batches))
nlp.rehearse(raw_batch, sgd=optimizer, losses=losses, exclude=exclude)
# TODO: refactor this so we don't have to run it separately in here
for name, proc in nlp.pipeline:
if (
name not in exclude
and hasattr(proc, "model")
and isinstance(proc.model, Model)
):
proc.model.finish_update(optimizer)
optimizer.step_schedules()
if not (step % eval_frequency):
if optimizer.averages:
with nlp.use_params(optimizer.averages):
score, other_scores = evaluate()
else:
score, other_scores = evaluate()
results.append((score, step))
is_best_checkpoint = score == max(results)[0]
else:
score, other_scores = (None, None)
is_best_checkpoint = None
info = {
"epoch": epoch,
"step": step,
"score": score,
"other_scores": other_scores,
"losses": losses,
"checkpoints": results,
}
yield batch, info, is_best_checkpoint
if is_best_checkpoint is not None:
losses = {}
# Stop if no improvement in `patience` updates (if specified)
best_score, best_step = max(results)
if patience and (step - best_step) >= patience:
break
# Stop if we've exhausted our max steps (if specified)
if max_steps and step >= max_steps:
break
|
def train_while_improving(
nlp: Language,
optimizer: Optimizer,
train_data,
evaluate,
*,
dropout: float,
eval_frequency: int,
accumulate_gradient: int,
patience: int,
max_steps: int,
raw_text: List[Dict[str, str]],
exclude: List[str],
):
"""Train until an evaluation stops improving. Works as a generator,
with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`,
where info is a dict, and is_best_checkpoint is in [True, False, None] --
None indicating that the iteration was not evaluated as a checkpoint.
The evaluation is conducted by calling the evaluate callback, which should
Positional arguments:
nlp: The spaCy pipeline to evaluate.
optimizer: The optimizer callable.
train_data (Iterable[Batch]): A generator of batches, with the training
data. Each batch should be a Sized[Tuple[Input, Annot]]. The training
data iterable needs to take care of iterating over the epochs and
shuffling.
evaluate (Callable[[], Tuple[float, Any]]): A callback to perform evaluation.
The callback should take no arguments and return a tuple
`(main_score, other_scores)`. The main_score should be a float where
higher is better. other_scores can be any object.
Every iteration, the function yields out a tuple with:
* batch: A list of Example objects.
* info: A dict with various information about the last update (see below).
* is_best_checkpoint: A value in None, False, True, indicating whether this
was the best evaluation so far. You should use this to save the model
checkpoints during training. If None, evaluation was not conducted on
that iteration. False means evaluation was conducted, but a previous
evaluation was better.
The info dict provides the following information:
epoch (int): How many passes over the data have been completed.
step (int): How many steps have been completed.
score (float): The main score form the last evaluation.
other_scores: : The other scores from the last evaluation.
loss: The accumulated losses throughout training.
checkpoints: A list of previous results, where each result is a
(score, step, epoch) tuple.
"""
if isinstance(dropout, float):
dropouts = thinc.schedules.constant(dropout)
else:
dropouts = dropout
results = []
losses = {}
if raw_text:
random.shuffle(raw_text)
raw_examples = [
Example.from_dict(nlp.make_doc(rt["text"]), {}) for rt in raw_text
]
raw_batches = util.minibatch(raw_examples, size=8)
for step, (epoch, batch) in enumerate(train_data):
dropout = next(dropouts)
for subbatch in subdivide_batch(batch, accumulate_gradient):
nlp.update(
subbatch, drop=dropout, losses=losses, sgd=False, exclude=exclude
)
if raw_text:
# If raw text is available, perform 'rehearsal' updates,
# which use unlabelled data to reduce overfitting.
raw_batch = list(next(raw_batches))
nlp.rehearse(raw_batch, sgd=optimizer, losses=losses, exclude=exclude)
# TODO: refactor this so we don't have to run it separately in here
for name, proc in nlp.pipeline:
if (
name not in exclude
and hasattr(proc, "model")
and proc.model not in (True, False, None)
):
proc.model.finish_update(optimizer)
optimizer.step_schedules()
if not (step % eval_frequency):
if optimizer.averages:
with nlp.use_params(optimizer.averages):
score, other_scores = evaluate()
else:
score, other_scores = evaluate()
results.append((score, step))
is_best_checkpoint = score == max(results)[0]
else:
score, other_scores = (None, None)
is_best_checkpoint = None
info = {
"epoch": epoch,
"step": step,
"score": score,
"other_scores": other_scores,
"losses": losses,
"checkpoints": results,
}
yield batch, info, is_best_checkpoint
if is_best_checkpoint is not None:
losses = {}
# Stop if no improvement in `patience` updates (if specified)
best_score, best_step = max(results)
if patience and (step - best_step) >= patience:
break
# Stop if we've exhausted our max steps (if specified)
if max_steps and step >= max_steps:
break
|
25,697 |
def repeat_as_list(x: TensorType, n: int):
"""
:param x: Array/Tensor to be repeated
:param n: Integer with the number of repetitions
:return: List of n repetitions of Tensor x
"""
return tf.unstack(tf.repeat(tf.expand_dims(x, axis=0), n, axis=0), axis=0)
|
def repeat_as_list(x: TensorType, n: int):
"""
:param x: Array/Tensor to be repeated
:param n: Integer with the number of repetitions
:return: List of n repetitions of Tensor x
"""
return [x for _ in range(n)]
|
46,664 |
def info_from_p2p_addr(addr: multiaddr.Multiaddr) -> PeerInfo:
if not addr:
raise InvalidAddrError("addr should not be None")
if not isinstance(addr, multiaddr.Multiaddr):
raise InvalidAddrError(f"addr={addr} should be in type Multiaddr")
parts = addr.split()
if not parts:
raise InvalidAddrError()
p2p_part = parts[-1]
last_protocol_code = p2p_part.protocols()[0].code
if last_protocol_code != multiaddr.protocols.P_P2P:
raise InvalidAddrError(
f"the last protocol should be P_P2P instead of {last_protocol_code}"
)
# make sure the /p2p value parses as a peer.ID
peer_id_str = p2p_part.value_for_protocol(multiaddr.protocols.P_P2P)
peer_id = id_b58_decode(peer_id_str)
# we might have received just an / p2p part, which means there's no addr.
if len(parts) > 1:
addr = multiaddr.Multiaddr.join(*parts[:-1])
peer_data = PeerData()
peer_data.addrs = [addr]
peer_data.protocols = [p.code for p in addr.protocols()]
return PeerInfo(peer_id, peer_data)
|
def info_from_p2p_addr(addr: multiaddr.Multiaddr) -> PeerInfo:
if not addr:
raise InvalidAddrError("addr should not be None")
if not isinstance(addr, multiaddr.Multiaddr):
raise InvalidAddrError(f"addr={addr} should be in type Multiaddr")
parts = addr.split()
if not parts:
raise InvalidAddrError()
p2p_part = parts[-1]
last_protocol_code = p2p_part.protocols()[0].code
if last_protocol_code != multiaddr.protocols.P_P2P:
raise InvalidAddrError(
f"The last protocol should be `P_P2P` instead of `{last_protocol_code}`"
)
# make sure the /p2p value parses as a peer.ID
peer_id_str = p2p_part.value_for_protocol(multiaddr.protocols.P_P2P)
peer_id = id_b58_decode(peer_id_str)
# we might have received just an / p2p part, which means there's no addr.
if len(parts) > 1:
addr = multiaddr.Multiaddr.join(*parts[:-1])
peer_data = PeerData()
peer_data.addrs = [addr]
peer_data.protocols = [p.code for p in addr.protocols()]
return PeerInfo(peer_id, peer_data)
|
57,634 |
def testIssue_597(env):
env.expect("JSON.SET", "test", ".", "[0]").ok()
env.expect("JSON.SET", "test", ".[0]", "[0]", "NX").noError()
env.expect("JSON.SET", "test", ".[1]", "[0]", "NX").raiseError()
|
def testIssue_597(env):
env.expect("JSON.SET", "test", ".", "[0]").ok()
env.assertEqual(env.execute_command("JSON.SET", "test", ".[0]", "[0]", "NX"), None)
env.expect("JSON.SET", "test", ".[1]", "[0]", "NX").raiseError()
# make sure value was not changed
env.expect("JSON.GET", "test", ".").equal('[0]')
|
43,968 |
def generate_scf(mol, n_steps=50, tol=1e-8):
r"""Return a function that performs the self-consistent-field iterations.
Args:
mol (Molecule): the molecule object
n_steps (int): the number of iterations
tol (float): convergence tolerance
Returns:
function: function that performs the self-consistent-field iterations
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
>>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
>>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True),
>>> mol = Molecule(symbols, geometry, alpha=alpha)
>>> args = [alpha]
>>> v_fock, coeffs, fock_matrix, h_core, repulsion_tensor = generate_hartree_fock(mol)(*args)
>>> v_fock
array([-0.67578019, 0.94181155])
"""
def scf(*args):
r"""Perform the self-consistent-field iterations.
Args:
args (array[array[float]]): initial values of the differentiable parameters
Returns:
tuple(array[float]): eigenvalues of the Fock matrix, molecular orbital coefficients,
Fock matrix, core matrix
"""
basis_functions = mol.basis_set
charges = mol.nuclear_charges
r = mol.coordinates
n_electron = mol.n_electrons
if r.requires_grad:
repulsion_tensor = generate_repulsion_tensor(basis_functions)(*args[1:])
s = generate_overlap_matrix(basis_functions)(*args[1:])
else:
repulsion_tensor = generate_repulsion_tensor(basis_functions)(*args)
s = generate_overlap_matrix(basis_functions)(*args)
h_core = generate_core_matrix(basis_functions, charges, r)(*args)
w, v = anp.linalg.eigh(s)
x = v @ anp.diag(anp.array([1 / anp.sqrt(i) for i in w])) @ v.T
v_fock, w_fock = anp.linalg.eigh(x.T @ h_core @ x)
coeffs = x @ w_fock
p = molecular_density_matrix(n_electron, coeffs)
for _ in range(n_steps):
j = anp.einsum("pqrs,rs->pq", repulsion_tensor, p)
k = anp.einsum("psqr,rs->pq", repulsion_tensor, p)
fock_matrix = h_core + 2 * j - k
v_fock, w_fock = anp.linalg.eigh(x.T @ fock_matrix @ x)
coeffs = x @ w_fock
p_update = molecular_density_matrix(n_electron, coeffs)
if anp.linalg.norm(p_update - p) <= tol:
break
p = p_update
return v_fock, coeffs, fock_matrix, h_core, repulsion_tensor
return scf
|
def generate_scf(mol, n_steps=50, tol=1e-8):
r"""Return a function that performs the self-consistent-field iterations.
Args:
mol (Molecule): the molecule object
n_steps (int): the number of iterations
tol (float): convergence tolerance
Returns:
function: function that performs the self-consistent-field calculations
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
>>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
>>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True),
>>> mol = Molecule(symbols, geometry, alpha=alpha)
>>> args = [alpha]
>>> v_fock, coeffs, fock_matrix, h_core, repulsion_tensor = generate_hartree_fock(mol)(*args)
>>> v_fock
array([-0.67578019, 0.94181155])
"""
def scf(*args):
r"""Perform the self-consistent-field iterations.
Args:
args (array[array[float]]): initial values of the differentiable parameters
Returns:
tuple(array[float]): eigenvalues of the Fock matrix, molecular orbital coefficients,
Fock matrix, core matrix
"""
basis_functions = mol.basis_set
charges = mol.nuclear_charges
r = mol.coordinates
n_electron = mol.n_electrons
if r.requires_grad:
repulsion_tensor = generate_repulsion_tensor(basis_functions)(*args[1:])
s = generate_overlap_matrix(basis_functions)(*args[1:])
else:
repulsion_tensor = generate_repulsion_tensor(basis_functions)(*args)
s = generate_overlap_matrix(basis_functions)(*args)
h_core = generate_core_matrix(basis_functions, charges, r)(*args)
w, v = anp.linalg.eigh(s)
x = v @ anp.diag(anp.array([1 / anp.sqrt(i) for i in w])) @ v.T
v_fock, w_fock = anp.linalg.eigh(x.T @ h_core @ x)
coeffs = x @ w_fock
p = molecular_density_matrix(n_electron, coeffs)
for _ in range(n_steps):
j = anp.einsum("pqrs,rs->pq", repulsion_tensor, p)
k = anp.einsum("psqr,rs->pq", repulsion_tensor, p)
fock_matrix = h_core + 2 * j - k
v_fock, w_fock = anp.linalg.eigh(x.T @ fock_matrix @ x)
coeffs = x @ w_fock
p_update = molecular_density_matrix(n_electron, coeffs)
if anp.linalg.norm(p_update - p) <= tol:
break
p = p_update
return v_fock, coeffs, fock_matrix, h_core, repulsion_tensor
return scf
|
21,757 |
def _get_in_flight_counts() -> Dict[Tuple[str, ...], float]:
"""Returns a count of all in flight requests by (method, server_name)"""
# Cast to a list to prevent it changing while the Prometheus
# thread is collecting metrics
with _in_flight_requests_lock:
reqs = list(_in_flight_requests)
for rm in reqs:
rm.update_metrics()
# Map from (method, name) -> int, the number of in flight requests of that
# type. The key type is Tuple[str, str], but we leavethe length unspecified
# for compatability with LaterGauge's annotations.
counts: Dict[Tuple[str, ...], float] = {}
for rm in reqs:
key = (rm.method, rm.name)
counts[key] = counts.get(key, 0.0) + 1.0
return counts
|
def _get_in_flight_counts() -> Dict[Tuple[str, ...], float]:
"""Returns a count of all in flight requests by (method, server_name)"""
# Cast to a list to prevent it changing while the Prometheus
# thread is collecting metrics
with _in_flight_requests_lock:
reqs = list(_in_flight_requests)
for rm in reqs:
rm.update_metrics()
# Map from (method, name) -> int, the number of in flight requests of that
# type. The key type is Tuple[str, str], but we leave the length unspecified
# for compatability with LaterGauge's annotations.
counts: Dict[Tuple[str, ...], float] = {}
for rm in reqs:
key = (rm.method, rm.name)
counts[key] = counts.get(key, 0.0) + 1.0
return counts
|
5,727 |
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
alternative="two-sided"):
"""Calculate the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
Sample observation.
popmean : float or array_like
Expected value in null hypothesis. If array_like, then it must have the
same shape as `a` excluding the axis dimension.
axis : int or None, optional
Axis along which to compute test; default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
Two-sided p-value.
Notes
-----
The null hypothesis is determined as ``numpy.mean(a, axis) - popmean = 0``.
This is significant when determining which side of the t-distribution
the one-sided statistic should be taken from (``less`` or ``greater``).
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50, 2), random_state=rng)
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs, 5.0)
Ttest_1sampResult(statistic=array([-2.09794637, -1.75977004]), pvalue=array([0.04108952, 0.08468867]))
>>> stats.ttest_1samp(rvs, 0.0)
Ttest_1sampResult(statistic=array([1.64495065, 1.62095307]), pvalue=array([0.10638103, 0.11144602]))
Examples using axis and non-scalar dimension for population mean.
>>> result = stats.ttest_1samp(rvs, [5.0, 0.0])
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs.T, [5.0, 0.0], axis=1)
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs, [[5.0], [0.0]])
>>> result.statistic
array([[-2.09794637, -1.75977004],
[ 1.64495065, 1.62095307]])
>>> result.pvalue
array([[0.04108952, 0.08468867],
[0.10638103, 0.11144602]])
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
if alternative != 'two-sided':
raise ValueError("nan-containing/masked inputs with "
"nan_policy='omit' are currently not "
"supported by one-sided alternatives.")
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return Ttest_1sampResult(t, prob)
|
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
alternative="two-sided"):
"""Calculate the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
Sample observation.
popmean : float or array_like
Expected value in null hypothesis. If array_like, then it must have the
same shape as `a` excluding the axis dimension.
axis : int or None, optional
Axis along which to compute test; default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
Two-sided p-value.
Notes
-----
The null hypothesis is determined as ``np.mean(a, axis) - popmean = 0``.
This is significant when determining which side of the t-distribution
the one-sided statistic should be taken from (``less`` or ``greater``).
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50, 2), random_state=rng)
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs, 5.0)
Ttest_1sampResult(statistic=array([-2.09794637, -1.75977004]), pvalue=array([0.04108952, 0.08468867]))
>>> stats.ttest_1samp(rvs, 0.0)
Ttest_1sampResult(statistic=array([1.64495065, 1.62095307]), pvalue=array([0.10638103, 0.11144602]))
Examples using axis and non-scalar dimension for population mean.
>>> result = stats.ttest_1samp(rvs, [5.0, 0.0])
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs.T, [5.0, 0.0], axis=1)
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs, [[5.0], [0.0]])
>>> result.statistic
array([[-2.09794637, -1.75977004],
[ 1.64495065, 1.62095307]])
>>> result.pvalue
array([[0.04108952, 0.08468867],
[0.10638103, 0.11144602]])
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
if alternative != 'two-sided':
raise ValueError("nan-containing/masked inputs with "
"nan_policy='omit' are currently not "
"supported by one-sided alternatives.")
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return Ttest_1sampResult(t, prob)
|
57,840 |
def get_users_preference_command(client):
result = client.get_users_preference()
if not result.get('success'):
raise DemistoException(result['message'])
del result['success']
table_header = list(result.keys())
display_title = "User's Preference"
markdown = tableToMarkdown(display_title, result, headers=table_header)
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.User.Preference',
outputs=result
)
|
def get_users_preference_command(client):
result = client.get_users_preference()
if not result.get('success'):
raise DemistoException(result['message'])
del result['success']
table_header = list(result.keys())
display_title = "User's Preference"
if not result or len(result) == 0:
markdown = 'No users preference found.'
else:
markdown = tableToMarkdown(display_title, result, headers=table_header)
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.User.Preference',
outputs=result
)
|
2,668 |
def test_pipeline_invalid_parameters():
# Test the various init parameters of the pipeline in fit
# method
pipeline = Pipeline([(1, 1)])
with pytest.raises(TypeError):
pipeline.fit([[1]], [1])
# Check that we can't fit pipelines with objects without fit
# method
msg = (
"Last step of Pipeline should implement fit "
"or be the string 'passthrough'"
".*NoFit.*"
)
pipeline = Pipeline([("clf", NoFit())])
with pytest.raises(TypeError, match=msg):
pipeline.fit([[1]], [1])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([("svc", clf)])
assert pipe.get_params(deep=True) == dict(
svc__a=None, svc__b=None, svc=clf, **pipe.get_params(deep=False)
)
# Check that params are set
pipe.set_params(svc__a=0.1)
assert clf.a == 0.1
assert clf.b is None
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([("anova", filter1), ("svc", clf)])
# Check that estimators are not cloned on pipeline construction
assert pipe.named_steps["anova"] is filter1
assert pipe.named_steps["svc"] is clf
# Check that we can't fit with non-transformers on the way
# Note that NoTrans implements fit, but not transform
msg = "All intermediate steps should be transformers.*\\bNoTrans\\b.*"
pipeline = Pipeline([("t", NoTrans()), ("svc", clf)])
with pytest.raises(TypeError, match=msg):
pipeline.fit([[1]], [1])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert clf.C == 0.1
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
msg = re.escape(
"Invalid parameter 'C' for estimator SelectKBest(). Valid parameters are: ['k',"
" 'score_func']."
)
with pytest.raises(ValueError, match=msg):
pipe.set_params(anova__C=0.1)
# Test clone
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
pipe2 = clone(pipe)
assert not pipe.named_steps["svc"] is pipe2.named_steps["svc"]
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop("svc")
params.pop("anova")
params2.pop("svc")
params2.pop("anova")
assert params == params2
|
def test_pipeline_invalid_parameters():
# Test the various init parameters of the pipeline in fit
# method
pipeline = Pipeline([(1, 1)])
with pytest.raises(TypeError):
pipeline.fit([[1]], [1])
# Check that we can't fit pipelines with objects without fit
# method
msg = (
"Last step of Pipeline should implement fit "
"or be the string 'passthrough'"
".*NoFit.*"
)
pipeline = Pipeline([("clf", NoFit())])
with pytest.raises(TypeError, match=msg):
pipeline.fit([[1]], [1])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([("svc", clf)])
assert pipe.get_params(deep=True) == dict(
svc__a=None, svc__b=None, svc=clf, **pipe.get_params(deep=False)
)
# Check that params are set
pipe.set_params(svc__a=0.1)
assert clf.a == 0.1
assert clf.b is None
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([("anova", filter1), ("svc", clf)])
# Check that estimators are not cloned on pipeline construction
assert pipe.named_steps["anova"] is filter1
assert pipe.named_steps["svc"] is clf
# Check that we can't fit with non-transformers on the way
# Note that NoTrans implements fit, but not transform
msg = "All intermediate steps should be transformers.*\\bNoTrans\\b.*"
pipeline = Pipeline([("t", NoTrans()), ("svc", clf)])
with pytest.raises(TypeError, match=msg):
pipeline.fit([[1]], [1])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert clf.C == 0.1
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
msg = re.escape(
"Invalid parameter 'C' for estimator SelectKBest(). Valid parameters are: ['k',"
" 'score_func']."
)
with pytest.raises(ValueError, match=msg):
pipe.set_params(anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert not pipe.named_steps["svc"] is pipe2.named_steps["svc"]
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop("svc")
params.pop("anova")
params2.pop("svc")
params2.pop("anova")
assert params == params2
|
39,669 |
def main():
module = ForemanEntityApypieAnsibleModule(
entity_spec=dict(
name=dict(required=True),
compute_attributes=dict(type='nested_list', entity_spec=compute_attribute_entity_spec),
),
argument_spec=dict(
updated_name=dict(),
),
)
entity_dict = module.clean_params()
updated_name = entity_dict.get('updated_name')
compute_attributes = entity_dict.pop('compute_attributes', None)
if compute_attributes is not None and module.desired_absent:
module.fail_json(msg='compute_attributes not allowed when state=absent')
module.connect()
entity = module.find_resource_by_name('compute_profiles', name=entity_dict['name'], failsafe=True)
if module.state == 'present' and updated_name:
entity_dict['name'] = updated_name
changed, compute_profile = module.ensure_entity('compute_profiles', entity_dict, entity)
# Apply changes on underlying compute attributes only when present
if module.state == 'present' and compute_attributes is not None:
# Update or create compute attributes
scope = {'compute_profile_id': compute_profile['id']}
for ca_entity_dict in compute_attributes:
ca_entity_dict['compute_resource'] = module.find_resource_by_name(
'compute_resources', name=ca_entity_dict['compute_resource'], failsafe=False, thin=False)
ca_entities = ca_entity_dict['compute_resource'].get('compute_attributes', [])
ca_entity = next((item for item in ca_entities if item.get('compute_profile_id') == compute_profile['id']), None)
changed |= module.ensure_entity_state('compute_attributes', ca_entity_dict, ca_entity, entity_spec=compute_attribute_entity_spec, params=scope)
module.exit_json(changed=changed)
|
def main():
module = ForemanEntityAnsibleModule(
entity_spec=dict(
name=dict(required=True),
compute_attributes=dict(type='nested_list', entity_spec=compute_attribute_entity_spec),
),
argument_spec=dict(
updated_name=dict(),
),
)
entity_dict = module.clean_params()
updated_name = entity_dict.get('updated_name')
compute_attributes = entity_dict.pop('compute_attributes', None)
if compute_attributes is not None and module.desired_absent:
module.fail_json(msg='compute_attributes not allowed when state=absent')
module.connect()
entity = module.find_resource_by_name('compute_profiles', name=entity_dict['name'], failsafe=True)
if module.state == 'present' and updated_name:
entity_dict['name'] = updated_name
changed, compute_profile = module.ensure_entity('compute_profiles', entity_dict, entity)
# Apply changes on underlying compute attributes only when present
if module.state == 'present' and compute_attributes is not None:
# Update or create compute attributes
scope = {'compute_profile_id': compute_profile['id']}
for ca_entity_dict in compute_attributes:
ca_entity_dict['compute_resource'] = module.find_resource_by_name(
'compute_resources', name=ca_entity_dict['compute_resource'], failsafe=False, thin=False)
ca_entities = ca_entity_dict['compute_resource'].get('compute_attributes', [])
ca_entity = next((item for item in ca_entities if item.get('compute_profile_id') == compute_profile['id']), None)
changed |= module.ensure_entity_state('compute_attributes', ca_entity_dict, ca_entity, entity_spec=compute_attribute_entity_spec, params=scope)
module.exit_json(changed=changed)
|
30,407 |
def elasticsearch_builder():
"""Builds an Elasticsearch obj with the necessary credentials, proxy settings and secure connection.
"""
if USERNAME:
if PROXY:
return Elasticsearch(hosts=[SERVER], connection_class=RequestsHttpConnection,
http_auth=(USERNAME, PASSWORD), verify_certs=INSECURE, proxies=handle_proxy())
else:
return Elasticsearch(hosts=[SERVER], connection_class=RequestsHttpConnection,
http_auth=(USERNAME, PASSWORD), verify_certs=INSECURE)
else:
if PROXY:
return Elasticsearch(hosts=[SERVER], connection_class=RequestsHttpConnection,
verify_certs=INSECURE, proxies=handle_proxy())
else:
return Elasticsearch(hosts=[SERVER], connection_class=RequestsHttpConnection, verify_certs=INSECURE)
|
def elasticsearch_builder():
"""Build an Elasticsearch obj with the necessary credentials, proxy settings and secure connection."""
if USERNAME:
if PROXY:
return Elasticsearch(hosts=[SERVER], connection_class=RequestsHttpConnection,
http_auth=(USERNAME, PASSWORD), verify_certs=INSECURE, proxies=handle_proxy())
else:
return Elasticsearch(hosts=[SERVER], connection_class=RequestsHttpConnection,
http_auth=(USERNAME, PASSWORD), verify_certs=INSECURE)
else:
if PROXY:
return Elasticsearch(hosts=[SERVER], connection_class=RequestsHttpConnection,
verify_certs=INSECURE, proxies=handle_proxy())
else:
return Elasticsearch(hosts=[SERVER], connection_class=RequestsHttpConnection, verify_certs=INSECURE)
|
15,816 |
def _has_external_address(ip_str: str):
ip_addr = ip_address(ip_str)
return (
not ip_addr.is_multicast
and not ip_addr.is_loopback
and not ip_addr.is_link_local
)
|
def _has_external_address(ip_str: str) -> bool:
ip_addr = ip_address(ip_str)
return (
not ip_addr.is_multicast
and not ip_addr.is_loopback
and not ip_addr.is_link_local
)
|
24,505 |
def create_basic_auth(config):
# Since this is the default case, only activate when all fields are explicitly set
if config['username'] is not None and config['password'] is not None:
if config['use_legacy_auth_encoding']:
return config['username'], config['password']
else:
return ensure_bytes(config['username']), ensure_bytes(config['password'])
|
def create_basic_auth(config):
# Since this is the default case, only activate when all fields are explicitly set
if config.get('username') is not None and config.get('password') is not None:
if config['use_legacy_auth_encoding']:
return config['username'], config['password']
else:
return ensure_bytes(config['username']), ensure_bytes(config['password'])
|
8,441 |
def _jwst_s3d_loader(filename, **kwargs):
"""
Loader for JWST s3d 3D rectified spectral data in FITS format.
Parameters
----------
filename : str
The path to the FITS file
Returns
-------
SpectrumList
The spectra contained in the file.
"""
spectra = []
# Get a list of GWCS objects from the slits
with asdf.open(filename) as af:
wcslist = [af.tree["meta"]["wcs"]]
with fits.open(filename, memmap=False) as hdulist:
primary_header = hdulist["PRIMARY"].header
hdulist_sci = [hdu for hdu in hdulist if hdu.name == "SCI"]
for hdu, wcs in zip(hdulist_sci, wcslist):
# Get flux
try:
flux_unit = u.Unit(hdu.header["BUNIT"])
except (ValueError, KeyError):
flux_unit = None
# The spectral axis is first. We need it last
flux_array = hdu.data.T
flux = Quantity(flux_array, unit=flux_unit)
# Get the wavelength array from the GWCS object which returns a
# tuple of (RA, Dec, lambda)
grid = grid_from_bounding_box(wcs.bounding_box)
_, _, lam = wcs(*grid)
_, _, lam_unit = wcs.output_frame.unit
wavelength_array = lam[:, 0, 0]
wavelength = Quantity(wavelength_array, unit=lam_unit)
# Merge primary and slit headers and dump into meta
slit_header = hdu.header
header = primary_header.copy()
header.extend(slit_header, strip=True, update=True)
meta = {k: v for k, v in header.items()}
spec = Spectrum1D(flux=flux, spectral_axis=wavelength, meta=meta)
spectra.append(spec)
return SpectrumList(spectra)
|
def _jwst_s3d_loader(filename, **kwargs):
"""
Loader for JWST s3d 3D rectified spectral data in FITS format.
Parameters
----------
filename : str
The path to the FITS file
Returns
-------
SpectrumList
The spectra contained in the file.
"""
spectra = []
# Get a list of GWCS objects from the slits
with asdf.open(filename) as af:
wcslist = [af.tree["meta"]["wcs"]]
with fits.open(filename, memmap=False) as hdulist:
primary_header = hdulist["PRIMARY"].header
hdulist_sci = [hdu for hdu in hdulist if hdu.name == "SCI"]
for hdu, wcs in zip(hdulist_sci, wcslist):
# Get flux
try:
flux_unit = u.Unit(hdu.header["BUNIT"])
except (ValueError, KeyError):
flux_unit = None
# The spectral axis is first. We need it last
flux_array = hdu.data.T
flux = Quantity(flux_array, unit=flux_unit)
# Get the wavelength array from the GWCS object which returns a
# tuple of (RA, Dec, lambda)
grid = grid_from_bounding_box(wcs.bounding_box)
_, _, lam = wcs(*grid)
_, _, lam_unit = wcs.output_frame.unit
wavelength_array = lam[:, 0, 0]
wavelength = Quantity(wavelength_array, unit=lam_unit)
# Merge primary and slit headers and dump into meta
slit_header = hdu.header
header = primary_header.copy()
header.extend(slit_header, strip=True, update=True)
meta = dict(header)
spec = Spectrum1D(flux=flux, spectral_axis=wavelength, meta=meta)
spectra.append(spec)
return SpectrumList(spectra)
|
8,798 |
def guarded_pow(left, right):
# Only handle ints because floats will overflow anyway.
if not isinstance(left, numbers.Integral):
pass
elif not isinstance(right, numbers.Integral):
pass
elif pow_complexity(left, right) < 0.5:
# Value 0.5 is arbitrary and based on a estimated runtime of 0.5s
# on a fairly decent laptop processor.
pass
else:
raise ValueError("Pow expression too complex to calculate.")
return operator.pow(left, right)
|
def guarded_pow(left, right):
# Only handle ints because floats will overflow anyway.
if not isinstance(left, numbers.Integral):
pass
elif not isinstance(right, numbers.Integral):
pass
elif pow_complexity(left, right) < 0.5:
# Value 0.5 is arbitrary and based on an estimated runtime of 0.5s
# on a fairly decent laptop processor.
pass
else:
raise ValueError("Pow expression too complex to calculate.")
return operator.pow(left, right)
|
31,189 |
def test_module(client):
ts_from = ts_to = round(float(datetime.timestamp(datetime.utcnow())))
result = client.get_incidents(ts_from, ts_to)
if not result.get('success'):
raise DemistoException(result['message'])
demisto.results("ok")
|
def test_module(client):
ts_from = ts_to = round(datetime.timestamp(datetime.utcnow()))
result = client.get_incidents(ts_from, ts_to)
if not result.get('success'):
raise DemistoException(result['message'])
demisto.results("ok")
|
5,834 |
def _get_fs(fs, nyq):
"""
Utility for replacing the argument 'nyq' (with default 1) with 'fs'.
"""
if nyq is None and fs is None:
fs = 2
elif nyq is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'nyq' and 'fs'.")
msg = ("Keyword argument 'nyq' is deprecated in favour of fs and will"
" be removed in SciPy 1.11.0.")
warnings.warn(msg, DeprecationWarning, stacklevel=3)
fs = 2*nyq
return fs
|
def _get_fs(fs, nyq):
"""
Utility for replacing the argument 'nyq' (with default 1) with 'fs'.
"""
if nyq is None and fs is None:
fs = 2
elif nyq is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'nyq' and 'fs'.")
msg = ("Keyword argument 'nyq' is deprecated in favour of 'fs' and will"
" be removed in SciPy 1.11.0.")
warnings.warn(msg, DeprecationWarning, stacklevel=3)
fs = 2*nyq
return fs
|
55,048 |
def out_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the Hamiltonian which imposes the constraint that each node has
an outflow of at most one.
The out flow constraint is, for all :math:`i`:
.. math:: \sum_{j,(i,j)\in E}x_{ij} \leq 1,
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
\left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right)
where :math:`V` are the graph vertices and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has an outflow of at most one.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the out flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_out_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
def out_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `out flow constraint <https://1qbit.com/whitepaper/arbitrage/>`__
Hamiltonian for the maximum-weighted cycle problem.
The out flow constraint is, for all :math:`i`:
.. math:: \sum_{j,(i,j)\in E}x_{ij} \leq 1,
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
\left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right)
where :math:`V` are the graph vertices and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has an outflow of at most one.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the out flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_out_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
950 |
def express(expr, system, system2=None, variables=False):
"""
Global function for 'express' functionality.
Re-expresses a Vector, Dyadic or scalar(sympyfiable) in the given
coordinate system.
If 'variables' is True, then the coordinate variables (base scalars)
of other coordinate systems present in the vector/scalar field or
dyadic are also substituted in terms of the base scalars of the
given system.
Parameters
==========
expr : Vector/Dyadic/scalar(sympyfiable)
The expression to re-express in CoordSys3D 'system'
system: CoordSys3D
The coordinate system the expr is to be expressed in
system2: CoordSys3D
The other coordinate system required for re-expression
(only for a Dyadic Expr)
variables : boolean
Specifies whether to substitute the coordinate variables present
in expr, in terms of those of parameter system
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import Symbol, cos, sin
>>> N = CoordSys3D('N')
>>> q = Symbol('q')
>>> B = N.orient_new_axis('B', q, N.k)
>>> from sympy.vector import express
>>> express(B.i, N)
(cos(q))*N.i + (sin(q))*N.j
>>> express(N.x, B, variables=True)
B.x*cos(q) - B.y*sin(q)
>>> d = N.i.outer(N.i)
>>> express(d, B, N) == (cos(q))*(B.i|N.i) + (-sin(q))*(B.j|N.i)
True
"""
if expr in (0, Vector.zero):
return expr
if not isinstance(system, CoordSys3D):
raise TypeError("system should be a CoordSys3D \
instance")
if isinstance(expr, Vector):
if system2 is not None:
raise ValueError("system2 should not be provided for \
Vectors")
# Given expr is a Vector
if variables:
# If variables attribute is True, substitute
# the coordinate variables in the Vector
system_list = {x.system for x in expr.atoms(BaseScalar, BaseVector)
if x.system != system}
subs_dict = {}
for f in system_list:
subs_dict.update(f.scalar_map(system))
expr = expr.subs(subs_dict)
# Re-express in this coordinate system
outvec = Vector.zero
parts = expr.separate()
for x in parts:
if x != system:
temp = system.rotation_matrix(x) * parts[x].to_matrix(x)
outvec += matrix_to_vector(temp, system)
else:
outvec += parts[x]
return outvec
elif isinstance(expr, Dyadic):
if system2 is None:
system2 = system
if not isinstance(system2, CoordSys3D):
raise TypeError("system2 should be a CoordSys3D \
instance")
outdyad = Dyadic.zero
var = variables
for k, v in expr.components.items():
outdyad += (express(v, system, variables=var) *
(express(k.args[0], system, variables=var) |
express(k.args[1], system2, variables=var)))
return outdyad
else:
if system2 is not None:
raise ValueError("system2 should not be provided for \
Vectors")
if variables:
# Given expr is a scalar field
system_set = set()
expr = sympify(expr)
# Substitute all the coordinate variables
for x in expr.atoms(BaseScalar):
if x.system != system:
system_set.add(x.system)
subs_dict = {}
for f in system_set:
subs_dict.update(f.scalar_map(system))
return expr.subs(subs_dict)
return expr
|
def express(expr, system, system2=None, variables=False):
"""
Global function for 'express' functionality.
Re-expresses a Vector, Dyadic or scalar(sympyfiable) in the given
coordinate system.
If 'variables' is True, then the coordinate variables (base scalars)
of other coordinate systems present in the vector/scalar field or
dyadic are also substituted in terms of the base scalars of the
given system.
Parameters
==========
expr : Vector/Dyadic/scalar(sympyfiable)
The expression to re-express in CoordSys3D 'system'
system: CoordSys3D
The coordinate system the expr is to be expressed in
system2: CoordSys3D
The other coordinate system required for re-expression
(only for a Dyadic Expr)
variables : boolean
Specifies whether to substitute the coordinate variables present
in expr, in terms of those of parameter system
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import Symbol, cos, sin
>>> N = CoordSys3D('N')
>>> q = Symbol('q')
>>> B = N.orient_new_axis('B', q, N.k)
>>> from sympy.vector import express
>>> express(B.i, N)
(cos(q))*N.i + (sin(q))*N.j
>>> express(N.x, B, variables=True)
B.x*cos(q) - B.y*sin(q)
>>> d = N.i.outer(N.i)
>>> express(d, B, N) == (cos(q))*(B.i|N.i) + (-sin(q))*(B.j|N.i)
True
"""
if expr in (0, Vector.zero):
return expr
if not isinstance(system, CoordSys3D):
raise TypeError("system should be a CoordSys3D \
instance")
if isinstance(expr, Vector):
if system2 is not None:
raise ValueError("system2 should not be provided for \
Vectors")
# Given expr is a Vector
if variables:
# If variables attribute is True, substitute
# the coordinate variables in the Vector
system_list = {x.system for x in expr.atoms(BaseScalar, BaseVector)} - {system}
subs_dict = {}
for f in system_list:
subs_dict.update(f.scalar_map(system))
expr = expr.subs(subs_dict)
# Re-express in this coordinate system
outvec = Vector.zero
parts = expr.separate()
for x in parts:
if x != system:
temp = system.rotation_matrix(x) * parts[x].to_matrix(x)
outvec += matrix_to_vector(temp, system)
else:
outvec += parts[x]
return outvec
elif isinstance(expr, Dyadic):
if system2 is None:
system2 = system
if not isinstance(system2, CoordSys3D):
raise TypeError("system2 should be a CoordSys3D \
instance")
outdyad = Dyadic.zero
var = variables
for k, v in expr.components.items():
outdyad += (express(v, system, variables=var) *
(express(k.args[0], system, variables=var) |
express(k.args[1], system2, variables=var)))
return outdyad
else:
if system2 is not None:
raise ValueError("system2 should not be provided for \
Vectors")
if variables:
# Given expr is a scalar field
system_set = set()
expr = sympify(expr)
# Substitute all the coordinate variables
for x in expr.atoms(BaseScalar):
if x.system != system:
system_set.add(x.system)
subs_dict = {}
for f in system_set:
subs_dict.update(f.scalar_map(system))
return expr.subs(subs_dict)
return expr
|
30,867 |
def dict_safe_get(dict_object, keys, default_return_value=None, return_type=None, raise_return_type=True):
"""Recursive safe get query (for nested dicts and lists), If keys found return value otherwise return None or default value.
Example:
>>> dict = {"something" : {"test": "A"}}
>>> dict_safe_get(dict,['something', 'test'])
>>> 'A'
>>> dict_safe_get(dict,['something', 'else'],'default value')
>>> 'default value'
:type dict_object: ``dict``
:param dict_object: dictionary to query.
:type keys: ``list``
:param keys: keys for recursive get.
:type default_return_value: ``object``
:param default_return_value: Value to return when no key available.
:type return_type: ``object``
:param return_type: Excepted return type.
:type raise_return_type: ``bool``
:param raise_return_type: True if raising value error when value didn't match excepted return type.
:rtype: ``object``
:return:: Value from nested query.
"""
return_value = dict_object
for key in keys:
try:
return_value = return_value[key]
except (KeyError, TypeError, IndexError, AttributeError):
return_value = default_return_value
break
if return_type and not isinstance(return_value, return_type):
if raise_return_type:
raise TypeError("Safe get Error:\nDetails: Return Type Error Excepted return type {0},"
" but actual type from nested dict/list is {1} with value {2}.\n"
"Query: {3}\nQueried object: {4}".format(return_type, type(return_value),
return_value, keys, dict_object))
return_value = default_return_value
return return_value
|
def dict_safe_get(dict_object, keys, default_return_value=None, return_type=None, raise_return_type=True):
"""Recursive safe get query (for nested dicts and lists), If keys found return value otherwise return None or default value.
Example:
>>> dict = {"something" : {"test": "A"}}
>>> dict_safe_get(dict,['something', 'test'])
>>> 'A'
>>> dict_safe_get(dict,['something', 'else'],'default value')
>>> 'default value'
:type dict_object: ``dict``
:param dict_object: dictionary to query.
:type keys: ``list``
:param keys: keys for recursive get.
:type default_return_value: ``object``
:param default_return_value: Value to return when no key available.
:type return_type: ``object``
:param return_type: Excepted return type.
:type raise_return_type: ``bool``
:param raise_return_type: Whether to raise an error when the value didn't match the expected return type.
:rtype: ``object``
:return:: Value from nested query.
"""
return_value = dict_object
for key in keys:
try:
return_value = return_value[key]
except (KeyError, TypeError, IndexError, AttributeError):
return_value = default_return_value
break
if return_type and not isinstance(return_value, return_type):
if raise_return_type:
raise TypeError("Safe get Error:\nDetails: Return Type Error Excepted return type {0},"
" but actual type from nested dict/list is {1} with value {2}.\n"
"Query: {3}\nQueried object: {4}".format(return_type, type(return_value),
return_value, keys, dict_object))
return_value = default_return_value
return return_value
|
41,177 |
def _fast_walsh_hadamard_transform(a: Tuple[float]) -> np.array:
"""Fast Walsh–Hadamard Transform of array a."""
h = 1
a_ = np.array(a)
while h < len(a_):
for i in range(0, len(a_), h * 2):
for j in range(i, i + h):
x = a_[j]
y = a_[j + h]
a_[j] = x + y
a_[j + h] = x - y
h *= 2
return a_
|
def _fast_walsh_hadamard_transform(a: Tuple[float]) -> np.array:
"""Fast Walsh–Hadamard Transform of an array."""
h = 1
a_ = np.array(a)
while h < len(a_):
for i in range(0, len(a_), h * 2):
for j in range(i, i + h):
x = a_[j]
y = a_[j + h]
a_[j] = x + y
a_[j + h] = x - y
h *= 2
return a_
|
26,631 |
def load_schema() -> dict:
"""
Load & return Json Schema for DAG
"""
schema_file_name = 'schema.json'
schema_file = pkgutil.get_data(__name__, schema_file_name)
if schema_file is None:
raise AirflowException("Schema file {} does not exists".format(schema_file_name))
schema = json.loads(schema_file.decode())
return schema
|
def load_dag_schema_json() -> dict:
"""
Load & return Json Schema for DAG
"""
schema_file_name = 'schema.json'
schema_file = pkgutil.get_data(__name__, schema_file_name)
if schema_file is None:
raise AirflowException("Schema file {} does not exists".format(schema_file_name))
schema = json.loads(schema_file.decode())
return schema
|
24,617 |
def test_half_life_unstable_isotopes():
"""Test that `half_life` returns `None` and raises an exception for
all isotopes that do not yet have half-life data."""
for isotope in _data_about_isotopes.keys():
if (
"half_life" not in _data_about_isotopes[isotope].keys()
and not _data_about_isotopes[isotope].keys()
):
with pytest.raises(MissingParticleDataError):
half_life(isotope)
|
def test_half_life_unstable_isotopes():
"""Test that `half_life` returns `None` and raises an exception for
all isotopes that do not yet have half-life data."""
for isotope in _data_about_isotopes:
if (
"half_life" not in _data_about_isotopes[isotope].keys()
and not _data_about_isotopes[isotope].keys()
):
with pytest.raises(MissingParticleDataError):
half_life(isotope)
|
12,243 |
def configure_parser_init(sub_parsers):
help = "Initialize conda for shell interaction."
descr = help
epilog = dals(
"""
Key parts of conda's functionality require that it interact directly with the shell
within which conda is being invoked. The `conda activate` and `conda deactivate` commands
specifically are shell-level commands. That is, they affect the state (e.g. environment
variables) of the shell context being interacted with. Other core commands, like
`conda create` and `conda install`, also necessarily interact with the shell environment.
They're therefore implemented in ways specific to each shell. Each shell must be configured
to make use of them.
This command makes changes to your system that are specific and customized for each shell.
To see the specific files and locations on your system that will be affected before, use
the '--dry-run' flag. To see the exact changes that are being or will be made to each
location, use the '--verbose' flag.
IMPORTANT: After running `conda init`, most shells will need to be closed and restarted for
changes to take effect.
"""
)
# dev_example = dedent("""
# # An example for creating an environment to develop on conda's own code. Clone the
# # conda repo and install a dedicated miniconda within it. Remove all remnants of
# # conda source files in the `site-packages` directory associated with
# # `~/conda/devenv/bin/python`. Write a `conda.pth` file in that `site-packages`
# # directory pointing to source code in `~/conda`, the current working directory.
# # Write commands to stdout, suitable for bash `eval`, that sets up the current
# # shell as a dev environment.
#
# $ CONDA_PROJECT_ROOT="~/conda"
# $ git clone git@github.com:conda/conda "$CONDA_PROJECT_ROOT"
# $ cd "$CONDA_PROJECT_ROOT"
# $ wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
# $ bash Miniconda3-latest-Linux-x86_64.sh -bfp ./devenv
# $ eval "$(./devenv/bin/python -m conda init --dev bash)"
#
#
# """)
p = sub_parsers.add_parser(
'init',
description=descr,
help=help,
epilog=epilog,
)
p.add_argument(
"--dev",
action="store_true",
help=SUPPRESS,
default=NULL,
)
p.add_argument(
"--all",
action="store_true",
help="Initialize all currently available shells.",
default=NULL,
)
setup_type_group = p.add_argument_group('setup type')
setup_type_group.add_argument(
"--install",
action="store_true",
help=SUPPRESS,
default=NULL,
)
setup_type_group.add_argument(
"--user",
action="store_true",
help="Initialize conda for the current user (default).",
default=NULL,
)
setup_type_group.add_argument(
"--no-user",
action="store_false",
help="Don't initialize conda for the current user (default).",
default=NULL,
)
setup_type_group.add_argument(
"--system",
action="store_true",
help="Initialize conda for all users on the system.",
default=NULL,
)
setup_type_group.add_argument(
"--reverse",
action="store_true",
help="Undo effects of last conda init.",
default=NULL,
)
p.add_argument(
'shells',
nargs='*',
choices=COMPATIBLE_SHELLS,
metavar="SHELLS",
help=(
"One or more shells to be initialized. If not given, the default value is 'bash' on "
"unix and 'cmd.exe' & 'powershell' on Windows. Use the '--all' flag to initialize all "
f"shells. Available shells: {sorted(COMPATIBLE_SHELLS)}"
),
default=["cmd.exe", "powershell"] if on_win else ["bash"],
)
if on_win:
p.add_argument(
"--anaconda-prompt",
action="store_true",
help="Add an 'Anaconda Prompt' icon to your desktop.",
default=NULL,
)
add_parser_json(p)
p.add_argument(
"-d", "--dry-run",
action="store_true",
help="Only display what would have been done.",
)
p.set_defaults(func='.main_init.execute')
|
def configure_parser_init(sub_parsers):
help = "Initialize conda for shell interaction."
descr = help
epilog = dals(
"""
Key parts of conda's functionality require that it interact directly with the shell
within which conda is being invoked. The `conda activate` and `conda deactivate` commands
specifically are shell-level commands. That is, they affect the state (e.g. environment
variables) of the shell context being interacted with. Other core commands, like
`conda create` and `conda install`, also necessarily interact with the shell environment.
They're therefore implemented in ways specific to each shell. Each shell must be configured
to make use of them.
This command makes changes to your system that are specific and customized for each shell.
To see the specific files and locations on your system that will be affected before, use
the '--dry-run' flag. To see the exact changes that are being or will be made to each
location, use the '--verbose' flag.
IMPORTANT: After running `conda init`, most shells will need to be closed and restarted for
changes to take effect.
"""
)
# dev_example = dedent("""
# # An example for creating an environment to develop on conda's own code. Clone the
# # conda repo and install a dedicated miniconda within it. Remove all remnants of
# # conda source files in the `site-packages` directory associated with
# # `~/conda/devenv/bin/python`. Write a `conda.pth` file in that `site-packages`
# # directory pointing to source code in `~/conda`, the current working directory.
# # Write commands to stdout, suitable for bash `eval`, that sets up the current
# # shell as a dev environment.
#
# $ CONDA_PROJECT_ROOT="~/conda"
# $ git clone git@github.com:conda/conda "$CONDA_PROJECT_ROOT"
# $ cd "$CONDA_PROJECT_ROOT"
# $ wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
# $ bash Miniconda3-latest-Linux-x86_64.sh -bfp ./devenv
# $ eval "$(./devenv/bin/python -m conda init --dev bash)"
#
#
# """)
p = sub_parsers.add_parser(
'init',
description=descr,
help=help,
epilog=epilog,
)
p.add_argument(
"--dev",
action="store_true",
help=SUPPRESS,
default=NULL,
)
p.add_argument(
"--all",
action="store_true",
help="Initialize all currently available shells.",
default=NULL,
)
setup_type_group = p.add_argument_group('setup type')
setup_type_group.add_argument(
"--install",
action="store_true",
help=SUPPRESS,
default=NULL,
)
setup_type_group.add_argument(
"--user",
action="store_true",
help="Initialize conda for the current user (default).",
default=NULL,
)
setup_type_group.add_argument(
"--no-user",
action="store_false",
help="Don't initialize conda for the current user (default).",
default=NULL,
)
setup_type_group.add_argument(
"--system",
action="store_true",
help="Initialize conda for all users on the system.",
default=NULL,
)
setup_type_group.add_argument(
"--reverse",
action="store_true",
help="Undo effects of last conda init.",
default=NULL,
)
p.add_argument(
'shells',
nargs='*',
choices=COMPATIBLE_SHELLS,
metavar="SHELLS",
help=(
"One or more shells to be initialized. If not given, the default value is 'bash' on "
"unix and 'cmd.exe' & 'powershell' on Windows. Use the '--all' flag to initialize all "
f"shells. Available shells: {', '.join(sorted(COMPATIBLE_SHELLS))}"
),
default=["cmd.exe", "powershell"] if on_win else ["bash"],
)
if on_win:
p.add_argument(
"--anaconda-prompt",
action="store_true",
help="Add an 'Anaconda Prompt' icon to your desktop.",
default=NULL,
)
add_parser_json(p)
p.add_argument(
"-d", "--dry-run",
action="store_true",
help="Only display what would have been done.",
)
p.set_defaults(func='.main_init.execute')
|
1,081 |
def bids_gen_info(bids_event_files,
condition_column='trial_type',
amplitude_column=None,
time_repetition=False,
):
"""Generate subject_info structure from a list of BIDS .tsv event files.
Parameters
----------
bids_event_files : list of str
Filenames of BIDS .tsv event files containing columns including:
'onset', 'duration', and 'trial_type' or the `condition_column` value.
condition_column : str
Column of files in `bids_event_files` based on the values of which
events will be sorted into different regressors
amplitude_column : str
Column of files in `bids_event_files` based on the values of which
to apply amplitudes to events. If unspecified, all events will be
represented with an amplitude of 1.
Returns
-------
list of Bunch
"""
info = []
for bids_event_file in bids_event_files:
with open(bids_event_file) as f:
f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t')
events = [{k: v for k, v in row.items()} for row in f_events]
conditions = list(set([i[condition_column] for i in events]))
runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[])
for condition in conditions:
selected_events = [i for i in events if i[condition_column]==condition]
onsets = [float(i['onset']) for i in selected_events]
durations = [float(i['duration']) for i in selected_events]
if time_repetition:
decimals = math.ceil(-math.log10(time_repetition))
onsets = [round(i,decimals) for i in onsets]
durations = [round(i,decimals) for i in durations]
if condition:
runinfo.conditions.append(condition)
else:
runinfo.conditions.append('e0')
runinfo.onsets.append(onsets)
runinfo.durations.append(durations)
try:
amplitudes = [float(i[amplitude_column]) for i in selected_events]
runinfo.amplitudes.append(amplitudes)
except KeyError:
runinfo.amplitudes.append([1]*len(onsets))
info.append(runinfo)
return info
|
def bids_gen_info(bids_event_files,
condition_column='trial_type',
amplitude_column=None,
time_repetition=False,
):
"""Generate subject_info structure from a list of BIDS .tsv event files.
Parameters
----------
bids_event_files : list of str
Filenames of BIDS .tsv event files containing columns including:
'onset', 'duration', and 'trial_type' or the `condition_column` value.
condition_column : str
Column of files in `bids_event_files` based on the values of which
events will be sorted into different regressors
amplitude_column : str
Column of files in `bids_event_files` based on the values of which
to apply amplitudes to events. If unspecified, all events will be
represented with an amplitude of 1.
Returns
-------
list of Bunch
"""
info = []
for bids_event_file in bids_event_files:
with open(bids_event_file) as f:
f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t')
events = [{k: v for k, v in row.items()} for row in f_events]
conditions = list(set([i[condition_column] for i in events]))
runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[])
for condition in conditions:
selected_events = [i for i in events if i[condition_column]==condition]
onsets = [float(i['onset']) for i in selected_events]
durations = [float(i['duration']) for i in selected_events]
if time_repetition:
decimals = math.ceil(-math.log10(time_repetition))
onsets = [round(i,decimals) for i in onsets]
durations = [round(i, decimals) for i in durations]
if condition:
runinfo.conditions.append(condition)
else:
runinfo.conditions.append('e0')
runinfo.onsets.append(onsets)
runinfo.durations.append(durations)
try:
amplitudes = [float(i[amplitude_column]) for i in selected_events]
runinfo.amplitudes.append(amplitudes)
except KeyError:
runinfo.amplitudes.append([1]*len(onsets))
info.append(runinfo)
return info
|
32,512 |
def main():
params = demisto.params()
args = demisto.args()
client = Client(
params.get("base_url"),
verify=params.get("Verify SSL"),
headers={"Authorization": f'Bearer {params.get("API Key")}'},
proxy=params.get("system_proxy", False)
)
commands = {
"test-module": test_module,
"triage-query-samples": query_samples,
"triage-query-search": query_search,
"triage-submit-sample": submit_sample,
"triage-get-sample": get_sample,
"triage-get-sample-summary": get_sample_summary,
"triage-delete-sample": delete_sample,
"triage-set-sample-profile": set_sample_profile,
"triage-get-static-report": get_static_report,
"triage-get-report-triage": get_report_triage,
"triage-get-kernel-monitor": get_kernel_monitor,
"triage-get-pcap": get_pcap,
"triage-get-dumped-file": get_dumped_files,
"triage-get-users": get_users,
"triage-create-user": create_user,
"triage-delete-user": delete_user,
"triage-create-api-key": create_apikey,
"triage-get-api-key": get_apikey,
"triage-delete-api-key": delete_apikey,
"triage-get-profiles": get_profile,
"triage-create-profile": create_profile,
"triage-update-profile": update_profile,
"triage-delete-profile": delete_profile,
}
command = demisto.command()
try:
if command not in commands:
raise IncorrectUsageError(
f"Command '{command}' is not available in this integration"
)
return_results(commands[command](client, **args)) # type: ignore
except Exception as e:
return_error(str(e))
|
def main():
params = demisto.params()
args = demisto.args()
client = Client(
params.get("base_url"),
verify=params.get("Verify SSL"),
headers={"Authorization": f'Bearer {params.get("API Key")}'},
proxy=params.get("proxy", False)
)
commands = {
"test-module": test_module,
"triage-query-samples": query_samples,
"triage-query-search": query_search,
"triage-submit-sample": submit_sample,
"triage-get-sample": get_sample,
"triage-get-sample-summary": get_sample_summary,
"triage-delete-sample": delete_sample,
"triage-set-sample-profile": set_sample_profile,
"triage-get-static-report": get_static_report,
"triage-get-report-triage": get_report_triage,
"triage-get-kernel-monitor": get_kernel_monitor,
"triage-get-pcap": get_pcap,
"triage-get-dumped-file": get_dumped_files,
"triage-get-users": get_users,
"triage-create-user": create_user,
"triage-delete-user": delete_user,
"triage-create-api-key": create_apikey,
"triage-get-api-key": get_apikey,
"triage-delete-api-key": delete_apikey,
"triage-get-profiles": get_profile,
"triage-create-profile": create_profile,
"triage-update-profile": update_profile,
"triage-delete-profile": delete_profile,
}
command = demisto.command()
try:
if command not in commands:
raise IncorrectUsageError(
f"Command '{command}' is not available in this integration"
)
return_results(commands[command](client, **args)) # type: ignore
except Exception as e:
return_error(str(e))
|
49,146 |
def time2str(delta):
"""Return string representing provided datetime.timedelta value in human-readable form."""
res = None
if not isinstance(delta, datetime.timedelta):
raise EasyBuildError("Incorrect value type provided to time2str, should be datetime.timedelta: %s", type(delta))
delta_secs = delta.total_seconds()
hours = int(delta_secs / 3600)
delta_secs -= hours * 3600
mins = int(delta_secs / 60)
secs = int(delta_secs - mins * 60)
res = []
if hours:
res.append('%d %s' % (hours, 'hour' if hours == 1 else 'hours'))
if mins or hours:
res.append('%d %s' % (mins, 'min' if mins == 1 else 'mins'))
res.append('%d %s' % (secs, 'sec' if secs == 1 else 'secs'))
return ' '.join(res)
|
def time2str(delta):
"""Return string representing provided datetime.timedelta value in human-readable form."""
res = None
if not isinstance(delta, datetime.timedelta):
raise EasyBuildError("Incorrect value type provided to time2str, should be datetime.timedelta: %s", type(delta))
delta_secs = delta.total_seconds()
hours, remainder = divmod(delta_secs, 3600)
mins, secs = divmod(remainder, 60)
res = []
if hours:
res.append('%d %s' % (hours, 'hour' if hours == 1 else 'hours'))
if mins or hours:
res.append('%d %s' % (mins, 'min' if mins == 1 else 'mins'))
res.append('%d %s' % (secs, 'sec' if secs == 1 else 'secs'))
return ' '.join(res)
|
42,906 |
def graph_embed_deprecated(A, max_mean_photon=1.0, make_traceless=False, rtol=1e-05, atol=1e-08):
r"""Embed a graph into a Gaussian state.
Given a graph in terms of a symmetric adjacency matrix
(in general with arbitrary complex off-diagonal and real diagonal entries),
returns the squeezing parameters and interferometer necessary for
creating the Gaussian state whose off-diagonal parts are proportional to that matrix.
Uses :func:`takagi`.
Args:
A (array[complex]): square, symmetric (weighted) adjacency matrix of the graph
max_mean_photon (float): Threshold value. It guarantees that the mode with
the largest squeezing has ``max_mean_photon`` as the mean photon number
i.e., :math:`sinh(r_{max})^2 ==` ``max_mean_photon``.
make_traceless (bool): Removes the trace of the input matrix, by performing the transformation
:math:`\tilde{A} = A-\mathrm{tr}(A) \I/n`. This may reduce the amount of squeezing needed to encode
the graph.
rtol (float): relative tolerance used when checking if the input matrix is symmetric.
atol (float): absolute tolerance used when checking if the input matrix is symmetric.
Returns:
tuple[array, array]: squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = A.shape
if m != n:
raise ValueError("The matrix is not square.")
if not np.allclose(A, np.transpose(A), rtol=rtol, atol=atol):
raise ValueError("The matrix is not symmetric.")
if make_traceless:
A = A - np.trace(A) * np.identity(n) / n
s, U = takagi(A, tol=atol)
sc = np.sqrt(1.0 + 1.0 / max_mean_photon)
vals = -np.arctanh(s / (s[0] * sc))
return vals, U
|
def graph_embed_deprecated(A, max_mean_photon=1.0, make_traceless=False, rtol=1e-05, atol=1e-08):
r"""Embed a graph into a Gaussian state.
Given a graph in terms of a symmetric adjacency matrix
(in general with arbitrary complex off-diagonal and real diagonal entries),
returns the squeezing parameters and interferometer necessary for
creating the Gaussian state whose off-diagonal parts are proportional to that matrix.
Uses :func:`takagi`.
Args:
A (array[complex]): square, symmetric (weighted) adjacency matrix of the graph
max_mean_photon (float): Threshold value. It guarantees that the mode with
the largest squeezing has ``max_mean_photon`` as the mean photon number
i.e., :math:`sinh(r_{max})^2 ==` ``max_mean_photon``.
make_traceless (bool): Removes the trace of the input matrix, by performing the transformation
:math:`\tilde{A} = A-\mathrm{tr}(A) \I/n`. This may reduce the amount of squeezing needed to encode
the graph.
rtol (float): relative tolerance used when checking if the input matrix is symmetric
atol (float): absolute tolerance used when checking if the input matrix is symmetric.
Returns:
tuple[array, array]: squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = A.shape
if m != n:
raise ValueError("The matrix is not square.")
if not np.allclose(A, np.transpose(A), rtol=rtol, atol=atol):
raise ValueError("The matrix is not symmetric.")
if make_traceless:
A = A - np.trace(A) * np.identity(n) / n
s, U = takagi(A, tol=atol)
sc = np.sqrt(1.0 + 1.0 / max_mean_photon)
vals = -np.arctanh(s / (s[0] * sc))
return vals, U
|
31,825 |
def get_original_alerts_command(client: Client, args: Dict) -> CommandResults:
alert_id_list = argToList(args.get('alert_id_list', []))
raw_response = client.get_original_alerts(alert_id_list)
reply = copy.deepcopy(raw_response)
alerts = reply.get('alerts', [])
for i, alert in enumerate(alerts):
# decode raw_response
try:
alert['original_alert_json'] = safe_load_json(alert.get('original_alert_json', ''))
# some of the returned JSON fields are double encoded, so it needs to be double-decoded.
# example: {"x": "someValue", "y": "{\"z\":\"anotherValue\"}"}
decode_dict_values(alert)
except Exception:
continue
# remove original_alert_json field and add its content to alert.
alert.update(
alert.pop('original_alert_json', None))
updated_alert = filter_general_fields(alert)
filter_vendor_fields(updated_alert)
alerts[i] = updated_alert
return CommandResults(
outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.OriginalAlert',
outputs_key_field='internal_id',
outputs=alerts,
raw_response=raw_response,
)
|
def get_original_alerts_command(client: Client, args: Dict) -> CommandResults:
alert_id_list = argToList(args.get('alert_ids', []))
raw_response = client.get_original_alerts(alert_id_list)
reply = copy.deepcopy(raw_response)
alerts = reply.get('alerts', [])
for i, alert in enumerate(alerts):
# decode raw_response
try:
alert['original_alert_json'] = safe_load_json(alert.get('original_alert_json', ''))
# some of the returned JSON fields are double encoded, so it needs to be double-decoded.
# example: {"x": "someValue", "y": "{\"z\":\"anotherValue\"}"}
decode_dict_values(alert)
except Exception:
continue
# remove original_alert_json field and add its content to alert.
alert.update(
alert.pop('original_alert_json', None))
updated_alert = filter_general_fields(alert)
filter_vendor_fields(updated_alert)
alerts[i] = updated_alert
return CommandResults(
outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.OriginalAlert',
outputs_key_field='internal_id',
outputs=alerts,
raw_response=raw_response,
)
|
7,012 |
def forward_lookup(platforms, job_platform):
"""
Find out which job platform to use given a list of possible platforms and
a task platform string.
Verifies selected platform is present in global.rc file and returns it,
raises error if platfrom is not in global.rc or returns 'localhost' if
no platform is initally selected.
Args:
job_platform (str):
platform item from config [runtime][TASK]platform
platforms (dictionary):
list of possible platforms defined by global.rc
Returns:
platform (str):
string representing a platform from the global config.
Example:
>>> platforms = {
... 'suite server platform': None,
... 'desktop[0-9][0-9]|laptop[0-9][0-9]': None,
... 'sugar': {
... 'remote hosts': 'localhost',
... 'batch system': 'slurm'
... },
... 'hpc': {
... 'remote hosts': ['hpc1', 'hpc2'],
... 'batch system': 'pbs'
... },
... 'hpc1-bg': {
... 'remote hosts': 'hpc1',
... 'batch system': 'background'
... },
... 'hpc2-bg': {
... 'remote hosts': 'hpc2',
... 'batch system': 'background'
... }
... }
>>> job_platform = 'desktop22'
>>> forward_lookup(platforms, job_platform)[0]
'desktop22'
"""
if job_platform is None:
return 'localhost'
for platform in reversed(list(platforms)):
if re.fullmatch(platform, job_platform):
return job_platform, platform
raise PlatformLookupError(
f"No matching platform \"{job_platform}\" found")
|
def forward_lookup(platforms, job_platform):
"""
Find out which job platform to use given a list of possible platforms and
a task platform string.
Verifies selected platform is present in global.rc file and returns it,
raises error if platfrom is not in global.rc or returns 'localhost' if
no platform is initally selected.
Args:
job_platform (str):
platform item from config [runtime][TASK]platform
platforms (dictionary):
list of possible platforms defined by global.rc
Returns:
platform (str):
string representing a platform from the global config.
Example:
>>> platforms = {
... 'suite server platform': None,
... 'desktop[0-9][0-9]|laptop[0-9][0-9]': None,
... 'sugar': {
... 'remote hosts': 'localhost',
... 'batch system': 'slurm'
... },
... 'hpc': {
... 'remote hosts': ['hpc1', 'hpc2'],
... 'batch system': 'pbs'
... },
... 'hpc1-bg': {
... 'remote hosts': 'hpc1',
... 'batch system': 'background'
... },
... 'hpc2-bg': {
... 'remote hosts': 'hpc2',
... 'batch system': 'background'
... }
... }
>>> job_platform = 'desktop22'
>>> forward_lookup(platforms, job_platform)
'desktop22'
"""
if job_platform is None:
return 'localhost'
for platform in reversed(list(platforms)):
if re.fullmatch(platform, job_platform):
return job_platform, platform
raise PlatformLookupError(
f"No matching platform \"{job_platform}\" found")
|
1,596 |
def _incremental_weighted_mean_and_var(X, sample_weight,
last_weighted_mean,
last_weighted_variance,
last_weight_sum):
"""Calculate weighted mean and variance batch update
last_weighted_mean and last_weighted_variance are statistics
computed at the last step by the function. Both must be
initialized to 0.0. In case no scaling is required
last_weighted_variance can be None. The weighted_mean is
always required and returned because necessary for the
calculation of the weighted_variance. last_weight sum is
the sum of weights encountered until now.
Derived from the paper "Incremental calculation of
weighted mean and variance",
by Tony Finch.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for statistics update
sample_weight : array-like, shape (n_samples,)
last_weighted_mean : array-like, shape: (n_features,)
last_weighted_variance : array-like, shape: (n_features,)
last_weight_sum : array-like, shape (n_features,)
Returns
-------
updated_weighted_mean : array, shape (n_features,)
updated_weighted_variance : array, shape (n_features,)
If None, only weighted_mean is computed
updated_weight_sum : array, shape (n_features,)
Notes
-----
NaNs in X are ignored.
References
----------
Tony Finch
"Incremental calculation of weighted mean and variance"
University of Cambridge Computing Service, February 2009
"""
# last = stats until now
# new = the current increment
# updated = the aggregated stats
M = np.isnan(X)
sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1)))
new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~M).ravel()
total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0)
X_0 = np.where(np.isnan(X), 0, X)
new_weighted_mean = \
_safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0)
new_weighted_mean *= total_weight_sum / new_weight_sum
updated_weight_sum = last_weight_sum + new_weight_sum
updated_weighted_mean = (
(last_weight_sum * last_weighted_mean +
new_weight_sum * new_weighted_mean) / updated_weight_sum)
if last_weighted_variance is None:
updated_weighted_variance = None
else:
X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2)
new_weighted_variance = \
_safe_accumulator_op(
np.average, X_0, weights=sample_weight, axis=0)
new_weighted_variance *= total_weight_sum / new_weight_sum
new_element = (
new_weight_sum *
(new_weighted_variance +
(new_weighted_mean - updated_weighted_mean) ** 2))
last_element = (
last_weight_sum *
(last_weighted_variance +
(last_weighted_mean - updated_weighted_mean) ** 2))
updated_weighted_variance = (
new_element + last_element) / updated_weight_sum
return updated_weighted_mean, updated_weighted_variance, updated_weight_sum
|
def _incremental_weighted_mean_and_var(X, sample_weight,
last_weighted_mean,
last_weighted_variance,
last_weight_sum):
"""Calculate weighted mean and variance batch update
last_weighted_mean and last_weighted_variance are statistics
computed at the last step by the function. Both must be
initialized to 0.0. In case no scaling is required
last_weighted_variance can be None. The weighted_mean is
always required and returned because necessary for the
calculation of the weighted_variance. last_weight sum is
the sum of weights encountered until now.
Derived from the paper "Incremental calculation of
weighted mean and variance",
by Tony Finch.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for statistics update
sample_weight : array-like, shape (n_samples,)
last_weighted_mean : array-like, shape: (n_features,)
last_weighted_variance : array-like, shape: (n_features,)
last_weight_sum : array-like, shape (n_features,)
Returns
-------
updated_weighted_mean : array, shape (n_features,)
updated_weighted_variance : array, shape (n_features,)
If None, only weighted_mean is computed
updated_weight_sum : array, shape (n_features,)
Notes
-----
NaNs in X are ignored.
References
----------
Tony Finch
"Incremental calculation of weighted mean and variance"
University of Cambridge Computing Service, February 2009
"""
# last = stats until now
# new = the current increment
# updated = the aggregated stats
M = np.isnan(X)
sample_weight_T = np.reshape(sample_weight, (1, -1))
new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~M).ravel()
total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0)
X_0 = np.where(np.isnan(X), 0, X)
new_weighted_mean = \
_safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0)
new_weighted_mean *= total_weight_sum / new_weight_sum
updated_weight_sum = last_weight_sum + new_weight_sum
updated_weighted_mean = (
(last_weight_sum * last_weighted_mean +
new_weight_sum * new_weighted_mean) / updated_weight_sum)
if last_weighted_variance is None:
updated_weighted_variance = None
else:
X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2)
new_weighted_variance = \
_safe_accumulator_op(
np.average, X_0, weights=sample_weight, axis=0)
new_weighted_variance *= total_weight_sum / new_weight_sum
new_element = (
new_weight_sum *
(new_weighted_variance +
(new_weighted_mean - updated_weighted_mean) ** 2))
last_element = (
last_weight_sum *
(last_weighted_variance +
(last_weighted_mean - updated_weighted_mean) ** 2))
updated_weighted_variance = (
new_element + last_element) / updated_weight_sum
return updated_weighted_mean, updated_weighted_variance, updated_weight_sum
|
625 |
def exact_match(hypothesis, reference):
"""
matches exact words in hypothesis and reference
and returns a word mapping based on the enumerated
word id between hypothesis and reference
:param hypothesis: pre-tokenized hypothesis
:type hypothesis: array-like(str)
:param reference: pre-tokenized reference
:type hypothesis: array-like(str)
:return: enumerated matched tuples, enumerated unmatched hypothesis tuples,
enumerated unmatched reference tuples
:rtype: list of 2D tuples, list of 2D tuples, list of 2D tuples
"""
hypothesis_list, reference_list = _generate_enums(hypothesis, reference)
return _match_enums(hypothesis_list, reference_list)
|
def exact_match(hypothesis, reference):
"""
matches exact words in hypothesis and reference
and returns a word mapping based on the enumerated
word id between hypothesis and reference
:param hypothesis: pre-tokenized hypothesis
:type hypothesis: array-like(str)
:param reference: pre-tokenized reference
:type reference: array-like(str)
:return: enumerated matched tuples, enumerated unmatched hypothesis tuples,
enumerated unmatched reference tuples
:rtype: list of 2D tuples, list of 2D tuples, list of 2D tuples
"""
hypothesis_list, reference_list = _generate_enums(hypothesis, reference)
return _match_enums(hypothesis_list, reference_list)
|
31,890 |
def get_tag_groups(tag_groups: list) -> list:
"""
Returns the tag groups as a list of the groups names.
Args:
tag_groups: list of all groups
Returns:
The tag groups as a list of the groups names
"""
# Tag_groups is a list of dictionaries, each contains a tag group name and its description
results = []
if len(tag_groups) > 0:
for group in tag_groups:
tag_group_name = group.get('tag_group_name', '')
if tag_group_name:
results.append(tag_group_name)
return results
|
def get_tag_groups(tag_groups: list) -> list:
"""
Returns the tag groups as a list of the groups names.
Args:
tag_groups: list of all groups
Returns:
The tag groups as a list of the groups names
"""
# tag_groups is a list of dictionaries, each contains a tag group name and its description
results = []
if len(tag_groups) > 0:
for group in tag_groups:
tag_group_name = group.get('tag_group_name', '')
if tag_group_name:
results.append(tag_group_name)
return results
|
17,772 |
def from_bounds(left, bottom, right, top, transform=None,
height=None, width=None, precision=None):
"""Get the window corresponding to the bounding coordinates.
Parameters
----------
left: float, required
Left (west) bounding coordinates
bottom: float, required
Bottom (south) bounding coordinates
right: float, required
Right (east) bounding coordinates
top: float, required
Top (north) bounding coordinates
transform: Affine, required
Affine transform matrix.
height: int, required
Number of rows of the window.
width: int, required
Number of columns of the window.
precision: int, optional
Number of decimal points of precision when computing inverse
transform.
Returns
-------
Window
A new Window.
Raises
------
WindowError
If a window can't be calculated.
"""
if not isinstance(transform, Affine): # TODO: RPCs?
raise WindowError("A transform object is required to calculate the window")
row_start, col_start = rowcol(
transform, left, top, op=float, precision=precision)
row_stop, col_stop = rowcol(
transform, right, bottom, op=float, precision=precision)
return Window.from_slices(
(row_start, row_stop), (col_start, col_stop), height=height,
width=width, boundless=True)
|
def from_bounds(left, bottom, right, top, transform=None,
height=None, width=None, precision=None):
"""Get the window corresponding to the bounding coordinates.
Parameters
----------
left: float, required
Left (west) bounding coordinates
bottom: float, required
Bottom (south) bounding coordinates
right: float, required
Right (east) bounding coordinates
top: float, required
Top (north) bounding coordinates
transform: Affine, required
Affine transform matrix.
height: int, optional
Number of rows of the window.
width: int, required
Number of columns of the window.
precision: int, optional
Number of decimal points of precision when computing inverse
transform.
Returns
-------
Window
A new Window.
Raises
------
WindowError
If a window can't be calculated.
"""
if not isinstance(transform, Affine): # TODO: RPCs?
raise WindowError("A transform object is required to calculate the window")
row_start, col_start = rowcol(
transform, left, top, op=float, precision=precision)
row_stop, col_stop = rowcol(
transform, right, bottom, op=float, precision=precision)
return Window.from_slices(
(row_start, row_stop), (col_start, col_stop), height=height,
width=width, boundless=True)
|
5,445 |
def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):
"""
Install the named fileset(s)/rpm package(s).
.. versionadded:: 3005
preference to install rpm packages are to use in the following order:
/opt/freeware/bin/dnf
/opt/freeware/bin/yum
/usr/bin/yum
/usr/bin/rpm
.. note:
use of rpm to install implies that rpm's dependencies must have been previously installed.
dnf and yum automatically install rpm's dependencies as part of the install process
Alogrithm to install filesets or rpms is as follows:
if ends with '.rte' or '.bff'
process as fileset
if ends with '.rpm'
process as rpm
if unrecognised or no file extension
attempt process with dnf | yum
failure implies attempt process as fileset
Fileset needs to be available as a single path and filename
compound filesets are not handled and are not supported
an example is bos.adt.insttools which is part of bos.adt.other and is installed as follows
/usr/bin/installp -acXYg /cecc/repos/aix72/TL4/BASE/installp/ppc/bos.adt.other bos.adt.insttools
name
The name of the fileset or rpm package to be installed.
refresh
Whether or not to update the yum database before executing.
pkgs
A list of filesets and/or rpm packages to install.
Must be passed as a python list. The ``name`` parameter will be
ignored if this option is passed.
version
Install a specific version of a fileset/rpm package.
(Unused at present).
test
Verify that command functions correctly.
Returns a dict containing the new fileset(s)/rpm package(s) names and versions:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True
salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff
salt '*' pkg.install /cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff
salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte
salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base
salt '*' pkg.install /cecc/repos/aix72/TL3/BASE/installp/ppc/bos.net
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install libxml2
"""
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
log.debug(f"Installing these fileset(s)/rpm package(s) '{name}': '{targets}'")
# Get a list of the currently installed pkgs.
old = list_pkgs()
# Install the fileset (normally ends with bff or rte) or rpm package(s)
errors = []
for target in targets:
filename = os.path.basename(target)
flag_fileset = False
flag_actual_rpm = False
flag_try_rpm_failed = False
cmd = ""
out = {}
if filename.endswith(".bff") or filename.endswith(".rte"):
flag_fileset = True
log.debug(f"install identified '{filename}' as fileset")
else:
if filename.endswith(".rpm"):
flag_actual_rpm = True
log.debug(f"install identified '{filename}' as rpm")
else:
log.debug(f"install, filename '{filename}' trying install as rpm")
# assume use dnf or yum
cmdflags = "install "
libpathenv = {"LIBPATH": "/opt/freeware/lib:/usr/lib"}
if pathlib.Path("/opt/freeware/bin/dnf").is_file():
cmdflags += "--allowerasing "
cmdexe = "/opt/freeware/bin/dnf"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
if refresh:
cmdflags += "--refresh "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/usr/bin/yum").is_file():
# check for old yum first, removed if new dnf or yum
cmdexe = "/usr/bin/yum"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/opt/freeware/bin/yum").is_file():
cmdflags += "--allowerasing "
cmdexe = "/opt/freeware/bin/yum"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
if refresh:
cmdflags += "--refresh "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
else:
cmdexe = "/usr/bin/rpm"
cmdflags = "-Uivh "
if test:
cmdflags += "--test"
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](cmd, python_shell=False)
log.debug(f"result of command '{cmd}', out '{out}'")
if "retcode" in out and not (0 == out["retcode"] or 100 == out["retcode"]):
if not flag_actual_rpm:
flag_try_rpm_failed = True
log.debug(
f"install tried filename '{filename}' as rpm and failed, trying as fileset"
)
else:
errors.append(out["stderr"])
log.debug(
f"install error rpm path, out '{out}', resultant errors '{errors}'"
)
if flag_fileset or flag_try_rpm_failed:
# either identified as fileset, or failed trying install as rpm, try as fileset
cmd = "/usr/sbin/installp -acYXg"
if test:
cmd += "p"
cmd += " -d "
dirpath = os.path.dirname(target)
cmd += dirpath + " " + filename
log.debug(f"install fileset command '{cmd}'")
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if 0 != out["retcode"]:
errors.append(out["stderr"])
log.debug(
f"install error fileset path, out '{out}', resultant errors '{errors}'"
)
# Get a list of the packages after the uninstall
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problems encountered installing filesets(s)/package(s)",
info={"changes": ret, "errors": errors},
)
# No error occurred
if test:
return "Test succeeded."
return ret
|
def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):
"""
Install the named fileset(s)/rpm package(s).
.. versionadded:: 3005
preference to install rpm packages are to use in the following order:
/opt/freeware/bin/dnf
/opt/freeware/bin/yum
/usr/bin/yum
/usr/bin/rpm
.. note:
use of rpm to install implies that rpm's dependencies must have been previously installed.
dnf and yum automatically install rpm's dependencies as part of the install process
Alogrithm to install filesets or rpms is as follows:
if ends with '.rte' or '.bff'
process as fileset
if ends with '.rpm'
process as rpm
if unrecognised or no file extension
attempt process with dnf | yum
failure implies attempt process as fileset
Fileset needs to be available as a single path and filename
compound filesets are not handled and are not supported
An example is bos.adt.insttools which is part of bos.adt.other and is installed as follows
/usr/bin/installp -acXYg /cecc/repos/aix72/TL4/BASE/installp/ppc/bos.adt.other bos.adt.insttools
name
The name of the fileset or rpm package to be installed.
refresh
Whether or not to update the yum database before executing.
pkgs
A list of filesets and/or rpm packages to install.
Must be passed as a python list. The ``name`` parameter will be
ignored if this option is passed.
version
Install a specific version of a fileset/rpm package.
(Unused at present).
test
Verify that command functions correctly.
Returns a dict containing the new fileset(s)/rpm package(s) names and versions:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True
salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff
salt '*' pkg.install /cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff
salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte
salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base
salt '*' pkg.install /cecc/repos/aix72/TL3/BASE/installp/ppc/bos.net
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install libxml2
"""
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
log.debug(f"Installing these fileset(s)/rpm package(s) '{name}': '{targets}'")
# Get a list of the currently installed pkgs.
old = list_pkgs()
# Install the fileset (normally ends with bff or rte) or rpm package(s)
errors = []
for target in targets:
filename = os.path.basename(target)
flag_fileset = False
flag_actual_rpm = False
flag_try_rpm_failed = False
cmd = ""
out = {}
if filename.endswith(".bff") or filename.endswith(".rte"):
flag_fileset = True
log.debug(f"install identified '{filename}' as fileset")
else:
if filename.endswith(".rpm"):
flag_actual_rpm = True
log.debug(f"install identified '{filename}' as rpm")
else:
log.debug(f"install, filename '{filename}' trying install as rpm")
# assume use dnf or yum
cmdflags = "install "
libpathenv = {"LIBPATH": "/opt/freeware/lib:/usr/lib"}
if pathlib.Path("/opt/freeware/bin/dnf").is_file():
cmdflags += "--allowerasing "
cmdexe = "/opt/freeware/bin/dnf"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
if refresh:
cmdflags += "--refresh "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/usr/bin/yum").is_file():
# check for old yum first, removed if new dnf or yum
cmdexe = "/usr/bin/yum"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/opt/freeware/bin/yum").is_file():
cmdflags += "--allowerasing "
cmdexe = "/opt/freeware/bin/yum"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
if refresh:
cmdflags += "--refresh "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
else:
cmdexe = "/usr/bin/rpm"
cmdflags = "-Uivh "
if test:
cmdflags += "--test"
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](cmd, python_shell=False)
log.debug(f"result of command '{cmd}', out '{out}'")
if "retcode" in out and not (0 == out["retcode"] or 100 == out["retcode"]):
if not flag_actual_rpm:
flag_try_rpm_failed = True
log.debug(
f"install tried filename '{filename}' as rpm and failed, trying as fileset"
)
else:
errors.append(out["stderr"])
log.debug(
f"install error rpm path, out '{out}', resultant errors '{errors}'"
)
if flag_fileset or flag_try_rpm_failed:
# either identified as fileset, or failed trying install as rpm, try as fileset
cmd = "/usr/sbin/installp -acYXg"
if test:
cmd += "p"
cmd += " -d "
dirpath = os.path.dirname(target)
cmd += dirpath + " " + filename
log.debug(f"install fileset command '{cmd}'")
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if 0 != out["retcode"]:
errors.append(out["stderr"])
log.debug(
f"install error fileset path, out '{out}', resultant errors '{errors}'"
)
# Get a list of the packages after the uninstall
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problems encountered installing filesets(s)/package(s)",
info={"changes": ret, "errors": errors},
)
# No error occurred
if test:
return "Test succeeded."
return ret
|
30,356 |
def parse_date_string(date_string, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Parses date_string to corresponding datetime according to format.
Examples:
>>> parse_date_string('2019-09-17T06:16:39Z')
datetime(2019, 9, 17, 6, 16, 39)
>>> parse_date_string('2019-09-17T06:16:39.22Z')
datetime(2019, 9, 17, 6, 16, 39, 220000)
>>> parse_date_string('2019-09-17T06:16:39.4040+05:00', '%Y-%m-%dT%H:%M:%S+02:00')
datetime(2019, 9, 17, 6, 16, 39, 404000)
:type date_string: ``str``
:param date_string: The date string to be parsed. (required)
:type date_format: ``str``
:param date_format: The date format of the date string, should be provided if known. (optional)
:return: The parsed datetime.
:rtype: ``(datetime.datetime, datetime.datetime)``
"""
try:
return datetime.strptime(date_string, date_format)
except ValueError as e:
error_message = str(e)
date_format = '%Y-%m-%dT%H:%M:%S'
time_data_regex = r'time data \'(.*?)\''
time_data_match = re.findall(time_data_regex, error_message)
sliced_time_data = ''
if time_data_match:
# found time date which does not match date format
# example of caught error message:
# "time data '2019-09-17T06:16:39Z' does not match format '%Y-%m-%dT%H:%M:%S.%fZ'"
time_data = time_data_match[0]
# removing YYYY-MM-DDThh:mm:ss from the time data to keep only milliseconds and time zone
sliced_time_data = time_data[19:]
else:
unconverted_data_remains_regex = r'unconverted data remains: (.*)'
unconverted_data_remains_match = re.findall(unconverted_data_remains_regex, error_message)
if unconverted_data_remains_match:
# found unconverted_data_remains
# example of caught error message:
# "unconverted data remains: 22Z"
sliced_time_data = unconverted_data_remains_match[0]
if not sliced_time_data:
# did not catch expected error
raise ValueError(e)
if '.' in sliced_time_data:
# found milliseconds - appending ".%f" to date format
date_format += '.%f'
timezone_regex = r'[Zz+-].*'
time_zone = re.findall(timezone_regex, sliced_time_data)
if time_zone:
# found timezone - appending it to the date format
date_format += time_zone[0]
return datetime.strptime(date_string, date_format)
|
def parse_date_string(date_string, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Parses date_string to corresponding datetime according to format.
Examples:
>>> parse_date_string('2019-09-17T06:16:39Z')
datetime(2019, 9, 17, 6, 16, 39)
>>> parse_date_string('2019-09-17T06:16:39.22Z')
datetime(2019, 9, 17, 6, 16, 39, 220000)
>>> parse_date_string('2019-09-17T06:16:39.4040+05:00', '%Y-%m-%dT%H:%M:%S+02:00')
datetime(2019, 9, 17, 6, 16, 39, 404000)
:type date_string: ``str``
:param date_string: The date string to be parsed. (required)
:type date_format: ``str``
:param date_format: The date format of the date string, should be provided if known. (optional)
:return: The parsed datetime.
:rtype: ``(datetime.datetime, datetime.datetime)``
"""
try:
return datetime.strptime(date_string, date_format)
except ValueError as e:
error_message = str(e)
date_format = '%Y-%m-%dT%H:%M:%S'
time_data_regex = r'time data \'(.*?)\''
time_data_match = re.findall(time_data_regex, error_message)
sliced_time_data = ''
if time_data_match:
# found time date which does not match date format
# example of caught error message:
# "time data '2019-09-17T06:16:39Z' does not match format '%Y-%m-%dT%H:%M:%S.%fZ'"
time_data = time_data_match[0]
# removing YYYY-MM-DDThh:mm:ss from the time data to keep only milliseconds and time zone
sliced_time_data = time_data[19:]
else:
unconverted_data_remains_regex = r'unconverted data remains: (.*)'
unconverted_data_remains_match = re.findall(unconverted_data_remains_regex, error_message)
if unconverted_data_remains_match:
# found unconverted_data_remains
# example of caught error message:
# "unconverted data remains: 22Z"
sliced_time_data = unconverted_data_remains_match[0]
if not sliced_time_data:
# did not catch expected error
raise
if '.' in sliced_time_data:
# found milliseconds - appending ".%f" to date format
date_format += '.%f'
timezone_regex = r'[Zz+-].*'
time_zone = re.findall(timezone_regex, sliced_time_data)
if time_zone:
# found timezone - appending it to the date format
date_format += time_zone[0]
return datetime.strptime(date_string, date_format)
|
20,137 |
def iterative_train_test_split(X, y, test_size, random_state=None):
"""Iteratively stratified train/test split
Parameters
----------
test_size : float, [0,1]
the proportion of the dataset to include in the test split, the rest will be put in the train set
random_state : int
the random state seed (optional)
Returns
-------
X_train, y_train, X_test, y_test
stratified division into train/test split
"""
stratifier = IterativeStratification(n_splits=2, order=2, sample_distribution_per_fold=[test_size, 1.0-test_size],
random_state=random_state)
train_indexes, test_indexes = next(stratifier.split(X, y))
X_train, y_train = X[train_indexes, :], y[train_indexes, :]
X_test, y_test = X[test_indexes, :], y[test_indexes, :]
return X_train, y_train, X_test, y_test
|
def iterative_train_test_split(X, y, test_size, random_state=None):
"""Iteratively stratified train/test split
Parameters
----------
test_size : float, [0,1]
the proportion of the dataset to include in the test split, the rest will be put in the train set
random_state : None | int | instance of RandomState
the random state seed (optional)
Returns
-------
X_train, y_train, X_test, y_test
stratified division into train/test split
"""
stratifier = IterativeStratification(n_splits=2, order=2, sample_distribution_per_fold=[test_size, 1.0-test_size],
random_state=random_state)
train_indexes, test_indexes = next(stratifier.split(X, y))
X_train, y_train = X[train_indexes, :], y[train_indexes, :]
X_test, y_test = X[test_indexes, :], y[test_indexes, :]
return X_train, y_train, X_test, y_test
|
23,150 |
def make_timeseries(
start="2000-01-01",
end="2000-12-31",
dtypes={"name": str, "id": int, "x": float, "y": float},
freq="10s",
partition_freq="1M",
seed=None,
**kwargs,
):
"""Create timeseries dataframe with random data
Parameters
----------
start: datetime (or datetime-like string)
Start of time series
end: datetime (or datetime-like string)
End of time series
dtypes: dict
Mapping of column names to types.
Valid types include {float, int, str, 'category'}
freq: string
String like '2s' or '1H' or '12W' for the time series frequency
partition_freq: string
String like '1M' or '2Y' to divide the dataframe into partitions
seed: int (optional)
Randomstate seed
kwargs:
Keywords to pass down to individual column creation functions.
Keywords should be prefixed by the column name and then an underscore.
Examples
--------
>>> import dask.dataframe as dd
>>> df = dd.demo.make_timeseries('2000', '2010',
... {'value': float, 'name': str, 'id': int},
... freq='2H', partition_freq='1D', seed=1)
>>> df.head() # doctest: +SKIP
id name value
2000-01-01 00:00:00 969 Jerry -0.309014
2000-01-01 02:00:00 1010 Ray -0.760675
2000-01-01 04:00:00 1016 Patricia -0.063261
2000-01-01 06:00:00 960 Charlie 0.788245
2000-01-01 08:00:00 1031 Kevin 0.466002
"""
divisions = list(pd.date_range(start=start, end=end, freq=partition_freq))
npartitions = len(divisions) - 1
if seed is None:
# Get random integer seed for each partition. We can
# call `random_state_data` in `MakeTimeseriesPart`
state_data = np.random.randint(2e9, size=npartitions)
else:
state_data = random_state_data(npartitions, seed)
# Build parts
parts = []
for i in range(len(divisions) - 1):
parts.append((divisions[i : i + 2], state_data[i]))
# Construct the output collection with from_map
return from_map(
MakeTimeseriesPart(dtypes, freq, kwargs),
parts,
meta=make_timeseries_part("2000", "2000", dtypes, "1H", state_data[0], kwargs),
divisions=divisions,
label="make-timeseries-",
token=tokenize(start, end, dtypes, freq, partition_freq, state_data),
enforce_metadata=False,
)
|
def make_timeseries(
start="2000-01-01",
end="2000-12-31",
dtypes={"name": str, "id": int, "x": float, "y": float},
freq="10s",
partition_freq="1M",
seed=None,
**kwargs,
):
"""Create timeseries dataframe with random data
Parameters
----------
start: datetime (or datetime-like string)
Start of time series
end: datetime (or datetime-like string)
End of time series
dtypes: dict
Mapping of column names to types.
Valid types include {float, int, str, 'category'}
freq: string
String like '2s' or '1H' or '12W' for the time series frequency
partition_freq: string
String like '1M' or '2Y' to divide the dataframe into partitions
seed: int (optional)
Randomstate seed
kwargs:
Keywords to pass down to individual column creation functions.
Keywords should be prefixed by the column name and then an underscore.
Examples
--------
>>> import dask.dataframe as dd
>>> df = dd.demo.make_timeseries('2000', '2010',
... {'value': float, 'name': str, 'id': int},
... freq='2H', partition_freq='1D', seed=1)
>>> df.head() # doctest: +SKIP
id name value
2000-01-01 00:00:00 969 Jerry -0.309014
2000-01-01 02:00:00 1010 Ray -0.760675
2000-01-01 04:00:00 1016 Patricia -0.063261
2000-01-01 06:00:00 960 Charlie 0.788245
2000-01-01 08:00:00 1031 Kevin 0.466002
"""
divisions = list(pd.date_range(start=start, end=end, freq=partition_freq))
npartitions = len(divisions) - 1
if seed is None:
# Get random integer seed for each partition. We can
# call `random_state_data` in `MakeTimeseriesPart`
state_data = np.random.randint(2e9, size=npartitions)
else:
state_data = random_state_data(npartitions, seed)
# Build parts
parts = []
for i in range(len(divisions) - 1):
parts.append((divisions[i : i + 2], state_data[i]))
# Construct the output collection with from_map
return from_map(
MakeTimeseriesPart(dtypes, freq, kwargs),
parts,
meta=make_timeseries_part("2000", "2000", dtypes, "1H", state_data[0], kwargs),
divisions=divisions,
label="make-timeseries",
token=tokenize(start, end, dtypes, freq, partition_freq, state_data),
enforce_metadata=False,
)
|
48,946 |
def guess_gasteiger_charges(atomgroup):
"""Guess Gasteiger partial charges using RDKit
Parameters
----------
atomgroup : mda.core.groups.AtomGroup
Atoms for which the charges will be guessed
Returns
-------
charges : numpy.ndarray
Array of float values representing the charge of each atom
.. versionadded:: 2.0.0
"""
mol = atomgroup.convert_to("RDKIT")
from rdkit.Chem.rdPartialCharges import ComputeGasteigerCharges
ComputeGasteigerCharges(mol, throwOnParamFailure=True)
atoms = sorted(mol.GetAtoms(),
key=lambda a: a.GetIntProp("_MDAnalysis_index"))
return np.array([atom.GetDoubleProp("_GasteigerCharge") for atom in atoms],
dtype=np.float32)
|
def guess_gasteiger_charges(atomgroup):
"""Guess Gasteiger partial charges using RDKit
Parameters
----------
atomgroup : mda.core.groups.AtomGroup
Atoms for which the charges will be guessed
Returns
-------
charges : numpy.ndarray
Array of float values representing the charge of each atom
"""
mol = atomgroup.convert_to("RDKIT")
from rdkit.Chem.rdPartialCharges import ComputeGasteigerCharges
ComputeGasteigerCharges(mol, throwOnParamFailure=True)
atoms = sorted(mol.GetAtoms(),
key=lambda a: a.GetIntProp("_MDAnalysis_index"))
return np.array([atom.GetDoubleProp("_GasteigerCharge") for atom in atoms],
dtype=np.float32)
|
12,215 |
def find_tempfiles(paths: Iterable[str]) -> Tuple[str]:
tempfiles = []
for path in sorted(set(paths or [sys.prefix])):
# tempfiles are files in path
for root, _, files in walk(path):
for file in files:
# tempfiles also end in .c~ or .trash
if not file.endswith(CONDA_TEMP_EXTENSIONS):
continue
tempfiles.append(join(root, file))
return tempfiles
|
def find_tempfiles(paths: Iterable[str]) -> List[str]:
tempfiles = []
for path in sorted(set(paths or [sys.prefix])):
# tempfiles are files in path
for root, _, files in walk(path):
for file in files:
# tempfiles also end in .c~ or .trash
if not file.endswith(CONDA_TEMP_EXTENSIONS):
continue
tempfiles.append(join(root, file))
return tempfiles
|
9,092 |
def createStringObject(
string: Union[str, bytes],
forceEncoding: Union[None,str,list] = None
) -> Union[TextStringObject, ByteStringObject]:
"""
Given a string, create a ByteStringObject or a TextStringObject to
represent the string.
:param string: A string
:raises TypeError: If string is not of type str or bytes.
"""
if isinstance(forceEncoding,list):
out = ""
for x in string:
try:
out += forceEncoding[x]
except:
out += x
return x
elif isinstance(forceEncoding,str):
return TextStringObject(string.decode(forceEncoding))
elif isinstance(string, str):
return TextStringObject(string)
elif isinstance(string, bytes_type):
try:
if string.startswith(codecs.BOM_UTF16_BE):
retval = TextStringObject(string.decode("utf-16"))
retval.autodetect_utf16 = True
return retval
else:
# This is probably a big performance hit here, but we need to
# convert string objects into the text/unicode-aware version if
# possible... and the only way to check if that's possible is
# to try. Some strings are strings, some are just byte arrays.
retval = TextStringObject(decode_pdfdocencoding(string))
retval.autodetect_pdfdocencoding = True
return retval
except UnicodeDecodeError:
return ByteStringObject(string)
else:
raise TypeError("createStringObject should have str or unicode arg")
|
def createStringObject(
string: Union[str, bytes],
forceEncoding: Union[None,str,list] = None
) -> Union[TextStringObject, ByteStringObject]:
"""
Given a string, create a ByteStringObject or a TextStringObject to
represent the string.
:param string: A string
:raises TypeError: If string is not of type str or bytes.
"""
if isinstance(forceEncoding,list):
out = ""
for x in string:
try:
out += forceEncoding[x]
except Exception:
out += x
return x
elif isinstance(forceEncoding,str):
return TextStringObject(string.decode(forceEncoding))
elif isinstance(string, str):
return TextStringObject(string)
elif isinstance(string, bytes_type):
try:
if string.startswith(codecs.BOM_UTF16_BE):
retval = TextStringObject(string.decode("utf-16"))
retval.autodetect_utf16 = True
return retval
else:
# This is probably a big performance hit here, but we need to
# convert string objects into the text/unicode-aware version if
# possible... and the only way to check if that's possible is
# to try. Some strings are strings, some are just byte arrays.
retval = TextStringObject(decode_pdfdocencoding(string))
retval.autodetect_pdfdocencoding = True
return retval
except UnicodeDecodeError:
return ByteStringObject(string)
else:
raise TypeError("createStringObject should have str or unicode arg")
|
47,912 |
def main():
args = parse_args()
omz_dir = (Path(__file__).parent / '../..').resolve()
demos_dir = omz_dir / 'demos'
auto_tools_dir = omz_dir / 'tools/downloader'
model_info_list = json.loads(subprocess.check_output(
[sys.executable, '--', str(auto_tools_dir / 'info_dumper.py'), '--all'],
universal_newlines=True))
model_info = {model['name']: model for model in model_info_list}
if args.demos is not None:
names_of_demos_to_test = set(args.demos.split(','))
demos_to_test = [demo for demo in DEMOS if demo.full_name in names_of_demos_to_test]
else:
demos_to_test = DEMOS
with temp_dir_as_path() as global_temp_dir:
dl_dir = prepare_models(auto_tools_dir, args.downloader_cache_dir, args.mo, global_temp_dir, demos_to_test)
num_failures = 0
os.putenv('PYTHONPATH', "{}:{}/lib".format(os.environ['PYTHONPATH'], args.demo_build_dir))
fps_finder = re.compile(r'(?<=FPS:\s|fps:\s)[0-9]+\.?[0-9]*(?=\s)|(?<=\s)[0-9]+\.?[0-9]*(?= ?FPS| ?fps)')
for demo in demos_to_test:
print('Testing {}...'.format(demo.full_name))
print()
declared_model_names = {model['name']
for model in json.loads(subprocess.check_output(
[sys.executable, '--', str(auto_tools_dir / 'info_dumper.py'),
'--list', str(demo.models_lst_path(demos_dir))],
universal_newlines=True))}
with temp_dir_as_path() as temp_dir:
arg_context = ArgContext(
source_dir=demos_dir / demo.subdirectory,
dl_dir=dl_dir,
data_sequence_dir=temp_dir / 'data_seq',
data_sequences=DATA_SEQUENCES,
model_info=model_info,
test_data_dir=args.test_data_dir,
)
def resolve_arg(arg):
if isinstance(arg, str): return arg
return arg.resolve(arg_context)
def option_to_args(key, value):
if value is None: return [key]
if isinstance(value, list): return [key, *map(resolve_arg, value)]
return [key, resolve_arg(value)]
fixed_args = demo.fixed_args(demos_dir, args.demo_build_dir)
print('Fixed arguments:', ' '.join(map(shlex.quote, fixed_args)))
print()
device_args = demo.device_args(args.devices.split())
for test_case_index, test_case in enumerate(demo.test_cases):
case_args = [demo_arg
for key, value in sorted(test_case.options.items())
for demo_arg in option_to_args(key, value)]
case_model_names = {arg.name for arg in test_case.options.values() if isinstance(arg, ModelArg)}
undeclared_case_model_names = case_model_names - declared_model_names
if undeclared_case_model_names:
print("Test case #{}: models not listed in demo's models.lst: {}".format(
test_case_index, ' '.join(sorted(undeclared_case_model_names))))
print()
num_failures += 1
continue
for device, dev_arg in device_args.items():
print('Test case #{}/{}:'.format(test_case_index, device),
' '.join(shlex.quote(str(arg)) for arg in dev_arg + case_args))
print(flush=True)
try:
start_time = timeit.default_timer()
demo_output = subprocess.check_output(fixed_args + dev_arg + case_args,
stderr=subprocess.STDOUT, universal_newlines=True)
execution_time = timeit.default_timer() - start_time
match_fps = fps_finder.search(demo_output)
if match_fps is not None:
fps = match_fps.group(0)
else:
fps = 'N/A'
except subprocess.CalledProcessError as e:
print(e.output)
print('Exit code:', e.returncode)
num_failures += 1
execution_time = -1
fps = -1
if args.report_file:
collect_result(demo.full_name, device, case_model_names, execution_time, fps, args.report_file)
print()
print("Failures: {}".format(num_failures))
sys.exit(0 if num_failures == 0 else 1)
|
def main():
args = parse_args()
omz_dir = (Path(__file__).parent / '../..').resolve()
demos_dir = omz_dir / 'demos'
auto_tools_dir = omz_dir / 'tools/downloader'
model_info_list = json.loads(subprocess.check_output(
[sys.executable, '--', str(auto_tools_dir / 'info_dumper.py'), '--all'],
universal_newlines=True))
model_info = {model['name']: model for model in model_info_list}
if args.demos is not None:
names_of_demos_to_test = set(args.demos.split(','))
demos_to_test = [demo for demo in DEMOS if demo.full_name in names_of_demos_to_test]
else:
demos_to_test = DEMOS
with temp_dir_as_path() as global_temp_dir:
dl_dir = prepare_models(auto_tools_dir, args.downloader_cache_dir, args.mo, global_temp_dir, demos_to_test)
num_failures = 0
os.putenv('PYTHONPATH', "{}:{}/lib".format(os.environ['PYTHONPATH'], args.demo_build_dir))
fps_finder = re.compile(r'(?<=FPS:\s|fps:\s)[0-9]+\.?[0-9]*(?=\s)|(?<=\s)[0-9]+\.?[0-9]*(?= ?FPS| ?fps)')
for demo in demos_to_test:
print('Testing {}...'.format(demo.full_name))
print()
declared_model_names = {model['name']
for model in json.loads(subprocess.check_output(
[sys.executable, '--', str(auto_tools_dir / 'info_dumper.py'),
'--list', str(demo.models_lst_path(demos_dir))],
universal_newlines=True))}
with temp_dir_as_path() as temp_dir:
arg_context = ArgContext(
source_dir=demos_dir / demo.subdirectory,
dl_dir=dl_dir,
data_sequence_dir=temp_dir / 'data_seq',
data_sequences=DATA_SEQUENCES,
model_info=model_info,
test_data_dir=args.test_data_dir,
)
def resolve_arg(arg):
if isinstance(arg, str): return arg
return arg.resolve(arg_context)
def option_to_args(key, value):
if value is None: return [key]
if isinstance(value, list): return [key, *map(resolve_arg, value)]
return [key, resolve_arg(value)]
fixed_args = demo.fixed_args(demos_dir, args.demo_build_dir)
print('Fixed arguments:', ' '.join(map(shlex.quote, fixed_args)))
print()
device_args = demo.device_args(args.devices.split())
for test_case_index, test_case in enumerate(demo.test_cases):
case_args = [demo_arg
for key, value in sorted(test_case.options.items())
for demo_arg in option_to_args(key, value)]
case_model_names = {arg.name for arg in test_case.options.values() if isinstance(arg, ModelArg)}
undeclared_case_model_names = case_model_names - declared_model_names
if undeclared_case_model_names:
print("Test case #{}: models not listed in demo's models.lst: {}".format(
test_case_index, ' '.join(sorted(undeclared_case_model_names))))
print()
num_failures += 1
continue
for device, dev_arg in device_args.items():
print('Test case #{}/{}:'.format(test_case_index, device),
' '.join(shlex.quote(str(arg)) for arg in dev_arg + case_args))
print(flush=True)
try:
start_time = timeit.default_timer()
demo_output = subprocess.check_output(fixed_args + dev_arg + case_args,
stderr=subprocess.STDOUT, universal_newlines=True)
execution_time = timeit.default_timer() - start_time
match_fps = fps_finder.search(demo_output)
fps = match_fps.group(0) if match_fps else 'N/A'
except subprocess.CalledProcessError as e:
print(e.output)
print('Exit code:', e.returncode)
num_failures += 1
execution_time = -1
fps = -1
if args.report_file:
collect_result(demo.full_name, device, case_model_names, execution_time, fps, args.report_file)
print()
print("Failures: {}".format(num_failures))
sys.exit(0 if num_failures == 0 else 1)
|
28,030 |
def validate_proxy_format(transport):
"""
It will check the proxy settings once and validate the proxy settings.
If the proxy settings are invalid, it will print an error message and
stop the program.
"""
global PROXY_VALIDATION_NEEDED
if PROXY_VALIDATION_NEEDED:
if not proxy_settings_are_valid(transport):
LOG.error("Invalid proxy format! Check your "
"HTTP_PROXY/HTTPS_PROXY environment variables if "
"these are in the right format:"
"'http[s]://host:port'.")
sys.exit(1)
PROXY_VALIDATION_NEEDED = False
|
def validate_proxy_format(transport):
"""
It will check the proxy settings once and validate the proxy settings.
If the proxy settings are invalid, it will print an error message and
stop the program.
"""
global PROXY_VALIDATION_NEEDED
if PROXY_VALIDATION_NEEDED:
if not proxy_settings_are_valid(transport):
LOG.error("Invalid proxy format! Check your "
"HTTP_PROXY/HTTPS_PROXY environment variables if "
"these are in the right format: "
"'http[s]://host:port'.")
sys.exit(1)
PROXY_VALIDATION_NEEDED = False
|
49,109 |
def test_issue_5654():
assert residue(1/(x**2 + a**2)**2, x, a*I) == -I/(4*a**3)
assert residue( (1)/s*1/(z-exp(s)), s, -0) == 1/(z - 1)
assert residue((1 + k)/s*1/(z-exp(s)), s, -0) == k/(z - 1) + 1/(z - 1)
|
def test_issue_5654():
assert residue(1/(x**2 + a**2)**2, x, a*I) == -I/(4*a**3)
assert residue(1/s*1/(z - exp(s)), s, 0) == 1/(z - 1)
assert residue((1 + k)/s*1/(z - exp(s)), s, 0) == k/(z - 1) + 1/(z - 1)
|
17,501 |
def get_example_data(case: int) -> xr.DataArray:
x = np.linspace(0, 1, 100)
y = np.linspace(0, 0.1, 30)
data = xr.DataArray(
np.sin(x[:, np.newaxis]) * np.cos(y),
dims=["x", "y"],
coords={"x": x, "y": y, "x2": ("x", x**2)},
)
if case == 0:
# 2D
return data
elif case == 1:
# 2D chunked single
return data.chunk({"y": 3})
elif case == 2:
# 2D chunged both
return data.chunk({"x": 25, "y": 3})
elif case == 3:
# 3D
x = np.linspace(0, 1, 100)
y = np.linspace(0, 0.1, 30)
z = np.linspace(0.1, 0.2, 10)
return xr.DataArray(
np.sin(x[:, np.newaxis, np.newaxis]) * np.cos(y[:, np.newaxis]) * z,
dims=["x", "y", "z"],
coords={"x": x, "y": y, "x2": ("x", x**2), "z": z},
)
elif case == 4:
# 3D chunked
return get_example_data(3).chunk({"z": 5})
else:
raise ValueError("case must be 1-4")
|
def get_example_data(case: int) -> xr.DataArray:
x = np.linspace(0, 1, 100)
y = np.linspace(0, 0.1, 30)
data = xr.DataArray(
np.sin(x[:, np.newaxis]) * np.cos(y),
dims=["x", "y"],
coords={"x": x, "y": y, "x2": ("x", x**2)},
)
if case == 0:
# 2D
return data
elif case == 1:
# 2D chunked single
return data.chunk({"y": 3})
elif case == 2:
# 2D chunked both
return data.chunk({"x": 25, "y": 3})
elif case == 3:
# 3D
x = np.linspace(0, 1, 100)
y = np.linspace(0, 0.1, 30)
z = np.linspace(0.1, 0.2, 10)
return xr.DataArray(
np.sin(x[:, np.newaxis, np.newaxis]) * np.cos(y[:, np.newaxis]) * z,
dims=["x", "y", "z"],
coords={"x": x, "y": y, "x2": ("x", x**2), "z": z},
)
elif case == 4:
# 3D chunked
return get_example_data(3).chunk({"z": 5})
else:
raise ValueError("case must be 1-4")
|
9,045 |
def rate(
user: int = 0,
channel: int = 0,
server: int = 0,
*, message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited.
:param user: seconds between permitted calls of this function by the same
user
:param channel: seconds between permitted calls of this function in the
same channel, regardless of triggering user
:param server: seconds between permitted calls of this function no matter
who triggered it or where
:param message: optional keyword argument; default message send as notice
when a rate limit is reached
How often a function can be triggered on a per-user basis, in a channel,
or across the server (bot) can be controlled with this decorator. A value
of ``0`` means no limit. If a function is given a rate of 20, that
function may only be used once every 20 seconds in the scope corresponding
to the parameter::
from sopel import plugin
@plugin.rate(10)
# won't trigger if used more than once per 10s by a user
@plugin.rate(10, 10)
# won't trigger if used more than once per 10s by a user/channel
@plugin.rate(10, 10, 2)
# won't trigger if used more than once per 10s by a user/channel
# and never more than once every 2s
If a ``message`` is provided, it will be used as the default message sent
as a ``NOTICE`` to the user who hit the rate limit::
@rate(10, 10, 10, message='Hit the rate limit for this function.')
# will send a NOTICE
Rate-limited functions that use scheduled future commands should import
:class:`threading.Timer` instead of :mod:`sched`, or rate limiting will
not work properly.
.. versionchanged:: 8.0
Optional keyword argument ``message`` was added in Sopel 8.
.. note::
Users on the admin list in Sopel’s configuration are exempted from rate
limits.
.. seealso::
You can control each rate limit separatly, with their own custom
message using :func:`rate_user`, :func:`rate_channel`, or
:func:`rate_server`.
"""
def add_attribute(function):
if not hasattr(function, 'rate'):
function.rate = user
if not hasattr(function, 'channel_rate'):
function.channel_rate = channel
if not hasattr(function, 'global_rate'):
function.global_rate = server
function.default_rate_message = message
return function
return add_attribute
|
def rate(
user: int = 0,
channel: int = 0,
server: int = 0,
*, message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited.
:param user: seconds between permitted calls of this function by the same
user
:param channel: seconds between permitted calls of this function in the
same channel, regardless of triggering user
:param server: seconds between permitted calls of this function no matter
who triggered it or where
:param message: optional keyword argument; default message send as notice
when a rate limit is reached
How often a function can be triggered on a per-user basis, in a channel,
or across the server (bot) can be controlled with this decorator. A value
of ``0`` means no limit. If a function is given a rate of 20, that
function may only be used once every 20 seconds in the scope corresponding
to the parameter::
from sopel import plugin
@plugin.rate(10)
# won't trigger if used more than once per 10s by a user
@plugin.rate(10, 10)
# won't trigger if used more than once per 10s by a user/channel
@plugin.rate(10, 10, 2)
# won't trigger if used more than once per 10s by a user/channel
# and never more than once every 2s
If a ``message`` is provided, it will be used as the default message sent
as a ``NOTICE`` to the user who hit the rate limit::
@rate(10, 10, 10, message='Hit the rate limit for this function.')
# will send a NOTICE
Rate-limited functions that use scheduled future commands should import
:class:`threading.Timer` instead of :mod:`sched`, or rate limiting will
not work properly.
.. versionchanged:: 8.0
Optional keyword argument ``message`` was added in Sopel 8.
.. note::
Users on the admin list in Sopel’s configuration are exempted from rate
limits.
.. seealso::
You can control each rate limit separately, with their own custom
message using :func:`rate_user`, :func:`rate_channel`, or
:func:`rate_server`.
"""
def add_attribute(function):
if not hasattr(function, 'rate'):
function.rate = user
if not hasattr(function, 'channel_rate'):
function.channel_rate = channel
if not hasattr(function, 'global_rate'):
function.global_rate = server
function.default_rate_message = message
return function
return add_attribute
|
19,871 |
def items_for_result(view, result, request):
"""
Generates the actual list of data.
"""
modeladmin = view.model_admin
for field_name in view.list_display:
empty_value_display = modeladmin.get_empty_value_display(field_name)
row_classes = ["field-%s" % field_name, "title"]
try:
f, attr, value = lookup_field(field_name, result, modeladmin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(
attr, "empty_value_display", empty_value_display
)
if f is None or f.auto_created:
allow_tags = getattr(attr, "allow_tags", False)
boolean = getattr(attr, "boolean", False)
if boolean or not value:
allow_tags = True
result_repr = display_for_value(value, empty_value_display, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append("nowrap")
else:
if isinstance(f, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(
f, (models.DateField, models.TimeField, models.ForeignKey)
):
row_classes.append("nowrap")
if force_str(result_repr) == "":
result_repr = mark_safe(" ")
row_classes.extend(
modeladmin.get_extra_class_names_for_field_col(result, field_name)
)
row_attrs = modeladmin.get_extra_attrs_for_field_col(result, field_name)
row_attrs["class"] = " ".join(row_classes)
row_attrs_flat = flatatt(row_attrs)
primary_button = None
if field_name == modeladmin.get_list_display_add_buttons(request):
primary_button = view.button_helper.get_primary_button_for_obj(result)
if primary_button is not None and primary_button.get("url"):
yield format_html(
'<td{}><div class="title-wrapper"><a href="{}" title="{}">{}</a></div></td>',
row_attrs_flat,
primary_button.get("url"),
primary_button.get("title", ""),
result_repr,
)
else:
yield format_html("<td{}>{}</td>", row_attrs_flat, result_repr)
|
def items_for_result(view, result, request):
"""
Generates the actual list of data.
"""
modeladmin = view.model_admin
for field_name in view.list_display:
empty_value_display = modeladmin.get_empty_value_display(field_name)
row_classes = ["field-%s" % field_name, "title"]
try:
f, attr, value = lookup_field(field_name, result, modeladmin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(
attr, "empty_value_display", empty_value_display
)
if f is None or f.auto_created:
allow_tags = getattr(attr, "allow_tags", False)
boolean = getattr(attr, "boolean", False)
if boolean or not value:
allow_tags = True
result_repr = display_for_value(value, empty_value_display, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append("nowrap")
else:
if isinstance(f, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(
f, (models.DateField, models.TimeField, models.ForeignKey)
):
row_classes.append("nowrap")
if force_str(result_repr) == "":
result_repr = mark_safe(" ")
row_classes.extend(
modeladmin.get_extra_class_names_for_field_col(result, field_name)
)
row_attrs = modeladmin.get_extra_attrs_for_field_col(result, field_name)
row_attrs["class"] = " ".join(row_classes)
row_attrs_flat = flatatt(row_attrs)
primary_button = None
if field_name == modeladmin.get_list_display_add_buttons(request):
primary_button = view.button_helper.get_primary_button_for_obj(result)
if primary_button is not None and primary_button.get("url"):
yield format_html(
'<td{}><div class="title-wrapper"><a href="{}" title="{}">{}</a></div></td>',
row_attrs_flat,
primary_button["url"],
primary_button.get("title", ""),
result_repr,
)
else:
yield format_html("<td{}>{}</td>", row_attrs_flat, result_repr)
|
6,789 |
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True):
"""
Deletes a doc(dt, dn) and validates if it is not submitted and not linked in a live record
"""
if not ignore_doctypes: ignore_doctypes = []
# get from form
if not doctype:
doctype = frappe.form_dict.get('dt')
name = frappe.form_dict.get('dn')
names = name
if isinstance(name, string_types) or isinstance(name, integer_types):
names = [name]
for name in names or []:
# already deleted..?
if not frappe.db.exists(doctype, name):
if not ignore_missing:
raise frappe.DoesNotExistError
else:
return False
# delete passwords
delete_all_passwords_for(doctype, name)
doc = None
if doctype=="DocType":
if for_reload:
try:
doc = frappe.get_doc(doctype, name)
except frappe.DoesNotExistError:
pass
else:
doc.run_method("before_reload")
else:
doc = frappe.get_doc(doctype, name)
update_flags(doc, flags, ignore_permissions)
check_permission_and_not_submitted(doc)
frappe.db.sql("delete from `tabCustom Field` where dt = %s", name)
frappe.db.sql("delete from `tabCustom Script` where dt = %s", name)
frappe.db.sql("delete from `tabProperty Setter` where doc_type = %s", name)
frappe.db.sql("delete from `tabReport` where ref_doctype=%s", name)
frappe.db.sql("delete from `tabCustom DocPerm` where parent=%s", name)
frappe.db.sql("delete from `__global_search` where doctype=%s", name)
delete_from_table(doctype, name, ignore_doctypes, None)
if not (for_reload or frappe.flags.in_migrate or frappe.flags.in_install or frappe.flags.in_test):
try:
delete_controllers(name, doc.module)
except (FileNotFoundError, OSError):
# in case a doctype doesnt have any controller code
pass
else:
doc = frappe.get_doc(doctype, name)
if not for_reload:
update_flags(doc, flags, ignore_permissions)
check_permission_and_not_submitted(doc)
if not ignore_on_trash:
doc.run_method("on_trash")
doc.flags.in_delete = True
doc.run_method('on_change')
# check if links exist
if not force:
check_if_doc_is_linked(doc)
check_if_doc_is_dynamically_linked(doc)
update_naming_series(doc)
delete_from_table(doctype, name, ignore_doctypes, doc)
doc.run_method("after_delete")
# delete attachments
remove_all(doctype, name, from_delete=True)
if not for_reload:
# Enqueued at the end, because it gets committed
# All the linked docs should be checked beforehand
frappe.enqueue('frappe.model.delete_doc.delete_dynamic_links',
doctype=doc.doctype, name=doc.name,
is_async=False if frappe.flags.in_test else True)
# delete global search entry
delete_for_document(doc)
# delete tags from __global_tags
delete_tags_for_document(doc)
if doc and not for_reload:
add_to_deleted_document(doc)
if not frappe.flags.in_patch:
try:
doc.notify_update()
insert_feed(doc)
except ImportError:
pass
# delete user_permissions
frappe.defaults.clear_default(parenttype="User Permission", key=doctype, value=name)
|
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True):
"""
Deletes a doc(dt, dn) and validates if it is not submitted and not linked in a live record
"""
if not ignore_doctypes: ignore_doctypes = []
# get from form
if not doctype:
doctype = frappe.form_dict.get('dt')
name = frappe.form_dict.get('dn')
names = name
if isinstance(name, string_types) or isinstance(name, integer_types):
names = [name]
for name in names or []:
# already deleted..?
if not frappe.db.exists(doctype, name):
if not ignore_missing:
raise frappe.DoesNotExistError
else:
return False
# delete passwords
delete_all_passwords_for(doctype, name)
doc = None
if doctype=="DocType":
if for_reload:
try:
doc = frappe.get_doc(doctype, name)
except frappe.DoesNotExistError:
pass
else:
doc.run_method("before_reload")
else:
doc = frappe.get_doc(doctype, name)
update_flags(doc, flags, ignore_permissions)
check_permission_and_not_submitted(doc)
frappe.db.sql("delete from `tabCustom Field` where dt = %s", name)
frappe.db.sql("delete from `tabCustom Script` where dt = %s", name)
frappe.db.sql("delete from `tabProperty Setter` where doc_type = %s", name)
frappe.db.sql("delete from `tabReport` where ref_doctype=%s", name)
frappe.db.sql("delete from `tabCustom DocPerm` where parent=%s", name)
frappe.db.sql("delete from `__global_search` where doctype=%s", name)
delete_from_table(doctype, name, ignore_doctypes, None)
if not (for_reload or frappe.flags.in_migrate or frappe.flags.in_install or frappe.flags.in_test):
try:
delete_controllers(name, doc.module)
except (FileNotFoundError, OSError):
# in case a doctype doesnt have any controller code
pass
else:
doc = frappe.get_doc(doctype, name)
if not for_reload:
update_flags(doc, flags, ignore_permissions)
check_permission_and_not_submitted(doc)
if not ignore_on_trash:
doc.run_method("on_trash")
doc.flags.in_delete = True
doc.run_method('on_change')
# check if links exist
if not force:
check_if_doc_is_linked(doc)
check_if_doc_is_dynamically_linked(doc)
update_naming_series(doc)
delete_from_table(doctype, name, ignore_doctypes, doc)
doc.run_method("after_delete")
# delete attachments
remove_all(doctype, name, from_delete=True)
if not for_reload:
# Enqueued at the end, because it gets committed
# All the linked docs should be checked beforehand
frappe.enqueue('frappe.model.delete_doc.delete_dynamic_links',
doctype=doc.doctype, name=doc.name,
is_async=False if frappe.flags.in_test else True)
# delete global search entry
delete_for_document(doc)
# delete tags from Tag Link
delete_tags_for_document(doc)
if doc and not for_reload:
add_to_deleted_document(doc)
if not frappe.flags.in_patch:
try:
doc.notify_update()
insert_feed(doc)
except ImportError:
pass
# delete user_permissions
frappe.defaults.clear_default(parenttype="User Permission", key=doctype, value=name)
|
41,721 |
def objective(trial):
data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.25)
dtrain = lgb.Dataset(train_x, label=train_y)
param = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbosity': -1,
'boosting_type': 'gbdt',
'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0),
'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0),
'num_leaves': trial.suggest_int('num_leaves', 2, 256),
'feature_fraction': min(trial.suggest_uniform('feature_fraction', 0.4, 1.0 + EPS), 1.0),
'bagging_fraction': min(trial.suggest_uniform('bagging_fraction', 0.4, 1.0 + EPS), 1.0),
'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
'min_child_samples': int(trial.suggest_uniform('min_child_samples', 5, 100 + EPS)),
}
gbm = lgb.train(param, dtrain)
preds = gbm.predict(test_x)
pred_labels = np.rint(preds)
accuracy = sklearn.metrics.accuracy_score(test_y, pred_labels)
return accuracy
|
def objective(trial):
data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.25)
dtrain = lgb.Dataset(train_x, label=train_y)
param = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbosity': -1,
'boosting_type': 'gbdt',
'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0),
'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0),
'num_leaves': trial.suggest_int('num_leaves', 2, 256),
'feature_fraction': min(trial.suggest_uniform('feature_fraction', 0.4, 1.0 + EPS), 1.0),
'bagging_fraction': min(trial.suggest_uniform('bagging_fraction', 0.4, 1.0 + EPS), 1.0),
'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
}
gbm = lgb.train(param, dtrain)
preds = gbm.predict(test_x)
pred_labels = np.rint(preds)
accuracy = sklearn.metrics.accuracy_score(test_y, pred_labels)
return accuracy
|
43,125 |
def get_model_name(queue, task_id: str):
dependency_task = queue.task(task_id)
# Check the route to detect training tasks
for route in dependency_task["routes"]:
if fnmatch(route, QUEUE_ROUTE_PATTERN):
model_name = route.split(".")[4] # model_name = "train_component"
return model_name[6:]
# Show a warning if no matching route was found, this can happens when the
# current task has a dependency to a non-training task or if the route
# pattern changes.
LOGGER.warning(f"No matching route found for task id {task_id}")
|
def get_model_name(queue, task_id: str):
dependency_task = queue.task(task_id)
# Check the route to detect training tasks
for route in dependency_task["routes"]:
if fnmatch(route, QUEUE_ROUTE_PATTERN):
model_name = route.split(".")[4] # model_name = "train_component"
return model_name[6:]
# Show a warning if no matching route was found, this can happen when the
# current task has a dependency to a non-training task or if the route
# pattern changes.
LOGGER.warning(f"No matching route found for task id {task_id}")
|
41,999 |
def _create_zmatrix_from_zmap(zmap: Dict[complex, Union[int, float]], shape: int) -> np.ndarray:
# converts hashmap of coordinates to grid
zmatrix = np.zeros(shape=(shape, shape))
for coord, value in zmap.items():
zmatrix[int(coord.imag), int(coord.real)] += value
return zmatrix
|
def _create_zmatrix_from_zmap(zmap: Dict[complex, float], shape: int) -> np.ndarray:
# converts hashmap of coordinates to grid
zmatrix = np.zeros(shape=(shape, shape))
for coord, value in zmap.items():
zmatrix[int(coord.imag), int(coord.real)] += value
return zmatrix
|
3,304 |
def nice_int(x):
"""
Round up to the nearest "nice" number.
"""
if x == 0:
return 0
exp = int(math.log10(x))
if x < 10:
rounded = 10 ** exp
steps = [1, 2, 5, 10]
elif x < 100:
rounded = 10 ** (exp - 1)
steps = [10, 20, 25, 50, 100]
else:
rounded = 10 ** (exp - 2)
steps = [100, 120, 200, 250, 500, 750, 1000]
nice_frac = steps[-1]
frac = x / rounded
for step in steps:
if frac <= step:
nice_frac = step
break
return nice_frac * rounded
|
def nice_int(x):
"""
Round up to the nearest "nice" number.
"""
if x < 0:
return 0
exp = int(math.log10(x))
if x < 10:
rounded = 10 ** exp
steps = [1, 2, 5, 10]
elif x < 100:
rounded = 10 ** (exp - 1)
steps = [10, 20, 25, 50, 100]
else:
rounded = 10 ** (exp - 2)
steps = [100, 120, 200, 250, 500, 750, 1000]
nice_frac = steps[-1]
frac = x / rounded
for step in steps:
if frac <= step:
nice_frac = step
break
return nice_frac * rounded
|
40,795 |
def _torch_median_kthval(output: torch.Tensor) -> float:
output = output.view(-1)
len_ = len(output)
if len_ % 2 == 0:
return float((torch.kthvalue(output, len_ // 2)[0] + torch.kthvalue(output, len_ // 2 + 1)[0]) / 2)
else:
return float(torch.kthvalue(output, len_ // 2 + 1)[0])
|
def _torch_median(output: torch.Tensor) -> float:
output = output.view(-1)
len_ = len(output)
if len_ % 2 == 0:
return float((torch.kthvalue(output, len_ // 2)[0] + torch.kthvalue(output, len_ // 2 + 1)[0]) / 2)
else:
return float(torch.kthvalue(output, len_ // 2 + 1)[0])
|
786 |
def combine_bundles(bundle1, bundle2, comb_method='rlap', distance='mdf',
n_stream=2000):
"""Combine two bundles.
Combines two bundles into a single one by using different methods to match,
average and pick the streamlines from the two bundles. Bundles need to be
already in the same space and streamlines must have the same number of
points.
Parameters
----------
bundle1 : list
Streamline coordinates as a list of 2D ndarrays of shape[-1]==3
bundle2 : list
Streamline coordinates as a list of 2D ndarrays of shape[-1]==3
comb_method : str, optional
Method to be used to combine the two bundles. Default is 'rlap_keep'.
distance : str, optional
Distance used for streamline matching. Used by all methods except for
'random_pick'. Default is 'mdf'. The 'mdf_se' distance uses only the
start and end points.
n_stream : int, optional
Number of streamlines to be selected when comb_method='random_pick'.
Default is 2000.
Returns
-------
combined : list
Streamline coordinates of the combined bundle as a list of 2D ndarrays
of shape[-1]==3.
"""
# If random_pick just merge all streamlines and pick n_stream randomly
if comb_method == 'random_pick':
bundles = np.concatenate((bundle1, bundle2))
return select_random_set_of_streamlines(bundles, n_stream)
def distance_matrix_mdf_start_end(bundle_1, bundle_2):
bundle_1 = set_number_of_points(bundle_1, 2)
bundle_2 = set_number_of_points(bundle_2, 2)
return distance_matrix_mdf(bundle_1, bundle_2)
if distance == 'mdf':
distance = distance_matrix_mdf
elif distance == 'mdf_se':
distance = distance_matrix_mdf_start_end
else:
raise ValueError("Incorrect distance metric")
# Set as bundle 1 the bundle with less streamlines
if len(bundle2) < len(bundle1):
aux = bundle1.copy()
bundle1 = bundle2
bundle2 = aux
# Compute distance matrix
cost = distance(bundle1, bundle2)
combined = []
if comb_method == 'rlap':
# Minimize the sum of distances (RLAP)
matched_pairs = np.asarray(linear_sum_assignment(cost)).T
# For each matched pair, reorient and average
for ind1, ind2 in matched_pairs:
stream1 = bundle1[ind1]
stream2 = bundle2[ind2]
stream2 = orient_by_streamline([stream2], stream1)
stream2, _ = unlist_streamlines(stream2)
stream_mean = np.mean([stream1, stream2], axis=0)
combined.append(stream_mean)
elif comb_method == 'rlap_closest':
n_stream = len(bundle2)
# Solve the linear assignment problem
ind_lap1, ind_lap2 = linear_sum_assignment(cost)
for ind2 in range(n_stream):
# Check if streamline already matched by RLAP
aux = np.argwhere(ind_lap2 == ind2)
if aux.size > 0:
ind1 = ind_lap1[aux[0][0]]
# If not, find the closest streamline
else:
ind1 = np.argmin(cost[:, ind2])
# Get matched streamline pair, reorient and average
stream1 = bundle1[ind1]
stream2 = bundle2[ind2]
stream2 = orient_by_streamline([stream2], stream1)
stream2, _ = unlist_streamlines(stream2)
stream_mean = np.mean([stream1, stream2], axis=0)
combined.append(stream_mean)
elif comb_method == 'rlap_keep':
n_stream = len(bundle2)
# Solve the linear assignment problem
ind_lap1, ind_lap2 = linear_sum_assignment(cost)
for ind2 in range(n_stream):
# If streamline already matched by RLAP, average them
aux = np.argwhere(ind_lap2 == ind2)
if aux.size > 0:
ind1 = ind_lap1[aux[0][0]]
stream1 = bundle1[ind1]
stream2 = bundle2[ind2]
stream2 = orient_by_streamline([stream2], stream1)
stream2, _ = unlist_streamlines(stream2)
combined.append(np.mean([stream1, stream2], axis=0))
# If not matched keep it as it is
else:
combined.append(bundle2[ind2])
else:
raise ValueError("Not supported bundle combination method")
return combined
|
def combine_bundles(bundle1, bundle2, comb_method='rlap', distance='mdf',
n_stream=2000):
"""Combine two bundles.
Combines two bundles into a single one by using different methods to match,
average and pick the streamlines from the two bundles. Bundles need to be
already in the same space and streamlines must have the same number of
points.
Parameters
----------
bundle1 : list
Streamline coordinates as a list of 2D ndarrays of shape[-1]==3
bundle2 : list
Streamline coordinates as a list of 2D ndarrays of shape[-1]==3
comb_method : str, optional
Method to be used to combine the two bundles. Default is 'rlap_keep'.
distance : str, optional
Distance used for streamline matching. Used by all methods except for
'random_pick'. Default is 'mdf'. The 'mdf_se' distance uses only the
start and end points.
n_stream : int, optional
Number of streamlines to be selected when comb_method='random_pick'.
Default is 2000.
Returns
-------
combined : list
Streamline coordinates of the combined bundle as a list of 2D ndarrays
of shape[-1]==3.
"""
# If random_pick just merge all streamlines and pick n_stream randomly
if comb_method == 'random_pick':
bundles = np.concatenate((bundle1, bundle2))
return select_random_set_of_streamlines(bundles, n_stream)
def distance_matrix_mdf_start_end(bundle_1, bundle_2):
bundle_1 = set_number_of_points(bundle_1, 2)
bundle_2 = set_number_of_points(bundle_2, 2)
return distance_matrix_mdf(bundle_1, bundle_2)
if distance == 'mdf':
distance = distance_matrix_mdf
elif distance == 'mdf_se':
distance = distance_matrix_mdf_start_end
else:
raise ValueError(f'You provided a distance input {distance}, but the possible options are: "mdf" or "mdf_se"')
# Set as bundle 1 the bundle with less streamlines
if len(bundle2) < len(bundle1):
aux = bundle1.copy()
bundle1 = bundle2
bundle2 = aux
# Compute distance matrix
cost = distance(bundle1, bundle2)
combined = []
if comb_method == 'rlap':
# Minimize the sum of distances (RLAP)
matched_pairs = np.asarray(linear_sum_assignment(cost)).T
# For each matched pair, reorient and average
for ind1, ind2 in matched_pairs:
stream1 = bundle1[ind1]
stream2 = bundle2[ind2]
stream2 = orient_by_streamline([stream2], stream1)
stream2, _ = unlist_streamlines(stream2)
stream_mean = np.mean([stream1, stream2], axis=0)
combined.append(stream_mean)
elif comb_method == 'rlap_closest':
n_stream = len(bundle2)
# Solve the linear assignment problem
ind_lap1, ind_lap2 = linear_sum_assignment(cost)
for ind2 in range(n_stream):
# Check if streamline already matched by RLAP
aux = np.argwhere(ind_lap2 == ind2)
if aux.size > 0:
ind1 = ind_lap1[aux[0][0]]
# If not, find the closest streamline
else:
ind1 = np.argmin(cost[:, ind2])
# Get matched streamline pair, reorient and average
stream1 = bundle1[ind1]
stream2 = bundle2[ind2]
stream2 = orient_by_streamline([stream2], stream1)
stream2, _ = unlist_streamlines(stream2)
stream_mean = np.mean([stream1, stream2], axis=0)
combined.append(stream_mean)
elif comb_method == 'rlap_keep':
n_stream = len(bundle2)
# Solve the linear assignment problem
ind_lap1, ind_lap2 = linear_sum_assignment(cost)
for ind2 in range(n_stream):
# If streamline already matched by RLAP, average them
aux = np.argwhere(ind_lap2 == ind2)
if aux.size > 0:
ind1 = ind_lap1[aux[0][0]]
stream1 = bundle1[ind1]
stream2 = bundle2[ind2]
stream2 = orient_by_streamline([stream2], stream1)
stream2, _ = unlist_streamlines(stream2)
combined.append(np.mean([stream1, stream2], axis=0))
# If not matched keep it as it is
else:
combined.append(bundle2[ind2])
else:
raise ValueError("Not supported bundle combination method")
return combined
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.